From a4cf4f3c0482e918fd1cdf7317fefd7a7e37f149 Mon Sep 17 00:00:00 2001
From: QianQianowo
Date: Mon, 27 May 2024 23:03:58 +0800
Subject: [PATCH] upgrade kubernetes and go-libp2p packages
Signed-off-by: QianQianowo
---
.github/workflows/main.yaml | 2 +-
.../github.com/cheekybits/genny/LICENSE | 26 -
.../vendor/github.com/golang/mock/LICENSE | 206 +
.../vendor/github.com/google/pprof/LICENSE | 206 +
.../github.com/libp2p/go-libp2p-core/LICENSE | 8 -
.../github.com/libp2p/go-openssl/LICENSE | 195 -
.../libp2p/go-yamux/{v3 => v4}/LICENSE | 4 +-
.../marten-seemann/qtls-go1-17/LICENSE | 31 -
.../marten-seemann/qtls-go1-18/LICENSE | 31 -
.../github.com/mattn/go-pointer/LICENSE | 25 -
LICENSES/vendor/github.com/miekg/dns/LICENSE | 55 +-
LICENSES/vendor/github.com/nxadm/tail/LICENSE | 25 -
.../github.com/onsi/ginkgo/{ => v2}/LICENSE | 4 +-
.../vendor/github.com/quic-go/qpack/LICENSE | 11 +
.../qtls-go1-19/LICENSE | 4 +-
.../qtls-go1-20}/LICENSE | 4 +-
.../quic-go/LICENSE | 4 +-
.../quic-go/webtransport-go/LICENSE | 11 +
.../vendor/github.com/satori/go.uuid/LICENSE | 24 -
.../github.com/spacemonkeygo/spacelog/LICENSE | 195 -
.../vendor/go.uber.org/dig}/LICENSE | 14 +-
.../vendor/go.uber.org/fx}/LICENSE | 13 +-
.../vendor/golang.org/x/exp}/LICENSE | 4 +
LICENSES/vendor/gopkg.in/tomb.v1/LICENSE | 33 -
go.mod | 141 +-
go.sum | 435 +-
pkg/apis/module/module.go | 4 +-
pkg/tunnel/module.go | 6 +-
pkg/tunnel/tunnel.go | 7 +-
tests/e2e/k8s/http.go | 2 +-
tests/e2e/scripts/execute.sh | 2 +-
tests/e2e/traffic/traffic_suite_test.go | 2 +-
tests/e2e/traffic/traffic_test.go | 2 +-
vendor/github.com/benbjohnson/clock/clock.go | 72 +-
vendor/github.com/cespare/xxhash/v2/README.md | 31 +-
.../github.com/cespare/xxhash/v2/testall.sh | 10 +
vendor/github.com/cespare/xxhash/v2/xxhash.go | 47 +-
.../cespare/xxhash/v2/xxhash_amd64.s | 336 +-
.../cespare/xxhash/v2/xxhash_arm64.s | 183 +
.../v2/{xxhash_amd64.go => xxhash_asm.go} | 2 +
.../cespare/xxhash/v2/xxhash_other.go | 22 +-
.../cespare/xxhash/v2/xxhash_safe.go | 1 +
.../cespare/xxhash/v2/xxhash_unsafe.go | 3 +-
vendor/github.com/cheekybits/genny/.gitignore | 26 -
.../github.com/cheekybits/genny/.travis.yml | 6 -
vendor/github.com/cheekybits/genny/README.md | 245 -
vendor/github.com/cheekybits/genny/doc.go | 2 -
.../cheekybits/genny/generic/doc.go | 2 -
.../cheekybits/genny/generic/generic.go | 13 -
vendor/github.com/cheekybits/genny/main.go | 154 -
.../cheekybits/genny/out/lazy_file.go | 38 -
.../cheekybits/genny/parse/builtins.go | 41 -
.../github.com/cheekybits/genny/parse/doc.go | 14 -
.../cheekybits/genny/parse/errors.go | 47 -
.../cheekybits/genny/parse/parse.go | 298 -
.../cheekybits/genny/parse/typesets.go | 89 -
.../github.com/containerd/cgroups/README.md | 59 +-
.../github.com/containerd/cgroups/Vagrantfile | 46 -
.../github.com/containerd/cgroups/cgroup.go | 12 +-
.../github.com/containerd/cgroups/cpuacct.go | 36 +-
.../github.com/containerd/cgroups/cpuset.go | 5 +-
.../github.com/containerd/cgroups/freezer.go | 4 +-
vendor/github.com/containerd/cgroups/pids.go | 3 +-
vendor/github.com/containerd/cgroups/rdma.go | 5 +-
vendor/github.com/containerd/cgroups/utils.go | 7 +-
.../coreos/go-systemd/v22/dbus/dbus.go | 5 +
.../coreos/go-systemd/v22/dbus/methods.go | 34 +
.../decred/dcrd/dcrec/secp256k1/v4/ecdh.go | 4 +-
.../decred/dcrd/dcrec/secp256k1/v4/privkey.go | 46 +-
vendor/github.com/docker/go-units/size.go | 70 +-
vendor/github.com/go-logr/logr/.golangci.yaml | 3 -
vendor/github.com/go-logr/logr/discard.go | 32 +-
vendor/github.com/go-logr/logr/funcr/funcr.go | 804 ++
vendor/github.com/go-logr/logr/logr.go | 166 +-
vendor/github.com/golang/mock/AUTHORS | 12 +
vendor/github.com/golang/mock/CONTRIBUTORS | 37 +
vendor/github.com/golang/mock/LICENSE | 202 +
.../github.com/golang/mock/mockgen/mockgen.go | 701 ++
.../golang/mock/mockgen/model/model.go | 495 +
.../github.com/golang/mock/mockgen/parse.go | 644 ++
.../github.com/golang/mock/mockgen/reflect.go | 256 +
.../mock/mockgen/version.1.11.go} | 15 +-
.../mock/mockgen/version.1.12.go} | 23 +-
.../github.com/google/go-cmp/cmp/compare.go | 64 +-
.../google/go-cmp/cmp/internal/diff/diff.go | 44 +-
.../google/go-cmp/cmp/internal/value/zero.go | 48 -
.../github.com/google/go-cmp/cmp/options.go | 10 +-
vendor/github.com/google/go-cmp/cmp/path.go | 20 +-
.../google/go-cmp/cmp/report_compare.go | 10 +-
.../google/go-cmp/cmp/report_reflect.go | 11 +-
.../google/go-cmp/cmp/report_slices.go | 25 +-
.../google/go-cmp/cmp/report_text.go | 1 +
vendor/github.com/google/pprof/AUTHORS | 7 +
vendor/github.com/google/pprof/CONTRIBUTORS | 16 +
vendor/github.com/google/pprof/LICENSE | 202 +
.../github.com/google/pprof/profile/encode.go | 588 +
.../github.com/google/pprof/profile/filter.go | 274 +
.../github.com/google/pprof/profile/index.go | 64 +
.../pprof/profile/legacy_java_profile.go | 315 +
.../google/pprof/profile/legacy_profile.go | 1228 +++
.../github.com/google/pprof/profile/merge.go | 667 ++
.../google/pprof/profile/profile.go | 856 ++
.../github.com/google/pprof/profile/proto.go | 367 +
.../github.com/google/pprof/profile/prune.go | 194 +
.../github.com/hashicorp/errwrap/errwrap.go | 9 +
vendor/github.com/huin/goupnp/README.md | 11 +
.../huin/goupnp/dcps/internetgateway1/gen.go | 2 +-
.../dcps/internetgateway1/internetgateway1.go | 216 +-
.../huin/goupnp/dcps/internetgateway2/gen.go | 2 +-
.../dcps/internetgateway2/internetgateway2.go | 288 +-
vendor/github.com/huin/goupnp/device.go | 14 +-
vendor/github.com/huin/goupnp/go.work | 6 +
vendor/github.com/huin/goupnp/goupnp.go | 41 +-
vendor/github.com/huin/goupnp/httpu/serve.go | 20 +-
.../github.com/huin/goupnp/service_client.go | 25 +-
vendor/github.com/huin/goupnp/soap/soap.go | 8 +-
vendor/github.com/huin/goupnp/soap/types.go | 50 +
vendor/github.com/huin/goupnp/ssdp/ssdp.go | 18 +-
.../huin/goupnp/workspace.code-workspace | 11 +
vendor/github.com/ipfs/go-cid/README.md | 2 +-
vendor/github.com/ipfs/go-cid/cid.go | 130 +-
vendor/github.com/ipfs/go-cid/cid_fuzz.go | 1 -
vendor/github.com/ipfs/go-cid/version.json | 2 +-
.../github.com/ipfs/go-datastore/basic_ds.go | 56 -
.../github.com/ipfs/go-datastore/datastore.go | 27 +-
.../github.com/ipfs/go-datastore/features.go | 132 +
.../github.com/ipfs/go-datastore/null_ds.go | 120 +
.../github.com/ipfs/go-datastore/version.json | 2 +-
.../github.com/klauspost/compress/.gitignore | 7 +
.../klauspost/compress/.goreleaser.yml | 2 +-
.../github.com/klauspost/compress/README.md | 151 +-
.../klauspost/compress/fse/compress.go | 31 +-
.../klauspost/compress/fse/decompress.go | 4 +-
.../klauspost/compress/huff0/autogen.go | 5 -
.../klauspost/compress/huff0/bitreader.go | 18 +-
.../klauspost/compress/huff0/bitwriter.go | 125 +-
.../klauspost/compress/huff0/bytereader.go | 10 -
.../klauspost/compress/huff0/compress.go | 118 +-
.../klauspost/compress/huff0/decompress.go | 151 +-
.../compress/huff0/decompress_8b_amd64.s | 488 -
.../compress/huff0/decompress_8b_amd64.s.in | 197 -
.../compress/huff0/decompress_amd64.go | 179 +-
.../compress/huff0/decompress_amd64.s | 1150 +-
.../compress/huff0/decompress_amd64.s.in | 195 -
.../compress/huff0/decompress_generic.go | 120 +-
.../compress/internal/cpuinfo/cpuinfo.go | 34 +
.../internal/cpuinfo/cpuinfo_amd64.go | 11 +
.../compress/internal/cpuinfo/cpuinfo_amd64.s | 36 +
.../compress/internal/snapref/encode_other.go | 28 +-
.../klauspost/compress/zstd/README.md | 60 +-
.../klauspost/compress/zstd/bitreader.go | 7 -
.../klauspost/compress/zstd/bitwriter.go | 76 -
.../klauspost/compress/zstd/blockdec.go | 92 +-
.../klauspost/compress/zstd/blockenc.go | 9 +-
.../klauspost/compress/zstd/bytebuf.go | 24 +-
.../klauspost/compress/zstd/bytereader.go | 6 -
.../klauspost/compress/zstd/decodeheader.go | 9 +-
.../klauspost/compress/zstd/decoder.go | 254 +-
.../compress/zstd/decoder_options.go | 79 +-
.../klauspost/compress/zstd/dict.go | 51 +-
.../klauspost/compress/zstd/enc_base.go | 28 +-
.../klauspost/compress/zstd/enc_best.go | 264 +-
.../klauspost/compress/zstd/enc_better.go | 43 +-
.../klauspost/compress/zstd/enc_dfast.go | 33 +-
.../klauspost/compress/zstd/enc_fast.go | 20 +-
.../klauspost/compress/zstd/encoder.go | 119 +-
.../compress/zstd/encoder_options.go | 42 +-
.../klauspost/compress/zstd/framedec.go | 143 +-
.../klauspost/compress/zstd/fse_decoder.go | 128 +-
.../compress/zstd/fse_decoder_amd64.go | 65 +
.../compress/zstd/fse_decoder_amd64.s | 126 +
.../compress/zstd/fse_decoder_generic.go | 72 +
.../klauspost/compress/zstd/fse_encoder.go | 23 -
.../klauspost/compress/zstd/fuzz.go | 11 -
.../klauspost/compress/zstd/fuzz_none.go | 11 -
.../klauspost/compress/zstd/hash.go | 6 -
.../klauspost/compress/zstd/history.go | 21 +-
.../compress/zstd/internal/xxhash/README.md | 49 +-
.../compress/zstd/internal/xxhash/xxhash.go | 47 +-
.../zstd/internal/xxhash/xxhash_amd64.s | 336 +-
.../zstd/internal/xxhash/xxhash_arm64.s | 140 +-
.../zstd/internal/xxhash/xxhash_asm.go | 2 +-
.../zstd/internal/xxhash/xxhash_other.go | 19 +-
.../klauspost/compress/zstd/seqdec.go | 271 +-
.../klauspost/compress/zstd/seqdec_amd64.go | 394 +
.../klauspost/compress/zstd/seqdec_amd64.s | 4175 +++++++
.../klauspost/compress/zstd/seqdec_generic.go | 237 +
.../github.com/klauspost/compress/zstd/zip.go | 57 +-
.../klauspost/compress/zstd/zstd.go | 46 +-
.../github.com/klauspost/cpuid/v2/README.md | 241 +-
vendor/github.com/klauspost/cpuid/v2/cpuid.go | 235 +-
.../klauspost/cpuid/v2/detect_x86.go | 2 +-
.../klauspost/cpuid/v2/featureid_string.go | 367 +-
.../klauspost/cpuid/v2/os_darwin_arm64.go | 2 +-
vendor/github.com/koron/go-ssdp/Makefile | 20 +-
vendor/github.com/koron/go-ssdp/advertise.go | 90 +-
vendor/github.com/koron/go-ssdp/announce.go | 50 +-
vendor/github.com/koron/go-ssdp/doc.go | 2 +-
.../koron/go-ssdp/internal/multicast/doc.go | 4 +
.../{ => internal/multicast}/interface.go | 37 +-
.../{ => internal/multicast}/multicast.go | 55 +-
.../koron/go-ssdp/internal/multicast/udp.go | 65 +
.../koron/go-ssdp/internal/ssdplog/ssdplog.go | 16 +
vendor/github.com/koron/go-ssdp/location.go | 40 +
vendor/github.com/koron/go-ssdp/log.go | 12 -
vendor/github.com/koron/go-ssdp/monitor.go | 14 +-
vendor/github.com/koron/go-ssdp/search.go | 17 +-
vendor/github.com/koron/go-ssdp/ssdp.go | 37 +
vendor/github.com/koron/go-ssdp/udp.go | 65 -
.../kubeedge/beehive/pkg/common/type.go | 27 +
.../{context => channel}/context_channel.go | 161 +-
.../beehive/pkg/core/context/context.go | 16 +-
.../pkg/core/context/context_factory.go | 231 +-
.../pkg/core/context/context_unixsocket.go | 71 -
.../kubeedge/beehive/pkg/core/core.go | 70 +-
.../beehive/pkg/core/model/message.go | 108 +-
.../kubeedge/beehive/pkg/core/module.go | 60 +-
.../beehive/pkg/core/socket/broker/broker.go | 143 +
.../beehive/pkg/core/socket/config/config.go | 88 +
.../beehive/pkg/core/socket/context_socket.go | 376 +
.../beehive/pkg/core/socket/helper.go | 57 +
.../beehive/pkg/core/socket/store/pipe.go | 108 +
.../beehive/pkg/core/socket/store/pipeinfo.go | 42 +
.../pkg/core/socket/synckeeper/keeper.go | 68 +
.../pkg/core/socket/wrapper/packer/packer.go | 114 +
.../pkg/core/socket/wrapper/reader/package.go | 79 +
.../pkg/core/socket/wrapper/reader/raw.go | 64 +
.../pkg/core/socket/wrapper/reader/reader.go | 30 +
.../pkg/core/socket/wrapper/wrapper.go | 91 +
.../pkg/core/socket/wrapper/writer/package.go | 67 +
.../pkg/core/socket/wrapper/writer/raw.go | 45 +
.../pkg/core/socket/wrapper/writer/writer.go | 30 +
.../kubeedge/common/constants/default.go | 40 +-
.../devices/v1alpha2/device_model_types.go | 2 +
.../devices/v1alpha2/zz_generated.deepcopy.go | 1 +
.../apis/rules/v1/zz_generated.deepcopy.go | 1 +
.../kubeedge/tests/e2e/constants/constants.go | 5 +
.../kubeedge/tests/e2e/utils/common.go | 93 +-
.../kubeedge/kubeedge/tests/e2e/utils/log.go | 9 +-
.../kubeedge/kubeedge/tests/e2e/utils/node.go | 4 +-
.../kubeedge/kubeedge/tests/e2e/utils/pod.go | 46 +-
.../libp2p/go-libp2p-asn-util/asn.go | 40 +-
.../libp2p/go-libp2p-asn-util/version.json | 2 +-
.../github.com/libp2p/go-libp2p-core/LICENSE | 4 -
.../libp2p/go-libp2p-core/LICENSE-APACHE | 13 -
.../libp2p/go-libp2p-core/crypto/ecdsa.go | 87 -
.../libp2p/go-libp2p-core/crypto/ed25519.go | 33 -
.../libp2p/go-libp2p-core/crypto/key.go | 149 -
.../go-libp2p-core/crypto/key_not_openssl.go | 28 -
.../go-libp2p-core/crypto/key_openssl.go | 28 -
.../go-libp2p-core/crypto/rsa_common.go | 19 -
.../libp2p/go-libp2p-core/crypto/rsa_go.go | 36 -
.../go-libp2p-core/crypto/rsa_openssl.go | 36 -
.../libp2p/go-libp2p-core/crypto/secp256k1.go | 33 -
.../libp2p/go-libp2p-core/peer/addrinfo.go | 54 -
.../libp2p/go-libp2p-core/peer/peer.go | 100 -
.../libp2p/go-libp2p-core/peer/record.go | 100 -
.../go-libp2p-core/peerstore/helpers.go | 12 -
.../go-libp2p-core/peerstore/peerstore.go | 120 -
.../libp2p/go-libp2p-core/routing/options.go | 20 -
.../libp2p/go-libp2p-core/routing/query.go | 72 -
.../libp2p/go-libp2p-core/routing/routing.go | 69 -
.../libp2p/go-libp2p-kad-dht/README.md | 8 +-
.../libp2p/go-libp2p-kad-dht/dht.go | 4 +-
.../internal/config/config.go | 2 +-
.../internal/net/message_manager.go | 2 +
.../providers/providers_manager.go | 12 +-
.../go-libp2p-kad-dht/subscriber_notifee.go | 2 +-
.../libp2p/go-libp2p-kad-dht/version.json | 2 +-
.../libp2p/go-libp2p-kbucket/.travis.yml | 30 -
.../libp2p/go-libp2p-kbucket/bucket.go | 11 +-
.../go-libp2p-kbucket/peerdiversity/filter.go | 10 +-
.../libp2p/go-libp2p-kbucket/sorting.go | 4 +-
.../libp2p/go-libp2p-kbucket/table.go | 24 +-
.../libp2p/go-libp2p-kbucket/table_refresh.go | 2 +-
.../libp2p/go-libp2p-kbucket/util.go | 11 +-
.../libp2p/go-libp2p-kbucket/version.json | 3 +
.../go-libp2p-routing-helpers/.travis.yml | 30 -
.../go-libp2p-routing-helpers/compconfig.go | 27 +
.../go-libp2p-routing-helpers/composed.go | 11 +-
.../go-libp2p-routing-helpers/compparallel.go | 323 +
.../compsequential.go | 238 +
.../go-libp2p-routing-helpers/limited.go | 6 +-
.../libp2p/go-libp2p-routing-helpers/null.go | 5 +-
.../go-libp2p-routing-helpers/parallel.go | 18 +-
.../go-libp2p-routing-helpers/tiered.go | 13 +-
.../go-libp2p-routing-helpers/version.json | 3 +
vendor/github.com/libp2p/go-libp2p/.gitignore | 4 +
.../github.com/libp2p/go-libp2p/CHANGELOG.md | 293 +
vendor/github.com/libp2p/go-libp2p/README.md | 36 +-
vendor/github.com/libp2p/go-libp2p/ROADMAP.md | 5 +
.../github.com/libp2p/go-libp2p/SECURITY.md | 20 +
.../libp2p/go-libp2p/config/config.go | 221 +-
.../go-libp2p/config/constructor_types.go | 80 -
.../github.com/libp2p/go-libp2p/config/log.go | 28 +
.../libp2p/go-libp2p/config/muxer.go | 63 -
.../go-libp2p/config/quic_stateless_reset.go | 27 +
.../go-libp2p/config/reflection_magic.go | 173 -
.../libp2p/go-libp2p/config/security.go | 78 -
.../libp2p/go-libp2p/config/transport.go | 69 -
.../libp2p/go-libp2p/core/connmgr/gater.go | 1 -
.../libp2p/go-libp2p/core/crypto/key.go | 25 +-
.../go-libp2p/core/crypto/key_openssl.go | 101 -
.../{key_not_openssl.go => key_to_stdlib.go} | 3 -
.../go-libp2p/core/crypto/openssl_common.go | 104 -
.../libp2p/go-libp2p/core/crypto/pb/Makefile | 11 -
.../go-libp2p/core/crypto/pb/crypto.pb.go | 748 +-
.../go-libp2p/core/crypto/rsa_common.go | 3 +
.../libp2p/go-libp2p/core/crypto/rsa_go.go | 12 +-
.../go-libp2p/core/crypto/rsa_openssl.go | 69 -
.../libp2p/go-libp2p/core/event/bus.go | 3 +
.../libp2p/go-libp2p/core/host/host.go | 16 +-
.../go-libp2p/core/introspection/doc.go | 7 -
.../go-libp2p/core/introspection/endpoint.go | 30 -
.../core/introspection/introspector.go | 39 -
.../go-libp2p/core/introspection/pb/Makefile | 11 -
.../go-libp2p/core/introspection/pb/doc.go | 3 -
.../core/introspection/pb/introspection.pb.go | 9718 -----------------
.../core/introspection/pb/introspection.proto | 421 -
.../libp2p/go-libp2p/core/network/conn.go | 23 +-
.../libp2p/go-libp2p/core/network/network.go | 36 +-
.../libp2p/go-libp2p/core/network/rcmgr.go | 85 +-
.../libp2p/go-libp2p/core/peer/addrinfo.go | 2 +-
.../libp2p/go-libp2p/core/peer/pb/Makefile | 11 -
.../go-libp2p/core/peer/pb/peer_record.pb.go | 710 +-
.../libp2p/go-libp2p/core/peer/peer_serde.go | 2 -
.../libp2p/go-libp2p/core/peer/record.go | 10 +-
.../go-libp2p/core/peerstore/peerstore.go | 20 +-
.../libp2p/go-libp2p/core/protocol/switch.go | 25 +-
.../libp2p/go-libp2p/core/record/envelope.go | 15 +-
.../libp2p/go-libp2p/core/record/pb/Makefile | 11 -
.../go-libp2p/core/record/pb/envelope.pb.go | 577 +-
.../go-libp2p/core/record/pb/envelope.proto | 2 +-
.../go-libp2p/core/sec/insecure/insecure.go | 85 +-
.../go-libp2p/core/sec/insecure/pb/Makefile | 11 -
.../core/sec/insecure/pb/plaintext.pb.go | 459 +-
.../core/sec/insecure/pb/plaintext.proto | 2 +-
.../libp2p/go-libp2p/core/sec/security.go | 17 +-
.../go-libp2p/core/transport/transport.go | 10 +
.../github.com/libp2p/go-libp2p/defaults.go | 67 +-
.../github.com/libp2p/go-libp2p/error_util.go | 17 -
vendor/github.com/libp2p/go-libp2p/libp2p.go | 3 +-
vendor/github.com/libp2p/go-libp2p/limits.go | 17 +-
vendor/github.com/libp2p/go-libp2p/options.go | 207 +-
.../go-libp2p/p2p/discovery/util/util.go | 2 +-
.../go-libp2p/p2p/host/autonat/autonat.go | 200 +-
.../go-libp2p/p2p/host/autonat/client.go | 36 +-
.../go-libp2p/p2p/host/autonat/interface.go | 5 +-
.../go-libp2p/p2p/host/autonat/metrics.go | 162 +
.../go-libp2p/p2p/host/autonat/options.go | 9 +
.../go-libp2p/p2p/host/autonat/pb/Makefile | 6 -
.../p2p/host/autonat/pb/autonat.pb.go | 1450 +--
.../go-libp2p/p2p/host/autonat/proto.go | 4 +-
.../libp2p/go-libp2p/p2p/host/autonat/svc.go | 21 +-
.../go-libp2p/p2p/host/autorelay/autorelay.go | 20 +-
.../go-libp2p/p2p/host/autorelay/host.go | 4 +
.../go-libp2p/p2p/host/autorelay/metrics.go | 373 +
.../go-libp2p/p2p/host/autorelay/options.go | 177 +-
.../p2p/host/autorelay/relay_finder.go | 461 +-
.../go-libp2p/p2p/host/autorelay/timer.go | 42 -
.../go-libp2p/p2p/host/basic/basic_host.go | 425 +-
.../libp2p/go-libp2p/p2p/host/basic/mocks.go | 6 +
.../libp2p/go-libp2p/p2p/host/basic/natmgr.go | 200 +-
.../p2p/host/basic/peer_connectedness.go | 71 -
.../libp2p/go-libp2p/p2p/host/blank/blank.go | 35 +-
.../p2p/host/blank/peer_connectedness.go | 71 -
.../p2p/host/eventbus/LICENSE-APACHE | 5 -
.../go-libp2p/p2p/host/eventbus/basic.go | 159 +-
.../p2p/host/eventbus/basic_metrics.go | 164 +
.../go-libp2p/p2p/host/eventbus/opts.go | 47 +
.../p2p/host/peerstore/pstoremem/addr_book.go | 19 +-
.../p2p/host/peerstore/pstoremem/protobook.go | 31 +-
.../p2p/host/pstoremanager/pstoremanager.go | 6 +-
.../go-libp2p/p2p/host/relaysvc/relay.go | 17 +-
.../p2p/host/resource-manager/README.md | 79 +-
.../p2p/host/resource-manager/error.go | 18 +-
.../p2p/host/resource-manager/extapi.go | 2 +-
.../p2p/host/resource-manager/limit.go | 125 +-
.../limit_config_test.backwards-compat.json | 45 +
.../host/resource-manager/limit_defaults.go | 596 +-
.../p2p/host/resource-manager/obs/stats.go | 410 +-
.../p2p/host/resource-manager/rcmgr.go | 43 +-
.../p2p/host/resource-manager/scope.go | 67 +-
.../p2p/host/resource-manager/sys_unix.go | 1 -
.../go-libp2p/p2p/host/routed/routed.go | 36 +-
.../go-libp2p/p2p/metricshelper/conn.go | 29 +
.../libp2p/go-libp2p/p2p/metricshelper/dir.go | 14 +
.../go-libp2p/p2p/metricshelper/pool.go | 26 +
.../go-libp2p/p2p/metricshelper/registerer.go | 20 +
.../muxer/muxer-multistream/multistream.go | 75 -
.../libp2p/go-libp2p/p2p/muxer/yamux/conn.go | 2 +-
.../go-libp2p/p2p/muxer/yamux/stream.go | 2 +-
.../go-libp2p/p2p/muxer/yamux/transport.go | 13 +-
.../p2p/net/conn-security-multistream/ssms.go | 110 -
.../go-libp2p/p2p/net/connmgr/connmgr.go | 143 +-
.../libp2p/go-libp2p/p2p/net/connmgr/decay.go | 31 +-
.../go-libp2p/p2p/net/connmgr/options.go | 11 +
.../libp2p/go-libp2p/p2p/net/nat/mapping.go | 119 -
.../libp2p/go-libp2p/p2p/net/nat/nat.go | 212 +-
.../libp2p/go-libp2p/p2p/net/swarm/clock.go | 49 +
.../go-libp2p/p2p/net/swarm/dial_ranker.go | 170 +
.../go-libp2p/p2p/net/swarm/dial_worker.go | 385 +-
.../libp2p/go-libp2p/p2p/net/swarm/swarm.go | 145 +-
.../go-libp2p/p2p/net/swarm/swarm_addr.go | 2 +-
.../go-libp2p/p2p/net/swarm/swarm_conn.go | 28 +-
.../go-libp2p/p2p/net/swarm/swarm_dial.go | 274 +-
.../go-libp2p/p2p/net/swarm/swarm_listen.go | 21 +-
.../go-libp2p/p2p/net/swarm/swarm_metrics.go | 237 +
.../go-libp2p/p2p/net/swarm/swarm_stream.go | 12 +-
.../p2p/net/swarm/swarm_transport.go | 18 +-
.../libp2p/go-libp2p/p2p/net/upgrader/conn.go | 14 +
.../go-libp2p/p2p/net/upgrader/listener.go | 4 +
.../go-libp2p/p2p/net/upgrader/upgrader.go | 238 +-
.../p2p/protocol/circuitv1/pb/Makefile | 11 -
.../p2p/protocol/circuitv1/pb/circuitv1.pb.go | 868 --
.../p2p/protocol/circuitv1/pb/circuitv1.proto | 44 -
.../p2p/protocol/circuitv1/relay/options.go | 46 -
.../p2p/protocol/circuitv1/relay/relay.go | 452 -
.../p2p/protocol/circuitv2/client/client.go | 2 -
.../p2p/protocol/circuitv2/client/conn.go | 24 +-
.../p2p/protocol/circuitv2/client/dial.go | 67 +-
.../p2p/protocol/circuitv2/client/handlers.go | 83 -
.../p2p/protocol/circuitv2/client/listen.go | 4 +-
.../protocol/circuitv2/client/reservation.go | 52 +-
.../protocol/circuitv2/client/transport.go | 17 +-
.../p2p/protocol/circuitv2/pb/Makefile | 11 -
.../p2p/protocol/circuitv2/pb/circuit.pb.go | 2073 +---
.../p2p/protocol/circuitv2/pb/circuit.proto | 22 +-
.../p2p/protocol/circuitv2/pb/voucher.pb.go | 533 +-
.../p2p/protocol/circuitv2/pb/voucher.proto | 12 +-
.../p2p/protocol/circuitv2/proto/protocol.go | 1 -
.../p2p/protocol/circuitv2/proto/voucher.go | 16 +-
.../p2p/protocol/circuitv2/relay/metrics.go | 268 +
.../p2p/protocol/circuitv2/relay/options.go | 16 +
.../p2p/protocol/circuitv2/relay/relay.go | 191 +-
.../p2p/protocol/circuitv2/util/io.go | 9 +-
.../p2p/protocol/circuitv2/util/pbconv.go | 64 +-
.../p2p/protocol/holepunch/filter.go | 27 +
.../p2p/protocol/holepunch/holepuncher.go | 73 +-
.../p2p/protocol/holepunch/metrics.go | 187 +
.../p2p/protocol/holepunch/pb/Makefile | 11 -
.../p2p/protocol/holepunch/pb/holepunch.pb.go | 510 +-
.../go-libp2p/p2p/protocol/holepunch/svc.go | 53 +-
.../p2p/protocol/holepunch/tracer.go | 257 +-
.../go-libp2p/p2p/protocol/holepunch/util.go | 9 +
.../go-libp2p/p2p/protocol/identify/id.go | 718 +-
.../p2p/protocol/identify/id_delta.go | 82 -
.../p2p/protocol/identify/id_go117.go | 22 -
.../p2p/protocol/identify/id_push.go | 17 -
.../p2p/protocol/identify/metrics.go | 206 +
.../p2p/protocol/identify/obsaddr.go | 81 +-
.../go-libp2p/p2p/protocol/identify/opts.go | 16 +
.../p2p/protocol/identify/pb/Makefile | 11 -
.../p2p/protocol/identify/pb/identify.pb.go | 1055 +-
.../p2p/protocol/identify/pb/identify.proto | 10 -
.../p2p/protocol/identify/peer_loop.go | 264 -
.../identify/{id_go118.go => user_agent.go} | 2 -
.../go-libp2p/p2p/protocol/ping/ping.go | 2 +-
.../go-libp2p/p2p/security/noise/handshake.go | 102 +-
.../go-libp2p/p2p/security/noise/pb/Makefile | 11 -
.../p2p/security/noise/pb/payload.pb.go | 557 +-
.../p2p/security/noise/pb/payload.proto | 13 +-
.../go-libp2p/p2p/security/noise/session.go | 45 +-
.../p2p/security/noise/session_transport.go | 79 +-
.../go-libp2p/p2p/security/noise/transport.go | 90 +-
.../libp2p/go-libp2p/p2p/security/tls/conn.go | 18 +-
.../go-libp2p/p2p/security/tls/crypto.go | 33 +-
.../go-libp2p/p2p/security/tls/transport.go | 79 +-
.../go-libp2p/p2p/transport/quic/conn.go | 51 +-
.../go-libp2p/p2p/transport/quic/listener.go | 109 +-
.../p2p/transport/quic/quic_multiaddr.go | 30 -
.../go-libp2p/p2p/transport/quic/stream.go | 2 +-
.../go-libp2p/p2p/transport/quic/transport.go | 299 +-
.../p2p/transport/quic/virtuallistener.go | 175 +
.../p2p/transport/quicreuse/config.go | 25 +
.../p2p/transport/quicreuse/connmgr.go | 234 +
.../p2p/transport/quicreuse/listener.go | 227 +
.../p2p/transport/quicreuse/options.go | 28 +
.../p2p/transport/quicreuse/quic_multiaddr.go | 64 +
.../transport/{quic => quicreuse}/reuse.go | 107 +-
.../transport/{quic => quicreuse}/tracer.go | 26 +-
.../{quic => quicreuse}/tracer_metrics.go | 53 +-
.../go-libp2p/p2p/transport/tcp/metrics.go | 5 +-
.../libp2p/go-libp2p/p2p/transport/tcp/tcp.go | 40 +-
.../p2p/transport/websocket/addrs.go | 62 +-
.../go-libp2p/p2p/transport/websocket/conn.go | 13 +
.../p2p/transport/websocket/listener.go | 65 +-
.../p2p/transport/websocket/websocket.go | 111 +-
.../transport/webtransport/cert_manager.go | 213 +
.../p2p/transport/webtransport/conn.go | 82 +
.../p2p/transport/webtransport/crypto.go | 155 +
.../p2p/transport/webtransport/listener.go | 216 +
.../p2p/transport/webtransport/multiaddr.go | 107 +
.../webtransport/noise_early_data.go | 36 +
.../p2p/transport/webtransport/stream.go | 71 +
.../p2p/transport/webtransport/transport.go | 414 +
vendor/github.com/libp2p/go-libp2p/tools.go | 9 +
.../github.com/libp2p/go-libp2p/version.json | 2 +-
vendor/github.com/libp2p/go-msgio/fuzz.go | 1 -
vendor/github.com/libp2p/go-msgio/msgio.go | 2 +-
vendor/github.com/libp2p/go-msgio/num.go | 5 +-
.../libp2p/go-msgio/pbio/interfaces.go | 40 +
.../libp2p/go-msgio/pbio/uvarint_reader.go | 93 +
.../libp2p/go-msgio/pbio/uvarint_writer.go | 103 +
.../libp2p/go-msgio/protoio/interfaces.go | 10 +-
.../libp2p/go-msgio/protoio/uvarint_reader.go | 10 +-
.../libp2p/go-msgio/protoio/uvarint_writer.go | 10 +-
.../github.com/libp2p/go-msgio/version.json | 2 +-
vendor/github.com/libp2p/go-nat/nat.go | 9 +-
vendor/github.com/libp2p/go-nat/natpmp.go | 6 +-
vendor/github.com/libp2p/go-nat/upnp.go | 38 +-
vendor/github.com/libp2p/go-nat/version.json | 3 +
.../github.com/libp2p/go-netroute/common.go | 46 +-
.../libp2p/go-netroute/netroute_bsd.go | 1 -
.../libp2p/go-netroute/netroute_linux.go | 1 -
.../libp2p/go-netroute/netroute_stub.go | 1 -
.../libp2p/go-netroute/netroute_windows.go | 9 +-
.../libp2p/go-netroute/sockaddr_windows.go | 1 -
.../libp2p/go-netroute/version.json | 2 +-
.../github.com/libp2p/go-openssl/.gitignore | 1 -
vendor/github.com/libp2p/go-openssl/AUTHORS | 24 -
vendor/github.com/libp2p/go-openssl/LICENSE | 191 -
vendor/github.com/libp2p/go-openssl/README.md | 40 -
vendor/github.com/libp2p/go-openssl/alloc.go | 19 -
vendor/github.com/libp2p/go-openssl/bio.go | 305 -
vendor/github.com/libp2p/go-openssl/build.go | 25 -
.../libp2p/go-openssl/build_static.go | 25 -
vendor/github.com/libp2p/go-openssl/cert.go | 432 -
.../github.com/libp2p/go-openssl/ciphers.go | 335 -
.../libp2p/go-openssl/ciphers_gcm.go | 152 -
vendor/github.com/libp2p/go-openssl/conn.go | 621 --
vendor/github.com/libp2p/go-openssl/ctx.go | 618 --
vendor/github.com/libp2p/go-openssl/dh.go | 66 -
.../github.com/libp2p/go-openssl/dhparam.go | 64 -
vendor/github.com/libp2p/go-openssl/digest.go | 51 -
vendor/github.com/libp2p/go-openssl/engine.go | 50 -
.../github.com/libp2p/go-openssl/extension.c | 40 -
vendor/github.com/libp2p/go-openssl/fips.go | 52 -
vendor/github.com/libp2p/go-openssl/hmac.go | 91 -
.../github.com/libp2p/go-openssl/hostname.c | 373 -
.../github.com/libp2p/go-openssl/hostname.go | 140 -
vendor/github.com/libp2p/go-openssl/http.go | 61 -
vendor/github.com/libp2p/go-openssl/init.go | 116 -
.../libp2p/go-openssl/init_posix.go | 69 -
.../libp2p/go-openssl/init_windows.go | 58 -
vendor/github.com/libp2p/go-openssl/key.go | 522 -
.../github.com/libp2p/go-openssl/mapping.go | 62 -
vendor/github.com/libp2p/go-openssl/md4.go | 92 -
vendor/github.com/libp2p/go-openssl/md5.go | 89 -
vendor/github.com/libp2p/go-openssl/net.go | 166 -
vendor/github.com/libp2p/go-openssl/nid.go | 210 -
vendor/github.com/libp2p/go-openssl/object.go | 24 -
vendor/github.com/libp2p/go-openssl/pem.go | 28 -
vendor/github.com/libp2p/go-openssl/sha1.go | 96 -
vendor/github.com/libp2p/go-openssl/sha256.go | 89 -
vendor/github.com/libp2p/go-openssl/shim.c | 778 --
vendor/github.com/libp2p/go-openssl/shim.h | 184 -
vendor/github.com/libp2p/go-openssl/sni.c | 23 -
vendor/github.com/libp2p/go-openssl/ssl.go | 172 -
.../github.com/libp2p/go-openssl/tickets.go | 224 -
.../libp2p/go-openssl/utils/errors.go | 50 -
.../libp2p/go-openssl/utils/future.go | 79 -
.../github.com/libp2p/go-reuseport/README.md | 5 +-
.../libp2p/go-reuseport/control_unix.go | 15 +-
.../libp2p/go-reuseport/control_wasm.go | 1 -
.../libp2p/go-reuseport/control_windows.go | 6 +-
.../libp2p/go-reuseport/interface.go | 25 +-
.../libp2p/go-reuseport/version.json | 2 +-
.../libp2p/go-yamux/v3/version.json | 3 -
.../libp2p/go-yamux/{v3 => v4}/.gitignore | 0
.../libp2p/go-yamux/{v3 => v4}/LICENSE | 0
.../libp2p/go-yamux/{v3 => v4}/LICENSE-BSD | 0
.../libp2p/go-yamux/{v3 => v4}/README.md | 0
.../libp2p/go-yamux/{v3 => v4}/addr.go | 0
.../libp2p/go-yamux/{v3 => v4}/const.go | 0
.../libp2p/go-yamux/{v3 => v4}/deadline.go | 0
.../libp2p/go-yamux/{v3 => v4}/mux.go | 12 +-
.../libp2p/go-yamux/{v3 => v4}/ping.go | 0
.../libp2p/go-yamux/{v3 => v4}/session.go | 105 +-
.../libp2p/go-yamux/{v3 => v4}/spec.md | 0
.../libp2p/go-yamux/{v3 => v4}/stream.go | 23 +-
.../libp2p/go-yamux/{v3 => v4}/util.go | 26 +-
.../libp2p/go-yamux/v4/version.json | 3 +
.../lucas-clemente/quic-go/README.md | 61 -
.../lucas-clemente/quic-go/closed_conn.go | 112 -
.../quic-go/internal/ackhandler/ackhandler.go | 21 -
.../quic-go/internal/ackhandler/frame.go | 9 -
.../quic-go/internal/ackhandler/gen.go | 3 -
.../quic-go/internal/ackhandler/mockgen.go | 3 -
.../internal/ackhandler/packet_linkedlist.go | 217 -
.../ackhandler/sent_packet_history.go | 108 -
.../quic-go/internal/handshake/mockgen.go | 3 -
.../quic-go/internal/handshake/retry.go | 62 -
.../internal/protocol/connection_id.go | 69 -
.../quic-go/internal/qtls/go116.go | 100 -
.../quic-go/internal/qtls/go117.go | 100 -
.../quic-go/internal/qtls/go120.go | 6 -
.../quic-go/internal/qtls/go_oldversion.go | 7 -
.../quic-go/internal/utils/atomic_bool.go | 22 -
.../quic-go/internal/utils/gen.go | 5 -
.../quic-go/internal/utils/minmax.go | 170 -
.../internal/utils/new_connection_id.go | 12 -
.../utils/newconnectionid_linkedlist.go | 217 -
.../quic-go/internal/utils/packet_interval.go | 9 -
.../utils/packetinterval_linkedlist.go | 217 -
.../internal/utils/streamframe_interval.go | 9 -
.../quic-go/internal/wire/extended_header.go | 249 -
.../lucas-clemente/quic-go/logging/mockgen.go | 4 -
.../lucas-clemente/quic-go/mockgen.go | 27 -
.../lucas-clemente/quic-go/packet_packer.go | 894 --
.../quic-go/streams_map_generic_helper.go | 18 -
.../quic-go/streams_map_incoming_bidi.go | 192 -
.../quic-go/streams_map_incoming_generic.go | 190 -
.../quic-go/streams_map_outgoing_bidi.go | 226 -
.../quic-go/streams_map_outgoing_generic.go | 224 -
.../lucas-clemente/quic-go/tools.go | 9 -
.../marten-seemann/qtls-go1-16/README.md | 6 -
.../marten-seemann/qtls-go1-16/auth.go | 289 -
.../qtls-go1-16/cipher_suites.go | 532 -
.../marten-seemann/qtls-go1-16/common.go | 1576 ---
.../marten-seemann/qtls-go1-16/common_js.go | 12 -
.../marten-seemann/qtls-go1-16/common_nojs.go | 20 -
.../marten-seemann/qtls-go1-16/conn.go | 1536 ---
.../qtls-go1-16/handshake_client.go | 1105 --
.../qtls-go1-16/handshake_client_tls13.go | 740 --
.../qtls-go1-16/handshake_messages.go | 1832 ----
.../qtls-go1-16/handshake_server.go | 878 --
.../qtls-go1-16/handshake_server_tls13.go | 912 --
.../qtls-go1-16/key_agreement.go | 338 -
.../marten-seemann/qtls-go1-16/ticket.go | 259 -
.../marten-seemann/qtls-go1-16/tls.go | 393 -
.../marten-seemann/qtls-go1-17/README.md | 6 -
.../qtls-go1-17/cipher_suites.go | 691 --
.../marten-seemann/qtls-go1-17/common.go | 1485 ---
.../marten-seemann/qtls-go1-17/conn.go | 1601 ---
.../qtls-go1-17/handshake_client.go | 1111 --
.../qtls-go1-17/handshake_messages.go | 1832 ----
.../qtls-go1-17/handshake_server.go | 905 --
.../qtls-go1-17/handshake_server_tls13.go | 896 --
.../marten-seemann/qtls-go1-17/prf.go | 283 -
.../marten-seemann/qtls-go1-18/README.md | 6 -
.../marten-seemann/qtls-go1-18/alert.go | 102 -
.../marten-seemann/qtls-go1-18/auth.go | 289 -
.../qtls-go1-18/handshake_client_tls13.go | 732 --
.../qtls-go1-18/key_schedule.go | 199 -
.../marten-seemann/qtls-go1-18/unsafe.go | 96 -
.../marten-seemann/qtls-go1-19/README.md | 6 -
.../marten-seemann/qtls-go1-19/alert.go | 102 -
.../marten-seemann/qtls-go1-19/cpu.go | 22 -
.../marten-seemann/qtls-go1-19/cpu_other.go | 12 -
.../qtls-go1-19/key_agreement.go | 357 -
.../qtls-go1-19/key_schedule.go | 199 -
.../marten-seemann/qtls-go1-19/prf.go | 283 -
.../marten-seemann/qtls-go1-19/ticket.go | 274 -
.../marten-seemann/qtls-go1-19/tls.go | 362 -
.../marten-seemann/qtls-go1-19/unsafe.go | 96 -
.../github.com/mattn/go-isatty/isatty_bsd.go | 4 +-
vendor/github.com/mattn/go-pointer/README.md | 29 -
vendor/github.com/mattn/go-pointer/doc.go | 1 -
vendor/github.com/mattn/go-pointer/pointer.go | 57 -
vendor/github.com/miekg/dns/LICENSE | 49 +-
vendor/github.com/miekg/dns/README.md | 6 +
vendor/github.com/miekg/dns/acceptfunc.go | 1 -
vendor/github.com/miekg/dns/client.go | 62 +-
vendor/github.com/miekg/dns/clientconfig.go | 2 +-
vendor/github.com/miekg/dns/defaults.go | 22 +-
vendor/github.com/miekg/dns/dnssec.go | 14 +-
vendor/github.com/miekg/dns/doc.go | 86 +-
vendor/github.com/miekg/dns/edns.go | 52 +-
vendor/github.com/miekg/dns/fuzz.go | 1 +
vendor/github.com/miekg/dns/labels.go | 2 +-
.../miekg/dns/listen_no_reuseport.go | 3 +-
.../github.com/miekg/dns/listen_reuseport.go | 2 +-
vendor/github.com/miekg/dns/msg.go | 60 +-
vendor/github.com/miekg/dns/msg_helpers.go | 42 +-
vendor/github.com/miekg/dns/scan.go | 20 +-
vendor/github.com/miekg/dns/scan_rr.go | 125 +-
vendor/github.com/miekg/dns/server.go | 4 +-
vendor/github.com/miekg/dns/singleinflight.go | 61 -
vendor/github.com/miekg/dns/svcb.go | 63 +-
vendor/github.com/miekg/dns/tools.go | 1 +
vendor/github.com/miekg/dns/types.go | 119 +-
vendor/github.com/miekg/dns/udp.go | 1 +
vendor/github.com/miekg/dns/udp_windows.go | 8 +-
vendor/github.com/miekg/dns/version.go | 2 +-
vendor/github.com/miekg/dns/xfr.go | 1 -
vendor/github.com/miekg/dns/zduplicate.go | 58 +
vendor/github.com/miekg/dns/zmsg.go | 110 +
vendor/github.com/miekg/dns/ztypes.go | 453 +-
.../minio/sha256-simd/cpuid_other.go | 6 +-
vendor/github.com/minio/sha256-simd/sha256.go | 131 +-
.../sha256-simd/sha256blockAvx512_amd64.go | 3 +-
.../sha256-simd/sha256blockAvx512_amd64.s | 2 +-
.../minio/sha256-simd/sha256blockSha_amd64.go | 6 -
.../minio/sha256-simd/sha256block_amd64.go | 14 +-
...56blockSha_amd64.s => sha256block_amd64.s} | 4 +-
.../minio/sha256-simd/sha256block_arm64.go | 13 +-
.../minio/sha256-simd/sha256block_arm64.s | 4 +-
.../minio/sha256-simd/sha256block_other.go | 11 +-
.../multiformats/go-base32/base32.go | 3 +-
.../go-base32}/version.json | 0
.../multiformats/go-base36/README.md | 2 +-
.../multiformats/go-base36/base36.go | 106 +-
.../multiformats/go-base36/version.json | 3 +
.../multiformats/go-multiaddr/doc.go | 41 +-
.../multiformats/go-multiaddr/filter.go | 5 +-
.../multiformats/go-multiaddr/interface.go | 13 +-
.../multiformats/go-multiaddr/net/net.go | 16 +-
.../multiformats/go-multiaddr/protocols.go | 96 +-
.../multiformats/go-multiaddr/version.json | 2 +-
.../multiformats/go-multibase/version.json | 2 +-
.../multiformats/go-multicodec/README.md | 30 +-
.../multiformats/go-multicodec/code.go | 2 +-
.../multiformats/go-multicodec/code_string.go | 1023 +-
.../multiformats/go-multicodec/code_table.go | 308 +-
.../multiformats/go-multicodec/version.json | 2 +-
.../register/all/multihash_all.go | 22 +-
.../register/blake2/multihash_blake2.go | 2 +-
.../register/blake3/multihash_blake3.go | 12 +-
.../miniosha256/multihash_miniosha256.go | 23 -
.../register/miniosha256/post_go1.21.go | 22 +
.../register/miniosha256/pre_go1_21.go | 29 +
.../register/murmur3/multihash_murmur3.go | 12 +-
.../go-multihash/register/sha256/sha256.go | 21 +
.../register/sha3/multihash_sha3.go | 18 +-
.../multiformats/go-multihash/sum.go | 4 +-
.../multiformats/go-multihash/version.json | 2 +-
.../multiformats/go-multistream/.gitignore | 1 -
.../multiformats/go-multistream/README.md | 2 +-
.../multiformats/go-multistream/client.go | 100 +-
.../multiformats/go-multistream/lazyClient.go | 34 +-
.../go-multistream/multistream.go | 70 +-
.../go-multistream/multistream_fuzz.go | 29 -
.../multiformats/go-multistream/version.json | 2 +-
.../multiformats/go-varint/.travis.yml | 30 -
.../multiformats/go-varint/varint.go | 7 +-
.../multiformats/go-varint/version.json | 3 +
vendor/github.com/nxadm/tail/.gitignore | 3 -
vendor/github.com/nxadm/tail/CHANGES.md | 56 -
vendor/github.com/nxadm/tail/Dockerfile | 19 -
vendor/github.com/nxadm/tail/LICENSE | 21 -
vendor/github.com/nxadm/tail/README.md | 44 -
.../nxadm/tail/ratelimiter/leakybucket.go | 97 -
.../nxadm/tail/ratelimiter/memory.go | 60 -
.../nxadm/tail/ratelimiter/storage.go | 6 -
vendor/github.com/nxadm/tail/tail.go | 455 -
vendor/github.com/nxadm/tail/tail_posix.go | 17 -
vendor/github.com/nxadm/tail/tail_windows.go | 19 -
vendor/github.com/nxadm/tail/util/util.go | 49 -
.../nxadm/tail/watch/filechanges.go | 37 -
vendor/github.com/nxadm/tail/watch/inotify.go | 136 -
.../nxadm/tail/watch/inotify_tracker.go | 249 -
vendor/github.com/nxadm/tail/watch/polling.go | 119 -
vendor/github.com/nxadm/tail/watch/watch.go | 21 -
.../github.com/nxadm/tail/winfile/winfile.go | 93 -
vendor/github.com/onsi/ginkgo/.travis.yml | 24 -
vendor/github.com/onsi/ginkgo/CHANGELOG.md | 393 -
vendor/github.com/onsi/ginkgo/CONTRIBUTING.md | 33 -
vendor/github.com/onsi/ginkgo/README.md | 169 -
.../github.com/onsi/ginkgo/config/config.go | 232 -
.../onsi/ginkgo/ginkgo/bootstrap_command.go | 201 -
.../onsi/ginkgo/ginkgo/build_command.go | 66 -
.../ginkgo/ginkgo/convert/ginkgo_ast_nodes.go | 123 -
.../onsi/ginkgo/ginkgo/convert/import.go | 90 -
.../ginkgo/ginkgo/convert/package_rewriter.go | 128 -
.../onsi/ginkgo/ginkgo/convert/test_finder.go | 56 -
.../ginkgo/convert/testfile_rewriter.go | 162 -
.../ginkgo/convert/testing_t_rewriter.go | 130 -
.../onsi/ginkgo/ginkgo/convert_command.go | 51 -
.../onsi/ginkgo/ginkgo/generate_command.go | 274 -
.../onsi/ginkgo/ginkgo/help_command.go | 33 -
.../interrupthandler/interrupt_handler.go | 52 -
vendor/github.com/onsi/ginkgo/ginkgo/main.go | 337 -
.../onsi/ginkgo/ginkgo/nodot/nodot.go | 196 -
.../onsi/ginkgo/ginkgo/nodot_command.go | 77 -
.../onsi/ginkgo/ginkgo/notifications.go | 141 -
.../onsi/ginkgo/ginkgo/outline_command.go | 95 -
.../onsi/ginkgo/ginkgo/run_command.go | 316 -
.../run_watch_and_build_command_flags.go | 169 -
.../onsi/ginkgo/ginkgo/suite_runner.go | 173 -
.../ginkgo/ginkgo/testrunner/build_args.go | 7 -
.../ginkgo/testrunner/build_args_old.go | 7 -
.../ginkgo/ginkgo/testrunner/log_writer.go | 52 -
.../ginkgo/ginkgo/testrunner/run_result.go | 27 -
.../ginkgo/ginkgo/testrunner/test_runner.go | 554 -
.../ginkgo/ginkgo/testsuite/test_suite.go | 115 -
.../ginkgo/testsuite/vendor_check_go15.go | 16 -
.../ginkgo/testsuite/vendor_check_go16.go | 15 -
.../onsi/ginkgo/ginkgo/version_command.go | 25 -
.../onsi/ginkgo/ginkgo/watch_command.go | 175 -
vendor/github.com/onsi/ginkgo/ginkgo_dsl.go | 681 --
.../internal/codelocation/code_location.go | 48 -
.../internal/containernode/container_node.go | 151 -
.../onsi/ginkgo/internal/global/init.go | 22 -
.../ginkgo/internal/leafnodes/benchmarker.go | 103 -
.../ginkgo/internal/leafnodes/interfaces.go | 19 -
.../onsi/ginkgo/internal/leafnodes/it_node.go | 47 -
.../ginkgo/internal/leafnodes/measure_node.go | 62 -
.../onsi/ginkgo/internal/leafnodes/runner.go | 117 -
.../ginkgo/internal/leafnodes/setup_nodes.go | 48 -
.../ginkgo/internal/leafnodes/suite_nodes.go | 55 -
.../synchronized_after_suite_node.go | 90 -
.../synchronized_before_suite_node.go | 181 -
.../onsi/ginkgo/internal/remote/aggregator.go | 249 -
.../internal/remote/forwarding_reporter.go | 147 -
.../internal/remote/output_interceptor.go | 13 -
.../remote/output_interceptor_unix.go | 82 -
.../internal/remote/output_interceptor_win.go | 36 -
.../onsi/ginkgo/internal/remote/server.go | 224 -
.../onsi/ginkgo/internal/spec/spec.go | 247 -
.../onsi/ginkgo/internal/spec/specs.go | 144 -
.../internal/spec_iterator/index_computer.go | 55 -
.../spec_iterator/parallel_spec_iterator.go | 59 -
.../spec_iterator/serial_spec_iterator.go | 45 -
.../sharded_parallel_spec_iterator.go | 47 -
.../internal/spec_iterator/spec_iterator.go | 20 -
.../ginkgo/internal/specrunner/random_id.go | 15 -
.../ginkgo/internal/specrunner/spec_runner.go | 411 -
.../onsi/ginkgo/internal/suite/suite.go | 227 -
.../internal/testingtproxy/testing_t_proxy.go | 109 -
.../ginkgo/internal/writer/fake_writer.go | 36 -
.../onsi/ginkgo/internal/writer/writer.go | 89 -
.../onsi/ginkgo/reporters/default_reporter.go | 87 -
.../onsi/ginkgo/reporters/fake_reporter.go | 59 -
.../onsi/ginkgo/reporters/junit_reporter.go | 178 -
.../onsi/ginkgo/reporters/reporter.go | 15 -
.../reporters/stenographer/console_logging.go | 64 -
.../stenographer/fake_stenographer.go | 142 -
.../reporters/stenographer/stenographer.go | 572 -
.../support/go-colorable/README.md | 43 -
.../support/go-colorable/colorable_others.go | 24 -
.../support/go-colorable/noncolorable.go | 57 -
.../stenographer/support/go-isatty/README.md | 37 -
.../stenographer/support/go-isatty/doc.go | 2 -
.../support/go-isatty/isatty_appengine.go | 9 -
.../support/go-isatty/isatty_bsd.go | 18 -
.../support/go-isatty/isatty_linux.go | 18 -
.../support/go-isatty/isatty_solaris.go | 16 -
.../support/go-isatty/isatty_windows.go | 19 -
.../ginkgo/reporters/teamcity_reporter.go | 106 -
.../onsi/ginkgo/types/code_location.go | 15 -
.../onsi/ginkgo/types/synchronization.go | 30 -
vendor/github.com/onsi/ginkgo/types/types.go | 174 -
.../onsi/ginkgo/{ => v2}/.gitignore | 2 +-
vendor/github.com/onsi/ginkgo/v2/CHANGELOG.md | 828 ++
.../github.com/onsi/ginkgo/v2/CONTRIBUTING.md | 13 +
.../github.com/onsi/ginkgo/{ => v2}/LICENSE | 0
vendor/github.com/onsi/ginkgo/v2/README.md | 115 +
.../onsi/ginkgo/{ => v2}/RELEASING.md | 10 +-
.../onsi/ginkgo/v2/config/deprecated.go | 69 +
vendor/github.com/onsi/ginkgo/v2/core_dsl.go | 794 ++
.../onsi/ginkgo/v2/decorator_dsl.go | 143 +
.../onsi/ginkgo/v2/deprecated_dsl.go | 135 +
.../formatter/colorable_others.go} | 20 +
.../formatter}/colorable_windows.go | 74 +-
.../ginkgo/{ => v2}/formatter/formatter.go | 76 +-
.../ginkgo/v2/ginkgo/build/build_command.go | 63 +
.../onsi/ginkgo/v2/ginkgo/command/abort.go | 61 +
.../onsi/ginkgo/v2/ginkgo/command/command.go | 50 +
.../onsi/ginkgo/v2/ginkgo/command/program.go | 182 +
.../ginkgo/generators/boostrap_templates.go | 48 +
.../v2/ginkgo/generators/bootstrap_command.go | 133 +
.../v2/ginkgo/generators/generate_command.go | 259 +
.../ginkgo/generators/generate_templates.go | 41 +
.../v2/ginkgo/generators/generators_common.go | 64 +
.../onsi/ginkgo/v2/ginkgo/internal/compile.go | 161 +
.../ginkgo/internal/profiles_and_reports.go | 237 +
.../onsi/ginkgo/v2/ginkgo/internal/run.go | 355 +
.../ginkgo/v2/ginkgo/internal/test_suite.go | 283 +
.../onsi/ginkgo/v2/ginkgo/internal/utils.go | 86 +
.../v2/ginkgo/internal/verify_version.go | 54 +
.../ginkgo/v2/ginkgo/labels/labels_command.go | 123 +
.../github.com/onsi/ginkgo/v2/ginkgo/main.go | 58 +
.../ginkgo/{ => v2}/ginkgo/outline/ginkgo.go | 129 +-
.../ginkgo/{ => v2}/ginkgo/outline/import.go | 2 +-
.../ginkgo/{ => v2}/ginkgo/outline/outline.go | 23 +-
.../v2/ginkgo/outline/outline_command.go | 98 +
.../onsi/ginkgo/v2/ginkgo/run/run_command.go | 232 +
.../ginkgo/unfocus}/unfocus_command.go | 56 +-
.../ginkgo/{ => v2}/ginkgo/watch/delta.go | 0
.../{ => v2}/ginkgo/watch/delta_tracker.go | 8 +-
.../{ => v2}/ginkgo/watch/dependencies.go | 0
.../{ => v2}/ginkgo/watch/package_hash.go | 12 +-
.../{ => v2}/ginkgo/watch/package_hashes.go | 0
.../ginkgo/{ => v2}/ginkgo/watch/suite.go | 6 +-
.../ginkgo/v2/ginkgo/watch/watch_command.go | 192 +
.../onsi/ginkgo/v2/ginkgo_cli_dependencies.go | 8 +
.../github.com/onsi/ginkgo/v2/ginkgo_t_dsl.go | 94 +
.../onsi/ginkgo/v2/internal/counter.go | 9 +
.../failer => v2/internal}/failer.go | 71 +-
.../onsi/ginkgo/v2/internal/focus.go | 125 +
.../onsi/ginkgo/v2/internal/global/init.go | 17 +
.../onsi/ginkgo/v2/internal/group.go | 380 +
.../interrupt_handler/interrupt_handler.go | 177 +
.../sigquit_swallower_unix.go | 3 +-
.../sigquit_swallower_windows.go | 3 +-
.../onsi/ginkgo/v2/internal/node.go | 922 ++
.../onsi/ginkgo/v2/internal/ordering.go | 171 +
.../ginkgo/v2/internal/output_interceptor.go | 250 +
.../v2/internal/output_interceptor_unix.go | 73 +
.../v2/internal/output_interceptor_win.go | 7 +
.../parallel_support/client_server.go | 72 +
.../internal/parallel_support/http_client.go | 169 +
.../internal/parallel_support/http_server.go | 242 +
.../internal/parallel_support/rpc_client.go | 136 +
.../internal/parallel_support/rpc_server.go | 75 +
.../parallel_support/server_handler.go | 234 +
.../ginkgo/v2/internal/progress_report.go | 287 +
.../ginkgo/v2/internal/progress_report_bsd.go | 11 +
.../v2/internal/progress_report_unix.go | 11 +
.../ginkgo/v2/internal/progress_report_win.go | 8 +
.../v2/internal/progress_reporter_manager.go | 79 +
.../onsi/ginkgo/v2/internal/report_entry.go | 39 +
.../onsi/ginkgo/v2/internal/spec.go | 87 +
.../onsi/ginkgo/v2/internal/spec_context.go | 47 +
.../onsi/ginkgo/v2/internal/suite.go | 1017 ++
.../internal/testingtproxy/testing_t_proxy.go | 210 +
.../onsi/ginkgo/v2/internal/tree.go | 77 +
.../onsi/ginkgo/v2/internal/writer.go | 140 +
.../ginkgo/v2/reporters/default_reporter.go | 759 ++
.../v2/reporters/deprecated_reporter.go | 149 +
.../onsi/ginkgo/v2/reporters/json_report.go | 67 +
.../onsi/ginkgo/v2/reporters/junit_report.go | 376 +
.../onsi/ginkgo/v2/reporters/reporter.go | 29 +
.../ginkgo/v2/reporters/teamcity_report.go | 105 +
.../onsi/ginkgo/v2/reporting_dsl.go | 182 +
vendor/github.com/onsi/ginkgo/v2/table_dsl.go | 309 +
.../onsi/ginkgo/v2/types/code_location.go | 159 +
.../github.com/onsi/ginkgo/v2/types/config.go | 757 ++
.../onsi/ginkgo/v2/types/deprecated_types.go | 141 +
.../{ => v2}/types/deprecation_support.go | 61 +-
.../onsi/ginkgo/v2/types/enum_support.go | 43 +
.../github.com/onsi/ginkgo/v2/types/errors.go | 630 ++
.../onsi/ginkgo/v2/types/file_filter.go | 106 +
.../github.com/onsi/ginkgo/v2/types/flags.go | 489 +
.../onsi/ginkgo/v2/types/label_filter.go | 358 +
.../onsi/ginkgo/v2/types/report_entry.go | 190 +
.../github.com/onsi/ginkgo/v2/types/types.go | 916 ++
.../onsi/ginkgo/v2/types/version.go | 3 +
vendor/github.com/onsi/gomega/.gitignore | 2 +
vendor/github.com/onsi/gomega/.travis.yml | 18 -
vendor/github.com/onsi/gomega/CHANGELOG.md | 304 +
vendor/github.com/onsi/gomega/Dockerfile | 1 -
vendor/github.com/onsi/gomega/Makefile | 33 -
vendor/github.com/onsi/gomega/RELEASING.md | 19 +-
.../onsi/gomega/docker-compose.yaml | 10 -
.../github.com/onsi/gomega/format/format.go | 86 +-
vendor/github.com/onsi/gomega/gomega_dsl.go | 261 +-
.../onsi/gomega/internal/assertion.go | 92 +-
.../onsi/gomega/internal/async_assertion.go | 591 +-
.../onsi/gomega/internal/duration_bundle.go | 16 +-
.../github.com/onsi/gomega/internal/gomega.go | 75 +-
.../onsi/gomega/internal/gutil/post_ioutil.go | 48 +
.../gomega/internal/gutil/using_ioutil.go | 47 +
.../gomega/internal/polling_signal_error.go | 106 +
.../onsi/gomega/internal/vetoptdesc.go | 22 +
vendor/github.com/onsi/gomega/matchers.go | 549 +-
.../matchers/be_comparable_to_matcher.go | 49 +
.../onsi/gomega/matchers/be_key_of_matcher.go | 45 +
.../onsi/gomega/matchers/consist_of.go | 29 +-
.../matchers/contain_element_matcher.go | 120 +-
.../onsi/gomega/matchers/have_each_matcher.go | 65 +
.../gomega/matchers/have_exact_elements.go | 83 +
.../matchers/have_existing_field_matcher.go | 36 +
.../onsi/gomega/matchers/have_field.go | 99 +
.../gomega/matchers/have_http_body_matcher.go | 4 +-
.../matchers/have_http_status_matcher.go | 4 +-
.../gomega/matchers/have_occurred_matcher.go | 2 +-
.../onsi/gomega/matchers/have_value.go | 54 +
.../gomega/matchers/match_error_matcher.go | 12 +-
.../gomega/matchers/match_yaml_matcher.go | 2 +-
.../onsi/gomega/matchers/succeed_matcher.go | 11 +-
.../onsi/gomega/matchers/with_transform.go | 16 +-
vendor/github.com/onsi/gomega/types/types.go | 28 +-
.../client_golang/prometheus/counter.go | 11 +-
.../client_golang/prometheus/doc.go | 107 +-
.../client_golang/prometheus/gauge.go | 6 +-
.../client_golang/prometheus/histogram.go | 978 +-
.../prometheus/internal/almost_equal.go | 60 +
.../prometheus/internal/difflib.go | 13 +-
.../client_golang/prometheus/labels.go | 3 +-
.../client_golang/prometheus/metric.go | 2 +-
.../client_golang/prometheus/promauto/auto.go | 170 +-
.../prometheus/promhttp/instrument_client.go | 5 +-
.../prometheus/promhttp/instrument_server.go | 24 +-
.../client_golang/prometheus/registry.go | 34 +-
.../client_golang/prometheus/summary.go | 9 +-
.../client_golang/prometheus/timer.go | 11 +-
.../prometheus/client_model/go/metrics.pb.go | 1533 ++-
vendor/github.com/quic-go/qpack/.codecov.yml | 7 +
vendor/github.com/quic-go/qpack/.gitignore | 6 +
vendor/github.com/quic-go/qpack/.gitmodules | 3 +
vendor/github.com/quic-go/qpack/.golangci.yml | 27 +
.../Licence => quic-go/qpack/LICENSE.md} | 2 +-
vendor/github.com/quic-go/qpack/README.md | 20 +
vendor/github.com/quic-go/qpack/decoder.go | 271 +
vendor/github.com/quic-go/qpack/encoder.go | 95 +
.../github.com/quic-go/qpack/header_field.go | 16 +
.../github.com/quic-go/qpack/static_table.go | 255 +
vendor/github.com/quic-go/qpack/tools.go | 5 +
vendor/github.com/quic-go/qpack/varint.go | 66 +
.../qtls-go1-19}/LICENSE | 0
.../github.com/quic-go/qtls-go1-19/README.md | 6 +
.../qtls-go1-19}/alert.go | 0
.../qtls-go1-19/auth.go | 0
.../qtls-go1-19/cipher_suites.go | 0
.../qtls-go1-19/common.go | 5 +-
.../qtls-go1-19/conn.go | 83 +-
.../qtls-go1-19}/cpu.go | 0
.../qtls-go1-19}/cpu_other.go | 0
.../qtls-go1-19/handshake_client.go | 132 +-
.../qtls-go1-19/handshake_client_tls13.go | 79 +-
.../qtls-go1-19/handshake_messages.go | 728 +-
.../qtls-go1-19/handshake_server.go | 87 +-
.../qtls-go1-19/handshake_server_tls13.go | 103 +-
.../qtls-go1-19}/key_agreement.go | 0
.../qtls-go1-19}/key_schedule.go | 19 +-
.../qtls-go1-19/notboring.go | 0
.../qtls-go1-19}/prf.go | 0
.../qtls-go1-19}/ticket.go | 17 +-
.../qtls-go1-19}/tls.go | 0
.../qtls-go1-19}/unsafe.go | 0
.../qtls-go1-20}/LICENSE | 0
.../github.com/quic-go/qtls-go1-20/README.md | 6 +
.../qtls-go1-20}/alert.go | 0
.../qtls-go1-20}/auth.go | 4 +
.../github.com/quic-go/qtls-go1-20/cache.go | 95 +
.../qtls-go1-20}/cipher_suites.go | 64 +-
.../qtls-go1-20}/common.go | 53 +-
.../qtls-go1-20}/conn.go | 144 +-
.../qtls-go1-20}/cpu.go | 0
.../qtls-go1-20}/cpu_other.go | 0
.../qtls-go1-20}/handshake_client.go | 171 +-
.../qtls-go1-20}/handshake_client_tls13.go | 118 +-
.../qtls-go1-20}/handshake_messages.go | 748 +-
.../qtls-go1-20}/handshake_server.go | 96 +-
.../qtls-go1-20}/handshake_server_tls13.go | 129 +-
.../qtls-go1-20}/key_agreement.go | 35 +-
.../qtls-go1-20}/key_schedule.go | 121 +-
.../quic-go/qtls-go1-20/notboring.go | 18 +
.../qtls-go1-20}/prf.go | 2 +-
.../qtls-go1-20}/ticket.go | 19 +-
.../qtls-go1-20}/tls.go | 0
.../qtls-go1-20}/unsafe.go | 0
.../quic-go/.gitignore | 0
.../quic-go/.golangci.yml | 0
.../quic-go/Changelog.md | 4 +-
.../quic-go/LICENSE | 0
vendor/github.com/quic-go/quic-go/README.md | 63 +
.../quic-go/buffer_pool.go | 2 +-
.../quic-go/client.go | 29 +-
.../github.com/quic-go/quic-go/closed_conn.go | 64 +
.../quic-go/codecov.yml | 1 +
.../quic-go/config.go | 47 +-
.../quic-go/conn_id_generator.go | 45 +-
.../quic-go/conn_id_manager.go | 27 +-
.../quic-go/connection.go | 701 +-
.../quic-go/quic-go/connection_timer.go | 51 +
.../quic-go/crypto_stream.go | 12 +-
.../quic-go/crypto_stream_manager.go | 4 +-
.../quic-go/datagram_queue.go | 70 +-
.../quic-go/errors.go | 9 +-
.../quic-go/frame_sorter.go | 31 +-
.../quic-go/framer.go | 39 +-
.../github.com/quic-go/quic-go/http3/body.go | 135 +
.../quic-go/quic-go/http3/capsule.go | 55 +
.../quic-go/quic-go/http3/client.go | 457 +
.../quic-go/quic-go/http3/error_codes.go | 73 +
.../quic-go/quic-go/http3/frames.go | 164 +
.../quic-go/quic-go/http3/gzip_reader.go | 39 +
.../quic-go/quic-go/http3/http_stream.go | 76 +
.../quic-go/quic-go/http3/request.go | 111 +
.../quic-go/quic-go/http3/request_writer.go | 283 +
.../quic-go/quic-go/http3/response_writer.go | 137 +
.../quic-go/quic-go/http3/roundtrip.go | 247 +
.../quic-go/quic-go/http3/server.go | 752 ++
.../quic-go/interface.go | 89 +-
.../internal/ackhandler/ack_eliciting.go | 4 +-
.../quic-go/internal/ackhandler/ackhandler.go | 23 +
.../quic-go/internal/ackhandler/frame.go | 29 +
.../quic-go/internal/ackhandler/interfaces.go | 20 +-
.../quic-go/internal/ackhandler/mockgen.go | 3 +
.../quic-go/internal/ackhandler/packet.go | 55 +
.../ackhandler/packet_number_generator.go | 6 +-
.../ackhandler/received_packet_handler.go | 25 +-
.../ackhandler/received_packet_history.go | 47 +-
.../ackhandler/received_packet_tracker.go | 38 +-
.../quic-go/internal/ackhandler/send_mode.go | 0
.../ackhandler/sent_packet_handler.go | 73 +-
.../ackhandler/sent_packet_history.go | 163 +
.../quic-go/internal/congestion/bandwidth.go | 2 +-
.../quic-go/internal/congestion/clock.go | 0
.../quic-go/internal/congestion/cubic.go | 6 +-
.../internal/congestion/cubic_sender.go | 10 +-
.../internal/congestion/hybrid_slow_start.go | 8 +-
.../quic-go/internal/congestion/interface.go | 2 +-
.../quic-go/internal/congestion/pacer.go | 10 +-
.../flowcontrol/base_flow_controller.go | 8 +-
.../flowcontrol/connection_flow_controller.go | 8 +-
.../quic-go/internal/flowcontrol/interface.go | 2 +-
.../flowcontrol/stream_flow_controller.go | 8 +-
.../quic-go/internal/handshake/aead.go | 8 +-
.../internal/handshake/crypto_setup.go | 83 +-
.../internal/handshake/header_protector.go | 4 +-
.../quic-go/internal/handshake/hkdf.go | 0
.../internal/handshake/initial_aead.go | 8 +-
.../quic-go/internal/handshake/interface.go | 6 +-
.../quic-go/internal/handshake/mockgen.go | 3 +
.../quic-go/internal/handshake/retry.go | 70 +
.../internal/handshake/session_ticket.go | 13 +-
.../handshake/tls_extension_handler.go | 6 +-
.../internal/handshake/token_generator.go | 41 +-
.../internal/handshake/token_protector.go | 0
.../internal/handshake/updatable_aead.go | 12 +-
.../quic-go/internal/logutils/frame.go | 23 +-
.../internal/protocol/connection_id.go | 116 +
.../internal/protocol/encryption_level.go | 0
.../quic-go/internal/protocol/key_phase.go | 0
.../internal/protocol/packet_number.go | 0
.../quic-go/internal/protocol/params.go | 2 +-
.../quic-go/internal/protocol/perspective.go | 0
.../quic-go/internal/protocol/protocol.go | 0
.../quic-go/internal/protocol/stream.go | 0
.../quic-go/internal/protocol/version.go | 2 +-
.../quic-go/internal/qerr/error_codes.go | 4 +-
.../quic-go/internal/qerr/errors.go | 19 +-
.../quic-go/internal/qtls/go119.go | 7 +-
.../quic-go/internal/qtls/go120.go} | 9 +-
.../quic-go/quic-go/internal/qtls/go121.go | 5 +
.../quic-go/internal/qtls/go_oldversion.go | 5 +
.../internal/utils/buffered_write_closer.go | 0
.../quic-go/internal/utils/byteorder.go | 4 +
.../internal/utils/byteorder_big_endian.go | 14 +
.../quic-go/internal/utils/ip.go | 0
.../internal/utils/linkedlist/README.md | 6 +
.../internal/utils/linkedlist/linkedlist.go} | 149 +-
.../quic-go/internal/utils/log.go | 2 +-
.../quic-go/quic-go/internal/utils/minmax.go | 72 +
.../quic-go/internal/utils/rand.go | 0
.../quic-go/internal/utils/rtt_stats.go | 8 +-
.../quic-go/internal/utils/timer.go | 4 +
.../quic-go/internal/wire/ack_frame.go | 38 +-
.../quic-go/internal/wire/ack_frame_pool.go | 24 +
.../quic-go/internal/wire/ack_range.go | 2 +-
.../internal/wire/connection_close_frame.go | 20 +-
.../quic-go/internal/wire/crypto_frame.go | 16 +-
.../internal/wire/data_blocked_frame.go | 13 +-
.../quic-go/internal/wire/datagram_frame.go | 16 +-
.../quic-go/internal/wire/extended_header.go | 210 +
.../quic-go/internal/wire/frame_parser.go | 71 +-
.../internal/wire/handshake_done_frame.go | 7 +-
.../quic-go/internal/wire/header.go | 146 +-
.../quic-go/internal/wire/interface.go | 8 +-
.../quic-go/internal/wire/log.go | 4 +-
.../quic-go/internal/wire/max_data_frame.go | 14 +-
.../internal/wire/max_stream_data_frame.go | 14 +-
.../internal/wire/max_streams_frame.go | 14 +-
.../internal/wire/new_connection_id_frame.go | 25 +-
.../quic-go/internal/wire/new_token_frame.go | 14 +-
.../internal/wire/path_challenge_frame.go | 10 +-
.../internal/wire/path_response_frame.go | 10 +-
.../quic-go/internal/wire/ping_frame.go | 9 +-
.../quic-go/internal/wire/pool.go | 2 +-
.../internal/wire/reset_stream_frame.go | 18 +-
.../wire/retire_connection_id_frame.go | 12 +-
.../quic-go/internal/wire/short_header.go | 73 +
.../internal/wire/stop_sending_frame.go | 16 +-
.../wire/stream_data_blocked_frame.go | 14 +-
.../quic-go/internal/wire/stream_frame.go | 32 +-
.../internal/wire/streams_blocked_frame.go | 14 +-
.../internal/wire/transport_parameters.go | 128 +-
.../internal/wire/version_negotiation.go | 39 +-
.../quic-go/logging/frame.go | 2 +-
.../quic-go/logging/interface.go | 32 +-
.../quic-go/quic-go/logging/mockgen.go | 4 +
.../quic-go/logging/multiplex.go | 34 +-
.../quic-go/quic-go/logging/null_tracer.go | 62 +
.../quic-go/logging/packet_header.go | 5 +-
.../quic-go/logging/types.go | 0
vendor/github.com/quic-go/quic-go/mockgen.go | 27 +
.../quic-go/mockgen_private.sh | 4 +-
.../quic-go/mtu_discoverer.go | 8 +-
.../quic-go/multiplexer.go | 15 +-
.../quic-go/packet_handler_map.go | 178 +-
.../quic-go/quic-go/packet_packer.go | 968 ++
.../quic-go/packet_unpacker.go | 120 +-
.../quic-go/qlog/event.go | 38 +-
.../quic-go/qlog/frame.go | 6 +-
.../quic-go/qlog/packet_header.go | 54 +-
.../quic-go/qlog/qlog.go | 79 +-
.../quic-go/qlog/trace.go | 9 +-
.../quic-go/qlog/types.go | 12 +-
.../quic-go/quicvarint/io.go | 0
.../quic-go/quicvarint/varint.go | 41 +-
.../quic-go/receive_stream.go | 52 +-
.../quic-go/retransmission_queue.go | 26 +-
.../quic-go/send_conn.go | 0
.../quic-go/send_queue.go | 7 +
.../quic-go/send_stream.go | 91 +-
.../quic-go/server.go | 228 +-
.../quic-go/stream.go | 19 +-
.../quic-go/streams_map.go | 39 +-
.../quic-go/streams_map_incoming.go} | 61 +-
.../quic-go/streams_map_outgoing.go} | 68 +-
.../quic-go/sys_conn.go | 4 +-
.../quic-go/sys_conn_df.go | 1 -
.../quic-go/sys_conn_df_linux.go | 4 +-
.../quic-go/sys_conn_df_windows.go | 4 +-
.../quic-go/sys_conn_helper_darwin.go | 1 -
.../quic-go/sys_conn_helper_freebsd.go | 1 -
.../quic-go/sys_conn_helper_linux.go | 1 -
.../quic-go/sys_conn_no_oob.go | 1 -
.../quic-go/sys_conn_oob.go | 55 +-
.../quic-go/sys_conn_windows.go | 1 -
.../quic-go/token_store.go | 18 +-
vendor/github.com/quic-go/quic-go/tools.go | 8 +
.../quic-go/window_update_queue.go | 6 +-
.../quic-go/quic-go/zero_rtt_queue.go | 34 +
.../quic-go/webtransport-go/.gitignore | 1 +
.../webtransport-go}/LICENSE | 4 +-
.../quic-go/webtransport-go/README.md | 39 +
.../quic-go/webtransport-go/client.go | 124 +
.../quic-go/webtransport-go/codecov.yml | 9 +
.../quic-go/webtransport-go/errors.go | 78 +
.../quic-go/webtransport-go/protocol.go | 5 +
.../quic-go/webtransport-go/server.go | 227 +
.../quic-go/webtransport-go/session.go | 418 +
.../webtransport-go/session_manager.go | 195 +
.../quic-go/webtransport-go/stream.go | 214 +
.../quic-go/webtransport-go/streams_map.go | 42 +
.../quic-go/webtransport-go/version.json | 3 +
vendor/github.com/satori/go.uuid/.travis.yml | 23 -
vendor/github.com/satori/go.uuid/LICENSE | 20 -
vendor/github.com/satori/go.uuid/README.md | 65 -
vendor/github.com/satori/go.uuid/codec.go | 206 -
vendor/github.com/satori/go.uuid/generator.go | 239 -
vendor/github.com/satori/go.uuid/sql.go | 78 -
vendor/github.com/satori/go.uuid/uuid.go | 161 -
.../spacemonkeygo/spacelog/.travis.yml | 6 -
.../github.com/spacemonkeygo/spacelog/LICENSE | 191 -
.../spacemonkeygo/spacelog/README.md | 19 -
.../spacemonkeygo/spacelog/capture.go | 67 -
.../spacemonkeygo/spacelog/capture_ae.go | 25 -
.../spacemonkeygo/spacelog/capture_linux.go | 35 -
.../spacemonkeygo/spacelog/capture_other.go | 38 -
.../spacemonkeygo/spacelog/capture_solaris.go | 33 -
.../spacemonkeygo/spacelog/collection.go | 271 -
.../spacemonkeygo/spacelog/convenience.go | 296 -
.../github.com/spacemonkeygo/spacelog/doc.go | 39 -
.../spacemonkeygo/spacelog/event.go | 75 -
.../spacemonkeygo/spacelog/handler.go | 53 -
.../spacemonkeygo/spacelog/level.go | 136 -
.../spacemonkeygo/spacelog/logger.go | 61 -
.../spacemonkeygo/spacelog/output.go | 178 -
.../spacemonkeygo/spacelog/output_other.go | 19 -
.../spacemonkeygo/spacelog/output_windows.go | 17 -
.../spacemonkeygo/spacelog/setup.go | 189 -
.../spacelog/sighup_appengine.go | 37 -
.../spacemonkeygo/spacelog/sighup_other.go | 23 -
.../spacemonkeygo/spacelog/syslog.go | 65 -
.../spacemonkeygo/spacelog/templates.go | 69 -
.../spacelog/templates_others.go | 22 -
.../spacelog/templates_windows.go | 20 -
.../github.com/spacemonkeygo/spacelog/text.go | 80 -
vendor/go.opencensus.io/Makefile | 8 +-
vendor/go.opencensus.io/opencensus.go | 2 +-
vendor/go.opencensus.io/stats/doc.go | 7 +-
.../go.opencensus.io/stats/internal/record.go | 6 +
vendor/go.opencensus.io/stats/record.go | 21 +-
.../stats/view/aggregation.go | 6 +-
.../go.opencensus.io/stats/view/collector.go | 9 +-
vendor/go.opencensus.io/stats/view/doc.go | 2 +-
vendor/go.opencensus.io/stats/view/worker.go | 27 +-
vendor/go.opencensus.io/tag/profile_19.go | 1 +
vendor/go.opencensus.io/tag/profile_not19.go | 1 +
vendor/go.opencensus.io/trace/doc.go | 13 +-
vendor/go.opencensus.io/trace/lrumap.go | 2 +-
vendor/go.opencensus.io/trace/trace_go11.go | 1 +
.../go.opencensus.io/trace/trace_nongo11.go | 1 +
vendor/go.uber.org/atomic/CHANGELOG.md | 10 +
vendor/go.uber.org/atomic/bool.go | 2 +-
vendor/go.uber.org/atomic/duration.go | 2 +-
vendor/go.uber.org/atomic/error.go | 14 +-
vendor/go.uber.org/atomic/float32.go | 2 +-
vendor/go.uber.org/atomic/float64.go | 2 +-
vendor/go.uber.org/atomic/int32.go | 2 +-
vendor/go.uber.org/atomic/int64.go | 2 +-
vendor/go.uber.org/atomic/pointer_go118.go | 41 +-
.../atomic/pointer_go118_pre119.go | 60 +
vendor/go.uber.org/atomic/string.go | 23 +-
vendor/go.uber.org/atomic/string_ext.go | 15 +-
vendor/go.uber.org/atomic/time.go | 2 +-
vendor/go.uber.org/atomic/uint32.go | 2 +-
vendor/go.uber.org/atomic/uint64.go | 2 +-
vendor/go.uber.org/atomic/uintptr.go | 2 +-
vendor/go.uber.org/dig/.codecov.yml | 19 +
vendor/go.uber.org/dig/.gitignore | 12 +
vendor/go.uber.org/dig/CHANGELOG.md | 294 +
.../LICENSE-MIT => go.uber.org/dig/LICENSE} | 2 +-
vendor/go.uber.org/dig/Makefile | 67 +
vendor/go.uber.org/dig/README.md | 51 +
vendor/go.uber.org/dig/callback.go | 108 +
vendor/go.uber.org/dig/check_license.sh | 17 +
vendor/go.uber.org/dig/constructor.go | 238 +
vendor/go.uber.org/dig/container.go | 282 +
vendor/go.uber.org/dig/cycle_error.go | 79 +
vendor/go.uber.org/dig/decorate.go | 313 +
vendor/go.uber.org/dig/doc.go | 348 +
vendor/go.uber.org/dig/error.go | 505 +
vendor/go.uber.org/dig/glide.yaml | 7 +
vendor/go.uber.org/dig/graph.go | 115 +
vendor/go.uber.org/dig/group.go | 67 +
vendor/go.uber.org/dig/inout.go | 175 +
.../dig/internal/digerror/errors.go | 34 +
.../dig/internal/digreflect/func.go | 125 +
vendor/go.uber.org/dig/internal/dot/README.md | 61 +
vendor/go.uber.org/dig/internal/dot/graph.go | 466 +
.../go.uber.org/dig/internal/graph/graph.go | 118 +
vendor/go.uber.org/dig/invoke.go | 211 +
vendor/go.uber.org/dig/param.go | 668 ++
vendor/go.uber.org/dig/provide.go | 665 ++
vendor/go.uber.org/dig/result.go | 535 +
vendor/go.uber.org/dig/scope.go | 321 +
vendor/go.uber.org/dig/version.go | 24 +
vendor/go.uber.org/dig/visualize.go | 192 +
vendor/go.uber.org/fx/.codecov.yml | 17 +
vendor/go.uber.org/fx/.gitignore | 16 +
vendor/go.uber.org/fx/CHANGELOG.md | 375 +
vendor/go.uber.org/fx/CONTRIBUTING.md | 291 +
.../LICENSE-MIT => go.uber.org/fx/LICENSE} | 10 +-
vendor/go.uber.org/fx/Makefile | 86 +
vendor/go.uber.org/fx/README.md | 40 +
vendor/go.uber.org/fx/annotated.go | 1658 +++
vendor/go.uber.org/fx/app.go | 810 ++
vendor/go.uber.org/fx/app_unixes.go | 29 +
vendor/go.uber.org/fx/app_windows.go | 29 +
vendor/go.uber.org/fx/checklicense.sh | 17 +
vendor/go.uber.org/fx/decorate.go | 230 +
vendor/go.uber.org/fx/doc.go | 39 +
vendor/go.uber.org/fx/extract.go | 156 +
vendor/go.uber.org/fx/fxevent/console.go | 143 +
vendor/go.uber.org/fx/fxevent/event.go | 240 +
vendor/go.uber.org/fx/fxevent/logger.go | 38 +
vendor/go.uber.org/fx/fxevent/zap.go | 209 +
vendor/go.uber.org/fx/inout.go | 364 +
.../go.uber.org/fx/internal/fxclock/clock.go | 58 +
.../go.uber.org/fx/internal/fxlog/default.go | 32 +
vendor/go.uber.org/fx/internal/fxlog/spy.go | 91 +
.../fx/internal/fxreflect/fxreflect.go | 86 +
.../fx/internal/fxreflect/stack.go | 149 +
.../fx/internal/lifecycle/lifecycle.go | 398 +
vendor/go.uber.org/fx/invoke.go | 110 +
vendor/go.uber.org/fx/lifecycle.go | 147 +
vendor/go.uber.org/fx/log.go | 50 +
vendor/go.uber.org/fx/module.go | 290 +
vendor/go.uber.org/fx/populate.go | 67 +
vendor/go.uber.org/fx/printer_writer.go | 36 +
vendor/go.uber.org/fx/provide.go | 187 +
vendor/go.uber.org/fx/replace.go | 142 +
vendor/go.uber.org/fx/shutdown.go | 111 +
vendor/go.uber.org/fx/signal.go | 253 +
vendor/go.uber.org/fx/supply.go | 151 +
vendor/go.uber.org/fx/version.go | 24 +
vendor/go.uber.org/multierr/CHANGELOG.md | 23 +
vendor/go.uber.org/multierr/README.md | 22 +-
vendor/go.uber.org/multierr/error.go | 406 +-
.../go.uber.org/multierr/error_post_go120.go | 48 +
.../go.uber.org/multierr/error_pre_go120.go | 79 +
vendor/go.uber.org/multierr/glide.yaml | 8 -
vendor/go.uber.org/zap/CHANGELOG.md | 25 +
vendor/go.uber.org/zap/array_go118.go | 42 +-
.../go.uber.org/zap/internal/level_enabler.go | 35 +
vendor/go.uber.org/zap/level.go | 3 +
vendor/go.uber.org/zap/logger.go | 7 +
vendor/go.uber.org/zap/options.go | 3 +-
vendor/go.uber.org/zap/sink.go | 100 +-
vendor/go.uber.org/zap/stacktrace.go | 2 +-
vendor/go.uber.org/zap/sugar.go | 31 +-
vendor/go.uber.org/zap/writer.go | 6 +-
vendor/go.uber.org/zap/zapcore/core.go | 9 +
vendor/go.uber.org/zap/zapcore/entry.go | 3 +-
vendor/go.uber.org/zap/zapcore/hook.go | 9 +
.../go.uber.org/zap/zapcore/increase_level.go | 9 +
vendor/go.uber.org/zap/zapcore/level.go | 42 +
vendor/go.uber.org/zap/zapcore/sampler.go | 11 +-
vendor/go.uber.org/zap/zapcore/tee.go | 17 +-
vendor/golang.org/x/crypto/AUTHORS | 3 -
vendor/golang.org/x/crypto/CONTRIBUTORS | 3 -
.../x/crypto/chacha20/chacha_generic.go | 4 +-
.../chacha20poly1305_amd64.go | 6 +-
.../chacha20poly1305_generic.go | 6 +-
vendor/golang.org/x/crypto/cryptobyte/asn1.go | 79 +-
.../golang.org/x/crypto/cryptobyte/builder.go | 14 +-
.../golang.org/x/crypto/cryptobyte/string.go | 11 +
.../x/crypto/curve25519/curve25519.go | 9 +-
.../curve25519/internal/field/fe_generic.go | 2 +-
.../{subtle/aliasing.go => alias/alias.go} | 5 +-
.../alias_purego.go} | 5 +-
.../x/crypto/salsa20/salsa/hsalsa20.go | 66 +-
.../x/crypto/salsa20/salsa/salsa208.go | 66 +-
.../x/crypto/salsa20/salsa/salsa20_ref.go | 66 +-
vendor/golang.org/x/crypto/sha3/keccakf.go | 194 +-
.../qtls-go1-18 => golang.org/x/exp}/LICENSE | 0
vendor/golang.org/x/exp/PATENTS | 22 +
.../x/exp/constraints/constraints.go | 50 +
vendor/golang.org/x/exp/slices/slices.go | 258 +
vendor/golang.org/x/exp/slices/sort.go | 126 +
vendor/golang.org/x/exp/slices/zsortfunc.go | 479 +
.../golang.org/x/exp/slices/zsortordered.go | 481 +
vendor/golang.org/x/mod/modfile/print.go | 174 +
vendor/golang.org/x/mod/modfile/read.go | 958 ++
vendor/golang.org/x/mod/modfile/rule.go | 1559 +++
vendor/golang.org/x/mod/modfile/work.go | 234 +
vendor/golang.org/x/mod/module/module.go | 4 +-
.../golang.org/x/net/bpf/vm_instructions.go | 4 +-
vendor/golang.org/x/net/context/go17.go | 4 +-
vendor/golang.org/x/net/html/doc.go | 21 +
vendor/golang.org/x/net/html/escape.go | 81 +
vendor/golang.org/x/net/html/parse.go | 4 +-
vendor/golang.org/x/net/html/render.go | 2 +-
vendor/golang.org/x/net/html/token.go | 59 +-
vendor/golang.org/x/net/http2/flow.go | 88 +-
vendor/golang.org/x/net/http2/frame.go | 11 +-
vendor/golang.org/x/net/http2/headermap.go | 18 +
vendor/golang.org/x/net/http2/hpack/encode.go | 5 +
vendor/golang.org/x/net/http2/hpack/hpack.go | 81 +-
.../x/net/http2/hpack/static_table.go | 188 +
vendor/golang.org/x/net/http2/hpack/tables.go | 78 +-
vendor/golang.org/x/net/http2/pipe.go | 6 +-
vendor/golang.org/x/net/http2/server.go | 331 +-
vendor/golang.org/x/net/http2/transport.go | 311 +-
.../x/net/internal/socket/mmsghdr_unix.go | 18 +-
.../x/net/internal/socket/msghdr_linux.go | 3 -
.../x/net/internal/socket/sys_zos_s390x.go | 1 +
.../net/internal/socket/zsys_openbsd_ppc64.go | 30 +
.../internal/socket/zsys_openbsd_riscv64.go | 30 +
.../golang.org/x/net/internal/socks/socks.go | 2 +-
vendor/golang.org/x/net/ipv6/dgramopt.go | 2 +-
vendor/golang.org/x/net/trace/histogram.go | 2 +-
vendor/golang.org/x/net/trace/trace.go | 2 +-
vendor/golang.org/x/net/websocket/hybi.go | 2 +-
.../golang.org/x/net/websocket/websocket.go | 7 +-
vendor/golang.org/x/sync/errgroup/errgroup.go | 4 +-
vendor/golang.org/x/sys/cpu/cpu_arm64.go | 5 +-
vendor/golang.org/x/sys/cpu/cpu_gccgo_x86.c | 1 +
.../golang.org/x/sys/cpu/cpu_linux_arm64.go | 44 +-
.../golang.org/x/sys/cpu/cpu_other_ppc64x.go | 15 +
vendor/golang.org/x/sys/cpu/endian_big.go | 11 +
vendor/golang.org/x/sys/cpu/endian_little.go | 11 +
vendor/golang.org/x/sys/cpu/hwcap_linux.go | 15 +
vendor/golang.org/x/sys/cpu/parse.go | 43 +
.../x/sys/cpu/proc_cpuinfo_linux.go | 54 +
vendor/golang.org/x/sys/cpu/runtime_auxv.go | 16 +
.../x/sys/cpu/runtime_auxv_go121.go | 19 +
vendor/golang.org/x/sys/execabs/execabs.go | 2 +-
.../golang.org/x/sys/execabs/execabs_go118.go | 6 +
.../golang.org/x/sys/execabs/execabs_go119.go | 12 +-
vendor/golang.org/x/sys/plan9/mkerrors.sh | 4 +-
vendor/golang.org/x/sys/plan9/syscall.go | 10 +-
vendor/golang.org/x/sys/unix/asm_bsd_ppc64.s | 31 +
vendor/golang.org/x/sys/unix/dirent.go | 4 +-
vendor/golang.org/x/sys/unix/gccgo.go | 4 +-
vendor/golang.org/x/sys/unix/gccgo_c.c | 4 +-
vendor/golang.org/x/sys/unix/ioctl_linux.go | 20 +-
vendor/golang.org/x/sys/unix/ioctl_signed.go | 70 +
.../sys/unix/{ioctl.go => ioctl_unsigned.go} | 21 +-
vendor/golang.org/x/sys/unix/ioctl_zos.go | 20 +-
vendor/golang.org/x/sys/unix/mkall.sh | 31 +-
vendor/golang.org/x/sys/unix/mkerrors.sh | 9 +-
vendor/golang.org/x/sys/unix/ptrace_darwin.go | 6 +
vendor/golang.org/x/sys/unix/ptrace_ios.go | 6 +
vendor/golang.org/x/sys/unix/sockcmsg_unix.go | 14 +
vendor/golang.org/x/sys/unix/str.go | 27 -
vendor/golang.org/x/sys/unix/syscall.go | 10 +-
vendor/golang.org/x/sys/unix/syscall_aix.go | 7 +-
.../golang.org/x/sys/unix/syscall_aix_ppc.go | 1 -
.../x/sys/unix/syscall_aix_ppc64.go | 1 -
vendor/golang.org/x/sys/unix/syscall_bsd.go | 3 +-
.../x/sys/unix/syscall_darwin.1_12.go | 32 -
.../x/sys/unix/syscall_darwin.1_13.go | 108 -
.../golang.org/x/sys/unix/syscall_darwin.go | 106 +-
.../x/sys/unix/syscall_darwin_amd64.go | 1 +
.../x/sys/unix/syscall_darwin_arm64.go | 1 +
.../x/sys/unix/syscall_dragonfly.go | 3 +-
.../golang.org/x/sys/unix/syscall_freebsd.go | 45 +-
.../x/sys/unix/syscall_freebsd_386.go | 12 +-
.../x/sys/unix/syscall_freebsd_amd64.go | 12 +-
.../x/sys/unix/syscall_freebsd_arm.go | 10 +-
.../x/sys/unix/syscall_freebsd_arm64.go | 10 +-
.../x/sys/unix/syscall_freebsd_riscv64.go | 10 +-
vendor/golang.org/x/sys/unix/syscall_hurd.go | 30 +
.../golang.org/x/sys/unix/syscall_hurd_386.go | 29 +
.../golang.org/x/sys/unix/syscall_illumos.go | 106 -
vendor/golang.org/x/sys/unix/syscall_linux.go | 141 +-
.../x/sys/unix/syscall_linux_386.go | 31 -
.../x/sys/unix/syscall_linux_amd64.go | 5 -
.../x/sys/unix/syscall_linux_arm.go | 31 -
.../x/sys/unix/syscall_linux_arm64.go | 14 -
.../x/sys/unix/syscall_linux_loong64.go | 9 -
.../x/sys/unix/syscall_linux_mips64x.go | 5 -
.../x/sys/unix/syscall_linux_mipsx.go | 31 -
.../x/sys/unix/syscall_linux_ppc.go | 31 -
.../x/sys/unix/syscall_linux_ppc64x.go | 5 -
.../x/sys/unix/syscall_linux_riscv64.go | 5 -
.../x/sys/unix/syscall_linux_s390x.go | 5 -
.../x/sys/unix/syscall_linux_sparc64.go | 5 -
.../golang.org/x/sys/unix/syscall_netbsd.go | 22 +-
.../golang.org/x/sys/unix/syscall_openbsd.go | 3 +-
.../x/sys/unix/syscall_openbsd_libc.go | 4 +-
.../x/sys/unix/syscall_openbsd_ppc64.go | 42 +
.../x/sys/unix/syscall_openbsd_riscv64.go | 42 +
.../golang.org/x/sys/unix/syscall_solaris.go | 240 +-
vendor/golang.org/x/sys/unix/syscall_unix.go | 84 +-
.../golang.org/x/sys/unix/syscall_unix_gc.go | 6 +-
.../x/sys/unix/syscall_zos_s390x.go | 179 +-
vendor/golang.org/x/sys/unix/sysvshm_unix.go | 13 +-
vendor/golang.org/x/sys/unix/timestruct.go | 2 +-
vendor/golang.org/x/sys/unix/xattr_bsd.go | 104 +-
.../x/sys/unix/zerrors_darwin_amd64.go | 19 +
.../x/sys/unix/zerrors_darwin_arm64.go | 19 +
vendor/golang.org/x/sys/unix/zerrors_linux.go | 54 +-
.../x/sys/unix/zerrors_linux_386.go | 1 +
.../x/sys/unix/zerrors_linux_amd64.go | 1 +
.../x/sys/unix/zerrors_linux_arm.go | 1 +
.../x/sys/unix/zerrors_linux_arm64.go | 1 +
.../x/sys/unix/zerrors_linux_loong64.go | 1 +
.../x/sys/unix/zerrors_linux_mips.go | 1 +
.../x/sys/unix/zerrors_linux_mips64.go | 1 +
.../x/sys/unix/zerrors_linux_mips64le.go | 1 +
.../x/sys/unix/zerrors_linux_mipsle.go | 1 +
.../x/sys/unix/zerrors_linux_ppc.go | 1 +
.../x/sys/unix/zerrors_linux_ppc64.go | 1 +
.../x/sys/unix/zerrors_linux_ppc64le.go | 1 +
.../x/sys/unix/zerrors_linux_riscv64.go | 1 +
.../x/sys/unix/zerrors_linux_s390x.go | 1 +
.../x/sys/unix/zerrors_linux_sparc64.go | 1 +
.../x/sys/unix/zerrors_openbsd_386.go | 356 +-
.../x/sys/unix/zerrors_openbsd_amd64.go | 189 +-
.../x/sys/unix/zerrors_openbsd_arm.go | 348 +-
.../x/sys/unix/zerrors_openbsd_arm64.go | 160 +-
.../x/sys/unix/zerrors_openbsd_mips64.go | 95 +-
.../x/sys/unix/zerrors_openbsd_ppc64.go | 1905 ++++
.../x/sys/unix/zerrors_openbsd_riscv64.go | 1904 ++++
.../x/sys/unix/zptrace_armnn_linux.go | 8 +-
.../x/sys/unix/zptrace_linux_arm64.go | 4 +-
.../x/sys/unix/zptrace_mipsnn_linux.go | 8 +-
.../x/sys/unix/zptrace_mipsnnle_linux.go | 8 +-
.../x/sys/unix/zptrace_x86_linux.go | 8 +-
.../golang.org/x/sys/unix/zsyscall_aix_ppc.go | 23 +-
.../x/sys/unix/zsyscall_aix_ppc64.go | 24 +-
.../x/sys/unix/zsyscall_aix_ppc64_gc.go | 17 +-
.../x/sys/unix/zsyscall_aix_ppc64_gccgo.go | 18 +-
.../x/sys/unix/zsyscall_darwin_amd64.1_13.go | 40 -
.../x/sys/unix/zsyscall_darwin_amd64.1_13.s | 25 -
.../x/sys/unix/zsyscall_darwin_amd64.go | 87 +-
.../x/sys/unix/zsyscall_darwin_amd64.s | 32 +-
.../x/sys/unix/zsyscall_darwin_arm64.1_13.go | 40 -
.../x/sys/unix/zsyscall_darwin_arm64.1_13.s | 25 -
.../x/sys/unix/zsyscall_darwin_arm64.go | 87 +-
.../x/sys/unix/zsyscall_darwin_arm64.s | 32 +-
.../x/sys/unix/zsyscall_dragonfly_amd64.go | 30 +-
.../x/sys/unix/zsyscall_freebsd_386.go | 40 +-
.../x/sys/unix/zsyscall_freebsd_amd64.go | 40 +-
.../x/sys/unix/zsyscall_freebsd_arm.go | 40 +-
.../x/sys/unix/zsyscall_freebsd_arm64.go | 40 +-
.../x/sys/unix/zsyscall_freebsd_riscv64.go | 40 +-
.../x/sys/unix/zsyscall_illumos_amd64.go | 28 +-
.../golang.org/x/sys/unix/zsyscall_linux.go | 41 +-
.../x/sys/unix/zsyscall_linux_386.go | 50 -
.../x/sys/unix/zsyscall_linux_amd64.go | 50 -
.../x/sys/unix/zsyscall_linux_arm.go | 50 -
.../x/sys/unix/zsyscall_linux_arm64.go | 50 -
.../x/sys/unix/zsyscall_linux_loong64.go | 40 -
.../x/sys/unix/zsyscall_linux_mips.go | 50 -
.../x/sys/unix/zsyscall_linux_mips64.go | 50 -
.../x/sys/unix/zsyscall_linux_mips64le.go | 50 -
.../x/sys/unix/zsyscall_linux_mipsle.go | 50 -
.../x/sys/unix/zsyscall_linux_ppc.go | 50 -
.../x/sys/unix/zsyscall_linux_ppc64.go | 50 -
.../x/sys/unix/zsyscall_linux_ppc64le.go | 50 -
.../x/sys/unix/zsyscall_linux_riscv64.go | 50 -
.../x/sys/unix/zsyscall_linux_s390x.go | 50 -
.../x/sys/unix/zsyscall_linux_sparc64.go | 50 -
.../x/sys/unix/zsyscall_netbsd_386.go | 30 +-
.../x/sys/unix/zsyscall_netbsd_amd64.go | 30 +-
.../x/sys/unix/zsyscall_netbsd_arm.go | 30 +-
.../x/sys/unix/zsyscall_netbsd_arm64.go | 30 +-
.../x/sys/unix/zsyscall_openbsd_386.go | 36 +-
.../x/sys/unix/zsyscall_openbsd_386.s | 142 +-
.../x/sys/unix/zsyscall_openbsd_amd64.go | 36 +-
.../x/sys/unix/zsyscall_openbsd_amd64.s | 142 +-
.../x/sys/unix/zsyscall_openbsd_arm.go | 820 +-
.../x/sys/unix/zsyscall_openbsd_arm.s | 664 ++
.../x/sys/unix/zsyscall_openbsd_arm64.go | 36 +-
.../x/sys/unix/zsyscall_openbsd_arm64.s | 142 +-
.../x/sys/unix/zsyscall_openbsd_mips64.go | 820 +-
.../x/sys/unix/zsyscall_openbsd_mips64.s | 664 ++
.../x/sys/unix/zsyscall_openbsd_ppc64.go | 2229 ++++
.../x/sys/unix/zsyscall_openbsd_ppc64.s | 796 ++
.../x/sys/unix/zsyscall_openbsd_riscv64.go | 2229 ++++
.../x/sys/unix/zsyscall_openbsd_riscv64.s | 664 ++
.../x/sys/unix/zsyscall_solaris_amd64.go | 67 +-
.../x/sys/unix/zsyscall_zos_s390x.go | 12 +-
.../x/sys/unix/zsysctl_openbsd_386.go | 51 +-
.../x/sys/unix/zsysctl_openbsd_amd64.go | 17 +-
.../x/sys/unix/zsysctl_openbsd_arm.go | 51 +-
.../x/sys/unix/zsysctl_openbsd_arm64.go | 11 +-
.../x/sys/unix/zsysctl_openbsd_mips64.go | 3 +-
.../x/sys/unix/zsysctl_openbsd_ppc64.go | 281 +
.../x/sys/unix/zsysctl_openbsd_riscv64.go | 282 +
.../x/sys/unix/zsysnum_openbsd_arm.go | 1 +
.../x/sys/unix/zsysnum_openbsd_mips64.go | 1 +
.../x/sys/unix/zsysnum_openbsd_ppc64.go | 218 +
.../x/sys/unix/zsysnum_openbsd_riscv64.go | 219 +
.../x/sys/unix/ztypes_darwin_amd64.go | 11 +
.../x/sys/unix/ztypes_darwin_arm64.go | 11 +
.../x/sys/unix/ztypes_freebsd_386.go | 15 +-
.../x/sys/unix/ztypes_freebsd_amd64.go | 16 +-
.../x/sys/unix/ztypes_freebsd_arm.go | 16 +-
.../x/sys/unix/ztypes_freebsd_arm64.go | 16 +-
.../x/sys/unix/ztypes_freebsd_riscv64.go | 16 +-
.../x/sys/unix/ztypes_illumos_amd64.go | 42 -
vendor/golang.org/x/sys/unix/ztypes_linux.go | 349 +-
.../golang.org/x/sys/unix/ztypes_linux_386.go | 8 +-
.../x/sys/unix/ztypes_linux_amd64.go | 8 +-
.../golang.org/x/sys/unix/ztypes_linux_arm.go | 8 +-
.../x/sys/unix/ztypes_linux_arm64.go | 8 +-
.../x/sys/unix/ztypes_linux_loong64.go | 8 +-
.../x/sys/unix/ztypes_linux_mips.go | 8 +-
.../x/sys/unix/ztypes_linux_mips64.go | 8 +-
.../x/sys/unix/ztypes_linux_mips64le.go | 8 +-
.../x/sys/unix/ztypes_linux_mipsle.go | 8 +-
.../golang.org/x/sys/unix/ztypes_linux_ppc.go | 8 +-
.../x/sys/unix/ztypes_linux_ppc64.go | 8 +-
.../x/sys/unix/ztypes_linux_ppc64le.go | 8 +-
.../x/sys/unix/ztypes_linux_riscv64.go | 8 +-
.../x/sys/unix/ztypes_linux_s390x.go | 8 +-
.../x/sys/unix/ztypes_linux_sparc64.go | 8 +-
.../x/sys/unix/ztypes_netbsd_386.go | 84 +
.../x/sys/unix/ztypes_netbsd_amd64.go | 84 +
.../x/sys/unix/ztypes_netbsd_arm.go | 84 +
.../x/sys/unix/ztypes_netbsd_arm64.go | 84 +
.../x/sys/unix/ztypes_openbsd_386.go | 97 +-
.../x/sys/unix/ztypes_openbsd_amd64.go | 33 +-
.../x/sys/unix/ztypes_openbsd_arm.go | 9 +-
.../x/sys/unix/ztypes_openbsd_arm64.go | 9 +-
.../x/sys/unix/ztypes_openbsd_mips64.go | 9 +-
.../x/sys/unix/ztypes_openbsd_ppc64.go | 571 +
.../x/sys/unix/ztypes_openbsd_riscv64.go | 571 +
.../x/sys/unix/ztypes_solaris_amd64.go | 35 +
.../golang.org/x/sys/unix/ztypes_zos_s390x.go | 11 +-
.../golang.org/x/sys/windows/env_windows.go | 6 +-
.../golang.org/x/sys/windows/exec_windows.go | 7 +-
vendor/golang.org/x/sys/windows/service.go | 7 +
.../x/sys/windows/setupapi_windows.go | 2 +-
vendor/golang.org/x/sys/windows/syscall.go | 10 +-
.../x/sys/windows/syscall_windows.go | 56 +-
.../golang.org/x/sys/windows/types_windows.go | 140 +-
.../x/sys/windows/zsyscall_windows.go | 131 +
vendor/golang.org/x/term/AUTHORS | 3 -
vendor/golang.org/x/term/CONTRIBUTORS | 3 -
vendor/golang.org/x/term/term.go | 10 +-
vendor/golang.org/x/term/terminal.go | 3 +-
vendor/golang.org/x/text/AUTHORS | 3 -
vendor/golang.org/x/text/CONTRIBUTORS | 3 -
.../x/text/encoding/htmlindex/tables.go | 9 +
.../text/encoding/internal/identifier/mib.go | 8 +
.../x/text/encoding/internal/internal.go | 2 +-
.../x/text/encoding/simplifiedchinese/gbk.go | 6 +-
.../internal/language/compact/language.go | 2 +-
.../text/internal/language/compact/tables.go | 6 +-
.../x/text/internal/language/language.go | 2 +-
.../x/text/internal/language/lookup.go | 4 +-
.../x/text/internal/language/parse.go | 24 +-
.../x/text/internal/language/tables.go | 868 +-
.../internal/utf8internal/utf8internal.go | 2 +-
vendor/golang.org/x/text/language/doc.go | 44 +-
vendor/golang.org/x/text/language/go1_1.go | 39 -
vendor/golang.org/x/text/language/go1_2.go | 12 -
vendor/golang.org/x/text/language/language.go | 2 +-
vendor/golang.org/x/text/language/match.go | 2 +-
vendor/golang.org/x/text/language/parse.go | 8 +-
vendor/golang.org/x/text/language/tables.go | 18 +-
vendor/golang.org/x/text/runes/runes.go | 2 +-
vendor/golang.org/x/text/unicode/bidi/core.go | 26 +-
.../golang.org/x/text/unicode/bidi/trieval.go | 12 -
.../x/text/unicode/norm/forminfo.go | 11 +-
.../x/text/unicode/norm/normalize.go | 11 +-
.../x/text/unicode/norm/tables13.0.0.go | 4 +-
.../golang.org/x/tools/cmd/goimports/doc.go | 50 +
.../x/tools/cmd/goimports/goimports.go | 380 +
.../x/tools/cmd/goimports/goimports_gc.go | 27 +
.../x/tools/cmd/goimports/goimports_not_gc.go | 12 +
.../x/tools/go/ast/inspector/inspector.go | 68 +-
.../x/tools/go/ast/inspector/typeof.go | 3 +-
.../x/tools/go/gcexportdata/gcexportdata.go | 32 +-
.../go/internal/gcimporter/gcimporter.go | 1125 --
.../golang.org/x/tools/go/packages/golist.go | 22 +-
.../x/tools/go/packages/packages.go | 87 +-
.../x/tools/go/types/objectpath/objectpath.go | 764 ++
.../internal/fastwalk/fastwalk_darwin.go | 119 +
.../internal/fastwalk/fastwalk_dirent_ino.go | 6 +-
.../fastwalk/fastwalk_dirent_namlen_bsd.go | 4 +-
.../tools/internal/fastwalk/fastwalk_unix.go | 4 +-
.../{go => }/internal/gcimporter/bexport.go | 9 +-
.../{go => }/internal/gcimporter/bimport.go | 0
.../internal/gcimporter/exportdata.go | 0
.../x/tools/internal/gcimporter/gcimporter.go | 277 +
.../{go => }/internal/gcimporter/iexport.go | 198 +-
.../{go => }/internal/gcimporter/iimport.go | 140 +-
.../internal/gcimporter/newInterface10.go | 0
.../internal/gcimporter/newInterface11.go | 0
.../internal/gcimporter/support_go117.go | 0
.../internal/gcimporter/support_go118.go | 14 +
.../internal/gcimporter/unified_no.go | 0
.../internal/gcimporter/unified_yes.go | 0
.../internal/gcimporter/ureader_no.go | 0
.../internal/gcimporter/ureader_yes.go | 211 +-
.../x/tools/internal/gocommand/invoke.go | 185 +-
.../x/tools/internal/gocommand/version.go | 54 +-
.../x/tools/internal/imports/fix.go | 48 +-
.../x/tools/internal/imports/mod.go | 22 +-
.../x/tools/internal/imports/sortimports.go | 1 +
.../x/tools/internal/imports/zstdlib.go | 606 +-
.../tools/{go => }/internal/pkgbits/codes.go | 0
.../{go => }/internal/pkgbits/decoder.go | 108 +-
.../x/tools/{go => }/internal/pkgbits/doc.go | 0
.../{go => }/internal/pkgbits/encoder.go | 20 +-
.../tools/{go => }/internal/pkgbits/flags.go | 0
.../{go => }/internal/pkgbits/frames_go1.go | 0
.../{go => }/internal/pkgbits/frames_go17.go | 0
.../tools/{go => }/internal/pkgbits/reloc.go | 4 +-
.../{go => }/internal/pkgbits/support.go | 0
.../x/tools/{go => }/internal/pkgbits/sync.go | 0
.../internal/pkgbits/syncmarker_string.go | 0
.../internal/tokeninternal/tokeninternal.go | 151 +
.../x/tools/internal/typeparams/common.go | 1 -
.../tools/internal/typesinternal/errorcode.go | 38 +-
.../typesinternal/errorcode_string.go | 26 +-
.../x/tools/internal/typesinternal/types.go | 9 +
.../cmd/protoc-gen-go/internal_gengo/init.go | 168 +
.../cmd/protoc-gen-go/internal_gengo/main.go | 896 ++
.../protoc-gen-go/internal_gengo/reflect.go | 372 +
.../internal_gengo/well_known_types.go | 1079 ++
.../protobuf/cmd/protoc-gen-go/main.go | 56 +
.../protobuf/compiler/protogen/protogen.go | 1357 +++
.../protobuf/encoding/protowire/wire.go | 8 +-
.../protobuf/internal/encoding/text/decode.go | 5 +-
.../internal/encoding/text/decode_number.go | 43 +-
.../protobuf/internal/genid/descriptor_gen.go | 90 +-
.../protobuf/internal/impl/convert.go | 1 -
.../protobuf/internal/msgfmt/format.go | 261 +
.../protobuf/internal/strs/strings_unsafe.go | 2 +-
.../protobuf/internal/version/version.go | 4 +-
.../google.golang.org/protobuf/proto/doc.go | 9 +-
.../google.golang.org/protobuf/proto/equal.go | 172 +-
.../protobuf/reflect/protopath/path.go | 122 +
.../protobuf/reflect/protopath/step.go | 241 +
.../protobuf/reflect/protorange/range.go | 316 +
.../reflect/protoreflect/source_gen.go | 14 +
.../protobuf/reflect/protoreflect/value.go | 2 +-
.../reflect/protoreflect/value_equal.go | 168 +
.../reflect/protoreflect/value_union.go | 4 +-
.../reflect/protoregistry/registry.go | 2 +-
.../types/descriptorpb/descriptor.pb.go | 1547 ++-
.../protobuf/types/dynamicpb/dynamic.go | 717 ++
.../protobuf/types/known/anypb/any.pb.go | 135 +-
.../types/known/durationpb/duration.pb.go | 63 +-
.../types/known/timestamppb/timestamp.pb.go | 61 +-
.../protobuf/types/pluginpb/plugin.pb.go | 656 ++
vendor/gopkg.in/tomb.v1/LICENSE | 29 -
vendor/gopkg.in/tomb.v1/README.md | 4 -
vendor/gopkg.in/tomb.v1/tomb.go | 176 -
vendor/lukechampine.com/blake3/bao.go | 151 +
vendor/lukechampine.com/blake3/blake3.go | 10 +-
.../lukechampine.com/blake3/compress_amd64.go | 8 +
.../lukechampine.com/blake3/compress_noasm.go | 17 +
vendor/modules.txt | 393 +-
1778 files changed, 119346 insertions(+), 91906 deletions(-)
delete mode 100644 LICENSES/vendor/github.com/cheekybits/genny/LICENSE
create mode 100644 LICENSES/vendor/github.com/golang/mock/LICENSE
create mode 100644 LICENSES/vendor/github.com/google/pprof/LICENSE
delete mode 100644 LICENSES/vendor/github.com/libp2p/go-libp2p-core/LICENSE
delete mode 100644 LICENSES/vendor/github.com/libp2p/go-openssl/LICENSE
rename LICENSES/vendor/github.com/libp2p/go-yamux/{v3 => v4}/LICENSE (99%)
delete mode 100644 LICENSES/vendor/github.com/marten-seemann/qtls-go1-17/LICENSE
delete mode 100644 LICENSES/vendor/github.com/marten-seemann/qtls-go1-18/LICENSE
delete mode 100644 LICENSES/vendor/github.com/mattn/go-pointer/LICENSE
delete mode 100644 LICENSES/vendor/github.com/nxadm/tail/LICENSE
rename LICENSES/vendor/github.com/onsi/ginkgo/{ => v2}/LICENSE (89%)
create mode 100644 LICENSES/vendor/github.com/quic-go/qpack/LICENSE
rename LICENSES/vendor/github.com/{marten-seemann => quic-go}/qtls-go1-19/LICENSE (90%)
rename LICENSES/vendor/github.com/{marten-seemann/qtls-go1-16 => quic-go/qtls-go1-20}/LICENSE (90%)
rename LICENSES/vendor/github.com/{lucas-clemente => quic-go}/quic-go/LICENSE (88%)
create mode 100644 LICENSES/vendor/github.com/quic-go/webtransport-go/LICENSE
delete mode 100644 LICENSES/vendor/github.com/satori/go.uuid/LICENSE
delete mode 100644 LICENSES/vendor/github.com/spacemonkeygo/spacelog/LICENSE
rename {vendor/github.com/mattn/go-pointer => LICENSES/vendor/go.uber.org/dig}/LICENSE (79%)
rename {vendor/github.com/cheekybits/genny => LICENSES/vendor/go.uber.org/fx}/LICENSE (79%)
rename {vendor/github.com/marten-seemann/qtls-go1-19 => LICENSES/vendor/golang.org/x/exp}/LICENSE (92%)
delete mode 100644 LICENSES/vendor/gopkg.in/tomb.v1/LICENSE
create mode 100644 vendor/github.com/cespare/xxhash/v2/testall.sh
create mode 100644 vendor/github.com/cespare/xxhash/v2/xxhash_arm64.s
rename vendor/github.com/cespare/xxhash/v2/{xxhash_amd64.go => xxhash_asm.go} (73%)
delete mode 100644 vendor/github.com/cheekybits/genny/.gitignore
delete mode 100644 vendor/github.com/cheekybits/genny/.travis.yml
delete mode 100644 vendor/github.com/cheekybits/genny/README.md
delete mode 100644 vendor/github.com/cheekybits/genny/doc.go
delete mode 100644 vendor/github.com/cheekybits/genny/generic/doc.go
delete mode 100644 vendor/github.com/cheekybits/genny/generic/generic.go
delete mode 100644 vendor/github.com/cheekybits/genny/main.go
delete mode 100644 vendor/github.com/cheekybits/genny/out/lazy_file.go
delete mode 100644 vendor/github.com/cheekybits/genny/parse/builtins.go
delete mode 100644 vendor/github.com/cheekybits/genny/parse/doc.go
delete mode 100644 vendor/github.com/cheekybits/genny/parse/errors.go
delete mode 100644 vendor/github.com/cheekybits/genny/parse/parse.go
delete mode 100644 vendor/github.com/cheekybits/genny/parse/typesets.go
delete mode 100644 vendor/github.com/containerd/cgroups/Vagrantfile
create mode 100644 vendor/github.com/go-logr/logr/funcr/funcr.go
create mode 100644 vendor/github.com/golang/mock/AUTHORS
create mode 100644 vendor/github.com/golang/mock/CONTRIBUTORS
create mode 100644 vendor/github.com/golang/mock/LICENSE
create mode 100644 vendor/github.com/golang/mock/mockgen/mockgen.go
create mode 100644 vendor/github.com/golang/mock/mockgen/model/model.go
create mode 100644 vendor/github.com/golang/mock/mockgen/parse.go
create mode 100644 vendor/github.com/golang/mock/mockgen/reflect.go
rename vendor/github.com/{spacemonkeygo/spacelog/capture_windows.go => golang/mock/mockgen/version.1.11.go} (68%)
rename vendor/github.com/{spacemonkeygo/spacelog/syslog_windows.go => golang/mock/mockgen/version.1.12.go} (55%)
delete mode 100644 vendor/github.com/google/go-cmp/cmp/internal/value/zero.go
create mode 100644 vendor/github.com/google/pprof/AUTHORS
create mode 100644 vendor/github.com/google/pprof/CONTRIBUTORS
create mode 100644 vendor/github.com/google/pprof/LICENSE
create mode 100644 vendor/github.com/google/pprof/profile/encode.go
create mode 100644 vendor/github.com/google/pprof/profile/filter.go
create mode 100644 vendor/github.com/google/pprof/profile/index.go
create mode 100644 vendor/github.com/google/pprof/profile/legacy_java_profile.go
create mode 100644 vendor/github.com/google/pprof/profile/legacy_profile.go
create mode 100644 vendor/github.com/google/pprof/profile/merge.go
create mode 100644 vendor/github.com/google/pprof/profile/profile.go
create mode 100644 vendor/github.com/google/pprof/profile/proto.go
create mode 100644 vendor/github.com/google/pprof/profile/prune.go
create mode 100644 vendor/github.com/huin/goupnp/go.work
create mode 100644 vendor/github.com/huin/goupnp/workspace.code-workspace
create mode 100644 vendor/github.com/ipfs/go-datastore/features.go
create mode 100644 vendor/github.com/ipfs/go-datastore/null_ds.go
delete mode 100644 vendor/github.com/klauspost/compress/huff0/autogen.go
delete mode 100644 vendor/github.com/klauspost/compress/huff0/decompress_8b_amd64.s
delete mode 100644 vendor/github.com/klauspost/compress/huff0/decompress_8b_amd64.s.in
delete mode 100644 vendor/github.com/klauspost/compress/huff0/decompress_amd64.s.in
create mode 100644 vendor/github.com/klauspost/compress/internal/cpuinfo/cpuinfo.go
create mode 100644 vendor/github.com/klauspost/compress/internal/cpuinfo/cpuinfo_amd64.go
create mode 100644 vendor/github.com/klauspost/compress/internal/cpuinfo/cpuinfo_amd64.s
create mode 100644 vendor/github.com/klauspost/compress/zstd/fse_decoder_amd64.go
create mode 100644 vendor/github.com/klauspost/compress/zstd/fse_decoder_amd64.s
create mode 100644 vendor/github.com/klauspost/compress/zstd/fse_decoder_generic.go
delete mode 100644 vendor/github.com/klauspost/compress/zstd/fuzz.go
delete mode 100644 vendor/github.com/klauspost/compress/zstd/fuzz_none.go
create mode 100644 vendor/github.com/klauspost/compress/zstd/seqdec_amd64.go
create mode 100644 vendor/github.com/klauspost/compress/zstd/seqdec_amd64.s
create mode 100644 vendor/github.com/klauspost/compress/zstd/seqdec_generic.go
create mode 100644 vendor/github.com/koron/go-ssdp/internal/multicast/doc.go
rename vendor/github.com/koron/go-ssdp/{ => internal/multicast}/interface.go (60%)
rename vendor/github.com/koron/go-ssdp/{ => internal/multicast}/multicast.go (61%)
create mode 100644 vendor/github.com/koron/go-ssdp/internal/multicast/udp.go
create mode 100644 vendor/github.com/koron/go-ssdp/internal/ssdplog/ssdplog.go
create mode 100644 vendor/github.com/koron/go-ssdp/location.go
delete mode 100644 vendor/github.com/koron/go-ssdp/log.go
create mode 100644 vendor/github.com/koron/go-ssdp/ssdp.go
delete mode 100644 vendor/github.com/koron/go-ssdp/udp.go
create mode 100644 vendor/github.com/kubeedge/beehive/pkg/common/type.go
rename vendor/github.com/kubeedge/beehive/pkg/core/{context => channel}/context_channel.go (66%)
delete mode 100644 vendor/github.com/kubeedge/beehive/pkg/core/context/context_unixsocket.go
create mode 100644 vendor/github.com/kubeedge/beehive/pkg/core/socket/broker/broker.go
create mode 100644 vendor/github.com/kubeedge/beehive/pkg/core/socket/config/config.go
create mode 100644 vendor/github.com/kubeedge/beehive/pkg/core/socket/context_socket.go
create mode 100644 vendor/github.com/kubeedge/beehive/pkg/core/socket/helper.go
create mode 100644 vendor/github.com/kubeedge/beehive/pkg/core/socket/store/pipe.go
create mode 100644 vendor/github.com/kubeedge/beehive/pkg/core/socket/store/pipeinfo.go
create mode 100644 vendor/github.com/kubeedge/beehive/pkg/core/socket/synckeeper/keeper.go
create mode 100644 vendor/github.com/kubeedge/beehive/pkg/core/socket/wrapper/packer/packer.go
create mode 100644 vendor/github.com/kubeedge/beehive/pkg/core/socket/wrapper/reader/package.go
create mode 100644 vendor/github.com/kubeedge/beehive/pkg/core/socket/wrapper/reader/raw.go
create mode 100644 vendor/github.com/kubeedge/beehive/pkg/core/socket/wrapper/reader/reader.go
create mode 100644 vendor/github.com/kubeedge/beehive/pkg/core/socket/wrapper/wrapper.go
create mode 100644 vendor/github.com/kubeedge/beehive/pkg/core/socket/wrapper/writer/package.go
create mode 100644 vendor/github.com/kubeedge/beehive/pkg/core/socket/wrapper/writer/raw.go
create mode 100644 vendor/github.com/kubeedge/beehive/pkg/core/socket/wrapper/writer/writer.go
delete mode 100644 vendor/github.com/libp2p/go-libp2p-core/LICENSE
delete mode 100644 vendor/github.com/libp2p/go-libp2p-core/LICENSE-APACHE
delete mode 100644 vendor/github.com/libp2p/go-libp2p-core/crypto/ecdsa.go
delete mode 100644 vendor/github.com/libp2p/go-libp2p-core/crypto/ed25519.go
delete mode 100644 vendor/github.com/libp2p/go-libp2p-core/crypto/key.go
delete mode 100644 vendor/github.com/libp2p/go-libp2p-core/crypto/key_not_openssl.go
delete mode 100644 vendor/github.com/libp2p/go-libp2p-core/crypto/key_openssl.go
delete mode 100644 vendor/github.com/libp2p/go-libp2p-core/crypto/rsa_common.go
delete mode 100644 vendor/github.com/libp2p/go-libp2p-core/crypto/rsa_go.go
delete mode 100644 vendor/github.com/libp2p/go-libp2p-core/crypto/rsa_openssl.go
delete mode 100644 vendor/github.com/libp2p/go-libp2p-core/crypto/secp256k1.go
delete mode 100644 vendor/github.com/libp2p/go-libp2p-core/peer/addrinfo.go
delete mode 100644 vendor/github.com/libp2p/go-libp2p-core/peer/peer.go
delete mode 100644 vendor/github.com/libp2p/go-libp2p-core/peer/record.go
delete mode 100644 vendor/github.com/libp2p/go-libp2p-core/peerstore/helpers.go
delete mode 100644 vendor/github.com/libp2p/go-libp2p-core/peerstore/peerstore.go
delete mode 100644 vendor/github.com/libp2p/go-libp2p-core/routing/options.go
delete mode 100644 vendor/github.com/libp2p/go-libp2p-core/routing/query.go
delete mode 100644 vendor/github.com/libp2p/go-libp2p-core/routing/routing.go
delete mode 100644 vendor/github.com/libp2p/go-libp2p-kbucket/.travis.yml
create mode 100644 vendor/github.com/libp2p/go-libp2p-kbucket/version.json
delete mode 100644 vendor/github.com/libp2p/go-libp2p-routing-helpers/.travis.yml
create mode 100644 vendor/github.com/libp2p/go-libp2p-routing-helpers/compconfig.go
create mode 100644 vendor/github.com/libp2p/go-libp2p-routing-helpers/compparallel.go
create mode 100644 vendor/github.com/libp2p/go-libp2p-routing-helpers/compsequential.go
create mode 100644 vendor/github.com/libp2p/go-libp2p-routing-helpers/version.json
create mode 100644 vendor/github.com/libp2p/go-libp2p/CHANGELOG.md
create mode 100644 vendor/github.com/libp2p/go-libp2p/ROADMAP.md
create mode 100644 vendor/github.com/libp2p/go-libp2p/SECURITY.md
delete mode 100644 vendor/github.com/libp2p/go-libp2p/config/constructor_types.go
create mode 100644 vendor/github.com/libp2p/go-libp2p/config/log.go
delete mode 100644 vendor/github.com/libp2p/go-libp2p/config/muxer.go
create mode 100644 vendor/github.com/libp2p/go-libp2p/config/quic_stateless_reset.go
delete mode 100644 vendor/github.com/libp2p/go-libp2p/config/reflection_magic.go
delete mode 100644 vendor/github.com/libp2p/go-libp2p/config/security.go
delete mode 100644 vendor/github.com/libp2p/go-libp2p/config/transport.go
delete mode 100644 vendor/github.com/libp2p/go-libp2p/core/crypto/key_openssl.go
rename vendor/github.com/libp2p/go-libp2p/core/crypto/{key_not_openssl.go => key_to_stdlib.go} (97%)
delete mode 100644 vendor/github.com/libp2p/go-libp2p/core/crypto/openssl_common.go
delete mode 100644 vendor/github.com/libp2p/go-libp2p/core/crypto/pb/Makefile
delete mode 100644 vendor/github.com/libp2p/go-libp2p/core/crypto/rsa_openssl.go
delete mode 100644 vendor/github.com/libp2p/go-libp2p/core/introspection/doc.go
delete mode 100644 vendor/github.com/libp2p/go-libp2p/core/introspection/endpoint.go
delete mode 100644 vendor/github.com/libp2p/go-libp2p/core/introspection/introspector.go
delete mode 100644 vendor/github.com/libp2p/go-libp2p/core/introspection/pb/Makefile
delete mode 100644 vendor/github.com/libp2p/go-libp2p/core/introspection/pb/doc.go
delete mode 100644 vendor/github.com/libp2p/go-libp2p/core/introspection/pb/introspection.pb.go
delete mode 100644 vendor/github.com/libp2p/go-libp2p/core/introspection/pb/introspection.proto
delete mode 100644 vendor/github.com/libp2p/go-libp2p/core/peer/pb/Makefile
delete mode 100644 vendor/github.com/libp2p/go-libp2p/core/record/pb/Makefile
delete mode 100644 vendor/github.com/libp2p/go-libp2p/core/sec/insecure/pb/Makefile
delete mode 100644 vendor/github.com/libp2p/go-libp2p/error_util.go
create mode 100644 vendor/github.com/libp2p/go-libp2p/p2p/host/autonat/metrics.go
delete mode 100644 vendor/github.com/libp2p/go-libp2p/p2p/host/autonat/pb/Makefile
create mode 100644 vendor/github.com/libp2p/go-libp2p/p2p/host/autorelay/metrics.go
delete mode 100644 vendor/github.com/libp2p/go-libp2p/p2p/host/autorelay/timer.go
create mode 100644 vendor/github.com/libp2p/go-libp2p/p2p/host/basic/mocks.go
delete mode 100644 vendor/github.com/libp2p/go-libp2p/p2p/host/basic/peer_connectedness.go
delete mode 100644 vendor/github.com/libp2p/go-libp2p/p2p/host/blank/peer_connectedness.go
delete mode 100644 vendor/github.com/libp2p/go-libp2p/p2p/host/eventbus/LICENSE-APACHE
create mode 100644 vendor/github.com/libp2p/go-libp2p/p2p/host/eventbus/basic_metrics.go
create mode 100644 vendor/github.com/libp2p/go-libp2p/p2p/host/resource-manager/limit_config_test.backwards-compat.json
create mode 100644 vendor/github.com/libp2p/go-libp2p/p2p/metricshelper/conn.go
create mode 100644 vendor/github.com/libp2p/go-libp2p/p2p/metricshelper/dir.go
create mode 100644 vendor/github.com/libp2p/go-libp2p/p2p/metricshelper/pool.go
create mode 100644 vendor/github.com/libp2p/go-libp2p/p2p/metricshelper/registerer.go
delete mode 100644 vendor/github.com/libp2p/go-libp2p/p2p/muxer/muxer-multistream/multistream.go
delete mode 100644 vendor/github.com/libp2p/go-libp2p/p2p/net/conn-security-multistream/ssms.go
delete mode 100644 vendor/github.com/libp2p/go-libp2p/p2p/net/nat/mapping.go
create mode 100644 vendor/github.com/libp2p/go-libp2p/p2p/net/swarm/clock.go
create mode 100644 vendor/github.com/libp2p/go-libp2p/p2p/net/swarm/dial_ranker.go
create mode 100644 vendor/github.com/libp2p/go-libp2p/p2p/net/swarm/swarm_metrics.go
delete mode 100644 vendor/github.com/libp2p/go-libp2p/p2p/protocol/circuitv1/pb/Makefile
delete mode 100644 vendor/github.com/libp2p/go-libp2p/p2p/protocol/circuitv1/pb/circuitv1.pb.go
delete mode 100644 vendor/github.com/libp2p/go-libp2p/p2p/protocol/circuitv1/pb/circuitv1.proto
delete mode 100644 vendor/github.com/libp2p/go-libp2p/p2p/protocol/circuitv1/relay/options.go
delete mode 100644 vendor/github.com/libp2p/go-libp2p/p2p/protocol/circuitv1/relay/relay.go
delete mode 100644 vendor/github.com/libp2p/go-libp2p/p2p/protocol/circuitv2/pb/Makefile
create mode 100644 vendor/github.com/libp2p/go-libp2p/p2p/protocol/circuitv2/relay/metrics.go
create mode 100644 vendor/github.com/libp2p/go-libp2p/p2p/protocol/holepunch/filter.go
create mode 100644 vendor/github.com/libp2p/go-libp2p/p2p/protocol/holepunch/metrics.go
delete mode 100644 vendor/github.com/libp2p/go-libp2p/p2p/protocol/holepunch/pb/Makefile
delete mode 100644 vendor/github.com/libp2p/go-libp2p/p2p/protocol/identify/id_delta.go
delete mode 100644 vendor/github.com/libp2p/go-libp2p/p2p/protocol/identify/id_go117.go
delete mode 100644 vendor/github.com/libp2p/go-libp2p/p2p/protocol/identify/id_push.go
create mode 100644 vendor/github.com/libp2p/go-libp2p/p2p/protocol/identify/metrics.go
delete mode 100644 vendor/github.com/libp2p/go-libp2p/p2p/protocol/identify/pb/Makefile
delete mode 100644 vendor/github.com/libp2p/go-libp2p/p2p/protocol/identify/peer_loop.go
rename vendor/github.com/libp2p/go-libp2p/p2p/protocol/identify/{id_go118.go => user_agent.go} (97%)
delete mode 100644 vendor/github.com/libp2p/go-libp2p/p2p/security/noise/pb/Makefile
delete mode 100644 vendor/github.com/libp2p/go-libp2p/p2p/transport/quic/quic_multiaddr.go
create mode 100644 vendor/github.com/libp2p/go-libp2p/p2p/transport/quic/virtuallistener.go
create mode 100644 vendor/github.com/libp2p/go-libp2p/p2p/transport/quicreuse/config.go
create mode 100644 vendor/github.com/libp2p/go-libp2p/p2p/transport/quicreuse/connmgr.go
create mode 100644 vendor/github.com/libp2p/go-libp2p/p2p/transport/quicreuse/listener.go
create mode 100644 vendor/github.com/libp2p/go-libp2p/p2p/transport/quicreuse/options.go
create mode 100644 vendor/github.com/libp2p/go-libp2p/p2p/transport/quicreuse/quic_multiaddr.go
rename vendor/github.com/libp2p/go-libp2p/p2p/transport/{quic => quicreuse}/reuse.go (63%)
rename vendor/github.com/libp2p/go-libp2p/p2p/transport/{quic => quicreuse}/tracer.go (78%)
rename vendor/github.com/libp2p/go-libp2p/p2p/transport/{quic => quicreuse}/tracer_metrics.go (84%)
create mode 100644 vendor/github.com/libp2p/go-libp2p/p2p/transport/webtransport/cert_manager.go
create mode 100644 vendor/github.com/libp2p/go-libp2p/p2p/transport/webtransport/conn.go
create mode 100644 vendor/github.com/libp2p/go-libp2p/p2p/transport/webtransport/crypto.go
create mode 100644 vendor/github.com/libp2p/go-libp2p/p2p/transport/webtransport/listener.go
create mode 100644 vendor/github.com/libp2p/go-libp2p/p2p/transport/webtransport/multiaddr.go
create mode 100644 vendor/github.com/libp2p/go-libp2p/p2p/transport/webtransport/noise_early_data.go
create mode 100644 vendor/github.com/libp2p/go-libp2p/p2p/transport/webtransport/stream.go
create mode 100644 vendor/github.com/libp2p/go-libp2p/p2p/transport/webtransport/transport.go
create mode 100644 vendor/github.com/libp2p/go-libp2p/tools.go
create mode 100644 vendor/github.com/libp2p/go-msgio/pbio/interfaces.go
create mode 100644 vendor/github.com/libp2p/go-msgio/pbio/uvarint_reader.go
create mode 100644 vendor/github.com/libp2p/go-msgio/pbio/uvarint_writer.go
create mode 100644 vendor/github.com/libp2p/go-nat/version.json
delete mode 100644 vendor/github.com/libp2p/go-openssl/.gitignore
delete mode 100644 vendor/github.com/libp2p/go-openssl/AUTHORS
delete mode 100644 vendor/github.com/libp2p/go-openssl/LICENSE
delete mode 100644 vendor/github.com/libp2p/go-openssl/README.md
delete mode 100644 vendor/github.com/libp2p/go-openssl/alloc.go
delete mode 100644 vendor/github.com/libp2p/go-openssl/bio.go
delete mode 100644 vendor/github.com/libp2p/go-openssl/build.go
delete mode 100644 vendor/github.com/libp2p/go-openssl/build_static.go
delete mode 100644 vendor/github.com/libp2p/go-openssl/cert.go
delete mode 100644 vendor/github.com/libp2p/go-openssl/ciphers.go
delete mode 100644 vendor/github.com/libp2p/go-openssl/ciphers_gcm.go
delete mode 100644 vendor/github.com/libp2p/go-openssl/conn.go
delete mode 100644 vendor/github.com/libp2p/go-openssl/ctx.go
delete mode 100644 vendor/github.com/libp2p/go-openssl/dh.go
delete mode 100644 vendor/github.com/libp2p/go-openssl/dhparam.go
delete mode 100644 vendor/github.com/libp2p/go-openssl/digest.go
delete mode 100644 vendor/github.com/libp2p/go-openssl/engine.go
delete mode 100644 vendor/github.com/libp2p/go-openssl/extension.c
delete mode 100644 vendor/github.com/libp2p/go-openssl/fips.go
delete mode 100644 vendor/github.com/libp2p/go-openssl/hmac.go
delete mode 100644 vendor/github.com/libp2p/go-openssl/hostname.c
delete mode 100644 vendor/github.com/libp2p/go-openssl/hostname.go
delete mode 100644 vendor/github.com/libp2p/go-openssl/http.go
delete mode 100644 vendor/github.com/libp2p/go-openssl/init.go
delete mode 100644 vendor/github.com/libp2p/go-openssl/init_posix.go
delete mode 100644 vendor/github.com/libp2p/go-openssl/init_windows.go
delete mode 100644 vendor/github.com/libp2p/go-openssl/key.go
delete mode 100644 vendor/github.com/libp2p/go-openssl/mapping.go
delete mode 100644 vendor/github.com/libp2p/go-openssl/md4.go
delete mode 100644 vendor/github.com/libp2p/go-openssl/md5.go
delete mode 100644 vendor/github.com/libp2p/go-openssl/net.go
delete mode 100644 vendor/github.com/libp2p/go-openssl/nid.go
delete mode 100644 vendor/github.com/libp2p/go-openssl/object.go
delete mode 100644 vendor/github.com/libp2p/go-openssl/pem.go
delete mode 100644 vendor/github.com/libp2p/go-openssl/sha1.go
delete mode 100644 vendor/github.com/libp2p/go-openssl/sha256.go
delete mode 100644 vendor/github.com/libp2p/go-openssl/shim.c
delete mode 100644 vendor/github.com/libp2p/go-openssl/shim.h
delete mode 100644 vendor/github.com/libp2p/go-openssl/sni.c
delete mode 100644 vendor/github.com/libp2p/go-openssl/ssl.go
delete mode 100644 vendor/github.com/libp2p/go-openssl/tickets.go
delete mode 100644 vendor/github.com/libp2p/go-openssl/utils/errors.go
delete mode 100644 vendor/github.com/libp2p/go-openssl/utils/future.go
delete mode 100644 vendor/github.com/libp2p/go-yamux/v3/version.json
rename vendor/github.com/libp2p/go-yamux/{v3 => v4}/.gitignore (100%)
rename vendor/github.com/libp2p/go-yamux/{v3 => v4}/LICENSE (100%)
rename vendor/github.com/libp2p/go-yamux/{v3 => v4}/LICENSE-BSD (100%)
rename vendor/github.com/libp2p/go-yamux/{v3 => v4}/README.md (100%)
rename vendor/github.com/libp2p/go-yamux/{v3 => v4}/addr.go (100%)
rename vendor/github.com/libp2p/go-yamux/{v3 => v4}/const.go (100%)
rename vendor/github.com/libp2p/go-yamux/{v3 => v4}/deadline.go (100%)
rename vendor/github.com/libp2p/go-yamux/{v3 => v4}/mux.go (89%)
rename vendor/github.com/libp2p/go-yamux/{v3 => v4}/ping.go (100%)
rename vendor/github.com/libp2p/go-yamux/{v3 => v4}/session.go (90%)
rename vendor/github.com/libp2p/go-yamux/{v3 => v4}/spec.md (100%)
rename vendor/github.com/libp2p/go-yamux/{v3 => v4}/stream.go (96%)
rename vendor/github.com/libp2p/go-yamux/{v3 => v4}/util.go (87%)
create mode 100644 vendor/github.com/libp2p/go-yamux/v4/version.json
delete mode 100644 vendor/github.com/lucas-clemente/quic-go/README.md
delete mode 100644 vendor/github.com/lucas-clemente/quic-go/closed_conn.go
delete mode 100644 vendor/github.com/lucas-clemente/quic-go/internal/ackhandler/ackhandler.go
delete mode 100644 vendor/github.com/lucas-clemente/quic-go/internal/ackhandler/frame.go
delete mode 100644 vendor/github.com/lucas-clemente/quic-go/internal/ackhandler/gen.go
delete mode 100644 vendor/github.com/lucas-clemente/quic-go/internal/ackhandler/mockgen.go
delete mode 100644 vendor/github.com/lucas-clemente/quic-go/internal/ackhandler/packet_linkedlist.go
delete mode 100644 vendor/github.com/lucas-clemente/quic-go/internal/ackhandler/sent_packet_history.go
delete mode 100644 vendor/github.com/lucas-clemente/quic-go/internal/handshake/mockgen.go
delete mode 100644 vendor/github.com/lucas-clemente/quic-go/internal/handshake/retry.go
delete mode 100644 vendor/github.com/lucas-clemente/quic-go/internal/protocol/connection_id.go
delete mode 100644 vendor/github.com/lucas-clemente/quic-go/internal/qtls/go116.go
delete mode 100644 vendor/github.com/lucas-clemente/quic-go/internal/qtls/go117.go
delete mode 100644 vendor/github.com/lucas-clemente/quic-go/internal/qtls/go120.go
delete mode 100644 vendor/github.com/lucas-clemente/quic-go/internal/qtls/go_oldversion.go
delete mode 100644 vendor/github.com/lucas-clemente/quic-go/internal/utils/atomic_bool.go
delete mode 100644 vendor/github.com/lucas-clemente/quic-go/internal/utils/gen.go
delete mode 100644 vendor/github.com/lucas-clemente/quic-go/internal/utils/minmax.go
delete mode 100644 vendor/github.com/lucas-clemente/quic-go/internal/utils/new_connection_id.go
delete mode 100644 vendor/github.com/lucas-clemente/quic-go/internal/utils/newconnectionid_linkedlist.go
delete mode 100644 vendor/github.com/lucas-clemente/quic-go/internal/utils/packet_interval.go
delete mode 100644 vendor/github.com/lucas-clemente/quic-go/internal/utils/packetinterval_linkedlist.go
delete mode 100644 vendor/github.com/lucas-clemente/quic-go/internal/utils/streamframe_interval.go
delete mode 100644 vendor/github.com/lucas-clemente/quic-go/internal/wire/extended_header.go
delete mode 100644 vendor/github.com/lucas-clemente/quic-go/logging/mockgen.go
delete mode 100644 vendor/github.com/lucas-clemente/quic-go/mockgen.go
delete mode 100644 vendor/github.com/lucas-clemente/quic-go/packet_packer.go
delete mode 100644 vendor/github.com/lucas-clemente/quic-go/streams_map_generic_helper.go
delete mode 100644 vendor/github.com/lucas-clemente/quic-go/streams_map_incoming_bidi.go
delete mode 100644 vendor/github.com/lucas-clemente/quic-go/streams_map_incoming_generic.go
delete mode 100644 vendor/github.com/lucas-clemente/quic-go/streams_map_outgoing_bidi.go
delete mode 100644 vendor/github.com/lucas-clemente/quic-go/streams_map_outgoing_generic.go
delete mode 100644 vendor/github.com/lucas-clemente/quic-go/tools.go
delete mode 100644 vendor/github.com/marten-seemann/qtls-go1-16/README.md
delete mode 100644 vendor/github.com/marten-seemann/qtls-go1-16/auth.go
delete mode 100644 vendor/github.com/marten-seemann/qtls-go1-16/cipher_suites.go
delete mode 100644 vendor/github.com/marten-seemann/qtls-go1-16/common.go
delete mode 100644 vendor/github.com/marten-seemann/qtls-go1-16/common_js.go
delete mode 100644 vendor/github.com/marten-seemann/qtls-go1-16/common_nojs.go
delete mode 100644 vendor/github.com/marten-seemann/qtls-go1-16/conn.go
delete mode 100644 vendor/github.com/marten-seemann/qtls-go1-16/handshake_client.go
delete mode 100644 vendor/github.com/marten-seemann/qtls-go1-16/handshake_client_tls13.go
delete mode 100644 vendor/github.com/marten-seemann/qtls-go1-16/handshake_messages.go
delete mode 100644 vendor/github.com/marten-seemann/qtls-go1-16/handshake_server.go
delete mode 100644 vendor/github.com/marten-seemann/qtls-go1-16/handshake_server_tls13.go
delete mode 100644 vendor/github.com/marten-seemann/qtls-go1-16/key_agreement.go
delete mode 100644 vendor/github.com/marten-seemann/qtls-go1-16/ticket.go
delete mode 100644 vendor/github.com/marten-seemann/qtls-go1-16/tls.go
delete mode 100644 vendor/github.com/marten-seemann/qtls-go1-17/README.md
delete mode 100644 vendor/github.com/marten-seemann/qtls-go1-17/cipher_suites.go
delete mode 100644 vendor/github.com/marten-seemann/qtls-go1-17/common.go
delete mode 100644 vendor/github.com/marten-seemann/qtls-go1-17/conn.go
delete mode 100644 vendor/github.com/marten-seemann/qtls-go1-17/handshake_client.go
delete mode 100644 vendor/github.com/marten-seemann/qtls-go1-17/handshake_messages.go
delete mode 100644 vendor/github.com/marten-seemann/qtls-go1-17/handshake_server.go
delete mode 100644 vendor/github.com/marten-seemann/qtls-go1-17/handshake_server_tls13.go
delete mode 100644 vendor/github.com/marten-seemann/qtls-go1-17/prf.go
delete mode 100644 vendor/github.com/marten-seemann/qtls-go1-18/README.md
delete mode 100644 vendor/github.com/marten-seemann/qtls-go1-18/alert.go
delete mode 100644 vendor/github.com/marten-seemann/qtls-go1-18/auth.go
delete mode 100644 vendor/github.com/marten-seemann/qtls-go1-18/handshake_client_tls13.go
delete mode 100644 vendor/github.com/marten-seemann/qtls-go1-18/key_schedule.go
delete mode 100644 vendor/github.com/marten-seemann/qtls-go1-18/unsafe.go
delete mode 100644 vendor/github.com/marten-seemann/qtls-go1-19/README.md
delete mode 100644 vendor/github.com/marten-seemann/qtls-go1-19/alert.go
delete mode 100644 vendor/github.com/marten-seemann/qtls-go1-19/cpu.go
delete mode 100644 vendor/github.com/marten-seemann/qtls-go1-19/cpu_other.go
delete mode 100644 vendor/github.com/marten-seemann/qtls-go1-19/key_agreement.go
delete mode 100644 vendor/github.com/marten-seemann/qtls-go1-19/key_schedule.go
delete mode 100644 vendor/github.com/marten-seemann/qtls-go1-19/prf.go
delete mode 100644 vendor/github.com/marten-seemann/qtls-go1-19/ticket.go
delete mode 100644 vendor/github.com/marten-seemann/qtls-go1-19/tls.go
delete mode 100644 vendor/github.com/marten-seemann/qtls-go1-19/unsafe.go
delete mode 100644 vendor/github.com/mattn/go-pointer/README.md
delete mode 100644 vendor/github.com/mattn/go-pointer/doc.go
delete mode 100644 vendor/github.com/mattn/go-pointer/pointer.go
delete mode 100644 vendor/github.com/miekg/dns/singleinflight.go
delete mode 100644 vendor/github.com/minio/sha256-simd/sha256blockSha_amd64.go
rename vendor/github.com/minio/sha256-simd/{sha256blockSha_amd64.s => sha256block_amd64.s} (99%)
rename vendor/github.com/{libp2p/go-openssl => multiformats/go-base32}/version.json (100%)
create mode 100644 vendor/github.com/multiformats/go-base36/version.json
delete mode 100644 vendor/github.com/multiformats/go-multihash/register/miniosha256/multihash_miniosha256.go
create mode 100644 vendor/github.com/multiformats/go-multihash/register/miniosha256/post_go1.21.go
create mode 100644 vendor/github.com/multiformats/go-multihash/register/miniosha256/pre_go1_21.go
create mode 100644 vendor/github.com/multiformats/go-multihash/register/sha256/sha256.go
delete mode 100644 vendor/github.com/multiformats/go-multistream/.gitignore
delete mode 100644 vendor/github.com/multiformats/go-multistream/multistream_fuzz.go
delete mode 100644 vendor/github.com/multiformats/go-varint/.travis.yml
create mode 100644 vendor/github.com/multiformats/go-varint/version.json
delete mode 100644 vendor/github.com/nxadm/tail/.gitignore
delete mode 100644 vendor/github.com/nxadm/tail/CHANGES.md
delete mode 100644 vendor/github.com/nxadm/tail/Dockerfile
delete mode 100644 vendor/github.com/nxadm/tail/LICENSE
delete mode 100644 vendor/github.com/nxadm/tail/README.md
delete mode 100644 vendor/github.com/nxadm/tail/ratelimiter/leakybucket.go
delete mode 100644 vendor/github.com/nxadm/tail/ratelimiter/memory.go
delete mode 100644 vendor/github.com/nxadm/tail/ratelimiter/storage.go
delete mode 100644 vendor/github.com/nxadm/tail/tail.go
delete mode 100644 vendor/github.com/nxadm/tail/tail_posix.go
delete mode 100644 vendor/github.com/nxadm/tail/tail_windows.go
delete mode 100644 vendor/github.com/nxadm/tail/util/util.go
delete mode 100644 vendor/github.com/nxadm/tail/watch/filechanges.go
delete mode 100644 vendor/github.com/nxadm/tail/watch/inotify.go
delete mode 100644 vendor/github.com/nxadm/tail/watch/inotify_tracker.go
delete mode 100644 vendor/github.com/nxadm/tail/watch/polling.go
delete mode 100644 vendor/github.com/nxadm/tail/watch/watch.go
delete mode 100644 vendor/github.com/nxadm/tail/winfile/winfile.go
delete mode 100644 vendor/github.com/onsi/ginkgo/.travis.yml
delete mode 100644 vendor/github.com/onsi/ginkgo/CHANGELOG.md
delete mode 100644 vendor/github.com/onsi/ginkgo/CONTRIBUTING.md
delete mode 100644 vendor/github.com/onsi/ginkgo/README.md
delete mode 100644 vendor/github.com/onsi/ginkgo/config/config.go
delete mode 100644 vendor/github.com/onsi/ginkgo/ginkgo/bootstrap_command.go
delete mode 100644 vendor/github.com/onsi/ginkgo/ginkgo/build_command.go
delete mode 100644 vendor/github.com/onsi/ginkgo/ginkgo/convert/ginkgo_ast_nodes.go
delete mode 100644 vendor/github.com/onsi/ginkgo/ginkgo/convert/import.go
delete mode 100644 vendor/github.com/onsi/ginkgo/ginkgo/convert/package_rewriter.go
delete mode 100644 vendor/github.com/onsi/ginkgo/ginkgo/convert/test_finder.go
delete mode 100644 vendor/github.com/onsi/ginkgo/ginkgo/convert/testfile_rewriter.go
delete mode 100644 vendor/github.com/onsi/ginkgo/ginkgo/convert/testing_t_rewriter.go
delete mode 100644 vendor/github.com/onsi/ginkgo/ginkgo/convert_command.go
delete mode 100644 vendor/github.com/onsi/ginkgo/ginkgo/generate_command.go
delete mode 100644 vendor/github.com/onsi/ginkgo/ginkgo/help_command.go
delete mode 100644 vendor/github.com/onsi/ginkgo/ginkgo/interrupthandler/interrupt_handler.go
delete mode 100644 vendor/github.com/onsi/ginkgo/ginkgo/main.go
delete mode 100644 vendor/github.com/onsi/ginkgo/ginkgo/nodot/nodot.go
delete mode 100644 vendor/github.com/onsi/ginkgo/ginkgo/nodot_command.go
delete mode 100644 vendor/github.com/onsi/ginkgo/ginkgo/notifications.go
delete mode 100644 vendor/github.com/onsi/ginkgo/ginkgo/outline_command.go
delete mode 100644 vendor/github.com/onsi/ginkgo/ginkgo/run_command.go
delete mode 100644 vendor/github.com/onsi/ginkgo/ginkgo/run_watch_and_build_command_flags.go
delete mode 100644 vendor/github.com/onsi/ginkgo/ginkgo/suite_runner.go
delete mode 100644 vendor/github.com/onsi/ginkgo/ginkgo/testrunner/build_args.go
delete mode 100644 vendor/github.com/onsi/ginkgo/ginkgo/testrunner/build_args_old.go
delete mode 100644 vendor/github.com/onsi/ginkgo/ginkgo/testrunner/log_writer.go
delete mode 100644 vendor/github.com/onsi/ginkgo/ginkgo/testrunner/run_result.go
delete mode 100644 vendor/github.com/onsi/ginkgo/ginkgo/testrunner/test_runner.go
delete mode 100644 vendor/github.com/onsi/ginkgo/ginkgo/testsuite/test_suite.go
delete mode 100644 vendor/github.com/onsi/ginkgo/ginkgo/testsuite/vendor_check_go15.go
delete mode 100644 vendor/github.com/onsi/ginkgo/ginkgo/testsuite/vendor_check_go16.go
delete mode 100644 vendor/github.com/onsi/ginkgo/ginkgo/version_command.go
delete mode 100644 vendor/github.com/onsi/ginkgo/ginkgo/watch_command.go
delete mode 100644 vendor/github.com/onsi/ginkgo/ginkgo_dsl.go
delete mode 100644 vendor/github.com/onsi/ginkgo/internal/codelocation/code_location.go
delete mode 100644 vendor/github.com/onsi/ginkgo/internal/containernode/container_node.go
delete mode 100644 vendor/github.com/onsi/ginkgo/internal/global/init.go
delete mode 100644 vendor/github.com/onsi/ginkgo/internal/leafnodes/benchmarker.go
delete mode 100644 vendor/github.com/onsi/ginkgo/internal/leafnodes/interfaces.go
delete mode 100644 vendor/github.com/onsi/ginkgo/internal/leafnodes/it_node.go
delete mode 100644 vendor/github.com/onsi/ginkgo/internal/leafnodes/measure_node.go
delete mode 100644 vendor/github.com/onsi/ginkgo/internal/leafnodes/runner.go
delete mode 100644 vendor/github.com/onsi/ginkgo/internal/leafnodes/setup_nodes.go
delete mode 100644 vendor/github.com/onsi/ginkgo/internal/leafnodes/suite_nodes.go
delete mode 100644 vendor/github.com/onsi/ginkgo/internal/leafnodes/synchronized_after_suite_node.go
delete mode 100644 vendor/github.com/onsi/ginkgo/internal/leafnodes/synchronized_before_suite_node.go
delete mode 100644 vendor/github.com/onsi/ginkgo/internal/remote/aggregator.go
delete mode 100644 vendor/github.com/onsi/ginkgo/internal/remote/forwarding_reporter.go
delete mode 100644 vendor/github.com/onsi/ginkgo/internal/remote/output_interceptor.go
delete mode 100644 vendor/github.com/onsi/ginkgo/internal/remote/output_interceptor_unix.go
delete mode 100644 vendor/github.com/onsi/ginkgo/internal/remote/output_interceptor_win.go
delete mode 100644 vendor/github.com/onsi/ginkgo/internal/remote/server.go
delete mode 100644 vendor/github.com/onsi/ginkgo/internal/spec/spec.go
delete mode 100644 vendor/github.com/onsi/ginkgo/internal/spec/specs.go
delete mode 100644 vendor/github.com/onsi/ginkgo/internal/spec_iterator/index_computer.go
delete mode 100644 vendor/github.com/onsi/ginkgo/internal/spec_iterator/parallel_spec_iterator.go
delete mode 100644 vendor/github.com/onsi/ginkgo/internal/spec_iterator/serial_spec_iterator.go
delete mode 100644 vendor/github.com/onsi/ginkgo/internal/spec_iterator/sharded_parallel_spec_iterator.go
delete mode 100644 vendor/github.com/onsi/ginkgo/internal/spec_iterator/spec_iterator.go
delete mode 100644 vendor/github.com/onsi/ginkgo/internal/specrunner/random_id.go
delete mode 100644 vendor/github.com/onsi/ginkgo/internal/specrunner/spec_runner.go
delete mode 100644 vendor/github.com/onsi/ginkgo/internal/suite/suite.go
delete mode 100644 vendor/github.com/onsi/ginkgo/internal/testingtproxy/testing_t_proxy.go
delete mode 100644 vendor/github.com/onsi/ginkgo/internal/writer/fake_writer.go
delete mode 100644 vendor/github.com/onsi/ginkgo/internal/writer/writer.go
delete mode 100644 vendor/github.com/onsi/ginkgo/reporters/default_reporter.go
delete mode 100644 vendor/github.com/onsi/ginkgo/reporters/fake_reporter.go
delete mode 100644 vendor/github.com/onsi/ginkgo/reporters/junit_reporter.go
delete mode 100644 vendor/github.com/onsi/ginkgo/reporters/reporter.go
delete mode 100644 vendor/github.com/onsi/ginkgo/reporters/stenographer/console_logging.go
delete mode 100644 vendor/github.com/onsi/ginkgo/reporters/stenographer/fake_stenographer.go
delete mode 100644 vendor/github.com/onsi/ginkgo/reporters/stenographer/stenographer.go
delete mode 100644 vendor/github.com/onsi/ginkgo/reporters/stenographer/support/go-colorable/README.md
delete mode 100644 vendor/github.com/onsi/ginkgo/reporters/stenographer/support/go-colorable/colorable_others.go
delete mode 100644 vendor/github.com/onsi/ginkgo/reporters/stenographer/support/go-colorable/noncolorable.go
delete mode 100644 vendor/github.com/onsi/ginkgo/reporters/stenographer/support/go-isatty/README.md
delete mode 100644 vendor/github.com/onsi/ginkgo/reporters/stenographer/support/go-isatty/doc.go
delete mode 100644 vendor/github.com/onsi/ginkgo/reporters/stenographer/support/go-isatty/isatty_appengine.go
delete mode 100644 vendor/github.com/onsi/ginkgo/reporters/stenographer/support/go-isatty/isatty_bsd.go
delete mode 100644 vendor/github.com/onsi/ginkgo/reporters/stenographer/support/go-isatty/isatty_linux.go
delete mode 100644 vendor/github.com/onsi/ginkgo/reporters/stenographer/support/go-isatty/isatty_solaris.go
delete mode 100644 vendor/github.com/onsi/ginkgo/reporters/stenographer/support/go-isatty/isatty_windows.go
delete mode 100644 vendor/github.com/onsi/ginkgo/reporters/teamcity_reporter.go
delete mode 100644 vendor/github.com/onsi/ginkgo/types/code_location.go
delete mode 100644 vendor/github.com/onsi/ginkgo/types/synchronization.go
delete mode 100644 vendor/github.com/onsi/ginkgo/types/types.go
rename vendor/github.com/onsi/ginkgo/{ => v2}/.gitignore (90%)
create mode 100644 vendor/github.com/onsi/ginkgo/v2/CHANGELOG.md
create mode 100644 vendor/github.com/onsi/ginkgo/v2/CONTRIBUTING.md
rename vendor/github.com/onsi/ginkgo/{ => v2}/LICENSE (100%)
create mode 100644 vendor/github.com/onsi/ginkgo/v2/README.md
rename vendor/github.com/onsi/ginkgo/{ => v2}/RELEASING.md (58%)
create mode 100644 vendor/github.com/onsi/ginkgo/v2/config/deprecated.go
create mode 100644 vendor/github.com/onsi/ginkgo/v2/core_dsl.go
create mode 100644 vendor/github.com/onsi/ginkgo/v2/decorator_dsl.go
create mode 100644 vendor/github.com/onsi/ginkgo/v2/deprecated_dsl.go
rename vendor/github.com/onsi/ginkgo/{reporters/stenographer/support/go-colorable/LICENSE => v2/formatter/colorable_others.go} (76%)
rename vendor/github.com/onsi/ginkgo/{reporters/stenographer/support/go-colorable => v2/formatter}/colorable_windows.go (90%)
rename vendor/github.com/onsi/ginkgo/{ => v2}/formatter/formatter.go (64%)
create mode 100644 vendor/github.com/onsi/ginkgo/v2/ginkgo/build/build_command.go
create mode 100644 vendor/github.com/onsi/ginkgo/v2/ginkgo/command/abort.go
create mode 100644 vendor/github.com/onsi/ginkgo/v2/ginkgo/command/command.go
create mode 100644 vendor/github.com/onsi/ginkgo/v2/ginkgo/command/program.go
create mode 100644 vendor/github.com/onsi/ginkgo/v2/ginkgo/generators/boostrap_templates.go
create mode 100644 vendor/github.com/onsi/ginkgo/v2/ginkgo/generators/bootstrap_command.go
create mode 100644 vendor/github.com/onsi/ginkgo/v2/ginkgo/generators/generate_command.go
create mode 100644 vendor/github.com/onsi/ginkgo/v2/ginkgo/generators/generate_templates.go
create mode 100644 vendor/github.com/onsi/ginkgo/v2/ginkgo/generators/generators_common.go
create mode 100644 vendor/github.com/onsi/ginkgo/v2/ginkgo/internal/compile.go
create mode 100644 vendor/github.com/onsi/ginkgo/v2/ginkgo/internal/profiles_and_reports.go
create mode 100644 vendor/github.com/onsi/ginkgo/v2/ginkgo/internal/run.go
create mode 100644 vendor/github.com/onsi/ginkgo/v2/ginkgo/internal/test_suite.go
create mode 100644 vendor/github.com/onsi/ginkgo/v2/ginkgo/internal/utils.go
create mode 100644 vendor/github.com/onsi/ginkgo/v2/ginkgo/internal/verify_version.go
create mode 100644 vendor/github.com/onsi/ginkgo/v2/ginkgo/labels/labels_command.go
create mode 100644 vendor/github.com/onsi/ginkgo/v2/ginkgo/main.go
rename vendor/github.com/onsi/ginkgo/{ => v2}/ginkgo/outline/ginkgo.go (73%)
rename vendor/github.com/onsi/ginkgo/{ => v2}/ginkgo/outline/import.go (97%)
rename vendor/github.com/onsi/ginkgo/{ => v2}/ginkgo/outline/outline.go (79%)
create mode 100644 vendor/github.com/onsi/ginkgo/v2/ginkgo/outline/outline_command.go
create mode 100644 vendor/github.com/onsi/ginkgo/v2/ginkgo/run/run_command.go
rename vendor/github.com/onsi/ginkgo/{ginkgo => v2/ginkgo/unfocus}/unfocus_command.go (67%)
rename vendor/github.com/onsi/ginkgo/{ => v2}/ginkgo/watch/delta.go (100%)
rename vendor/github.com/onsi/ginkgo/{ => v2}/ginkgo/watch/delta_tracker.go (85%)
rename vendor/github.com/onsi/ginkgo/{ => v2}/ginkgo/watch/dependencies.go (100%)
rename vendor/github.com/onsi/ginkgo/{ => v2}/ginkgo/watch/package_hash.go (92%)
rename vendor/github.com/onsi/ginkgo/{ => v2}/ginkgo/watch/package_hashes.go (100%)
rename vendor/github.com/onsi/ginkgo/{ => v2}/ginkgo/watch/suite.go (90%)
create mode 100644 vendor/github.com/onsi/ginkgo/v2/ginkgo/watch/watch_command.go
create mode 100644 vendor/github.com/onsi/ginkgo/v2/ginkgo_cli_dependencies.go
create mode 100644 vendor/github.com/onsi/ginkgo/v2/ginkgo_t_dsl.go
create mode 100644 vendor/github.com/onsi/ginkgo/v2/internal/counter.go
rename vendor/github.com/onsi/ginkgo/{internal/failer => v2/internal}/failer.go (61%)
create mode 100644 vendor/github.com/onsi/ginkgo/v2/internal/focus.go
create mode 100644 vendor/github.com/onsi/ginkgo/v2/internal/global/init.go
create mode 100644 vendor/github.com/onsi/ginkgo/v2/internal/group.go
create mode 100644 vendor/github.com/onsi/ginkgo/v2/internal/interrupt_handler/interrupt_handler.go
rename vendor/github.com/onsi/ginkgo/{ginkgo/interrupthandler => v2/internal/interrupt_handler}/sigquit_swallower_unix.go (64%)
rename vendor/github.com/onsi/ginkgo/{ginkgo/interrupthandler => v2/internal/interrupt_handler}/sigquit_swallower_windows.go (54%)
create mode 100644 vendor/github.com/onsi/ginkgo/v2/internal/node.go
create mode 100644 vendor/github.com/onsi/ginkgo/v2/internal/ordering.go
create mode 100644 vendor/github.com/onsi/ginkgo/v2/internal/output_interceptor.go
create mode 100644 vendor/github.com/onsi/ginkgo/v2/internal/output_interceptor_unix.go
create mode 100644 vendor/github.com/onsi/ginkgo/v2/internal/output_interceptor_win.go
create mode 100644 vendor/github.com/onsi/ginkgo/v2/internal/parallel_support/client_server.go
create mode 100644 vendor/github.com/onsi/ginkgo/v2/internal/parallel_support/http_client.go
create mode 100644 vendor/github.com/onsi/ginkgo/v2/internal/parallel_support/http_server.go
create mode 100644 vendor/github.com/onsi/ginkgo/v2/internal/parallel_support/rpc_client.go
create mode 100644 vendor/github.com/onsi/ginkgo/v2/internal/parallel_support/rpc_server.go
create mode 100644 vendor/github.com/onsi/ginkgo/v2/internal/parallel_support/server_handler.go
create mode 100644 vendor/github.com/onsi/ginkgo/v2/internal/progress_report.go
create mode 100644 vendor/github.com/onsi/ginkgo/v2/internal/progress_report_bsd.go
create mode 100644 vendor/github.com/onsi/ginkgo/v2/internal/progress_report_unix.go
create mode 100644 vendor/github.com/onsi/ginkgo/v2/internal/progress_report_win.go
create mode 100644 vendor/github.com/onsi/ginkgo/v2/internal/progress_reporter_manager.go
create mode 100644 vendor/github.com/onsi/ginkgo/v2/internal/report_entry.go
create mode 100644 vendor/github.com/onsi/ginkgo/v2/internal/spec.go
create mode 100644 vendor/github.com/onsi/ginkgo/v2/internal/spec_context.go
create mode 100644 vendor/github.com/onsi/ginkgo/v2/internal/suite.go
create mode 100644 vendor/github.com/onsi/ginkgo/v2/internal/testingtproxy/testing_t_proxy.go
create mode 100644 vendor/github.com/onsi/ginkgo/v2/internal/tree.go
create mode 100644 vendor/github.com/onsi/ginkgo/v2/internal/writer.go
create mode 100644 vendor/github.com/onsi/ginkgo/v2/reporters/default_reporter.go
create mode 100644 vendor/github.com/onsi/ginkgo/v2/reporters/deprecated_reporter.go
create mode 100644 vendor/github.com/onsi/ginkgo/v2/reporters/json_report.go
create mode 100644 vendor/github.com/onsi/ginkgo/v2/reporters/junit_report.go
create mode 100644 vendor/github.com/onsi/ginkgo/v2/reporters/reporter.go
create mode 100644 vendor/github.com/onsi/ginkgo/v2/reporters/teamcity_report.go
create mode 100644 vendor/github.com/onsi/ginkgo/v2/reporting_dsl.go
create mode 100644 vendor/github.com/onsi/ginkgo/v2/table_dsl.go
create mode 100644 vendor/github.com/onsi/ginkgo/v2/types/code_location.go
create mode 100644 vendor/github.com/onsi/ginkgo/v2/types/config.go
create mode 100644 vendor/github.com/onsi/ginkgo/v2/types/deprecated_types.go
rename vendor/github.com/onsi/ginkgo/{ => v2}/types/deprecation_support.go (64%)
create mode 100644 vendor/github.com/onsi/ginkgo/v2/types/enum_support.go
create mode 100644 vendor/github.com/onsi/ginkgo/v2/types/errors.go
create mode 100644 vendor/github.com/onsi/ginkgo/v2/types/file_filter.go
create mode 100644 vendor/github.com/onsi/ginkgo/v2/types/flags.go
create mode 100644 vendor/github.com/onsi/ginkgo/v2/types/label_filter.go
create mode 100644 vendor/github.com/onsi/ginkgo/v2/types/report_entry.go
create mode 100644 vendor/github.com/onsi/ginkgo/v2/types/types.go
create mode 100644 vendor/github.com/onsi/ginkgo/v2/types/version.go
delete mode 100644 vendor/github.com/onsi/gomega/.travis.yml
delete mode 100644 vendor/github.com/onsi/gomega/Dockerfile
delete mode 100644 vendor/github.com/onsi/gomega/Makefile
delete mode 100644 vendor/github.com/onsi/gomega/docker-compose.yaml
create mode 100644 vendor/github.com/onsi/gomega/internal/gutil/post_ioutil.go
create mode 100644 vendor/github.com/onsi/gomega/internal/gutil/using_ioutil.go
create mode 100644 vendor/github.com/onsi/gomega/internal/polling_signal_error.go
create mode 100644 vendor/github.com/onsi/gomega/internal/vetoptdesc.go
create mode 100644 vendor/github.com/onsi/gomega/matchers/be_comparable_to_matcher.go
create mode 100644 vendor/github.com/onsi/gomega/matchers/be_key_of_matcher.go
create mode 100644 vendor/github.com/onsi/gomega/matchers/have_each_matcher.go
create mode 100644 vendor/github.com/onsi/gomega/matchers/have_exact_elements.go
create mode 100644 vendor/github.com/onsi/gomega/matchers/have_existing_field_matcher.go
create mode 100644 vendor/github.com/onsi/gomega/matchers/have_field.go
create mode 100644 vendor/github.com/onsi/gomega/matchers/have_value.go
create mode 100644 vendor/github.com/prometheus/client_golang/prometheus/internal/almost_equal.go
create mode 100644 vendor/github.com/quic-go/qpack/.codecov.yml
create mode 100644 vendor/github.com/quic-go/qpack/.gitignore
create mode 100644 vendor/github.com/quic-go/qpack/.gitmodules
create mode 100644 vendor/github.com/quic-go/qpack/.golangci.yml
rename vendor/github.com/{nxadm/tail/ratelimiter/Licence => quic-go/qpack/LICENSE.md} (97%)
create mode 100644 vendor/github.com/quic-go/qpack/README.md
create mode 100644 vendor/github.com/quic-go/qpack/decoder.go
create mode 100644 vendor/github.com/quic-go/qpack/encoder.go
create mode 100644 vendor/github.com/quic-go/qpack/header_field.go
create mode 100644 vendor/github.com/quic-go/qpack/static_table.go
create mode 100644 vendor/github.com/quic-go/qpack/tools.go
create mode 100644 vendor/github.com/quic-go/qpack/varint.go
rename vendor/github.com/{marten-seemann/qtls-go1-16 => quic-go/qtls-go1-19}/LICENSE (100%)
create mode 100644 vendor/github.com/quic-go/qtls-go1-19/README.md
rename vendor/github.com/{marten-seemann/qtls-go1-16 => quic-go/qtls-go1-19}/alert.go (100%)
rename vendor/github.com/{marten-seemann => quic-go}/qtls-go1-19/auth.go (100%)
rename vendor/github.com/{marten-seemann => quic-go}/qtls-go1-19/cipher_suites.go (100%)
rename vendor/github.com/{marten-seemann => quic-go}/qtls-go1-19/common.go (99%)
rename vendor/github.com/{marten-seemann => quic-go}/qtls-go1-19/conn.go (96%)
rename vendor/github.com/{marten-seemann/qtls-go1-17 => quic-go/qtls-go1-19}/cpu.go (100%)
rename vendor/github.com/{marten-seemann/qtls-go1-17 => quic-go/qtls-go1-19}/cpu_other.go (100%)
rename vendor/github.com/{marten-seemann => quic-go}/qtls-go1-19/handshake_client.go (91%)
rename vendor/github.com/{marten-seemann => quic-go}/qtls-go1-19/handshake_client_tls13.go (92%)
rename vendor/github.com/{marten-seemann => quic-go}/qtls-go1-19/handshake_messages.go (76%)
rename vendor/github.com/{marten-seemann => quic-go}/qtls-go1-19/handshake_server.go (90%)
rename vendor/github.com/{marten-seemann => quic-go}/qtls-go1-19/handshake_server_tls13.go (91%)
rename vendor/github.com/{marten-seemann/qtls-go1-17 => quic-go/qtls-go1-19}/key_agreement.go (100%)
rename vendor/github.com/{marten-seemann/qtls-go1-16 => quic-go/qtls-go1-19}/key_schedule.go (86%)
rename vendor/github.com/{marten-seemann => quic-go}/qtls-go1-19/notboring.go (100%)
rename vendor/github.com/{marten-seemann/qtls-go1-16 => quic-go/qtls-go1-19}/prf.go (100%)
rename vendor/github.com/{marten-seemann/qtls-go1-18 => quic-go/qtls-go1-19}/ticket.go (96%)
rename vendor/github.com/{marten-seemann/qtls-go1-17 => quic-go/qtls-go1-19}/tls.go (100%)
rename vendor/github.com/{marten-seemann/qtls-go1-16 => quic-go/qtls-go1-19}/unsafe.go (100%)
rename vendor/github.com/{marten-seemann/qtls-go1-17 => quic-go/qtls-go1-20}/LICENSE (100%)
create mode 100644 vendor/github.com/quic-go/qtls-go1-20/README.md
rename vendor/github.com/{marten-seemann/qtls-go1-17 => quic-go/qtls-go1-20}/alert.go (100%)
rename vendor/github.com/{marten-seemann/qtls-go1-17 => quic-go/qtls-go1-20}/auth.go (98%)
create mode 100644 vendor/github.com/quic-go/qtls-go1-20/cache.go
rename vendor/github.com/{marten-seemann/qtls-go1-18 => quic-go/qtls-go1-20}/cipher_suites.go (91%)
rename vendor/github.com/{marten-seemann/qtls-go1-18 => quic-go/qtls-go1-20}/common.go (97%)
rename vendor/github.com/{marten-seemann/qtls-go1-18 => quic-go/qtls-go1-20}/conn.go (94%)
rename vendor/github.com/{marten-seemann/qtls-go1-18 => quic-go/qtls-go1-20}/cpu.go (100%)
rename vendor/github.com/{marten-seemann/qtls-go1-18 => quic-go/qtls-go1-20}/cpu_other.go (100%)
rename vendor/github.com/{marten-seemann/qtls-go1-18 => quic-go/qtls-go1-20}/handshake_client.go (88%)
rename vendor/github.com/{marten-seemann/qtls-go1-17 => quic-go/qtls-go1-20}/handshake_client_tls13.go (87%)
rename vendor/github.com/{marten-seemann/qtls-go1-18 => quic-go/qtls-go1-20}/handshake_messages.go (75%)
rename vendor/github.com/{marten-seemann/qtls-go1-18 => quic-go/qtls-go1-20}/handshake_server.go (89%)
rename vendor/github.com/{marten-seemann/qtls-go1-18 => quic-go/qtls-go1-20}/handshake_server_tls13.go (89%)
rename vendor/github.com/{marten-seemann/qtls-go1-18 => quic-go/qtls-go1-20}/key_agreement.go (94%)
rename vendor/github.com/{marten-seemann/qtls-go1-17 => quic-go/qtls-go1-20}/key_schedule.go (63%)
create mode 100644 vendor/github.com/quic-go/qtls-go1-20/notboring.go
rename vendor/github.com/{marten-seemann/qtls-go1-18 => quic-go/qtls-go1-20}/prf.go (99%)
rename vendor/github.com/{marten-seemann/qtls-go1-17 => quic-go/qtls-go1-20}/ticket.go (95%)
rename vendor/github.com/{marten-seemann/qtls-go1-18 => quic-go/qtls-go1-20}/tls.go (100%)
rename vendor/github.com/{marten-seemann/qtls-go1-17 => quic-go/qtls-go1-20}/unsafe.go (100%)
rename vendor/github.com/{lucas-clemente => quic-go}/quic-go/.gitignore (100%)
rename vendor/github.com/{lucas-clemente => quic-go}/quic-go/.golangci.yml (100%)
rename vendor/github.com/{lucas-clemente => quic-go}/quic-go/Changelog.md (97%)
rename vendor/github.com/{lucas-clemente => quic-go}/quic-go/LICENSE (100%)
create mode 100644 vendor/github.com/quic-go/quic-go/README.md
rename vendor/github.com/{lucas-clemente => quic-go}/quic-go/buffer_pool.go (96%)
rename vendor/github.com/{lucas-clemente => quic-go}/quic-go/client.go (93%)
create mode 100644 vendor/github.com/quic-go/quic-go/closed_conn.go
rename vendor/github.com/{lucas-clemente => quic-go}/quic-go/codecov.yml (95%)
rename vendor/github.com/{lucas-clemente => quic-go}/quic-go/config.go (73%)
rename vendor/github.com/{lucas-clemente => quic-go}/quic-go/conn_id_generator.go (73%)
rename vendor/github.com/{lucas-clemente => quic-go}/quic-go/conn_id_manager.go (90%)
rename vendor/github.com/{lucas-clemente => quic-go}/quic-go/connection.go (76%)
create mode 100644 vendor/github.com/quic-go/quic-go/connection_timer.go
rename vendor/github.com/{lucas-clemente => quic-go}/quic-go/crypto_stream.go (88%)
rename vendor/github.com/{lucas-clemente => quic-go}/quic-go/crypto_stream_manager.go (93%)
rename vendor/github.com/{lucas-clemente => quic-go}/quic-go/datagram_queue.go (51%)
rename vendor/github.com/{lucas-clemente => quic-go}/quic-go/errors.go (89%)
rename vendor/github.com/{lucas-clemente => quic-go}/quic-go/frame_sorter.go (86%)
rename vendor/github.com/{lucas-clemente => quic-go}/quic-go/framer.go (77%)
create mode 100644 vendor/github.com/quic-go/quic-go/http3/body.go
create mode 100644 vendor/github.com/quic-go/quic-go/http3/capsule.go
create mode 100644 vendor/github.com/quic-go/quic-go/http3/client.go
create mode 100644 vendor/github.com/quic-go/quic-go/http3/error_codes.go
create mode 100644 vendor/github.com/quic-go/quic-go/http3/frames.go
create mode 100644 vendor/github.com/quic-go/quic-go/http3/gzip_reader.go
create mode 100644 vendor/github.com/quic-go/quic-go/http3/http_stream.go
create mode 100644 vendor/github.com/quic-go/quic-go/http3/request.go
create mode 100644 vendor/github.com/quic-go/quic-go/http3/request_writer.go
create mode 100644 vendor/github.com/quic-go/quic-go/http3/response_writer.go
create mode 100644 vendor/github.com/quic-go/quic-go/http3/roundtrip.go
create mode 100644 vendor/github.com/quic-go/quic-go/http3/server.go
rename vendor/github.com/{lucas-clemente => quic-go}/quic-go/interface.go (80%)
rename vendor/github.com/{lucas-clemente => quic-go}/quic-go/internal/ackhandler/ack_eliciting.go (80%)
create mode 100644 vendor/github.com/quic-go/quic-go/internal/ackhandler/ackhandler.go
create mode 100644 vendor/github.com/quic-go/quic-go/internal/ackhandler/frame.go
rename vendor/github.com/{lucas-clemente => quic-go}/quic-go/internal/ackhandler/interfaces.go (74%)
create mode 100644 vendor/github.com/quic-go/quic-go/internal/ackhandler/mockgen.go
create mode 100644 vendor/github.com/quic-go/quic-go/internal/ackhandler/packet.go
rename vendor/github.com/{lucas-clemente => quic-go}/quic-go/internal/ackhandler/packet_number_generator.go (92%)
rename vendor/github.com/{lucas-clemente => quic-go}/quic-go/internal/ackhandler/received_packet_handler.go (84%)
rename vendor/github.com/{lucas-clemente => quic-go}/quic-go/internal/ackhandler/received_packet_history.go (74%)
rename vendor/github.com/{lucas-clemente => quic-go}/quic-go/internal/ackhandler/received_packet_tracker.go (86%)
rename vendor/github.com/{lucas-clemente => quic-go}/quic-go/internal/ackhandler/send_mode.go (100%)
rename vendor/github.com/{lucas-clemente => quic-go}/quic-go/internal/ackhandler/sent_packet_handler.go (91%)
create mode 100644 vendor/github.com/quic-go/quic-go/internal/ackhandler/sent_packet_history.go
rename vendor/github.com/{lucas-clemente => quic-go}/quic-go/internal/congestion/bandwidth.go (91%)
rename vendor/github.com/{lucas-clemente => quic-go}/quic-go/internal/congestion/clock.go (100%)
rename vendor/github.com/{lucas-clemente => quic-go}/quic-go/internal/congestion/cubic.go (97%)
rename vendor/github.com/{lucas-clemente => quic-go}/quic-go/internal/congestion/cubic_sender.go (95%)
rename vendor/github.com/{lucas-clemente => quic-go}/quic-go/internal/congestion/hybrid_slow_start.go (91%)
rename vendor/github.com/{lucas-clemente => quic-go}/quic-go/internal/congestion/interface.go (94%)
rename vendor/github.com/{lucas-clemente => quic-go}/quic-go/internal/congestion/pacer.go (90%)
rename vendor/github.com/{lucas-clemente => quic-go}/quic-go/internal/flowcontrol/base_flow_controller.go (93%)
rename vendor/github.com/{lucas-clemente => quic-go}/quic-go/internal/flowcontrol/connection_flow_controller.go (94%)
rename vendor/github.com/{lucas-clemente => quic-go}/quic-go/internal/flowcontrol/interface.go (95%)
rename vendor/github.com/{lucas-clemente => quic-go}/quic-go/internal/flowcontrol/stream_flow_controller.go (94%)
rename vendor/github.com/{lucas-clemente => quic-go}/quic-go/internal/handshake/aead.go (95%)
rename vendor/github.com/{lucas-clemente => quic-go}/quic-go/internal/handshake/crypto_setup.go (92%)
rename vendor/github.com/{lucas-clemente => quic-go}/quic-go/internal/handshake/header_protector.go (97%)
rename vendor/github.com/{lucas-clemente => quic-go}/quic-go/internal/handshake/hkdf.go (100%)
rename vendor/github.com/{lucas-clemente => quic-go}/quic-go/internal/handshake/initial_aead.go (88%)
rename vendor/github.com/{lucas-clemente => quic-go}/quic-go/internal/handshake/interface.go (95%)
create mode 100644 vendor/github.com/quic-go/quic-go/internal/handshake/mockgen.go
create mode 100644 vendor/github.com/quic-go/quic-go/internal/handshake/retry.go
rename vendor/github.com/{lucas-clemente => quic-go}/quic-go/internal/handshake/session_ticket.go (77%)
rename vendor/github.com/{lucas-clemente => quic-go}/quic-go/internal/handshake/tls_extension_handler.go (92%)
rename vendor/github.com/{lucas-clemente => quic-go}/quic-go/internal/handshake/token_generator.go (76%)
rename vendor/github.com/{lucas-clemente => quic-go}/quic-go/internal/handshake/token_protector.go (100%)
rename vendor/github.com/{lucas-clemente => quic-go}/quic-go/internal/handshake/updatable_aead.go (97%)
rename vendor/github.com/{lucas-clemente => quic-go}/quic-go/internal/logutils/frame.go (54%)
create mode 100644 vendor/github.com/quic-go/quic-go/internal/protocol/connection_id.go
rename vendor/github.com/{lucas-clemente => quic-go}/quic-go/internal/protocol/encryption_level.go (100%)
rename vendor/github.com/{lucas-clemente => quic-go}/quic-go/internal/protocol/key_phase.go (100%)
rename vendor/github.com/{lucas-clemente => quic-go}/quic-go/internal/protocol/packet_number.go (100%)
rename vendor/github.com/{lucas-clemente => quic-go}/quic-go/internal/protocol/params.go (99%)
rename vendor/github.com/{lucas-clemente => quic-go}/quic-go/internal/protocol/perspective.go (100%)
rename vendor/github.com/{lucas-clemente => quic-go}/quic-go/internal/protocol/protocol.go (100%)
rename vendor/github.com/{lucas-clemente => quic-go}/quic-go/internal/protocol/stream.go (100%)
rename vendor/github.com/{lucas-clemente => quic-go}/quic-go/internal/protocol/version.go (98%)
rename vendor/github.com/{lucas-clemente => quic-go}/quic-go/internal/qerr/error_codes.go (95%)
rename vendor/github.com/{lucas-clemente => quic-go}/quic-go/internal/qerr/errors.go (84%)
rename vendor/github.com/{lucas-clemente => quic-go}/quic-go/internal/qtls/go119.go (94%)
rename vendor/github.com/{lucas-clemente/quic-go/internal/qtls/go118.go => quic-go/quic-go/internal/qtls/go120.go} (91%)
create mode 100644 vendor/github.com/quic-go/quic-go/internal/qtls/go121.go
create mode 100644 vendor/github.com/quic-go/quic-go/internal/qtls/go_oldversion.go
rename vendor/github.com/{lucas-clemente => quic-go}/quic-go/internal/utils/buffered_write_closer.go (100%)
rename vendor/github.com/{lucas-clemente => quic-go}/quic-go/internal/utils/byteorder.go (85%)
rename vendor/github.com/{lucas-clemente => quic-go}/quic-go/internal/utils/byteorder_big_endian.go (85%)
rename vendor/github.com/{lucas-clemente => quic-go}/quic-go/internal/utils/ip.go (100%)
create mode 100644 vendor/github.com/quic-go/quic-go/internal/utils/linkedlist/README.md
rename vendor/github.com/{lucas-clemente/quic-go/internal/utils/byteinterval_linkedlist.go => quic-go/quic-go/internal/utils/linkedlist/linkedlist.go} (58%)
rename vendor/github.com/{lucas-clemente => quic-go}/quic-go/internal/utils/log.go (98%)
create mode 100644 vendor/github.com/quic-go/quic-go/internal/utils/minmax.go
rename vendor/github.com/{lucas-clemente => quic-go}/quic-go/internal/utils/rand.go (100%)
rename vendor/github.com/{lucas-clemente => quic-go}/quic-go/internal/utils/rtt_stats.go (92%)
rename vendor/github.com/{lucas-clemente => quic-go}/quic-go/internal/utils/timer.go (94%)
rename vendor/github.com/{lucas-clemente => quic-go}/quic-go/internal/wire/ack_frame.go (88%)
create mode 100644 vendor/github.com/quic-go/quic-go/internal/wire/ack_frame_pool.go
rename vendor/github.com/{lucas-clemente => quic-go}/quic-go/internal/wire/ack_range.go (82%)
rename vendor/github.com/{lucas-clemente => quic-go}/quic-go/internal/wire/connection_close_frame.go (81%)
rename vendor/github.com/{lucas-clemente => quic-go}/quic-go/internal/wire/crypto_frame.go (88%)
rename vendor/github.com/{lucas-clemente => quic-go}/quic-go/internal/wire/data_blocked_frame.go (68%)
rename vendor/github.com/{lucas-clemente => quic-go}/quic-go/internal/wire/datagram_frame.go (84%)
create mode 100644 vendor/github.com/quic-go/quic-go/internal/wire/extended_header.go
rename vendor/github.com/{lucas-clemente => quic-go}/quic-go/internal/wire/frame_parser.go (55%)
rename vendor/github.com/{lucas-clemente => quic-go}/quic-go/internal/wire/handshake_done_frame.go (74%)
rename vendor/github.com/{lucas-clemente => quic-go}/quic-go/internal/wire/header.go (62%)
rename vendor/github.com/{lucas-clemente => quic-go}/quic-go/internal/wire/interface.go (53%)
rename vendor/github.com/{lucas-clemente => quic-go}/quic-go/internal/wire/log.go (96%)
rename vendor/github.com/{lucas-clemente => quic-go}/quic-go/internal/wire/max_data_frame.go (65%)
rename vendor/github.com/{lucas-clemente => quic-go}/quic-go/internal/wire/max_stream_data_frame.go (72%)
rename vendor/github.com/{lucas-clemente => quic-go}/quic-go/internal/wire/max_streams_frame.go (77%)
rename vendor/github.com/{lucas-clemente => quic-go}/quic-go/internal/wire/new_connection_id_frame.go (76%)
rename vendor/github.com/{lucas-clemente => quic-go}/quic-go/internal/wire/new_token_frame.go (74%)
rename vendor/github.com/{lucas-clemente => quic-go}/quic-go/internal/wire/path_challenge_frame.go (75%)
rename vendor/github.com/{lucas-clemente => quic-go}/quic-go/internal/wire/path_response_frame.go (75%)
rename vendor/github.com/{lucas-clemente => quic-go}/quic-go/internal/wire/ping_frame.go (55%)
rename vendor/github.com/{lucas-clemente => quic-go}/quic-go/internal/wire/pool.go (90%)
rename vendor/github.com/{lucas-clemente => quic-go}/quic-go/internal/wire/reset_stream_frame.go (74%)
rename vendor/github.com/{lucas-clemente => quic-go}/quic-go/internal/wire/retire_connection_id_frame.go (70%)
create mode 100644 vendor/github.com/quic-go/quic-go/internal/wire/short_header.go
rename vendor/github.com/{lucas-clemente => quic-go}/quic-go/internal/wire/stop_sending_frame.go (71%)
rename vendor/github.com/{lucas-clemente => quic-go}/quic-go/internal/wire/stream_data_blocked_frame.go (73%)
rename vendor/github.com/{lucas-clemente => quic-go}/quic-go/internal/wire/stream_frame.go (87%)
rename vendor/github.com/{lucas-clemente => quic-go}/quic-go/internal/wire/streams_blocked_frame.go (78%)
rename vendor/github.com/{lucas-clemente => quic-go}/quic-go/internal/wire/transport_parameters.go (77%)
rename vendor/github.com/{lucas-clemente => quic-go}/quic-go/internal/wire/version_negotiation.go (50%)
rename vendor/github.com/{lucas-clemente => quic-go}/quic-go/logging/frame.go (97%)
rename vendor/github.com/{lucas-clemente => quic-go}/quic-go/logging/interface.go (79%)
create mode 100644 vendor/github.com/quic-go/quic-go/logging/mockgen.go
rename vendor/github.com/{lucas-clemente => quic-go}/quic-go/logging/multiplex.go (81%)
create mode 100644 vendor/github.com/quic-go/quic-go/logging/null_tracer.go
rename vendor/github.com/{lucas-clemente => quic-go}/quic-go/logging/packet_header.go (83%)
rename vendor/github.com/{lucas-clemente => quic-go}/quic-go/logging/types.go (100%)
create mode 100644 vendor/github.com/quic-go/quic-go/mockgen.go
rename vendor/github.com/{lucas-clemente => quic-go}/quic-go/mockgen_private.sh (88%)
rename vendor/github.com/{lucas-clemente => quic-go}/quic-go/mtu_discoverer.go (89%)
rename vendor/github.com/{lucas-clemente => quic-go}/quic-go/multiplexer.go (81%)
rename vendor/github.com/{lucas-clemente => quic-go}/quic-go/packet_handler_map.go (77%)
create mode 100644 vendor/github.com/quic-go/quic-go/packet_packer.go
rename vendor/github.com/{lucas-clemente => quic-go}/quic-go/packet_unpacker.go (55%)
rename vendor/github.com/{lucas-clemente => quic-go}/quic-go/qlog/event.go (93%)
rename vendor/github.com/{lucas-clemente => quic-go}/quic-go/qlog/frame.go (97%)
rename vendor/github.com/{lucas-clemente => quic-go}/quic-go/qlog/packet_header.go (63%)
rename vendor/github.com/{lucas-clemente => quic-go}/quic-go/qlog/qlog.go (82%)
rename vendor/github.com/{lucas-clemente => quic-go}/quic-go/qlog/trace.go (90%)
rename vendor/github.com/{lucas-clemente => quic-go}/quic-go/qlog/types.go (96%)
rename vendor/github.com/{lucas-clemente => quic-go}/quic-go/quicvarint/io.go (100%)
rename vendor/github.com/{lucas-clemente => quic-go}/quic-go/quicvarint/varint.go (73%)
rename vendor/github.com/{lucas-clemente => quic-go}/quic-go/receive_stream.go (86%)
rename vendor/github.com/{lucas-clemente => quic-go}/quic-go/retransmission_queue.go (83%)
rename vendor/github.com/{lucas-clemente => quic-go}/quic-go/send_conn.go (100%)
rename vendor/github.com/{lucas-clemente => quic-go}/quic-go/send_queue.go (93%)
rename vendor/github.com/{lucas-clemente => quic-go}/quic-go/send_stream.go (81%)
rename vendor/github.com/{lucas-clemente => quic-go}/quic-go/server.go (80%)
rename vendor/github.com/{lucas-clemente => quic-go}/quic-go/stream.go (89%)
rename vendor/github.com/{lucas-clemente => quic-go}/quic-go/streams_map.go (90%)
rename vendor/github.com/{lucas-clemente/quic-go/streams_map_incoming_uni.go => quic-go/quic-go/streams_map_incoming.go} (76%)
rename vendor/github.com/{lucas-clemente/quic-go/streams_map_outgoing_uni.go => quic-go/quic-go/streams_map_outgoing.go} (73%)
rename vendor/github.com/{lucas-clemente => quic-go}/quic-go/sys_conn.go (95%)
rename vendor/github.com/{lucas-clemente => quic-go}/quic-go/sys_conn_df.go (90%)
rename vendor/github.com/{lucas-clemente => quic-go}/quic-go/sys_conn_df_linux.go (94%)
rename vendor/github.com/{lucas-clemente => quic-go}/quic-go/sys_conn_df_windows.go (95%)
rename vendor/github.com/{lucas-clemente => quic-go}/quic-go/sys_conn_helper_darwin.go (95%)
rename vendor/github.com/{lucas-clemente => quic-go}/quic-go/sys_conn_helper_freebsd.go (93%)
rename vendor/github.com/{lucas-clemente => quic-go}/quic-go/sys_conn_helper_linux.go (96%)
rename vendor/github.com/{lucas-clemente => quic-go}/quic-go/sys_conn_no_oob.go (87%)
rename vendor/github.com/{lucas-clemente => quic-go}/quic-go/sys_conn_oob.go (87%)
rename vendor/github.com/{lucas-clemente => quic-go}/quic-go/sys_conn_windows.go (97%)
rename vendor/github.com/{lucas-clemente => quic-go}/quic-go/token_store.go (84%)
create mode 100644 vendor/github.com/quic-go/quic-go/tools.go
rename vendor/github.com/{lucas-clemente => quic-go}/quic-go/window_update_queue.go (91%)
create mode 100644 vendor/github.com/quic-go/quic-go/zero_rtt_queue.go
create mode 100644 vendor/github.com/quic-go/webtransport-go/.gitignore
rename vendor/github.com/{onsi/ginkgo/reporters/stenographer/support/go-isatty => quic-go/webtransport-go}/LICENSE (93%)
create mode 100644 vendor/github.com/quic-go/webtransport-go/README.md
create mode 100644 vendor/github.com/quic-go/webtransport-go/client.go
create mode 100644 vendor/github.com/quic-go/webtransport-go/codecov.yml
create mode 100644 vendor/github.com/quic-go/webtransport-go/errors.go
create mode 100644 vendor/github.com/quic-go/webtransport-go/protocol.go
create mode 100644 vendor/github.com/quic-go/webtransport-go/server.go
create mode 100644 vendor/github.com/quic-go/webtransport-go/session.go
create mode 100644 vendor/github.com/quic-go/webtransport-go/session_manager.go
create mode 100644 vendor/github.com/quic-go/webtransport-go/stream.go
create mode 100644 vendor/github.com/quic-go/webtransport-go/streams_map.go
create mode 100644 vendor/github.com/quic-go/webtransport-go/version.json
delete mode 100644 vendor/github.com/satori/go.uuid/.travis.yml
delete mode 100644 vendor/github.com/satori/go.uuid/LICENSE
delete mode 100644 vendor/github.com/satori/go.uuid/README.md
delete mode 100644 vendor/github.com/satori/go.uuid/codec.go
delete mode 100644 vendor/github.com/satori/go.uuid/generator.go
delete mode 100644 vendor/github.com/satori/go.uuid/sql.go
delete mode 100644 vendor/github.com/satori/go.uuid/uuid.go
delete mode 100644 vendor/github.com/spacemonkeygo/spacelog/.travis.yml
delete mode 100644 vendor/github.com/spacemonkeygo/spacelog/LICENSE
delete mode 100644 vendor/github.com/spacemonkeygo/spacelog/README.md
delete mode 100644 vendor/github.com/spacemonkeygo/spacelog/capture.go
delete mode 100644 vendor/github.com/spacemonkeygo/spacelog/capture_ae.go
delete mode 100644 vendor/github.com/spacemonkeygo/spacelog/capture_linux.go
delete mode 100644 vendor/github.com/spacemonkeygo/spacelog/capture_other.go
delete mode 100644 vendor/github.com/spacemonkeygo/spacelog/capture_solaris.go
delete mode 100644 vendor/github.com/spacemonkeygo/spacelog/collection.go
delete mode 100644 vendor/github.com/spacemonkeygo/spacelog/convenience.go
delete mode 100644 vendor/github.com/spacemonkeygo/spacelog/doc.go
delete mode 100644 vendor/github.com/spacemonkeygo/spacelog/event.go
delete mode 100644 vendor/github.com/spacemonkeygo/spacelog/handler.go
delete mode 100644 vendor/github.com/spacemonkeygo/spacelog/level.go
delete mode 100644 vendor/github.com/spacemonkeygo/spacelog/logger.go
delete mode 100644 vendor/github.com/spacemonkeygo/spacelog/output.go
delete mode 100644 vendor/github.com/spacemonkeygo/spacelog/output_other.go
delete mode 100644 vendor/github.com/spacemonkeygo/spacelog/output_windows.go
delete mode 100644 vendor/github.com/spacemonkeygo/spacelog/setup.go
delete mode 100644 vendor/github.com/spacemonkeygo/spacelog/sighup_appengine.go
delete mode 100644 vendor/github.com/spacemonkeygo/spacelog/sighup_other.go
delete mode 100644 vendor/github.com/spacemonkeygo/spacelog/syslog.go
delete mode 100644 vendor/github.com/spacemonkeygo/spacelog/templates.go
delete mode 100644 vendor/github.com/spacemonkeygo/spacelog/templates_others.go
delete mode 100644 vendor/github.com/spacemonkeygo/spacelog/templates_windows.go
delete mode 100644 vendor/github.com/spacemonkeygo/spacelog/text.go
create mode 100644 vendor/go.uber.org/atomic/pointer_go118_pre119.go
create mode 100644 vendor/go.uber.org/dig/.codecov.yml
create mode 100644 vendor/go.uber.org/dig/.gitignore
create mode 100644 vendor/go.uber.org/dig/CHANGELOG.md
rename vendor/{github.com/libp2p/go-libp2p/p2p/host/eventbus/LICENSE-MIT => go.uber.org/dig/LICENSE} (95%)
create mode 100644 vendor/go.uber.org/dig/Makefile
create mode 100644 vendor/go.uber.org/dig/README.md
create mode 100644 vendor/go.uber.org/dig/callback.go
create mode 100644 vendor/go.uber.org/dig/check_license.sh
create mode 100644 vendor/go.uber.org/dig/constructor.go
create mode 100644 vendor/go.uber.org/dig/container.go
create mode 100644 vendor/go.uber.org/dig/cycle_error.go
create mode 100644 vendor/go.uber.org/dig/decorate.go
create mode 100644 vendor/go.uber.org/dig/doc.go
create mode 100644 vendor/go.uber.org/dig/error.go
create mode 100644 vendor/go.uber.org/dig/glide.yaml
create mode 100644 vendor/go.uber.org/dig/graph.go
create mode 100644 vendor/go.uber.org/dig/group.go
create mode 100644 vendor/go.uber.org/dig/inout.go
create mode 100644 vendor/go.uber.org/dig/internal/digerror/errors.go
create mode 100644 vendor/go.uber.org/dig/internal/digreflect/func.go
create mode 100644 vendor/go.uber.org/dig/internal/dot/README.md
create mode 100644 vendor/go.uber.org/dig/internal/dot/graph.go
create mode 100644 vendor/go.uber.org/dig/internal/graph/graph.go
create mode 100644 vendor/go.uber.org/dig/invoke.go
create mode 100644 vendor/go.uber.org/dig/param.go
create mode 100644 vendor/go.uber.org/dig/provide.go
create mode 100644 vendor/go.uber.org/dig/result.go
create mode 100644 vendor/go.uber.org/dig/scope.go
create mode 100644 vendor/go.uber.org/dig/version.go
create mode 100644 vendor/go.uber.org/dig/visualize.go
create mode 100644 vendor/go.uber.org/fx/.codecov.yml
create mode 100644 vendor/go.uber.org/fx/.gitignore
create mode 100644 vendor/go.uber.org/fx/CHANGELOG.md
create mode 100644 vendor/go.uber.org/fx/CONTRIBUTING.md
rename vendor/{github.com/libp2p/go-libp2p-core/LICENSE-MIT => go.uber.org/fx/LICENSE} (87%)
create mode 100644 vendor/go.uber.org/fx/Makefile
create mode 100644 vendor/go.uber.org/fx/README.md
create mode 100644 vendor/go.uber.org/fx/annotated.go
create mode 100644 vendor/go.uber.org/fx/app.go
create mode 100644 vendor/go.uber.org/fx/app_unixes.go
create mode 100644 vendor/go.uber.org/fx/app_windows.go
create mode 100644 vendor/go.uber.org/fx/checklicense.sh
create mode 100644 vendor/go.uber.org/fx/decorate.go
create mode 100644 vendor/go.uber.org/fx/doc.go
create mode 100644 vendor/go.uber.org/fx/extract.go
create mode 100644 vendor/go.uber.org/fx/fxevent/console.go
create mode 100644 vendor/go.uber.org/fx/fxevent/event.go
create mode 100644 vendor/go.uber.org/fx/fxevent/logger.go
create mode 100644 vendor/go.uber.org/fx/fxevent/zap.go
create mode 100644 vendor/go.uber.org/fx/inout.go
create mode 100644 vendor/go.uber.org/fx/internal/fxclock/clock.go
create mode 100644 vendor/go.uber.org/fx/internal/fxlog/default.go
create mode 100644 vendor/go.uber.org/fx/internal/fxlog/spy.go
create mode 100644 vendor/go.uber.org/fx/internal/fxreflect/fxreflect.go
create mode 100644 vendor/go.uber.org/fx/internal/fxreflect/stack.go
create mode 100644 vendor/go.uber.org/fx/internal/lifecycle/lifecycle.go
create mode 100644 vendor/go.uber.org/fx/invoke.go
create mode 100644 vendor/go.uber.org/fx/lifecycle.go
create mode 100644 vendor/go.uber.org/fx/log.go
create mode 100644 vendor/go.uber.org/fx/module.go
create mode 100644 vendor/go.uber.org/fx/populate.go
create mode 100644 vendor/go.uber.org/fx/printer_writer.go
create mode 100644 vendor/go.uber.org/fx/provide.go
create mode 100644 vendor/go.uber.org/fx/replace.go
create mode 100644 vendor/go.uber.org/fx/shutdown.go
create mode 100644 vendor/go.uber.org/fx/signal.go
create mode 100644 vendor/go.uber.org/fx/supply.go
create mode 100644 vendor/go.uber.org/fx/version.go
create mode 100644 vendor/go.uber.org/multierr/error_post_go120.go
create mode 100644 vendor/go.uber.org/multierr/error_pre_go120.go
delete mode 100644 vendor/go.uber.org/multierr/glide.yaml
create mode 100644 vendor/go.uber.org/zap/internal/level_enabler.go
delete mode 100644 vendor/golang.org/x/crypto/AUTHORS
delete mode 100644 vendor/golang.org/x/crypto/CONTRIBUTORS
rename vendor/golang.org/x/crypto/internal/{subtle/aliasing.go => alias/alias.go} (84%)
rename vendor/golang.org/x/crypto/internal/{subtle/aliasing_purego.go => alias/alias_purego.go} (86%)
rename vendor/{github.com/marten-seemann/qtls-go1-18 => golang.org/x/exp}/LICENSE (100%)
create mode 100644 vendor/golang.org/x/exp/PATENTS
create mode 100644 vendor/golang.org/x/exp/constraints/constraints.go
create mode 100644 vendor/golang.org/x/exp/slices/slices.go
create mode 100644 vendor/golang.org/x/exp/slices/sort.go
create mode 100644 vendor/golang.org/x/exp/slices/zsortfunc.go
create mode 100644 vendor/golang.org/x/exp/slices/zsortordered.go
create mode 100644 vendor/golang.org/x/mod/modfile/print.go
create mode 100644 vendor/golang.org/x/mod/modfile/read.go
create mode 100644 vendor/golang.org/x/mod/modfile/rule.go
create mode 100644 vendor/golang.org/x/mod/modfile/work.go
create mode 100644 vendor/golang.org/x/net/http2/hpack/static_table.go
create mode 100644 vendor/golang.org/x/net/internal/socket/zsys_openbsd_ppc64.go
create mode 100644 vendor/golang.org/x/net/internal/socket/zsys_openbsd_riscv64.go
create mode 100644 vendor/golang.org/x/sys/cpu/cpu_other_ppc64x.go
create mode 100644 vendor/golang.org/x/sys/cpu/endian_big.go
create mode 100644 vendor/golang.org/x/sys/cpu/endian_little.go
create mode 100644 vendor/golang.org/x/sys/cpu/parse.go
create mode 100644 vendor/golang.org/x/sys/cpu/proc_cpuinfo_linux.go
create mode 100644 vendor/golang.org/x/sys/cpu/runtime_auxv.go
create mode 100644 vendor/golang.org/x/sys/cpu/runtime_auxv_go121.go
create mode 100644 vendor/golang.org/x/sys/unix/asm_bsd_ppc64.s
create mode 100644 vendor/golang.org/x/sys/unix/ioctl_signed.go
rename vendor/golang.org/x/sys/unix/{ioctl.go => ioctl_unsigned.go} (77%)
delete mode 100644 vendor/golang.org/x/sys/unix/str.go
delete mode 100644 vendor/golang.org/x/sys/unix/syscall_darwin.1_12.go
delete mode 100644 vendor/golang.org/x/sys/unix/syscall_darwin.1_13.go
create mode 100644 vendor/golang.org/x/sys/unix/syscall_hurd.go
create mode 100644 vendor/golang.org/x/sys/unix/syscall_hurd_386.go
create mode 100644 vendor/golang.org/x/sys/unix/syscall_openbsd_ppc64.go
create mode 100644 vendor/golang.org/x/sys/unix/syscall_openbsd_riscv64.go
create mode 100644 vendor/golang.org/x/sys/unix/zerrors_openbsd_ppc64.go
create mode 100644 vendor/golang.org/x/sys/unix/zerrors_openbsd_riscv64.go
delete mode 100644 vendor/golang.org/x/sys/unix/zsyscall_darwin_amd64.1_13.go
delete mode 100644 vendor/golang.org/x/sys/unix/zsyscall_darwin_amd64.1_13.s
delete mode 100644 vendor/golang.org/x/sys/unix/zsyscall_darwin_arm64.1_13.go
delete mode 100644 vendor/golang.org/x/sys/unix/zsyscall_darwin_arm64.1_13.s
create mode 100644 vendor/golang.org/x/sys/unix/zsyscall_openbsd_arm.s
create mode 100644 vendor/golang.org/x/sys/unix/zsyscall_openbsd_mips64.s
create mode 100644 vendor/golang.org/x/sys/unix/zsyscall_openbsd_ppc64.go
create mode 100644 vendor/golang.org/x/sys/unix/zsyscall_openbsd_ppc64.s
create mode 100644 vendor/golang.org/x/sys/unix/zsyscall_openbsd_riscv64.go
create mode 100644 vendor/golang.org/x/sys/unix/zsyscall_openbsd_riscv64.s
create mode 100644 vendor/golang.org/x/sys/unix/zsysctl_openbsd_ppc64.go
create mode 100644 vendor/golang.org/x/sys/unix/zsysctl_openbsd_riscv64.go
create mode 100644 vendor/golang.org/x/sys/unix/zsysnum_openbsd_ppc64.go
create mode 100644 vendor/golang.org/x/sys/unix/zsysnum_openbsd_riscv64.go
delete mode 100644 vendor/golang.org/x/sys/unix/ztypes_illumos_amd64.go
create mode 100644 vendor/golang.org/x/sys/unix/ztypes_openbsd_ppc64.go
create mode 100644 vendor/golang.org/x/sys/unix/ztypes_openbsd_riscv64.go
delete mode 100644 vendor/golang.org/x/term/AUTHORS
delete mode 100644 vendor/golang.org/x/term/CONTRIBUTORS
delete mode 100644 vendor/golang.org/x/text/AUTHORS
delete mode 100644 vendor/golang.org/x/text/CONTRIBUTORS
delete mode 100644 vendor/golang.org/x/text/language/go1_1.go
delete mode 100644 vendor/golang.org/x/text/language/go1_2.go
create mode 100644 vendor/golang.org/x/tools/cmd/goimports/doc.go
create mode 100644 vendor/golang.org/x/tools/cmd/goimports/goimports.go
create mode 100644 vendor/golang.org/x/tools/cmd/goimports/goimports_gc.go
create mode 100644 vendor/golang.org/x/tools/cmd/goimports/goimports_not_gc.go
delete mode 100644 vendor/golang.org/x/tools/go/internal/gcimporter/gcimporter.go
create mode 100644 vendor/golang.org/x/tools/go/types/objectpath/objectpath.go
create mode 100644 vendor/golang.org/x/tools/internal/fastwalk/fastwalk_darwin.go
rename vendor/golang.org/x/tools/{go => }/internal/gcimporter/bexport.go (99%)
rename vendor/golang.org/x/tools/{go => }/internal/gcimporter/bimport.go (100%)
rename vendor/golang.org/x/tools/{go => }/internal/gcimporter/exportdata.go (100%)
create mode 100644 vendor/golang.org/x/tools/internal/gcimporter/gcimporter.go
rename vendor/golang.org/x/tools/{go => }/internal/gcimporter/iexport.go (78%)
rename vendor/golang.org/x/tools/{go => }/internal/gcimporter/iimport.go (82%)
rename vendor/golang.org/x/tools/{go => }/internal/gcimporter/newInterface10.go (100%)
rename vendor/golang.org/x/tools/{go => }/internal/gcimporter/newInterface11.go (100%)
rename vendor/golang.org/x/tools/{go => }/internal/gcimporter/support_go117.go (100%)
rename vendor/golang.org/x/tools/{go => }/internal/gcimporter/support_go118.go (62%)
rename vendor/golang.org/x/tools/{go => }/internal/gcimporter/unified_no.go (100%)
rename vendor/golang.org/x/tools/{go => }/internal/gcimporter/unified_yes.go (100%)
rename vendor/golang.org/x/tools/{go => }/internal/gcimporter/ureader_no.go (100%)
rename vendor/golang.org/x/tools/{go => }/internal/gcimporter/ureader_yes.go (71%)
rename vendor/golang.org/x/tools/{go => }/internal/pkgbits/codes.go (100%)
rename vendor/golang.org/x/tools/{go => }/internal/pkgbits/decoder.go (83%)
rename vendor/golang.org/x/tools/{go => }/internal/pkgbits/doc.go (100%)
rename vendor/golang.org/x/tools/{go => }/internal/pkgbits/encoder.go (95%)
rename vendor/golang.org/x/tools/{go => }/internal/pkgbits/flags.go (100%)
rename vendor/golang.org/x/tools/{go => }/internal/pkgbits/frames_go1.go (100%)
rename vendor/golang.org/x/tools/{go => }/internal/pkgbits/frames_go17.go (100%)
rename vendor/golang.org/x/tools/{go => }/internal/pkgbits/reloc.go (95%)
rename vendor/golang.org/x/tools/{go => }/internal/pkgbits/support.go (100%)
rename vendor/golang.org/x/tools/{go => }/internal/pkgbits/sync.go (100%)
rename vendor/golang.org/x/tools/{go => }/internal/pkgbits/syncmarker_string.go (100%)
create mode 100644 vendor/golang.org/x/tools/internal/tokeninternal/tokeninternal.go
create mode 100644 vendor/google.golang.org/protobuf/cmd/protoc-gen-go/internal_gengo/init.go
create mode 100644 vendor/google.golang.org/protobuf/cmd/protoc-gen-go/internal_gengo/main.go
create mode 100644 vendor/google.golang.org/protobuf/cmd/protoc-gen-go/internal_gengo/reflect.go
create mode 100644 vendor/google.golang.org/protobuf/cmd/protoc-gen-go/internal_gengo/well_known_types.go
create mode 100644 vendor/google.golang.org/protobuf/cmd/protoc-gen-go/main.go
create mode 100644 vendor/google.golang.org/protobuf/compiler/protogen/protogen.go
create mode 100644 vendor/google.golang.org/protobuf/internal/msgfmt/format.go
create mode 100644 vendor/google.golang.org/protobuf/reflect/protopath/path.go
create mode 100644 vendor/google.golang.org/protobuf/reflect/protopath/step.go
create mode 100644 vendor/google.golang.org/protobuf/reflect/protorange/range.go
create mode 100644 vendor/google.golang.org/protobuf/reflect/protoreflect/value_equal.go
create mode 100644 vendor/google.golang.org/protobuf/types/dynamicpb/dynamic.go
create mode 100644 vendor/google.golang.org/protobuf/types/pluginpb/plugin.pb.go
delete mode 100644 vendor/gopkg.in/tomb.v1/LICENSE
delete mode 100644 vendor/gopkg.in/tomb.v1/README.md
delete mode 100644 vendor/gopkg.in/tomb.v1/tomb.go
create mode 100644 vendor/lukechampine.com/blake3/bao.go
diff --git a/.github/workflows/main.yaml b/.github/workflows/main.yaml
index df7fd6675..8dbec66e2 100644
--- a/.github/workflows/main.yaml
+++ b/.github/workflows/main.yaml
@@ -91,7 +91,7 @@ jobs:
- name: Install dependences
run: |
- command -v ginkgo || go install github.com/onsi/ginkgo/ginkgo@latest
+ command -v ginkgo || go install github.com/onsi/ginkgo/v2/ginkgo@latest
go install sigs.k8s.io/kind@v0.19.0
curl -LO https://storage.googleapis.com/kubernetes-release/release/v1.27.7/bin/linux/amd64/kubectl && sudo install kubectl /usr/local/bin/kubectl
type keadm || {
diff --git a/LICENSES/vendor/github.com/cheekybits/genny/LICENSE b/LICENSES/vendor/github.com/cheekybits/genny/LICENSE
deleted file mode 100644
index 62d20469b..000000000
--- a/LICENSES/vendor/github.com/cheekybits/genny/LICENSE
+++ /dev/null
@@ -1,26 +0,0 @@
-= vendor/github.com/cheekybits/genny licensed under: =
-
-The MIT License (MIT)
-
-Copyright (c) 2014 cheekybits
-
-Permission is hereby granted, free of charge, to any person obtaining a copy
-of this software and associated documentation files (the "Software"), to deal
-in the Software without restriction, including without limitation the rights
-to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
-copies of the Software, and to permit persons to whom the Software is
-furnished to do so, subject to the following conditions:
-
-The above copyright notice and this permission notice shall be included in all
-copies or substantial portions of the Software.
-
-THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
-IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
-FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
-AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
-LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
-OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
-SOFTWARE.
-
-
-= vendor/github.com/cheekybits/genny/LICENSE ec35c28b39daf63da91c90b20afd8b23
diff --git a/LICENSES/vendor/github.com/golang/mock/LICENSE b/LICENSES/vendor/github.com/golang/mock/LICENSE
new file mode 100644
index 000000000..c3cef2a6a
--- /dev/null
+++ b/LICENSES/vendor/github.com/golang/mock/LICENSE
@@ -0,0 +1,206 @@
+= vendor/github.com/golang/mock licensed under: =
+
+
+ Apache License
+ Version 2.0, January 2004
+ http://www.apache.org/licenses/
+
+ TERMS AND CONDITIONS FOR USE, REPRODUCTION, AND DISTRIBUTION
+
+ 1. Definitions.
+
+ "License" shall mean the terms and conditions for use, reproduction,
+ and distribution as defined by Sections 1 through 9 of this document.
+
+ "Licensor" shall mean the copyright owner or entity authorized by
+ the copyright owner that is granting the License.
+
+ "Legal Entity" shall mean the union of the acting entity and all
+ other entities that control, are controlled by, or are under common
+ control with that entity. For the purposes of this definition,
+ "control" means (i) the power, direct or indirect, to cause the
+ direction or management of such entity, whether by contract or
+ otherwise, or (ii) ownership of fifty percent (50%) or more of the
+ outstanding shares, or (iii) beneficial ownership of such entity.
+
+ "You" (or "Your") shall mean an individual or Legal Entity
+ exercising permissions granted by this License.
+
+ "Source" form shall mean the preferred form for making modifications,
+ including but not limited to software source code, documentation
+ source, and configuration files.
+
+ "Object" form shall mean any form resulting from mechanical
+ transformation or translation of a Source form, including but
+ not limited to compiled object code, generated documentation,
+ and conversions to other media types.
+
+ "Work" shall mean the work of authorship, whether in Source or
+ Object form, made available under the License, as indicated by a
+ copyright notice that is included in or attached to the work
+ (an example is provided in the Appendix below).
+
+ "Derivative Works" shall mean any work, whether in Source or Object
+ form, that is based on (or derived from) the Work and for which the
+ editorial revisions, annotations, elaborations, or other modifications
+ represent, as a whole, an original work of authorship. For the purposes
+ of this License, Derivative Works shall not include works that remain
+ separable from, or merely link (or bind by name) to the interfaces of,
+ the Work and Derivative Works thereof.
+
+ "Contribution" shall mean any work of authorship, including
+ the original version of the Work and any modifications or additions
+ to that Work or Derivative Works thereof, that is intentionally
+ submitted to Licensor for inclusion in the Work by the copyright owner
+ or by an individual or Legal Entity authorized to submit on behalf of
+ the copyright owner. For the purposes of this definition, "submitted"
+ means any form of electronic, verbal, or written communication sent
+ to the Licensor or its representatives, including but not limited to
+ communication on electronic mailing lists, source code control systems,
+ and issue tracking systems that are managed by, or on behalf of, the
+ Licensor for the purpose of discussing and improving the Work, but
+ excluding communication that is conspicuously marked or otherwise
+ designated in writing by the copyright owner as "Not a Contribution."
+
+ "Contributor" shall mean Licensor and any individual or Legal Entity
+ on behalf of whom a Contribution has been received by Licensor and
+ subsequently incorporated within the Work.
+
+ 2. Grant of Copyright License. Subject to the terms and conditions of
+ this License, each Contributor hereby grants to You a perpetual,
+ worldwide, non-exclusive, no-charge, royalty-free, irrevocable
+ copyright license to reproduce, prepare Derivative Works of,
+ publicly display, publicly perform, sublicense, and distribute the
+ Work and such Derivative Works in Source or Object form.
+
+ 3. Grant of Patent License. Subject to the terms and conditions of
+ this License, each Contributor hereby grants to You a perpetual,
+ worldwide, non-exclusive, no-charge, royalty-free, irrevocable
+ (except as stated in this section) patent license to make, have made,
+ use, offer to sell, sell, import, and otherwise transfer the Work,
+ where such license applies only to those patent claims licensable
+ by such Contributor that are necessarily infringed by their
+ Contribution(s) alone or by combination of their Contribution(s)
+ with the Work to which such Contribution(s) was submitted. If You
+ institute patent litigation against any entity (including a
+ cross-claim or counterclaim in a lawsuit) alleging that the Work
+ or a Contribution incorporated within the Work constitutes direct
+ or contributory patent infringement, then any patent licenses
+ granted to You under this License for that Work shall terminate
+ as of the date such litigation is filed.
+
+ 4. Redistribution. You may reproduce and distribute copies of the
+ Work or Derivative Works thereof in any medium, with or without
+ modifications, and in Source or Object form, provided that You
+ meet the following conditions:
+
+ (a) You must give any other recipients of the Work or
+ Derivative Works a copy of this License; and
+
+ (b) You must cause any modified files to carry prominent notices
+ stating that You changed the files; and
+
+ (c) You must retain, in the Source form of any Derivative Works
+ that You distribute, all copyright, patent, trademark, and
+ attribution notices from the Source form of the Work,
+ excluding those notices that do not pertain to any part of
+ the Derivative Works; and
+
+ (d) If the Work includes a "NOTICE" text file as part of its
+ distribution, then any Derivative Works that You distribute must
+ include a readable copy of the attribution notices contained
+ within such NOTICE file, excluding those notices that do not
+ pertain to any part of the Derivative Works, in at least one
+ of the following places: within a NOTICE text file distributed
+ as part of the Derivative Works; within the Source form or
+ documentation, if provided along with the Derivative Works; or,
+ within a display generated by the Derivative Works, if and
+ wherever such third-party notices normally appear. The contents
+ of the NOTICE file are for informational purposes only and
+ do not modify the License. You may add Your own attribution
+ notices within Derivative Works that You distribute, alongside
+ or as an addendum to the NOTICE text from the Work, provided
+ that such additional attribution notices cannot be construed
+ as modifying the License.
+
+ You may add Your own copyright statement to Your modifications and
+ may provide additional or different license terms and conditions
+ for use, reproduction, or distribution of Your modifications, or
+ for any such Derivative Works as a whole, provided Your use,
+ reproduction, and distribution of the Work otherwise complies with
+ the conditions stated in this License.
+
+ 5. Submission of Contributions. Unless You explicitly state otherwise,
+ any Contribution intentionally submitted for inclusion in the Work
+ by You to the Licensor shall be under the terms and conditions of
+ this License, without any additional terms or conditions.
+ Notwithstanding the above, nothing herein shall supersede or modify
+ the terms of any separate license agreement you may have executed
+ with Licensor regarding such Contributions.
+
+ 6. Trademarks. This License does not grant permission to use the trade
+ names, trademarks, service marks, or product names of the Licensor,
+ except as required for reasonable and customary use in describing the
+ origin of the Work and reproducing the content of the NOTICE file.
+
+ 7. Disclaimer of Warranty. Unless required by applicable law or
+ agreed to in writing, Licensor provides the Work (and each
+ Contributor provides its Contributions) on an "AS IS" BASIS,
+ WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or
+ implied, including, without limitation, any warranties or conditions
+ of TITLE, NON-INFRINGEMENT, MERCHANTABILITY, or FITNESS FOR A
+ PARTICULAR PURPOSE. You are solely responsible for determining the
+ appropriateness of using or redistributing the Work and assume any
+ risks associated with Your exercise of permissions under this License.
+
+ 8. Limitation of Liability. In no event and under no legal theory,
+ whether in tort (including negligence), contract, or otherwise,
+ unless required by applicable law (such as deliberate and grossly
+ negligent acts) or agreed to in writing, shall any Contributor be
+ liable to You for damages, including any direct, indirect, special,
+ incidental, or consequential damages of any character arising as a
+ result of this License or out of the use or inability to use the
+ Work (including but not limited to damages for loss of goodwill,
+ work stoppage, computer failure or malfunction, or any and all
+ other commercial damages or losses), even if such Contributor
+ has been advised of the possibility of such damages.
+
+ 9. Accepting Warranty or Additional Liability. While redistributing
+ the Work or Derivative Works thereof, You may choose to offer,
+ and charge a fee for, acceptance of support, warranty, indemnity,
+ or other liability obligations and/or rights consistent with this
+ License. However, in accepting such obligations, You may act only
+ on Your own behalf and on Your sole responsibility, not on behalf
+ of any other Contributor, and only if You agree to indemnify,
+ defend, and hold each Contributor harmless for any liability
+ incurred by, or claims asserted against, such Contributor by reason
+ of your accepting any such warranty or additional liability.
+
+ END OF TERMS AND CONDITIONS
+
+ APPENDIX: How to apply the Apache License to your work.
+
+ To apply the Apache License to your work, attach the following
+ boilerplate notice, with the fields enclosed by brackets "[]"
+ replaced with your own identifying information. (Don't include
+ the brackets!) The text should be enclosed in the appropriate
+ comment syntax for the file format. We also recommend that a
+ file or class name and description of purpose be included on the
+ same "printed page" as the copyright notice for easier
+ identification within third-party archives.
+
+ Copyright [yyyy] [name of copyright owner]
+
+ Licensed under the Apache License, Version 2.0 (the "License");
+ you may not use this file except in compliance with the License.
+ You may obtain a copy of the License at
+
+ http://www.apache.org/licenses/LICENSE-2.0
+
+ Unless required by applicable law or agreed to in writing, software
+ distributed under the License is distributed on an "AS IS" BASIS,
+ WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ See the License for the specific language governing permissions and
+ limitations under the License.
+
+= vendor/github.com/golang/mock/LICENSE 3b83ef96387f14655fc854ddc3c6bd57
diff --git a/LICENSES/vendor/github.com/google/pprof/LICENSE b/LICENSES/vendor/github.com/google/pprof/LICENSE
new file mode 100644
index 000000000..6c5ede2f2
--- /dev/null
+++ b/LICENSES/vendor/github.com/google/pprof/LICENSE
@@ -0,0 +1,206 @@
+= vendor/github.com/google/pprof licensed under: =
+
+
+ Apache License
+ Version 2.0, January 2004
+ http://www.apache.org/licenses/
+
+ TERMS AND CONDITIONS FOR USE, REPRODUCTION, AND DISTRIBUTION
+
+ 1. Definitions.
+
+ "License" shall mean the terms and conditions for use, reproduction,
+ and distribution as defined by Sections 1 through 9 of this document.
+
+ "Licensor" shall mean the copyright owner or entity authorized by
+ the copyright owner that is granting the License.
+
+ "Legal Entity" shall mean the union of the acting entity and all
+ other entities that control, are controlled by, or are under common
+ control with that entity. For the purposes of this definition,
+ "control" means (i) the power, direct or indirect, to cause the
+ direction or management of such entity, whether by contract or
+ otherwise, or (ii) ownership of fifty percent (50%) or more of the
+ outstanding shares, or (iii) beneficial ownership of such entity.
+
+ "You" (or "Your") shall mean an individual or Legal Entity
+ exercising permissions granted by this License.
+
+ "Source" form shall mean the preferred form for making modifications,
+ including but not limited to software source code, documentation
+ source, and configuration files.
+
+ "Object" form shall mean any form resulting from mechanical
+ transformation or translation of a Source form, including but
+ not limited to compiled object code, generated documentation,
+ and conversions to other media types.
+
+ "Work" shall mean the work of authorship, whether in Source or
+ Object form, made available under the License, as indicated by a
+ copyright notice that is included in or attached to the work
+ (an example is provided in the Appendix below).
+
+ "Derivative Works" shall mean any work, whether in Source or Object
+ form, that is based on (or derived from) the Work and for which the
+ editorial revisions, annotations, elaborations, or other modifications
+ represent, as a whole, an original work of authorship. For the purposes
+ of this License, Derivative Works shall not include works that remain
+ separable from, or merely link (or bind by name) to the interfaces of,
+ the Work and Derivative Works thereof.
+
+ "Contribution" shall mean any work of authorship, including
+ the original version of the Work and any modifications or additions
+ to that Work or Derivative Works thereof, that is intentionally
+ submitted to Licensor for inclusion in the Work by the copyright owner
+ or by an individual or Legal Entity authorized to submit on behalf of
+ the copyright owner. For the purposes of this definition, "submitted"
+ means any form of electronic, verbal, or written communication sent
+ to the Licensor or its representatives, including but not limited to
+ communication on electronic mailing lists, source code control systems,
+ and issue tracking systems that are managed by, or on behalf of, the
+ Licensor for the purpose of discussing and improving the Work, but
+ excluding communication that is conspicuously marked or otherwise
+ designated in writing by the copyright owner as "Not a Contribution."
+
+ "Contributor" shall mean Licensor and any individual or Legal Entity
+ on behalf of whom a Contribution has been received by Licensor and
+ subsequently incorporated within the Work.
+
+ 2. Grant of Copyright License. Subject to the terms and conditions of
+ this License, each Contributor hereby grants to You a perpetual,
+ worldwide, non-exclusive, no-charge, royalty-free, irrevocable
+ copyright license to reproduce, prepare Derivative Works of,
+ publicly display, publicly perform, sublicense, and distribute the
+ Work and such Derivative Works in Source or Object form.
+
+ 3. Grant of Patent License. Subject to the terms and conditions of
+ this License, each Contributor hereby grants to You a perpetual,
+ worldwide, non-exclusive, no-charge, royalty-free, irrevocable
+ (except as stated in this section) patent license to make, have made,
+ use, offer to sell, sell, import, and otherwise transfer the Work,
+ where such license applies only to those patent claims licensable
+ by such Contributor that are necessarily infringed by their
+ Contribution(s) alone or by combination of their Contribution(s)
+ with the Work to which such Contribution(s) was submitted. If You
+ institute patent litigation against any entity (including a
+ cross-claim or counterclaim in a lawsuit) alleging that the Work
+ or a Contribution incorporated within the Work constitutes direct
+ or contributory patent infringement, then any patent licenses
+ granted to You under this License for that Work shall terminate
+ as of the date such litigation is filed.
+
+ 4. Redistribution. You may reproduce and distribute copies of the
+ Work or Derivative Works thereof in any medium, with or without
+ modifications, and in Source or Object form, provided that You
+ meet the following conditions:
+
+ (a) You must give any other recipients of the Work or
+ Derivative Works a copy of this License; and
+
+ (b) You must cause any modified files to carry prominent notices
+ stating that You changed the files; and
+
+ (c) You must retain, in the Source form of any Derivative Works
+ that You distribute, all copyright, patent, trademark, and
+ attribution notices from the Source form of the Work,
+ excluding those notices that do not pertain to any part of
+ the Derivative Works; and
+
+ (d) If the Work includes a "NOTICE" text file as part of its
+ distribution, then any Derivative Works that You distribute must
+ include a readable copy of the attribution notices contained
+ within such NOTICE file, excluding those notices that do not
+ pertain to any part of the Derivative Works, in at least one
+ of the following places: within a NOTICE text file distributed
+ as part of the Derivative Works; within the Source form or
+ documentation, if provided along with the Derivative Works; or,
+ within a display generated by the Derivative Works, if and
+ wherever such third-party notices normally appear. The contents
+ of the NOTICE file are for informational purposes only and
+ do not modify the License. You may add Your own attribution
+ notices within Derivative Works that You distribute, alongside
+ or as an addendum to the NOTICE text from the Work, provided
+ that such additional attribution notices cannot be construed
+ as modifying the License.
+
+ You may add Your own copyright statement to Your modifications and
+ may provide additional or different license terms and conditions
+ for use, reproduction, or distribution of Your modifications, or
+ for any such Derivative Works as a whole, provided Your use,
+ reproduction, and distribution of the Work otherwise complies with
+ the conditions stated in this License.
+
+ 5. Submission of Contributions. Unless You explicitly state otherwise,
+ any Contribution intentionally submitted for inclusion in the Work
+ by You to the Licensor shall be under the terms and conditions of
+ this License, without any additional terms or conditions.
+ Notwithstanding the above, nothing herein shall supersede or modify
+ the terms of any separate license agreement you may have executed
+ with Licensor regarding such Contributions.
+
+ 6. Trademarks. This License does not grant permission to use the trade
+ names, trademarks, service marks, or product names of the Licensor,
+ except as required for reasonable and customary use in describing the
+ origin of the Work and reproducing the content of the NOTICE file.
+
+ 7. Disclaimer of Warranty. Unless required by applicable law or
+ agreed to in writing, Licensor provides the Work (and each
+ Contributor provides its Contributions) on an "AS IS" BASIS,
+ WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or
+ implied, including, without limitation, any warranties or conditions
+ of TITLE, NON-INFRINGEMENT, MERCHANTABILITY, or FITNESS FOR A
+ PARTICULAR PURPOSE. You are solely responsible for determining the
+ appropriateness of using or redistributing the Work and assume any
+ risks associated with Your exercise of permissions under this License.
+
+ 8. Limitation of Liability. In no event and under no legal theory,
+ whether in tort (including negligence), contract, or otherwise,
+ unless required by applicable law (such as deliberate and grossly
+ negligent acts) or agreed to in writing, shall any Contributor be
+ liable to You for damages, including any direct, indirect, special,
+ incidental, or consequential damages of any character arising as a
+ result of this License or out of the use or inability to use the
+ Work (including but not limited to damages for loss of goodwill,
+ work stoppage, computer failure or malfunction, or any and all
+ other commercial damages or losses), even if such Contributor
+ has been advised of the possibility of such damages.
+
+ 9. Accepting Warranty or Additional Liability. While redistributing
+ the Work or Derivative Works thereof, You may choose to offer,
+ and charge a fee for, acceptance of support, warranty, indemnity,
+ or other liability obligations and/or rights consistent with this
+ License. However, in accepting such obligations, You may act only
+ on Your own behalf and on Your sole responsibility, not on behalf
+ of any other Contributor, and only if You agree to indemnify,
+ defend, and hold each Contributor harmless for any liability
+ incurred by, or claims asserted against, such Contributor by reason
+ of your accepting any such warranty or additional liability.
+
+ END OF TERMS AND CONDITIONS
+
+ APPENDIX: How to apply the Apache License to your work.
+
+ To apply the Apache License to your work, attach the following
+ boilerplate notice, with the fields enclosed by brackets "[]"
+ replaced with your own identifying information. (Don't include
+ the brackets!) The text should be enclosed in the appropriate
+ comment syntax for the file format. We also recommend that a
+ file or class name and description of purpose be included on the
+ same "printed page" as the copyright notice for easier
+ identification within third-party archives.
+
+ Copyright [yyyy] [name of copyright owner]
+
+ Licensed under the Apache License, Version 2.0 (the "License");
+ you may not use this file except in compliance with the License.
+ You may obtain a copy of the License at
+
+ http://www.apache.org/licenses/LICENSE-2.0
+
+ Unless required by applicable law or agreed to in writing, software
+ distributed under the License is distributed on an "AS IS" BASIS,
+ WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ See the License for the specific language governing permissions and
+ limitations under the License.
+
+= vendor/github.com/google/pprof/LICENSE 3b83ef96387f14655fc854ddc3c6bd57
diff --git a/LICENSES/vendor/github.com/libp2p/go-libp2p-core/LICENSE b/LICENSES/vendor/github.com/libp2p/go-libp2p-core/LICENSE
deleted file mode 100644
index f91bc3233..000000000
--- a/LICENSES/vendor/github.com/libp2p/go-libp2p-core/LICENSE
+++ /dev/null
@@ -1,8 +0,0 @@
-= vendor/github.com/libp2p/go-libp2p-core licensed under: =
-
-Dual-licensed under MIT and ASLv2, by way of the [Permissive License Stack](https://protocol.ai/blog/announcing-the-permissive-license-stack/).
-
-Apache-2.0: https://www.apache.org/licenses/license-2.0
-MIT: https://www.opensource.org/licenses/mit
-
-= vendor/github.com/libp2p/go-libp2p-core/LICENSE 65d290d08f6aacf66c578fc174534617
diff --git a/LICENSES/vendor/github.com/libp2p/go-openssl/LICENSE b/LICENSES/vendor/github.com/libp2p/go-openssl/LICENSE
deleted file mode 100644
index 26d30cd33..000000000
--- a/LICENSES/vendor/github.com/libp2p/go-openssl/LICENSE
+++ /dev/null
@@ -1,195 +0,0 @@
-= vendor/github.com/libp2p/go-openssl licensed under: =
-
-Apache License
-Version 2.0, January 2004
-http://www.apache.org/licenses/
-
-TERMS AND CONDITIONS FOR USE, REPRODUCTION, AND DISTRIBUTION
-
-1. Definitions.
-
-"License" shall mean the terms and conditions for use, reproduction, and
-distribution as defined by Sections 1 through 9 of this document.
-
-"Licensor" shall mean the copyright owner or entity authorized by the copyright
-owner that is granting the License.
-
-"Legal Entity" shall mean the union of the acting entity and all other entities
-that control, are controlled by, or are under common control with that entity.
-For the purposes of this definition, "control" means (i) the power, direct or
-indirect, to cause the direction or management of such entity, whether by
-contract or otherwise, or (ii) ownership of fifty percent (50%) or more of the
-outstanding shares, or (iii) beneficial ownership of such entity.
-
-"You" (or "Your") shall mean an individual or Legal Entity exercising
-permissions granted by this License.
-
-"Source" form shall mean the preferred form for making modifications, including
-but not limited to software source code, documentation source, and configuration
-files.
-
-"Object" form shall mean any form resulting from mechanical transformation or
-translation of a Source form, including but not limited to compiled object code,
-generated documentation, and conversions to other media types.
-
-"Work" shall mean the work of authorship, whether in Source or Object form, made
-available under the License, as indicated by a copyright notice that is included
-in or attached to the work (an example is provided in the Appendix below).
-
-"Derivative Works" shall mean any work, whether in Source or Object form, that
-is based on (or derived from) the Work and for which the editorial revisions,
-annotations, elaborations, or other modifications represent, as a whole, an
-original work of authorship. For the purposes of this License, Derivative Works
-shall not include works that remain separable from, or merely link (or bind by
-name) to the interfaces of, the Work and Derivative Works thereof.
-
-"Contribution" shall mean any work of authorship, including the original version
-of the Work and any modifications or additions to that Work or Derivative Works
-thereof, that is intentionally submitted to Licensor for inclusion in the Work
-by the copyright owner or by an individual or Legal Entity authorized to submit
-on behalf of the copyright owner. For the purposes of this definition,
-"submitted" means any form of electronic, verbal, or written communication sent
-to the Licensor or its representatives, including but not limited to
-communication on electronic mailing lists, source code control systems, and
-issue tracking systems that are managed by, or on behalf of, the Licensor for
-the purpose of discussing and improving the Work, but excluding communication
-that is conspicuously marked or otherwise designated in writing by the copyright
-owner as "Not a Contribution."
-
-"Contributor" shall mean Licensor and any individual or Legal Entity on behalf
-of whom a Contribution has been received by Licensor and subsequently
-incorporated within the Work.
-
-2. Grant of Copyright License.
-
-Subject to the terms and conditions of this License, each Contributor hereby
-grants to You a perpetual, worldwide, non-exclusive, no-charge, royalty-free,
-irrevocable copyright license to reproduce, prepare Derivative Works of,
-publicly display, publicly perform, sublicense, and distribute the Work and such
-Derivative Works in Source or Object form.
-
-3. Grant of Patent License.
-
-Subject to the terms and conditions of this License, each Contributor hereby
-grants to You a perpetual, worldwide, non-exclusive, no-charge, royalty-free,
-irrevocable (except as stated in this section) patent license to make, have
-made, use, offer to sell, sell, import, and otherwise transfer the Work, where
-such license applies only to those patent claims licensable by such Contributor
-that are necessarily infringed by their Contribution(s) alone or by combination
-of their Contribution(s) with the Work to which such Contribution(s) was
-submitted. If You institute patent litigation against any entity (including a
-cross-claim or counterclaim in a lawsuit) alleging that the Work or a
-Contribution incorporated within the Work constitutes direct or contributory
-patent infringement, then any patent licenses granted to You under this License
-for that Work shall terminate as of the date such litigation is filed.
-
-4. Redistribution.
-
-You may reproduce and distribute copies of the Work or Derivative Works thereof
-in any medium, with or without modifications, and in Source or Object form,
-provided that You meet the following conditions:
-
-You must give any other recipients of the Work or Derivative Works a copy of
-this License; and
-You must cause any modified files to carry prominent notices stating that You
-changed the files; and
-You must retain, in the Source form of any Derivative Works that You distribute,
-all copyright, patent, trademark, and attribution notices from the Source form
-of the Work, excluding those notices that do not pertain to any part of the
-Derivative Works; and
-If the Work includes a "NOTICE" text file as part of its distribution, then any
-Derivative Works that You distribute must include a readable copy of the
-attribution notices contained within such NOTICE file, excluding those notices
-that do not pertain to any part of the Derivative Works, in at least one of the
-following places: within a NOTICE text file distributed as part of the
-Derivative Works; within the Source form or documentation, if provided along
-with the Derivative Works; or, within a display generated by the Derivative
-Works, if and wherever such third-party notices normally appear. The contents of
-the NOTICE file are for informational purposes only and do not modify the
-License. You may add Your own attribution notices within Derivative Works that
-You distribute, alongside or as an addendum to the NOTICE text from the Work,
-provided that such additional attribution notices cannot be construed as
-modifying the License.
-You may add Your own copyright statement to Your modifications and may provide
-additional or different license terms and conditions for use, reproduction, or
-distribution of Your modifications, or for any such Derivative Works as a whole,
-provided Your use, reproduction, and distribution of the Work otherwise complies
-with the conditions stated in this License.
-
-5. Submission of Contributions.
-
-Unless You explicitly state otherwise, any Contribution intentionally submitted
-for inclusion in the Work by You to the Licensor shall be under the terms and
-conditions of this License, without any additional terms or conditions.
-Notwithstanding the above, nothing herein shall supersede or modify the terms of
-any separate license agreement you may have executed with Licensor regarding
-such Contributions.
-
-6. Trademarks.
-
-This License does not grant permission to use the trade names, trademarks,
-service marks, or product names of the Licensor, except as required for
-reasonable and customary use in describing the origin of the Work and
-reproducing the content of the NOTICE file.
-
-7. Disclaimer of Warranty.
-
-Unless required by applicable law or agreed to in writing, Licensor provides the
-Work (and each Contributor provides its Contributions) on an "AS IS" BASIS,
-WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied,
-including, without limitation, any warranties or conditions of TITLE,
-NON-INFRINGEMENT, MERCHANTABILITY, or FITNESS FOR A PARTICULAR PURPOSE. You are
-solely responsible for determining the appropriateness of using or
-redistributing the Work and assume any risks associated with Your exercise of
-permissions under this License.
-
-8. Limitation of Liability.
-
-In no event and under no legal theory, whether in tort (including negligence),
-contract, or otherwise, unless required by applicable law (such as deliberate
-and grossly negligent acts) or agreed to in writing, shall any Contributor be
-liable to You for damages, including any direct, indirect, special, incidental,
-or consequential damages of any character arising as a result of this License or
-out of the use or inability to use the Work (including but not limited to
-damages for loss of goodwill, work stoppage, computer failure or malfunction, or
-any and all other commercial damages or losses), even if such Contributor has
-been advised of the possibility of such damages.
-
-9. Accepting Warranty or Additional Liability.
-
-While redistributing the Work or Derivative Works thereof, You may choose to
-offer, and charge a fee for, acceptance of support, warranty, indemnity, or
-other liability obligations and/or rights consistent with this License. However,
-in accepting such obligations, You may act only on Your own behalf and on Your
-sole responsibility, not on behalf of any other Contributor, and only if You
-agree to indemnify, defend, and hold each Contributor harmless for any liability
-incurred by, or claims asserted against, such Contributor by reason of your
-accepting any such warranty or additional liability.
-
-END OF TERMS AND CONDITIONS
-
-APPENDIX: How to apply the Apache License to your work
-
-To apply the Apache License to your work, attach the following boilerplate
-notice, with the fields enclosed by brackets "[]" replaced with your own
-identifying information. (Don't include the brackets!) The text should be
-enclosed in the appropriate comment syntax for the file format. We also
-recommend that a file or class name and description of purpose be included on
-the same "printed page" as the copyright notice for easier identification within
-third-party archives.
-
- Copyright [yyyy] [name of copyright owner]
-
- Licensed under the Apache License, Version 2.0 (the "License");
- you may not use this file except in compliance with the License.
- You may obtain a copy of the License at
-
- http://www.apache.org/licenses/LICENSE-2.0
-
- Unless required by applicable law or agreed to in writing, software
- distributed under the License is distributed on an "AS IS" BASIS,
- WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- See the License for the specific language governing permissions and
- limitations under the License.
-
-= vendor/github.com/libp2p/go-openssl/LICENSE 19cbd64715b51267a47bf3750cc6a8a5
diff --git a/LICENSES/vendor/github.com/libp2p/go-yamux/v3/LICENSE b/LICENSES/vendor/github.com/libp2p/go-yamux/v4/LICENSE
similarity index 99%
rename from LICENSES/vendor/github.com/libp2p/go-yamux/v3/LICENSE
rename to LICENSES/vendor/github.com/libp2p/go-yamux/v4/LICENSE
index 340413585..faf4faa65 100644
--- a/LICENSES/vendor/github.com/libp2p/go-yamux/v3/LICENSE
+++ b/LICENSES/vendor/github.com/libp2p/go-yamux/v4/LICENSE
@@ -1,4 +1,4 @@
-= vendor/github.com/libp2p/go-yamux/v3 licensed under: =
+= vendor/github.com/libp2p/go-yamux/v4 licensed under: =
Mozilla Public License, version 2.0
@@ -362,4 +362,4 @@ Exhibit B - "Incompatible With Secondary Licenses" Notice
This Source Code Form is "Incompatible
With Secondary Licenses", as defined by
the Mozilla Public License, v. 2.0.
-= vendor/github.com/libp2p/go-yamux/v3/LICENSE 2dd1a9ecf92cd5617f128808f9b85b44
+= vendor/github.com/libp2p/go-yamux/v4/LICENSE 2dd1a9ecf92cd5617f128808f9b85b44
diff --git a/LICENSES/vendor/github.com/marten-seemann/qtls-go1-17/LICENSE b/LICENSES/vendor/github.com/marten-seemann/qtls-go1-17/LICENSE
deleted file mode 100644
index dc2808ea6..000000000
--- a/LICENSES/vendor/github.com/marten-seemann/qtls-go1-17/LICENSE
+++ /dev/null
@@ -1,31 +0,0 @@
-= vendor/github.com/marten-seemann/qtls-go1-17 licensed under: =
-
-Copyright (c) 2009 The Go Authors. All rights reserved.
-
-Redistribution and use in source and binary forms, with or without
-modification, are permitted provided that the following conditions are
-met:
-
- * Redistributions of source code must retain the above copyright
-notice, this list of conditions and the following disclaimer.
- * Redistributions in binary form must reproduce the above
-copyright notice, this list of conditions and the following disclaimer
-in the documentation and/or other materials provided with the
-distribution.
- * Neither the name of Google Inc. nor the names of its
-contributors may be used to endorse or promote products derived from
-this software without specific prior written permission.
-
-THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
-"AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
-LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
-A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
-OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
-SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
-LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
-DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
-THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
-(INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
-OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
-
-= vendor/github.com/marten-seemann/qtls-go1-17/LICENSE 5d4950ecb7b26d2c5e4e7b4e0dd74707
diff --git a/LICENSES/vendor/github.com/marten-seemann/qtls-go1-18/LICENSE b/LICENSES/vendor/github.com/marten-seemann/qtls-go1-18/LICENSE
deleted file mode 100644
index fa2d92b15..000000000
--- a/LICENSES/vendor/github.com/marten-seemann/qtls-go1-18/LICENSE
+++ /dev/null
@@ -1,31 +0,0 @@
-= vendor/github.com/marten-seemann/qtls-go1-18 licensed under: =
-
-Copyright (c) 2009 The Go Authors. All rights reserved.
-
-Redistribution and use in source and binary forms, with or without
-modification, are permitted provided that the following conditions are
-met:
-
- * Redistributions of source code must retain the above copyright
-notice, this list of conditions and the following disclaimer.
- * Redistributions in binary form must reproduce the above
-copyright notice, this list of conditions and the following disclaimer
-in the documentation and/or other materials provided with the
-distribution.
- * Neither the name of Google Inc. nor the names of its
-contributors may be used to endorse or promote products derived from
-this software without specific prior written permission.
-
-THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
-"AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
-LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
-A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
-OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
-SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
-LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
-DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
-THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
-(INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
-OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
-
-= vendor/github.com/marten-seemann/qtls-go1-18/LICENSE 5d4950ecb7b26d2c5e4e7b4e0dd74707
diff --git a/LICENSES/vendor/github.com/mattn/go-pointer/LICENSE b/LICENSES/vendor/github.com/mattn/go-pointer/LICENSE
deleted file mode 100644
index 221c22998..000000000
--- a/LICENSES/vendor/github.com/mattn/go-pointer/LICENSE
+++ /dev/null
@@ -1,25 +0,0 @@
-= vendor/github.com/mattn/go-pointer licensed under: =
-
-The MIT License (MIT)
-
-Copyright (c) 2019 Yasuhiro Matsumoto
-
-Permission is hereby granted, free of charge, to any person obtaining a copy
-of this software and associated documentation files (the "Software"), to deal
-in the Software without restriction, including without limitation the rights
-to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
-copies of the Software, and to permit persons to whom the Software is
-furnished to do so, subject to the following conditions:
-
-The above copyright notice and this permission notice shall be included in all
-copies or substantial portions of the Software.
-
-THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
-IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
-FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
-AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
-LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
-OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
-SOFTWARE.
-
-= vendor/github.com/mattn/go-pointer/LICENSE 668a0750e654e2ad6ca9772d29659ea0
diff --git a/LICENSES/vendor/github.com/miekg/dns/LICENSE b/LICENSES/vendor/github.com/miekg/dns/LICENSE
index 85abdcf58..66ae740ae 100644
--- a/LICENSES/vendor/github.com/miekg/dns/LICENSE
+++ b/LICENSES/vendor/github.com/miekg/dns/LICENSE
@@ -1,34 +1,33 @@
= vendor/github.com/miekg/dns licensed under: =
-Copyright (c) 2009 The Go Authors. All rights reserved.
+BSD 3-Clause License
+
+Copyright (c) 2009, The Go Authors. Extensions copyright (c) 2011, Miek Gieben.
+All rights reserved.
Redistribution and use in source and binary forms, with or without
-modification, are permitted provided that the following conditions are
-met:
-
- * Redistributions of source code must retain the above copyright
-notice, this list of conditions and the following disclaimer.
- * Redistributions in binary form must reproduce the above
-copyright notice, this list of conditions and the following disclaimer
-in the documentation and/or other materials provided with the
-distribution.
- * Neither the name of Google Inc. nor the names of its
-contributors may be used to endorse or promote products derived from
-this software without specific prior written permission.
-
-THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
-"AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
-LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
-A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
-OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
-SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
-LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
-DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
-THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
-(INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
-OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+modification, are permitted provided that the following conditions are met:
+
+1. Redistributions of source code must retain the above copyright notice, this
+ list of conditions and the following disclaimer.
-As this is fork of the official Go code the same license applies.
-Extensions of the original work are copyright (c) 2011 Miek Gieben
+2. Redistributions in binary form must reproduce the above copyright notice,
+ this list of conditions and the following disclaimer in the documentation
+ and/or other materials provided with the distribution.
+
+3. Neither the name of the copyright holder nor the names of its
+ contributors may be used to endorse or promote products derived from
+ this software without specific prior written permission.
+
+THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
+AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
+DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE
+FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
+DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR
+SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER
+CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY,
+OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
-= vendor/github.com/miekg/dns/LICENSE 567c1ad6c08ca0ee8d7e0a0cf790aff9
+= vendor/github.com/miekg/dns/LICENSE b5215dfec2c591290f399a181669bef7
diff --git a/LICENSES/vendor/github.com/nxadm/tail/LICENSE b/LICENSES/vendor/github.com/nxadm/tail/LICENSE
deleted file mode 100644
index a0356d753..000000000
--- a/LICENSES/vendor/github.com/nxadm/tail/LICENSE
+++ /dev/null
@@ -1,25 +0,0 @@
-= vendor/github.com/nxadm/tail licensed under: =
-
-# The MIT License (MIT)
-
-# © Copyright 2015 Hewlett Packard Enterprise Development LP
-Copyright (c) 2014 ActiveState
-
-Permission is hereby granted, free of charge, to any person obtaining a copy
-of this software and associated documentation files (the "Software"), to deal
-in the Software without restriction, including without limitation the rights
-to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
-copies of the Software, and to permit persons to whom the Software is
-furnished to do so, subject to the following conditions:
-The above copyright notice and this permission notice shall be included in all
-copies or substantial portions of the Software.
-
-THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
-IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
-FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
-AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
-LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
-OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
-SOFTWARE.
-
-= vendor/github.com/nxadm/tail/LICENSE 0bdce43b16cd5c587124d6f274632c87
diff --git a/LICENSES/vendor/github.com/onsi/ginkgo/LICENSE b/LICENSES/vendor/github.com/onsi/ginkgo/v2/LICENSE
similarity index 89%
rename from LICENSES/vendor/github.com/onsi/ginkgo/LICENSE
rename to LICENSES/vendor/github.com/onsi/ginkgo/v2/LICENSE
index 32b61925a..e2207fd78 100644
--- a/LICENSES/vendor/github.com/onsi/ginkgo/LICENSE
+++ b/LICENSES/vendor/github.com/onsi/ginkgo/v2/LICENSE
@@ -1,4 +1,4 @@
-= vendor/github.com/onsi/ginkgo licensed under: =
+= vendor/github.com/onsi/ginkgo/v2 licensed under: =
Copyright (c) 2013-2014 Onsi Fakhouri
@@ -21,4 +21,4 @@ LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION
OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION
WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
-= vendor/github.com/onsi/ginkgo/LICENSE 570603114d52313cb86c0206401c9af7
+= vendor/github.com/onsi/ginkgo/v2/LICENSE 570603114d52313cb86c0206401c9af7
diff --git a/LICENSES/vendor/github.com/quic-go/qpack/LICENSE b/LICENSES/vendor/github.com/quic-go/qpack/LICENSE
new file mode 100644
index 000000000..33bdd849b
--- /dev/null
+++ b/LICENSES/vendor/github.com/quic-go/qpack/LICENSE
@@ -0,0 +1,11 @@
+= vendor/github.com/quic-go/qpack licensed under: =
+
+Copyright 2019 Marten Seemann
+
+Permission is hereby granted, free of charge, to any person obtaining a copy of this software and associated documentation files (the "Software"), to deal in the Software without restriction, including without limitation the rights to use, copy, modify, merge, publish, distribute, sublicense, and/or sell copies of the Software, and to permit persons to whom the Software is furnished to do so, subject to the following conditions:
+
+The above copyright notice and this permission notice shall be included in all copies or substantial portions of the Software.
+
+THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
+
+= vendor/github.com/quic-go/qpack/LICENSE.md d670bcd1510d5f6c013032947d36132b
diff --git a/LICENSES/vendor/github.com/marten-seemann/qtls-go1-19/LICENSE b/LICENSES/vendor/github.com/quic-go/qtls-go1-19/LICENSE
similarity index 90%
rename from LICENSES/vendor/github.com/marten-seemann/qtls-go1-19/LICENSE
rename to LICENSES/vendor/github.com/quic-go/qtls-go1-19/LICENSE
index ad84d77d6..51183e408 100644
--- a/LICENSES/vendor/github.com/marten-seemann/qtls-go1-19/LICENSE
+++ b/LICENSES/vendor/github.com/quic-go/qtls-go1-19/LICENSE
@@ -1,4 +1,4 @@
-= vendor/github.com/marten-seemann/qtls-go1-19 licensed under: =
+= vendor/github.com/quic-go/qtls-go1-19 licensed under: =
Copyright (c) 2009 The Go Authors. All rights reserved.
@@ -28,4 +28,4 @@ THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
(INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
-= vendor/github.com/marten-seemann/qtls-go1-19/LICENSE 5d4950ecb7b26d2c5e4e7b4e0dd74707
+= vendor/github.com/quic-go/qtls-go1-19/LICENSE 5d4950ecb7b26d2c5e4e7b4e0dd74707
diff --git a/LICENSES/vendor/github.com/marten-seemann/qtls-go1-16/LICENSE b/LICENSES/vendor/github.com/quic-go/qtls-go1-20/LICENSE
similarity index 90%
rename from LICENSES/vendor/github.com/marten-seemann/qtls-go1-16/LICENSE
rename to LICENSES/vendor/github.com/quic-go/qtls-go1-20/LICENSE
index ca599f44d..8fb1d4962 100644
--- a/LICENSES/vendor/github.com/marten-seemann/qtls-go1-16/LICENSE
+++ b/LICENSES/vendor/github.com/quic-go/qtls-go1-20/LICENSE
@@ -1,4 +1,4 @@
-= vendor/github.com/marten-seemann/qtls-go1-16 licensed under: =
+= vendor/github.com/quic-go/qtls-go1-20 licensed under: =
Copyright (c) 2009 The Go Authors. All rights reserved.
@@ -28,4 +28,4 @@ THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
(INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
-= vendor/github.com/marten-seemann/qtls-go1-16/LICENSE 5d4950ecb7b26d2c5e4e7b4e0dd74707
+= vendor/github.com/quic-go/qtls-go1-20/LICENSE 5d4950ecb7b26d2c5e4e7b4e0dd74707
diff --git a/LICENSES/vendor/github.com/lucas-clemente/quic-go/LICENSE b/LICENSES/vendor/github.com/quic-go/quic-go/LICENSE
similarity index 88%
rename from LICENSES/vendor/github.com/lucas-clemente/quic-go/LICENSE
rename to LICENSES/vendor/github.com/quic-go/quic-go/LICENSE
index 596d77620..3362bc1f9 100644
--- a/LICENSES/vendor/github.com/lucas-clemente/quic-go/LICENSE
+++ b/LICENSES/vendor/github.com/quic-go/quic-go/LICENSE
@@ -1,4 +1,4 @@
-= vendor/github.com/lucas-clemente/quic-go licensed under: =
+= vendor/github.com/quic-go/quic-go licensed under: =
MIT License
@@ -22,4 +22,4 @@ LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
SOFTWARE.
-= vendor/github.com/lucas-clemente/quic-go/LICENSE 357080a3c7c0a7c4e23100893c301235
+= vendor/github.com/quic-go/quic-go/LICENSE 357080a3c7c0a7c4e23100893c301235
diff --git a/LICENSES/vendor/github.com/quic-go/webtransport-go/LICENSE b/LICENSES/vendor/github.com/quic-go/webtransport-go/LICENSE
new file mode 100644
index 000000000..02fa62a4d
--- /dev/null
+++ b/LICENSES/vendor/github.com/quic-go/webtransport-go/LICENSE
@@ -0,0 +1,11 @@
+= vendor/github.com/quic-go/webtransport-go licensed under: =
+
+Copyright 2022 Marten Seemann
+
+Permission is hereby granted, free of charge, to any person obtaining a copy of this software and associated documentation files (the "Software"), to deal in the Software without restriction, including without limitation the rights to use, copy, modify, merge, publish, distribute, sublicense, and/or sell copies of the Software, and to permit persons to whom the Software is furnished to do so, subject to the following conditions:
+
+The above copyright notice and this permission notice shall be included in all copies or substantial portions of the Software.
+
+THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
+
+= vendor/github.com/quic-go/webtransport-go/LICENSE a1bf34948e29c80c19a0090aa522941d
diff --git a/LICENSES/vendor/github.com/satori/go.uuid/LICENSE b/LICENSES/vendor/github.com/satori/go.uuid/LICENSE
deleted file mode 100644
index 5ca82d7c7..000000000
--- a/LICENSES/vendor/github.com/satori/go.uuid/LICENSE
+++ /dev/null
@@ -1,24 +0,0 @@
-= vendor/github.com/satori/go.uuid licensed under: =
-
-Copyright (C) 2013-2018 by Maxim Bublis
-
-Permission is hereby granted, free of charge, to any person obtaining
-a copy of this software and associated documentation files (the
-"Software"), to deal in the Software without restriction, including
-without limitation the rights to use, copy, modify, merge, publish,
-distribute, sublicense, and/or sell copies of the Software, and to
-permit persons to whom the Software is furnished to do so, subject to
-the following conditions:
-
-The above copyright notice and this permission notice shall be
-included in all copies or substantial portions of the Software.
-
-THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
-EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
-MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
-NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS BE
-LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION
-OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION
-WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
-
-= vendor/github.com/satori/go.uuid/LICENSE ae4ba217c6e20c2d8f48f69966b9121b
diff --git a/LICENSES/vendor/github.com/spacemonkeygo/spacelog/LICENSE b/LICENSES/vendor/github.com/spacemonkeygo/spacelog/LICENSE
deleted file mode 100644
index 1a186e26c..000000000
--- a/LICENSES/vendor/github.com/spacemonkeygo/spacelog/LICENSE
+++ /dev/null
@@ -1,195 +0,0 @@
-= vendor/github.com/spacemonkeygo/spacelog licensed under: =
-
-Apache License
-Version 2.0, January 2004
-http://www.apache.org/licenses/
-
-TERMS AND CONDITIONS FOR USE, REPRODUCTION, AND DISTRIBUTION
-
-1. Definitions.
-
-"License" shall mean the terms and conditions for use, reproduction, and
-distribution as defined by Sections 1 through 9 of this document.
-
-"Licensor" shall mean the copyright owner or entity authorized by the copyright
-owner that is granting the License.
-
-"Legal Entity" shall mean the union of the acting entity and all other entities
-that control, are controlled by, or are under common control with that entity.
-For the purposes of this definition, "control" means (i) the power, direct or
-indirect, to cause the direction or management of such entity, whether by
-contract or otherwise, or (ii) ownership of fifty percent (50%) or more of the
-outstanding shares, or (iii) beneficial ownership of such entity.
-
-"You" (or "Your") shall mean an individual or Legal Entity exercising
-permissions granted by this License.
-
-"Source" form shall mean the preferred form for making modifications, including
-but not limited to software source code, documentation source, and configuration
-files.
-
-"Object" form shall mean any form resulting from mechanical transformation or
-translation of a Source form, including but not limited to compiled object code,
-generated documentation, and conversions to other media types.
-
-"Work" shall mean the work of authorship, whether in Source or Object form, made
-available under the License, as indicated by a copyright notice that is included
-in or attached to the work (an example is provided in the Appendix below).
-
-"Derivative Works" shall mean any work, whether in Source or Object form, that
-is based on (or derived from) the Work and for which the editorial revisions,
-annotations, elaborations, or other modifications represent, as a whole, an
-original work of authorship. For the purposes of this License, Derivative Works
-shall not include works that remain separable from, or merely link (or bind by
-name) to the interfaces of, the Work and Derivative Works thereof.
-
-"Contribution" shall mean any work of authorship, including the original version
-of the Work and any modifications or additions to that Work or Derivative Works
-thereof, that is intentionally submitted to Licensor for inclusion in the Work
-by the copyright owner or by an individual or Legal Entity authorized to submit
-on behalf of the copyright owner. For the purposes of this definition,
-"submitted" means any form of electronic, verbal, or written communication sent
-to the Licensor or its representatives, including but not limited to
-communication on electronic mailing lists, source code control systems, and
-issue tracking systems that are managed by, or on behalf of, the Licensor for
-the purpose of discussing and improving the Work, but excluding communication
-that is conspicuously marked or otherwise designated in writing by the copyright
-owner as "Not a Contribution."
-
-"Contributor" shall mean Licensor and any individual or Legal Entity on behalf
-of whom a Contribution has been received by Licensor and subsequently
-incorporated within the Work.
-
-2. Grant of Copyright License.
-
-Subject to the terms and conditions of this License, each Contributor hereby
-grants to You a perpetual, worldwide, non-exclusive, no-charge, royalty-free,
-irrevocable copyright license to reproduce, prepare Derivative Works of,
-publicly display, publicly perform, sublicense, and distribute the Work and such
-Derivative Works in Source or Object form.
-
-3. Grant of Patent License.
-
-Subject to the terms and conditions of this License, each Contributor hereby
-grants to You a perpetual, worldwide, non-exclusive, no-charge, royalty-free,
-irrevocable (except as stated in this section) patent license to make, have
-made, use, offer to sell, sell, import, and otherwise transfer the Work, where
-such license applies only to those patent claims licensable by such Contributor
-that are necessarily infringed by their Contribution(s) alone or by combination
-of their Contribution(s) with the Work to which such Contribution(s) was
-submitted. If You institute patent litigation against any entity (including a
-cross-claim or counterclaim in a lawsuit) alleging that the Work or a
-Contribution incorporated within the Work constitutes direct or contributory
-patent infringement, then any patent licenses granted to You under this License
-for that Work shall terminate as of the date such litigation is filed.
-
-4. Redistribution.
-
-You may reproduce and distribute copies of the Work or Derivative Works thereof
-in any medium, with or without modifications, and in Source or Object form,
-provided that You meet the following conditions:
-
-You must give any other recipients of the Work or Derivative Works a copy of
-this License; and
-You must cause any modified files to carry prominent notices stating that You
-changed the files; and
-You must retain, in the Source form of any Derivative Works that You distribute,
-all copyright, patent, trademark, and attribution notices from the Source form
-of the Work, excluding those notices that do not pertain to any part of the
-Derivative Works; and
-If the Work includes a "NOTICE" text file as part of its distribution, then any
-Derivative Works that You distribute must include a readable copy of the
-attribution notices contained within such NOTICE file, excluding those notices
-that do not pertain to any part of the Derivative Works, in at least one of the
-following places: within a NOTICE text file distributed as part of the
-Derivative Works; within the Source form or documentation, if provided along
-with the Derivative Works; or, within a display generated by the Derivative
-Works, if and wherever such third-party notices normally appear. The contents of
-the NOTICE file are for informational purposes only and do not modify the
-License. You may add Your own attribution notices within Derivative Works that
-You distribute, alongside or as an addendum to the NOTICE text from the Work,
-provided that such additional attribution notices cannot be construed as
-modifying the License.
-You may add Your own copyright statement to Your modifications and may provide
-additional or different license terms and conditions for use, reproduction, or
-distribution of Your modifications, or for any such Derivative Works as a whole,
-provided Your use, reproduction, and distribution of the Work otherwise complies
-with the conditions stated in this License.
-
-5. Submission of Contributions.
-
-Unless You explicitly state otherwise, any Contribution intentionally submitted
-for inclusion in the Work by You to the Licensor shall be under the terms and
-conditions of this License, without any additional terms or conditions.
-Notwithstanding the above, nothing herein shall supersede or modify the terms of
-any separate license agreement you may have executed with Licensor regarding
-such Contributions.
-
-6. Trademarks.
-
-This License does not grant permission to use the trade names, trademarks,
-service marks, or product names of the Licensor, except as required for
-reasonable and customary use in describing the origin of the Work and
-reproducing the content of the NOTICE file.
-
-7. Disclaimer of Warranty.
-
-Unless required by applicable law or agreed to in writing, Licensor provides the
-Work (and each Contributor provides its Contributions) on an "AS IS" BASIS,
-WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied,
-including, without limitation, any warranties or conditions of TITLE,
-NON-INFRINGEMENT, MERCHANTABILITY, or FITNESS FOR A PARTICULAR PURPOSE. You are
-solely responsible for determining the appropriateness of using or
-redistributing the Work and assume any risks associated with Your exercise of
-permissions under this License.
-
-8. Limitation of Liability.
-
-In no event and under no legal theory, whether in tort (including negligence),
-contract, or otherwise, unless required by applicable law (such as deliberate
-and grossly negligent acts) or agreed to in writing, shall any Contributor be
-liable to You for damages, including any direct, indirect, special, incidental,
-or consequential damages of any character arising as a result of this License or
-out of the use or inability to use the Work (including but not limited to
-damages for loss of goodwill, work stoppage, computer failure or malfunction, or
-any and all other commercial damages or losses), even if such Contributor has
-been advised of the possibility of such damages.
-
-9. Accepting Warranty or Additional Liability.
-
-While redistributing the Work or Derivative Works thereof, You may choose to
-offer, and charge a fee for, acceptance of support, warranty, indemnity, or
-other liability obligations and/or rights consistent with this License. However,
-in accepting such obligations, You may act only on Your own behalf and on Your
-sole responsibility, not on behalf of any other Contributor, and only if You
-agree to indemnify, defend, and hold each Contributor harmless for any liability
-incurred by, or claims asserted against, such Contributor by reason of your
-accepting any such warranty or additional liability.
-
-END OF TERMS AND CONDITIONS
-
-APPENDIX: How to apply the Apache License to your work
-
-To apply the Apache License to your work, attach the following boilerplate
-notice, with the fields enclosed by brackets "[]" replaced with your own
-identifying information. (Don't include the brackets!) The text should be
-enclosed in the appropriate comment syntax for the file format. We also
-recommend that a file or class name and description of purpose be included on
-the same "printed page" as the copyright notice for easier identification within
-third-party archives.
-
- Copyright [yyyy] [name of copyright owner]
-
- Licensed under the Apache License, Version 2.0 (the "License");
- you may not use this file except in compliance with the License.
- You may obtain a copy of the License at
-
- http://www.apache.org/licenses/LICENSE-2.0
-
- Unless required by applicable law or agreed to in writing, software
- distributed under the License is distributed on an "AS IS" BASIS,
- WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- See the License for the specific language governing permissions and
- limitations under the License.
-
-= vendor/github.com/spacemonkeygo/spacelog/LICENSE 19cbd64715b51267a47bf3750cc6a8a5
diff --git a/vendor/github.com/mattn/go-pointer/LICENSE b/LICENSES/vendor/go.uber.org/dig/LICENSE
similarity index 79%
rename from vendor/github.com/mattn/go-pointer/LICENSE
rename to LICENSES/vendor/go.uber.org/dig/LICENSE
index 5794eddcd..c057c8d73 100644
--- a/vendor/github.com/mattn/go-pointer/LICENSE
+++ b/LICENSES/vendor/go.uber.org/dig/LICENSE
@@ -1,6 +1,6 @@
-The MIT License (MIT)
+= vendor/go.uber.org/dig licensed under: =
-Copyright (c) 2019 Yasuhiro Matsumoto
+Copyright (c) 2017-2018 Uber Technologies, Inc.
Permission is hereby granted, free of charge, to any person obtaining a copy
of this software and associated documentation files (the "Software"), to deal
@@ -9,13 +9,15 @@ to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
copies of the Software, and to permit persons to whom the Software is
furnished to do so, subject to the following conditions:
-The above copyright notice and this permission notice shall be included in all
-copies or substantial portions of the Software.
+The above copyright notice and this permission notice shall be included in
+all copies or substantial portions of the Software.
THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
-OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
-SOFTWARE.
+OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
+THE SOFTWARE.
+
+= vendor/go.uber.org/dig/LICENSE bcd8a0c79a9155ebb2edf4ac6beb65a9
diff --git a/vendor/github.com/cheekybits/genny/LICENSE b/LICENSES/vendor/go.uber.org/fx/LICENSE
similarity index 79%
rename from vendor/github.com/cheekybits/genny/LICENSE
rename to LICENSES/vendor/go.uber.org/fx/LICENSE
index 519d7f227..2f59326d6 100644
--- a/vendor/github.com/cheekybits/genny/LICENSE
+++ b/LICENSES/vendor/go.uber.org/fx/LICENSE
@@ -1,6 +1,6 @@
-The MIT License (MIT)
+= vendor/go.uber.org/fx licensed under: =
-Copyright (c) 2014 cheekybits
+Copyright (c) 2016-2018 Uber Technologies, Inc.
Permission is hereby granted, free of charge, to any person obtaining a copy
of this software and associated documentation files (the "Software"), to deal
@@ -9,14 +9,15 @@ to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
copies of the Software, and to permit persons to whom the Software is
furnished to do so, subject to the following conditions:
-The above copyright notice and this permission notice shall be included in all
-copies or substantial portions of the Software.
+The above copyright notice and this permission notice shall be included in
+all copies or substantial portions of the Software.
THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
-OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
-SOFTWARE.
+OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
+THE SOFTWARE.
+= vendor/go.uber.org/fx/LICENSE acd140ca5f8399b17a833d830fd27a10
diff --git a/vendor/github.com/marten-seemann/qtls-go1-19/LICENSE b/LICENSES/vendor/golang.org/x/exp/LICENSE
similarity index 92%
rename from vendor/github.com/marten-seemann/qtls-go1-19/LICENSE
rename to LICENSES/vendor/golang.org/x/exp/LICENSE
index 6a66aea5e..ced05aa70 100644
--- a/vendor/github.com/marten-seemann/qtls-go1-19/LICENSE
+++ b/LICENSES/vendor/golang.org/x/exp/LICENSE
@@ -1,3 +1,5 @@
+= vendor/golang.org/x/exp licensed under: =
+
Copyright (c) 2009 The Go Authors. All rights reserved.
Redistribution and use in source and binary forms, with or without
@@ -25,3 +27,5 @@ DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
(INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+
+= vendor/golang.org/x/exp/LICENSE 5d4950ecb7b26d2c5e4e7b4e0dd74707
diff --git a/LICENSES/vendor/gopkg.in/tomb.v1/LICENSE b/LICENSES/vendor/gopkg.in/tomb.v1/LICENSE
deleted file mode 100644
index e017367d6..000000000
--- a/LICENSES/vendor/gopkg.in/tomb.v1/LICENSE
+++ /dev/null
@@ -1,33 +0,0 @@
-= vendor/gopkg.in/tomb.v1 licensed under: =
-
-tomb - support for clean goroutine termination in Go.
-
-Copyright (c) 2010-2011 - Gustavo Niemeyer
-
-All rights reserved.
-
-Redistribution and use in source and binary forms, with or without
-modification, are permitted provided that the following conditions are met:
-
- * Redistributions of source code must retain the above copyright notice,
- this list of conditions and the following disclaimer.
- * Redistributions in binary form must reproduce the above copyright notice,
- this list of conditions and the following disclaimer in the documentation
- and/or other materials provided with the distribution.
- * Neither the name of the copyright holder nor the names of its
- contributors may be used to endorse or promote products derived from
- this software without specific prior written permission.
-
-THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
-"AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
-LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
-A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR
-CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL,
-EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,
-PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR
-PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF
-LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING
-NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS
-SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
-
-= vendor/gopkg.in/tomb.v1/LICENSE 95d4102f39f26da9b66fee5d05ac597b
diff --git a/go.mod b/go.mod
index cd7aea457..b7646a88e 100644
--- a/go.mod
+++ b/go.mod
@@ -5,28 +5,27 @@ go 1.19
require (
contrib.go.opencensus.io/exporter/prometheus v0.4.2
github.com/buraksezer/consistent v0.9.0
- github.com/cespare/xxhash/v2 v2.1.2
+ github.com/cespare/xxhash/v2 v2.2.0
github.com/containernetworking/cni v0.8.1
github.com/coredns/caddy v1.1.0
github.com/coredns/coredns v1.8.0
github.com/fsnotify/fsnotify v1.5.4
- github.com/golang/protobuf v1.5.2
- github.com/ipfs/go-datastore v0.5.1
+ github.com/golang/protobuf v1.5.3
+ github.com/ipfs/go-datastore v0.6.0
github.com/ipfs/go-log/v2 v2.5.1
github.com/kubeedge/beehive v0.0.0
- github.com/kubeedge/kubeedge v1.11.1
- github.com/libp2p/go-libp2p v0.22.0
- github.com/libp2p/go-libp2p-kad-dht v0.18.0
- github.com/libp2p/go-msgio v0.2.0
- github.com/multiformats/go-multiaddr v0.6.0
- github.com/onsi/ginkgo v1.16.5
- github.com/onsi/gomega v1.16.0
- github.com/prometheus/client_golang v1.13.0
+ github.com/kubeedge/kubeedge v1.12.7
+ github.com/libp2p/go-libp2p v0.28.2
+ github.com/libp2p/go-libp2p-kad-dht v0.21.0
+ github.com/libp2p/go-msgio v0.3.0
+ github.com/multiformats/go-multiaddr v0.9.0
+ github.com/onsi/ginkgo/v2 v2.9.7
+ github.com/onsi/gomega v1.27.7
+ github.com/prometheus/client_golang v1.14.0
github.com/songgao/water v0.0.0-20200317203138-2b4b6d7c09d8
github.com/spf13/cobra v1.2.1
github.com/spf13/pflag v1.0.5
github.com/vishvananda/netlink v1.1.1-0.20201029203352-d40f9887b852
- go.opencensus.io v0.23.0
istio.io/api v0.0.0-20220124163811-3adce9124ae7
istio.io/client-go v1.12.3
k8s.io/api v0.23.1
@@ -44,20 +43,19 @@ require (
cloud.google.com/go v0.81.0 // indirect
github.com/Azure/go-ansiterm v0.0.0-20210617225240-d185dfc1b5a1 // indirect
github.com/DataDog/datadog-go v3.5.0+incompatible // indirect
- github.com/benbjohnson/clock v1.3.0 // indirect
+ github.com/benbjohnson/clock v1.3.5 // indirect
github.com/beorn7/perks v1.0.1 // indirect
github.com/blang/semver v3.5.1+incompatible // indirect
github.com/cenkalti/backoff/v4 v4.1.1 // indirect
- github.com/cheekybits/genny v1.0.0 // indirect
- github.com/containerd/cgroups v1.0.4 // indirect
- github.com/coreos/go-systemd/v22 v22.3.2 // indirect
+ github.com/containerd/cgroups v1.1.0 // indirect
+ github.com/coreos/go-systemd/v22 v22.5.0 // indirect
github.com/cyphar/filepath-securejoin v0.2.2 // indirect
github.com/davecgh/go-spew v1.1.1 // indirect
github.com/davidlazar/go-crypto v0.0.0-20200604182044-b73af7476f6c // indirect
- github.com/decred/dcrd/dcrec/secp256k1/v4 v4.1.0 // indirect
+ github.com/decred/dcrd/dcrec/secp256k1/v4 v4.2.0 // indirect
github.com/dnstap/golang-dnstap v0.2.2 // indirect
github.com/docker/distribution v2.8.0+incompatible // indirect
- github.com/docker/go-units v0.4.0 // indirect
+ github.com/docker/go-units v0.5.0 // indirect
github.com/eclipse/paho.mqtt.golang v1.2.0 // indirect
github.com/elastic/gosigar v0.14.2 // indirect
github.com/farsightsec/golang-framestream v0.3.0 // indirect
@@ -66,25 +64,27 @@ require (
github.com/francoispqt/gojay v1.2.13 // indirect
github.com/go-kit/log v0.2.1 // indirect
github.com/go-logfmt/logfmt v0.5.1 // indirect
- github.com/go-logr/logr v1.2.3 // indirect
- github.com/go-task/slim-sprig v0.0.0-20210107165309-348f09dbbbc0 // indirect
+ github.com/go-logr/logr v1.2.4 // indirect
+ github.com/go-task/slim-sprig v0.0.0-20230315185526-52ccab3ef572 // indirect
github.com/godbus/dbus/v5 v5.1.0 // indirect
github.com/gogo/protobuf v1.3.2 // indirect
github.com/golang/groupcache v0.0.0-20210331224755-41bb18bfe9da // indirect
- github.com/google/go-cmp v0.5.8 // indirect
+ github.com/golang/mock v1.6.0 // indirect
+ github.com/google/go-cmp v0.5.9 // indirect
github.com/google/gofuzz v1.1.0 // indirect
github.com/google/gopacket v1.1.19 // indirect
+ github.com/google/pprof v0.0.0-20230602150820-91b7bce49751 // indirect
github.com/google/uuid v1.3.0 // indirect
github.com/googleapis/gnostic v0.5.5 // indirect
github.com/gorilla/websocket v1.5.0 // indirect
github.com/grpc-ecosystem/grpc-opentracing v0.0.0-20180507213350-8e809c8a8645 // indirect
- github.com/hashicorp/errwrap v1.0.0 // indirect
+ github.com/hashicorp/errwrap v1.1.0 // indirect
github.com/hashicorp/go-multierror v1.1.1 // indirect
github.com/hashicorp/golang-lru v0.5.4 // indirect
- github.com/huin/goupnp v1.0.3 // indirect
+ github.com/huin/goupnp v1.2.0 // indirect
github.com/imdario/mergo v0.3.12 // indirect
github.com/inconshreveable/mousetrap v1.0.0 // indirect
- github.com/ipfs/go-cid v0.2.0 // indirect
+ github.com/ipfs/go-cid v0.4.1 // indirect
github.com/ipfs/go-ipfs-util v0.0.2 // indirect
github.com/ipfs/go-ipns v0.2.0 // indirect
github.com/ipfs/go-log v1.0.5 // indirect
@@ -93,51 +93,42 @@ require (
github.com/jbenet/go-temp-err-catcher v0.1.0 // indirect
github.com/jbenet/goprocess v0.1.4 // indirect
github.com/json-iterator/go v1.1.12 // indirect
- github.com/klauspost/compress v1.15.1 // indirect
- github.com/klauspost/cpuid/v2 v2.1.0 // indirect
- github.com/koron/go-ssdp v0.0.3 // indirect
+ github.com/klauspost/compress v1.16.5 // indirect
+ github.com/klauspost/cpuid/v2 v2.2.5 // indirect
+ github.com/koron/go-ssdp v0.0.4 // indirect
github.com/kubeedge/viaduct v0.0.0 // indirect
github.com/libp2p/go-buffer-pool v0.1.0 // indirect
github.com/libp2p/go-cidranger v1.1.0 // indirect
github.com/libp2p/go-flow-metrics v0.1.0 // indirect
- github.com/libp2p/go-libp2p-asn-util v0.2.0 // indirect
- github.com/libp2p/go-libp2p-core v0.20.0 // indirect
- github.com/libp2p/go-libp2p-kbucket v0.4.7 // indirect
+ github.com/libp2p/go-libp2p-asn-util v0.3.0 // indirect
+ github.com/libp2p/go-libp2p-kbucket v0.5.0 // indirect
github.com/libp2p/go-libp2p-record v0.2.0 // indirect
- github.com/libp2p/go-libp2p-routing-helpers v0.2.3 // indirect
- github.com/libp2p/go-nat v0.1.0 // indirect
- github.com/libp2p/go-netroute v0.2.0 // indirect
- github.com/libp2p/go-openssl v0.1.0 // indirect
- github.com/libp2p/go-reuseport v0.2.0 // indirect
- github.com/libp2p/go-yamux/v3 v3.1.2 // indirect
+ github.com/libp2p/go-libp2p-routing-helpers v0.4.0 // indirect
+ github.com/libp2p/go-nat v0.2.0 // indirect
+ github.com/libp2p/go-netroute v0.2.1 // indirect
+ github.com/libp2p/go-reuseport v0.3.0 // indirect
+ github.com/libp2p/go-yamux/v4 v4.0.0 // indirect
github.com/libp2p/zeroconf/v2 v2.2.0 // indirect
- github.com/lucas-clemente/quic-go v0.28.1 // indirect
- github.com/marten-seemann/qtls-go1-16 v0.1.5 // indirect
- github.com/marten-seemann/qtls-go1-17 v0.1.2 // indirect
- github.com/marten-seemann/qtls-go1-18 v0.1.2 // indirect
- github.com/marten-seemann/qtls-go1-19 v0.1.0 // indirect
github.com/marten-seemann/tcp v0.0.0-20210406111302-dfbc87cc63fd // indirect
- github.com/mattn/go-isatty v0.0.16 // indirect
- github.com/mattn/go-pointer v0.0.1 // indirect
- github.com/matttproud/golang_protobuf_extensions v1.0.2-0.20181231171920-c182affec369 // indirect
- github.com/miekg/dns v1.1.50 // indirect
+ github.com/mattn/go-isatty v0.0.19 // indirect
+ github.com/matttproud/golang_protobuf_extensions v1.0.4 // indirect
+ github.com/miekg/dns v1.1.54 // indirect
github.com/mikioh/tcpinfo v0.0.0-20190314235526-30a79bb1804b // indirect
github.com/mikioh/tcpopt v0.0.0-20190314235656-172688c1accc // indirect
- github.com/minio/sha256-simd v1.0.0 // indirect
+ github.com/minio/sha256-simd v1.0.1 // indirect
github.com/moby/term v0.0.0-20210610120745-9d4ed1856297 // indirect
github.com/modern-go/concurrent v0.0.0-20180306012644-bacd9c7ef1dd // indirect
github.com/modern-go/reflect2 v1.0.2 // indirect
github.com/mr-tron/base58 v1.2.0 // indirect
- github.com/multiformats/go-base32 v0.0.4 // indirect
- github.com/multiformats/go-base36 v0.1.0 // indirect
+ github.com/multiformats/go-base32 v0.1.0 // indirect
+ github.com/multiformats/go-base36 v0.2.0 // indirect
github.com/multiformats/go-multiaddr-dns v0.3.1 // indirect
github.com/multiformats/go-multiaddr-fmt v0.1.0 // indirect
- github.com/multiformats/go-multibase v0.1.1 // indirect
- github.com/multiformats/go-multicodec v0.5.0 // indirect
- github.com/multiformats/go-multihash v0.2.1 // indirect
- github.com/multiformats/go-multistream v0.3.3 // indirect
- github.com/multiformats/go-varint v0.0.6 // indirect
- github.com/nxadm/tail v1.4.8 // indirect
+ github.com/multiformats/go-multibase v0.2.0 // indirect
+ github.com/multiformats/go-multicodec v0.9.0 // indirect
+ github.com/multiformats/go-multihash v0.2.2 // indirect
+ github.com/multiformats/go-multistream v0.4.1 // indirect
+ github.com/multiformats/go-varint v0.0.7 // indirect
github.com/opencontainers/go-digest v1.0.0 // indirect
github.com/opencontainers/runc v1.0.3 // indirect
github.com/opencontainers/runtime-spec v1.0.3-0.20210326190908-1c3f411f0417 // indirect
@@ -149,38 +140,44 @@ require (
github.com/philhofer/fwd v1.0.0 // indirect
github.com/pkg/errors v0.9.1 // indirect
github.com/polydawn/refmt v0.0.0-20190807091052-3d65705ee9f1 // indirect
- github.com/prometheus/client_model v0.2.0 // indirect
+ github.com/prometheus/client_model v0.4.0 // indirect
github.com/prometheus/common v0.37.0 // indirect
github.com/prometheus/procfs v0.8.0 // indirect
github.com/prometheus/statsd_exporter v0.22.7 // indirect
+ github.com/quic-go/qpack v0.4.0 // indirect
+ github.com/quic-go/qtls-go1-19 v0.3.3 // indirect
+ github.com/quic-go/qtls-go1-20 v0.2.3 // indirect
+ github.com/quic-go/quic-go v0.33.0 // indirect
+ github.com/quic-go/webtransport-go v0.5.3 // indirect
github.com/raulk/go-watchdog v1.3.0 // indirect
- github.com/satori/go.uuid v1.2.0 // indirect
- github.com/spacemonkeygo/spacelog v0.0.0-20180420211403-2296661a0572 // indirect
github.com/spaolacci/murmur3 v1.1.0 // indirect
github.com/tinylib/msgp v1.1.2 // indirect
github.com/vishvananda/netns v0.0.0-20200728191858-db3c7e526aae // indirect
github.com/whyrusleeping/go-keyspace v0.0.0-20160322163242-5b898ac5add1 // indirect
- go.uber.org/atomic v1.10.0 // indirect
- go.uber.org/multierr v1.8.0 // indirect
- go.uber.org/zap v1.22.0 // indirect
- golang.org/x/crypto v0.0.0-20220525230936-793ad666bf5e // indirect
- golang.org/x/mod v0.6.0-dev.0.20220419223038-86c51ed26bb4 // indirect
- golang.org/x/net v0.0.0-20220812174116-3211cb980234 // indirect
+ go.opencensus.io v0.24.0 // indirect
+ go.uber.org/atomic v1.11.0 // indirect
+ go.uber.org/dig v1.17.0 // indirect
+ go.uber.org/fx v1.19.2 // indirect
+ go.uber.org/multierr v1.11.0 // indirect
+ go.uber.org/zap v1.24.0 // indirect
+ golang.org/x/crypto v0.7.0 // indirect
+ golang.org/x/exp v0.0.0-20230321023759-10a507213a29 // indirect
+ golang.org/x/mod v0.10.0 // indirect
+ golang.org/x/net v0.10.0 // indirect
golang.org/x/oauth2 v0.0.0-20220223155221-ee480838109b // indirect
- golang.org/x/sync v0.0.0-20220722155255-886fb9371eb4 // indirect
- golang.org/x/sys v0.0.0-20220811171246-fbc7d0a398ab // indirect
- golang.org/x/term v0.0.0-20210927222741-03fcf44c2211 // indirect
- golang.org/x/text v0.3.7 // indirect
+ golang.org/x/sync v0.2.0 // indirect
+ golang.org/x/sys v0.8.0 // indirect
+ golang.org/x/term v0.8.0 // indirect
+ golang.org/x/text v0.9.0 // indirect
golang.org/x/time v0.0.0-20210723032227-1f47c861a9ac // indirect
- golang.org/x/tools v0.1.12 // indirect
+ golang.org/x/tools v0.9.1 // indirect
golang.org/x/xerrors v0.0.0-20200804184101-5ec99f83aff1 // indirect
google.golang.org/appengine v1.6.7 // indirect
google.golang.org/genproto v0.0.0-20210831024726-fe130286e0e2 // indirect
google.golang.org/grpc v1.42.0 // indirect
- google.golang.org/protobuf v1.28.1 // indirect
+ google.golang.org/protobuf v1.30.0 // indirect
gopkg.in/DataDog/dd-trace-go.v1 v1.27.1 // indirect
gopkg.in/inf.v0 v0.9.1 // indirect
- gopkg.in/tomb.v1 v1.0.0-20141024135613-dd632973f1e7 // indirect
gopkg.in/yaml.v2 v2.4.0 // indirect
gopkg.in/yaml.v3 v3.0.1 // indirect
istio.io/gogo-genproto v0.0.0-20210113155706-4daf5697332f // indirect
@@ -188,13 +185,13 @@ require (
k8s.io/component-helpers v0.23.0 // indirect
k8s.io/klog v1.0.0 // indirect
k8s.io/kube-openapi v0.0.0-20211115234752-e816edb12b65 // indirect
- lukechampine.com/blake3 v1.1.7 // indirect
+ lukechampine.com/blake3 v1.2.1 // indirect
sigs.k8s.io/json v0.0.0-20211020170558-c049b76a60c6 // indirect
sigs.k8s.io/structured-merge-diff/v4 v4.2.1 // indirect
)
replace (
- github.com/kubeedge/beehive v0.0.0 => github.com/kubeedge/beehive v0.0.0-20201125122335-cd19bca6e436
+ github.com/kubeedge/beehive v0.0.0 => github.com/kubeedge/beehive v1.13.0
github.com/kubeedge/viaduct v0.0.0 => github.com/kubeedge/viaduct v0.0.0-20210601015050-d832643a3d35
k8s.io/api v0.0.0 => k8s.io/api v0.23.0
k8s.io/apiextensions-apiserver v0.0.0 => k8s.io/apiextensions-apiserver v0.23.0
diff --git a/go.sum b/go.sum
index 5a248dade..49110bf27 100644
--- a/go.sum
+++ b/go.sum
@@ -53,7 +53,6 @@ dmitri.shuralyov.com/state v0.0.0-20180228185332-28bcc343414c/go.mod h1:0PRwlb0D
git.apache.org/thrift.git v0.0.0-20180902110319-2566ecd5d999/go.mod h1:fPE2ZNJGynbRyZ4dJvy6G277gSllfV2HJqblrnkyeyg=
github.com/256dpi/gomqtt v0.10.4/go.mod h1:C+397CXyL3GyYapLHsAMlAozXofYs4XxwkNZsSPvbuI=
github.com/256dpi/mercury v0.1.0/go.mod h1:W2/eVt6tqfSn5J8en63oGNmnZSb66PUo0e5YBzSHkkU=
-github.com/AndreasBriese/bbloom v0.0.0-20190306092124-e2d15f34fcf9/go.mod h1:bOvUY6CB00SOBii9/FifXqc0awNKxLFCL/+pkDPuyl8=
github.com/Azure/azure-sdk-for-go v16.2.1+incompatible/go.mod h1:9XXNKU+eRnpl9moKnB4QOLf1HestfXbmab5FXxiDBjc=
github.com/Azure/azure-sdk-for-go v40.6.0+incompatible/go.mod h1:9XXNKU+eRnpl9moKnB4QOLf1HestfXbmab5FXxiDBjc=
github.com/Azure/azure-sdk-for-go v55.0.0+incompatible/go.mod h1:9XXNKU+eRnpl9moKnB4QOLf1HestfXbmab5FXxiDBjc=
@@ -131,7 +130,7 @@ github.com/Microsoft/hcsshim v0.8.15/go.mod h1:x38A4YbHbdxJtc0sF6oIz+RG0npwSCAvn
github.com/Microsoft/hcsshim v0.8.16/go.mod h1:o5/SZqmR7x9JNKsW3pu+nqHm0MF8vbA+VxGOoXdC600=
github.com/Microsoft/hcsshim v0.8.21/go.mod h1:+w2gRZ5ReXQhFOrvSQeNfhrYB/dg3oDwTOcER2fw4I4=
github.com/Microsoft/hcsshim v0.8.22/go.mod h1:91uVCVzvX2QD16sMCenoxxXo6L1wJnLMX2PSufFMtF0=
-github.com/Microsoft/hcsshim v0.8.23/go.mod h1:4zegtUJth7lAvFyc6cH2gGQ5B3OFQim01nnU2M8jKDg=
+github.com/Microsoft/hcsshim v0.8.24/go.mod h1:4zegtUJth7lAvFyc6cH2gGQ5B3OFQim01nnU2M8jKDg=
github.com/Microsoft/hcsshim/test v0.0.0-20201218223536-d3e5debf77da/go.mod h1:5hlzMzRKMLyo42nCZ9oml8AdTlq/0cvIaBv6tK1RehU=
github.com/Microsoft/hcsshim/test v0.0.0-20210227013316-43a75bb4edd3/go.mod h1:mw7qgWloBUl75W/gVH3cQszUg1+gUITj7D6NY7ywVnY=
github.com/NYTimes/gziphandler v0.0.0-20170623195520-56545f4a5d46/go.mod h1:3wb06e3pkSAbeQ52E9H9iFoQsEEwGN64994WTCIhntQ=
@@ -148,7 +147,6 @@ github.com/Shopify/toxiproxy v2.1.4+incompatible/go.mod h1:OXgGpZ6Cli1/URJOF1DMx
github.com/VividCortex/gohistogram v1.0.0/go.mod h1:Pf5mBqqDxYaXu3hDrrU+w6nw50o/4+TcAqDqk/vUH7g=
github.com/abiosoft/ishell v2.0.0+incompatible/go.mod h1:HQR9AqF2R3P4XXpMpI0NAzgHf/aS6+zVXRj14cVk9qg=
github.com/abiosoft/readline v0.0.0-20180607040430-155bce2042db/go.mod h1:rB3B4rKii8V21ydCbIzH5hZiCQE7f5E9SzUb/ZZx530=
-github.com/aead/siphash v1.0.1/go.mod h1:Nywa3cDsYNNK3gaciGTWPwHt0wlpNV15vwmswBAUSII=
github.com/afex/hystrix-go v0.0.0-20180502004556-fa1af6a1f4f5/go.mod h1:SkGFH1ia65gfNATL8TAiHDNxPzPdmEL5uirI2Uyuz6c=
github.com/ajstarks/svgo v0.0.0-20180226025133-644b8db467af/go.mod h1:K08gAheRH3/J6wwsYMMT4xOr94bZjxIelGM0+d/wbFw=
github.com/alecthomas/template v0.0.0-20160405071501-a0175ee3bccc/go.mod h1:LOuyumcjzFXgccqObfd/Ljyb9UuFJ6TxHnclSeseNhc=
@@ -185,8 +183,9 @@ github.com/beego/goyaml2 v0.0.0-20130207012346-5545475820dd/go.mod h1:1b+Y/CofkY
github.com/beego/x2j v0.0.0-20131220205130-a0352aadc542/go.mod h1:kSeGC/p1AbBiEp5kat81+DSQrZenVBZXklMLaELspWU=
github.com/benbjohnson/clock v1.0.3/go.mod h1:bGMdMPoPVvcYyt1gHDf4J2KE153Yf9BuiUKYMaxlTDM=
github.com/benbjohnson/clock v1.1.0/go.mod h1:J11/hYXuz8f4ySSvYwY0FKfm+ezbsZBKZxNJlLklBHA=
-github.com/benbjohnson/clock v1.3.0 h1:ip6w0uFQkncKQ979AypyG0ER7mqUSBdKLOgAle/AT8A=
github.com/benbjohnson/clock v1.3.0/go.mod h1:J11/hYXuz8f4ySSvYwY0FKfm+ezbsZBKZxNJlLklBHA=
+github.com/benbjohnson/clock v1.3.5 h1:VvXlSJBzZpA/zum6Sj74hxwYI2DIxRWuNIoXAzHZz5o=
+github.com/benbjohnson/clock v1.3.5/go.mod h1:J11/hYXuz8f4ySSvYwY0FKfm+ezbsZBKZxNJlLklBHA=
github.com/beorn7/perks v0.0.0-20160804104726-4c0e84591b9a/go.mod h1:Dwedo/Wpr24TaqPxmxbtue+5NUziq4I4S80YR8gNf3Q=
github.com/beorn7/perks v0.0.0-20180321164747-3a771d992973/go.mod h1:Dwedo/Wpr24TaqPxmxbtue+5NUziq4I4S80YR8gNf3Q=
github.com/beorn7/perks v1.0.0/go.mod h1:KWe93zE9D1o94FZ5RNwFwVgaQK1VOXiVxmqh+CedLV8=
@@ -207,15 +206,6 @@ github.com/bradfitz/go-smtpd v0.0.0-20170404230938-deb6d6237625/go.mod h1:HYsPBT
github.com/bradfitz/gomemcache v0.0.0-20180710155616-bc664df96737/go.mod h1:PmM6Mmwb0LSuEubjR8N7PtNe1KxZLtOUHtbeikc5h60=
github.com/bshuster-repo/logrus-logstash-hook v0.4.1/go.mod h1:zsTqEiSzDgAa/8GZR7E1qaXrhYNDKBYy5/dWPTIflbk=
github.com/bshuster-repo/logrus-logstash-hook v1.0.0/go.mod h1:zsTqEiSzDgAa/8GZR7E1qaXrhYNDKBYy5/dWPTIflbk=
-github.com/btcsuite/btcd v0.0.0-20190824003749-130ea5bddde3/go.mod h1:3J08xEfcugPacsc34/LKRU2yO7YmuT8yt28J8k2+rrI=
-github.com/btcsuite/btcd v0.20.1-beta/go.mod h1:wVuoA8VJLEcwgqHBwHmzLRazpKxTv13Px/pDuV7OomQ=
-github.com/btcsuite/btclog v0.0.0-20170628155309-84c8d2346e9f/go.mod h1:TdznJufoqS23FtqVCzL0ZqgP5MqXbb4fg/WgDys70nA=
-github.com/btcsuite/btcutil v0.0.0-20190425235716-9e5f4b9a998d/go.mod h1:+5NJ2+qvTyV9exUAL/rxXi3DcLg2Ts+ymUAY5y4NvMg=
-github.com/btcsuite/go-socks v0.0.0-20170105172521-4720035b7bfd/go.mod h1:HHNXQzUsZCxOoE+CPiyCTO6x34Zs86zZUiwtpXoGdtg=
-github.com/btcsuite/goleveldb v0.0.0-20160330041536-7834afc9e8cd/go.mod h1:F+uVaaLLH7j4eDXPRvw78tMflu7Ie2bzYOH4Y8rRKBY=
-github.com/btcsuite/snappy-go v0.0.0-20151229074030-0bdef8d06723/go.mod h1:8woku9dyThutzjeg+3xrA5iCpBRH8XEEg3lh6TiUghc=
-github.com/btcsuite/websocket v0.0.0-20150119174127-31079b680792/go.mod h1:ghJtEyQwv5/p4Mg4C0fgbePVuGr935/5ddU9Z3TmDRY=
-github.com/btcsuite/winsvc v1.0.0/go.mod h1:jsenWakMcC0zFBFurPLEAyrnc/teJEM1O46fmI40EZs=
github.com/buger/jsonparser v0.0.0-20180808090653-f4dd9f5a6b44/go.mod h1:bbYlZJ7hK1yFx9hf58LP0zeX7UjIGs20ufpu3evjr+s=
github.com/buger/jsonparser v0.0.0-20181115193947-bf1c66bbce23/go.mod h1:bbYlZJ7hK1yFx9hf58LP0zeX7UjIGs20ufpu3evjr+s=
github.com/bugsnag/bugsnag-go v0.0.0-20141110184014-b1d153021fcd/go.mod h1:2oa8nejYd4cQ/b0hMIopN0lCRxU0bueqREvZLWFrtK8=
@@ -234,12 +224,12 @@ github.com/certifi/gocertifi v0.0.0-20191021191039-0944d244cd40/go.mod h1:sGbDF6
github.com/certifi/gocertifi v0.0.0-20200922220541-2c3bb06c6054/go.mod h1:sGbDF6GwGcLpkNXPUTkMRoywsNa/ol15pxFe6ERfguA=
github.com/cespare/xxhash v1.1.0/go.mod h1:XrSqR1VqqWfGrhpAt58auRo0WTKS1nRRg3ghfAqPWnc=
github.com/cespare/xxhash/v2 v2.1.1/go.mod h1:VGX0DQ3Q6kWi7AoAeZDth3/j3BFtOZR5XLFGgcrjCOs=
-github.com/cespare/xxhash/v2 v2.1.2 h1:YRXhKfTDauu4ajMg1TPgFO5jnlC2HCbmLXMcTG5cbYE=
github.com/cespare/xxhash/v2 v2.1.2/go.mod h1:VGX0DQ3Q6kWi7AoAeZDth3/j3BFtOZR5XLFGgcrjCOs=
+github.com/cespare/xxhash/v2 v2.2.0 h1:DC2CZ1Ep5Y4k3ZQ899DldepgrayRUGE6BBZ/cd9Cj44=
+github.com/cespare/xxhash/v2 v2.2.0/go.mod h1:VGX0DQ3Q6kWi7AoAeZDth3/j3BFtOZR5XLFGgcrjCOs=
github.com/chai2010/gettext-go v0.0.0-20160711120539-c6fed771bfd5/go.mod h1:/iP1qXHoty45bqomnu2LM+VVyAEdWN+vtSHGlQgyxbw=
github.com/checkpoint-restore/go-criu/v4 v4.1.0/go.mod h1:xUQBLp4RLc5zJtWY++yjOoMoB5lihDt7fai+75m+rGw=
github.com/checkpoint-restore/go-criu/v5 v5.0.0/go.mod h1:cfwC0EG7HMUenopBsUf9d89JlCLQIfgVcNsNN0t6T2M=
-github.com/cheekybits/genny v1.0.0 h1:uGGa4nei+j20rOSeDeP5Of12XVm7TGUd4dJA9RDitfE=
github.com/cheekybits/genny v1.0.0/go.mod h1:+tQajlRqAUrPI7DOSpB0XAqZYtQakVtB7wXkRAgjxjQ=
github.com/chzyer/logex v1.1.10/go.mod h1:+Ywpsq7O8HXn0nuIou7OrIPyXbp3wmkHB+jjWRnGsAI=
github.com/chzyer/readline v0.0.0-20180603132655-2972be24d48e/go.mod h1:nSuG5e5PlCu98SY8svDHJxuZscDgtXS6KTTbou5AhLI=
@@ -283,8 +273,9 @@ github.com/containerd/cgroups v0.0.0-20200824123100-0b889c03f102/go.mod h1:s5q4S
github.com/containerd/cgroups v0.0.0-20201119153540-4cbc285b3327/go.mod h1:ZJeTFisyysqgcCdecO57Dj79RfL0LNeGiFUqLYQRYLE=
github.com/containerd/cgroups v0.0.0-20210114181951-8a68de567b68/go.mod h1:ZJeTFisyysqgcCdecO57Dj79RfL0LNeGiFUqLYQRYLE=
github.com/containerd/cgroups v1.0.1/go.mod h1:0SJrPIenamHDcZhEcJMNBB85rHcUsw4f25ZfBiPYRkU=
-github.com/containerd/cgroups v1.0.4 h1:jN/mbWBEaz+T1pi5OFtnkQ+8qnmEbAr1Oo1FRm5B0dA=
-github.com/containerd/cgroups v1.0.4/go.mod h1:nLNQtsF7Sl2HxNebu77i1R0oDlhiTG+kO4JTrUzo6IA=
+github.com/containerd/cgroups v1.0.3/go.mod h1:/ofk34relqNjSGyqPrmEULrO4Sc8LJhvJmWbUCUKqj8=
+github.com/containerd/cgroups v1.1.0 h1:v8rEWFl6EoqHB+swVNjVoCJE8o3jX7e8nqBGPLaDFBM=
+github.com/containerd/cgroups v1.1.0/go.mod h1:6ppBcbh/NOOUU+dMKrykgaBnK9lCIBxHqJDGwsa1mIw=
github.com/containerd/console v0.0.0-20180822173158-c12b1e7919c1/go.mod h1:Tj/on1eG8kiEhd0+fhSDzsPAFESxzBBvdyEgyryXffw=
github.com/containerd/console v0.0.0-20181022165439-0650fd9eeb50/go.mod h1:Tj/on1eG8kiEhd0+fhSDzsPAFESxzBBvdyEgyryXffw=
github.com/containerd/console v0.0.0-20191206165004-02ecf6a7291e/go.mod h1:8Pf4gM6VEbTNRIT26AyyU7hxdQU3MvAvxVI0sc00XBE=
@@ -308,7 +299,7 @@ github.com/containerd/containerd v1.5.0-rc.0/go.mod h1:V/IXoMqNGgBlabz3tHD2TWDoT
github.com/containerd/containerd v1.5.1/go.mod h1:0DOxVqwDy2iZvrZp2JUx/E+hS0UNTVn7dJnIOwtYR4g=
github.com/containerd/containerd v1.5.2/go.mod h1:0DOxVqwDy2iZvrZp2JUx/E+hS0UNTVn7dJnIOwtYR4g=
github.com/containerd/containerd v1.5.7/go.mod h1:gyvv6+ugqY25TiXxcZC3L5yOeYgEw0QMhscqVp1AR9c=
-github.com/containerd/containerd v1.5.10/go.mod h1:fvQqCfadDGga5HZyn3j4+dx56qj2I9YwBrlSdalvJYQ=
+github.com/containerd/containerd v1.5.13/go.mod h1:3AlCrzKROjIuP3JALsY14n8YtntaUDBu7vek+rPN5Vc=
github.com/containerd/continuity v0.0.0-20190426062206-aaeac12a7ffc/go.mod h1:GL3xCUCBDV3CZiTSEKksMWbLE66hEyuu9qyDOOqM47Y=
github.com/containerd/continuity v0.0.0-20190815185530-f2a389ac0a02/go.mod h1:GL3xCUCBDV3CZiTSEKksMWbLE66hEyuu9qyDOOqM47Y=
github.com/containerd/continuity v0.0.0-20191127005431-f65d91d395eb/go.mod h1:GL3xCUCBDV3CZiTSEKksMWbLE66hEyuu9qyDOOqM47Y=
@@ -369,7 +360,6 @@ github.com/coredns/corefile-migration v1.0.14/go.mod h1:XnhgULOEouimnzgn0t4WPuFD
github.com/coreos/bbolt v1.3.2/go.mod h1:iRUV2dpdMOn7Bo10OQBFzIJO9kkE559Wcmn+qkEiiKk=
github.com/coreos/etcd v3.3.10+incompatible/go.mod h1:uF7uidLiAD3TWHmW31ZFd/JWoc32PjwdhPthX9715RE=
github.com/coreos/etcd v3.3.13+incompatible/go.mod h1:uF7uidLiAD3TWHmW31ZFd/JWoc32PjwdhPthX9715RE=
-github.com/coreos/go-etcd v2.0.0+incompatible/go.mod h1:Jez6KQU2B/sWsbdaef3ED8NzMklzPG4d5KIOhIy30Tk=
github.com/coreos/go-iptables v0.4.5/go.mod h1:/mVI274lEDI2ns62jHCDnCyBF9Iwsmekav8Dbxlm1MU=
github.com/coreos/go-iptables v0.5.0/go.mod h1:/mVI274lEDI2ns62jHCDnCyBF9Iwsmekav8Dbxlm1MU=
github.com/coreos/go-oidc v2.1.0+incompatible/go.mod h1:CgnwVTmzoESiwO9qyAFEMiHoZ1nMCKZlZ9V6mm3/LKc=
@@ -382,15 +372,15 @@ github.com/coreos/go-systemd v0.0.0-20190321100706-95778dfbb74e/go.mod h1:F5haX7
github.com/coreos/go-systemd/v22 v22.0.0/go.mod h1:xO0FLkIi5MaZafQlIrOotqXZ90ih+1atmu1JpKERPPk=
github.com/coreos/go-systemd/v22 v22.1.0/go.mod h1:xO0FLkIi5MaZafQlIrOotqXZ90ih+1atmu1JpKERPPk=
github.com/coreos/go-systemd/v22 v22.3.1/go.mod h1:Y58oyj3AT4RCenI/lSvhwexgC+NSVTIJ3seZv2GcEnc=
-github.com/coreos/go-systemd/v22 v22.3.2 h1:D9/bQk5vlXQFZ6Kwuu6zaiXJ9oTPe68++AzAJc1DzSI=
github.com/coreos/go-systemd/v22 v22.3.2/go.mod h1:Y58oyj3AT4RCenI/lSvhwexgC+NSVTIJ3seZv2GcEnc=
+github.com/coreos/go-systemd/v22 v22.5.0 h1:RrqgGjYQKalulkV8NGVIfkXQf6YYmOyiJKk8iXXhfZs=
+github.com/coreos/go-systemd/v22 v22.5.0/go.mod h1:Y58oyj3AT4RCenI/lSvhwexgC+NSVTIJ3seZv2GcEnc=
github.com/coreos/license-bill-of-materials v0.0.0-20190913234955-13baff47494e/go.mod h1:4xMOusJ7xxc84WclVxKT8+lNfGYDwojOUC2OQNCwcj4=
github.com/coreos/pkg v0.0.0-20160727233714-3ac0863d7acf/go.mod h1:E3G3o1h8I7cfcXa63jLwjI0eiQQMgzzUDFVpN/nH/eA=
github.com/coreos/pkg v0.0.0-20180928190104-399ea9e2e55f/go.mod h1:E3G3o1h8I7cfcXa63jLwjI0eiQQMgzzUDFVpN/nH/eA=
github.com/couchbase/go-couchbase v0.0.0-20181122212707-3e9b6e1258bb/go.mod h1:TWI8EKQMs5u5jLKW/tsb9VwauIrMIxQG1r5fMsswK5U=
github.com/couchbase/gomemcached v0.0.0-20181122193126-5125a94a666c/go.mod h1:srVSlQLB8iXBVXHgnqemxUXqN6FCvClgCMPCsjBDR7c=
github.com/couchbase/goutils v0.0.0-20180530154633-e865a1461c8a/go.mod h1:BQwMFlJzDjFDG3DJUdU0KORxn88UlsOULuxLExMh3Hs=
-github.com/cpuguy83/go-md2man v1.0.10/go.mod h1:SmD6nW6nTyfqj6ABTjUi3V3JVMnlJmwcJI5acqYI6dE=
github.com/cpuguy83/go-md2man/v2 v2.0.0-20190314233015-f79a8a8ca69d/go.mod h1:maD7wRr/U5Z6m/iR4s+kqSMx2CaBsrgA7czyZG/E6dU=
github.com/cpuguy83/go-md2man/v2 v2.0.0/go.mod h1:maD7wRr/U5Z6m/iR4s+kqSMx2CaBsrgA7czyZG/E6dU=
github.com/creack/pty v1.1.7/go.mod h1:lj5s0c3V2DBrqTV7llrYr5NG6My20zk30Fl46Y7DoTY=
@@ -404,23 +394,19 @@ github.com/d2g/dhcp4 v0.0.0-20170904100407-a1d1b6c41b1c/go.mod h1:Ct2BUK8SB0YC1S
github.com/d2g/dhcp4client v1.0.0/go.mod h1:j0hNfjhrt2SxUOw55nL0ATM/z4Yt3t2Kd1mW34z5W5s=
github.com/d2g/dhcp4server v0.0.0-20181031114812-7d4a0a7f59a5/go.mod h1:Eo87+Kg/IX2hfWJfwxMzLyuSZyxSoAug2nGa1G2QAi8=
github.com/d2g/hardwareaddr v0.0.0-20190221164911-e7d9fbe030e4/go.mod h1:bMl4RjIciD2oAxI7DmWRx6gbeqrkoLqv3MV0vzNad+I=
-github.com/davecgh/go-spew v0.0.0-20171005155431-ecdeabc65495/go.mod h1:J7Y8YcW2NihsgmVo/mv3lAwl/skON4iLHjSsI+c5H38=
github.com/davecgh/go-spew v1.1.0/go.mod h1:J7Y8YcW2NihsgmVo/mv3lAwl/skON4iLHjSsI+c5H38=
github.com/davecgh/go-spew v1.1.1 h1:vj9j/u1bqnvCEfJOwUhtlOARqs3+rkHYY13jYWTU97c=
github.com/davecgh/go-spew v1.1.1/go.mod h1:J7Y8YcW2NihsgmVo/mv3lAwl/skON4iLHjSsI+c5H38=
github.com/daviddengcn/go-colortext v0.0.0-20160507010035-511bcaf42ccd/go.mod h1:dv4zxwHi5C/8AeI+4gX4dCWOIvNi7I6JCSX0HvlKPgE=
github.com/davidlazar/go-crypto v0.0.0-20200604182044-b73af7476f6c h1:pFUpOrbxDR6AkioZ1ySsx5yxlDQZ8stG2b88gTPxgJU=
github.com/davidlazar/go-crypto v0.0.0-20200604182044-b73af7476f6c/go.mod h1:6UhI8N9EjYm1c2odKpFpAYeR8dsBeM7PtzQhRgxRr9U=
-github.com/decred/dcrd/crypto/blake256 v1.0.0 h1:/8DMNYp9SGi5f0w7uCm6d6M4OU2rGFK09Y2A4Xv7EE0=
-github.com/decred/dcrd/dcrec/secp256k1/v4 v4.1.0 h1:HbphB4TFFXpv7MNrT52FGrrgVXF1owhMVTHFZIlnvd4=
-github.com/decred/dcrd/dcrec/secp256k1/v4 v4.1.0/go.mod h1:DZGJHZMqrU4JJqFAWUS2UO1+lbSKsdiOoYi9Zzey7Fc=
+github.com/decred/dcrd/crypto/blake256 v1.0.1 h1:7PltbUIQB7u/FfZ39+DGa/ShuMyJ5ilcvdfma9wOH6Y=
+github.com/decred/dcrd/dcrec/secp256k1/v4 v4.2.0 h1:8UrgZ3GkP4i/CLijOJx79Yu+etlyjdBU4sfcs2WYQMs=
+github.com/decred/dcrd/dcrec/secp256k1/v4 v4.2.0/go.mod h1:v57UDF4pDQJcEfFUCRop3lJL149eHGSe9Jvczhzjo/0=
github.com/denisenkom/go-mssqldb v0.9.0/go.mod h1:xbL0rPBG9cCiLr28tMa8zpbdarY27NDyej4t/EjAShU=
github.com/denverdino/aliyungo v0.0.0-20190125010748-a747050bb1ba/go.mod h1:dV8lFg6daOBZbT6/BDGIz6Y3WFGn8juu6G+CQ6LHtl0=
-github.com/dgraph-io/badger v1.6.1/go.mod h1:FRmFw3uxvcpa8zG3Rxs0th+hCLIuaQg8HlNV5bjgnuU=
-github.com/dgraph-io/ristretto v0.0.2/go.mod h1:KPxhHT9ZxKefz+PCeOGsrHpl1qZ7i70dGTu2u+Ahh6E=
github.com/dgrijalva/jwt-go v0.0.0-20170104182250-a601269ab70c/go.mod h1:E3ru+11k8xSBh+hMPgOLZmtrrCbhqsmaPHjLKYnJCaQ=
github.com/dgrijalva/jwt-go v3.2.0+incompatible/go.mod h1:E3ru+11k8xSBh+hMPgOLZmtrrCbhqsmaPHjLKYnJCaQ=
-github.com/dgryski/go-farm v0.0.0-20190423205320-6a90982ecee2/go.mod h1:SqUrOPUnsFjfmXRMNPybcSiG0BgUW2AuFH8PAnS2iTw=
github.com/dgryski/go-sip13 v0.0.0-20181026042036-e10d5fee7954/go.mod h1:vAd38F8PWV+bWy6jNmig1y/TA+kYO4g3RSRF0IAv0no=
github.com/dimchansky/utfbom v1.1.0/go.mod h1:rO41eb7gLfo8SF1jd9F8HplJm1Fewwi4mQvIirEdv+8=
github.com/distribution/distribution/v3 v3.0.0-20210804104954-38ab4c606ee3/go.mod h1:gt38b7cvVKazi5XkHvINNytZXgTEntyhtyM3HQz46Nk=
@@ -442,8 +428,9 @@ github.com/docker/go-events v0.0.0-20170721190031-9461782956ad/go.mod h1:Uw6Uezg
github.com/docker/go-events v0.0.0-20190806004212-e31b211e4f1c/go.mod h1:Uw6UezgYA44ePAFQYUehOuCzmy5zmg/+nl2ZfMWGkpA=
github.com/docker/go-metrics v0.0.0-20180209012529-399ea8c73916/go.mod h1:/u0gXw0Gay3ceNrsHubL3BtdOL2fHf93USgMTe0W5dI=
github.com/docker/go-metrics v0.0.1/go.mod h1:cG1hvH2utMXtqgqqYE9plW6lDxS3/5ayHzueweSI3Vw=
-github.com/docker/go-units v0.4.0 h1:3uh0PgVws3nIA0Q+MwDC8yjEPf9zjRfZZWXZYDct3Tw=
github.com/docker/go-units v0.4.0/go.mod h1:fgPhTUdO+D/Jk86RDLlptpiXQzgHJF7gydDDbaIK4Dk=
+github.com/docker/go-units v0.5.0 h1:69rxXcBk27SvSaaxTtLh/8llcHD8vYHT7WSdRZ/jvr4=
+github.com/docker/go-units v0.5.0/go.mod h1:fgPhTUdO+D/Jk86RDLlptpiXQzgHJF7gydDDbaIK4Dk=
github.com/docker/libtrust v0.0.0-20150114040149-fa567046d9b1/go.mod h1:cyGadeNEkKy96OOhEzfZl+yxihPEzKnqJwvfuSUqbZE=
github.com/docker/spdystream v0.0.0-20160310174837-449fdfce4d96/go.mod h1:Qh8CwZgvJUkLughtfhJv5dyTYa91l1fOUCrgjqmcifM=
github.com/docopt/docopt-go v0.0.0-20180111231733-ee0de3bc6815/go.mod h1:WwZ+bS3ebgob9U8Nd0kOddGdZWjyMGR8Wziv+TBNwSE=
@@ -533,8 +520,8 @@ github.com/go-logr/logr v0.1.0/go.mod h1:ixOQHD9gLJUVQQ2ZOR7zLEifBX6tGkNJF4QyIY7
github.com/go-logr/logr v0.2.0/go.mod h1:z6/tIYblkpsD+a4lm/fGIIU9mZ+XfAiaFtq7xTgseGU=
github.com/go-logr/logr v0.4.0/go.mod h1:z6/tIYblkpsD+a4lm/fGIIU9mZ+XfAiaFtq7xTgseGU=
github.com/go-logr/logr v1.2.0/go.mod h1:jdQByPbusPIv2/zmleS9BjJVeZ6kBagPoEUsqbVz/1A=
-github.com/go-logr/logr v1.2.3 h1:2DntVwHkVopvECVRSlL5PSo9eG+cAkDCuckLubN+rq0=
-github.com/go-logr/logr v1.2.3/go.mod h1:jdQByPbusPIv2/zmleS9BjJVeZ6kBagPoEUsqbVz/1A=
+github.com/go-logr/logr v1.2.4 h1:g01GSCwiDw2xSZfjJ2/T9M+S6pFdcNtFYsp+Y43HYDQ=
+github.com/go-logr/logr v1.2.4/go.mod h1:jdQByPbusPIv2/zmleS9BjJVeZ6kBagPoEUsqbVz/1A=
github.com/go-logr/zapr v0.4.0/go.mod h1:tabnROwaDl0UNxkVeFRbY8bwB37GwRv0P8lg6aAiEnk=
github.com/go-logr/zapr v1.2.0/go.mod h1:Qa4Bsj2Vb+FAVeAKsLD8RLQ+YRJB8YDmOAKxaBQf7Ro=
github.com/go-ole/go-ole v1.2.6/go.mod h1:pprOEPIfldk/42T2oK7lQ4v4JSDwmV0As9GaiUsvbm0=
@@ -558,8 +545,9 @@ github.com/go-sql-driver/mysql v1.4.0/go.mod h1:zAC/RDZ24gD3HViQzih4MyKcchzm+sOG
github.com/go-sql-driver/mysql v1.4.1/go.mod h1:zAC/RDZ24gD3HViQzih4MyKcchzm+sOG5ZlKdlhCg5w=
github.com/go-sql-driver/mysql v1.5.0/go.mod h1:DCzpHaOWr8IXmIStZouvnhqoel9Qv2LBy8hT2VhHyBg=
github.com/go-stack/stack v1.8.0/go.mod h1:v0f6uXyyMGvRgIKkXu+yp6POWl0qKG85gN/melR3HDY=
-github.com/go-task/slim-sprig v0.0.0-20210107165309-348f09dbbbc0 h1:p104kn46Q8WdvHunIJ9dAyjPVtrBPhSr3KT2yUst43I=
github.com/go-task/slim-sprig v0.0.0-20210107165309-348f09dbbbc0/go.mod h1:fyg7847qk6SyHyPtNmDHnmrv/HOrqktSC+C9fM+CJOE=
+github.com/go-task/slim-sprig v0.0.0-20230315185526-52ccab3ef572 h1:tfuBGBXKqDEevZMzYi5KSi8KkcZtzBcTgAUUtapy0OI=
+github.com/go-task/slim-sprig v0.0.0-20230315185526-52ccab3ef572/go.mod h1:9Pwr4B2jHnOSGXyyzV8ROjYa2ojvAY6HCGYYfMoC3Ls=
github.com/gobuffalo/logger v1.0.3/go.mod h1:SoeejUwldiS7ZsyCBphOGURmWdwUFXs0J7TCjEhjKxM=
github.com/gobuffalo/packd v1.0.0/go.mod h1:6VTc4htmJRFB7u1m/4LeMTWjFoYrUiBkU9Fdec9hrhI=
github.com/gobuffalo/packr/v2 v2.8.1/go.mod h1:c/PLlOuTU+p3SybaJATW3H6lX/iK7xEz5OeMf+NnJpg=
@@ -625,8 +613,9 @@ github.com/golang/protobuf v1.4.2/go.mod h1:oDoupMAO8OvCJWAcko0GGGIgR6R6ocIYbsSw
github.com/golang/protobuf v1.4.3/go.mod h1:oDoupMAO8OvCJWAcko0GGGIgR6R6ocIYbsSw735rRwI=
github.com/golang/protobuf v1.5.0/go.mod h1:FsONVRAS9T7sI+LIUmWTfcYkHO4aIWwzhcaSAoJOfIk=
github.com/golang/protobuf v1.5.1/go.mod h1:DopwsBzvsk0Fs44TXzsVbJyPhcCPeIwnvohx4u74HPM=
-github.com/golang/protobuf v1.5.2 h1:ROPKBNFfQgOUMifHyP+KYbvpjbdoFNs+aK7DXlji0Tw=
github.com/golang/protobuf v1.5.2/go.mod h1:XVQd3VNwM+JqD3oG2Ue2ip4fOMUkwXdXDdiuN0vRsmY=
+github.com/golang/protobuf v1.5.3 h1:KhyjKVUg7Usr/dYsdSqoFveMYd5ko72D+zANwlG1mmg=
+github.com/golang/protobuf v1.5.3/go.mod h1:XVQd3VNwM+JqD3oG2Ue2ip4fOMUkwXdXDdiuN0vRsmY=
github.com/golang/snappy v0.0.0-20180518054509-2e65f85255db/go.mod h1:/XxbfmMg8lxefKM7IXC3fBNl/7bRcc72aCRzEWrmP2Q=
github.com/golangplus/testing v0.0.0-20180327235837-af21d9c3145e/go.mod h1:0AA//k/eakGydO4jKRoRL2j92ZKSzTgj9tclaCrvXHk=
github.com/gomodule/redigo v1.8.2/go.mod h1:P9dn9mFrCBvWhGE1wpxx6fgq7BAeLBk+UUUzlpkBYO0=
@@ -634,7 +623,7 @@ github.com/gomodule/redigo v2.0.0+incompatible/go.mod h1:B4C85qUVwatsJoIUNIfCRsp
github.com/google/btree v0.0.0-20180813153112-4030bb1f1f0c/go.mod h1:lNA+9X1NB3Zf8V7Ke586lFgjr2dZNuvo3lPJSGZ5JPQ=
github.com/google/btree v1.0.0/go.mod h1:lNA+9X1NB3Zf8V7Ke586lFgjr2dZNuvo3lPJSGZ5JPQ=
github.com/google/btree v1.0.1/go.mod h1:xXMiIv4Fb/0kKde4SpL7qlzvu5cMJDRkFDxJfI9uaxA=
-github.com/google/cadvisor v0.39.3/go.mod h1:kN93gpdevu+bpS227TyHVZyCU5bbqCzTj5T9drl34MI=
+github.com/google/cadvisor v0.39.4/go.mod h1:kN93gpdevu+bpS227TyHVZyCU5bbqCzTj5T9drl34MI=
github.com/google/cadvisor v0.43.0/go.mod h1:+RdMSbc3FVr5NYCD2dOEJy/LI0jYJ/0xJXkzWXEyiFQ=
github.com/google/cel-go v0.9.0/go.mod h1:U7ayypeSkw23szu4GaQTPJGx66c20mx8JklMSxrmI1w=
github.com/google/cel-spec v0.6.0/go.mod h1:Nwjgxy5CbjlPrtCWjeDjUyKMl8w41YBYGjsyDdqk0xA=
@@ -649,14 +638,14 @@ github.com/google/go-cmp v0.5.2/go.mod h1:v8dTdLbMG2kIc/vJvl+f65V22dbkXbowE6jgT/
github.com/google/go-cmp v0.5.3/go.mod h1:v8dTdLbMG2kIc/vJvl+f65V22dbkXbowE6jgT/gNBxE=
github.com/google/go-cmp v0.5.4/go.mod h1:v8dTdLbMG2kIc/vJvl+f65V22dbkXbowE6jgT/gNBxE=
github.com/google/go-cmp v0.5.5/go.mod h1:v8dTdLbMG2kIc/vJvl+f65V22dbkXbowE6jgT/gNBxE=
-github.com/google/go-cmp v0.5.8 h1:e6P7q2lk1O+qJJb4BtCQXlK8vWEO8V1ZeuEdJNOqZyg=
github.com/google/go-cmp v0.5.8/go.mod h1:17dUlkBOakJ0+DkrSSNjCkIjxS6bF9zb3elmeNGIjoY=
+github.com/google/go-cmp v0.5.9 h1:O2Tfq5qg4qc4AmwVlvv0oLiVAGB7enBSJ2x2DqQFi38=
+github.com/google/go-cmp v0.5.9/go.mod h1:17dUlkBOakJ0+DkrSSNjCkIjxS6bF9zb3elmeNGIjoY=
github.com/google/go-github v17.0.0+incompatible/go.mod h1:zLgOLi98H3fifZn+44m+umXrS52loVEgC2AApnigrVQ=
github.com/google/go-querystring v1.0.0/go.mod h1:odCYkC5MyYFN7vkCjXpyrEuKhc/BUO6wN/zVPAxq5ck=
github.com/google/gofuzz v1.0.0/go.mod h1:dBl0BpW6vV/+mYPU4Po3pmUjxk6FQPldtuIdl/M65Eg=
github.com/google/gofuzz v1.1.0 h1:Hsa8mG0dQ46ij8Sl2AYJDUv1oA9/d6Vk+3LG99Oe02g=
github.com/google/gofuzz v1.1.0/go.mod h1:dBl0BpW6vV/+mYPU4Po3pmUjxk6FQPldtuIdl/M65Eg=
-github.com/google/gopacket v1.1.17/go.mod h1:UdDNZ1OO62aGYVnPhxT1U6aI7ukYtA/kB8vaU0diBUM=
github.com/google/gopacket v1.1.19 h1:ves8RnFZPGiFnTS0uPQStjwru6uO6h+nlr9j6fL7kF8=
github.com/google/gopacket v1.1.19/go.mod h1:iJ8V8n6KS+z2U1A8pUwu8bW5SyEMkXJB8Yo/Vo+TKTo=
github.com/google/martian v2.1.0+incompatible/go.mod h1:9I4somxYTbIHy5NJKHRl3wXiIaQGbYVAs8BPL6v8lEs=
@@ -673,6 +662,9 @@ github.com/google/pprof v0.0.0-20201023163331-3e6fc7fc9c4c/go.mod h1:kpwsk12EmLe
github.com/google/pprof v0.0.0-20201203190320-1bf35d6f28c2/go.mod h1:kpwsk12EmLew5upagYY7GY0pfYCcupk39gWOCRROcvE=
github.com/google/pprof v0.0.0-20210122040257-d980be63207e/go.mod h1:kpwsk12EmLew5upagYY7GY0pfYCcupk39gWOCRROcvE=
github.com/google/pprof v0.0.0-20210226084205-cbba55b83ad5/go.mod h1:kpwsk12EmLew5upagYY7GY0pfYCcupk39gWOCRROcvE=
+github.com/google/pprof v0.0.0-20210407192527-94a9f03dee38/go.mod h1:kpwsk12EmLew5upagYY7GY0pfYCcupk39gWOCRROcvE=
+github.com/google/pprof v0.0.0-20230602150820-91b7bce49751 h1:hR7/MlvK23p6+lIw9SN1TigNLn9ZnF3W4SYRKq2gAHs=
+github.com/google/pprof v0.0.0-20230602150820-91b7bce49751/go.mod h1:Jh3hGz2jkYak8qXPD19ryItVnUgpgeqzdkY/D0EaeuA=
github.com/google/renameio v0.1.0/go.mod h1:KWCgfxg9yswjAJkECMjeO8J8rahYeXnNhOm40UhjYkI=
github.com/google/shlex v0.0.0-20191202100458-e7afc7fbc510/go.mod h1:pupxD2MaaD3pAXIBCelhxNneeOaAeabZDe5s4K6zSpQ=
github.com/google/uuid v1.0.0/go.mod h1:TIyPZe4MgqvfeYDBFedMoGGpEw/LqOeaOT+nhxU+yHo=
@@ -720,21 +712,19 @@ github.com/grpc-ecosystem/grpc-gateway v1.9.5/go.mod h1:vNeuVxBJEsws4ogUvrchl83t
github.com/grpc-ecosystem/grpc-gateway v1.16.0/go.mod h1:BDjrQk3hbvj6Nolgz8mAMFbcEtjT1g+wF4CSlocrBnw=
github.com/grpc-ecosystem/grpc-opentracing v0.0.0-20180507213350-8e809c8a8645 h1:MJG/KsmcqMwFAkh8mTnAwhyKoB+sTAnY4CACC110tbU=
github.com/grpc-ecosystem/grpc-opentracing v0.0.0-20180507213350-8e809c8a8645/go.mod h1:6iZfnjpejD4L/4DwD7NryNaJyCQdzwWwH2MWhCA90Kw=
-github.com/gxed/hashland/keccakpg v0.0.1/go.mod h1:kRzw3HkwxFU1mpmPP8v1WyQzwdGfmKFJ6tItnhQ67kU=
-github.com/gxed/hashland/murmur3 v0.0.1/go.mod h1:KjXop02n4/ckmZSnY2+HKcLud/tcmvhST0bie/0lS48=
github.com/hashicorp/consul/api v1.1.0/go.mod h1:VmuI/Lkw1nC05EYQWNKwWGbkg+FbDBtguAZLlVdkD9Q=
github.com/hashicorp/consul/api v1.3.0/go.mod h1:MmDNSzIMUjNpY/mQ398R4bk2FnqQLoPndWW5VkKPlCE=
github.com/hashicorp/consul/sdk v0.1.1/go.mod h1:VKf9jXwCTEY1QZP2MOLRhb5i/I/ssyNV1vwHyQBF0x8=
github.com/hashicorp/consul/sdk v0.3.0/go.mod h1:VKf9jXwCTEY1QZP2MOLRhb5i/I/ssyNV1vwHyQBF0x8=
github.com/hashicorp/errwrap v0.0.0-20141028054710-7554cd9344ce/go.mod h1:YH+1FKiLXxHSkmPseP+kNlulaMuP3n2brvKWEqk/Jc4=
-github.com/hashicorp/errwrap v1.0.0 h1:hLrqtEDnRye3+sgx6z4qVLNuviH3MR5aQ0ykNJa/UYA=
github.com/hashicorp/errwrap v1.0.0/go.mod h1:YH+1FKiLXxHSkmPseP+kNlulaMuP3n2brvKWEqk/Jc4=
+github.com/hashicorp/errwrap v1.1.0 h1:OxrOeh75EUXMY8TBjag2fzXGZ40LB6IKw45YeGUDY2I=
+github.com/hashicorp/errwrap v1.1.0/go.mod h1:YH+1FKiLXxHSkmPseP+kNlulaMuP3n2brvKWEqk/Jc4=
github.com/hashicorp/go-cleanhttp v0.5.1/go.mod h1:JpRdi6/HCYpAwUzNwuwqhbovhLtngrth3wmdIIUrZ80=
github.com/hashicorp/go-immutable-radix v1.0.0/go.mod h1:0y9vanUI8NX6FsYoO3zeMjhV/C5i9g4Q3DwcSNZ4P60=
github.com/hashicorp/go-msgpack v0.5.3/go.mod h1:ahLV/dePpqEmjfWmKiqvPkv/twdG7iPBM1vqhUKIvfM=
github.com/hashicorp/go-multierror v0.0.0-20161216184304-ed905158d874/go.mod h1:JMRHfdO9jKNzS/+BTlxCjKNQHg/jZAft8U7LloJvN7I=
github.com/hashicorp/go-multierror v1.0.0/go.mod h1:dHtQlpGsu+cZNNAkkCN/P3hoUDHhCYQXV3UM06sGGrk=
-github.com/hashicorp/go-multierror v1.1.0/go.mod h1:spPvp8C1qA32ftKqdAHm4hHTbPw+vmowP0z+KUhOZdA=
github.com/hashicorp/go-multierror v1.1.1 h1:H5DkEtf6CXdFp0N0Em5UCwQpXMWke8IA0+lD48awMYo=
github.com/hashicorp/go-multierror v1.1.1/go.mod h1:iw975J/qwKPdAO1clOe2L8331t/9/fmwbPZ6JB6eMoM=
github.com/hashicorp/go-rootcerts v1.0.0/go.mod h1:K6zTfqpRlCUIjkwsN4Z+hiSfzSTQa6eBIzfwKfwNnHU=
@@ -760,10 +750,8 @@ github.com/hpcloud/tail v1.0.0/go.mod h1:ab1qPbhIpdTxEkNHXyeSf5vhxWSCs/tWer42PpO
github.com/huandu/xstrings v1.3.1/go.mod h1:y5/lhBue+AyNmUVz9RLU9xbLR0o4KIIExikq4ovT0aE=
github.com/huandu/xstrings v1.3.2/go.mod h1:y5/lhBue+AyNmUVz9RLU9xbLR0o4KIIExikq4ovT0aE=
github.com/hudl/fargo v1.3.0/go.mod h1:y3CKSmjA+wD2gak7sUSXTAoopbhU08POFhmITJgmKTg=
-github.com/huin/goupnp v1.0.0/go.mod h1:n9v9KO1tAxYH82qOn+UTIFQDmx5n1Zxd/ClZDMX7Bnc=
-github.com/huin/goupnp v1.0.3 h1:N8No57ls+MnjlB+JPiCVSOyy/ot7MJTqlo7rn+NYSqQ=
-github.com/huin/goupnp v1.0.3/go.mod h1:ZxNlw5WqJj6wSsRK5+YfflQGXYfccj5VgQsMNixHM7Y=
-github.com/huin/goutil v0.0.0-20170803182201-1ca381bf3150/go.mod h1:PpLOETDnJ0o3iZrZfqZzyLl6l7F3c6L1oWn7OICBi6o=
+github.com/huin/goupnp v1.2.0 h1:uOKW26NG1hsSSbXIZ1IR7XP9Gjd1U8pnLaCMgntmkmY=
+github.com/huin/goupnp v1.2.0/go.mod h1:gnGPsThkYa7bFi/KWmEysQRf48l2dvR5bxr2OFckNX8=
github.com/ianlancetaylor/demangle v0.0.0-20181102032728-5e5cf60278f6/go.mod h1:aSSvb/t6k1mPoxDqO4vJh6VOCGPwU4O0C2/Eqndh1Sc=
github.com/ianlancetaylor/demangle v0.0.0-20200824232613-28f6c0f3b639/go.mod h1:aSSvb/t6k1mPoxDqO4vJh6VOCGPwU4O0C2/Eqndh1Sc=
github.com/imdario/mergo v0.3.5/go.mod h1:2EnlNZ0deacrJVfApfmtdGgDfMuh/nq6Ok1EcJh5FfA=
@@ -777,33 +765,19 @@ github.com/inconshreveable/mousetrap v1.0.0 h1:Z8tu5sraLXCXIcARxBp/8cbvlwVa7Z1NH
github.com/inconshreveable/mousetrap v1.0.0/go.mod h1:PxqpIevigyE2G7u3NXJIT2ANytuPF1OarO4DADm73n8=
github.com/influxdata/influxdb1-client v0.0.0-20191209144304-8bf82d3c094d/go.mod h1:qj24IKcXYK6Iy9ceXlo3Tc+vtHo9lIhSX5JddghvEPo=
github.com/infobloxopen/go-trees v0.0.0-20190313150506-2af4e13f9062/go.mod h1:PcNJqIlcX/dj3DTG/+QQnRvSgTMG6CLpRMjWcv4+J6w=
-github.com/ipfs/go-cid v0.0.3/go.mod h1:GHWU/WuQdMPmIosc4Yn1bcCT7dSeX4lBafM7iqUPQvM=
github.com/ipfs/go-cid v0.0.4/go.mod h1:4LLaPOQwmk5z9LBgQnpkivrx8BJjUyGwTXCd5Xfj6+M=
-github.com/ipfs/go-cid v0.0.5/go.mod h1:plgt+Y5MnOey4vO4UlUazGqdbEXuFYitED67FexhXog=
-github.com/ipfs/go-cid v0.0.7/go.mod h1:6Ux9z5e+HpkQdckYoX1PG/6xqKspzlEIR5SDmgqgC/I=
-github.com/ipfs/go-cid v0.2.0 h1:01JTiihFq9en9Vz0lc0VDWvZe/uBonGpzo4THP0vcQ0=
-github.com/ipfs/go-cid v0.2.0/go.mod h1:P+HXFDF4CVhaVayiEb4wkAy7zBHxBwsJyt0Y5U6MLro=
-github.com/ipfs/go-datastore v0.4.1/go.mod h1:SX/xMIKoCszPqp+z9JhPYCmoOoXTvaa13XEbGtsFUhA=
-github.com/ipfs/go-datastore v0.4.4/go.mod h1:SX/xMIKoCszPqp+z9JhPYCmoOoXTvaa13XEbGtsFUhA=
-github.com/ipfs/go-datastore v0.5.1 h1:WkRhLuISI+XPD0uk3OskB0fYFSyqK8Ob5ZYew9Qa1nQ=
-github.com/ipfs/go-datastore v0.5.1/go.mod h1:9zhEApYMTl17C8YDp7JmU7sQZi2/wqiYh73hakZ90Bk=
+github.com/ipfs/go-cid v0.4.1 h1:A/T3qGvxi4kpKWWcPC/PgbvDA2bjVLO7n4UeVwnbs/s=
+github.com/ipfs/go-cid v0.4.1/go.mod h1:uQHwDeX4c6CtyrFwdqyhpNcxVewur1M7l7fNU7LKwZk=
+github.com/ipfs/go-datastore v0.6.0 h1:JKyz+Gvz1QEZw0LsX1IBn+JFCJQH4SJVFtM4uWU0Myk=
+github.com/ipfs/go-datastore v0.6.0/go.mod h1:rt5M3nNbSO/8q1t4LNkLyUwRs8HupMeN/8O4Vn9YAT8=
github.com/ipfs/go-detect-race v0.0.1 h1:qX/xay2W3E4Q1U7d9lNs1sU9nvguX0a7319XbyQ6cOk=
github.com/ipfs/go-detect-race v0.0.1/go.mod h1:8BNT7shDZPo99Q74BpGMK+4D8Mn4j46UU0LZ723meps=
-github.com/ipfs/go-ds-badger v0.2.3/go.mod h1:pEYw0rgg3FIrywKKnL+Snr+w/LjJZVMTBRn4FS6UHUk=
-github.com/ipfs/go-ds-leveldb v0.4.2/go.mod h1:jpbku/YqBSsBc1qgME8BkWS4AxzF2cEu1Ii2r79Hh9s=
-github.com/ipfs/go-ipfs-delay v0.0.0-20181109222059-70721b86a9a8/go.mod h1:8SP1YXK1M1kXuc4KJZINY3TQQ03J2rwBG9QfXmbRPrw=
-github.com/ipfs/go-ipfs-util v0.0.1/go.mod h1:spsl5z8KUnrve+73pOhSVZND1SIxPW5RyBCNzQxlJBc=
github.com/ipfs/go-ipfs-util v0.0.2 h1:59Sswnk1MFaiq+VcaknX7aYEyGyGDAA73ilhEK2POp8=
github.com/ipfs/go-ipfs-util v0.0.2/go.mod h1:CbPtkWJzjLdEcezDns2XYaehFVNXG9zrdrtMecczcsQ=
github.com/ipfs/go-ipns v0.2.0 h1:BgmNtQhqOw5XEZ8RAfWEpK4DhqaYiuP6h71MhIp7xXU=
github.com/ipfs/go-ipns v0.2.0/go.mod h1:3cLT2rbvgPZGkHJoPO1YMJeh6LtkxopCkKFcio/wE24=
-github.com/ipfs/go-log v0.0.1/go.mod h1:kL1d2/hzSpI0thNYjiKfjanbVNU+IIGA/WnNESY9leM=
-github.com/ipfs/go-log v1.0.3/go.mod h1:OsLySYkwIbiSUR/yBTdv1qPtcE4FW3WPWk/ewz9Ru+A=
-github.com/ipfs/go-log v1.0.4/go.mod h1:oDCg2FkjogeFOhqqb+N39l2RpTNPL6F/StPkB3kPgcs=
github.com/ipfs/go-log v1.0.5 h1:2dOuUCB1Z7uoczMWgAyDck5JLb72zHzrMnGnCNNbvY8=
github.com/ipfs/go-log v1.0.5/go.mod h1:j0b8ZoR+7+R99LD9jZ6+AJsrzkPbSXbZfGakb5JPtIo=
-github.com/ipfs/go-log/v2 v2.0.3/go.mod h1:O7P1lJt27vWHhOwQmcFEvlmo49ry2VY2+JfBWFaa9+0=
-github.com/ipfs/go-log/v2 v2.0.5/go.mod h1:eZs4Xt4ZUJQFM3DlanGhy7TkwwawCZcSByscwkWG+dw=
github.com/ipfs/go-log/v2 v2.1.3/go.mod h1:/8d0SH3Su5Ooc31QlL1WysJhvyOTDCjcCZ9Axpmri6g=
github.com/ipfs/go-log/v2 v2.5.1 h1:1XdUzF7048prq4aBjDQQ4SL5RxftpRGdXhNRwKSAlcY=
github.com/ipfs/go-log/v2 v2.5.1/go.mod h1:prSpmC1Gpllc9UYWxDiZDreBYw7zp4Iqp1kOLU9U5UI=
@@ -816,12 +790,9 @@ github.com/jackpal/go-nat-pmp v1.0.2/go.mod h1:QPH045xvCAeXUZOxsnwmrtiCoxIr9eob+
github.com/jbenet/go-cienv v0.1.0/go.mod h1:TqNnHUmJgXau0nCzC7kXWeotg3J9W34CUv5Djy1+FlA=
github.com/jbenet/go-temp-err-catcher v0.1.0 h1:zpb3ZH6wIE8Shj2sKS+khgRvf7T7RABoLk/+KKHggpk=
github.com/jbenet/go-temp-err-catcher v0.1.0/go.mod h1:0kJRvmDZXNMIiJirNPEYfhpPwbGVtZVWC34vc5WLsDk=
-github.com/jbenet/goprocess v0.0.0-20160826012719-b497e2f366b8/go.mod h1:Ly/wlsjFq/qrU3Rar62tu1gASgGw6chQbSh/XgIIXCY=
-github.com/jbenet/goprocess v0.1.3/go.mod h1:5yspPrukOVuOLORacaBi858NqyClJPQxYZlqdZVfqY4=
github.com/jbenet/goprocess v0.1.4 h1:DRGOFReOMqqDNXwW70QkacFW0YN9QnwLV0Vqk+3oU0o=
github.com/jbenet/goprocess v0.1.4/go.mod h1:5yspPrukOVuOLORacaBi858NqyClJPQxYZlqdZVfqY4=
github.com/jellevandenhooff/dkim v0.0.0-20150330215556-f50fe3d243e1/go.mod h1:E0B/fFc00Y+Rasa88328GlI/XbtyysCtTHZS8h7IrBU=
-github.com/jessevdk/go-flags v0.0.0-20141203071132-1679536dcc89/go.mod h1:4FA24M0QyGHXBuZZK/XkWh8h0e1EYbRYJSGM75WSRxI=
github.com/jessevdk/go-flags v1.4.0/go.mod h1:4FA24M0QyGHXBuZZK/XkWh8h0e1EYbRYJSGM75WSRxI=
github.com/jmespath/go-jmespath v0.0.0-20160202185014-0b12d6b521d8/go.mod h1:Nht3zPeWKUH0NzdCt2Blrr5ys8VGpn0CEB0cQHVjt7k=
github.com/jmespath/go-jmespath v0.0.0-20160803190731-bd40a432e4c7/go.mod h1:Nht3zPeWKUH0NzdCt2Blrr5ys8VGpn0CEB0cQHVjt7k=
@@ -835,7 +806,6 @@ github.com/jonboulle/clockwork v0.2.2/go.mod h1:Pkfl5aHPm1nk2H9h0bjmnJD/BcgbGXUB
github.com/josharian/intern v1.0.0/go.mod h1:5DoeVV0s6jJacbCEi61lwdGj/aVlrQvzHFFd8Hwg//Y=
github.com/jpillora/backoff v0.0.0-20170918002102-8eab2debe79d/go.mod h1:2iMrUgbbvHEiQClaW2NsSzMyGHqN+rDFqY705q49KG0=
github.com/jpillora/backoff v1.0.0/go.mod h1:J/6gKK9jxlEcS3zixgDgUAsiuZ7yrSoa/FX5e0EB2j4=
-github.com/jrick/logrotate v1.0.0/go.mod h1:LNinyqDIJnpAur+b8yyulnQw/wDuN1+BYKlTRt3OuAQ=
github.com/json-iterator/go v1.1.6/go.mod h1:+SdeFBvtyEkXs7REEP0seUULqWtbJapLOCVDaaPEHmU=
github.com/json-iterator/go v1.1.7/go.mod h1:KdQUCv79m/52Kvf8AW2vK1V8akMuk1QjK/uOdHXbAo4=
github.com/json-iterator/go v1.1.8/go.mod h1:KdQUCv79m/52Kvf8AW2vK1V8akMuk1QjK/uOdHXbAo4=
@@ -851,28 +821,24 @@ github.com/juju/ratelimit v1.0.1/go.mod h1:qapgC/Gy+xNh9UxzV13HGGl/6UXNN+ct+vwSg
github.com/julienschmidt/httprouter v1.2.0/go.mod h1:SYymIcj16QtmaHHD7aYtjjsJG7VTCxuUUipMqKk8s4w=
github.com/julienschmidt/httprouter v1.3.0/go.mod h1:JR6WtHb+2LUe8TCKY3cZOxFyyO8IZAc4RVcycCCAKdM=
github.com/jung-kurt/gofpdf v1.0.3-0.20190309125859-24315acbbda5/go.mod h1:7Id9E/uU8ce6rXgefFLlgrJj/GYY22cpxn+r32jIOes=
-github.com/kami-zh/go-capturer v0.0.0-20171211120116-e492ea43421d/go.mod h1:P2viExyCEfeWGU259JnaQ34Inuec4R38JCyBx2edgD0=
github.com/karrick/godirwalk v1.15.8/go.mod h1:j4mkqPuvaLI8mp1DroR3P6ad7cyYd4c1qeJ3RV7ULlk=
github.com/karrick/godirwalk v1.16.1/go.mod h1:j4mkqPuvaLI8mp1DroR3P6ad7cyYd4c1qeJ3RV7ULlk=
github.com/kisielk/errcheck v1.1.0/go.mod h1:EZBBE59ingxPouuu3KfxchcWSUPOHkagtvWXihfKN4Q=
github.com/kisielk/errcheck v1.2.0/go.mod h1:/BMXB+zMLi60iA8Vv6Ksmxu/1UDYcXs4uQLJ+jE2L00=
github.com/kisielk/errcheck v1.5.0/go.mod h1:pFxgyoBC7bSaBwPgfKdkLd5X25qrDl4LWUI2bnpBCr8=
github.com/kisielk/gotool v1.0.0/go.mod h1:XhKaO+MFFWcvkIS/tQcRk01m1F5IRFswLeQ+oQHNcck=
-github.com/kkdai/bstream v0.0.0-20161212061736-f391b8402d23/go.mod h1:J+Gs4SYgM6CZQHDETBtE9HaSEkGmuNXF86RwHhHUvq4=
github.com/klauspost/compress v1.11.3/go.mod h1:aoV0uJVorq1K+umq18yTdKaF57EivdYsUV+/s2qKfXs=
github.com/klauspost/compress v1.11.13/go.mod h1:aoV0uJVorq1K+umq18yTdKaF57EivdYsUV+/s2qKfXs=
-github.com/klauspost/compress v1.15.1 h1:y9FcTHGyrebwfP0ZZqFiaxTaiDnUrGkJkI+f583BL1A=
-github.com/klauspost/compress v1.15.1/go.mod h1:/3/Vjq9QcHkK5uEr5lBEmyoZ1iFhe47etQ6QUkpK6sk=
+github.com/klauspost/compress v1.16.5 h1:IFV2oUNUzZaz+XyusxpLzpzS8Pt5rh0Z16For/djlyI=
+github.com/klauspost/compress v1.16.5/go.mod h1:ntbaceVETuRiXiv4DpjP66DpAtAGkEQskQzEyD//IeE=
github.com/klauspost/cpuid/v2 v2.0.4/go.mod h1:FInQzS24/EEf25PyTYn52gqo7WaD8xa0213Md/qVLRg=
-github.com/klauspost/cpuid/v2 v2.0.9/go.mod h1:FInQzS24/EEf25PyTYn52gqo7WaD8xa0213Md/qVLRg=
-github.com/klauspost/cpuid/v2 v2.1.0 h1:eyi1Ad2aNJMW95zcSbmGg7Cg6cq3ADwLpMAP96d8rF0=
-github.com/klauspost/cpuid/v2 v2.1.0/go.mod h1:RVVoqg1df56z8g3pUjL/3lE5UfnlrJX8tyFgg4nqhuY=
+github.com/klauspost/cpuid/v2 v2.2.5 h1:0E5MSMDEoAulmXNFquVs//DdoomxaoTY1kUhbc/qbZg=
+github.com/klauspost/cpuid/v2 v2.2.5/go.mod h1:Lcz8mBdAVJIBVzewtcLocK12l3Y+JytZYpaMropDUws=
github.com/konsorten/go-windows-terminal-sequences v1.0.1/go.mod h1:T0+1ngSBFLxvqU3pZ+m/2kptfBszLMUkC4ZK/EgS/cQ=
github.com/konsorten/go-windows-terminal-sequences v1.0.2/go.mod h1:T0+1ngSBFLxvqU3pZ+m/2kptfBszLMUkC4ZK/EgS/cQ=
github.com/konsorten/go-windows-terminal-sequences v1.0.3/go.mod h1:T0+1ngSBFLxvqU3pZ+m/2kptfBszLMUkC4ZK/EgS/cQ=
-github.com/koron/go-ssdp v0.0.0-20191105050749-2e1c40ed0b5d/go.mod h1:5Ky9EC2xfoUKUor0Hjgi2BJhCSXJfMOFlmyYrVKGQMk=
-github.com/koron/go-ssdp v0.0.3 h1:JivLMY45N76b4p/vsWGOKewBQu6uf39y8l+AQ7sDKx8=
-github.com/koron/go-ssdp v0.0.3/go.mod h1:b2MxI6yh02pKrsyNoQUsk4+YNikaGhe4894J+Q5lDvA=
+github.com/koron/go-ssdp v0.0.4 h1:1IDwrghSKYM7yLf7XCzbByg2sJ/JcNOZRXS2jczTwz0=
+github.com/koron/go-ssdp v0.0.4/go.mod h1:oDXq+E5IL5q0U8uSBcoAXzTzInwy5lEgC91HoKtbmZk=
github.com/kortschak/utter v1.0.1/go.mod h1:vSmSjbyrlKjjsL71193LmzBOKgwePk9DH6uFaWHIInc=
github.com/kr/fs v0.1.0/go.mod h1:FFnZGqtBN9Gxj7eW1uZ42v5BccTP0vu6NEaFoC2HwRg=
github.com/kr/logfmt v0.0.0-20140226030751-b84e30acd515/go.mod h1:+0opPa2QZZtGFBFZlji/RkVcI2GknAs/DXo4wKdlNEc=
@@ -886,10 +852,11 @@ github.com/kr/pty v1.1.5/go.mod h1:9r2w37qlBe7rQ6e1fg1S/9xpWHSnaqNdHD3WcMdbPDA=
github.com/kr/text v0.1.0/go.mod h1:4Jbv+DJW3UT/LiOwJeYQe1efqtUx/iVham/4vfdArNI=
github.com/kr/text v0.2.0 h1:5Nx0Ya0ZqY2ygV366QzturHI13Jq95ApcVaJBhpS+AY=
github.com/kr/text v0.2.0/go.mod h1:eLer722TekiGuMkidMxC/pM04lWEeraHUUmBw8l2grE=
-github.com/kubeedge/beehive v0.0.0-20201125122335-cd19bca6e436 h1:HbC6FyGrtyNpcg2jPXhoYvf0Dtj+wkgz/Ydn0cifRxo=
github.com/kubeedge/beehive v0.0.0-20201125122335-cd19bca6e436/go.mod h1:98VgUi/n7HZkxT3Q7Lak75kPtIRRrWam02BgqgT0tkE=
-github.com/kubeedge/kubeedge v1.11.1 h1:d136Nt1yCH/6I48BywtnyeaTio4P0vu774H/MKN3sOY=
-github.com/kubeedge/kubeedge v1.11.1/go.mod h1:GMhBb1+Nv4PuYzI80cF+jT5Q3PlPSIY+gifphUNgQPg=
+github.com/kubeedge/beehive v1.13.0 h1:W6cZbBNwBqQkF1zLG+fwAxhX3VJ6oZVzS5rswHdVY4I=
+github.com/kubeedge/beehive v1.13.0/go.mod h1:eF8k32OLF9Jvzpjt/6xQrwU49JFRwvClung8ln/6STg=
+github.com/kubeedge/kubeedge v1.12.7 h1:6iY/ewOskQIoyGhOsod8vecMsKycFkDFYz2fTdsmKEg=
+github.com/kubeedge/kubeedge v1.12.7/go.mod h1:V92wOYSS6DcWcGNDc5YaH/b+iVvGYtPwEcQja82RyxQ=
github.com/kubeedge/viaduct v0.0.0-20210601015050-d832643a3d35 h1:mzkQK9je7aGPOqNUeWlkehxFAKgD0XWDRxY3p8d88qM=
github.com/kubeedge/viaduct v0.0.0-20210601015050-d832643a3d35/go.mod h1:9YFeEaWK8WdVl+sjcQlhuRe1rEhWz3Nu/l29CKmL+EY=
github.com/kubernetes-csi/csi-lib-utils v0.6.1/go.mod h1:GVmlUmxZ+SUjVLXicRFjqWUUvWez0g0Y78zNV9t7KfQ=
@@ -899,59 +866,35 @@ github.com/lib/pq v1.0.0/go.mod h1:5WUZQaWbwv1U+lTReE5YruASi9Al49XbQIvNi/34Woo=
github.com/lib/pq v1.2.0/go.mod h1:5WUZQaWbwv1U+lTReE5YruASi9Al49XbQIvNi/34Woo=
github.com/lib/pq v1.10.0/go.mod h1:AlVN5x4E4T544tWzH6hKfbfQvm3HdbOxrmggDNAPY9o=
github.com/libopenstorage/openstorage v1.0.0/go.mod h1:Sp1sIObHjat1BeXhfMqLZ14wnOzEhNx2YQedreMcUyc=
-github.com/libp2p/go-buffer-pool v0.0.1/go.mod h1:xtyIz9PMobb13WaxR6Zo1Pd1zXJKYg0a8KiIvDp3TzQ=
-github.com/libp2p/go-buffer-pool v0.0.2/go.mod h1:MvaB6xw5vOrDl8rYZGLFdKAuk/hRoRZd1Vi32+RXyFM=
github.com/libp2p/go-buffer-pool v0.1.0 h1:oK4mSFcQz7cTQIfqbe4MIj9gLW+mnanjyFtc6cdF0Y8=
github.com/libp2p/go-buffer-pool v0.1.0/go.mod h1:N+vh8gMqimBzdKkSMVuydVDq+UV5QTWy5HSiZacSbPg=
github.com/libp2p/go-cidranger v1.1.0 h1:ewPN8EZ0dd1LSnrtuwd4709PXVcITVeuwbag38yPW7c=
github.com/libp2p/go-cidranger v1.1.0/go.mod h1:KWZTfSr+r9qEo9OkI9/SIEeAtw+NNoU0dXIXt15Okic=
-github.com/libp2p/go-flow-metrics v0.0.2/go.mod h1:HeoSNUrOJVK1jEpDqVEiUOIXqhbnS27omG0uWU5slZs=
-github.com/libp2p/go-flow-metrics v0.0.3/go.mod h1:HeoSNUrOJVK1jEpDqVEiUOIXqhbnS27omG0uWU5slZs=
github.com/libp2p/go-flow-metrics v0.1.0 h1:0iPhMI8PskQwzh57jB9WxIuIOQ0r+15PChFGkx3Q3WM=
github.com/libp2p/go-flow-metrics v0.1.0/go.mod h1:4Xi8MX8wj5aWNDAZttg6UPmc0ZrnFNsMtpsYUClFtro=
-github.com/libp2p/go-libp2p v0.22.0 h1:2Tce0kHOp5zASFKJbNzRElvh0iZwdtG5uZheNW8chIw=
-github.com/libp2p/go-libp2p v0.22.0/go.mod h1:UDolmweypBSjQb2f7xutPnwZ/fxioLbMBxSjRksxxU4=
-github.com/libp2p/go-libp2p-asn-util v0.0.0-20200825225859-85005c6cf052/go.mod h1:nRMRTab+kZuk0LnKZpxhOVH/ndsdr2Nr//Zltc/vwgo=
-github.com/libp2p/go-libp2p-asn-util v0.2.0 h1:rg3+Os8jbnO5DxkC7K/Utdi+DkY3q/d1/1q+8WeNAsw=
-github.com/libp2p/go-libp2p-asn-util v0.2.0/go.mod h1:WoaWxbHKBymSN41hWSq/lGKJEca7TNm58+gGJi2WsLI=
-github.com/libp2p/go-libp2p-core v0.2.5/go.mod h1:6+5zJmKhsf7yHn1RbmYDu08qDUpIUxGdqHuEZckmZOA=
-github.com/libp2p/go-libp2p-core v0.5.3/go.mod h1:uN7L2D4EvPCvzSH5SrhR72UWbnSGpt5/a35Sm4upn4Y=
-github.com/libp2p/go-libp2p-core v0.5.4/go.mod h1:uN7L2D4EvPCvzSH5SrhR72UWbnSGpt5/a35Sm4upn4Y=
-github.com/libp2p/go-libp2p-core v0.6.1/go.mod h1:FfewUH/YpvWbEB+ZY9AQRQ4TAD8sJBt/G1rVvhz5XT8=
-github.com/libp2p/go-libp2p-core v0.20.0 h1:PGKM74+T+O/FaZNARNW32i90RMBHCcgd/hkum2UQ5eY=
-github.com/libp2p/go-libp2p-core v0.20.0/go.mod h1:6zR8H7CvQWgYLsbG4on6oLNSGcyKaYFSEYyDt51+bIY=
-github.com/libp2p/go-libp2p-kad-dht v0.18.0 h1:akqO3gPMwixR7qFSFq70ezRun97g5hrA/lBW9jrjUYM=
-github.com/libp2p/go-libp2p-kad-dht v0.18.0/go.mod h1:Gb92MYIPm3K2pJLGn8wl0m8wiKDvHrYpg+rOd0GzzPA=
-github.com/libp2p/go-libp2p-kbucket v0.4.7 h1:spZAcgxifvFZHBD8tErvppbnNiKA5uokDu3CV7axu70=
-github.com/libp2p/go-libp2p-kbucket v0.4.7/go.mod h1:XyVo99AfQH0foSf176k4jY1xUJ2+jUJIZCSDm7r2YKk=
-github.com/libp2p/go-libp2p-peerstore v0.2.6/go.mod h1:ss/TWTgHZTMpsU/oKVVPQCGuDHItOpf2W8RxAi50P2s=
-github.com/libp2p/go-libp2p-peerstore v0.8.0 h1:bzTG693TA1Ju/zKmUCQzDLSqiJnyRFVwPpuloZ/OZtI=
-github.com/libp2p/go-libp2p-record v0.1.2/go.mod h1:pal0eNcT5nqZaTV7UGhqeGqxFgGdsU/9W//C8dqjQDk=
+github.com/libp2p/go-libp2p v0.28.2 h1:lO/g0ccVru6nUVHyLE7C1VRr7B2AFp9cvHhf+l+Te6w=
+github.com/libp2p/go-libp2p v0.28.2/go.mod h1:fOLgCNgLiWFdmtXyQBwmuCpukaYOA+yw4rnBiScDNmI=
+github.com/libp2p/go-libp2p-asn-util v0.3.0 h1:gMDcMyYiZKkocGXDQ5nsUQyquC9+H+iLEQHwOCZ7s8s=
+github.com/libp2p/go-libp2p-asn-util v0.3.0/go.mod h1:B1mcOrKUE35Xq/ASTmQ4tN3LNzVVaMNmq2NACuqyB9w=
+github.com/libp2p/go-libp2p-kad-dht v0.21.0 h1:J0Yd22VA+sk0CJRGMgtfHvLVIkZDyJ3AJGiljywIw5U=
+github.com/libp2p/go-libp2p-kad-dht v0.21.0/go.mod h1:Bhm9diAFmc6qcWAr084bHNL159srVZRKADdp96Qqd1I=
+github.com/libp2p/go-libp2p-kbucket v0.5.0 h1:g/7tVm8ACHDxH29BGrpsQlnNeu+6OF1A9bno/4/U1oA=
+github.com/libp2p/go-libp2p-kbucket v0.5.0/go.mod h1:zGzGCpQd78b5BNTDGHNDLaTt9aDK/A02xeZp9QeFC4U=
github.com/libp2p/go-libp2p-record v0.2.0 h1:oiNUOCWno2BFuxt3my4i1frNrt7PerzB3queqa1NkQ0=
github.com/libp2p/go-libp2p-record v0.2.0/go.mod h1:I+3zMkvvg5m2OcSdoL0KPljyJyvNDFGKX7QdlpYUcwk=
-github.com/libp2p/go-libp2p-routing-helpers v0.2.3 h1:xY61alxJ6PurSi+MXbywZpelvuU4U4p/gPTxjqCqTzY=
-github.com/libp2p/go-libp2p-routing-helpers v0.2.3/go.mod h1:795bh+9YeoFl99rMASoiVgHdi5bjack0N1+AFAdbvBw=
-github.com/libp2p/go-libp2p-testing v0.11.0 h1:+R7FRl/U3Y00neyBSM2qgDzqz3HkWH24U9nMlascHL4=
-github.com/libp2p/go-maddr-filter v0.1.0/go.mod h1:VzZhTXkMucEGGEOSKddrwGiOv0tUhgnKqNEmIAz/bPU=
-github.com/libp2p/go-msgio v0.0.4/go.mod h1:63lBBgOTDKQL6EWazRMCwXsEeEeK9O2Cd+0+6OOuipQ=
-github.com/libp2p/go-msgio v0.0.6/go.mod h1:4ecVB6d9f4BDSL5fqvPiC4A3KivjWn+Venn/1ALLMWA=
-github.com/libp2p/go-msgio v0.2.0 h1:W6shmB+FeynDrUVl2dgFQvzfBZcXiyqY4VmpQLu9FqU=
-github.com/libp2p/go-msgio v0.2.0/go.mod h1:dBVM1gW3Jk9XqHkU4eKdGvVHdLa51hoGfll6jMJMSlY=
-github.com/libp2p/go-nat v0.1.0 h1:MfVsH6DLcpa04Xr+p8hmVRG4juse0s3J8HyNWYHffXg=
-github.com/libp2p/go-nat v0.1.0/go.mod h1:X7teVkwRHNInVNWQiO/tAiAVRwSr5zoRz4YSTC3uRBM=
-github.com/libp2p/go-netroute v0.1.2/go.mod h1:jZLDV+1PE8y5XxBySEBgbuVAXbhtuHSdmLPL2n9MKbk=
-github.com/libp2p/go-netroute v0.2.0 h1:0FpsbsvuSnAhXFnCY0VLFbJOzaK0VnP0r1QT/o4nWRE=
-github.com/libp2p/go-netroute v0.2.0/go.mod h1:Vio7LTzZ+6hoT4CMZi5/6CpY3Snzh2vgZhWgxMNwlQI=
-github.com/libp2p/go-openssl v0.0.3/go.mod h1:unDrJpgy3oFr+rqXsarWifmJuNnJR4chtO1HmaZjggc=
-github.com/libp2p/go-openssl v0.0.4/go.mod h1:unDrJpgy3oFr+rqXsarWifmJuNnJR4chtO1HmaZjggc=
-github.com/libp2p/go-openssl v0.0.7/go.mod h1:unDrJpgy3oFr+rqXsarWifmJuNnJR4chtO1HmaZjggc=
-github.com/libp2p/go-openssl v0.1.0 h1:LBkKEcUv6vtZIQLVTegAil8jbNpJErQ9AnT+bWV+Ooo=
-github.com/libp2p/go-openssl v0.1.0/go.mod h1:OiOxwPpL3n4xlenjx2h7AwSGaFSC/KZvf6gNdOBQMtc=
-github.com/libp2p/go-reuseport v0.2.0 h1:18PRvIMlpY6ZK85nIAicSBuXXvrYoSw3dsBAR7zc560=
-github.com/libp2p/go-reuseport v0.2.0/go.mod h1:bvVho6eLMm6Bz5hmU0LYN3ixd3nPPvtIlaURZZgOY4k=
-github.com/libp2p/go-sockaddr v0.0.2/go.mod h1:syPvOmNs24S3dFVGJA1/mrqdeijPxLV2Le3BRLKd68k=
-github.com/libp2p/go-yamux/v3 v3.1.2 h1:lNEy28MBk1HavUAlzKgShp+F6mn/ea1nDYWftZhFW9Q=
-github.com/libp2p/go-yamux/v3 v3.1.2/go.mod h1:jeLEQgLXqE2YqX1ilAClIfCMDY+0uXQUKmmb/qp0gT4=
+github.com/libp2p/go-libp2p-routing-helpers v0.4.0 h1:b7y4aixQ7AwbqYfcOQ6wTw8DQvuRZeTAA0Od3YYN5yc=
+github.com/libp2p/go-libp2p-routing-helpers v0.4.0/go.mod h1:dYEAgkVhqho3/YKxfOEGdFMIcWfAFNlZX8iAIihYA2E=
+github.com/libp2p/go-libp2p-testing v0.12.0 h1:EPvBb4kKMWO29qP4mZGyhVzUyR25dvfUIK5WDu6iPUA=
+github.com/libp2p/go-msgio v0.3.0 h1:mf3Z8B1xcFN314sWX+2vOTShIE0Mmn2TXn3YCUQGNj0=
+github.com/libp2p/go-msgio v0.3.0/go.mod h1:nyRM819GmVaF9LX3l03RMh10QdOroF++NBbxAb0mmDM=
+github.com/libp2p/go-nat v0.2.0 h1:Tyz+bUFAYqGyJ/ppPPymMGbIgNRH+WqC5QrT5fKrrGk=
+github.com/libp2p/go-nat v0.2.0/go.mod h1:3MJr+GRpRkyT65EpVPBstXLvOlAPzUVlG6Pwg9ohLJk=
+github.com/libp2p/go-netroute v0.2.1 h1:V8kVrpD8GK0Riv15/7VN6RbUQ3URNZVosw7H2v9tksU=
+github.com/libp2p/go-netroute v0.2.1/go.mod h1:hraioZr0fhBjG0ZRXJJ6Zj2IVEVNx6tDTFQfSmcq7mQ=
+github.com/libp2p/go-reuseport v0.3.0 h1:iiZslO5byUYZEg9iCwJGf5h+sf1Agmqx2V2FDjPyvUw=
+github.com/libp2p/go-reuseport v0.3.0/go.mod h1:laea40AimhtfEqysZ71UpYj4S+R9VpH8PgqLo7L+SwI=
+github.com/libp2p/go-yamux/v4 v4.0.0 h1:+Y80dV2Yx/kv7Y7JKu0LECyVdMXm1VUoko+VQ9rBfZQ=
+github.com/libp2p/go-yamux/v4 v4.0.0/go.mod h1:NWjl8ZTLOGlozrXSOZ/HlfG++39iKNnM5wwmtQP1YB4=
github.com/libp2p/zeroconf/v2 v2.2.0 h1:Cup06Jv6u81HLhIj1KasuNM/RHHrJ8T7wOTS4+Tv53Q=
github.com/libp2p/zeroconf/v2 v2.2.0/go.mod h1:fuJqLnUwZTshS3U/bMRJ3+ow/v9oid1n0DmyYyNO1Xs=
github.com/liggitt/tabwriter v0.0.0-20181228230101-89fcab3d43de/go.mod h1:zAbeS9B/r2mtpb6U+EI2rYA5OAXxsYw6wTamcNW+zcE=
@@ -960,15 +903,12 @@ github.com/lightstep/lightstep-tracer-go v0.18.1/go.mod h1:jlF1pusYV4pidLvZ+XD0U
github.com/lithammer/dedent v1.1.0/go.mod h1:jrXYCQtgg0nJiN+StA2KgR7w6CiQNv9Fd/Z9BP0jIOc=
github.com/lpabon/godbc v0.1.1/go.mod h1:Jo9QV0cf3U6jZABgiJ2skINAXb9j8m51r07g4KI92ZA=
github.com/lucas-clemente/quic-go v0.19.2/go.mod h1:ZUygOqIoai0ASXXLJ92LTnKdbqh9MHCLTX6Nr1jUrK0=
-github.com/lucas-clemente/quic-go v0.28.1 h1:Uo0lvVxWg5la9gflIF9lwa39ONq85Xq2D91YNEIslzU=
-github.com/lucas-clemente/quic-go v0.28.1/go.mod h1:oGz5DKK41cJt5+773+BSO9BXDsREY4HLf7+0odGAPO0=
github.com/lunixbochs/vtclean v1.0.0/go.mod h1:pHhQNgMf3btfWnGBVipUOjRYhoOsdGqdm/+2c2E2WMI=
github.com/lyft/protoc-gen-validate v0.0.13/go.mod h1:XbGvPuh87YZc5TdIa2/I4pLk0QoUACkjt2znoq26NVQ=
github.com/magiconair/properties v1.8.0/go.mod h1:PppfXfuXeibc/6YijjN8zIbojt8czPbwD3XqdrwzmxQ=
github.com/magiconair/properties v1.8.1/go.mod h1:PppfXfuXeibc/6YijjN8zIbojt8czPbwD3XqdrwzmxQ=
github.com/magiconair/properties v1.8.5/go.mod h1:y3VJvCyxH9uVvJTWEGAELF3aiYNyPKd5NZ3oSwXrF60=
github.com/mailru/easyjson v0.0.0-20160728113105-d5b7844b561a/go.mod h1:C1wdFJiN94OJF2b5HbByQZoLdCWB1Yqtg26g4irojpc=
-github.com/mailru/easyjson v0.0.0-20180823135443-60711f1a8329/go.mod h1:C1wdFJiN94OJF2b5HbByQZoLdCWB1Yqtg26g4irojpc=
github.com/mailru/easyjson v0.0.0-20190312143242-1de009706dbe/go.mod h1:C1wdFJiN94OJF2b5HbByQZoLdCWB1Yqtg26g4irojpc=
github.com/mailru/easyjson v0.0.0-20190614124828-94de47d64c63/go.mod h1:C1wdFJiN94OJF2b5HbByQZoLdCWB1Yqtg26g4irojpc=
github.com/mailru/easyjson v0.0.0-20190626092158-b2ccc519800e/go.mod h1:C1wdFJiN94OJF2b5HbByQZoLdCWB1Yqtg26g4irojpc=
@@ -981,28 +921,15 @@ github.com/marstr/guid v1.1.0/go.mod h1:74gB1z2wpxxInTG6yaqA7KrtM0NZ+RbrcqDvYHef
github.com/marten-seemann/qpack v0.2.1/go.mod h1:F7Gl5L1jIgN1D11ucXefiuJS9UMVP2opoCp2jDKb7wc=
github.com/marten-seemann/qtls v0.10.0/go.mod h1:UvMd1oaYDACI99/oZUYLzMCkBXQVT0aGm99sJhbT8hs=
github.com/marten-seemann/qtls-go1-15 v0.1.1/go.mod h1:GyFwywLKkRt+6mfU99csTEY1joMZz5vmB1WNZH3P81I=
-github.com/marten-seemann/qtls-go1-16 v0.1.5 h1:o9JrYPPco/Nukd/HpOHMHZoBDXQqoNtUCmny98/1uqQ=
-github.com/marten-seemann/qtls-go1-16 v0.1.5/go.mod h1:gNpI2Ol+lRS3WwSOtIUUtRwZEQMXjYK+dQSBFbethAk=
-github.com/marten-seemann/qtls-go1-17 v0.1.2 h1:JADBlm0LYiVbuSySCHeY863dNkcpMmDR7s0bLKJeYlQ=
-github.com/marten-seemann/qtls-go1-17 v0.1.2/go.mod h1:C2ekUKcDdz9SDWxec1N/MvcXBpaX9l3Nx67XaR84L5s=
-github.com/marten-seemann/qtls-go1-18 v0.1.2 h1:JH6jmzbduz0ITVQ7ShevK10Av5+jBEKAHMntXmIV7kM=
-github.com/marten-seemann/qtls-go1-18 v0.1.2/go.mod h1:mJttiymBAByA49mhlNZZGrH5u1uXYZJ+RW28Py7f4m4=
-github.com/marten-seemann/qtls-go1-19 v0.1.0-beta.1/go.mod h1:5HTDWtVudo/WFsHKRNuOhWlbdjrfs5JHrYb0wIJqGpI=
-github.com/marten-seemann/qtls-go1-19 v0.1.0 h1:rLFKD/9mp/uq1SYGYuVZhm83wkmU95pK5df3GufyYYU=
-github.com/marten-seemann/qtls-go1-19 v0.1.0/go.mod h1:5HTDWtVudo/WFsHKRNuOhWlbdjrfs5JHrYb0wIJqGpI=
github.com/marten-seemann/tcp v0.0.0-20210406111302-dfbc87cc63fd h1:br0buuQ854V8u83wA0rVZ8ttrq5CpaPZdvrK0LP2lOk=
github.com/marten-seemann/tcp v0.0.0-20210406111302-dfbc87cc63fd/go.mod h1:QuCEs1Nt24+FYQEqAAncTDPJIuGs+LxK1MCiFL25pMU=
github.com/mattn/go-colorable v0.0.9/go.mod h1:9vuHe8Xs5qXnSaW/c/ABM9alt+Vo+STaOChaDxuIBZU=
-github.com/mattn/go-colorable v0.1.1/go.mod h1:FuOcm+DKB9mbwrcAfNl7/TZVBZ6rcnceauSikq3lYCQ=
github.com/mattn/go-isatty v0.0.3/go.mod h1:M+lRXTBqGeGNdLjl/ufCoiOlB5xdOkqRJdNxMWT7Zi4=
github.com/mattn/go-isatty v0.0.4/go.mod h1:M+lRXTBqGeGNdLjl/ufCoiOlB5xdOkqRJdNxMWT7Zi4=
-github.com/mattn/go-isatty v0.0.5/go.mod h1:Iq45c/XA43vh69/j3iqttzPXn0bhXyGjM0Hdxcsrc5s=
github.com/mattn/go-isatty v0.0.14/go.mod h1:7GGIvUiUoEMVVmxf/4nioHXj79iQHKdU27kJ6hsGG94=
-github.com/mattn/go-isatty v0.0.16 h1:bq3VjFmv/sOjHtdEhmkEV4x1AJtvUvOJ2PFAZ5+peKQ=
-github.com/mattn/go-isatty v0.0.16/go.mod h1:kYGgaQfpe5nmfYZH+SKPsOc2e4SrIfOl2e/yFXSvRLM=
+github.com/mattn/go-isatty v0.0.19 h1:JITubQf0MOLdlGRuRq+jtsDlekdYPia9ZFsB8h/APPA=
+github.com/mattn/go-isatty v0.0.19/go.mod h1:W+V8PltTTMOvKvAeJH7IuucS94S2C6jfK/D7dTCTo3Y=
github.com/mattn/go-oci8 v0.1.1/go.mod h1:wjDx6Xm9q7dFtHJvIlrI99JytznLw5wQ4R+9mNXJwGI=
-github.com/mattn/go-pointer v0.0.1 h1:n+XhsuGeVO6MEAp7xyEukFINEa+Quek5psIR/ylA6o0=
-github.com/mattn/go-pointer v0.0.1/go.mod h1:2zXcozF6qYGgmsG+SeTZz3oAbFLdD3OWqnUbNvJZAlc=
github.com/mattn/go-runewidth v0.0.2/go.mod h1:LwmH8dsx7+W8Uxz3IHJYH5QSwggIsqBzpuz5H//U1FU=
github.com/mattn/go-runewidth v0.0.7/go.mod h1:H031xJmbD/WCDINGzjvQ9THkh0rPKHF+m2gUSrubnMI=
github.com/mattn/go-runewidth v0.0.9/go.mod h1:H031xJmbD/WCDINGzjvQ9THkh0rPKHF+m2gUSrubnMI=
@@ -1012,17 +939,17 @@ github.com/mattn/go-sqlite3 v1.10.0/go.mod h1:FPy6KqzDD04eiIsT53CuJW3U88zkxoIYsO
github.com/mattn/go-sqlite3 v1.14.6/go.mod h1:NyWgC/yNuGj7Q9rpYnZvas74GogHl5/Z4A/KQRfk6bU=
github.com/mattn/go-sqlite3 v1.14.9/go.mod h1:NyWgC/yNuGj7Q9rpYnZvas74GogHl5/Z4A/KQRfk6bU=
github.com/matttproud/golang_protobuf_extensions v1.0.1/go.mod h1:D8He9yQNgCq6Z5Ld7szi9bcBfOoFv/3dc6xSMkL2PC0=
-github.com/matttproud/golang_protobuf_extensions v1.0.2-0.20181231171920-c182affec369 h1:I0XW9+e1XWDxdcEniV4rQAIOPUGDq67JSCiRCgGCZLI=
github.com/matttproud/golang_protobuf_extensions v1.0.2-0.20181231171920-c182affec369/go.mod h1:BSXmuO+STAnVfrANrmjBb36TMTDstsz7MSK+HVaYKv4=
-github.com/mgutz/ansi v0.0.0-20170206155736-9520e82c474b/go.mod h1:01TrycV0kFyexm33Z7vhZRXopbI8J3TDReVlkTgMUxE=
+github.com/matttproud/golang_protobuf_extensions v1.0.4 h1:mmDVorXM7PCGKw94cs5zkfA9PSy5pEvNWRP0ET0TIVo=
+github.com/matttproud/golang_protobuf_extensions v1.0.4/go.mod h1:BSXmuO+STAnVfrANrmjBb36TMTDstsz7MSK+HVaYKv4=
github.com/microcosm-cc/bluemonday v1.0.1/go.mod h1:hsXNsILzKxV+sX77C5b8FSuKF00vh2OMYv+xgHpAMF4=
github.com/miekg/dns v1.0.14/go.mod h1:W1PPwlIAgtquWBMBEV9nkV9Cazfe8ScdGz/Lj7v3Nrg=
github.com/miekg/dns v1.1.31/go.mod h1:KNUDUusw/aVsxyTYZM1oqvCicbwhgbNgztCETuNZ7xM=
github.com/miekg/dns v1.1.34/go.mod h1:KNUDUusw/aVsxyTYZM1oqvCicbwhgbNgztCETuNZ7xM=
github.com/miekg/dns v1.1.41/go.mod h1:p6aan82bvRIyn+zDIv9xYNUpwa73JcSh9BKwknJysuI=
github.com/miekg/dns v1.1.43/go.mod h1:+evo5L0630/F6ca/Z9+GAqzhjGyn8/c+TBaOyfEl0V4=
-github.com/miekg/dns v1.1.50 h1:DQUfb9uc6smULcREF09Uc+/Gd46YWqJd5DbpPE9xkcA=
-github.com/miekg/dns v1.1.50/go.mod h1:e3IlAVfNqAllflbibAZEWOXOQ+Ynzk/dDozDxY7XnME=
+github.com/miekg/dns v1.1.54 h1:5jon9mWcb0sFJGpnI99tOMhCPyJ+RPVz5b63MQG0VWI=
+github.com/miekg/dns v1.1.54/go.mod h1:uInx36IzPl7FYnDcMeVWxj9byh7DutNykX4G9Sj60FY=
github.com/miekg/pkcs11 v1.0.3/go.mod h1:XsNlhZGX73bx86s2hdc/FuaLm2CPZJemRLMA+WTFxgs=
github.com/mikioh/tcp v0.0.0-20190314235350-803a9b46060c h1:bzE/A84HN25pxAuk9Eej1Kz9OUelF97nAc82bDquQI8=
github.com/mikioh/tcp v0.0.0-20190314235350-803a9b46060c/go.mod h1:0SQS9kMwD2VsyFEB++InYyBJroV/FRmBgcydeSUcJms=
@@ -1032,11 +959,10 @@ github.com/mikioh/tcpopt v0.0.0-20190314235656-172688c1accc h1:PTfri+PuQmWDqERdn
github.com/mikioh/tcpopt v0.0.0-20190314235656-172688c1accc/go.mod h1:cGKTAVKx4SxOuR/czcZ/E2RSJ3sfHs8FpHhQ5CWMf9s=
github.com/mindprince/gonvml v0.0.0-20190828220739-9ebdce4bb989/go.mod h1:2eu9pRWp8mo84xCg6KswZ+USQHjwgRhNp06sozOdsTY=
github.com/minio/blake2b-simd v0.0.0-20160723061019-3f5f724cb5b1/go.mod h1:pD8RvIylQ358TN4wwqatJ8rNavkEINozVn9DtGI3dfQ=
-github.com/minio/sha256-simd v0.0.0-20190131020904-2d45a736cd16/go.mod h1:2FMWW+8GMoPweT6+pI63m9YE3Lmw4J71hV56Chs1E/U=
github.com/minio/sha256-simd v0.1.1-0.20190913151208-6de447530771/go.mod h1:B5e1o+1/KgNmWrSQK08Y6Z1Vb5pwIktudl0J58iy0KM=
-github.com/minio/sha256-simd v0.1.1/go.mod h1:B5e1o+1/KgNmWrSQK08Y6Z1Vb5pwIktudl0J58iy0KM=
-github.com/minio/sha256-simd v1.0.0 h1:v1ta+49hkWZyvaKwrQB8elexRqm6Y0aMLjCNsrYxo6g=
github.com/minio/sha256-simd v1.0.0/go.mod h1:OuYzVNI5vcoYIAmbIvHPl3N3jUzVedXbKy5RFepssQM=
+github.com/minio/sha256-simd v1.0.1 h1:6kaan5IFmwTNynnKKpDHe6FWHohJOHhCPchzK49dzMM=
+github.com/minio/sha256-simd v1.0.1/go.mod h1:Pz6AKMiUdngCLpeTL/RJY1M9rUuPMYujV5xJjtbRSN8=
github.com/mistifyio/go-zfs v2.1.2-0.20190413222219-f784269be439+incompatible/go.mod h1:8AuVvqP/mXw1px98n46wfvcGfQ4ci2FwoAjKYxuo3Z4=
github.com/mitchellh/cli v1.0.0/go.mod h1:hNIlj7HEI86fIcpObd7a0FcrxTWetlwJDGcceTlRvqc=
github.com/mitchellh/cli v1.1.2/go.mod h1:6iaV0fGdElS6dPBx0EApTxHrcWvmJphyh2n8YBLPPZ4=
@@ -1082,46 +1008,36 @@ github.com/mr-tron/base58 v1.2.0 h1:T/HDJBh4ZCPbU39/+c3rRvE0uKBQlU27+QI8LJ4t64o=
github.com/mr-tron/base58 v1.2.0/go.mod h1:BinMc/sQntlIE1frQmRFPUoPA1Zkr8VRgBdjWI2mNwc=
github.com/mrunalp/fileutils v0.5.0/go.mod h1:M1WthSahJixYnrXQl/DFQuteStB1weuxD2QJNHXfbSQ=
github.com/multiformats/go-base32 v0.0.3/go.mod h1:pLiuGC8y0QR3Ue4Zug5UzK9LjgbkL8NSQj0zQ5Nz/AA=
-github.com/multiformats/go-base32 v0.0.4 h1:+qMh4a2f37b4xTNs6mqitDinryCI+tfO2dRVMN9mjSE=
-github.com/multiformats/go-base32 v0.0.4/go.mod h1:jNLFzjPZtp3aIARHbJRZIaPuspdH0J6q39uUM5pnABM=
-github.com/multiformats/go-base36 v0.1.0 h1:JR6TyF7JjGd3m6FbLU2cOxhC0Li8z8dLNGQ89tUg4F4=
-github.com/multiformats/go-base36 v0.1.0/go.mod h1:kFGE83c6s80PklsHO9sRn2NCoffoRdUUOENyW/Vv6sM=
+github.com/multiformats/go-base32 v0.1.0 h1:pVx9xoSPqEIQG8o+UbAe7DNi51oej1NtK+aGkbLYxPE=
+github.com/multiformats/go-base32 v0.1.0/go.mod h1:Kj3tFY6zNr+ABYMqeUNeGvkIC/UYgtWibDcT0rExnbI=
+github.com/multiformats/go-base36 v0.2.0 h1:lFsAbNOGeKtuKozrtBsAkSVhv1p9D0/qedU9rQyccr0=
+github.com/multiformats/go-base36 v0.2.0/go.mod h1:qvnKE++v+2MWCfePClUEjE78Z7P2a1UV0xHgWc0hkp4=
github.com/multiformats/go-multiaddr v0.1.1/go.mod h1:aMKBKNEYmzmDmxfX88/vz+J5IU55txyt0p4aiWVohjo=
github.com/multiformats/go-multiaddr v0.2.0/go.mod h1:0nO36NvPpyV4QzvTLi/lafl2y95ncPj0vFwVF6k6wJ4=
-github.com/multiformats/go-multiaddr v0.2.1/go.mod h1:s/Apk6IyxfvMjDafnhJgJ3/46z7tZ04iMk5wP4QMGGE=
-github.com/multiformats/go-multiaddr v0.2.2/go.mod h1:NtfXiOtHvghW9KojvtySjH5y0u0xW5UouOmQQrn6a3Y=
-github.com/multiformats/go-multiaddr v0.3.0/go.mod h1:dF9kph9wfJ+3VLAaeBqo9Of8x4fJxp6ggJGteB8HQTI=
-github.com/multiformats/go-multiaddr v0.3.1/go.mod h1:uPbspcUPd5AfaP6ql3ujFY+QWzmBD8uLLL4bXW0XfGc=
-github.com/multiformats/go-multiaddr v0.6.0 h1:qMnoOPj2s8xxPU5kZ57Cqdr0hHhARz7mFsPMIiYNqzg=
-github.com/multiformats/go-multiaddr v0.6.0/go.mod h1:F4IpaKZuPP360tOMn2Tpyu0At8w23aRyVqeK0DbFeGM=
+github.com/multiformats/go-multiaddr v0.9.0 h1:3h4V1LHIk5w4hJHekMKWALPXErDfz/sggzwC/NcqbDQ=
+github.com/multiformats/go-multiaddr v0.9.0/go.mod h1:mI67Lb1EeTOYb8GQfL/7wpIZwc46ElrvzhYnoJOmTT0=
github.com/multiformats/go-multiaddr-dns v0.3.1 h1:QgQgR+LQVt3NPTjbrLLpsaT2ufAA2y0Mkk+QRVJbW3A=
github.com/multiformats/go-multiaddr-dns v0.3.1/go.mod h1:G/245BRQ6FJGmryJCrOuTdB37AMA5AMOVuO6NY3JwTk=
github.com/multiformats/go-multiaddr-fmt v0.1.0 h1:WLEFClPycPkp4fnIzoFoV9FVd49/eQsuaL3/CWe167E=
github.com/multiformats/go-multiaddr-fmt v0.1.0/go.mod h1:hGtDIW4PU4BqJ50gW2quDuPVjyWNZxToGUh/HwTZYJo=
-github.com/multiformats/go-multiaddr-net v0.1.4/go.mod h1:ilNnaM9HbmVFqsb/qcNysjCu4PVONlrBZpHIrw/qQuA=
-github.com/multiformats/go-multiaddr-net v0.2.0/go.mod h1:gGdH3UXny6U3cKKYCvpXI5rnK7YaOIEOPVDI9tsJbEA=
github.com/multiformats/go-multibase v0.0.1/go.mod h1:bja2MqRZ3ggyXtZSEDKpl0uO/gviWFaSteVbWT51qgs=
-github.com/multiformats/go-multibase v0.0.3/go.mod h1:5+1R4eQrT3PkYZ24C3W2Ue2tPwIdYQD509ZjSb5y9Oc=
-github.com/multiformats/go-multibase v0.1.1 h1:3ASCDsuLX8+j4kx58qnJ4YFq/JWTJpCyDW27ztsVTOI=
-github.com/multiformats/go-multibase v0.1.1/go.mod h1:ZEjHE+IsUrgp5mhlEAYjMtZwK1k4haNkcaPg9aoe1a8=
-github.com/multiformats/go-multicodec v0.5.0 h1:EgU6cBe/D7WRwQb1KmnBvU7lrcFGMggZVTPtOW9dDHs=
-github.com/multiformats/go-multicodec v0.5.0/go.mod h1:DiY2HFaEp5EhEXb/iYzVAunmyX/aSFMxq2KMKfWEues=
-github.com/multiformats/go-multihash v0.0.1/go.mod h1:w/5tugSrLEbWqlcgJabL3oHFKTwfvkofsjW2Qa1ct4U=
+github.com/multiformats/go-multibase v0.2.0 h1:isdYCVLvksgWlMW9OZRYJEa9pZETFivncJHmHnnd87g=
+github.com/multiformats/go-multibase v0.2.0/go.mod h1:bFBZX4lKCA/2lyOFSAoKH5SS6oPyjtnzK/XTFDPkNuk=
+github.com/multiformats/go-multicodec v0.9.0 h1:pb/dlPnzee/Sxv/j4PmkDRxCOi3hXTz3IbPKOXWJkmg=
+github.com/multiformats/go-multicodec v0.9.0/go.mod h1:L3QTQvMIaVBkXOXXtVmYE+LI16i14xuaojr/H7Ai54k=
github.com/multiformats/go-multihash v0.0.8/go.mod h1:YSLudS+Pi8NHE7o6tb3D8vrpKa63epEDmG8nTduyAew=
-github.com/multiformats/go-multihash v0.0.9/go.mod h1:YSLudS+Pi8NHE7o6tb3D8vrpKa63epEDmG8nTduyAew=
github.com/multiformats/go-multihash v0.0.10/go.mod h1:YSLudS+Pi8NHE7o6tb3D8vrpKa63epEDmG8nTduyAew=
github.com/multiformats/go-multihash v0.0.13/go.mod h1:VdAWLKTwram9oKAatUcLxBNUjdtcVwxObEQBtRfuyjc=
-github.com/multiformats/go-multihash v0.0.14/go.mod h1:VdAWLKTwram9oKAatUcLxBNUjdtcVwxObEQBtRfuyjc=
github.com/multiformats/go-multihash v0.0.15/go.mod h1:D6aZrWNLFTV/ynMpKsNtB40mJzmCl4jb1alC0OvHiHg=
-github.com/multiformats/go-multihash v0.2.1 h1:aem8ZT0VA2nCHHk7bPJ1BjUbHNciqZC/d16Vve9l108=
-github.com/multiformats/go-multihash v0.2.1/go.mod h1:WxoMcYG85AZVQUyRyo9s4wULvW5qrI9vb2Lt6evduFc=
-github.com/multiformats/go-multistream v0.3.3 h1:d5PZpjwRgVlbwfdTDjife7XszfZd8KYWfROYFlGcR8o=
-github.com/multiformats/go-multistream v0.3.3/go.mod h1:ODRoqamLUsETKS9BNcII4gcRsJBU5VAwRIv7O39cEXg=
+github.com/multiformats/go-multihash v0.2.2 h1:Uu7LWs/PmWby1gkj1S1DXx3zyd3aVabA4FiMKn/2tAc=
+github.com/multiformats/go-multihash v0.2.2/go.mod h1:dXgKXCXjBzdscBLk9JkjINiEsCKRVch90MdaGiKsvSM=
+github.com/multiformats/go-multistream v0.4.1 h1:rFy0Iiyn3YT0asivDUIR05leAdwZq3de4741sbiSdfo=
+github.com/multiformats/go-multistream v0.4.1/go.mod h1:Mz5eykRVAjJWckE2U78c6xqdtyNUEhKSM0Lwar2p77Q=
github.com/multiformats/go-varint v0.0.1/go.mod h1:3Ls8CIEsrijN6+B7PbrXRPxHRPuXSrVKRY101jdMZYE=
-github.com/multiformats/go-varint v0.0.2/go.mod h1:3Ls8CIEsrijN6+B7PbrXRPxHRPuXSrVKRY101jdMZYE=
github.com/multiformats/go-varint v0.0.5/go.mod h1:3Ls8CIEsrijN6+B7PbrXRPxHRPuXSrVKRY101jdMZYE=
-github.com/multiformats/go-varint v0.0.6 h1:gk85QWKxh3TazbLxED/NlDVv8+q+ReFJk7Y2W/KhfNY=
github.com/multiformats/go-varint v0.0.6/go.mod h1:3Ls8CIEsrijN6+B7PbrXRPxHRPuXSrVKRY101jdMZYE=
+github.com/multiformats/go-varint v0.0.7 h1:sWSGR+f/eu5ABZA2ZpYKBILXTTs9JWpdEM/nEGOHFS8=
+github.com/multiformats/go-varint v0.0.7/go.mod h1:r8PUYw/fD/SjBCiKOoDlGF6QawOELpZAu9eioSos/OU=
github.com/munnerz/goautoneg v0.0.0-20120707110453-a547fc61f48d/go.mod h1:+n7T8mK8HuQTcFwEeznm/DIxMOiR9yIdICNftLE1DvQ=
github.com/munnerz/goautoneg v0.0.0-20191010083416-a7dc8b61c822/go.mod h1:+n7T8mK8HuQTcFwEeznm/DIxMOiR9yIdICNftLE1DvQ=
github.com/mvdan/xurls v1.1.0/go.mod h1:tQlNn3BED8bE/15hnSL2HLkDeLWpNPAwtw7wkEq44oU=
@@ -1157,10 +1073,12 @@ github.com/onsi/ginkgo v1.10.3/go.mod h1:lLunBs/Ym6LB5Z9jYTR76FiuTmxDTDusOGeTQH+
github.com/onsi/ginkgo v1.11.0/go.mod h1:lLunBs/Ym6LB5Z9jYTR76FiuTmxDTDusOGeTQH+WWjE=
github.com/onsi/ginkgo v1.12.1/go.mod h1:zj2OWP4+oCPe1qIXoGWkgMRwljMUYCdkwsT2108oapk=
github.com/onsi/ginkgo v1.14.0/go.mod h1:iSB4RoI2tjJc9BBv4NKIKWKya62Rps+oPG/Lv9klQyY=
-github.com/onsi/ginkgo v1.16.2/go.mod h1:CObGmKUOKaSC0RjmoAK7tKyn4Azo5P2IWuoMnvwxz1E=
github.com/onsi/ginkgo v1.16.4/go.mod h1:dX+/inL/fNMqNlz0e9LfyB9TswhZpCVdJM/Z6Vvnwo0=
github.com/onsi/ginkgo v1.16.5 h1:8xi0RTUf59SOSfEtZMvwTvXYMzG4gV23XVHOZiXNtnE=
-github.com/onsi/ginkgo v1.16.5/go.mod h1:+E8gABHa3K6zRBolWtd+ROzc/U5bkGt0FwiG042wbpU=
+github.com/onsi/ginkgo/v2 v2.1.3/go.mod h1:vw5CSIxN1JObi/U8gcbwft7ZxR2dgaR70JSE3/PpL4c=
+github.com/onsi/ginkgo/v2 v2.1.4/go.mod h1:um6tUpWM/cxCK3/FK8BXqEiUMUwRgSM4JXG47RKZmLU=
+github.com/onsi/ginkgo/v2 v2.9.7 h1:06xGQy5www2oN160RtEZoTvnP2sPhEfePYmCDc2szss=
+github.com/onsi/ginkgo/v2 v2.9.7/go.mod h1:cxrmXWykAwTwhQsJOPfdIDiJ+l2RYq7U8hFU+M/1uw0=
github.com/onsi/gomega v0.0.0-20151007035656-2152b45fa28a/go.mod h1:C1qb7wdrVGGVU+Z6iS04AVkA3Q65CEZX59MT0QO5uiA=
github.com/onsi/gomega v0.0.0-20170829124025-dcabb60a477c/go.mod h1:C1qb7wdrVGGVU+Z6iS04AVkA3Q65CEZX59MT0QO5uiA=
github.com/onsi/gomega v1.4.3/go.mod h1:ex+gbHU/CVuBBDIJjb2X0qEXbFg53c61hWP/1CpauHY=
@@ -1168,10 +1086,11 @@ github.com/onsi/gomega v1.7.0/go.mod h1:ex+gbHU/CVuBBDIJjb2X0qEXbFg53c61hWP/1Cpa
github.com/onsi/gomega v1.7.1/go.mod h1:XdKZgCCFLUoM/7CFJVPcG8C1xQ1AJ0vpAezJrB7JYyY=
github.com/onsi/gomega v1.10.1/go.mod h1:iN09h71vgCQne3DLsj+A5owkum+a2tYe+TOCB1ybHNo=
github.com/onsi/gomega v1.10.3/go.mod h1:V9xEwhxec5O8UDM77eCW8vLymOMltsqPVYWrpDsH8xc=
-github.com/onsi/gomega v1.13.0/go.mod h1:lRk9szgn8TxENtWd0Tp4c3wjlRfMTMH27I+3Je41yGY=
github.com/onsi/gomega v1.15.0/go.mod h1:cIuvLEne0aoVhAgh/O6ac0Op8WWw9H6eYCriF+tEHG0=
-github.com/onsi/gomega v1.16.0 h1:6gjqkI8iiRHMvdccRJM8rVKjCWk6ZIm6FTm3ddIe4/c=
-github.com/onsi/gomega v1.16.0/go.mod h1:HnhC7FXeEQY45zxNK3PPoIUhzk/80Xly9PcubAlGdZY=
+github.com/onsi/gomega v1.17.0/go.mod h1:HnhC7FXeEQY45zxNK3PPoIUhzk/80Xly9PcubAlGdZY=
+github.com/onsi/gomega v1.19.0/go.mod h1:LY+I3pBVzYsTBU1AnDwOSxaYi9WoWiqgwooUqq9yPro=
+github.com/onsi/gomega v1.27.7 h1:fVih9JD6ogIiHUN6ePK7HJidyEDpWGVB5mzM7cWNXoU=
+github.com/onsi/gomega v1.27.7/go.mod h1:1p8OOlwo2iUUDsHnOrjE5UKYJ+e3W8eQ3qSlRahPmr4=
github.com/op/go-logging v0.0.0-20160315200505-970db520ece7/go.mod h1:HzydrMdWErDVzsI23lYNej1Htcns9BCg93Dk0bBINWk=
github.com/opencontainers/go-digest v0.0.0-20170106003457-a6d0ee40d420/go.mod h1:cMLVZDEM3+U2I4VmLI6N8jQYUd2OVphdqWwCJHrFt2s=
github.com/opencontainers/go-digest v0.0.0-20180430190053-c9281466c8b2/go.mod h1:cMLVZDEM3+U2I4VmLI6N8jQYUd2OVphdqWwCJHrFt2s=
@@ -1218,7 +1137,6 @@ github.com/openzipkin/zipkin-go v0.2.2 h1:nY8Hti+WKaP0cRsSeQ026wU03QsM762XBeCXBb
github.com/openzipkin/zipkin-go v0.2.2/go.mod h1:NaW6tEwdmWMaCDZzg8sh+IBNOxHMPnhQw8ySjnjRyN4=
github.com/pact-foundation/pact-go v1.0.4/go.mod h1:uExwJY4kCzNPcHRj+hCR/HBbOOIwwtUjcrb0b5/5kLM=
github.com/pascaldekloe/goe v0.0.0-20180627143212-57f6aae5913c/go.mod h1:lzWF7FIEvWOWxwDKqyGYQf6ZUaNfKdP144TG7ZOy1lc=
-github.com/paypal/gatt v0.0.0-20151011220935-4ae819d591cf/go.mod h1:+AwQL2mK3Pd3S+TUwg0tYQjid0q1txyNUJuuSmz8Kdk=
github.com/pbnjay/memory v0.0.0-20210728143218-7b4eea64cf58 h1:onHthvaw9LFnH4t2DcNVpwGmV9E1BkGknEliJkfwQj0=
github.com/pbnjay/memory v0.0.0-20210728143218-7b4eea64cf58/go.mod h1:DXv8WO4yhMYhSNPKjeNKa5WY9YCIEBRbNzFFPJbWO6Y=
github.com/pborman/uuid v1.2.0/go.mod h1:X/NO0urCmaxf9VXbdlT7C2Yzkj2IKimNn4k+gtPdI/k=
@@ -1258,16 +1176,18 @@ github.com/prometheus/client_golang v1.8.0/go.mod h1:O9VU6huf47PktckDQfMTX0Y8tY0
github.com/prometheus/client_golang v1.11.0/go.mod h1:Z6t4BnS23TR94PD6BsDNk8yVqroYurpAkEiz0P2BEV0=
github.com/prometheus/client_golang v1.12.1/go.mod h1:3Z9XVyYiZYEO+YQWt3RD2R3jrbd179Rt297l4aS6nDY=
github.com/prometheus/client_golang v1.12.2/go.mod h1:3Z9XVyYiZYEO+YQWt3RD2R3jrbd179Rt297l4aS6nDY=
-github.com/prometheus/client_golang v1.13.0 h1:b71QUfeo5M8gq2+evJdTPfZhYMAU0uKPkyPJ7TPsloU=
github.com/prometheus/client_golang v1.13.0/go.mod h1:vTeo+zgvILHsnnj/39Ou/1fPN5nJFOEMgftOUOmlvYQ=
+github.com/prometheus/client_golang v1.14.0 h1:nJdhIvne2eSX/XRAFV9PcvFFRbrjbcTUj0VP62TMhnw=
+github.com/prometheus/client_golang v1.14.0/go.mod h1:8vpkKitgIVNcqrRBWh1C4TIUQgYNtG/XQE4E/Zae36Y=
github.com/prometheus/client_model v0.0.0-20171117100541-99fa1f4be8e5/go.mod h1:MbSGuTsp3dbXC40dX6PRTWyKYBIrTGTE9sqQNg2J8bo=
github.com/prometheus/client_model v0.0.0-20180712105110-5c3871d89910/go.mod h1:MbSGuTsp3dbXC40dX6PRTWyKYBIrTGTE9sqQNg2J8bo=
github.com/prometheus/client_model v0.0.0-20190115171406-56726106282f/go.mod h1:MbSGuTsp3dbXC40dX6PRTWyKYBIrTGTE9sqQNg2J8bo=
github.com/prometheus/client_model v0.0.0-20190129233127-fd36f4220a90/go.mod h1:xMI15A0UPsDsEKsMN9yxemIoYk6Tm2C1GtYGdfGttqA=
github.com/prometheus/client_model v0.0.0-20190812154241-14fe0d1b01d4/go.mod h1:xMI15A0UPsDsEKsMN9yxemIoYk6Tm2C1GtYGdfGttqA=
github.com/prometheus/client_model v0.1.0/go.mod h1:xMI15A0UPsDsEKsMN9yxemIoYk6Tm2C1GtYGdfGttqA=
-github.com/prometheus/client_model v0.2.0 h1:uq5h0d+GuxiXLJLNABMgp2qUWDPiLvgCzz2dUR+/W/M=
github.com/prometheus/client_model v0.2.0/go.mod h1:xMI15A0UPsDsEKsMN9yxemIoYk6Tm2C1GtYGdfGttqA=
+github.com/prometheus/client_model v0.4.0 h1:5lQXD3cAg1OXBf4Wq03gTrXHeaV0TQvGfUooCfx1yqY=
+github.com/prometheus/client_model v0.4.0/go.mod h1:oMQmHW1/JoDwqLtg57MGgP/Fb1CJEYF2imWWhWtMkYU=
github.com/prometheus/common v0.0.0-20180110214958-89604d197083/go.mod h1:daVV7qP5qjZbuso7PdcryaAu0sAZbrN9i7WWcTMWvro=
github.com/prometheus/common v0.0.0-20180801064454-c7de2306084e/go.mod h1:daVV7qP5qjZbuso7PdcryaAu0sAZbrN9i7WWcTMWvro=
github.com/prometheus/common v0.0.0-20181113130724-41aa239b4cce/go.mod h1:daVV7qP5qjZbuso7PdcryaAu0sAZbrN9i7WWcTMWvro=
@@ -1303,6 +1223,16 @@ github.com/prometheus/procfs v0.8.0/go.mod h1:z7EfXMXOkbkqb9IINtpCn86r/to3BnA0ua
github.com/prometheus/statsd_exporter v0.22.7 h1:7Pji/i2GuhK6Lu7DHrtTkFmNBCudCPT1pX2CziuyQR0=
github.com/prometheus/statsd_exporter v0.22.7/go.mod h1:N/TevpjkIh9ccs6nuzY3jQn9dFqnUakOjnEuMPJJJnI=
github.com/prometheus/tsdb v0.7.1/go.mod h1:qhTCs0VvXwvX/y3TZrWD7rabWM+ijKTux40TwIPHuXU=
+github.com/quic-go/qpack v0.4.0 h1:Cr9BXA1sQS2SmDUWjSofMPNKmvF6IiIfDRmgU0w1ZCo=
+github.com/quic-go/qpack v0.4.0/go.mod h1:UZVnYIfi5GRk+zI9UMaCPsmZ2xKJP7XBUvVyT1Knj9A=
+github.com/quic-go/qtls-go1-19 v0.3.3 h1:wznEHvJwd+2X3PqftRha0SUKmGsnb6dfArMhy9PeJVE=
+github.com/quic-go/qtls-go1-19 v0.3.3/go.mod h1:ySOI96ew8lnoKPtSqx2BlI5wCpUVPT05RMAlajtnyOI=
+github.com/quic-go/qtls-go1-20 v0.2.3 h1:m575dovXn1y2ATOb1XrRFcrv0F+EQmlowTkoraNkDPI=
+github.com/quic-go/qtls-go1-20 v0.2.3/go.mod h1:JKtK6mjbAVcUTN/9jZpvLbGxvdWIKS8uT7EiStoU1SM=
+github.com/quic-go/quic-go v0.33.0 h1:ItNoTDN/Fm/zBlq769lLJc8ECe9gYaW40veHCCco7y0=
+github.com/quic-go/quic-go v0.33.0/go.mod h1:YMuhaAV9/jIu0XclDXwZPAsP/2Kgr5yMYhe9oxhhOFA=
+github.com/quic-go/webtransport-go v0.5.3 h1:5XMlzemqB4qmOlgIus5zB45AcZ2kCgCy2EptUrfOPWU=
+github.com/quic-go/webtransport-go v0.5.3/go.mod h1:OhmmgJIzTTqXK5xvtuX0oBpLV2GkLWNDA+UeTGJXErU=
github.com/quobyte/api v0.1.8/go.mod h1:jL7lIHrmqQ7yh05OJ+eEEdHr0u/kmT1Ff9iHd+4H6VI=
github.com/raulk/go-watchdog v1.3.0 h1:oUmdlHxdkXRJlwfG0O9omj8ukerm8MEQavSiDTEtBsk=
github.com/raulk/go-watchdog v1.3.0/go.mod h1:fIvOnLbF0b0ZwkB9YU4mOW9Did//4vPZtDqv66NfsMU=
@@ -1320,7 +1250,6 @@ github.com/russross/blackfriday/v2 v2.0.1/go.mod h1:+Rmxgy9KzJVeS9/2gXHxylqXiyQD
github.com/ryanuber/columnize v0.0.0-20160712163229-9b3edd62028f/go.mod h1:sm1tb6uqfes/u+d4ooFouqFdy9/2g9QGwK3SQygK0Ts=
github.com/safchain/ethtool v0.0.0-20190326074333-42ed695e3de8/go.mod h1:Z0q5wiBQGYcxhMZ6gUqHn6pYNLypFAvaL3UvgZLR0U4=
github.com/samuel/go-zookeeper v0.0.0-20190923202752-2cc03de413da/go.mod h1:gi+0XIa01GRL2eRQVjQkKGqKF3SF9vZR/HnPullcV2E=
-github.com/satori/go.uuid v1.2.0 h1:0uYX9dsZ2yD7q2RtLRtPSdGDWzjeM3TbMJP9utgA0ww=
github.com/satori/go.uuid v1.2.0/go.mod h1:dA0hQrYB0VpLJoorglMZABFdXlWrHn1NEOzdhQKdks0=
github.com/sean-/seed v0.0.0-20170313163322-e2103e2c3529/go.mod h1:DxrIzT+xaE7yg65j358z/aeFdxmN0P9QXhEzd20vsDc=
github.com/seccomp/libseccomp-golang v0.9.1/go.mod h1:GbW5+tmTXfcxTToHLXlScSlAvWlF4P2Ca7zGrPiEpWo=
@@ -1368,7 +1297,6 @@ github.com/smartystreets/assertions v1.1.0/go.mod h1:tcbTF8ujkAEcZ8TElKY+i30BzYl
github.com/smartystreets/goconvey v0.0.0-20190330032615-68dc04aab96a/go.mod h1:syvi0/a8iFYH4r/RixwvyeAJjdLS9QV7WQ/tjFTllLA=
github.com/smartystreets/goconvey v1.6.4 h1:fv0U8FUIMPNf1L9lnHLvLhgicrIVChEkdzIKYqbNC9s=
github.com/smartystreets/goconvey v1.6.4/go.mod h1:syvi0/a8iFYH4r/RixwvyeAJjdLS9QV7WQ/tjFTllLA=
-github.com/smola/gocompat v0.2.0/go.mod h1:1B0MlxbmoZNo3h8guHp8HztB3BSYR5itql9qtVc0ypY=
github.com/soheilhy/cmux v0.1.4/go.mod h1:IM3LyeVVIOuxMH7sFAkER9+bJ4dT7Ms6E4xg4kGIyLM=
github.com/soheilhy/cmux v0.1.5/go.mod h1:T7TcVDs9LWfQgPlPsdngu6I6QIoyIFZDDC6sNE1GqG0=
github.com/songgao/water v0.0.0-20200317203138-2b4b6d7c09d8 h1:TG/diQgUe0pntT/2D9tmUCz4VNwm9MfrtPr0SU2qSX8=
@@ -1376,8 +1304,6 @@ github.com/songgao/water v0.0.0-20200317203138-2b4b6d7c09d8/go.mod h1:P5HUIBuIWK
github.com/sony/gobreaker v0.4.1/go.mod h1:ZKptC7FHNvhBz7dN2LGjPVBz2sZJmc0/PkyDJOjmxWY=
github.com/sourcegraph/annotate v0.0.0-20160123013949-f4cad6c6324d/go.mod h1:UdhH50NIW0fCiwBSr0co2m7BnFLdv4fQTgdqdJTHFeE=
github.com/sourcegraph/syntaxhighlight v0.0.0-20170531221838-bd320f5d308e/go.mod h1:HuIsMU8RRBOtsCgI77wP899iHVBQpCmg4ErYMZB+2IA=
-github.com/spacemonkeygo/spacelog v0.0.0-20180420211403-2296661a0572 h1:RC6RW7j+1+HkWaX/Yh71Ee5ZHaHYt7ZP4sQgUrm6cDU=
-github.com/spacemonkeygo/spacelog v0.0.0-20180420211403-2296661a0572/go.mod h1:w0SWMsp6j9O/dk4/ZpIhL+3CkG8ofA2vuv7k+ltqUMc=
github.com/spaolacci/murmur3 v0.0.0-20180118202830-f09979ecbc72/go.mod h1:JwIasOWyU6f++ZhiEuf87xNszmSA2myDM2Kzu9HwQUA=
github.com/spaolacci/murmur3 v1.1.0 h1:7c1g84S4BPRrfL5Xrdp6fOJ206sU9y293DDHaoy0bLI=
github.com/spaolacci/murmur3 v1.1.0/go.mod h1:JwIasOWyU6f++ZhiEuf87xNszmSA2myDM2Kzu9HwQUA=
@@ -1389,7 +1315,6 @@ github.com/spf13/cast v1.3.0/go.mod h1:Qx5cxh0v+4UWYiBimWS+eyWzqEqokIECu5etghLkU
github.com/spf13/cast v1.3.1/go.mod h1:Qx5cxh0v+4UWYiBimWS+eyWzqEqokIECu5etghLkUJE=
github.com/spf13/cobra v0.0.2-0.20171109065643-2da4a54c5cee/go.mod h1:1l0Ry5zgKvJasoi3XT1TypsSe7PqH0Sj9dhYf7v3XqQ=
github.com/spf13/cobra v0.0.3/go.mod h1:1l0Ry5zgKvJasoi3XT1TypsSe7PqH0Sj9dhYf7v3XqQ=
-github.com/spf13/cobra v0.0.5/go.mod h1:3K3wKZymM7VvHMDS9+Akkh4K60UwM26emMESw8tLCHU=
github.com/spf13/cobra v0.0.6/go.mod h1:/6GTrnGXV9HjY+aR4k0oJ5tcvakLuG6EuKReYlHNrgE=
github.com/spf13/cobra v1.0.0/go.mod h1:/6GTrnGXV9HjY+aR4k0oJ5tcvakLuG6EuKReYlHNrgE=
github.com/spf13/cobra v1.1.3/go.mod h1:pGADOWyqRD/YMrPZigI/zbliZ2wVD/23d+is3pSWzOo=
@@ -1403,11 +1328,9 @@ github.com/spf13/pflag v1.0.1/go.mod h1:DYY7MBk1bdzusC3SYhjObp+wFpr4gzcvqqNjLnIn
github.com/spf13/pflag v1.0.3/go.mod h1:DYY7MBk1bdzusC3SYhjObp+wFpr4gzcvqqNjLnInEg4=
github.com/spf13/pflag v1.0.5 h1:iy+VFUOCP1a+8yFto/drg2CJ5u0yRoB7fZw3DKv/JXA=
github.com/spf13/pflag v1.0.5/go.mod h1:McXfInJRrz4CZXVZOBLb0bTZqETkiAhM9Iw0y3An2Bg=
-github.com/spf13/viper v1.3.2/go.mod h1:ZiWeW+zYFKm7srdB9IoDzzZXaJaI5eL9QjNiN/DMA2s=
github.com/spf13/viper v1.4.0/go.mod h1:PTJ7Z/lr49W6bUbkmS1V3by4uWynFiR9p7+dSq/yZzE=
github.com/spf13/viper v1.7.0/go.mod h1:8WkrPz2fc9jxqZNCJI/76HCieCp4Q8HaLFoCha5qpdg=
github.com/spf13/viper v1.8.1/go.mod h1:o0Pch8wJ9BVSWGQMbra6iw0oQ5oktSIBaujf1rJH9Ns=
-github.com/src-d/envconfig v1.0.0/go.mod h1:Q9YQZ7BKITldTBnoxsE5gOeB5y66RyPXeue/R4aaNBc=
github.com/ssdb/gossdb v0.0.0-20180723034631-88f6b59b84ec/go.mod h1:QBvMkMya+gXctz3kmljlUCu/yB3GZ6oee+dUozsezQE=
github.com/stefanberger/go-pkcs11uri v0.0.0-20201008174630-78d3cae3a980/go.mod h1:AO3tvPzVZ/ayst6UlUKUv6rcPQInYe3IknH3jYhAKu8=
github.com/stoewer/go-strcase v1.2.0/go.mod h1:IBiWB2sKIp3wVVQ3Y035++gc+knqhUQag1KpM8ahLw8=
@@ -1418,8 +1341,10 @@ github.com/streadway/handy v0.0.0-20190108123426-d5acb3125c2a/go.mod h1:qNTQ5P5J
github.com/stretchr/objx v0.0.0-20180129172003-8a3f7159479f/go.mod h1:HFkY916IF+rwdDfMAkV7OtwuqBVzrE8GR6GFx+wExME=
github.com/stretchr/objx v0.1.0/go.mod h1:HFkY916IF+rwdDfMAkV7OtwuqBVzrE8GR6GFx+wExME=
github.com/stretchr/objx v0.1.1/go.mod h1:HFkY916IF+rwdDfMAkV7OtwuqBVzrE8GR6GFx+wExME=
-github.com/stretchr/objx v0.2.0 h1:Hbg2NidpLE8veEBkEZTL3CvlkUIVzuU9jDplZO54c48=
github.com/stretchr/objx v0.2.0/go.mod h1:qt09Ya8vawLte6SNmTgCsAVtYtaKzEcn8ATUoHMkEqE=
+github.com/stretchr/objx v0.4.0/go.mod h1:YvHI0jy2hoMjB+UWwv71VJQ9isScKT/TqJzVSSt89Yw=
+github.com/stretchr/objx v0.5.0 h1:1zr/of2m5FGMsad5YfcqgdqdWrIhu+EBEJRhR1U7z/c=
+github.com/stretchr/objx v0.5.0/go.mod h1:Yh+to48EsGEfYuaHDzXPcE3xhTkx73EhmCGUpEOglKo=
github.com/stretchr/testify v0.0.0-20180303142811-b89eecf5ca5d/go.mod h1:a8OnRcib4nhh0OaRAV+Yts87kKdq0PP7pXfy6kDkUVs=
github.com/stretchr/testify v1.2.2/go.mod h1:a8OnRcib4nhh0OaRAV+Yts87kKdq0PP7pXfy6kDkUVs=
github.com/stretchr/testify v1.3.0/go.mod h1:M5WIy9Dh21IEIfnGCwXGc5bZfKNJtfHm1UVUgZn+9EI=
@@ -1427,14 +1352,16 @@ github.com/stretchr/testify v1.4.0/go.mod h1:j7eGeouHqKxXV5pUuKE4zz7dFj8WfuZ+81P
github.com/stretchr/testify v1.5.1/go.mod h1:5W2xD1RspED5o8YsWQXVCued0rvSQ+mT+I5cxcmMvtA=
github.com/stretchr/testify v1.6.1/go.mod h1:6Fq8oRcR53rry900zMqJjRRixrwX3KX962/h/Wwjteg=
github.com/stretchr/testify v1.7.0/go.mod h1:6Fq8oRcR53rry900zMqJjRRixrwX3KX962/h/Wwjteg=
-github.com/stretchr/testify v1.8.0 h1:pSgiaMZlXftHpm5L7V1+rVB+AZJydKsMxsQBIJw4PKk=
+github.com/stretchr/testify v1.7.1/go.mod h1:6Fq8oRcR53rry900zMqJjRRixrwX3KX962/h/Wwjteg=
+github.com/stretchr/testify v1.8.0/go.mod h1:yNjHg4UonilssWZ8iaSj1OCr/vHnekPRkoO+kdMU+MU=
+github.com/stretchr/testify v1.8.1/go.mod h1:w2LPCIKwWwSfY2zedu0+kehJoqGctiVI29o6fzry7u4=
+github.com/stretchr/testify v1.8.2 h1:+h33VjcLVPDHtOdpUCuF+7gSuG3yGIftsP1YvFihtJ8=
github.com/stvp/go-udp-testing v0.0.0-20201019212854-469649b16807/go.mod h1:7jxmlfBCDBXRzr0eAQJ48XC1hBu1np4CS5+cHEYfwpc=
github.com/subosito/gotenv v1.2.0/go.mod h1:N0PQaV/YGNqwC0u51sEeR/aUtSLEXKX9iv69rRypqCw=
github.com/syndtr/gocapability v0.0.0-20170704070218-db04d3cc01c8/go.mod h1:hkRG7XYTFWNJGYcbNJQlaLq0fg1yr4J4t/NcTQtrfww=
github.com/syndtr/gocapability v0.0.0-20180916011248-d98352740cb2/go.mod h1:hkRG7XYTFWNJGYcbNJQlaLq0fg1yr4J4t/NcTQtrfww=
github.com/syndtr/gocapability v0.0.0-20200815063812-42c35b437635/go.mod h1:hkRG7XYTFWNJGYcbNJQlaLq0fg1yr4J4t/NcTQtrfww=
github.com/syndtr/goleveldb v0.0.0-20181127023241-353a9fca669c/go.mod h1:Z4AUp2Km+PwemOoO/VB5AOx9XSsIItzFjoJlOSiYmn0=
-github.com/syndtr/goleveldb v1.0.0/go.mod h1:ZVVdQEZoIme9iO1Ch2Jdy24qqXrMMOU6lpPAyBWyWuQ=
github.com/tarm/serial v0.0.0-20180830185346-98f6abe2eb07/go.mod h1:kDXzergiv9cbyO7IOYJZWg1U88JhDg3PB6klq9Hg2pA=
github.com/tchap/go-patricia v2.2.6+incompatible/go.mod h1:bmLyhP68RS6kStMGxByiQ23RP/odRBOTVjwp2cDyi6I=
github.com/tinylib/msgp v1.1.2 h1:gWmO7n0Ys2RBEb7GPYB9Ujq8Mk5p2U08lRnmMcGy6BQ=
@@ -1445,7 +1372,6 @@ github.com/tmc/grpc-websocket-proxy v0.0.0-20170815181823-89b8d40f7ca8/go.mod h1
github.com/tmc/grpc-websocket-proxy v0.0.0-20190109142713-0ad062ec5ee5/go.mod h1:ncp9v5uamzpCO7NfCPTXjqaC+bZgJeR0sMTm6dMHP7U=
github.com/tmc/grpc-websocket-proxy v0.0.0-20201229170055-e5319fda7802/go.mod h1:ncp9v5uamzpCO7NfCPTXjqaC+bZgJeR0sMTm6dMHP7U=
github.com/ugorji/go v1.1.4/go.mod h1:uQMGLiO92mf5W77hV/PUCpI3pbzQx3CRekS0kk+RGrc=
-github.com/ugorji/go/codec v0.0.0-20181204163529-d75b2dcb6bc8/go.mod h1:VFNgLljTbGfSG7qAOspJ7OScBnGdDN/yBr0sguwnwf0=
github.com/urfave/cli v0.0.0-20171014202726-7bc6a0acffa5/go.mod h1:70zkFmudgCuE/ngEzBv17Jvp/497gISqfk5gWijbERA=
github.com/urfave/cli v1.20.0/go.mod h1:70zkFmudgCuE/ngEzBv17Jvp/497gISqfk5gWijbERA=
github.com/urfave/cli v1.22.1/go.mod h1:Gos4lmkARVdJ6EkW0WaNv/tZAAMe9V7XWyB60NtXRu0=
@@ -1467,10 +1393,8 @@ github.com/warpfork/go-wish v0.0.0-20200122115046-b9ea61034e4a/go.mod h1:x6AKhvS
github.com/wendal/errors v0.0.0-20130201093226-f66c77a7882b/go.mod h1:Q12BUT7DqIlHRmgv3RskH+UCM/4eqVMgI0EMmlSpAXc=
github.com/whyrusleeping/go-keyspace v0.0.0-20160322163242-5b898ac5add1 h1:EKhdznlJHPMoKr0XTrX+IlJs1LH3lyx2nfr1dOlZ79k=
github.com/whyrusleeping/go-keyspace v0.0.0-20160322163242-5b898ac5add1/go.mod h1:8UvriyWtv5Q5EOgjHaSseUEdkQfvwFv1I/In/O2M9gc=
-github.com/whyrusleeping/go-logging v0.0.0-20170515211332-0457bb6b88fc/go.mod h1:bopw91TMyo8J3tvftk8xmU2kPmlrt4nScJQZU2hE5EM=
github.com/willf/bitset v1.1.11-0.20200630133818-d5bec3311243/go.mod h1:RjeCKbqT1RxIR/KWY6phxZiaY1IyutSBfGjNPySAYV4=
github.com/willf/bitset v1.1.11/go.mod h1:83CECat5yLh5zVOf4P1ErAgKA5UDvKtgyUABdr3+MjI=
-github.com/x-cray/logrus-prefixed-formatter v0.5.2/go.mod h1:2duySbKsL6M18s5GU7VPsoEPHyzalCE06qoARUCeBBE=
github.com/xeipuuv/gojsonpointer v0.0.0-20180127040702-4e3ac2762d5f/go.mod h1:N2zxlSyiKSe5eX1tZViRH5QA0qijqEDrYZiPEAiq3wU=
github.com/xeipuuv/gojsonreference v0.0.0-20180127040603-bd5ef7bd5415/go.mod h1:GwrjFmJcFw6At/Gs6z4yjiIwzuJ1/+UwLxMQDVQXShQ=
github.com/xeipuuv/gojsonschema v0.0.0-20180618132009-1d523034197f/go.mod h1:5yf86TLmAcydyeJq5YvxkGPE2fm/u4myDekKRoLuqhs=
@@ -1484,6 +1408,7 @@ github.com/yuin/goldmark v1.1.32/go.mod h1:3hX8gzYuyVAZsxl0MRgGTJEmQBFcNTphYh9de
github.com/yuin/goldmark v1.2.1/go.mod h1:3hX8gzYuyVAZsxl0MRgGTJEmQBFcNTphYh9decYSb74=
github.com/yuin/goldmark v1.3.5/go.mod h1:mwnBkeHKe2W/ZEtQ+71ViKU8L12m81fl3OWwC1Zlc8k=
github.com/yuin/goldmark v1.4.0/go.mod h1:mwnBkeHKe2W/ZEtQ+71ViKU8L12m81fl3OWwC1Zlc8k=
+github.com/yuin/goldmark v1.4.1/go.mod h1:mwnBkeHKe2W/ZEtQ+71ViKU8L12m81fl3OWwC1Zlc8k=
github.com/yusufpapurcu/wmi v1.2.2/go.mod h1:SBZ9tNy3G9/m5Oi98Zks0QjeHVDvuK0qfxQmPyzfmi0=
github.com/yvasiyarov/go-metrics v0.0.0-20140926110328-57bccd1ccd43/go.mod h1:aX5oPXxHm3bOH+xeAttToC8pqch2ScQN/JoXYupl6xs=
github.com/yvasiyarov/gorelic v0.0.0-20141212073537-a9bba5b9ab50/go.mod h1:NUSPSUX/bi6SeDMUh6brw0nXpxHnc96TguQh0+r/ssA=
@@ -1509,13 +1434,13 @@ go.opencensus.io v0.20.1/go.mod h1:6WKK9ahsWS3RSO+PY9ZHZUfv2irvY6gN279GOPZjmmk=
go.opencensus.io v0.20.2/go.mod h1:6WKK9ahsWS3RSO+PY9ZHZUfv2irvY6gN279GOPZjmmk=
go.opencensus.io v0.21.0/go.mod h1:mSImk1erAIZhrmZN+AvHh14ztQfjbGwt4TtuofqLduU=
go.opencensus.io v0.22.0/go.mod h1:+kGneAE2xo2IficOXnaByMWTGM9T73dGwxeWcUqIpI8=
-go.opencensus.io v0.22.1/go.mod h1:Ap50jQcDJrx6rB6VgeeFPtuPIf3wMRvRfrfYDO6+BmA=
go.opencensus.io v0.22.2/go.mod h1:yxeiOL68Rb0Xd1ddK5vPZ/oVn4vY4Ynel7k9FzqtOIw=
go.opencensus.io v0.22.3/go.mod h1:yxeiOL68Rb0Xd1ddK5vPZ/oVn4vY4Ynel7k9FzqtOIw=
go.opencensus.io v0.22.4/go.mod h1:yxeiOL68Rb0Xd1ddK5vPZ/oVn4vY4Ynel7k9FzqtOIw=
go.opencensus.io v0.22.5/go.mod h1:5pWMHQbX5EPX2/62yrJeAkowc+lfs/XD7Uxpq3pI6kk=
-go.opencensus.io v0.23.0 h1:gqCw0LfLxScz8irSi8exQc7fyQ0fKQU/qnC/X8+V/1M=
go.opencensus.io v0.23.0/go.mod h1:XItmlyltB5F7CS4xOC1DcqMoFqwtC6OG2xF7mCv7P7E=
+go.opencensus.io v0.24.0 h1:y73uSU6J157QMP2kn2r30vwW1A2W2WFwSCGnAVxeaD0=
+go.opencensus.io v0.24.0/go.mod h1:vNK8G9p7aAivkbmorf4v+7Hgx+Zs0yY+0fOtgBfjQKo=
go.opentelemetry.io/contrib v0.20.0/go.mod h1:G/EtFaa6qaN7+LxqfIAT3GiZa7Wv5DTBUzl5H4LY0Kc=
go.opentelemetry.io/contrib/instrumentation/google.golang.org/grpc/otelgrpc v0.20.0/go.mod h1:oVGt1LRbBOBq1A5BQLlUg9UaU/54aiHw8cgjV3aWZ/E=
go.opentelemetry.io/contrib/instrumentation/net/http/otelhttp v0.20.0/go.mod h1:2AboqHi0CiIZU0qwhtUfCYD1GeUzvvIXWNkhDt7ZMG4=
@@ -1534,18 +1459,22 @@ go.uber.org/atomic v1.4.0/go.mod h1:gD2HeocX3+yG+ygLZcrzQJaqmWj9AIm7n08wl/qW/PE=
go.uber.org/atomic v1.5.0/go.mod h1:sABNBOSYdrvTF6hTgEIbc7YasKWGhgEQZyfxyTvoXHQ=
go.uber.org/atomic v1.6.0/go.mod h1:sABNBOSYdrvTF6hTgEIbc7YasKWGhgEQZyfxyTvoXHQ=
go.uber.org/atomic v1.7.0/go.mod h1:fEN4uk6kAWBTFdckzkM89CLk9XfWZrxpCo0nPH17wJc=
-go.uber.org/atomic v1.10.0 h1:9qC72Qh0+3MqyJbAn8YU5xVq1frD8bn3JtD2oXtafVQ=
-go.uber.org/atomic v1.10.0/go.mod h1:LUxbIzbOniOlMKjJjyPfpl4v+PKK2cNJn91OQbhoJI0=
-go.uber.org/goleak v1.0.0/go.mod h1:8a7PlsEVH3e/a/GLqe5IIrQx6GzcnRmZEufDUTk4A7A=
+go.uber.org/atomic v1.11.0 h1:ZvwS0R+56ePWxUNi+Atn9dWONBPp/AUETXlHW0DxSjE=
+go.uber.org/atomic v1.11.0/go.mod h1:LUxbIzbOniOlMKjJjyPfpl4v+PKK2cNJn91OQbhoJI0=
+go.uber.org/dig v1.17.0 h1:5Chju+tUvcC+N7N6EV08BJz41UZuO3BmHcN4A287ZLI=
+go.uber.org/dig v1.17.0/go.mod h1:rTxpf7l5I0eBTlE6/9RL+lDybC7WFwY2QH55ZSjy1mU=
+go.uber.org/fx v1.19.2 h1:SyFgYQFr1Wl0AYstE8vyYIzP4bFz2URrScjwC4cwUvY=
+go.uber.org/fx v1.19.2/go.mod h1:43G1VcqSzbIv77y00p1DRAsyZS8WdzuYdhZXmEUkMyQ=
go.uber.org/goleak v1.1.10/go.mod h1:8a7PlsEVH3e/a/GLqe5IIrQx6GzcnRmZEufDUTk4A7A=
go.uber.org/goleak v1.1.11-0.20210813005559-691160354723/go.mod h1:cwTWslyiVhfpKIDGSZEM2HlOvcqm+tG4zioyIeLoqMQ=
go.uber.org/goleak v1.1.12 h1:gZAh5/EyT/HQwlpkCy6wTpqfH9H8Lz8zbm3dZh+OyzA=
+go.uber.org/goleak v1.1.12/go.mod h1:cwTWslyiVhfpKIDGSZEM2HlOvcqm+tG4zioyIeLoqMQ=
go.uber.org/multierr v1.1.0/go.mod h1:wR5kodmAFQ0UK8QlbwjlSNy0Z68gJhDJUG5sjR94q/0=
go.uber.org/multierr v1.3.0/go.mod h1:VgVr7evmIr6uPjLBxg28wmKNXyqE9akIJ5XnfpiKl+4=
go.uber.org/multierr v1.5.0/go.mod h1:FeouvMocqHpRaaGuG9EjoKcStLC43Zu/fmqdUMPcKYU=
go.uber.org/multierr v1.6.0/go.mod h1:cdWPpRnG4AhwMwsgIHip0KRBQjJy5kYEpYjJxpXp9iU=
-go.uber.org/multierr v1.8.0 h1:dg6GjLku4EH+249NNmoIciG9N/jURbDG+pFlTkhzIC8=
-go.uber.org/multierr v1.8.0/go.mod h1:7EAYxJLBy9rStEaz58O2t4Uvip6FSURkq8/ppBp95ak=
+go.uber.org/multierr v1.11.0 h1:blXXJkSxSSfBVBlC76pxqeO+LN3aDfLQo+309xJstO0=
+go.uber.org/multierr v1.11.0/go.mod h1:20+QtiLqy0Nd6FdQB9TLXag12DsQkrbs3htMFfDN80Y=
go.uber.org/tools v0.0.0-20190618225709-2cfd321de3ee/go.mod h1:vJERXedbb3MVM5f9Ejo0C68/HhF8uaILCdgjnY+goOA=
go.uber.org/zap v1.10.0/go.mod h1:vwi/ZaCAaUcBkycHslxD9B2zi4UTXhF60s6SWpuDF0Q=
go.uber.org/zap v1.13.0/go.mod h1:zwrFLgMcdUuIBviXEYEH1YKNaOBnKXsx2IPda5bBwHM=
@@ -1554,18 +1483,16 @@ go.uber.org/zap v1.16.0/go.mod h1:MA8QOfq0BHJwdXa996Y4dYkAqRKB8/1K1QMMZVaNZjQ=
go.uber.org/zap v1.17.0/go.mod h1:MXVU+bhUf/A7Xi2HNOnopQOrmycQ5Ih87HtOu4q5SSo=
go.uber.org/zap v1.19.0/go.mod h1:xg/QME4nWcxGxrpdeYfq7UvYrLh66cuVKdrbD1XF/NI=
go.uber.org/zap v1.19.1/go.mod h1:j3DNczoxDZroyBnOT1L/Q79cfUMGZxlv/9dzN7SM1rI=
-go.uber.org/zap v1.22.0 h1:Zcye5DUgBloQ9BaT4qc9BnjOFog5TvBSAGkJ3Nf70c0=
-go.uber.org/zap v1.22.0/go.mod h1:H4siCOZOrAolnUPJEkfaSjDqyP+BDS0DdDWzwcgt3+U=
+go.uber.org/zap v1.24.0 h1:FiJd5l1UOLj0wCgbSE0rwwXHzEdAZS6hiiSnxJN/D60=
+go.uber.org/zap v1.24.0/go.mod h1:2kMP+WWQ8aoFoedH3T2sq6iJ2yDWpHbP0f6MQbS9Gkg=
go4.org v0.0.0-20180809161055-417644f6feb5/go.mod h1:MkTOUMDaeVYJUOUsaDXIhWPZYa1yOyC1qaOBpL57BhE=
golang.org/x/build v0.0.0-20190111050920-041ab4dc3f9d/go.mod h1:OWs+y06UdEOHN4y+MfF/py+xQ/tYqIWW03b70/CG9Rw=
-golang.org/x/crypto v0.0.0-20170930174604-9419663f5a44/go.mod h1:6SG95UA2DQfeDnfUPMdvaQW0Q7yPrPDi9nlGo2tz2b4=
golang.org/x/crypto v0.0.0-20171113213409-9f005a07e0d3/go.mod h1:6SG95UA2DQfeDnfUPMdvaQW0Q7yPrPDi9nlGo2tz2b4=
golang.org/x/crypto v0.0.0-20180904163835-0709b304e793/go.mod h1:6SG95UA2DQfeDnfUPMdvaQW0Q7yPrPDi9nlGo2tz2b4=
golang.org/x/crypto v0.0.0-20181009213950-7c1a557ab941/go.mod h1:6SG95UA2DQfeDnfUPMdvaQW0Q7yPrPDi9nlGo2tz2b4=
golang.org/x/crypto v0.0.0-20181029021203-45a5f77698d3/go.mod h1:6SG95UA2DQfeDnfUPMdvaQW0Q7yPrPDi9nlGo2tz2b4=
golang.org/x/crypto v0.0.0-20181030102418-4d3f4d9ffa16/go.mod h1:6SG95UA2DQfeDnfUPMdvaQW0Q7yPrPDi9nlGo2tz2b4=
golang.org/x/crypto v0.0.0-20181127143415-eb0de9b17e85/go.mod h1:6SG95UA2DQfeDnfUPMdvaQW0Q7yPrPDi9nlGo2tz2b4=
-golang.org/x/crypto v0.0.0-20181203042331-505ab145d0a9/go.mod h1:6SG95UA2DQfeDnfUPMdvaQW0Q7yPrPDi9nlGo2tz2b4=
golang.org/x/crypto v0.0.0-20190211182817-74369b46fc67/go.mod h1:6SG95UA2DQfeDnfUPMdvaQW0Q7yPrPDi9nlGo2tz2b4=
golang.org/x/crypto v0.0.0-20190308221718-c2843e01d9a2/go.mod h1:djNgcEr1/C05ACkg1iLfiJU5Ep61QUkGW8qpdssI0+w=
golang.org/x/crypto v0.0.0-20190313024323-a1f597ede03a/go.mod h1:djNgcEr1/C05ACkg1iLfiJU5Ep61QUkGW8qpdssI0+w=
@@ -1591,8 +1518,9 @@ golang.org/x/crypto v0.0.0-20210220033148-5ea612d1eb83/go.mod h1:jdWPYTVW3xRLrWP
golang.org/x/crypto v0.0.0-20210322153248-0c34fe9e7dc2/go.mod h1:T9bdIzuCu7OtxOm1hfPfRQxPLYneinmdGuTeoZ9dtd4=
golang.org/x/crypto v0.0.0-20210513164829-c07d793c2f9a/go.mod h1:P+XmwS30IXTQdn5tA2iutPOUgjI07+tq3H3K9MVA1s8=
golang.org/x/crypto v0.0.0-20210817164053-32db794688a5/go.mod h1:GvvjBRRGRdwPK5ydBHafDWAxML/pGHZbMvKqRZ5+Abc=
-golang.org/x/crypto v0.0.0-20220525230936-793ad666bf5e h1:T8NU3HyQ8ClP4SEE+KbFlg6n0NhuTsN4MyznaarGsZM=
-golang.org/x/crypto v0.0.0-20220525230936-793ad666bf5e/go.mod h1:IxCIyHEi3zRg3s0A5j5BB6A9Jmi73HwBIUl50j+osU4=
+golang.org/x/crypto v0.0.0-20210921155107-089bfa567519/go.mod h1:GvvjBRRGRdwPK5ydBHafDWAxML/pGHZbMvKqRZ5+Abc=
+golang.org/x/crypto v0.7.0 h1:AvwMYaRytfdeVt3u6mLaxYtErKYjxA2OXjJ1HHq6t3A=
+golang.org/x/crypto v0.7.0/go.mod h1:pYwdfH91IfpZVANVyUOhSIPZaFoJGxTFbZhFTx+dXZU=
golang.org/x/exp v0.0.0-20180321215751-8460e604b9de/go.mod h1:CJ0aWSM057203Lf6IL+f9T1iT9GByDxfZKAQTCR3kQA=
golang.org/x/exp v0.0.0-20180807140117-3d87b88a115f/go.mod h1:CJ0aWSM057203Lf6IL+f9T1iT9GByDxfZKAQTCR3kQA=
golang.org/x/exp v0.0.0-20190121172915-509febef88a4/go.mod h1:CJ0aWSM057203Lf6IL+f9T1iT9GByDxfZKAQTCR3kQA=
@@ -1609,6 +1537,8 @@ golang.org/x/exp v0.0.0-20200119233911-0405dc783f0a/go.mod h1:2RIsYlXP63K8oxa1u0
golang.org/x/exp v0.0.0-20200207192155-f17229e696bd/go.mod h1:J/WKrq2StrnmMY6+EHIKF9dgMWnmCNThgcyBT1FY9mM=
golang.org/x/exp v0.0.0-20200224162631-6cc2880d07d6/go.mod h1:3jZMyOhIsHpP37uCMkUooju7aAi5cS1Q23tOzKc+0MU=
golang.org/x/exp v0.0.0-20210220032938-85be41e4509f/go.mod h1:I6l2HNBLBZEcrOoCpyKLdY2lHoRZ8lI4x60KMCQDft4=
+golang.org/x/exp v0.0.0-20230321023759-10a507213a29 h1:ooxPy7fPvB4kwsA2h+iBNHkAbp/4JxTSwCmvdjEYmug=
+golang.org/x/exp v0.0.0-20230321023759-10a507213a29/go.mod h1:CxIveKay+FTh1D0yPZemJVgC/95VzuuOLq5Qi4xnoYc=
golang.org/x/image v0.0.0-20180708004352-c73c2afc3b81/go.mod h1:ux5Hcp/YLpHSI86hEcLt0YII63i6oz57MZXIpbrjZUs=
golang.org/x/image v0.0.0-20190227222117-0694c2d4d067/go.mod h1:kZ7UVZpmo3dzQBMxlp+ypCbDeSB+sBbTgSJuh5dn5js=
golang.org/x/image v0.0.0-20190802002840-cff245a6509b/go.mod h1:FeLwcggjj3mMvU+oOTbSwawSJRM1uh48EjtB4UJZlP0=
@@ -1639,8 +1569,9 @@ golang.org/x/mod v0.3.1-0.20200828183125-ce943fd02449/go.mod h1:s0Qsj1ACt9ePp/hM
golang.org/x/mod v0.4.0/go.mod h1:s0Qsj1ACt9ePp/hMypM3fl4fZqREWJwdYDEqhRiZZUA=
golang.org/x/mod v0.4.1/go.mod h1:s0Qsj1ACt9ePp/hMypM3fl4fZqREWJwdYDEqhRiZZUA=
golang.org/x/mod v0.4.2/go.mod h1:s0Qsj1ACt9ePp/hMypM3fl4fZqREWJwdYDEqhRiZZUA=
-golang.org/x/mod v0.6.0-dev.0.20220419223038-86c51ed26bb4 h1:6zppjxzCulZykYSLyVDYbneBfbaBIQPYMevg0bEwv2s=
-golang.org/x/mod v0.6.0-dev.0.20220419223038-86c51ed26bb4/go.mod h1:jJ57K6gSWd91VN4djpZkiMVwK6gcyfeH4XE8wZrZaV4=
+golang.org/x/mod v0.6.0-dev.0.20220106191415-9b9b3d81d5e3/go.mod h1:3p9vT2HGsQu2K1YbXdKPJLVgG5VJdoTa1poYQBtP1AY=
+golang.org/x/mod v0.10.0 h1:lFO9qtOdlre5W1jxS3r/4szv2/6iXxScdzjoBMXNhYk=
+golang.org/x/mod v0.10.0/go.mod h1:iBbtSCu2XBx23ZKBPSOrRkjjQPZFPuis4dIYUhu/chs=
golang.org/x/net v0.0.0-20180724234803-3673e40ba225/go.mod h1:mL1N/T3taQHkDXs73rZJwtUhF3w3ftmwwsq0BUmARs4=
golang.org/x/net v0.0.0-20180826012351-8a410e7b638d/go.mod h1:mL1N/T3taQHkDXs73rZJwtUhF3w3ftmwwsq0BUmARs4=
golang.org/x/net v0.0.0-20180906233101-161cd47e91fd/go.mod h1:mL1N/T3taQHkDXs73rZJwtUhF3w3ftmwwsq0BUmARs4=
@@ -1654,7 +1585,6 @@ golang.org/x/net v0.0.0-20181220203305-927f97764cc3/go.mod h1:mL1N/T3taQHkDXs73r
golang.org/x/net v0.0.0-20190108225652-1e06a53dbb7e/go.mod h1:mL1N/T3taQHkDXs73rZJwtUhF3w3ftmwwsq0BUmARs4=
golang.org/x/net v0.0.0-20190125091013-d26f9f9a57f3/go.mod h1:mL1N/T3taQHkDXs73rZJwtUhF3w3ftmwwsq0BUmARs4=
golang.org/x/net v0.0.0-20190213061140-3a22650c66bd/go.mod h1:mL1N/T3taQHkDXs73rZJwtUhF3w3ftmwwsq0BUmARs4=
-golang.org/x/net v0.0.0-20190227160552-c95aed5357e7/go.mod h1:mL1N/T3taQHkDXs73rZJwtUhF3w3ftmwwsq0BUmARs4=
golang.org/x/net v0.0.0-20190311183353-d8887717615a/go.mod h1:t9HGtf8HONx5eT2rtn7q6eTqICYqUVnKs3thJo3Qplg=
golang.org/x/net v0.0.0-20190313220215-9f648a60d977/go.mod h1:t9HGtf8HONx5eT2rtn7q6eTqICYqUVnKs3thJo3Qplg=
golang.org/x/net v0.0.0-20190404232315-eb5bcb51f2a3/go.mod h1:t9HGtf8HONx5eT2rtn7q6eTqICYqUVnKs3thJo3Qplg=
@@ -1703,15 +1633,14 @@ golang.org/x/net v0.0.0-20210428140749-89ef3d95e781/go.mod h1:OJAsFXCWl8Ukc7SiCT
golang.org/x/net v0.0.0-20210503060351-7fd8e65b6420/go.mod h1:9nx3DQGgdP8bBQD5qxJ1jj9UTztislL4KSBs9R2vV5Y=
golang.org/x/net v0.0.0-20210520170846-37e1c6afe023/go.mod h1:9nx3DQGgdP8bBQD5qxJ1jj9UTztislL4KSBs9R2vV5Y=
golang.org/x/net v0.0.0-20210525063256-abc453219eb5/go.mod h1:9nx3DQGgdP8bBQD5qxJ1jj9UTztislL4KSBs9R2vV5Y=
-golang.org/x/net v0.0.0-20210726213435-c6fcb2dbf985/go.mod h1:9nx3DQGgdP8bBQD5qxJ1jj9UTztislL4KSBs9R2vV5Y=
golang.org/x/net v0.0.0-20210805182204-aaa1db679c0d/go.mod h1:9nx3DQGgdP8bBQD5qxJ1jj9UTztislL4KSBs9R2vV5Y=
golang.org/x/net v0.0.0-20210825183410-e898025ed96a/go.mod h1:9nx3DQGgdP8bBQD5qxJ1jj9UTztislL4KSBs9R2vV5Y=
+golang.org/x/net v0.0.0-20211015210444-4f30a5c0130f/go.mod h1:9nx3DQGgdP8bBQD5qxJ1jj9UTztislL4KSBs9R2vV5Y=
golang.org/x/net v0.0.0-20211209124913-491a49abca63/go.mod h1:9nx3DQGgdP8bBQD5qxJ1jj9UTztislL4KSBs9R2vV5Y=
golang.org/x/net v0.0.0-20220127200216-cd36cc0744dd/go.mod h1:CfG3xpIq0wQ8r1q4Su4UZFWDARRcnwPjda9FqA0JpMk=
golang.org/x/net v0.0.0-20220225172249-27dd8689420f/go.mod h1:CfG3xpIq0wQ8r1q4Su4UZFWDARRcnwPjda9FqA0JpMk=
-golang.org/x/net v0.0.0-20220624214902-1bab6f366d9e/go.mod h1:XRhObCWvk6IyKnWLug+ECip1KBveYUHfp+8e9klMJ9c=
-golang.org/x/net v0.0.0-20220812174116-3211cb980234 h1:RDqmgfe7SvlMWoqC3xwQ2blLO3fcWcxMa3eBLRdRW7E=
-golang.org/x/net v0.0.0-20220812174116-3211cb980234/go.mod h1:YDH+HFinaLZZlnHAfSS6ZXJJ9M9t4Dl22yv3iI2vPwk=
+golang.org/x/net v0.10.0 h1:X2//UzNDwYmtCLn7To6G58Wr6f5ahEAQgKNzv9Y951M=
+golang.org/x/net v0.10.0/go.mod h1:0qNGK6F8kojg2nk9dLZ2mShWaEBan6FAoqfSigmmuDg=
golang.org/x/oauth2 v0.0.0-20180821212333-d2e6202438be/go.mod h1:N/0e6XlmueqKjAGxoOufVs8QHGRruUQn6yWY3a++T0U=
golang.org/x/oauth2 v0.0.0-20181017192945-9dcd33a902f4/go.mod h1:N/0e6XlmueqKjAGxoOufVs8QHGRruUQn6yWY3a++T0U=
golang.org/x/oauth2 v0.0.0-20181203162652-d668ce993890/go.mod h1:N/0e6XlmueqKjAGxoOufVs8QHGRruUQn6yWY3a++T0U=
@@ -1744,8 +1673,8 @@ golang.org/x/sync v0.0.0-20201020160332-67f06af15bc9/go.mod h1:RxMgew5VJxzue5/jJ
golang.org/x/sync v0.0.0-20201207232520-09787c993a3a/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM=
golang.org/x/sync v0.0.0-20210220032951-036812b2e83c/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM=
golang.org/x/sync v0.0.0-20220601150217-0de741cfad7f/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM=
-golang.org/x/sync v0.0.0-20220722155255-886fb9371eb4 h1:uVc8UZUe6tr40fFVnUP5Oj+veunVezqYl9z7DYw9xzw=
-golang.org/x/sync v0.0.0-20220722155255-886fb9371eb4/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM=
+golang.org/x/sync v0.2.0 h1:PUR+T4wwASmuSTYdKjYHI5TD22Wy5ogLU5qZCOLxBrI=
+golang.org/x/sync v0.2.0/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM=
golang.org/x/sys v0.0.0-20180810173357-98c5dad5d1a0/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY=
golang.org/x/sys v0.0.0-20180823144017-11551d06cbcc/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY=
golang.org/x/sys v0.0.0-20180830151530-49385e6e1522/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY=
@@ -1756,15 +1685,10 @@ golang.org/x/sys v0.0.0-20181029174526-d69651ed3497/go.mod h1:STP8DvDyc/dI5b8T5h
golang.org/x/sys v0.0.0-20181107165924-66b7b1311ac8/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY=
golang.org/x/sys v0.0.0-20181116152217-5ac8a444bdc5/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY=
golang.org/x/sys v0.0.0-20181122145206-62eef0e2fa9b/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY=
-golang.org/x/sys v0.0.0-20181205085412-a5c9d58dba9a/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY=
golang.org/x/sys v0.0.0-20190209173611-3b5209105503/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY=
golang.org/x/sys v0.0.0-20190215142949-d0b11bdaac8a/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY=
-golang.org/x/sys v0.0.0-20190219092855-153ac476189d/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY=
-golang.org/x/sys v0.0.0-20190222072716-a9d3bda3a223/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY=
-golang.org/x/sys v0.0.0-20190228124157-a34e9553db1e/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY=
golang.org/x/sys v0.0.0-20190312061237-fead79001313/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
golang.org/x/sys v0.0.0-20190316082340-a2f829d7f35f/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
-golang.org/x/sys v0.0.0-20190405154228-4b34438f7a67/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
golang.org/x/sys v0.0.0-20190412213103-97732733099d/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
golang.org/x/sys v0.0.0-20190422165155-953cdadca894/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
golang.org/x/sys v0.0.0-20190502145724-3ef323f4f1fd/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
@@ -1776,7 +1700,6 @@ golang.org/x/sys v0.0.0-20190606165138-5da285871e9c/go.mod h1:h1NjWce9XRLGQEsW7w
golang.org/x/sys v0.0.0-20190606203320-7fc4e5ec1444/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
golang.org/x/sys v0.0.0-20190616124812-15dcb6c0061f/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
golang.org/x/sys v0.0.0-20190624142023-c5567b49c5d0/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
-golang.org/x/sys v0.0.0-20190626221950-04f50cda93cb/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
golang.org/x/sys v0.0.0-20190726091711-fc99dfbffb4e/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
golang.org/x/sys v0.0.0-20190801041406-cbf593c0f2f3/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
golang.org/x/sys v0.0.0-20190812073006-9eafafc0a87e/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
@@ -1859,21 +1782,26 @@ golang.org/x/sys v0.0.0-20210630005230-0f9fa26af87c/go.mod h1:oPkhp1MJrh7nUepCBc
golang.org/x/sys v0.0.0-20210809222454-d867a43fc93e/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg=
golang.org/x/sys v0.0.0-20210817190340-bfb29a6856f2/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg=
golang.org/x/sys v0.0.0-20210831042530-f4d43177bf5e/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg=
+golang.org/x/sys v0.0.0-20211019181941-9d821ace8654/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg=
golang.org/x/sys v0.0.0-20211216021012-1d35b9e2eb4e/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg=
golang.org/x/sys v0.0.0-20220114195835-da31bd327af9/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg=
golang.org/x/sys v0.0.0-20220128215802-99c3d69c2c27/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg=
+golang.org/x/sys v0.0.0-20220319134239-a9b59b0215f8/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg=
golang.org/x/sys v0.0.0-20220412211240-33da011f77ad/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg=
golang.org/x/sys v0.0.0-20220520151302-bc2c85ada10a/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg=
golang.org/x/sys v0.0.0-20220704084225-05e143d24a9e/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg=
golang.org/x/sys v0.0.0-20220708085239-5a0f0661e09d/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg=
-golang.org/x/sys v0.0.0-20220811171246-fbc7d0a398ab h1:2QkjZIsXupsJbJIdSjjUOgWK3aEtzyuh2mPt3l/CkeU=
-golang.org/x/sys v0.0.0-20220811171246-fbc7d0a398ab/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg=
+golang.org/x/sys v0.5.0/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg=
+golang.org/x/sys v0.6.0/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg=
+golang.org/x/sys v0.8.0 h1:EBmGv8NaZBZTWvrbjNoL6HVt+IVy3QDQpJs7VRIw3tU=
+golang.org/x/sys v0.8.0/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg=
golang.org/x/term v0.0.0-20201117132131-f5c789dd3221/go.mod h1:Nr5EML6q2oocZ2LXRh80K7BxOlk5/8JxuGnuhpl+muw=
golang.org/x/term v0.0.0-20201126162022-7de9c90e9dd1/go.mod h1:bj7SfCRtBDWHUb9snDiAeCFNEtKQo2Wmx5Cou7ajbmo=
golang.org/x/term v0.0.0-20210220032956-6a3ed077a48d/go.mod h1:bj7SfCRtBDWHUb9snDiAeCFNEtKQo2Wmx5Cou7ajbmo=
golang.org/x/term v0.0.0-20210615171337-6886f2dfbf5b/go.mod h1:jbD1KX2456YbFQfuXm/mYQcufACuNUgVhRMnK/tPxf8=
-golang.org/x/term v0.0.0-20210927222741-03fcf44c2211 h1:JGgROgKl9N8DuW20oFS5gxc+lE67/N3FcwmBPMe7ArY=
golang.org/x/term v0.0.0-20210927222741-03fcf44c2211/go.mod h1:jbD1KX2456YbFQfuXm/mYQcufACuNUgVhRMnK/tPxf8=
+golang.org/x/term v0.8.0 h1:n5xxQn2i3PC0yLAbjTpNT85q/Kgzcr2gIoX9OrJUols=
+golang.org/x/term v0.8.0/go.mod h1:xPskH00ivmX89bAKVGSKKtLOWNx2+17Eiy94tnKShWo=
golang.org/x/text v0.0.0-20170915032832-14c0d48ead0c/go.mod h1:NqM8EUOU14njkJ3fqMW+pc6Ldnwhi/IjpwHt7yyuwOQ=
golang.org/x/text v0.3.0/go.mod h1:NqM8EUOU14njkJ3fqMW+pc6Ldnwhi/IjpwHt7yyuwOQ=
golang.org/x/text v0.3.1-0.20180807135948-17ff2d5776d2/go.mod h1:NqM8EUOU14njkJ3fqMW+pc6Ldnwhi/IjpwHt7yyuwOQ=
@@ -1882,8 +1810,9 @@ golang.org/x/text v0.3.3/go.mod h1:5Zoc/QRtKVWzQhOtBMvqHzDpF6irO9z98xDceosuGiQ=
golang.org/x/text v0.3.4/go.mod h1:5Zoc/QRtKVWzQhOtBMvqHzDpF6irO9z98xDceosuGiQ=
golang.org/x/text v0.3.5/go.mod h1:5Zoc/QRtKVWzQhOtBMvqHzDpF6irO9z98xDceosuGiQ=
golang.org/x/text v0.3.6/go.mod h1:5Zoc/QRtKVWzQhOtBMvqHzDpF6irO9z98xDceosuGiQ=
-golang.org/x/text v0.3.7 h1:olpwvP2KacW1ZWvsR7uQhoyTYvKAupfQrRGBFM352Gk=
golang.org/x/text v0.3.7/go.mod h1:u+2+/6zg+i71rQMx5EYifcz6MCKuco9NR6JIITiCfzQ=
+golang.org/x/text v0.9.0 h1:2sjJmO8cDvYveuX97RDLsxlyUxLl+GHoLxBiRdHllBE=
+golang.org/x/text v0.9.0/go.mod h1:e1OnstbJyHTd6l/uOt8jFFHp6TRDWZR/bV3emEE/zU8=
golang.org/x/time v0.0.0-20180412165947-fbb02b2291d2/go.mod h1:tRJNPiyCQ0inRvYxbN9jk5I+vvW/OXSQhTDSoE431IQ=
golang.org/x/time v0.0.0-20181108054448-85acf8d2951c/go.mod h1:tRJNPiyCQ0inRvYxbN9jk5I+vvW/OXSQhTDSoE431IQ=
golang.org/x/time v0.0.0-20190308202827-9d24e82272b4/go.mod h1:tRJNPiyCQ0inRvYxbN9jk5I+vvW/OXSQhTDSoE431IQ=
@@ -1899,7 +1828,6 @@ golang.org/x/tools v0.0.0-20180917221912-90fa682c2a6e/go.mod h1:n7NCudcB/nEzxVGm
golang.org/x/tools v0.0.0-20181011042414-1f849cf54d09/go.mod h1:n7NCudcB/nEzxVGmLbDWY5pfWTLqBcC2KZ6jyYvM4mQ=
golang.org/x/tools v0.0.0-20181030000716-a0a13e073c7b/go.mod h1:n7NCudcB/nEzxVGmLbDWY5pfWTLqBcC2KZ6jyYvM4mQ=
golang.org/x/tools v0.0.0-20181030221726-6c7e314b6563/go.mod h1:n7NCudcB/nEzxVGmLbDWY5pfWTLqBcC2KZ6jyYvM4mQ=
-golang.org/x/tools v0.0.0-20181130052023-1c3d964395ce/go.mod h1:n7NCudcB/nEzxVGmLbDWY5pfWTLqBcC2KZ6jyYvM4mQ=
golang.org/x/tools v0.0.0-20190114222345-bf090417da8b/go.mod h1:n7NCudcB/nEzxVGmLbDWY5pfWTLqBcC2KZ6jyYvM4mQ=
golang.org/x/tools v0.0.0-20190206041539-40960b6deb8e/go.mod h1:n7NCudcB/nEzxVGmLbDWY5pfWTLqBcC2KZ6jyYvM4mQ=
golang.org/x/tools v0.0.0-20190226205152-f727befe758c/go.mod h1:9Yl7xja0Znq3iFh3HoIrodX9oNMXvdceNzlUR8zjMvY=
@@ -1964,10 +1892,10 @@ golang.org/x/tools v0.1.0/go.mod h1:xkSsbof2nBLbhDlRMhhhyNLN/zl3eTqcnHD5viDpcZ0=
golang.org/x/tools v0.1.1/go.mod h1:o0xws9oXOQQZyjljx8fwUC0k7L1pTE6eaCbjGeHmOkk=
golang.org/x/tools v0.1.2/go.mod h1:o0xws9oXOQQZyjljx8fwUC0k7L1pTE6eaCbjGeHmOkk=
golang.org/x/tools v0.1.5/go.mod h1:o0xws9oXOQQZyjljx8fwUC0k7L1pTE6eaCbjGeHmOkk=
-golang.org/x/tools v0.1.6-0.20210726203631-07bc1bf47fb2/go.mod h1:o0xws9oXOQQZyjljx8fwUC0k7L1pTE6eaCbjGeHmOkk=
golang.org/x/tools v0.1.6-0.20210820212750-d4cc65f0b2ff/go.mod h1:YD9qOF0M9xpSpdWTBbzEl5e/RnCefISl8E5Noe10jFM=
-golang.org/x/tools v0.1.12 h1:VveCTK38A2rkS8ZqFY25HIDFscX5X9OoEhJd3quQmXU=
-golang.org/x/tools v0.1.12/go.mod h1:hNGJHUnrk76NpqgfD5Aqm5Crs+Hm0VOH/i9J2+nxYbc=
+golang.org/x/tools v0.1.10/go.mod h1:Uh6Zz+xoGYZom868N8YTex3t7RhtHDBrE8Gzo9bV56E=
+golang.org/x/tools v0.9.1 h1:8WMNJAz3zrtPmnYC7ISf5dEn3MT0gY7jBJfw27yrrLo=
+golang.org/x/tools v0.9.1/go.mod h1:owI94Op576fPu3cIGQeHs3joujW/2Oc6MtlxbF5dfNc=
golang.org/x/xerrors v0.0.0-20190717185122-a985d3407aa7/go.mod h1:I/5z698sn9Ka8TeJc9MKroUUfqBBauWjQqLJ2OPfmY0=
golang.org/x/xerrors v0.0.0-20191011141410-1b5146add898/go.mod h1:I/5z698sn9Ka8TeJc9MKroUUfqBBauWjQqLJ2OPfmY0=
golang.org/x/xerrors v0.0.0-20191204190536-9bdfabe68543/go.mod h1:I/5z698sn9Ka8TeJc9MKroUUfqBBauWjQqLJ2OPfmY0=
@@ -2122,8 +2050,9 @@ google.golang.org/protobuf v1.26.0-rc.1/go.mod h1:jlhhOSvTdKEhbULTjvd4ARK9grFBp0
google.golang.org/protobuf v1.26.0/go.mod h1:9q0QmTI4eRPtz6boOQmLYwt+qCgq0jsYwAQnmE0givc=
google.golang.org/protobuf v1.27.1/go.mod h1:9q0QmTI4eRPtz6boOQmLYwt+qCgq0jsYwAQnmE0givc=
google.golang.org/protobuf v1.28.0/go.mod h1:HV8QOd/L58Z+nl8r43ehVNZIU/HEI6OcFqwMG9pJV4I=
-google.golang.org/protobuf v1.28.1 h1:d0NfwRgPtno5B1Wa6L2DAG+KivqkdutMf1UhdNx175w=
google.golang.org/protobuf v1.28.1/go.mod h1:HV8QOd/L58Z+nl8r43ehVNZIU/HEI6OcFqwMG9pJV4I=
+google.golang.org/protobuf v1.30.0 h1:kPPoIgf3TsEvrm0PFe15JQ+570QVxYzEvvHqChK+cng=
+google.golang.org/protobuf v1.30.0/go.mod h1:HV8QOd/L58Z+nl8r43ehVNZIU/HEI6OcFqwMG9pJV4I=
gopkg.in/DataDog/dd-trace-go.v1 v1.27.1 h1:9BJfwtuCUrUiNB3WCTXHuaP5E/J/zfMPUUaRJoEQfdc=
gopkg.in/DataDog/dd-trace-go.v1 v1.27.1/go.mod h1:Sp1lku8WJMvNV0kjDI4Ni/T7J/U3BO5ct5kEaoVU8+I=
gopkg.in/airbrake/gobrake.v2 v2.0.9/go.mod h1:/h5ZAUhDkGaJfjzjKLSjv6zCL6O0LLBxU4K+aSYdM/U=
@@ -2151,8 +2080,6 @@ gopkg.in/resty.v1 v1.12.0/go.mod h1:mDo4pnntr5jdWRML875a/NmxYqAlA73dVijT2AXvQQo=
gopkg.in/square/go-jose.v2 v2.2.2/go.mod h1:M9dMgbHiYLoDGQrXy7OpJDJWiKiU//h+vD76mk0e1AI=
gopkg.in/square/go-jose.v2 v2.3.1/go.mod h1:M9dMgbHiYLoDGQrXy7OpJDJWiKiU//h+vD76mk0e1AI=
gopkg.in/square/go-jose.v2 v2.5.1/go.mod h1:M9dMgbHiYLoDGQrXy7OpJDJWiKiU//h+vD76mk0e1AI=
-gopkg.in/src-d/go-cli.v0 v0.0.0-20181105080154-d492247bbc0d/go.mod h1:z+K8VcOYVYcSwSjGebuDL6176A1XskgbtNl64NSg+n8=
-gopkg.in/src-d/go-log.v1 v1.0.1/go.mod h1:GN34hKP0g305ysm2/hctJ0Y8nWP3zxXXJ8GFabTyABE=
gopkg.in/tomb.v1 v1.0.0-20141024135613-dd632973f1e7 h1:uRGJdciOHaEIrze2W8Q3AKkepLTh2hOroT7a+7czfdQ=
gopkg.in/tomb.v1 v1.0.0-20141024135613-dd632973f1e7/go.mod h1:dt/ZhP58zS4L8KSrWDmTeBkI65Dw0HsyUHuEVlX15mw=
gopkg.in/tomb.v2 v2.0.0-20161208151619-d5d1b5820637/go.mod h1:BHsqpu/nsuzkT5BpiH1EMZPLyqSMM8JbIavyFACoFNk=
@@ -2203,13 +2130,13 @@ k8s.io/api v0.20.10/go.mod h1:0kei3F6biGjtRQBo5dUeujq6Ji3UCh9aOSfp/THYd7I=
k8s.io/api v0.21.0/go.mod h1:+YbrhBBGgsxbF6o6Kj4KJPJnBmAKuXDeS3E18bgHNVU=
k8s.io/api v0.22.2/go.mod h1:y3ydYpLJAaDI+BbSe2xmGcqxiWHmWjkEeIbiwHvnPR8=
k8s.io/api v0.22.4/go.mod h1:Rgs+9gIGYC5laXQSZZ9JqT5NevNgoGiOdVWi1BAB3qk=
-k8s.io/api v0.22.6/go.mod h1:q1F7IfaNrbi/83ebLy3YFQYLjPSNyunZ/IXQxMmbwCg=
+k8s.io/api v0.22.17/go.mod h1:6qVojJ3y+qIq7JSMwTH0BcPHl3dch4HefIC+4nguZhs=
k8s.io/api v0.23.0/go.mod h1:8wmDdLBHBNxtOIytwLstXt5E9PddnZb0GaMcqsvDBpg=
k8s.io/api v0.23.1 h1:ncu/qfBfUoClqwkTGbeRqqOqBCRoUAflMuOaOD7J0c8=
k8s.io/api v0.23.1/go.mod h1:WfXnOnwSqNtG62Y1CdjoMxh7r7u9QXGCkA1u0na2jgo=
k8s.io/apiextensions-apiserver v0.22.2/go.mod h1:2E0Ve/isxNl7tWLSUDgi6+cmwHi5fQRdwGVCxbC+KFA=
k8s.io/apiextensions-apiserver v0.22.4/go.mod h1:kH9lxD8dbJ+k0ZizGET55lFgdGjO8t45fgZnCVdZEpw=
-k8s.io/apiextensions-apiserver v0.22.6/go.mod h1:wNsLwy8mfIkGThiv4Qq/Hy4qRazViKXqmH5pfYiRKyY=
+k8s.io/apiextensions-apiserver v0.22.17/go.mod h1:mLAmK33c0j1UJW03FtFSiJD0SVkDoedSKsoUwJyUfPc=
k8s.io/apiextensions-apiserver v0.23.0/go.mod h1:xIFAEEDlAZgpVBl/1VSjGDmLoXAWRG40+GsWhKhAxY4=
k8s.io/apimachinery v0.19.2/go.mod h1:DnPGDnARWFvYa3pMHgSxtbZb7gpzzAZ1pTfaUNDVlmA=
k8s.io/apimachinery v0.20.1/go.mod h1:WlLqWAHZGg07AeltaI0MV5uk1Omp8xaN0JGLY6gkRpU=
@@ -2220,7 +2147,7 @@ k8s.io/apimachinery v0.20.10/go.mod h1:kQa//VOAwyVwJ2+L9kOREbsnryfsGSkSM1przND4+
k8s.io/apimachinery v0.21.0/go.mod h1:jbreFvJo3ov9rj7eWT7+sYiRx+qZuCYXwWT1bcDswPY=
k8s.io/apimachinery v0.22.2/go.mod h1:O3oNtNadZdeOMxHFVxOreoznohCpy0z6mocxbZr7oJ0=
k8s.io/apimachinery v0.22.4/go.mod h1:yU6oA6Gnax9RrxGzVvPFFJ+mpnW6PBSqp0sx0I0HHW0=
-k8s.io/apimachinery v0.22.6/go.mod h1:ZvVLP5iLhwVFg2Yx9Gh5W0um0DUauExbRhe+2Z8I1EU=
+k8s.io/apimachinery v0.22.17/go.mod h1:ZvVLP5iLhwVFg2Yx9Gh5W0um0DUauExbRhe+2Z8I1EU=
k8s.io/apimachinery v0.23.0/go.mod h1:fFCTTBKvKcwTPFzjlcxp91uPFZr+JA0FubU4fLzzFYc=
k8s.io/apimachinery v0.23.1 h1:sfBjlDFwj2onG0Ijx5C+SrAoeUscPrmghm7wHP+uXlo=
k8s.io/apimachinery v0.23.1/go.mod h1:SADt2Kl8/sttJ62RRsi9MIV4o8f5S3coArm0Iu3fBno=
@@ -2229,11 +2156,11 @@ k8s.io/apiserver v0.20.4/go.mod h1:Mc80thBKOyy7tbvFtB4kJv1kbdD0eIH8k8vianJcbFM=
k8s.io/apiserver v0.20.6/go.mod h1:QIJXNt6i6JB+0YQRNcS0hdRHJlMhflFmsBDeSgT1r8Q=
k8s.io/apiserver v0.22.2/go.mod h1:vrpMmbyjWrgdyOvZTSpsusQq5iigKNWv9o9KlDAbBHI=
k8s.io/apiserver v0.22.4/go.mod h1:38WmcUZiiy41A7Aty8/VorWRa8vDGqoUzDf2XYlku0E=
-k8s.io/apiserver v0.22.6/go.mod h1:OlL1rGa2kKWGj2JEXnwBcul/BwC9Twe95gm4ohtiIIs=
+k8s.io/apiserver v0.22.17/go.mod h1:zNXYCtXZ91AkmIUZgQ8lT9vdlDqgSkokJpds/F6DdGU=
k8s.io/apiserver v0.23.0 h1:Ds/QveXWi9aJ8ISB0CJa4zBNc5njxAs5u3rmMIexqCY=
k8s.io/apiserver v0.23.0/go.mod h1:Cec35u/9zAepDPPFyT+UMrgqOCjgJ5qtfVJDxjZYmt4=
k8s.io/cli-runtime v0.22.4/go.mod h1:x35r0ERHXr/MrbR1C6MPJxQ3xKG6+hXi9m2xLzlMPZA=
-k8s.io/cli-runtime v0.22.6/go.mod h1:UY6oHyBUZ/y0O6ovyyPy++S5LdijxJSOizXsrAP+qKU=
+k8s.io/cli-runtime v0.22.17/go.mod h1:VCbgEEvQxNvd+L/d4rezZkf4thR/XWThyik2W9AZOQU=
k8s.io/cli-runtime v0.23.0/go.mod h1:B5N3YH0KP1iKr6gEuJ/RRmGjO0mJQ/f/JrsmEiPQAlU=
k8s.io/client-go v0.19.2/go.mod h1:S5wPhCqyDNAlzM9CnEdgTGV4OqhsW3jGO1UM1epwfJA=
k8s.io/client-go v0.20.1/go.mod h1:/zcHdt1TeWSd5HoUe6elJmHSQ6uLLgp4bIJHVEuy+/Y=
@@ -2243,18 +2170,18 @@ k8s.io/client-go v0.20.10/go.mod h1:fFg+aLoasv/R+xiVaWjxeqGFYltzgQcOQzkFaSRfnJ0=
k8s.io/client-go v0.21.0/go.mod h1:nNBytTF9qPFDEhoqgEPaarobC8QPae13bElIVHzIglA=
k8s.io/client-go v0.22.2/go.mod h1:sAlhrkVDf50ZHx6z4K0S40wISNTarf1r800F+RlCF6U=
k8s.io/client-go v0.22.4/go.mod h1:Yzw4e5e7h1LNHA4uqnMVrpEpUs1hJOiuBsJKIlRCHDA=
-k8s.io/client-go v0.22.6/go.mod h1:TffU4AV2idZGeP+g3kdFZP+oHVHWPL1JYFySOALriw0=
+k8s.io/client-go v0.22.17/go.mod h1:SQPVpN+E/5Q/aSV7fYDT8VKVdaljhxI/t/84ADVJoC4=
k8s.io/client-go v0.23.0/go.mod h1:hrDnpnK1mSr65lHHcUuIZIXDgEbzc7/683c6hyG4jTA=
k8s.io/client-go v0.23.1 h1:Ma4Fhf/p07Nmj9yAB1H7UwbFHEBrSPg8lviR24U2GiQ=
k8s.io/client-go v0.23.1/go.mod h1:6QSI8fEuqD4zgFK0xbdwfB/PthBsIxCJMa3s17WlcO0=
-k8s.io/cloud-provider v0.22.6/go.mod h1:qKWDzOCIsSWlPvC4txa9X+IuxeJX8LWf9jz/ClpBIPQ=
+k8s.io/cloud-provider v0.22.17/go.mod h1:OIcfw/72pV8x1P0O1JJAyIJz+EJRuTzxG6BvqlQY94g=
k8s.io/cloud-provider v0.23.0 h1:9LATZJu57XanN7po4Xfj6jTLp44uhKiu5Xa3+3Cutz0=
k8s.io/cloud-provider v0.23.0/go.mod h1:vY9zulPUCjwCg9TBMh5Qi42JUZUNecQNF7FojNELWws=
-k8s.io/cluster-bootstrap v0.22.6/go.mod h1:G8vRWaBElK/3fk3UsnqFKO4Sr8LyX6urLqdkuPXOC8k=
+k8s.io/cluster-bootstrap v0.22.17/go.mod h1:9ERzPcRveHKZBDDqo9jTTotpkgRpRjw9RMH3rXE4hCw=
k8s.io/cluster-bootstrap v0.23.0/go.mod h1:VltEnKWfrRTiKgOXp3ts3vh7yqNlH6KFKFflo9GtCBg=
k8s.io/code-generator v0.22.2/go.mod h1:eV77Y09IopzeXOJzndrDyCI88UBok2h6WxAlBwpxa+o=
k8s.io/code-generator v0.22.4/go.mod h1:qjYl54pQ/emhkT0UxbufbREYJMWsHNNV/jSVwhYZQGw=
-k8s.io/code-generator v0.22.6/go.mod h1:iOZwYADSgFPNGWfqHFfg1V0TNJnl1t0WyZluQp4baqU=
+k8s.io/code-generator v0.22.17/go.mod h1:iOZwYADSgFPNGWfqHFfg1V0TNJnl1t0WyZluQp4baqU=
k8s.io/code-generator v0.23.0/go.mod h1:vQvOhDXhuzqiVfM/YHp+dmg10WDZCchJVObc9MvowsE=
k8s.io/component-base v0.20.1/go.mod h1:guxkoJnNoh8LNrbtiQOlyp2Y2XFCZQmrcg2n/DeYNLk=
k8s.io/component-base v0.20.4/go.mod h1:t4p9EdiagbVCJKrQ1RsA5/V4rFQNDfRlevJajlGwgjI=
@@ -2262,20 +2189,20 @@ k8s.io/component-base v0.20.6/go.mod h1:6f1MPBAeI+mvuts3sIdtpjljHWBQ2cIy38oBIWMY
k8s.io/component-base v0.20.10/go.mod h1:ZKOEin1xu68aJzxgzl5DZSp5J1IrjAOPlPN90/t6OI8=
k8s.io/component-base v0.22.2/go.mod h1:5Br2QhI9OTe79p+TzPe9JKNQYvEKbq9rTJDWllunGug=
k8s.io/component-base v0.22.4/go.mod h1:MrSaQy4a3tFVViff8TZL6JHYSewNCLshZCwHYM58v5A=
-k8s.io/component-base v0.22.6/go.mod h1:ngHLefY4J5fq2fApNdbWyj4yh0lvw36do4aAjNN8rc8=
+k8s.io/component-base v0.22.17/go.mod h1:Mrcvmxs+Ctx/xCYGWoFAvfZO9DC4gDgLtUbPJ4PjjUE=
k8s.io/component-base v0.23.0 h1:UAnyzjvVZ2ZR1lF35YwtNY6VMN94WtOnArcXBu34es8=
k8s.io/component-base v0.23.0/go.mod h1:DHH5uiFvLC1edCpvcTDV++NKULdYYU6pR9Tt3HIKMKI=
k8s.io/component-helpers v0.23.0 h1:qNbqN10QTefiWcCOPkHL/0nn81sdKVv6ZgEXcSyot/U=
k8s.io/component-helpers v0.23.0/go.mod h1:liXMh6FZS4qamKtMJQ7uLHnFe3tlC86RX5mJEk/aerg=
-k8s.io/controller-manager v0.22.6/go.mod h1:yklwmkmk51pUYyo8URpFer7R07wcvVq/xNRUN36oDDs=
+k8s.io/controller-manager v0.22.17/go.mod h1:M5e7BfU5eZOLkDG3MLHM6NkNJzPM7DRr1WwxOgnWy1g=
k8s.io/controller-manager v0.23.0/go.mod h1:6/IKItSv6p9FY3mSbHgsOYmt4y+HDxiC5hEFg9rJVc8=
k8s.io/cri-api v0.17.3/go.mod h1:X1sbHmuXhwaHs9xxYffLqJogVsnI+f6cPRcgPel7ywM=
k8s.io/cri-api v0.20.1/go.mod h1:2JRbKt+BFLTjtrILYVqQK5jqhI+XNdF6UiGMgczeBCI=
k8s.io/cri-api v0.20.4/go.mod h1:2JRbKt+BFLTjtrILYVqQK5jqhI+XNdF6UiGMgczeBCI=
k8s.io/cri-api v0.20.6/go.mod h1:ew44AjNXwyn1s0U4xCKGodU7J1HzBeZ1MpGrpa5r8Yc=
-k8s.io/cri-api v0.22.6/go.mod h1:uAw9CICQq20/1yB4ZnWT2TjJyMMROl4typFfWaURLwQ=
+k8s.io/cri-api v0.22.17/go.mod h1:uAw9CICQq20/1yB4ZnWT2TjJyMMROl4typFfWaURLwQ=
k8s.io/cri-api v0.23.0/go.mod h1:2edENu3/mkyW3c6fVPPPaVGEFbLRacJizBbSp7ZOLOo=
-k8s.io/csi-translation-lib v0.22.6/go.mod h1:/O6a26XNs3xsiAZeHUF75dDQt8dyT53c5C8JBSryLTQ=
+k8s.io/csi-translation-lib v0.22.17/go.mod h1:TMR+uzEzgOpteROGWZr2Xji4WUTOJL1vLArRkbWRc8o=
k8s.io/csi-translation-lib v0.23.0/go.mod h1:ho0ljka+BEcdlvFrG08L8FpYi6QJeSGgQLWeVOAeeM8=
k8s.io/gengo v0.0.0-20200413195148-3a45101e95ac/go.mod h1:ezvh/TsK7cY6rbqRK0oQQ8IAqLxYwwyPxAX1Pzy0ii0=
k8s.io/gengo v0.0.0-20201214224949-b6c5ce23f027/go.mod h1:FiNAH4ZV3gBg2Kwh89tzAEV2be7d5xI0vBa/VySYy3E=
@@ -2294,18 +2221,18 @@ k8s.io/kube-openapi v0.0.0-20211109043538-20434351676c/go.mod h1:vHXdDvt9+2spS2R
k8s.io/kube-openapi v0.0.0-20211115234752-e816edb12b65 h1:E3J9oCLlaobFUqsjG9DfKbP2BmgwBL2p7pn0A3dG9W4=
k8s.io/kube-openapi v0.0.0-20211115234752-e816edb12b65/go.mod h1:sX9MT8g7NVZM5lVL/j8QyCCJe8YSMW30QvGZWaCIDIk=
k8s.io/kube-proxy v0.23.0/go.mod h1:AFPfNIiOeDhHVtfN7ZfE1Wd8aP5qYov3khPu4VFeBb4=
-k8s.io/kube-scheduler v0.22.6/go.mod h1:DcHj6ixvb0M1PvWFbg133a1pz/vv7OSCgZUDU/UUhlU=
+k8s.io/kube-scheduler v0.22.17/go.mod h1:PuJ2MLedyBpXIVaVahO6nB7RQ8TlxAz7AlEWDh+U0Bk=
k8s.io/kube-scheduler v0.23.0/go.mod h1:BXDjbJEXtr9PU5/XzLtWMNG6Mid4GYBSGVWzP72UxKk=
k8s.io/kubectl v0.23.0/go.mod h1:TfcGEs3u4dkmoC2eku1GYymdGaMtPMcaLLFrX/RB2kI=
-k8s.io/kubelet v0.22.6/go.mod h1:/nSfVw7oYzpmLn8Ua2q2Zix09Fq5gpDGnNqTbab9wts=
+k8s.io/kubelet v0.22.17/go.mod h1:ENo6Nj79qsYEU7bU4DYzlZwE9kxLQdPdzFSshIdepio=
k8s.io/kubelet v0.23.0/go.mod h1:A4DxfIt5Ka+rz54HAFhs1bgiFjJT6lcaAYUcACZl1/k=
k8s.io/kubernetes v1.13.0/go.mod h1:ocZa8+6APFNC2tX1DZASIbocyYT5jHzqFVsY5aoB7Jk=
-k8s.io/kubernetes v1.22.6/go.mod h1:l2ikQCpfvsMAXgL7FDtzgn/AVdjt4XGUYHMXn2vuzYI=
+k8s.io/kubernetes v1.22.17/go.mod h1:4jiaBaIxIZcb9Y5IKxnavbaCCK623RcD4gEVFDXMLmY=
k8s.io/kubernetes v1.23.0 h1:r2DrryCpnmFfBuelpUNSWXHtD6Zy7SdwaCcycV5DsJE=
k8s.io/kubernetes v1.23.0/go.mod h1:sgD3+Qzb8FHlRKlZnNCN+np3zZuHEAb/0PKLJkYyCUI=
k8s.io/legacy-cloud-providers v0.23.0/go.mod h1:tM5owPlhLyEYJC2FLHgcGu1jks5ANvH2JlY03mnUYU4=
k8s.io/metrics v0.23.0/go.mod h1:NDiZTwppEtAuKJ1Rxt3S4dhyRzdp6yUcJf0vo023dPo=
-k8s.io/mount-utils v0.22.6/go.mod h1:dHl6c2P60T5LHUnZxVslyly9EDCMzvhtISO5aY+Z4sk=
+k8s.io/mount-utils v0.22.17/go.mod h1:u46CSxVRyVj/49xJYn8QzjhYqcvB9w6qufKKiyLKhnw=
k8s.io/mount-utils v0.23.0/go.mod h1:9pFhzVjxle1osJUo++9MFDat9HPkQUOoHCn+eExZ3Ew=
k8s.io/pod-security-admission v0.23.0/go.mod h1:vGExA081PHZFK9Yma4kuPtfWwy5zxbEUhniiUDKFicM=
k8s.io/sample-apiserver v0.23.0/go.mod h1:o0U/1hkfndbnLg1OfVHQiG08lmDkYJq7qljCuwjoTrI=
@@ -2318,8 +2245,8 @@ k8s.io/utils v0.0.0-20210819203725-bdf08cb9a70a/go.mod h1:jPW/WVKK9YHAvNhRxK0md/
k8s.io/utils v0.0.0-20210930125809-cb0fa318a74b/go.mod h1:jPW/WVKK9YHAvNhRxK0md/EJ228hCsBRufyofKtW8HA=
k8s.io/utils v0.0.0-20211116205334-6203023598ed h1:ck1fRPWPJWsMd8ZRFsWc6mh/zHp5fZ/shhbrgPUxDAE=
k8s.io/utils v0.0.0-20211116205334-6203023598ed/go.mod h1:jPW/WVKK9YHAvNhRxK0md/EJ228hCsBRufyofKtW8HA=
-lukechampine.com/blake3 v1.1.7 h1:GgRMhmdsuK8+ii6UZFDL8Nb+VyMwadAgcJyfYHxG6n0=
-lukechampine.com/blake3 v1.1.7/go.mod h1:tkKEOtDkNtklkXtLNEOGNq5tcV90tJiA1vAA12R78LA=
+lukechampine.com/blake3 v1.2.1 h1:YuqqRuaqsGV71BV/nm9xlI0MKUv4QC54jQnBChWbGnI=
+lukechampine.com/blake3 v1.2.1/go.mod h1:0OFRp7fBtAylGVCO40o87sbupkyIGgbpv1+M1k1LM6k=
modernc.org/cc v1.0.0/go.mod h1:1Sk4//wdnYJiUIxnW8ddKpaOJCF37yAdqYnkxUpaYxw=
modernc.org/golex v1.0.0/go.mod h1:b/QX9oBD/LhixY6NDh+IdGv17hgB+51fET1i2kPSmvk=
modernc.org/mathutil v1.0.0/go.mod h1:wU0vUrJsVWBZ4P6e7xtFJEhFSNsfRLJ8H458uRjg03k=
diff --git a/pkg/apis/module/module.go b/pkg/apis/module/module.go
index 85e7fce53..6d30a1956 100644
--- a/pkg/apis/module/module.go
+++ b/pkg/apis/module/module.go
@@ -16,9 +16,9 @@ type module interface {
Shutdown()
}
-func Initialize(coreModules map[string]core.Module) error {
+func Initialize(coreModules map[string]*core.ModuleInfo) error {
for _, coreModule := range coreModules {
- m, ok := coreModule.(module)
+ m, ok := coreModule.GetModule().(module)
if !ok {
return fmt.Errorf("can't convert %T to module", coreModule)
}
diff --git a/pkg/tunnel/module.go b/pkg/tunnel/module.go
index eefc18376..9467f0e36 100644
--- a/pkg/tunnel/module.go
+++ b/pkg/tunnel/module.go
@@ -161,10 +161,10 @@ func newEdgeTunnel(c *v1alpha1.EdgeTunnelConfig) (*EdgeTunnel, error) {
ddht, err = newDHT(ctx, h, relayMap)
return ddht, err
}),
- libp2p.EnableAutoRelay(
- autorelay.WithPeerSource(func(numPeers int) <-chan peer.AddrInfo {
+ libp2p.EnableAutoRelayWithPeerSource(
+ func(ctx context.Context, numPeers int) <-chan peer.AddrInfo {
return peerSource
- }, 15*time.Second),
+ },
autorelay.WithMinCandidates(0),
autorelay.WithMaxCandidates(c.MaxCandidates),
autorelay.WithBackoff(30*time.Second),
diff --git a/pkg/tunnel/tunnel.go b/pkg/tunnel/tunnel.go
index 0edb93334..fcabf7005 100644
--- a/pkg/tunnel/tunnel.go
+++ b/pkg/tunnel/tunnel.go
@@ -28,7 +28,6 @@ import (
ma "github.com/multiformats/go-multiaddr"
manet "github.com/multiformats/go-multiaddr/net"
"github.com/prometheus/client_golang/prometheus"
- "go.opencensus.io/stats/view"
"k8s.io/apimachinery/pkg/util/wait"
"k8s.io/klog/v2"
"sigs.k8s.io/yaml"
@@ -715,11 +714,7 @@ func (t *EdgeTunnel) runMetricsServer() {
}
klog.Infof("Starting Metrics service")
- err := view.Register(obs.DefaultViews...)
- if err != nil {
- klog.Errorf("Failed to register view error: %v", err)
- return
- }
+ obs.MustRegisterWith(prometheus.DefaultRegisterer)
exporter, err := ocprom.NewExporter(ocprom.Options{
Registry: prometheus.DefaultRegisterer.(*prometheus.Registry),
})
diff --git a/tests/e2e/k8s/http.go b/tests/e2e/k8s/http.go
index 04d8e129f..1cc83c5cc 100644
--- a/tests/e2e/k8s/http.go
+++ b/tests/e2e/k8s/http.go
@@ -6,7 +6,7 @@ import (
"net/http"
"time"
- "github.com/onsi/ginkgo"
+ "github.com/onsi/ginkgo/v2"
"github.com/kubeedge/kubeedge/tests/e2e/utils"
)
diff --git a/tests/e2e/scripts/execute.sh b/tests/e2e/scripts/execute.sh
index 6196952eb..d39568436 100755
--- a/tests/e2e/scripts/execute.sh
+++ b/tests/e2e/scripts/execute.sh
@@ -25,7 +25,7 @@ curpath=$PWD
echo $PWD
which ginkgo &> /dev/null || (
- go get github.com/onsi/ginkgo/ginkgo
+ go get github.com/onsi/ginkgo/v2/ginkgo
)
cleanup() {
diff --git a/tests/e2e/traffic/traffic_suite_test.go b/tests/e2e/traffic/traffic_suite_test.go
index 2270d663a..4e2725d3f 100644
--- a/tests/e2e/traffic/traffic_suite_test.go
+++ b/tests/e2e/traffic/traffic_suite_test.go
@@ -7,7 +7,7 @@ import (
"testing"
"time"
- "github.com/onsi/ginkgo"
+ "github.com/onsi/ginkgo/v2"
"github.com/onsi/gomega"
"github.com/spf13/pflag"
diff --git a/tests/e2e/traffic/traffic_test.go b/tests/e2e/traffic/traffic_test.go
index a96b7a0b5..4e95b1157 100644
--- a/tests/e2e/traffic/traffic_test.go
+++ b/tests/e2e/traffic/traffic_test.go
@@ -5,7 +5,7 @@ import (
"net/http"
"time"
- "github.com/onsi/ginkgo"
+ "github.com/onsi/ginkgo/v2"
"github.com/onsi/gomega"
appsv1 "k8s.io/api/apps/v1"
diff --git a/vendor/github.com/benbjohnson/clock/clock.go b/vendor/github.com/benbjohnson/clock/clock.go
index 40555b303..14ddc0795 100644
--- a/vendor/github.com/benbjohnson/clock/clock.go
+++ b/vendor/github.com/benbjohnson/clock/clock.go
@@ -74,7 +74,10 @@ func (c *clock) WithTimeout(parent context.Context, t time.Duration) (context.Co
// Mock represents a mock clock that only moves forward programmically.
// It can be preferable to a real-time clock when testing time-based functionality.
type Mock struct {
- mu sync.Mutex
+ // mu protects all other fields in this struct, and the data that they
+ // point to.
+ mu sync.Mutex
+
now time.Time // current time
timers clockTimers // tickers & timers
}
@@ -89,7 +92,9 @@ func NewMock() *Mock {
// This should only be called from a single goroutine at a time.
func (m *Mock) Add(d time.Duration) {
// Calculate the final current time.
+ m.mu.Lock()
t := m.now.Add(d)
+ m.mu.Unlock()
// Continue to execute timers until there are no more before the new time.
for {
@@ -126,6 +131,23 @@ func (m *Mock) Set(t time.Time) {
gosched()
}
+// WaitForAllTimers sets the clock until all timers are expired
+func (m *Mock) WaitForAllTimers() time.Time {
+ // Continue to execute timers until there are no more
+ for {
+ m.mu.Lock()
+ if len(m.timers) == 0 {
+ m.mu.Unlock()
+ return m.Now()
+ }
+
+ sort.Sort(m.timers)
+ next := m.timers[len(m.timers)-1].Next()
+ m.mu.Unlock()
+ m.Set(next)
+ }
+}
+
// runNextTimer executes the next timer in chronological order and moves the
// current time to the timer's next tick time. The next time is not executed if
// its next time is after the max time. Returns true if a timer was executed.
@@ -150,10 +172,11 @@ func (m *Mock) runNextTimer(max time.Time) bool {
// Move "now" forward and unlock clock.
m.now = t.Next()
+ now := m.now
m.mu.Unlock()
// Execute timer.
- t.Tick(m.now)
+ t.Tick(now)
return true
}
@@ -162,12 +185,20 @@ func (m *Mock) After(d time.Duration) <-chan time.Time {
return m.Timer(d).C
}
-// AfterFunc waits for the duration to elapse and then executes a function.
+// AfterFunc waits for the duration to elapse and then executes a function in its own goroutine.
// A Timer is returned that can be stopped.
func (m *Mock) AfterFunc(d time.Duration, f func()) *Timer {
- t := m.Timer(d)
- t.C = nil
- t.fn = f
+ m.mu.Lock()
+ defer m.mu.Unlock()
+ ch := make(chan time.Time, 1)
+ t := &Timer{
+ c: ch,
+ fn: f,
+ mock: m,
+ next: m.now.Add(d),
+ stopped: false,
+ }
+ m.timers = append(m.timers, (*internalTimer)(t))
return t
}
@@ -219,7 +250,6 @@ func (m *Mock) Ticker(d time.Duration) *Ticker {
// Timer creates a new instance of Timer.
func (m *Mock) Timer(d time.Duration) *Timer {
m.mu.Lock()
- defer m.mu.Unlock()
ch := make(chan time.Time, 1)
t := &Timer{
C: ch,
@@ -229,9 +259,14 @@ func (m *Mock) Timer(d time.Duration) *Timer {
stopped: false,
}
m.timers = append(m.timers, (*internalTimer)(t))
+ now := m.now
+ m.mu.Unlock()
+ m.runNextTimer(now)
return t
}
+// removeClockTimer removes a timer from m.timers. m.mu MUST be held
+// when this method is called.
func (m *Mock) removeClockTimer(t clockTimer) {
for i, timer := range m.timers {
if timer == t {
@@ -313,7 +348,7 @@ func (t *internalTimer) Tick(now time.Time) {
t.mock.mu.Lock()
if t.fn != nil {
// defer function execution until the lock is released, and
- defer t.fn()
+ defer func() { go t.fn() }()
} else {
t.c <- now
}
@@ -324,12 +359,13 @@ func (t *internalTimer) Tick(now time.Time) {
// Ticker holds a channel that receives "ticks" at regular intervals.
type Ticker struct {
- C <-chan time.Time
- c chan time.Time
- ticker *time.Ticker // realtime impl, if set
- next time.Time // next tick time
- mock *Mock // mock clock, if set
- d time.Duration // time between ticks
+ C <-chan time.Time
+ c chan time.Time
+ ticker *time.Ticker // realtime impl, if set
+ next time.Time // next tick time
+ mock *Mock // mock clock, if set
+ d time.Duration // time between ticks
+ stopped bool // True if stopped, false if running
}
// Stop turns off the ticker.
@@ -339,6 +375,7 @@ func (t *Ticker) Stop() {
} else {
t.mock.mu.Lock()
t.mock.removeClockTimer((*internalTicker)(t))
+ t.stopped = true
t.mock.mu.Unlock()
}
}
@@ -353,6 +390,11 @@ func (t *Ticker) Reset(dur time.Duration) {
t.mock.mu.Lock()
defer t.mock.mu.Unlock()
+ if t.stopped {
+ t.mock.timers = append(t.mock.timers, (*internalTicker)(t))
+ t.stopped = false
+ }
+
t.d = dur
t.next = t.mock.now.Add(dur)
}
@@ -365,7 +407,9 @@ func (t *internalTicker) Tick(now time.Time) {
case t.c <- now:
default:
}
+ t.mock.mu.Lock()
t.next = now.Add(t.d)
+ t.mock.mu.Unlock()
gosched()
}
diff --git a/vendor/github.com/cespare/xxhash/v2/README.md b/vendor/github.com/cespare/xxhash/v2/README.md
index 792b4a60b..8bf0e5b78 100644
--- a/vendor/github.com/cespare/xxhash/v2/README.md
+++ b/vendor/github.com/cespare/xxhash/v2/README.md
@@ -3,8 +3,7 @@
[![Go Reference](https://pkg.go.dev/badge/github.com/cespare/xxhash/v2.svg)](https://pkg.go.dev/github.com/cespare/xxhash/v2)
[![Test](https://github.com/cespare/xxhash/actions/workflows/test.yml/badge.svg)](https://github.com/cespare/xxhash/actions/workflows/test.yml)
-xxhash is a Go implementation of the 64-bit
-[xxHash](http://cyan4973.github.io/xxHash/) algorithm, XXH64. This is a
+xxhash is a Go implementation of the 64-bit [xxHash] algorithm, XXH64. This is a
high-quality hashing algorithm that is much faster than anything in the Go
standard library.
@@ -25,8 +24,11 @@ func (*Digest) WriteString(string) (int, error)
func (*Digest) Sum64() uint64
```
-This implementation provides a fast pure-Go implementation and an even faster
-assembly implementation for amd64.
+The package is written with optimized pure Go and also contains even faster
+assembly implementations for amd64 and arm64. If desired, the `purego` build tag
+opts into using the Go code even on those architectures.
+
+[xxHash]: http://cyan4973.github.io/xxHash/
## Compatibility
@@ -45,19 +47,20 @@ I recommend using the latest release of Go.
Here are some quick benchmarks comparing the pure-Go and assembly
implementations of Sum64.
-| input size | purego | asm |
-| --- | --- | --- |
-| 5 B | 979.66 MB/s | 1291.17 MB/s |
-| 100 B | 7475.26 MB/s | 7973.40 MB/s |
-| 4 KB | 17573.46 MB/s | 17602.65 MB/s |
-| 10 MB | 17131.46 MB/s | 17142.16 MB/s |
+| input size | purego | asm |
+| ---------- | --------- | --------- |
+| 4 B | 1.3 GB/s | 1.2 GB/s |
+| 16 B | 2.9 GB/s | 3.5 GB/s |
+| 100 B | 6.9 GB/s | 8.1 GB/s |
+| 4 KB | 11.7 GB/s | 16.7 GB/s |
+| 10 MB | 12.0 GB/s | 17.3 GB/s |
-These numbers were generated on Ubuntu 18.04 with an Intel i7-8700K CPU using
-the following commands under Go 1.11.2:
+These numbers were generated on Ubuntu 20.04 with an Intel Xeon Platinum 8252C
+CPU using the following commands under Go 1.19.2:
```
-$ go test -tags purego -benchtime 10s -bench '/xxhash,direct,bytes'
-$ go test -benchtime 10s -bench '/xxhash,direct,bytes'
+benchstat <(go test -tags purego -benchtime 500ms -count 15 -bench 'Sum64$')
+benchstat <(go test -benchtime 500ms -count 15 -bench 'Sum64$')
```
## Projects using this package
diff --git a/vendor/github.com/cespare/xxhash/v2/testall.sh b/vendor/github.com/cespare/xxhash/v2/testall.sh
new file mode 100644
index 000000000..94b9c4439
--- /dev/null
+++ b/vendor/github.com/cespare/xxhash/v2/testall.sh
@@ -0,0 +1,10 @@
+#!/bin/bash
+set -eu -o pipefail
+
+# Small convenience script for running the tests with various combinations of
+# arch/tags. This assumes we're running on amd64 and have qemu available.
+
+go test ./...
+go test -tags purego ./...
+GOARCH=arm64 go test
+GOARCH=arm64 go test -tags purego
diff --git a/vendor/github.com/cespare/xxhash/v2/xxhash.go b/vendor/github.com/cespare/xxhash/v2/xxhash.go
index 15c835d54..a9e0d45c9 100644
--- a/vendor/github.com/cespare/xxhash/v2/xxhash.go
+++ b/vendor/github.com/cespare/xxhash/v2/xxhash.go
@@ -16,19 +16,11 @@ const (
prime5 uint64 = 2870177450012600261
)
-// NOTE(caleb): I'm using both consts and vars of the primes. Using consts where
-// possible in the Go code is worth a small (but measurable) performance boost
-// by avoiding some MOVQs. Vars are needed for the asm and also are useful for
-// convenience in the Go code in a few places where we need to intentionally
-// avoid constant arithmetic (e.g., v1 := prime1 + prime2 fails because the
-// result overflows a uint64).
-var (
- prime1v = prime1
- prime2v = prime2
- prime3v = prime3
- prime4v = prime4
- prime5v = prime5
-)
+// Store the primes in an array as well.
+//
+// The consts are used when possible in Go code to avoid MOVs but we need a
+// contiguous array of the assembly code.
+var primes = [...]uint64{prime1, prime2, prime3, prime4, prime5}
// Digest implements hash.Hash64.
type Digest struct {
@@ -50,10 +42,10 @@ func New() *Digest {
// Reset clears the Digest's state so that it can be reused.
func (d *Digest) Reset() {
- d.v1 = prime1v + prime2
+ d.v1 = primes[0] + prime2
d.v2 = prime2
d.v3 = 0
- d.v4 = -prime1v
+ d.v4 = -primes[0]
d.total = 0
d.n = 0
}
@@ -69,21 +61,23 @@ func (d *Digest) Write(b []byte) (n int, err error) {
n = len(b)
d.total += uint64(n)
+ memleft := d.mem[d.n&(len(d.mem)-1):]
+
if d.n+n < 32 {
// This new data doesn't even fill the current block.
- copy(d.mem[d.n:], b)
+ copy(memleft, b)
d.n += n
return
}
if d.n > 0 {
// Finish off the partial block.
- copy(d.mem[d.n:], b)
+ c := copy(memleft, b)
d.v1 = round(d.v1, u64(d.mem[0:8]))
d.v2 = round(d.v2, u64(d.mem[8:16]))
d.v3 = round(d.v3, u64(d.mem[16:24]))
d.v4 = round(d.v4, u64(d.mem[24:32]))
- b = b[32-d.n:]
+ b = b[c:]
d.n = 0
}
@@ -133,21 +127,20 @@ func (d *Digest) Sum64() uint64 {
h += d.total
- i, end := 0, d.n
- for ; i+8 <= end; i += 8 {
- k1 := round(0, u64(d.mem[i:i+8]))
+ b := d.mem[:d.n&(len(d.mem)-1)]
+ for ; len(b) >= 8; b = b[8:] {
+ k1 := round(0, u64(b[:8]))
h ^= k1
h = rol27(h)*prime1 + prime4
}
- if i+4 <= end {
- h ^= uint64(u32(d.mem[i:i+4])) * prime1
+ if len(b) >= 4 {
+ h ^= uint64(u32(b[:4])) * prime1
h = rol23(h)*prime2 + prime3
- i += 4
+ b = b[4:]
}
- for i < end {
- h ^= uint64(d.mem[i]) * prime5
+ for ; len(b) > 0; b = b[1:] {
+ h ^= uint64(b[0]) * prime5
h = rol11(h) * prime1
- i++
}
h ^= h >> 33
diff --git a/vendor/github.com/cespare/xxhash/v2/xxhash_amd64.s b/vendor/github.com/cespare/xxhash/v2/xxhash_amd64.s
index be8db5bf7..3e8b13257 100644
--- a/vendor/github.com/cespare/xxhash/v2/xxhash_amd64.s
+++ b/vendor/github.com/cespare/xxhash/v2/xxhash_amd64.s
@@ -1,215 +1,209 @@
+//go:build !appengine && gc && !purego
// +build !appengine
// +build gc
// +build !purego
#include "textflag.h"
-// Register allocation:
-// AX h
-// SI pointer to advance through b
-// DX n
-// BX loop end
-// R8 v1, k1
-// R9 v2
-// R10 v3
-// R11 v4
-// R12 tmp
-// R13 prime1v
-// R14 prime2v
-// DI prime4v
-
-// round reads from and advances the buffer pointer in SI.
-// It assumes that R13 has prime1v and R14 has prime2v.
-#define round(r) \
- MOVQ (SI), R12 \
- ADDQ $8, SI \
- IMULQ R14, R12 \
- ADDQ R12, r \
- ROLQ $31, r \
- IMULQ R13, r
-
-// mergeRound applies a merge round on the two registers acc and val.
-// It assumes that R13 has prime1v, R14 has prime2v, and DI has prime4v.
-#define mergeRound(acc, val) \
- IMULQ R14, val \
- ROLQ $31, val \
- IMULQ R13, val \
- XORQ val, acc \
- IMULQ R13, acc \
- ADDQ DI, acc
+// Registers:
+#define h AX
+#define d AX
+#define p SI // pointer to advance through b
+#define n DX
+#define end BX // loop end
+#define v1 R8
+#define v2 R9
+#define v3 R10
+#define v4 R11
+#define x R12
+#define prime1 R13
+#define prime2 R14
+#define prime4 DI
+
+#define round(acc, x) \
+ IMULQ prime2, x \
+ ADDQ x, acc \
+ ROLQ $31, acc \
+ IMULQ prime1, acc
+
+// round0 performs the operation x = round(0, x).
+#define round0(x) \
+ IMULQ prime2, x \
+ ROLQ $31, x \
+ IMULQ prime1, x
+
+// mergeRound applies a merge round on the two registers acc and x.
+// It assumes that prime1, prime2, and prime4 have been loaded.
+#define mergeRound(acc, x) \
+ round0(x) \
+ XORQ x, acc \
+ IMULQ prime1, acc \
+ ADDQ prime4, acc
+
+// blockLoop processes as many 32-byte blocks as possible,
+// updating v1, v2, v3, and v4. It assumes that there is at least one block
+// to process.
+#define blockLoop() \
+loop: \
+ MOVQ +0(p), x \
+ round(v1, x) \
+ MOVQ +8(p), x \
+ round(v2, x) \
+ MOVQ +16(p), x \
+ round(v3, x) \
+ MOVQ +24(p), x \
+ round(v4, x) \
+ ADDQ $32, p \
+ CMPQ p, end \
+ JLE loop
// func Sum64(b []byte) uint64
-TEXT ·Sum64(SB), NOSPLIT, $0-32
+TEXT ·Sum64(SB), NOSPLIT|NOFRAME, $0-32
// Load fixed primes.
- MOVQ ·prime1v(SB), R13
- MOVQ ·prime2v(SB), R14
- MOVQ ·prime4v(SB), DI
+ MOVQ ·primes+0(SB), prime1
+ MOVQ ·primes+8(SB), prime2
+ MOVQ ·primes+24(SB), prime4
// Load slice.
- MOVQ b_base+0(FP), SI
- MOVQ b_len+8(FP), DX
- LEAQ (SI)(DX*1), BX
+ MOVQ b_base+0(FP), p
+ MOVQ b_len+8(FP), n
+ LEAQ (p)(n*1), end
// The first loop limit will be len(b)-32.
- SUBQ $32, BX
+ SUBQ $32, end
// Check whether we have at least one block.
- CMPQ DX, $32
+ CMPQ n, $32
JLT noBlocks
// Set up initial state (v1, v2, v3, v4).
- MOVQ R13, R8
- ADDQ R14, R8
- MOVQ R14, R9
- XORQ R10, R10
- XORQ R11, R11
- SUBQ R13, R11
-
- // Loop until SI > BX.
-blockLoop:
- round(R8)
- round(R9)
- round(R10)
- round(R11)
-
- CMPQ SI, BX
- JLE blockLoop
-
- MOVQ R8, AX
- ROLQ $1, AX
- MOVQ R9, R12
- ROLQ $7, R12
- ADDQ R12, AX
- MOVQ R10, R12
- ROLQ $12, R12
- ADDQ R12, AX
- MOVQ R11, R12
- ROLQ $18, R12
- ADDQ R12, AX
-
- mergeRound(AX, R8)
- mergeRound(AX, R9)
- mergeRound(AX, R10)
- mergeRound(AX, R11)
+ MOVQ prime1, v1
+ ADDQ prime2, v1
+ MOVQ prime2, v2
+ XORQ v3, v3
+ XORQ v4, v4
+ SUBQ prime1, v4
+
+ blockLoop()
+
+ MOVQ v1, h
+ ROLQ $1, h
+ MOVQ v2, x
+ ROLQ $7, x
+ ADDQ x, h
+ MOVQ v3, x
+ ROLQ $12, x
+ ADDQ x, h
+ MOVQ v4, x
+ ROLQ $18, x
+ ADDQ x, h
+
+ mergeRound(h, v1)
+ mergeRound(h, v2)
+ mergeRound(h, v3)
+ mergeRound(h, v4)
JMP afterBlocks
noBlocks:
- MOVQ ·prime5v(SB), AX
+ MOVQ ·primes+32(SB), h
afterBlocks:
- ADDQ DX, AX
-
- // Right now BX has len(b)-32, and we want to loop until SI > len(b)-8.
- ADDQ $24, BX
-
- CMPQ SI, BX
- JG fourByte
-
-wordLoop:
- // Calculate k1.
- MOVQ (SI), R8
- ADDQ $8, SI
- IMULQ R14, R8
- ROLQ $31, R8
- IMULQ R13, R8
-
- XORQ R8, AX
- ROLQ $27, AX
- IMULQ R13, AX
- ADDQ DI, AX
-
- CMPQ SI, BX
- JLE wordLoop
-
-fourByte:
- ADDQ $4, BX
- CMPQ SI, BX
- JG singles
-
- MOVL (SI), R8
- ADDQ $4, SI
- IMULQ R13, R8
- XORQ R8, AX
-
- ROLQ $23, AX
- IMULQ R14, AX
- ADDQ ·prime3v(SB), AX
-
-singles:
- ADDQ $4, BX
- CMPQ SI, BX
+ ADDQ n, h
+
+ ADDQ $24, end
+ CMPQ p, end
+ JG try4
+
+loop8:
+ MOVQ (p), x
+ ADDQ $8, p
+ round0(x)
+ XORQ x, h
+ ROLQ $27, h
+ IMULQ prime1, h
+ ADDQ prime4, h
+
+ CMPQ p, end
+ JLE loop8
+
+try4:
+ ADDQ $4, end
+ CMPQ p, end
+ JG try1
+
+ MOVL (p), x
+ ADDQ $4, p
+ IMULQ prime1, x
+ XORQ x, h
+
+ ROLQ $23, h
+ IMULQ prime2, h
+ ADDQ ·primes+16(SB), h
+
+try1:
+ ADDQ $4, end
+ CMPQ p, end
JGE finalize
-singlesLoop:
- MOVBQZX (SI), R12
- ADDQ $1, SI
- IMULQ ·prime5v(SB), R12
- XORQ R12, AX
+loop1:
+ MOVBQZX (p), x
+ ADDQ $1, p
+ IMULQ ·primes+32(SB), x
+ XORQ x, h
+ ROLQ $11, h
+ IMULQ prime1, h
- ROLQ $11, AX
- IMULQ R13, AX
-
- CMPQ SI, BX
- JL singlesLoop
+ CMPQ p, end
+ JL loop1
finalize:
- MOVQ AX, R12
- SHRQ $33, R12
- XORQ R12, AX
- IMULQ R14, AX
- MOVQ AX, R12
- SHRQ $29, R12
- XORQ R12, AX
- IMULQ ·prime3v(SB), AX
- MOVQ AX, R12
- SHRQ $32, R12
- XORQ R12, AX
-
- MOVQ AX, ret+24(FP)
+ MOVQ h, x
+ SHRQ $33, x
+ XORQ x, h
+ IMULQ prime2, h
+ MOVQ h, x
+ SHRQ $29, x
+ XORQ x, h
+ IMULQ ·primes+16(SB), h
+ MOVQ h, x
+ SHRQ $32, x
+ XORQ x, h
+
+ MOVQ h, ret+24(FP)
RET
-// writeBlocks uses the same registers as above except that it uses AX to store
-// the d pointer.
-
// func writeBlocks(d *Digest, b []byte) int
-TEXT ·writeBlocks(SB), NOSPLIT, $0-40
+TEXT ·writeBlocks(SB), NOSPLIT|NOFRAME, $0-40
// Load fixed primes needed for round.
- MOVQ ·prime1v(SB), R13
- MOVQ ·prime2v(SB), R14
+ MOVQ ·primes+0(SB), prime1
+ MOVQ ·primes+8(SB), prime2
// Load slice.
- MOVQ b_base+8(FP), SI
- MOVQ b_len+16(FP), DX
- LEAQ (SI)(DX*1), BX
- SUBQ $32, BX
+ MOVQ b_base+8(FP), p
+ MOVQ b_len+16(FP), n
+ LEAQ (p)(n*1), end
+ SUBQ $32, end
// Load vN from d.
- MOVQ d+0(FP), AX
- MOVQ 0(AX), R8 // v1
- MOVQ 8(AX), R9 // v2
- MOVQ 16(AX), R10 // v3
- MOVQ 24(AX), R11 // v4
+ MOVQ s+0(FP), d
+ MOVQ 0(d), v1
+ MOVQ 8(d), v2
+ MOVQ 16(d), v3
+ MOVQ 24(d), v4
// We don't need to check the loop condition here; this function is
// always called with at least one block of data to process.
-blockLoop:
- round(R8)
- round(R9)
- round(R10)
- round(R11)
-
- CMPQ SI, BX
- JLE blockLoop
+ blockLoop()
// Copy vN back to d.
- MOVQ R8, 0(AX)
- MOVQ R9, 8(AX)
- MOVQ R10, 16(AX)
- MOVQ R11, 24(AX)
-
- // The number of bytes written is SI minus the old base pointer.
- SUBQ b_base+8(FP), SI
- MOVQ SI, ret+32(FP)
+ MOVQ v1, 0(d)
+ MOVQ v2, 8(d)
+ MOVQ v3, 16(d)
+ MOVQ v4, 24(d)
+
+ // The number of bytes written is p minus the old base pointer.
+ SUBQ b_base+8(FP), p
+ MOVQ p, ret+32(FP)
RET
diff --git a/vendor/github.com/cespare/xxhash/v2/xxhash_arm64.s b/vendor/github.com/cespare/xxhash/v2/xxhash_arm64.s
new file mode 100644
index 000000000..7e3145a22
--- /dev/null
+++ b/vendor/github.com/cespare/xxhash/v2/xxhash_arm64.s
@@ -0,0 +1,183 @@
+//go:build !appengine && gc && !purego
+// +build !appengine
+// +build gc
+// +build !purego
+
+#include "textflag.h"
+
+// Registers:
+#define digest R1
+#define h R2 // return value
+#define p R3 // input pointer
+#define n R4 // input length
+#define nblocks R5 // n / 32
+#define prime1 R7
+#define prime2 R8
+#define prime3 R9
+#define prime4 R10
+#define prime5 R11
+#define v1 R12
+#define v2 R13
+#define v3 R14
+#define v4 R15
+#define x1 R20
+#define x2 R21
+#define x3 R22
+#define x4 R23
+
+#define round(acc, x) \
+ MADD prime2, acc, x, acc \
+ ROR $64-31, acc \
+ MUL prime1, acc
+
+// round0 performs the operation x = round(0, x).
+#define round0(x) \
+ MUL prime2, x \
+ ROR $64-31, x \
+ MUL prime1, x
+
+#define mergeRound(acc, x) \
+ round0(x) \
+ EOR x, acc \
+ MADD acc, prime4, prime1, acc
+
+// blockLoop processes as many 32-byte blocks as possible,
+// updating v1, v2, v3, and v4. It assumes that n >= 32.
+#define blockLoop() \
+ LSR $5, n, nblocks \
+ PCALIGN $16 \
+ loop: \
+ LDP.P 16(p), (x1, x2) \
+ LDP.P 16(p), (x3, x4) \
+ round(v1, x1) \
+ round(v2, x2) \
+ round(v3, x3) \
+ round(v4, x4) \
+ SUB $1, nblocks \
+ CBNZ nblocks, loop
+
+// func Sum64(b []byte) uint64
+TEXT ·Sum64(SB), NOSPLIT|NOFRAME, $0-32
+ LDP b_base+0(FP), (p, n)
+
+ LDP ·primes+0(SB), (prime1, prime2)
+ LDP ·primes+16(SB), (prime3, prime4)
+ MOVD ·primes+32(SB), prime5
+
+ CMP $32, n
+ CSEL LT, prime5, ZR, h // if n < 32 { h = prime5 } else { h = 0 }
+ BLT afterLoop
+
+ ADD prime1, prime2, v1
+ MOVD prime2, v2
+ MOVD $0, v3
+ NEG prime1, v4
+
+ blockLoop()
+
+ ROR $64-1, v1, x1
+ ROR $64-7, v2, x2
+ ADD x1, x2
+ ROR $64-12, v3, x3
+ ROR $64-18, v4, x4
+ ADD x3, x4
+ ADD x2, x4, h
+
+ mergeRound(h, v1)
+ mergeRound(h, v2)
+ mergeRound(h, v3)
+ mergeRound(h, v4)
+
+afterLoop:
+ ADD n, h
+
+ TBZ $4, n, try8
+ LDP.P 16(p), (x1, x2)
+
+ round0(x1)
+
+ // NOTE: here and below, sequencing the EOR after the ROR (using a
+ // rotated register) is worth a small but measurable speedup for small
+ // inputs.
+ ROR $64-27, h
+ EOR x1 @> 64-27, h, h
+ MADD h, prime4, prime1, h
+
+ round0(x2)
+ ROR $64-27, h
+ EOR x2 @> 64-27, h, h
+ MADD h, prime4, prime1, h
+
+try8:
+ TBZ $3, n, try4
+ MOVD.P 8(p), x1
+
+ round0(x1)
+ ROR $64-27, h
+ EOR x1 @> 64-27, h, h
+ MADD h, prime4, prime1, h
+
+try4:
+ TBZ $2, n, try2
+ MOVWU.P 4(p), x2
+
+ MUL prime1, x2
+ ROR $64-23, h
+ EOR x2 @> 64-23, h, h
+ MADD h, prime3, prime2, h
+
+try2:
+ TBZ $1, n, try1
+ MOVHU.P 2(p), x3
+ AND $255, x3, x1
+ LSR $8, x3, x2
+
+ MUL prime5, x1
+ ROR $64-11, h
+ EOR x1 @> 64-11, h, h
+ MUL prime1, h
+
+ MUL prime5, x2
+ ROR $64-11, h
+ EOR x2 @> 64-11, h, h
+ MUL prime1, h
+
+try1:
+ TBZ $0, n, finalize
+ MOVBU (p), x4
+
+ MUL prime5, x4
+ ROR $64-11, h
+ EOR x4 @> 64-11, h, h
+ MUL prime1, h
+
+finalize:
+ EOR h >> 33, h
+ MUL prime2, h
+ EOR h >> 29, h
+ MUL prime3, h
+ EOR h >> 32, h
+
+ MOVD h, ret+24(FP)
+ RET
+
+// func writeBlocks(d *Digest, b []byte) int
+TEXT ·writeBlocks(SB), NOSPLIT|NOFRAME, $0-40
+ LDP ·primes+0(SB), (prime1, prime2)
+
+ // Load state. Assume v[1-4] are stored contiguously.
+ MOVD d+0(FP), digest
+ LDP 0(digest), (v1, v2)
+ LDP 16(digest), (v3, v4)
+
+ LDP b_base+8(FP), (p, n)
+
+ blockLoop()
+
+ // Store updated state.
+ STP (v1, v2), 0(digest)
+ STP (v3, v4), 16(digest)
+
+ BIC $31, n
+ MOVD n, ret+32(FP)
+ RET
diff --git a/vendor/github.com/cespare/xxhash/v2/xxhash_amd64.go b/vendor/github.com/cespare/xxhash/v2/xxhash_asm.go
similarity index 73%
rename from vendor/github.com/cespare/xxhash/v2/xxhash_amd64.go
rename to vendor/github.com/cespare/xxhash/v2/xxhash_asm.go
index ad14b807f..9216e0a40 100644
--- a/vendor/github.com/cespare/xxhash/v2/xxhash_amd64.go
+++ b/vendor/github.com/cespare/xxhash/v2/xxhash_asm.go
@@ -1,3 +1,5 @@
+//go:build (amd64 || arm64) && !appengine && gc && !purego
+// +build amd64 arm64
// +build !appengine
// +build gc
// +build !purego
diff --git a/vendor/github.com/cespare/xxhash/v2/xxhash_other.go b/vendor/github.com/cespare/xxhash/v2/xxhash_other.go
index 4a5a82160..26df13bba 100644
--- a/vendor/github.com/cespare/xxhash/v2/xxhash_other.go
+++ b/vendor/github.com/cespare/xxhash/v2/xxhash_other.go
@@ -1,4 +1,5 @@
-// +build !amd64 appengine !gc purego
+//go:build (!amd64 && !arm64) || appengine || !gc || purego
+// +build !amd64,!arm64 appengine !gc purego
package xxhash
@@ -14,10 +15,10 @@ func Sum64(b []byte) uint64 {
var h uint64
if n >= 32 {
- v1 := prime1v + prime2
+ v1 := primes[0] + prime2
v2 := prime2
v3 := uint64(0)
- v4 := -prime1v
+ v4 := -primes[0]
for len(b) >= 32 {
v1 = round(v1, u64(b[0:8:len(b)]))
v2 = round(v2, u64(b[8:16:len(b)]))
@@ -36,19 +37,18 @@ func Sum64(b []byte) uint64 {
h += uint64(n)
- i, end := 0, len(b)
- for ; i+8 <= end; i += 8 {
- k1 := round(0, u64(b[i:i+8:len(b)]))
+ for ; len(b) >= 8; b = b[8:] {
+ k1 := round(0, u64(b[:8]))
h ^= k1
h = rol27(h)*prime1 + prime4
}
- if i+4 <= end {
- h ^= uint64(u32(b[i:i+4:len(b)])) * prime1
+ if len(b) >= 4 {
+ h ^= uint64(u32(b[:4])) * prime1
h = rol23(h)*prime2 + prime3
- i += 4
+ b = b[4:]
}
- for ; i < end; i++ {
- h ^= uint64(b[i]) * prime5
+ for ; len(b) > 0; b = b[1:] {
+ h ^= uint64(b[0]) * prime5
h = rol11(h) * prime1
}
diff --git a/vendor/github.com/cespare/xxhash/v2/xxhash_safe.go b/vendor/github.com/cespare/xxhash/v2/xxhash_safe.go
index fc9bea7a3..e86f1b5fd 100644
--- a/vendor/github.com/cespare/xxhash/v2/xxhash_safe.go
+++ b/vendor/github.com/cespare/xxhash/v2/xxhash_safe.go
@@ -1,3 +1,4 @@
+//go:build appengine
// +build appengine
// This file contains the safe implementations of otherwise unsafe-using code.
diff --git a/vendor/github.com/cespare/xxhash/v2/xxhash_unsafe.go b/vendor/github.com/cespare/xxhash/v2/xxhash_unsafe.go
index 376e0ca2e..1c1638fd8 100644
--- a/vendor/github.com/cespare/xxhash/v2/xxhash_unsafe.go
+++ b/vendor/github.com/cespare/xxhash/v2/xxhash_unsafe.go
@@ -1,3 +1,4 @@
+//go:build !appengine
// +build !appengine
// This file encapsulates usage of unsafe.
@@ -11,7 +12,7 @@ import (
// In the future it's possible that compiler optimizations will make these
// XxxString functions unnecessary by realizing that calls such as
-// Sum64([]byte(s)) don't need to copy s. See https://golang.org/issue/2205.
+// Sum64([]byte(s)) don't need to copy s. See https://go.dev/issue/2205.
// If that happens, even if we keep these functions they can be replaced with
// the trivial safe code.
diff --git a/vendor/github.com/cheekybits/genny/.gitignore b/vendor/github.com/cheekybits/genny/.gitignore
deleted file mode 100644
index c62d148c2..000000000
--- a/vendor/github.com/cheekybits/genny/.gitignore
+++ /dev/null
@@ -1,26 +0,0 @@
-# Compiled Object files, Static and Dynamic libs (Shared Objects)
-*.o
-*.a
-*.so
-
-# Folders
-_obj
-_test
-
-# Architecture specific extensions/prefixes
-*.[568vq]
-[568vq].out
-
-*.cgo1.go
-*.cgo2.c
-_cgo_defun.c
-_cgo_gotypes.go
-_cgo_export.*
-
-_testmain.go
-
-*.exe
-*.test
-*.prof
-
-genny
diff --git a/vendor/github.com/cheekybits/genny/.travis.yml b/vendor/github.com/cheekybits/genny/.travis.yml
deleted file mode 100644
index 78ba5f2d1..000000000
--- a/vendor/github.com/cheekybits/genny/.travis.yml
+++ /dev/null
@@ -1,6 +0,0 @@
-language: go
-
-go:
- - 1.7
- - 1.8
- - 1.9
diff --git a/vendor/github.com/cheekybits/genny/README.md b/vendor/github.com/cheekybits/genny/README.md
deleted file mode 100644
index 64a28ac72..000000000
--- a/vendor/github.com/cheekybits/genny/README.md
+++ /dev/null
@@ -1,245 +0,0 @@
-# genny - Generics for Go
-
-[![Build Status](https://travis-ci.org/cheekybits/genny.svg?branch=master)](https://travis-ci.org/cheekybits/genny) [![GoDoc](https://godoc.org/github.com/cheekybits/genny/parse?status.png)](http://godoc.org/github.com/cheekybits/genny/parse)
-
-Install:
-
-```
-go get github.com/cheekybits/genny
-```
-
-=====
-
-(pron. Jenny) by Mat Ryer ([@matryer](https://twitter.com/matryer)) and Tyler Bunnell ([@TylerJBunnell](https://twitter.com/TylerJBunnell)).
-
-Until the Go core team include support for [generics in Go](http://golang.org/doc/faq#generics), `genny` is a code-generation generics solution. It allows you write normal buildable and testable Go code which, when processed by the `genny gen` tool, will replace the generics with specific types.
-
- * Generic code is valid Go code
- * Generic code compiles and can be tested
- * Use `stdin` and `stdout` or specify in and out files
- * Supports Go 1.4's [go generate](http://tip.golang.org/doc/go1.4#gogenerate)
- * Multiple specific types will generate every permutation
- * Use `BUILTINS` and `NUMBERS` wildtype to generate specific code for all built-in (and number) Go types
- * Function names and comments also get updated
-
-## Library
-
-We have started building a [library of common things](https://github.com/cheekybits/gennylib), and you can use `genny get` to generate the specific versions you need.
-
-For example: `genny get maps/concurrentmap.go "KeyType=BUILTINS ValueType=BUILTINS"` will print out generated code for all types for a concurrent map. Any file in the library may be generated locally in this way using all the same options given to `genny gen`.
-
-## Usage
-
-```
-genny [{flags}] gen "{types}"
-
-gen - generates type specific code from generic code.
-get - fetch a generic template from the online library and gen it.
-
-{flags} - (optional) Command line flags (see below)
-{types} - (required) Specific types for each generic type in the source
-{types} format: {generic}={specific}[,another][ {generic2}={specific2}]
-
-Examples:
- Generic=Specific
- Generic1=Specific1 Generic2=Specific2
- Generic1=Specific1,Specific2 Generic2=Specific3,Specific4
-
-Flags:
- -in="": file to parse instead of stdin
- -out="": file to save output to instead of stdout
- -pkg="": package name for generated files
-```
-
- * Comma separated type lists will generate code for each type
-
-### Flags
-
- * `-in` - specify the input file (rather than using stdin)
- * `-out` - specify the output file (rather than using stdout)
-
-### go generate
-
-To use Go 1.4's `go generate` capability, insert the following comment in your source code file:
-
-```
-//go:generate genny -in=$GOFILE -out=gen-$GOFILE gen "KeyType=string,int ValueType=string,int"
-```
-
- * Start the line with `//go:generate `
- * Use the `-in` and `-out` flags to specify the files to work on
- * Use the `genny` command as usual after the flags
-
-Now, running `go generate` (in a shell) for the package will cause the generic versions of the files to be generated.
-
- * The output file will be overwritten, so it's safe to call `go generate` many times
- * Use `$GOFILE` to refer to the current file
- * The `//go:generate` line will be removed from the output
-
-To see a real example of how to use `genny` with `go generate`, look in the [example/go-generate directory](https://github.com/cheekybits/genny/tree/master/examples/go-generate).
-
-## How it works
-
-Define your generic types using the special `generic.Type` placeholder type:
-
-```go
-type KeyType generic.Type
-type ValueType generic.Type
-```
-
- * You can use as many as you like
- * Give them meaningful names
-
-Then write the generic code referencing the types as your normally would:
-
-```go
-func SetValueTypeForKeyType(key KeyType, value ValueType) { /* ... */ }
-```
-
- * Generic type names will also be replaced in comments and function names (see Real example below)
-
-Since `generic.Type` is a real Go type, your code will compile, and you can even write unit tests against your generic code.
-
-#### Generating specific versions
-
-Pass the file through the `genny gen` tool with the specific types as the argument:
-
-```
-cat generic.go | genny gen "KeyType=string ValueType=interface{}"
-```
-
-The output will be the complete Go source file with the generic types replaced with the types specified in the arguments.
-
-## Real example
-
-Given [this generic Go code](https://github.com/cheekybits/genny/tree/master/examples/queue) which compiles and is tested:
-
-```go
-package queue
-
-import "github.com/cheekybits/genny/generic"
-
-// NOTE: this is how easy it is to define a generic type
-type Something generic.Type
-
-// SomethingQueue is a queue of Somethings.
-type SomethingQueue struct {
- items []Something
-}
-
-func NewSomethingQueue() *SomethingQueue {
- return &SomethingQueue{items: make([]Something, 0)}
-}
-func (q *SomethingQueue) Push(item Something) {
- q.items = append(q.items, item)
-}
-func (q *SomethingQueue) Pop() Something {
- item := q.items[0]
- q.items = q.items[1:]
- return item
-}
-```
-
-When `genny gen` is invoked like this:
-
-```
-cat source.go | genny gen "Something=string"
-```
-
-It outputs:
-
-```go
-// This file was automatically generated by genny.
-// Any changes will be lost if this file is regenerated.
-// see https://github.com/cheekybits/genny
-
-package queue
-
-// StringQueue is a queue of Strings.
-type StringQueue struct {
- items []string
-}
-
-func NewStringQueue() *StringQueue {
- return &StringQueue{items: make([]string, 0)}
-}
-func (q *StringQueue) Push(item string) {
- q.items = append(q.items, item)
-}
-func (q *StringQueue) Pop() string {
- item := q.items[0]
- q.items = q.items[1:]
- return item
-}
-```
-
-To get a _something_ for every built-in Go type plus one of your own types, you could run:
-
-```
-cat source.go | genny gen "Something=BUILTINS,*MyType"
-```
-
-#### More examples
-
-Check out the [test code files](https://github.com/cheekybits/genny/tree/master/parse/test) for more real examples.
-
-## Writing test code
-
-Once you have defined a generic type with some code worth testing:
-
-```go
-package slice
-
-import (
- "log"
- "reflect"
-
- "github.com/stretchr/gogen/generic"
-)
-
-type MyType generic.Type
-
-func EnsureMyTypeSlice(objectOrSlice interface{}) []MyType {
- log.Printf("%v", reflect.TypeOf(objectOrSlice))
- switch obj := objectOrSlice.(type) {
- case []MyType:
- log.Println(" returning it untouched")
- return obj
- case MyType:
- log.Println(" wrapping in slice")
- return []MyType{obj}
- default:
- panic("ensure slice needs MyType or []MyType")
- }
-}
-```
-
-You can treat it like any normal Go type in your test code:
-
-```go
-func TestEnsureMyTypeSlice(t *testing.T) {
-
- myType := new(MyType)
- slice := EnsureMyTypeSlice(myType)
- if assert.NotNil(t, slice) {
- assert.Equal(t, slice[0], myType)
- }
-
- slice = EnsureMyTypeSlice(slice)
- log.Printf("%#v", slice[0])
- if assert.NotNil(t, slice) {
- assert.Equal(t, slice[0], myType)
- }
-
-}
-```
-
-### Understanding what `generic.Type` is
-
-Because `generic.Type` is an empty interface type (literally `interface{}`) every other type will be considered to be a `generic.Type` if you are switching on the type of an object. Of course, once the specific versions are generated, this issue goes away but it's worth knowing when you are writing your tests against generic code.
-
-### Contributions
-
- * See the [API documentation for the parse package](http://godoc.org/github.com/cheekybits/genny/parse)
- * Please do TDD
- * All input welcome
diff --git a/vendor/github.com/cheekybits/genny/doc.go b/vendor/github.com/cheekybits/genny/doc.go
deleted file mode 100644
index 4c31e22bc..000000000
--- a/vendor/github.com/cheekybits/genny/doc.go
+++ /dev/null
@@ -1,2 +0,0 @@
-// Package main is the command line tool for Genny.
-package main
diff --git a/vendor/github.com/cheekybits/genny/generic/doc.go b/vendor/github.com/cheekybits/genny/generic/doc.go
deleted file mode 100644
index 3bd6c869c..000000000
--- a/vendor/github.com/cheekybits/genny/generic/doc.go
+++ /dev/null
@@ -1,2 +0,0 @@
-// Package generic contains the generic marker types.
-package generic
diff --git a/vendor/github.com/cheekybits/genny/generic/generic.go b/vendor/github.com/cheekybits/genny/generic/generic.go
deleted file mode 100644
index 04a2306cb..000000000
--- a/vendor/github.com/cheekybits/genny/generic/generic.go
+++ /dev/null
@@ -1,13 +0,0 @@
-package generic
-
-// Type is the placeholder type that indicates a generic value.
-// When genny is executed, variables of this type will be replaced with
-// references to the specific types.
-// var GenericType generic.Type
-type Type interface{}
-
-// Number is the placehoder type that indiccates a generic numerical value.
-// When genny is executed, variables of this type will be replaced with
-// references to the specific types.
-// var GenericType generic.Number
-type Number float64
diff --git a/vendor/github.com/cheekybits/genny/main.go b/vendor/github.com/cheekybits/genny/main.go
deleted file mode 100644
index fe06a6c03..000000000
--- a/vendor/github.com/cheekybits/genny/main.go
+++ /dev/null
@@ -1,154 +0,0 @@
-package main
-
-import (
- "bytes"
- "flag"
- "fmt"
- "io"
- "io/ioutil"
- "net/http"
- "os"
- "strings"
-
- "github.com/cheekybits/genny/out"
- "github.com/cheekybits/genny/parse"
-)
-
-/*
-
- source | genny gen [-in=""] [-out=""] [-pkg=""] "KeyType=string,int ValueType=string,int"
-
-*/
-
-const (
- _ = iota
- exitcodeInvalidArgs
- exitcodeInvalidTypeSet
- exitcodeStdinFailed
- exitcodeGenFailed
- exitcodeGetFailed
- exitcodeSourceFileInvalid
- exitcodeDestFileFailed
-)
-
-func main() {
- var (
- in = flag.String("in", "", "file to parse instead of stdin")
- out = flag.String("out", "", "file to save output to instead of stdout")
- pkgName = flag.String("pkg", "", "package name for generated files")
- prefix = "https://github.com/metabition/gennylib/raw/master/"
- )
- flag.Parse()
- args := flag.Args()
-
- if len(args) < 2 {
- usage()
- os.Exit(exitcodeInvalidArgs)
- }
-
- if strings.ToLower(args[0]) != "gen" && strings.ToLower(args[0]) != "get" {
- usage()
- os.Exit(exitcodeInvalidArgs)
- }
-
- // parse the typesets
- var setsArg = args[1]
- if strings.ToLower(args[0]) == "get" {
- setsArg = args[2]
- }
- typeSets, err := parse.TypeSet(setsArg)
- if err != nil {
- fatal(exitcodeInvalidTypeSet, err)
- }
-
- outWriter := newWriter(*out)
-
- if strings.ToLower(args[0]) == "get" {
- if len(args) != 3 {
- fmt.Println("not enough arguments to get")
- usage()
- os.Exit(exitcodeInvalidArgs)
- }
- r, err := http.Get(prefix + args[1])
- if err != nil {
- fatal(exitcodeGetFailed, err)
- }
- b, err := ioutil.ReadAll(r.Body)
- if err != nil {
- fatal(exitcodeGetFailed, err)
- }
- r.Body.Close()
- br := bytes.NewReader(b)
- err = gen(*in, *pkgName, br, typeSets, outWriter)
- } else if len(*in) > 0 {
- var file *os.File
- file, err = os.Open(*in)
- if err != nil {
- fatal(exitcodeSourceFileInvalid, err)
- }
- defer file.Close()
- err = gen(*in, *pkgName, file, typeSets, outWriter)
- } else {
- var source []byte
- source, err = ioutil.ReadAll(os.Stdin)
- if err != nil {
- fatal(exitcodeStdinFailed, err)
- }
- reader := bytes.NewReader(source)
- err = gen("stdin", *pkgName, reader, typeSets, outWriter)
- }
-
- // do the work
- if err != nil {
- fatal(exitcodeGenFailed, err)
- }
-
-}
-
-func usage() {
- fmt.Fprintln(os.Stderr, `usage: genny [{flags}] gen "{types}"
-
-gen - generates type specific code from generic code.
-get - fetch a generic template from the online library and gen it.
-
-{flags} - (optional) Command line flags (see below)
-{types} - (required) Specific types for each generic type in the source
-{types} format: {generic}={specific}[,another][ {generic2}={specific2}]
-
-Examples:
- Generic=Specific
- Generic1=Specific1 Generic2=Specific2
- Generic1=Specific1,Specific2 Generic2=Specific3,Specific4
-
-Flags:`)
- flag.PrintDefaults()
-}
-
-func newWriter(fileName string) io.Writer {
- if fileName == "" {
- return os.Stdout
- }
- lf := &out.LazyFile{FileName: fileName}
- defer lf.Close()
- return lf
-}
-
-func fatal(code int, a ...interface{}) {
- fmt.Println(a...)
- os.Exit(code)
-}
-
-// gen performs the generic generation.
-func gen(filename, pkgName string, in io.ReadSeeker, typesets []map[string]string, out io.Writer) error {
-
- var output []byte
- var err error
-
- output, err = parse.Generics(filename, pkgName, in, typesets)
- if err != nil {
- return err
- }
-
- out.Write(output)
- return nil
-}
diff --git a/vendor/github.com/cheekybits/genny/out/lazy_file.go b/vendor/github.com/cheekybits/genny/out/lazy_file.go
deleted file mode 100644
index 7c8815f5f..000000000
--- a/vendor/github.com/cheekybits/genny/out/lazy_file.go
+++ /dev/null
@@ -1,38 +0,0 @@
-package out
-
-import (
- "os"
- "path"
-)
-
-// LazyFile is an io.WriteCloser which defers creation of the file it is supposed to write in
-// till the first call to its write function in order to prevent creation of file, if no write
-// is supposed to happen.
-type LazyFile struct {
- // FileName is path to the file to which genny will write.
- FileName string
- file *os.File
-}
-
-// Close closes the file if it is created. Returns nil if no file is created.
-func (lw *LazyFile) Close() error {
- if lw.file != nil {
- return lw.file.Close()
- }
- return nil
-}
-
-// Write writes to the specified file and creates the file first time it is called.
-func (lw *LazyFile) Write(p []byte) (int, error) {
- if lw.file == nil {
- err := os.MkdirAll(path.Dir(lw.FileName), 0755)
- if err != nil {
- return 0, err
- }
- lw.file, err = os.Create(lw.FileName)
- if err != nil {
- return 0, err
- }
- }
- return lw.file.Write(p)
-}
diff --git a/vendor/github.com/cheekybits/genny/parse/builtins.go b/vendor/github.com/cheekybits/genny/parse/builtins.go
deleted file mode 100644
index e02995444..000000000
--- a/vendor/github.com/cheekybits/genny/parse/builtins.go
+++ /dev/null
@@ -1,41 +0,0 @@
-package parse
-
-// Builtins contains a slice of all built-in Go types.
-var Builtins = []string{
- "bool",
- "byte",
- "complex128",
- "complex64",
- "error",
- "float32",
- "float64",
- "int",
- "int16",
- "int32",
- "int64",
- "int8",
- "rune",
- "string",
- "uint",
- "uint16",
- "uint32",
- "uint64",
- "uint8",
- "uintptr",
-}
-
-// Numbers contains a slice of all built-in number types.
-var Numbers = []string{
- "float32",
- "float64",
- "int",
- "int16",
- "int32",
- "int64",
- "int8",
- "uint",
- "uint16",
- "uint32",
- "uint64",
- "uint8",
-}
diff --git a/vendor/github.com/cheekybits/genny/parse/doc.go b/vendor/github.com/cheekybits/genny/parse/doc.go
deleted file mode 100644
index 1be4fed8b..000000000
--- a/vendor/github.com/cheekybits/genny/parse/doc.go
+++ /dev/null
@@ -1,14 +0,0 @@
-// Package parse contains the generic code generation capabilities
-// that power genny.
-//
-// genny gen "{types}"
-//
-// gen - generates type specific code (to stdout) from generic code (via stdin)
-//
-// {types} - (required) Specific types for each generic type in the source
-// {types} format: {generic}={specific}[,another][ {generic2}={specific2}]
-// Examples:
-// Generic=Specific
-// Generic1=Specific1 Generic2=Specific2
-// Generic1=Specific1,Specific2 Generic2=Specific3,Specific4
-package parse
diff --git a/vendor/github.com/cheekybits/genny/parse/errors.go b/vendor/github.com/cheekybits/genny/parse/errors.go
deleted file mode 100644
index ab812bf90..000000000
--- a/vendor/github.com/cheekybits/genny/parse/errors.go
+++ /dev/null
@@ -1,47 +0,0 @@
-package parse
-
-import (
- "errors"
-)
-
-// errMissingSpecificType represents an error when a generic type is not
-// satisfied by a specific type.
-type errMissingSpecificType struct {
- GenericType string
-}
-
-// Error gets a human readable string describing this error.
-func (e errMissingSpecificType) Error() string {
- return "Missing specific type for '" + e.GenericType + "' generic type"
-}
-
-// errImports represents an error from goimports.
-type errImports struct {
- Err error
-}
-
-// Error gets a human readable string describing this error.
-func (e errImports) Error() string {
- return "Failed to goimports the generated code: " + e.Err.Error()
-}
-
-// errSource represents an error with the source file.
-type errSource struct {
- Err error
-}
-
-// Error gets a human readable string describing this error.
-func (e errSource) Error() string {
- return "Failed to parse source file: " + e.Err.Error()
-}
-
-type errBadTypeArgs struct {
- Message string
- Arg string
-}
-
-func (e errBadTypeArgs) Error() string {
- return "\"" + e.Arg + "\" is bad: " + e.Message
-}
-
-var errMissingTypeInformation = errors.New("No type arguments were specified and no \"// +gogen\" tag was found in the source.")
diff --git a/vendor/github.com/cheekybits/genny/parse/parse.go b/vendor/github.com/cheekybits/genny/parse/parse.go
deleted file mode 100644
index 08eb48b11..000000000
--- a/vendor/github.com/cheekybits/genny/parse/parse.go
+++ /dev/null
@@ -1,298 +0,0 @@
-package parse
-
-import (
- "bufio"
- "bytes"
- "fmt"
- "go/ast"
- "go/parser"
- "go/scanner"
- "go/token"
- "io"
- "os"
- "strings"
- "unicode"
-
- "golang.org/x/tools/imports"
-)
-
-var header = []byte(`
-
-// This file was automatically generated by genny.
-// Any changes will be lost if this file is regenerated.
-// see https://github.com/cheekybits/genny
-
-`)
-
-var (
- packageKeyword = []byte("package")
- importKeyword = []byte("import")
- openBrace = []byte("(")
- closeBrace = []byte(")")
- genericPackage = "generic"
- genericType = "generic.Type"
- genericNumber = "generic.Number"
- linefeed = "\r\n"
-)
-var unwantedLinePrefixes = [][]byte{
- []byte("//go:generate genny "),
-}
-
-func subIntoLiteral(lit, typeTemplate, specificType string) string {
- if lit == typeTemplate {
- return specificType
- }
- if !strings.Contains(lit, typeTemplate) {
- return lit
- }
- specificLg := wordify(specificType, true)
- specificSm := wordify(specificType, false)
- result := strings.Replace(lit, typeTemplate, specificLg, -1)
- if strings.HasPrefix(result, specificLg) && !isExported(lit) {
- return strings.Replace(result, specificLg, specificSm, 1)
- }
- return result
-}
-
-func subTypeIntoComment(line, typeTemplate, specificType string) string {
- var subbed string
- for _, w := range strings.Fields(line) {
- subbed = subbed + subIntoLiteral(w, typeTemplate, specificType) + " "
- }
- return subbed
-}
-
-// Does the heavy lifting of taking a line of our code and
-// sbustituting a type into there for our generic type
-func subTypeIntoLine(line, typeTemplate, specificType string) string {
- src := []byte(line)
- var s scanner.Scanner
- fset := token.NewFileSet()
- file := fset.AddFile("", fset.Base(), len(src))
- s.Init(file, src, nil, scanner.ScanComments)
- output := ""
- for {
- _, tok, lit := s.Scan()
- if tok == token.EOF {
- break
- } else if tok == token.COMMENT {
- subbed := subTypeIntoComment(lit, typeTemplate, specificType)
- output = output + subbed + " "
- } else if tok.IsLiteral() {
- subbed := subIntoLiteral(lit, typeTemplate, specificType)
- output = output + subbed + " "
- } else {
- output = output + tok.String() + " "
- }
- }
- return output
-}
-
-// typeSet looks like "KeyType: int, ValueType: string"
-func generateSpecific(filename string, in io.ReadSeeker, typeSet map[string]string) ([]byte, error) {
-
- // ensure we are at the beginning of the file
- in.Seek(0, os.SEEK_SET)
-
- // parse the source file
- fs := token.NewFileSet()
- file, err := parser.ParseFile(fs, filename, in, 0)
- if err != nil {
- return nil, &errSource{Err: err}
- }
-
- // make sure every generic.Type is represented in the types
- // argument.
- for _, decl := range file.Decls {
- switch it := decl.(type) {
- case *ast.GenDecl:
- for _, spec := range it.Specs {
- ts, ok := spec.(*ast.TypeSpec)
- if !ok {
- continue
- }
- switch tt := ts.Type.(type) {
- case *ast.SelectorExpr:
- if name, ok := tt.X.(*ast.Ident); ok {
- if name.Name == genericPackage {
- if _, ok := typeSet[ts.Name.Name]; !ok {
- return nil, &errMissingSpecificType{GenericType: ts.Name.Name}
- }
- }
- }
- }
- }
- }
- }
-
- in.Seek(0, os.SEEK_SET)
-
- var buf bytes.Buffer
-
- comment := ""
- scanner := bufio.NewScanner(in)
- for scanner.Scan() {
-
- line := scanner.Text()
-
- // does this line contain generic.Type?
- if strings.Contains(line, genericType) || strings.Contains(line, genericNumber) {
- comment = ""
- continue
- }
-
- for t, specificType := range typeSet {
- if strings.Contains(line, t) {
- newLine := subTypeIntoLine(line, t, specificType)
- line = newLine
- }
- }
-
- if comment != "" {
- buf.WriteString(makeLine(comment))
- comment = ""
- }
-
- // is this line a comment?
- // TODO: should we handle /* */ comments?
- if strings.HasPrefix(line, "//") {
- // record this line to print later
- comment = line
- continue
- }
-
- // write the line
- buf.WriteString(makeLine(line))
- }
-
- // write it out
- return buf.Bytes(), nil
-}
-
-// Generics parses the source file and generates the bytes replacing the
-// generic types for the keys map with the specific types (its value).
-func Generics(filename, pkgName string, in io.ReadSeeker, typeSets []map[string]string) ([]byte, error) {
-
- totalOutput := header
-
- for _, typeSet := range typeSets {
-
- // generate the specifics
- parsed, err := generateSpecific(filename, in, typeSet)
- if err != nil {
- return nil, err
- }
-
- totalOutput = append(totalOutput, parsed...)
-
- }
-
- // clean up the code line by line
- packageFound := false
- insideImportBlock := false
- var cleanOutputLines []string
- scanner := bufio.NewScanner(bytes.NewReader(totalOutput))
- for scanner.Scan() {
-
- // end of imports block?
- if insideImportBlock {
- if bytes.HasSuffix(scanner.Bytes(), closeBrace) {
- insideImportBlock = false
- }
- continue
- }
-
- if bytes.HasPrefix(scanner.Bytes(), packageKeyword) {
- if packageFound {
- continue
- } else {
- packageFound = true
- }
- } else if bytes.HasPrefix(scanner.Bytes(), importKeyword) {
- if bytes.HasSuffix(scanner.Bytes(), openBrace) {
- insideImportBlock = true
- }
- continue
- }
-
- // check all unwantedLinePrefixes - and skip them
- skipline := false
- for _, prefix := range unwantedLinePrefixes {
- if bytes.HasPrefix(scanner.Bytes(), prefix) {
- skipline = true
- continue
- }
- }
-
- if skipline {
- continue
- }
-
- cleanOutputLines = append(cleanOutputLines, makeLine(scanner.Text()))
- }
-
- cleanOutput := strings.Join(cleanOutputLines, "")
-
- output := []byte(cleanOutput)
- var err error
-
- // change package name
- if pkgName != "" {
- output = changePackage(bytes.NewReader([]byte(output)), pkgName)
- }
- // fix the imports
- output, err = imports.Process(filename, output, nil)
- if err != nil {
- return nil, &errImports{Err: err}
- }
-
- return output, nil
-}
-
-func makeLine(s string) string {
- return fmt.Sprintln(strings.TrimRight(s, linefeed))
-}
-
-// isAlphaNumeric gets whether the rune is alphanumeric or _.
-func isAlphaNumeric(r rune) bool {
- return r == '_' || unicode.IsLetter(r) || unicode.IsDigit(r)
-}
-
-// wordify turns a type into a nice word for function and type
-// names etc.
-func wordify(s string, exported bool) string {
- s = strings.TrimRight(s, "{}")
- s = strings.TrimLeft(s, "*&")
- s = strings.Replace(s, ".", "", -1)
- if !exported {
- return s
- }
- return strings.ToUpper(string(s[0])) + s[1:]
-}
-
-func changePackage(r io.Reader, pkgName string) []byte {
- var out bytes.Buffer
- sc := bufio.NewScanner(r)
- done := false
-
- for sc.Scan() {
- s := sc.Text()
-
- if !done && strings.HasPrefix(s, "package") {
- parts := strings.Split(s, " ")
- parts[1] = pkgName
- s = strings.Join(parts, " ")
- done = true
- }
-
- fmt.Fprintln(&out, s)
- }
- return out.Bytes()
-}
-
-func isExported(lit string) bool {
- if len(lit) == 0 {
- return false
- }
- return unicode.IsUpper(rune(lit[0]))
-}
diff --git a/vendor/github.com/cheekybits/genny/parse/typesets.go b/vendor/github.com/cheekybits/genny/parse/typesets.go
deleted file mode 100644
index c30b97289..000000000
--- a/vendor/github.com/cheekybits/genny/parse/typesets.go
+++ /dev/null
@@ -1,89 +0,0 @@
-package parse
-
-import "strings"
-
-const (
- typeSep = " "
- keyValueSep = "="
- valuesSep = ","
- builtins = "BUILTINS"
- numbers = "NUMBERS"
-)
-
-// TypeSet turns a type string into a []map[string]string
-// that can be given to parse.Generics for it to do its magic.
-//
-// Acceptable args are:
-//
-// Person=man
-// Person=man Animal=dog
-// Person=man Animal=dog Animal2=cat
-// Person=man,woman Animal=dog,cat
-// Person=man,woman,child Animal=dog,cat Place=london,paris
-func TypeSet(arg string) ([]map[string]string, error) {
-
- types := make(map[string][]string)
- var keys []string
- for _, pair := range strings.Split(arg, typeSep) {
- segs := strings.Split(pair, keyValueSep)
- if len(segs) != 2 {
- return nil, &errBadTypeArgs{Arg: arg, Message: "Generic=Specific expected"}
- }
- key := segs[0]
- keys = append(keys, key)
- types[key] = make([]string, 0)
- for _, t := range strings.Split(segs[1], valuesSep) {
- if t == builtins {
- types[key] = append(types[key], Builtins...)
- } else if t == numbers {
- types[key] = append(types[key], Numbers...)
- } else {
- types[key] = append(types[key], t)
- }
- }
- }
-
- cursors := make(map[string]int)
- for _, key := range keys {
- cursors[key] = 0
- }
-
- outChan := make(chan map[string]string)
- go func() {
- buildTypeSet(keys, 0, cursors, types, outChan)
- close(outChan)
- }()
-
- var typeSets []map[string]string
- for typeSet := range outChan {
- typeSets = append(typeSets, typeSet)
- }
-
- return typeSets, nil
-
-}
-
-func buildTypeSet(keys []string, keyI int, cursors map[string]int, types map[string][]string, out chan<- map[string]string) {
- key := keys[keyI]
- for cursors[key] < len(types[key]) {
- if keyI < len(keys)-1 {
- buildTypeSet(keys, keyI+1, copycursors(cursors), types, out)
- } else {
- // build the typeset for this combination
- ts := make(map[string]string)
- for k, vals := range types {
- ts[k] = vals[cursors[k]]
- }
- out <- ts
- }
- cursors[key]++
- }
-}
-
-func copycursors(source map[string]int) map[string]int {
- copy := make(map[string]int)
- for k, v := range source {
- copy[k] = v
- }
- return copy
-}
diff --git a/vendor/github.com/containerd/cgroups/README.md b/vendor/github.com/containerd/cgroups/README.md
index eccb9d984..d2073af3a 100644
--- a/vendor/github.com/containerd/cgroups/README.md
+++ b/vendor/github.com/containerd/cgroups/README.md
@@ -9,7 +9,7 @@ Go package for creating, managing, inspecting, and destroying cgroups.
The resources format for settings on the cgroup uses the OCI runtime-spec found
[here](https://github.com/opencontainers/runtime-spec).
-## Examples
+## Examples (v1)
### Create a new cgroup
@@ -58,7 +58,7 @@ if err := control.Add(cgroups.Process{Pid:1234}); err != nil {
}
```
-### Update the cgroup
+### Update the cgroup
To update the resources applied in the cgroup
@@ -133,6 +133,61 @@ event := cgroups.OOMEvent()
efd, err := control.RegisterMemoryEvent(event)
```
+## Examples (v2/unified)
+
+### Check that the current system is running cgroups v2
+
+```go
+var cgroupV2 bool
+if cgroups.Mode() == cgroups.Unified {
+ cgroupV2 = true
+}
+```
+
+### Create a new cgroup
+
+This creates a new systemd v2 cgroup slice. Systemd slices consider ["-" a special character](https://www.freedesktop.org/software/systemd/man/systemd.slice.html),
+so the resulting slice would be located here on disk:
+
+* /sys/fs/cgroup/my.slice/my-cgroup.slice/my-cgroup-abc.slice
+
+```go
+import (
+ cgroupsv2 "github.com/containerd/cgroups/v2"
+ specs "github.com/opencontainers/runtime-spec/specs-go"
+)
+
+res := cgroupsv2.Resources{}
+// dummy PID of -1 is used for creating a "general slice" to be used as a parent cgroup.
+// see https://github.com/containerd/cgroups/blob/1df78138f1e1e6ee593db155c6b369466f577651/v2/manager.go#L732-L735
+m, err := cgroupsv2.NewSystemd("/", "my-cgroup-abc.slice", -1, &res)
+if err != nil {
+ return err
+}
+```
+
+### Load an existing cgroup
+
+```go
+m, err := cgroupsv2.LoadSystemd("/", "my-cgroup-abc.slice")
+if err != nil {
+ return err
+}
+```
+
+### Delete a cgroup
+
+```go
+m, err := cgroupsv2.LoadSystemd("/", "my-cgroup-abc.slice")
+if err != nil {
+ return err
+}
+err = m.DeleteSystemd()
+if err != nil {
+ return err
+}
+```
+
### Attention
All static path should not include `/sys/fs/cgroup/` prefix, it should start with your own cgroups name
diff --git a/vendor/github.com/containerd/cgroups/Vagrantfile b/vendor/github.com/containerd/cgroups/Vagrantfile
deleted file mode 100644
index 9a4aac8cb..000000000
--- a/vendor/github.com/containerd/cgroups/Vagrantfile
+++ /dev/null
@@ -1,46 +0,0 @@
-# -*- mode: ruby -*-
-# vi: set ft=ruby :
-
-Vagrant.configure("2") do |config|
-# Fedora box is used for testing cgroup v2 support
- config.vm.box = "fedora/35-cloud-base"
- config.vm.provider :virtualbox do |v|
- v.memory = 4096
- v.cpus = 2
- end
- config.vm.provider :libvirt do |v|
- v.memory = 4096
- v.cpus = 2
- end
- config.vm.provision "shell", inline: <<-SHELL
- set -eux -o pipefail
- # configuration
- GO_VERSION="1.17.7"
-
- # install gcc and Golang
- dnf -y install gcc
- curl -fsSL "https://dl.google.com/go/go${GO_VERSION}.linux-amd64.tar.gz" | tar Cxz /usr/local
-
- # setup env vars
- cat >> /etc/profile.d/sh.local < /test.sh < 0 {
+ errs = append(errs, fmt.Sprintf("%s (contains running processes)", string(s.Name())))
+ continue
+ }
if d, ok := s.(deleter); ok {
sp, err := c.path(s.Name())
if err != nil {
@@ -247,6 +256,7 @@ func (c *cgroup) Delete() error {
if err := remove(path); err != nil {
errs = append(errs, path)
}
+ continue
}
}
if len(errs) > 0 {
diff --git a/vendor/github.com/containerd/cgroups/cpuacct.go b/vendor/github.com/containerd/cgroups/cpuacct.go
index e5fc864bd..1022fa379 100644
--- a/vendor/github.com/containerd/cgroups/cpuacct.go
+++ b/vendor/github.com/containerd/cgroups/cpuacct.go
@@ -17,8 +17,9 @@
package cgroups
import (
+ "bufio"
"fmt"
- "io/ioutil"
+ "os"
"path/filepath"
"strconv"
"strings"
@@ -70,7 +71,7 @@ func (c *cpuacctController) Stat(path string, stats *v1.Metrics) error {
func (c *cpuacctController) percpuUsage(path string) ([]uint64, error) {
var usage []uint64
- data, err := ioutil.ReadFile(filepath.Join(c.Path(path), "cpuacct.usage_percpu"))
+ data, err := os.ReadFile(filepath.Join(c.Path(path), "cpuacct.usage_percpu"))
if err != nil {
return nil, err
}
@@ -86,36 +87,41 @@ func (c *cpuacctController) percpuUsage(path string) ([]uint64, error) {
func (c *cpuacctController) getUsage(path string) (user uint64, kernel uint64, err error) {
statPath := filepath.Join(c.Path(path), "cpuacct.stat")
- data, err := ioutil.ReadFile(statPath)
+ f, err := os.Open(statPath)
if err != nil {
return 0, 0, err
}
- fields := strings.Fields(string(data))
- if len(fields) != 4 {
- return 0, 0, fmt.Errorf("%q is expected to have 4 fields", statPath)
+ defer f.Close()
+ var (
+ raw = make(map[string]uint64)
+ sc = bufio.NewScanner(f)
+ )
+ for sc.Scan() {
+ key, v, err := parseKV(sc.Text())
+ if err != nil {
+ return 0, 0, err
+ }
+ raw[key] = v
+ }
+ if err := sc.Err(); err != nil {
+ return 0, 0, err
}
for _, t := range []struct {
- index int
name string
value *uint64
}{
{
- index: 0,
name: "user",
value: &user,
},
{
- index: 2,
name: "system",
value: &kernel,
},
} {
- if fields[t.index] != t.name {
- return 0, 0, fmt.Errorf("expected field %q but found %q in %q", t.name, fields[t.index], statPath)
- }
- v, err := strconv.ParseUint(fields[t.index+1], 10, 64)
- if err != nil {
- return 0, 0, err
+ v, ok := raw[t.name]
+ if !ok {
+ return 0, 0, fmt.Errorf("expected field %q but not found in %q", t.name, statPath)
}
*t.value = v
}
diff --git a/vendor/github.com/containerd/cgroups/cpuset.go b/vendor/github.com/containerd/cgroups/cpuset.go
index 3cae173bd..8b56d3dba 100644
--- a/vendor/github.com/containerd/cgroups/cpuset.go
+++ b/vendor/github.com/containerd/cgroups/cpuset.go
@@ -19,7 +19,6 @@ package cgroups
import (
"bytes"
"fmt"
- "io/ioutil"
"os"
"path/filepath"
@@ -87,10 +86,10 @@ func (c *cpusetController) Update(path string, resources *specs.LinuxResources)
}
func (c *cpusetController) getValues(path string) (cpus []byte, mems []byte, err error) {
- if cpus, err = ioutil.ReadFile(filepath.Join(path, "cpuset.cpus")); err != nil && !os.IsNotExist(err) {
+ if cpus, err = os.ReadFile(filepath.Join(path, "cpuset.cpus")); err != nil && !os.IsNotExist(err) {
return
}
- if mems, err = ioutil.ReadFile(filepath.Join(path, "cpuset.mems")); err != nil && !os.IsNotExist(err) {
+ if mems, err = os.ReadFile(filepath.Join(path, "cpuset.mems")); err != nil && !os.IsNotExist(err) {
return
}
return cpus, mems, nil
diff --git a/vendor/github.com/containerd/cgroups/freezer.go b/vendor/github.com/containerd/cgroups/freezer.go
index 59a7e7128..5783f0dcc 100644
--- a/vendor/github.com/containerd/cgroups/freezer.go
+++ b/vendor/github.com/containerd/cgroups/freezer.go
@@ -17,7 +17,7 @@
package cgroups
import (
- "io/ioutil"
+ "os"
"path/filepath"
"strings"
"time"
@@ -58,7 +58,7 @@ func (f *freezerController) changeState(path string, state State) error {
}
func (f *freezerController) state(path string) (State, error) {
- current, err := ioutil.ReadFile(filepath.Join(f.root, path, "freezer.state"))
+ current, err := os.ReadFile(filepath.Join(f.root, path, "freezer.state"))
if err != nil {
return "", err
}
diff --git a/vendor/github.com/containerd/cgroups/pids.go b/vendor/github.com/containerd/cgroups/pids.go
index ce78e44c1..66a1b6b44 100644
--- a/vendor/github.com/containerd/cgroups/pids.go
+++ b/vendor/github.com/containerd/cgroups/pids.go
@@ -17,7 +17,6 @@
package cgroups
import (
- "io/ioutil"
"os"
"path/filepath"
"strconv"
@@ -69,7 +68,7 @@ func (p *pidsController) Stat(path string, stats *v1.Metrics) error {
return err
}
var max uint64
- maxData, err := ioutil.ReadFile(filepath.Join(p.Path(path), "pids.max"))
+ maxData, err := os.ReadFile(filepath.Join(p.Path(path), "pids.max"))
if err != nil {
return err
}
diff --git a/vendor/github.com/containerd/cgroups/rdma.go b/vendor/github.com/containerd/cgroups/rdma.go
index 3b59b1071..9d414203e 100644
--- a/vendor/github.com/containerd/cgroups/rdma.go
+++ b/vendor/github.com/containerd/cgroups/rdma.go
@@ -17,7 +17,6 @@
package cgroups
import (
- "io/ioutil"
"math"
"os"
"path/filepath"
@@ -126,13 +125,13 @@ func toRdmaEntry(strEntries []string) []*v1.RdmaEntry {
func (p *rdmaController) Stat(path string, stats *v1.Metrics) error {
- currentData, err := ioutil.ReadFile(filepath.Join(p.Path(path), "rdma.current"))
+ currentData, err := os.ReadFile(filepath.Join(p.Path(path), "rdma.current"))
if err != nil {
return err
}
currentPerDevices := strings.Split(string(currentData), "\n")
- maxData, err := ioutil.ReadFile(filepath.Join(p.Path(path), "rdma.max"))
+ maxData, err := os.ReadFile(filepath.Join(p.Path(path), "rdma.max"))
if err != nil {
return err
}
diff --git a/vendor/github.com/containerd/cgroups/utils.go b/vendor/github.com/containerd/cgroups/utils.go
index 217138975..c17a3a414 100644
--- a/vendor/github.com/containerd/cgroups/utils.go
+++ b/vendor/github.com/containerd/cgroups/utils.go
@@ -21,7 +21,6 @@ import (
"errors"
"fmt"
"io"
- "io/ioutil"
"os"
"path/filepath"
"strconv"
@@ -200,7 +199,7 @@ func hugePageSizes() ([]string, error) {
pageSizes []string
sizeList = []string{"B", "KB", "MB", "GB", "TB", "PB"}
)
- files, err := ioutil.ReadDir("/sys/kernel/mm/hugepages")
+ files, err := os.ReadDir("/sys/kernel/mm/hugepages")
if err != nil {
return nil, err
}
@@ -216,7 +215,7 @@ func hugePageSizes() ([]string, error) {
}
func readUint(path string) (uint64, error) {
- v, err := ioutil.ReadFile(path)
+ v, err := os.ReadFile(path)
if err != nil {
return 0, err
}
@@ -382,7 +381,7 @@ func retryingWriteFile(path string, data []byte, mode os.FileMode) error {
// Retry writes on EINTR; see:
// https://github.com/golang/go/issues/38033
for {
- err := ioutil.WriteFile(path, data, mode)
+ err := os.WriteFile(path, data, mode)
if err == nil {
return nil
} else if !errors.Is(err, syscall.EINTR) {
diff --git a/vendor/github.com/coreos/go-systemd/v22/dbus/dbus.go b/vendor/github.com/coreos/go-systemd/v22/dbus/dbus.go
index cff5af1a6..147f756fe 100644
--- a/vendor/github.com/coreos/go-systemd/v22/dbus/dbus.go
+++ b/vendor/github.com/coreos/go-systemd/v22/dbus/dbus.go
@@ -176,6 +176,11 @@ func (c *Conn) Close() {
c.sigconn.Close()
}
+// Connected returns whether conn is connected
+func (c *Conn) Connected() bool {
+ return c.sysconn.Connected() && c.sigconn.Connected()
+}
+
// NewConnection establishes a connection to a bus using a caller-supplied function.
// This allows connecting to remote buses through a user-supplied mechanism.
// The supplied function may be called multiple times, and should return independent connections.
diff --git a/vendor/github.com/coreos/go-systemd/v22/dbus/methods.go b/vendor/github.com/coreos/go-systemd/v22/dbus/methods.go
index fa04afc70..074148cb4 100644
--- a/vendor/github.com/coreos/go-systemd/v22/dbus/methods.go
+++ b/vendor/github.com/coreos/go-systemd/v22/dbus/methods.go
@@ -417,6 +417,29 @@ func (c *Conn) listUnitsInternal(f storeFunc) ([]UnitStatus, error) {
return status, nil
}
+// GetUnitByPID returns the unit object path of the unit a process ID
+// belongs to. It takes a UNIX PID and returns the object path. The PID must
+// refer to an existing system process
+func (c *Conn) GetUnitByPID(ctx context.Context, pid uint32) (dbus.ObjectPath, error) {
+ var result dbus.ObjectPath
+
+ err := c.sysobj.CallWithContext(ctx, "org.freedesktop.systemd1.Manager.GetUnitByPID", 0, pid).Store(&result)
+
+ return result, err
+}
+
+// GetUnitNameByPID returns the name of the unit a process ID belongs to. It
+// takes a UNIX PID and returns the object path. The PID must refer to an
+// existing system process
+func (c *Conn) GetUnitNameByPID(ctx context.Context, pid uint32) (string, error) {
+ path, err := c.GetUnitByPID(ctx, pid)
+ if err != nil {
+ return "", err
+ }
+
+ return unitName(path), nil
+}
+
// Deprecated: use ListUnitsContext instead.
func (c *Conn) ListUnits() ([]UnitStatus, error) {
return c.ListUnitsContext(context.Background())
@@ -828,3 +851,14 @@ func (c *Conn) listJobsInternal(ctx context.Context) ([]JobStatus, error) {
return status, nil
}
+
+// Freeze the cgroup associated with the unit.
+// Note that FreezeUnit and ThawUnit are only supported on systems running with cgroup v2.
+func (c *Conn) FreezeUnit(ctx context.Context, unit string) error {
+ return c.sysobj.CallWithContext(ctx, "org.freedesktop.systemd1.Manager.FreezeUnit", 0, unit).Store()
+}
+
+// Unfreeze the cgroup associated with the unit.
+func (c *Conn) ThawUnit(ctx context.Context, unit string) error {
+ return c.sysobj.CallWithContext(ctx, "org.freedesktop.systemd1.Manager.ThawUnit", 0, unit).Store()
+}
diff --git a/vendor/github.com/decred/dcrd/dcrec/secp256k1/v4/ecdh.go b/vendor/github.com/decred/dcrd/dcrec/secp256k1/v4/ecdh.go
index ebbdfc541..96869a3cd 100644
--- a/vendor/github.com/decred/dcrd/dcrec/secp256k1/v4/ecdh.go
+++ b/vendor/github.com/decred/dcrd/dcrec/secp256k1/v4/ecdh.go
@@ -1,5 +1,5 @@
// Copyright (c) 2015 The btcsuite developers
-// Copyright (c) 2015-2016 The Decred developers
+// Copyright (c) 2015-2023 The Decred developers
// Use of this source code is governed by an ISC
// license that can be found in the LICENSE file.
@@ -9,7 +9,7 @@ package secp256k1
// public key using Diffie-Hellman key exchange (ECDH) (RFC 5903).
// RFC5903 Section 9 states we should only return x.
//
-// It is recommended to securily hash the result before using as a cryptographic
+// It is recommended to securely hash the result before using as a cryptographic
// key.
func GenerateSharedSecret(privkey *PrivateKey, pubkey *PublicKey) []byte {
var point, result JacobianPoint
diff --git a/vendor/github.com/decred/dcrd/dcrec/secp256k1/v4/privkey.go b/vendor/github.com/decred/dcrd/dcrec/secp256k1/v4/privkey.go
index 3ca5b7c2f..ca3e8da28 100644
--- a/vendor/github.com/decred/dcrd/dcrec/secp256k1/v4/privkey.go
+++ b/vendor/github.com/decred/dcrd/dcrec/secp256k1/v4/privkey.go
@@ -1,12 +1,13 @@
// Copyright (c) 2013-2014 The btcsuite developers
-// Copyright (c) 2015-2022 The Decred developers
+// Copyright (c) 2015-2023 The Decred developers
// Use of this source code is governed by an ISC
// license that can be found in the LICENSE file.
package secp256k1
import (
- csprng "crypto/rand"
+ cryptorand "crypto/rand"
+ "io"
)
// PrivateKey provides facilities for working with secp256k1 private keys within
@@ -26,29 +27,36 @@ func NewPrivateKey(key *ModNScalar) *PrivateKey {
// interpreted as an unsigned 256-bit big-endian integer in the range [0, N-1],
// where N is the order of the curve.
//
-// Note that this means passing a slice with more than 32 bytes is truncated and
-// that truncated value is reduced modulo N. It is up to the caller to either
-// provide a value in the appropriate range or choose to accept the described
-// behavior.
+// WARNING: This means passing a slice with more than 32 bytes is truncated and
+// that truncated value is reduced modulo N. Further, 0 is not a valid private
+// key. It is up to the caller to provide a value in the appropriate range of
+// [1, N-1]. Failure to do so will either result in an invalid private key or
+// potentially weak private keys that have bias that could be exploited.
//
-// Typically callers should simply make use of GeneratePrivateKey when creating
-// private keys which properly handles generation of appropriate values.
+// This function primarily exists to provide a mechanism for converting
+// serialized private keys that are already known to be good.
+//
+// Typically callers should make use of GeneratePrivateKey or
+// GeneratePrivateKeyFromRand when creating private keys since they properly
+// handle generation of appropriate values.
func PrivKeyFromBytes(privKeyBytes []byte) *PrivateKey {
var privKey PrivateKey
privKey.Key.SetByteSlice(privKeyBytes)
return &privKey
}
-// GeneratePrivateKey generates and returns a new cryptographically secure
-// private key that is suitable for use with secp256k1.
-func GeneratePrivateKey() (*PrivateKey, error) {
+// generatePrivateKey generates and returns a new private key that is suitable
+// for use with secp256k1 using the provided reader as a source of entropy. The
+// provided reader must be a source of cryptographically secure randomness to
+// avoid weak private keys.
+func generatePrivateKey(rand io.Reader) (*PrivateKey, error) {
// The group order is close enough to 2^256 that there is only roughly a 1
// in 2^128 chance of generating an invalid private key, so this loop will
// virtually never run more than a single iteration in practice.
var key PrivateKey
var b32 [32]byte
for valid := false; !valid; {
- if _, err := csprng.Read(b32[:]); err != nil {
+ if _, err := io.ReadFull(rand, b32[:]); err != nil {
return nil, err
}
@@ -62,6 +70,20 @@ func GeneratePrivateKey() (*PrivateKey, error) {
return &key, nil
}
+// GeneratePrivateKey generates and returns a new cryptographically secure
+// private key that is suitable for use with secp256k1.
+func GeneratePrivateKey() (*PrivateKey, error) {
+ return generatePrivateKey(cryptorand.Reader)
+}
+
+// GeneratePrivateKeyFromRand generates a private key that is suitable for use
+// with secp256k1 using the provided reader as a source of entropy. The
+// provided reader must be a source of cryptographically secure randomness, such
+// as [crypto/rand.Reader], to avoid weak private keys.
+func GeneratePrivateKeyFromRand(rand io.Reader) (*PrivateKey, error) {
+ return generatePrivateKey(rand)
+}
+
// PubKey computes and returns the public key corresponding to this private key.
func (p *PrivateKey) PubKey() *PublicKey {
var result JacobianPoint
diff --git a/vendor/github.com/docker/go-units/size.go b/vendor/github.com/docker/go-units/size.go
index 85f6ab071..c245a8951 100644
--- a/vendor/github.com/docker/go-units/size.go
+++ b/vendor/github.com/docker/go-units/size.go
@@ -2,7 +2,6 @@ package units
import (
"fmt"
- "regexp"
"strconv"
"strings"
)
@@ -26,16 +25,17 @@ const (
PiB = 1024 * TiB
)
-type unitMap map[string]int64
+type unitMap map[byte]int64
var (
- decimalMap = unitMap{"k": KB, "m": MB, "g": GB, "t": TB, "p": PB}
- binaryMap = unitMap{"k": KiB, "m": MiB, "g": GiB, "t": TiB, "p": PiB}
- sizeRegex = regexp.MustCompile(`^(\d+(\.\d+)*) ?([kKmMgGtTpP])?[iI]?[bB]?$`)
+ decimalMap = unitMap{'k': KB, 'm': MB, 'g': GB, 't': TB, 'p': PB}
+ binaryMap = unitMap{'k': KiB, 'm': MiB, 'g': GiB, 't': TiB, 'p': PiB}
)
-var decimapAbbrs = []string{"B", "kB", "MB", "GB", "TB", "PB", "EB", "ZB", "YB"}
-var binaryAbbrs = []string{"B", "KiB", "MiB", "GiB", "TiB", "PiB", "EiB", "ZiB", "YiB"}
+var (
+ decimapAbbrs = []string{"B", "kB", "MB", "GB", "TB", "PB", "EB", "ZB", "YB"}
+ binaryAbbrs = []string{"B", "KiB", "MiB", "GiB", "TiB", "PiB", "EiB", "ZiB", "YiB"}
+)
func getSizeAndUnit(size float64, base float64, _map []string) (float64, string) {
i := 0
@@ -89,20 +89,66 @@ func RAMInBytes(size string) (int64, error) {
// Parses the human-readable size string into the amount it represents.
func parseSize(sizeStr string, uMap unitMap) (int64, error) {
- matches := sizeRegex.FindStringSubmatch(sizeStr)
- if len(matches) != 4 {
+ // TODO: rewrite to use strings.Cut if there's a space
+ // once Go < 1.18 is deprecated.
+ sep := strings.LastIndexAny(sizeStr, "01234567890. ")
+ if sep == -1 {
+ // There should be at least a digit.
return -1, fmt.Errorf("invalid size: '%s'", sizeStr)
}
+ var num, sfx string
+ if sizeStr[sep] != ' ' {
+ num = sizeStr[:sep+1]
+ sfx = sizeStr[sep+1:]
+ } else {
+ // Omit the space separator.
+ num = sizeStr[:sep]
+ sfx = sizeStr[sep+1:]
+ }
- size, err := strconv.ParseFloat(matches[1], 64)
+ size, err := strconv.ParseFloat(num, 64)
if err != nil {
return -1, err
}
+ // Backward compatibility: reject negative sizes.
+ if size < 0 {
+ return -1, fmt.Errorf("invalid size: '%s'", sizeStr)
+ }
+
+ if len(sfx) == 0 {
+ return int64(size), nil
+ }
- unitPrefix := strings.ToLower(matches[3])
- if mul, ok := uMap[unitPrefix]; ok {
+ // Process the suffix.
+
+ if len(sfx) > 3 { // Too long.
+ goto badSuffix
+ }
+ sfx = strings.ToLower(sfx)
+ // Trivial case: b suffix.
+ if sfx[0] == 'b' {
+ if len(sfx) > 1 { // no extra characters allowed after b.
+ goto badSuffix
+ }
+ return int64(size), nil
+ }
+ // A suffix from the map.
+ if mul, ok := uMap[sfx[0]]; ok {
size *= float64(mul)
+ } else {
+ goto badSuffix
+ }
+
+ // The suffix may have extra "b" or "ib" (e.g. KiB or MB).
+ switch {
+ case len(sfx) == 2 && sfx[1] != 'b':
+ goto badSuffix
+ case len(sfx) == 3 && sfx[1:] != "ib":
+ goto badSuffix
}
return int64(size), nil
+
+badSuffix:
+ return -1, fmt.Errorf("invalid suffix: '%s'", sfx)
}
diff --git a/vendor/github.com/go-logr/logr/.golangci.yaml b/vendor/github.com/go-logr/logr/.golangci.yaml
index 94ff801df..0cffafa7b 100644
--- a/vendor/github.com/go-logr/logr/.golangci.yaml
+++ b/vendor/github.com/go-logr/logr/.golangci.yaml
@@ -6,7 +6,6 @@ linters:
disable-all: true
enable:
- asciicheck
- - deadcode
- errcheck
- forcetypeassert
- gocritic
@@ -18,10 +17,8 @@ linters:
- misspell
- revive
- staticcheck
- - structcheck
- typecheck
- unused
- - varcheck
issues:
exclude-use-default: false
diff --git a/vendor/github.com/go-logr/logr/discard.go b/vendor/github.com/go-logr/logr/discard.go
index 9d92a38f1..99fe8be93 100644
--- a/vendor/github.com/go-logr/logr/discard.go
+++ b/vendor/github.com/go-logr/logr/discard.go
@@ -20,35 +20,5 @@ package logr
// used whenever the caller is not interested in the logs. Logger instances
// produced by this function always compare as equal.
func Discard() Logger {
- return Logger{
- level: 0,
- sink: discardLogSink{},
- }
-}
-
-// discardLogSink is a LogSink that discards all messages.
-type discardLogSink struct{}
-
-// Verify that it actually implements the interface
-var _ LogSink = discardLogSink{}
-
-func (l discardLogSink) Init(RuntimeInfo) {
-}
-
-func (l discardLogSink) Enabled(int) bool {
- return false
-}
-
-func (l discardLogSink) Info(int, string, ...interface{}) {
-}
-
-func (l discardLogSink) Error(error, string, ...interface{}) {
-}
-
-func (l discardLogSink) WithValues(...interface{}) LogSink {
- return l
-}
-
-func (l discardLogSink) WithName(string) LogSink {
- return l
+ return New(nil)
}
diff --git a/vendor/github.com/go-logr/logr/funcr/funcr.go b/vendor/github.com/go-logr/logr/funcr/funcr.go
new file mode 100644
index 000000000..e52f0cd01
--- /dev/null
+++ b/vendor/github.com/go-logr/logr/funcr/funcr.go
@@ -0,0 +1,804 @@
+/*
+Copyright 2021 The logr Authors.
+
+Licensed under the Apache License, Version 2.0 (the "License");
+you may not use this file except in compliance with the License.
+You may obtain a copy of the License at
+
+ http://www.apache.org/licenses/LICENSE-2.0
+
+Unless required by applicable law or agreed to in writing, software
+distributed under the License is distributed on an "AS IS" BASIS,
+WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+See the License for the specific language governing permissions and
+limitations under the License.
+*/
+
+// Package funcr implements formatting of structured log messages and
+// optionally captures the call site and timestamp.
+//
+// The simplest way to use it is via its implementation of a
+// github.com/go-logr/logr.LogSink with output through an arbitrary
+// "write" function. See New and NewJSON for details.
+//
+// # Custom LogSinks
+//
+// For users who need more control, a funcr.Formatter can be embedded inside
+// your own custom LogSink implementation. This is useful when the LogSink
+// needs to implement additional methods, for example.
+//
+// # Formatting
+//
+// This will respect logr.Marshaler, fmt.Stringer, and error interfaces for
+// values which are being logged. When rendering a struct, funcr will use Go's
+// standard JSON tags (all except "string").
+package funcr
+
+import (
+ "bytes"
+ "encoding"
+ "encoding/json"
+ "fmt"
+ "path/filepath"
+ "reflect"
+ "runtime"
+ "strconv"
+ "strings"
+ "time"
+
+ "github.com/go-logr/logr"
+)
+
+// New returns a logr.Logger which is implemented by an arbitrary function.
+func New(fn func(prefix, args string), opts Options) logr.Logger {
+ return logr.New(newSink(fn, NewFormatter(opts)))
+}
+
+// NewJSON returns a logr.Logger which is implemented by an arbitrary function
+// and produces JSON output.
+func NewJSON(fn func(obj string), opts Options) logr.Logger {
+ fnWrapper := func(_, obj string) {
+ fn(obj)
+ }
+ return logr.New(newSink(fnWrapper, NewFormatterJSON(opts)))
+}
+
+// Underlier exposes access to the underlying logging function. Since
+// callers only have a logr.Logger, they have to know which
+// implementation is in use, so this interface is less of an
+// abstraction and more of a way to test type conversion.
+type Underlier interface {
+ GetUnderlying() func(prefix, args string)
+}
+
+func newSink(fn func(prefix, args string), formatter Formatter) logr.LogSink {
+ l := &fnlogger{
+ Formatter: formatter,
+ write: fn,
+ }
+ // For skipping fnlogger.Info and fnlogger.Error.
+ l.Formatter.AddCallDepth(1)
+ return l
+}
+
+// Options carries parameters which influence the way logs are generated.
+type Options struct {
+ // LogCaller tells funcr to add a "caller" key to some or all log lines.
+ // This has some overhead, so some users might not want it.
+ LogCaller MessageClass
+
+ // LogCallerFunc tells funcr to also log the calling function name. This
+ // has no effect if caller logging is not enabled (see Options.LogCaller).
+ LogCallerFunc bool
+
+ // LogTimestamp tells funcr to add a "ts" key to log lines. This has some
+ // overhead, so some users might not want it.
+ LogTimestamp bool
+
+ // TimestampFormat tells funcr how to render timestamps when LogTimestamp
+ // is enabled. If not specified, a default format will be used. For more
+ // details, see docs for Go's time.Layout.
+ TimestampFormat string
+
+ // Verbosity tells funcr which V logs to produce. Higher values enable
+ // more logs. Info logs at or below this level will be written, while logs
+ // above this level will be discarded.
+ Verbosity int
+
+ // RenderBuiltinsHook allows users to mutate the list of key-value pairs
+ // while a log line is being rendered. The kvList argument follows logr
+ // conventions - each pair of slice elements is comprised of a string key
+ // and an arbitrary value (verified and sanitized before calling this
+ // hook). The value returned must follow the same conventions. This hook
+ // can be used to audit or modify logged data. For example, you might want
+ // to prefix all of funcr's built-in keys with some string. This hook is
+ // only called for built-in (provided by funcr itself) key-value pairs.
+ // Equivalent hooks are offered for key-value pairs saved via
+ // logr.Logger.WithValues or Formatter.AddValues (see RenderValuesHook) and
+ // for user-provided pairs (see RenderArgsHook).
+ RenderBuiltinsHook func(kvList []interface{}) []interface{}
+
+ // RenderValuesHook is the same as RenderBuiltinsHook, except that it is
+ // only called for key-value pairs saved via logr.Logger.WithValues. See
+ // RenderBuiltinsHook for more details.
+ RenderValuesHook func(kvList []interface{}) []interface{}
+
+ // RenderArgsHook is the same as RenderBuiltinsHook, except that it is only
+ // called for key-value pairs passed directly to Info and Error. See
+ // RenderBuiltinsHook for more details.
+ RenderArgsHook func(kvList []interface{}) []interface{}
+
+ // MaxLogDepth tells funcr how many levels of nested fields (e.g. a struct
+ // that contains a struct, etc.) it may log. Every time it finds a struct,
+ // slice, array, or map the depth is increased by one. When the maximum is
+ // reached, the value will be converted to a string indicating that the max
+ // depth has been exceeded. If this field is not specified, a default
+ // value will be used.
+ MaxLogDepth int
+}
+
+// MessageClass indicates which category or categories of messages to consider.
+type MessageClass int
+
+const (
+ // None ignores all message classes.
+ None MessageClass = iota
+ // All considers all message classes.
+ All
+ // Info only considers info messages.
+ Info
+ // Error only considers error messages.
+ Error
+)
+
+// fnlogger inherits some of its LogSink implementation from Formatter
+// and just needs to add some glue code.
+type fnlogger struct {
+ Formatter
+ write func(prefix, args string)
+}
+
+func (l fnlogger) WithName(name string) logr.LogSink {
+ l.Formatter.AddName(name)
+ return &l
+}
+
+func (l fnlogger) WithValues(kvList ...interface{}) logr.LogSink {
+ l.Formatter.AddValues(kvList)
+ return &l
+}
+
+func (l fnlogger) WithCallDepth(depth int) logr.LogSink {
+ l.Formatter.AddCallDepth(depth)
+ return &l
+}
+
+func (l fnlogger) Info(level int, msg string, kvList ...interface{}) {
+ prefix, args := l.FormatInfo(level, msg, kvList)
+ l.write(prefix, args)
+}
+
+func (l fnlogger) Error(err error, msg string, kvList ...interface{}) {
+ prefix, args := l.FormatError(err, msg, kvList)
+ l.write(prefix, args)
+}
+
+func (l fnlogger) GetUnderlying() func(prefix, args string) {
+ return l.write
+}
+
+// Assert conformance to the interfaces.
+var _ logr.LogSink = &fnlogger{}
+var _ logr.CallDepthLogSink = &fnlogger{}
+var _ Underlier = &fnlogger{}
+
+// NewFormatter constructs a Formatter which emits a JSON-like key=value format.
+func NewFormatter(opts Options) Formatter {
+ return newFormatter(opts, outputKeyValue)
+}
+
+// NewFormatterJSON constructs a Formatter which emits strict JSON.
+func NewFormatterJSON(opts Options) Formatter {
+ return newFormatter(opts, outputJSON)
+}
+
+// Defaults for Options.
+const defaultTimestampFormat = "2006-01-02 15:04:05.000000"
+const defaultMaxLogDepth = 16
+
+func newFormatter(opts Options, outfmt outputFormat) Formatter {
+ if opts.TimestampFormat == "" {
+ opts.TimestampFormat = defaultTimestampFormat
+ }
+ if opts.MaxLogDepth == 0 {
+ opts.MaxLogDepth = defaultMaxLogDepth
+ }
+ f := Formatter{
+ outputFormat: outfmt,
+ prefix: "",
+ values: nil,
+ depth: 0,
+ opts: &opts,
+ }
+ return f
+}
+
+// Formatter is an opaque struct which can be embedded in a LogSink
+// implementation. It should be constructed with NewFormatter. Some of
+// its methods directly implement logr.LogSink.
+type Formatter struct {
+ outputFormat outputFormat
+ prefix string
+ values []interface{}
+ valuesStr string
+ depth int
+ opts *Options
+}
+
+// outputFormat indicates which outputFormat to use.
+type outputFormat int
+
+const (
+ // outputKeyValue emits a JSON-like key=value format, but not strict JSON.
+ outputKeyValue outputFormat = iota
+ // outputJSON emits strict JSON.
+ outputJSON
+)
+
+// PseudoStruct is a list of key-value pairs that gets logged as a struct.
+type PseudoStruct []interface{}
+
+// render produces a log line, ready to use.
+func (f Formatter) render(builtins, args []interface{}) string {
+ // Empirically bytes.Buffer is faster than strings.Builder for this.
+ buf := bytes.NewBuffer(make([]byte, 0, 1024))
+ if f.outputFormat == outputJSON {
+ buf.WriteByte('{')
+ }
+ vals := builtins
+ if hook := f.opts.RenderBuiltinsHook; hook != nil {
+ vals = hook(f.sanitize(vals))
+ }
+ f.flatten(buf, vals, false, false) // keys are ours, no need to escape
+ continuing := len(builtins) > 0
+ if len(f.valuesStr) > 0 {
+ if continuing {
+ if f.outputFormat == outputJSON {
+ buf.WriteByte(',')
+ } else {
+ buf.WriteByte(' ')
+ }
+ }
+ continuing = true
+ buf.WriteString(f.valuesStr)
+ }
+ vals = args
+ if hook := f.opts.RenderArgsHook; hook != nil {
+ vals = hook(f.sanitize(vals))
+ }
+ f.flatten(buf, vals, continuing, true) // escape user-provided keys
+ if f.outputFormat == outputJSON {
+ buf.WriteByte('}')
+ }
+ return buf.String()
+}
+
+// flatten renders a list of key-value pairs into a buffer. If continuing is
+// true, it assumes that the buffer has previous values and will emit a
+// separator (which depends on the output format) before the first pair it
+// writes. If escapeKeys is true, the keys are assumed to have
+// non-JSON-compatible characters in them and must be evaluated for escapes.
+//
+// This function returns a potentially modified version of kvList, which
+// ensures that there is a value for every key (adding a value if needed) and
+// that each key is a string (substituting a key if needed).
+func (f Formatter) flatten(buf *bytes.Buffer, kvList []interface{}, continuing bool, escapeKeys bool) []interface{} {
+ // This logic overlaps with sanitize() but saves one type-cast per key,
+ // which can be measurable.
+ if len(kvList)%2 != 0 {
+ kvList = append(kvList, noValue)
+ }
+ for i := 0; i < len(kvList); i += 2 {
+ k, ok := kvList[i].(string)
+ if !ok {
+ k = f.nonStringKey(kvList[i])
+ kvList[i] = k
+ }
+ v := kvList[i+1]
+
+ if i > 0 || continuing {
+ if f.outputFormat == outputJSON {
+ buf.WriteByte(',')
+ } else {
+ // In theory the format could be something we don't understand. In
+ // practice, we control it, so it won't be.
+ buf.WriteByte(' ')
+ }
+ }
+
+ if escapeKeys {
+ buf.WriteString(prettyString(k))
+ } else {
+ // this is faster
+ buf.WriteByte('"')
+ buf.WriteString(k)
+ buf.WriteByte('"')
+ }
+ if f.outputFormat == outputJSON {
+ buf.WriteByte(':')
+ } else {
+ buf.WriteByte('=')
+ }
+ buf.WriteString(f.pretty(v))
+ }
+ return kvList
+}
+
+func (f Formatter) pretty(value interface{}) string {
+ return f.prettyWithFlags(value, 0, 0)
+}
+
+const (
+ flagRawStruct = 0x1 // do not print braces on structs
+)
+
+// TODO: This is not fast. Most of the overhead goes here.
+func (f Formatter) prettyWithFlags(value interface{}, flags uint32, depth int) string {
+ if depth > f.opts.MaxLogDepth {
+ return `"<max-log-depth-exceeded>"`
+ }
+
+ // Handle types that take full control of logging.
+ if v, ok := value.(logr.Marshaler); ok {
+ // Replace the value with what the type wants to get logged.
+ // That then gets handled below via reflection.
+ value = invokeMarshaler(v)
+ }
+
+ // Handle types that want to format themselves.
+ switch v := value.(type) {
+ case fmt.Stringer:
+ value = invokeStringer(v)
+ case error:
+ value = invokeError(v)
+ }
+
+ // Handling the most common types without reflect is a small perf win.
+ switch v := value.(type) {
+ case bool:
+ return strconv.FormatBool(v)
+ case string:
+ return prettyString(v)
+ case int:
+ return strconv.FormatInt(int64(v), 10)
+ case int8:
+ return strconv.FormatInt(int64(v), 10)
+ case int16:
+ return strconv.FormatInt(int64(v), 10)
+ case int32:
+ return strconv.FormatInt(int64(v), 10)
+ case int64:
+ return strconv.FormatInt(int64(v), 10)
+ case uint:
+ return strconv.FormatUint(uint64(v), 10)
+ case uint8:
+ return strconv.FormatUint(uint64(v), 10)
+ case uint16:
+ return strconv.FormatUint(uint64(v), 10)
+ case uint32:
+ return strconv.FormatUint(uint64(v), 10)
+ case uint64:
+ return strconv.FormatUint(v, 10)
+ case uintptr:
+ return strconv.FormatUint(uint64(v), 10)
+ case float32:
+ return strconv.FormatFloat(float64(v), 'f', -1, 32)
+ case float64:
+ return strconv.FormatFloat(v, 'f', -1, 64)
+ case complex64:
+ return `"` + strconv.FormatComplex(complex128(v), 'f', -1, 64) + `"`
+ case complex128:
+ return `"` + strconv.FormatComplex(v, 'f', -1, 128) + `"`
+ case PseudoStruct:
+ buf := bytes.NewBuffer(make([]byte, 0, 1024))
+ v = f.sanitize(v)
+ if flags&flagRawStruct == 0 {
+ buf.WriteByte('{')
+ }
+ for i := 0; i < len(v); i += 2 {
+ if i > 0 {
+ buf.WriteByte(',')
+ }
+ k, _ := v[i].(string) // sanitize() above means no need to check success
+ // arbitrary keys might need escaping
+ buf.WriteString(prettyString(k))
+ buf.WriteByte(':')
+ buf.WriteString(f.prettyWithFlags(v[i+1], 0, depth+1))
+ }
+ if flags&flagRawStruct == 0 {
+ buf.WriteByte('}')
+ }
+ return buf.String()
+ }
+
+ buf := bytes.NewBuffer(make([]byte, 0, 256))
+ t := reflect.TypeOf(value)
+ if t == nil {
+ return "null"
+ }
+ v := reflect.ValueOf(value)
+ switch t.Kind() {
+ case reflect.Bool:
+ return strconv.FormatBool(v.Bool())
+ case reflect.String:
+ return prettyString(v.String())
+ case reflect.Int, reflect.Int8, reflect.Int16, reflect.Int32, reflect.Int64:
+ return strconv.FormatInt(int64(v.Int()), 10)
+ case reflect.Uint, reflect.Uint8, reflect.Uint16, reflect.Uint32, reflect.Uint64, reflect.Uintptr:
+ return strconv.FormatUint(uint64(v.Uint()), 10)
+ case reflect.Float32:
+ return strconv.FormatFloat(float64(v.Float()), 'f', -1, 32)
+ case reflect.Float64:
+ return strconv.FormatFloat(v.Float(), 'f', -1, 64)
+ case reflect.Complex64:
+ return `"` + strconv.FormatComplex(complex128(v.Complex()), 'f', -1, 64) + `"`
+ case reflect.Complex128:
+ return `"` + strconv.FormatComplex(v.Complex(), 'f', -1, 128) + `"`
+ case reflect.Struct:
+ if flags&flagRawStruct == 0 {
+ buf.WriteByte('{')
+ }
+ printComma := false // testing i>0 is not enough because of JSON omitted fields
+ for i := 0; i < t.NumField(); i++ {
+ fld := t.Field(i)
+ if fld.PkgPath != "" {
+ // reflect says this field is only defined for non-exported fields.
+ continue
+ }
+ if !v.Field(i).CanInterface() {
+ // reflect isn't clear exactly what this means, but we can't use it.
+ continue
+ }
+ name := ""
+ omitempty := false
+ if tag, found := fld.Tag.Lookup("json"); found {
+ if tag == "-" {
+ continue
+ }
+ if comma := strings.Index(tag, ","); comma != -1 {
+ if n := tag[:comma]; n != "" {
+ name = n
+ }
+ rest := tag[comma:]
+ if strings.Contains(rest, ",omitempty,") || strings.HasSuffix(rest, ",omitempty") {
+ omitempty = true
+ }
+ } else {
+ name = tag
+ }
+ }
+ if omitempty && isEmpty(v.Field(i)) {
+ continue
+ }
+ if printComma {
+ buf.WriteByte(',')
+ }
+ printComma = true // if we got here, we are rendering a field
+ if fld.Anonymous && fld.Type.Kind() == reflect.Struct && name == "" {
+ buf.WriteString(f.prettyWithFlags(v.Field(i).Interface(), flags|flagRawStruct, depth+1))
+ continue
+ }
+ if name == "" {
+ name = fld.Name
+ }
+ // field names can't contain characters which need escaping
+ buf.WriteByte('"')
+ buf.WriteString(name)
+ buf.WriteByte('"')
+ buf.WriteByte(':')
+ buf.WriteString(f.prettyWithFlags(v.Field(i).Interface(), 0, depth+1))
+ }
+ if flags&flagRawStruct == 0 {
+ buf.WriteByte('}')
+ }
+ return buf.String()
+ case reflect.Slice, reflect.Array:
+ // If this is outputing as JSON make sure this isn't really a json.RawMessage.
+ // If so just emit "as-is" and don't pretty it as that will just print
+ // it as [X,Y,Z,...] which isn't terribly useful vs the string form you really want.
+ if f.outputFormat == outputJSON {
+ if rm, ok := value.(json.RawMessage); ok {
+ // If it's empty make sure we emit an empty value as the array style would below.
+ if len(rm) > 0 {
+ buf.Write(rm)
+ } else {
+ buf.WriteString("null")
+ }
+ return buf.String()
+ }
+ }
+ buf.WriteByte('[')
+ for i := 0; i < v.Len(); i++ {
+ if i > 0 {
+ buf.WriteByte(',')
+ }
+ e := v.Index(i)
+ buf.WriteString(f.prettyWithFlags(e.Interface(), 0, depth+1))
+ }
+ buf.WriteByte(']')
+ return buf.String()
+ case reflect.Map:
+ buf.WriteByte('{')
+ // This does not sort the map keys, for best perf.
+ it := v.MapRange()
+ i := 0
+ for it.Next() {
+ if i > 0 {
+ buf.WriteByte(',')
+ }
+ // If a map key supports TextMarshaler, use it.
+ keystr := ""
+ if m, ok := it.Key().Interface().(encoding.TextMarshaler); ok {
+ txt, err := m.MarshalText()
+ if err != nil {
+ keystr = fmt.Sprintf("<error-MarshalText: %s>", err.Error())
+ } else {
+ keystr = string(txt)
+ }
+ keystr = prettyString(keystr)
+ } else {
+ // prettyWithFlags will produce already-escaped values
+ keystr = f.prettyWithFlags(it.Key().Interface(), 0, depth+1)
+ if t.Key().Kind() != reflect.String {
+ // JSON only does string keys. Unlike Go's standard JSON, we'll
+ // convert just about anything to a string.
+ keystr = prettyString(keystr)
+ }
+ }
+ buf.WriteString(keystr)
+ buf.WriteByte(':')
+ buf.WriteString(f.prettyWithFlags(it.Value().Interface(), 0, depth+1))
+ i++
+ }
+ buf.WriteByte('}')
+ return buf.String()
+ case reflect.Ptr, reflect.Interface:
+ if v.IsNil() {
+ return "null"
+ }
+ return f.prettyWithFlags(v.Elem().Interface(), 0, depth)
+ }
+ return fmt.Sprintf(`"<unhandled-%s>"`, t.Kind().String())
+}
+
+func prettyString(s string) string {
+ // Avoid escaping (which does allocations) if we can.
+ if needsEscape(s) {
+ return strconv.Quote(s)
+ }
+ b := bytes.NewBuffer(make([]byte, 0, 1024))
+ b.WriteByte('"')
+ b.WriteString(s)
+ b.WriteByte('"')
+ return b.String()
+}
+
+// needsEscape determines whether the input string needs to be escaped or not,
+// without doing any allocations.
+func needsEscape(s string) bool {
+ for _, r := range s {
+ if !strconv.IsPrint(r) || r == '\\' || r == '"' {
+ return true
+ }
+ }
+ return false
+}
+
+func isEmpty(v reflect.Value) bool {
+ switch v.Kind() {
+ case reflect.Array, reflect.Map, reflect.Slice, reflect.String:
+ return v.Len() == 0
+ case reflect.Bool:
+ return !v.Bool()
+ case reflect.Int, reflect.Int8, reflect.Int16, reflect.Int32, reflect.Int64:
+ return v.Int() == 0
+ case reflect.Uint, reflect.Uint8, reflect.Uint16, reflect.Uint32, reflect.Uint64, reflect.Uintptr:
+ return v.Uint() == 0
+ case reflect.Float32, reflect.Float64:
+ return v.Float() == 0
+ case reflect.Complex64, reflect.Complex128:
+ return v.Complex() == 0
+ case reflect.Interface, reflect.Ptr:
+ return v.IsNil()
+ }
+ return false
+}
+
+func invokeMarshaler(m logr.Marshaler) (ret interface{}) {
+ defer func() {
+ if r := recover(); r != nil {
+ ret = fmt.Sprintf("<panic: %s>", r)
+ }
+ }()
+ return m.MarshalLog()
+}
+
+func invokeStringer(s fmt.Stringer) (ret string) {
+ defer func() {
+ if r := recover(); r != nil {
+ ret = fmt.Sprintf("<panic: %s>", r)
+ }
+ }()
+ return s.String()
+}
+
+func invokeError(e error) (ret string) {
+ defer func() {
+ if r := recover(); r != nil {
+ ret = fmt.Sprintf("<panic: %s>", r)
+ }
+ }()
+ return e.Error()
+}
+
+// Caller represents the original call site for a log line, after considering
+// logr.Logger.WithCallDepth and logr.Logger.WithCallStackHelper. The File and
+// Line fields will always be provided, while the Func field is optional.
+// Users can set the render hook fields in Options to examine logged key-value
+// pairs, one of which will be {"caller", Caller} if the Options.LogCaller
+// field is enabled for the given MessageClass.
+type Caller struct {
+ // File is the basename of the file for this call site.
+ File string `json:"file"`
+ // Line is the line number in the file for this call site.
+ Line int `json:"line"`
+ // Func is the function name for this call site, or empty if
+ // Options.LogCallerFunc is not enabled.
+ Func string `json:"function,omitempty"`
+}
+
+func (f Formatter) caller() Caller {
+ // +1 for this frame, +1 for Info/Error.
+ pc, file, line, ok := runtime.Caller(f.depth + 2)
+ if !ok {
+ return Caller{"<unknown>", 0, ""}
+ }
+ fn := ""
+ if f.opts.LogCallerFunc {
+ if fp := runtime.FuncForPC(pc); fp != nil {
+ fn = fp.Name()
+ }
+ }
+
+ return Caller{filepath.Base(file), line, fn}
+}
+
+const noValue = "<no-value>"
+
+func (f Formatter) nonStringKey(v interface{}) string {
+ return fmt.Sprintf("<non-string-key: %s>", f.snippet(v))
+}
+
+// snippet produces a short snippet string of an arbitrary value.
+func (f Formatter) snippet(v interface{}) string {
+ const snipLen = 16
+
+ snip := f.pretty(v)
+ if len(snip) > snipLen {
+ snip = snip[:snipLen]
+ }
+ return snip
+}
+
+// sanitize ensures that a list of key-value pairs has a value for every key
+// (adding a value if needed) and that each key is a string (substituting a key
+// if needed).
+func (f Formatter) sanitize(kvList []interface{}) []interface{} {
+ if len(kvList)%2 != 0 {
+ kvList = append(kvList, noValue)
+ }
+ for i := 0; i < len(kvList); i += 2 {
+ _, ok := kvList[i].(string)
+ if !ok {
+ kvList[i] = f.nonStringKey(kvList[i])
+ }
+ }
+ return kvList
+}
+
+// Init configures this Formatter from runtime info, such as the call depth
+// imposed by logr itself.
+// Note that this receiver is a pointer, so depth can be saved.
+func (f *Formatter) Init(info logr.RuntimeInfo) {
+ f.depth += info.CallDepth
+}
+
+// Enabled checks whether an info message at the given level should be logged.
+func (f Formatter) Enabled(level int) bool {
+ return level <= f.opts.Verbosity
+}
+
+// GetDepth returns the current depth of this Formatter. This is useful for
+// implementations which do their own caller attribution.
+func (f Formatter) GetDepth() int {
+ return f.depth
+}
+
+// FormatInfo renders an Info log message into strings. The prefix will be
+// empty when no names were set (via AddNames), or when the output is
+// configured for JSON.
+func (f Formatter) FormatInfo(level int, msg string, kvList []interface{}) (prefix, argsStr string) {
+ args := make([]interface{}, 0, 64) // using a constant here impacts perf
+ prefix = f.prefix
+ if f.outputFormat == outputJSON {
+ args = append(args, "logger", prefix)
+ prefix = ""
+ }
+ if f.opts.LogTimestamp {
+ args = append(args, "ts", time.Now().Format(f.opts.TimestampFormat))
+ }
+ if policy := f.opts.LogCaller; policy == All || policy == Info {
+ args = append(args, "caller", f.caller())
+ }
+ args = append(args, "level", level, "msg", msg)
+ return prefix, f.render(args, kvList)
+}
+
+// FormatError renders an Error log message into strings. The prefix will be
+// empty when no names were set (via AddNames), or when the output is
+// configured for JSON.
+func (f Formatter) FormatError(err error, msg string, kvList []interface{}) (prefix, argsStr string) {
+ args := make([]interface{}, 0, 64) // using a constant here impacts perf
+ prefix = f.prefix
+ if f.outputFormat == outputJSON {
+ args = append(args, "logger", prefix)
+ prefix = ""
+ }
+ if f.opts.LogTimestamp {
+ args = append(args, "ts", time.Now().Format(f.opts.TimestampFormat))
+ }
+ if policy := f.opts.LogCaller; policy == All || policy == Error {
+ args = append(args, "caller", f.caller())
+ }
+ args = append(args, "msg", msg)
+ var loggableErr interface{}
+ if err != nil {
+ loggableErr = err.Error()
+ }
+ args = append(args, "error", loggableErr)
+ return f.prefix, f.render(args, kvList)
+}
+
+// AddName appends the specified name. funcr uses '/' characters to separate
+// name elements. Callers should not pass '/' in the provided name string, but
+// this library does not actually enforce that.
+func (f *Formatter) AddName(name string) {
+ if len(f.prefix) > 0 {
+ f.prefix += "/"
+ }
+ f.prefix += name
+}
+
+// AddValues adds key-value pairs to the set of saved values to be logged with
+// each log line.
+func (f *Formatter) AddValues(kvList []interface{}) {
+ // Three slice args forces a copy.
+ n := len(f.values)
+ f.values = append(f.values[:n:n], kvList...)
+
+ vals := f.values
+ if hook := f.opts.RenderValuesHook; hook != nil {
+ vals = hook(f.sanitize(vals))
+ }
+
+ // Pre-render values, so we don't have to do it on each Info/Error call.
+ buf := bytes.NewBuffer(make([]byte, 0, 1024))
+ f.flatten(buf, vals, false, true) // escape user-provided keys
+ f.valuesStr = buf.String()
+}
+
+// AddCallDepth increases the number of stack-frames to skip when attributing
+// the log line to a file and line.
+func (f *Formatter) AddCallDepth(depth int) {
+ f.depth += depth
+}
diff --git a/vendor/github.com/go-logr/logr/logr.go b/vendor/github.com/go-logr/logr/logr.go
index c3b56b3d2..e027aea3f 100644
--- a/vendor/github.com/go-logr/logr/logr.go
+++ b/vendor/github.com/go-logr/logr/logr.go
@@ -21,7 +21,7 @@ limitations under the License.
// to back that API. Packages in the Go ecosystem can depend on this package,
// while callers can implement logging with whatever backend is appropriate.
//
-// Usage
+// # Usage
//
// Logging is done using a Logger instance. Logger is a concrete type with
// methods, which defers the actual logging to a LogSink interface. The main
@@ -30,16 +30,20 @@ limitations under the License.
// "structured logging".
//
// With Go's standard log package, we might write:
-// log.Printf("setting target value %s", targetValue)
+//
+// log.Printf("setting target value %s", targetValue)
//
// With logr's structured logging, we'd write:
-// logger.Info("setting target", "value", targetValue)
+//
+// logger.Info("setting target", "value", targetValue)
//
// Errors are much the same. Instead of:
-// log.Printf("failed to open the pod bay door for user %s: %v", user, err)
+//
+// log.Printf("failed to open the pod bay door for user %s: %v", user, err)
//
// We'd write:
-// logger.Error(err, "failed to open the pod bay door", "user", user)
+//
+// logger.Error(err, "failed to open the pod bay door", "user", user)
//
// Info() and Error() are very similar, but they are separate methods so that
// LogSink implementations can choose to do things like attach additional
@@ -47,7 +51,7 @@ limitations under the License.
// always logged, regardless of the current verbosity. If there is no error
// instance available, passing nil is valid.
//
-// Verbosity
+// # Verbosity
//
// Often we want to log information only when the application in "verbose
// mode". To write log lines that are more verbose, Logger has a V() method.
@@ -58,20 +62,22 @@ limitations under the License.
// Error messages do not have a verbosity level and are always logged.
//
// Where we might have written:
-// if flVerbose >= 2 {
-// log.Printf("an unusual thing happened")
-// }
+//
+// if flVerbose >= 2 {
+// log.Printf("an unusual thing happened")
+// }
//
// We can write:
-// logger.V(2).Info("an unusual thing happened")
//
-// Logger Names
+// logger.V(2).Info("an unusual thing happened")
+//
+// # Logger Names
//
// Logger instances can have name strings so that all messages logged through
// that instance have additional context. For example, you might want to add
// a subsystem name:
//
-// logger.WithName("compactor").Info("started", "time", time.Now())
+// logger.WithName("compactor").Info("started", "time", time.Now())
//
// The WithName() method returns a new Logger, which can be passed to
// constructors or other functions for further use. Repeated use of WithName()
@@ -82,25 +88,27 @@ limitations under the License.
// joining operation (e.g. whitespace, commas, periods, slashes, brackets,
// quotes, etc).
//
-// Saved Values
+// # Saved Values
//
// Logger instances can store any number of key/value pairs, which will be
// logged alongside all messages logged through that instance. For example,
// you might want to create a Logger instance per managed object:
//
// With the standard log package, we might write:
-// log.Printf("decided to set field foo to value %q for object %s/%s",
-// targetValue, object.Namespace, object.Name)
+//
+// log.Printf("decided to set field foo to value %q for object %s/%s",
+// targetValue, object.Namespace, object.Name)
//
// With logr we'd write:
-// // Elsewhere: set up the logger to log the object name.
-// obj.logger = mainLogger.WithValues(
-// "name", obj.name, "namespace", obj.namespace)
//
-// // later on...
-// obj.logger.Info("setting foo", "value", targetValue)
+// // Elsewhere: set up the logger to log the object name.
+// obj.logger = mainLogger.WithValues(
+// "name", obj.name, "namespace", obj.namespace)
+//
+// // later on...
+// obj.logger.Info("setting foo", "value", targetValue)
//
-// Best Practices
+// # Best Practices
//
// Logger has very few hard rules, with the goal that LogSink implementations
// might have a lot of freedom to differentiate. There are, however, some
@@ -124,15 +132,15 @@ limitations under the License.
// around. For cases where passing a logger is optional, a pointer to Logger
// should be used.
//
-// Key Naming Conventions
+// # Key Naming Conventions
//
// Keys are not strictly required to conform to any specification or regex, but
// it is recommended that they:
-// * be human-readable and meaningful (not auto-generated or simple ordinals)
-// * be constant (not dependent on input data)
-// * contain only printable characters
-// * not contain whitespace or punctuation
-// * use lower case for simple keys and lowerCamelCase for more complex ones
+// - be human-readable and meaningful (not auto-generated or simple ordinals)
+// - be constant (not dependent on input data)
+// - contain only printable characters
+// - not contain whitespace or punctuation
+// - use lower case for simple keys and lowerCamelCase for more complex ones
//
// These guidelines help ensure that log data is processed properly regardless
// of the log implementation. For example, log implementations will try to
@@ -141,51 +149,54 @@ limitations under the License.
// While users are generally free to use key names of their choice, it's
// generally best to avoid using the following keys, as they're frequently used
// by implementations:
-// * "caller": the calling information (file/line) of a particular log line
-// * "error": the underlying error value in the `Error` method
-// * "level": the log level
-// * "logger": the name of the associated logger
-// * "msg": the log message
-// * "stacktrace": the stack trace associated with a particular log line or
-// error (often from the `Error` message)
-// * "ts": the timestamp for a log line
+// - "caller": the calling information (file/line) of a particular log line
+// - "error": the underlying error value in the `Error` method
+// - "level": the log level
+// - "logger": the name of the associated logger
+// - "msg": the log message
+// - "stacktrace": the stack trace associated with a particular log line or
+// error (often from the `Error` message)
+// - "ts": the timestamp for a log line
//
// Implementations are encouraged to make use of these keys to represent the
// above concepts, when necessary (for example, in a pure-JSON output form, it
// would be necessary to represent at least message and timestamp as ordinary
// named values).
//
-// Break Glass
+// # Break Glass
//
// Implementations may choose to give callers access to the underlying
// logging implementation. The recommended pattern for this is:
-// // Underlier exposes access to the underlying logging implementation.
-// // Since callers only have a logr.Logger, they have to know which
-// // implementation is in use, so this interface is less of an abstraction
-// // and more of way to test type conversion.
-// type Underlier interface {
-// GetUnderlying() <underlying-type>
-// }
+//
+// // Underlier exposes access to the underlying logging implementation.
+// // Since callers only have a logr.Logger, they have to know which
+// // implementation is in use, so this interface is less of an abstraction
+// // and more of way to test type conversion.
+// type Underlier interface {
+// GetUnderlying() <underlying-type>
+// }
//
// Logger grants access to the sink to enable type assertions like this:
-// func DoSomethingWithImpl(log logr.Logger) {
-// if underlier, ok := log.GetSink()(impl.Underlier) {
-// implLogger := underlier.GetUnderlying()
-// ...
-// }
-// }
+//
+// func DoSomethingWithImpl(log logr.Logger) {
+// if underlier, ok := log.GetSink().(impl.Underlier); ok {
+// implLogger := underlier.GetUnderlying()
+// ...
+// }
+// }
//
// Custom `With*` functions can be implemented by copying the complete
// Logger struct and replacing the sink in the copy:
-// // WithFooBar changes the foobar parameter in the log sink and returns a
-// // new logger with that modified sink. It does nothing for loggers where
-// // the sink doesn't support that parameter.
-// func WithFoobar(log logr.Logger, foobar int) logr.Logger {
-// if foobarLogSink, ok := log.GetSink()(FoobarSink); ok {
-// log = log.WithSink(foobarLogSink.WithFooBar(foobar))
-// }
-// return log
-// }
+//
+// // WithFooBar changes the foobar parameter in the log sink and returns a
+// // new logger with that modified sink. It does nothing for loggers where
+// // the sink doesn't support that parameter.
+// func WithFoobar(log logr.Logger, foobar int) logr.Logger {
+// if foobarLogSink, ok := log.GetSink().(FoobarSink); ok {
+// log = log.WithSink(foobarLogSink.WithFooBar(foobar))
+// }
+// return log
+// }
//
// Don't use New to construct a new Logger with a LogSink retrieved from an
// existing Logger. Source code attribution might not work correctly and
@@ -201,11 +212,14 @@ import (
)
// New returns a new Logger instance. This is primarily used by libraries
-// implementing LogSink, rather than end users.
+// implementing LogSink, rather than end users. Passing a nil sink will create
+// a Logger which discards all log lines.
func New(sink LogSink) Logger {
logger := Logger{}
logger.setSink(sink)
- sink.Init(runtimeInfo)
+ if sink != nil {
+ sink.Init(runtimeInfo)
+ }
return logger
}
@@ -244,7 +258,7 @@ type Logger struct {
// Enabled tests whether this Logger is enabled. For example, commandline
// flags might be used to set the logging verbosity and disable some info logs.
func (l Logger) Enabled() bool {
- return l.sink.Enabled(l.level)
+ return l.sink != nil && l.sink.Enabled(l.level)
}
// Info logs a non-error message with the given key/value pairs as context.
@@ -254,6 +268,9 @@ func (l Logger) Enabled() bool {
// information. The key/value pairs must alternate string keys and arbitrary
// values.
func (l Logger) Info(msg string, keysAndValues ...interface{}) {
+ if l.sink == nil {
+ return
+ }
if l.Enabled() {
if withHelper, ok := l.sink.(CallStackHelperLogSink); ok {
withHelper.GetCallStackHelper()()
@@ -273,6 +290,9 @@ func (l Logger) Info(msg string, keysAndValues ...interface{}) {
// triggered this log line, if present. The err parameter is optional
// and nil may be passed instead of an error instance.
func (l Logger) Error(err error, msg string, keysAndValues ...interface{}) {
+ if l.sink == nil {
+ return
+ }
if withHelper, ok := l.sink.(CallStackHelperLogSink); ok {
withHelper.GetCallStackHelper()()
}
@@ -284,6 +304,9 @@ func (l Logger) Error(err error, msg string, keysAndValues ...interface{}) {
// level means a log message is less important. Negative V-levels are treated
// as 0.
func (l Logger) V(level int) Logger {
+ if l.sink == nil {
+ return l
+ }
if level < 0 {
level = 0
}
@@ -294,6 +317,9 @@ func (l Logger) V(level int) Logger {
// WithValues returns a new Logger instance with additional key/value pairs.
// See Info for documentation on how key/value pairs work.
func (l Logger) WithValues(keysAndValues ...interface{}) Logger {
+ if l.sink == nil {
+ return l
+ }
l.setSink(l.sink.WithValues(keysAndValues...))
return l
}
@@ -304,6 +330,9 @@ func (l Logger) WithValues(keysAndValues ...interface{}) Logger {
// contain only letters, digits, and hyphens (see the package documentation for
// more information).
func (l Logger) WithName(name string) Logger {
+ if l.sink == nil {
+ return l
+ }
l.setSink(l.sink.WithName(name))
return l
}
@@ -324,6 +353,9 @@ func (l Logger) WithName(name string) Logger {
// WithCallDepth(1) because it works with implementions that support the
// CallDepthLogSink and/or CallStackHelperLogSink interfaces.
func (l Logger) WithCallDepth(depth int) Logger {
+ if l.sink == nil {
+ return l
+ }
if withCallDepth, ok := l.sink.(CallDepthLogSink); ok {
l.setSink(withCallDepth.WithCallDepth(depth))
}
@@ -345,6 +377,9 @@ func (l Logger) WithCallDepth(depth int) Logger {
// implementation does not support either of these, the original Logger will be
// returned.
func (l Logger) WithCallStackHelper() (func(), Logger) {
+ if l.sink == nil {
+ return func() {}, l
+ }
var helper func()
if withCallDepth, ok := l.sink.(CallDepthLogSink); ok {
l.setSink(withCallDepth.WithCallDepth(1))
@@ -357,6 +392,11 @@ func (l Logger) WithCallStackHelper() (func(), Logger) {
return helper, l
}
+// IsZero returns true if this logger is an uninitialized zero value
+func (l Logger) IsZero() bool {
+ return l.sink == nil
+}
+
// contextKey is how we find Loggers in a context.Context.
type contextKey struct{}
@@ -442,7 +482,7 @@ type LogSink interface {
WithName(name string) LogSink
}
-// CallDepthLogSink represents a Logger that knows how to climb the call stack
+// CallDepthLogSink represents a LogSink that knows how to climb the call stack
// to identify the original call site and can offset the depth by a specified
// number of frames. This is useful for users who have helper functions
// between the "real" call site and the actual calls to Logger methods.
@@ -467,7 +507,7 @@ type CallDepthLogSink interface {
WithCallDepth(depth int) LogSink
}
-// CallStackHelperLogSink represents a Logger that knows how to climb
+// CallStackHelperLogSink represents a LogSink that knows how to climb
// the call stack to identify the original call site and can skip
// intermediate helper functions if they mark themselves as
// helper. Go's testing package uses that approach.
diff --git a/vendor/github.com/golang/mock/AUTHORS b/vendor/github.com/golang/mock/AUTHORS
new file mode 100644
index 000000000..660b8ccc8
--- /dev/null
+++ b/vendor/github.com/golang/mock/AUTHORS
@@ -0,0 +1,12 @@
+# This is the official list of GoMock authors for copyright purposes.
+# This file is distinct from the CONTRIBUTORS files.
+# See the latter for an explanation.
+
+# Names should be added to this file as
+# Name or Organization <email address>
+# The email address is not required for organizations.
+
+# Please keep the list sorted.
+
+Alex Reece <awreece@gmail.com>
+Google Inc.
diff --git a/vendor/github.com/golang/mock/CONTRIBUTORS b/vendor/github.com/golang/mock/CONTRIBUTORS
new file mode 100644
index 000000000..def849cab
--- /dev/null
+++ b/vendor/github.com/golang/mock/CONTRIBUTORS
@@ -0,0 +1,37 @@
+# This is the official list of people who can contribute (and typically
+# have contributed) code to the gomock repository.
+# The AUTHORS file lists the copyright holders; this file
+# lists people. For example, Google employees are listed here
+# but not in AUTHORS, because Google holds the copyright.
+#
+# The submission process automatically checks to make sure
+# that people submitting code are listed in this file (by email address).
+#
+# Names should be added to this file only after verifying that
+# the individual or the individual's organization has agreed to
+# the appropriate Contributor License Agreement, found here:
+#
+# http://code.google.com/legal/individual-cla-v1.0.html
+# http://code.google.com/legal/corporate-cla-v1.0.html
+#
+# The agreement for individuals can be filled out on the web.
+#
+# When adding J Random Contributor's name to this file,
+# either J's name or J's organization's name should be
+# added to the AUTHORS file, depending on whether the
+# individual or corporate CLA was used.
+
+# Names should be added to this file like so:
+# Name
+#
+# An entry with two email addresses specifies that the
+# first address should be used in the submit logs and
+# that the second address should be recognized as the
+# same person when interacting with Rietveld.
+
+# Please keep the list sorted.
+
+Aaron Jacobs
+Alex Reece
+David Symonds
+Ryan Barrett
diff --git a/vendor/github.com/golang/mock/LICENSE b/vendor/github.com/golang/mock/LICENSE
new file mode 100644
index 000000000..d64569567
--- /dev/null
+++ b/vendor/github.com/golang/mock/LICENSE
@@ -0,0 +1,202 @@
+
+ Apache License
+ Version 2.0, January 2004
+ http://www.apache.org/licenses/
+
+ TERMS AND CONDITIONS FOR USE, REPRODUCTION, AND DISTRIBUTION
+
+ 1. Definitions.
+
+ "License" shall mean the terms and conditions for use, reproduction,
+ and distribution as defined by Sections 1 through 9 of this document.
+
+ "Licensor" shall mean the copyright owner or entity authorized by
+ the copyright owner that is granting the License.
+
+ "Legal Entity" shall mean the union of the acting entity and all
+ other entities that control, are controlled by, or are under common
+ control with that entity. For the purposes of this definition,
+ "control" means (i) the power, direct or indirect, to cause the
+ direction or management of such entity, whether by contract or
+ otherwise, or (ii) ownership of fifty percent (50%) or more of the
+ outstanding shares, or (iii) beneficial ownership of such entity.
+
+ "You" (or "Your") shall mean an individual or Legal Entity
+ exercising permissions granted by this License.
+
+ "Source" form shall mean the preferred form for making modifications,
+ including but not limited to software source code, documentation
+ source, and configuration files.
+
+ "Object" form shall mean any form resulting from mechanical
+ transformation or translation of a Source form, including but
+ not limited to compiled object code, generated documentation,
+ and conversions to other media types.
+
+ "Work" shall mean the work of authorship, whether in Source or
+ Object form, made available under the License, as indicated by a
+ copyright notice that is included in or attached to the work
+ (an example is provided in the Appendix below).
+
+ "Derivative Works" shall mean any work, whether in Source or Object
+ form, that is based on (or derived from) the Work and for which the
+ editorial revisions, annotations, elaborations, or other modifications
+ represent, as a whole, an original work of authorship. For the purposes
+ of this License, Derivative Works shall not include works that remain
+ separable from, or merely link (or bind by name) to the interfaces of,
+ the Work and Derivative Works thereof.
+
+ "Contribution" shall mean any work of authorship, including
+ the original version of the Work and any modifications or additions
+ to that Work or Derivative Works thereof, that is intentionally
+ submitted to Licensor for inclusion in the Work by the copyright owner
+ or by an individual or Legal Entity authorized to submit on behalf of
+ the copyright owner. For the purposes of this definition, "submitted"
+ means any form of electronic, verbal, or written communication sent
+ to the Licensor or its representatives, including but not limited to
+ communication on electronic mailing lists, source code control systems,
+ and issue tracking systems that are managed by, or on behalf of, the
+ Licensor for the purpose of discussing and improving the Work, but
+ excluding communication that is conspicuously marked or otherwise
+ designated in writing by the copyright owner as "Not a Contribution."
+
+ "Contributor" shall mean Licensor and any individual or Legal Entity
+ on behalf of whom a Contribution has been received by Licensor and
+ subsequently incorporated within the Work.
+
+ 2. Grant of Copyright License. Subject to the terms and conditions of
+ this License, each Contributor hereby grants to You a perpetual,
+ worldwide, non-exclusive, no-charge, royalty-free, irrevocable
+ copyright license to reproduce, prepare Derivative Works of,
+ publicly display, publicly perform, sublicense, and distribute the
+ Work and such Derivative Works in Source or Object form.
+
+ 3. Grant of Patent License. Subject to the terms and conditions of
+ this License, each Contributor hereby grants to You a perpetual,
+ worldwide, non-exclusive, no-charge, royalty-free, irrevocable
+ (except as stated in this section) patent license to make, have made,
+ use, offer to sell, sell, import, and otherwise transfer the Work,
+ where such license applies only to those patent claims licensable
+ by such Contributor that are necessarily infringed by their
+ Contribution(s) alone or by combination of their Contribution(s)
+ with the Work to which such Contribution(s) was submitted. If You
+ institute patent litigation against any entity (including a
+ cross-claim or counterclaim in a lawsuit) alleging that the Work
+ or a Contribution incorporated within the Work constitutes direct
+ or contributory patent infringement, then any patent licenses
+ granted to You under this License for that Work shall terminate
+ as of the date such litigation is filed.
+
+ 4. Redistribution. You may reproduce and distribute copies of the
+ Work or Derivative Works thereof in any medium, with or without
+ modifications, and in Source or Object form, provided that You
+ meet the following conditions:
+
+ (a) You must give any other recipients of the Work or
+ Derivative Works a copy of this License; and
+
+ (b) You must cause any modified files to carry prominent notices
+ stating that You changed the files; and
+
+ (c) You must retain, in the Source form of any Derivative Works
+ that You distribute, all copyright, patent, trademark, and
+ attribution notices from the Source form of the Work,
+ excluding those notices that do not pertain to any part of
+ the Derivative Works; and
+
+ (d) If the Work includes a "NOTICE" text file as part of its
+ distribution, then any Derivative Works that You distribute must
+ include a readable copy of the attribution notices contained
+ within such NOTICE file, excluding those notices that do not
+ pertain to any part of the Derivative Works, in at least one
+ of the following places: within a NOTICE text file distributed
+ as part of the Derivative Works; within the Source form or
+ documentation, if provided along with the Derivative Works; or,
+ within a display generated by the Derivative Works, if and
+ wherever such third-party notices normally appear. The contents
+ of the NOTICE file are for informational purposes only and
+ do not modify the License. You may add Your own attribution
+ notices within Derivative Works that You distribute, alongside
+ or as an addendum to the NOTICE text from the Work, provided
+ that such additional attribution notices cannot be construed
+ as modifying the License.
+
+ You may add Your own copyright statement to Your modifications and
+ may provide additional or different license terms and conditions
+ for use, reproduction, or distribution of Your modifications, or
+ for any such Derivative Works as a whole, provided Your use,
+ reproduction, and distribution of the Work otherwise complies with
+ the conditions stated in this License.
+
+ 5. Submission of Contributions. Unless You explicitly state otherwise,
+ any Contribution intentionally submitted for inclusion in the Work
+ by You to the Licensor shall be under the terms and conditions of
+ this License, without any additional terms or conditions.
+ Notwithstanding the above, nothing herein shall supersede or modify
+ the terms of any separate license agreement you may have executed
+ with Licensor regarding such Contributions.
+
+ 6. Trademarks. This License does not grant permission to use the trade
+ names, trademarks, service marks, or product names of the Licensor,
+ except as required for reasonable and customary use in describing the
+ origin of the Work and reproducing the content of the NOTICE file.
+
+ 7. Disclaimer of Warranty. Unless required by applicable law or
+ agreed to in writing, Licensor provides the Work (and each
+ Contributor provides its Contributions) on an "AS IS" BASIS,
+ WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or
+ implied, including, without limitation, any warranties or conditions
+ of TITLE, NON-INFRINGEMENT, MERCHANTABILITY, or FITNESS FOR A
+ PARTICULAR PURPOSE. You are solely responsible for determining the
+ appropriateness of using or redistributing the Work and assume any
+ risks associated with Your exercise of permissions under this License.
+
+ 8. Limitation of Liability. In no event and under no legal theory,
+ whether in tort (including negligence), contract, or otherwise,
+ unless required by applicable law (such as deliberate and grossly
+ negligent acts) or agreed to in writing, shall any Contributor be
+ liable to You for damages, including any direct, indirect, special,
+ incidental, or consequential damages of any character arising as a
+ result of this License or out of the use or inability to use the
+ Work (including but not limited to damages for loss of goodwill,
+ work stoppage, computer failure or malfunction, or any and all
+ other commercial damages or losses), even if such Contributor
+ has been advised of the possibility of such damages.
+
+ 9. Accepting Warranty or Additional Liability. While redistributing
+ the Work or Derivative Works thereof, You may choose to offer,
+ and charge a fee for, acceptance of support, warranty, indemnity,
+ or other liability obligations and/or rights consistent with this
+ License. However, in accepting such obligations, You may act only
+ on Your own behalf and on Your sole responsibility, not on behalf
+ of any other Contributor, and only if You agree to indemnify,
+ defend, and hold each Contributor harmless for any liability
+ incurred by, or claims asserted against, such Contributor by reason
+ of your accepting any such warranty or additional liability.
+
+ END OF TERMS AND CONDITIONS
+
+ APPENDIX: How to apply the Apache License to your work.
+
+ To apply the Apache License to your work, attach the following
+ boilerplate notice, with the fields enclosed by brackets "[]"
+ replaced with your own identifying information. (Don't include
+ the brackets!) The text should be enclosed in the appropriate
+ comment syntax for the file format. We also recommend that a
+ file or class name and description of purpose be included on the
+ same "printed page" as the copyright notice for easier
+ identification within third-party archives.
+
+ Copyright [yyyy] [name of copyright owner]
+
+ Licensed under the Apache License, Version 2.0 (the "License");
+ you may not use this file except in compliance with the License.
+ You may obtain a copy of the License at
+
+ http://www.apache.org/licenses/LICENSE-2.0
+
+ Unless required by applicable law or agreed to in writing, software
+ distributed under the License is distributed on an "AS IS" BASIS,
+ WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ See the License for the specific language governing permissions and
+ limitations under the License.
diff --git a/vendor/github.com/golang/mock/mockgen/mockgen.go b/vendor/github.com/golang/mock/mockgen/mockgen.go
new file mode 100644
index 000000000..50487070e
--- /dev/null
+++ b/vendor/github.com/golang/mock/mockgen/mockgen.go
@@ -0,0 +1,701 @@
+// Copyright 2010 Google Inc.
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+// MockGen generates mock implementations of Go interfaces.
+package main
+
+// TODO: This does not support recursive embedded interfaces.
+// TODO: This does not support embedding package-local interfaces in a separate file.
+
+import (
+ "bytes"
+ "encoding/json"
+ "flag"
+ "fmt"
+ "go/token"
+ "io"
+ "io/ioutil"
+ "log"
+ "os"
+ "os/exec"
+ "path"
+ "path/filepath"
+ "sort"
+ "strconv"
+ "strings"
+ "unicode"
+
+ "github.com/golang/mock/mockgen/model"
+
+ "golang.org/x/mod/modfile"
+ toolsimports "golang.org/x/tools/imports"
+)
+
+const (
+ gomockImportPath = "github.com/golang/mock/gomock"
+)
+
+var (
+ version = ""
+ commit = "none"
+ date = "unknown"
+)
+
+var (
+ source = flag.String("source", "", "(source mode) Input Go source file; enables source mode.")
+ destination = flag.String("destination", "", "Output file; defaults to stdout.")
+ mockNames = flag.String("mock_names", "", "Comma-separated interfaceName=mockName pairs of explicit mock names to use. Mock names default to 'Mock'+ interfaceName suffix.")
+ packageOut = flag.String("package", "", "Package of the generated code; defaults to the package of the input with a 'mock_' prefix.")
+ selfPackage = flag.String("self_package", "", "The full package import path for the generated code. The purpose of this flag is to prevent import cycles in the generated code by trying to include its own package. This can happen if the mock's package is set to one of its inputs (usually the main one) and the output is stdio so mockgen cannot detect the final output package. Setting this flag will then tell mockgen which import to exclude.")
+ writePkgComment = flag.Bool("write_package_comment", true, "Writes package documentation comment (godoc) if true.")
+ copyrightFile = flag.String("copyright_file", "", "Copyright file used to add copyright header")
+
+ debugParser = flag.Bool("debug_parser", false, "Print out parser results only.")
+ showVersion = flag.Bool("version", false, "Print version.")
+)
+
+func main() {
+ flag.Usage = usage
+ flag.Parse()
+
+ if *showVersion {
+ printVersion()
+ return
+ }
+
+ var pkg *model.Package
+ var err error
+ var packageName string
+ if *source != "" {
+ pkg, err = sourceMode(*source)
+ } else {
+ if flag.NArg() != 2 {
+ usage()
+ log.Fatal("Expected exactly two arguments")
+ }
+ packageName = flag.Arg(0)
+ interfaces := strings.Split(flag.Arg(1), ",")
+ if packageName == "." {
+ dir, err := os.Getwd()
+ if err != nil {
+ log.Fatalf("Get current directory failed: %v", err)
+ }
+ packageName, err = packageNameOfDir(dir)
+ if err != nil {
+ log.Fatalf("Parse package name failed: %v", err)
+ }
+ }
+ pkg, err = reflectMode(packageName, interfaces)
+ }
+ if err != nil {
+ log.Fatalf("Loading input failed: %v", err)
+ }
+
+ if *debugParser {
+ pkg.Print(os.Stdout)
+ return
+ }
+
+ dst := os.Stdout
+ if len(*destination) > 0 {
+ if err := os.MkdirAll(filepath.Dir(*destination), os.ModePerm); err != nil {
+ log.Fatalf("Unable to create directory: %v", err)
+ }
+ f, err := os.Create(*destination)
+ if err != nil {
+ log.Fatalf("Failed opening destination file: %v", err)
+ }
+ defer f.Close()
+ dst = f
+ }
+
+ outputPackageName := *packageOut
+ if outputPackageName == "" {
+ // pkg.Name in reflect mode is the base name of the import path,
+ // which might have characters that are illegal to have in package names.
+ outputPackageName = "mock_" + sanitize(pkg.Name)
+ }
+
+ // outputPackagePath represents the fully qualified name of the package of
+ // the generated code. Its purposes are to prevent the module from importing
+ // itself and to prevent qualifying type names that come from its own
+ // package (i.e. if there is a type called X then we want to print "X" not
+ // "package.X" since "package" is this package). This can happen if the mock
+ // is output into an already existing package.
+ outputPackagePath := *selfPackage
+ if outputPackagePath == "" && *destination != "" {
+ dstPath, err := filepath.Abs(filepath.Dir(*destination))
+ if err == nil {
+ pkgPath, err := parsePackageImport(dstPath)
+ if err == nil {
+ outputPackagePath = pkgPath
+ } else {
+ log.Println("Unable to infer -self_package from destination file path:", err)
+ }
+ } else {
+ log.Println("Unable to determine destination file path:", err)
+ }
+ }
+
+ g := new(generator)
+ if *source != "" {
+ g.filename = *source
+ } else {
+ g.srcPackage = packageName
+ g.srcInterfaces = flag.Arg(1)
+ }
+ g.destination = *destination
+
+ if *mockNames != "" {
+ g.mockNames = parseMockNames(*mockNames)
+ }
+ if *copyrightFile != "" {
+ header, err := ioutil.ReadFile(*copyrightFile)
+ if err != nil {
+ log.Fatalf("Failed reading copyright file: %v", err)
+ }
+
+ g.copyrightHeader = string(header)
+ }
+ if err := g.Generate(pkg, outputPackageName, outputPackagePath); err != nil {
+ log.Fatalf("Failed generating mock: %v", err)
+ }
+ if _, err := dst.Write(g.Output()); err != nil {
+ log.Fatalf("Failed writing to destination: %v", err)
+ }
+}
+
+func parseMockNames(names string) map[string]string {
+ mocksMap := make(map[string]string)
+ for _, kv := range strings.Split(names, ",") {
+ parts := strings.SplitN(kv, "=", 2)
+ if len(parts) != 2 || parts[1] == "" {
+ log.Fatalf("bad mock names spec: %v", kv)
+ }
+ mocksMap[parts[0]] = parts[1]
+ }
+ return mocksMap
+}
+
+func usage() {
+ _, _ = io.WriteString(os.Stderr, usageText)
+ flag.PrintDefaults()
+}
+
+const usageText = `mockgen has two modes of operation: source and reflect.
+
+Source mode generates mock interfaces from a source file.
+It is enabled by using the -source flag. Other flags that
+may be useful in this mode are -imports and -aux_files.
+Example:
+ mockgen -source=foo.go [other options]
+
+Reflect mode generates mock interfaces by building a program
+that uses reflection to understand interfaces. It is enabled
+by passing two non-flag arguments: an import path, and a
+comma-separated list of symbols.
+Example:
+ mockgen database/sql/driver Conn,Driver
+
+`
+
+type generator struct {
+ buf bytes.Buffer
+ indent string
+ mockNames map[string]string // may be empty
+ filename string // may be empty
+ destination string // may be empty
+ srcPackage, srcInterfaces string // may be empty
+ copyrightHeader string
+
+ packageMap map[string]string // map from import path to package name
+}
+
+func (g *generator) p(format string, args ...interface{}) {
+ fmt.Fprintf(&g.buf, g.indent+format+"\n", args...)
+}
+
+func (g *generator) in() {
+ g.indent += "\t"
+}
+
+func (g *generator) out() {
+ if len(g.indent) > 0 {
+ g.indent = g.indent[0 : len(g.indent)-1]
+ }
+}
+
+// sanitize cleans up a string to make a suitable package name.
+func sanitize(s string) string {
+ t := ""
+ for _, r := range s {
+ if t == "" {
+ if unicode.IsLetter(r) || r == '_' {
+ t += string(r)
+ continue
+ }
+ } else {
+ if unicode.IsLetter(r) || unicode.IsDigit(r) || r == '_' {
+ t += string(r)
+ continue
+ }
+ }
+ t += "_"
+ }
+ if t == "_" {
+ t = "x"
+ }
+ return t
+}
+
+func (g *generator) Generate(pkg *model.Package, outputPkgName string, outputPackagePath string) error {
+ if outputPkgName != pkg.Name && *selfPackage == "" {
+ // reset outputPackagePath if it's not passed in through -self_package
+ outputPackagePath = ""
+ }
+
+ if g.copyrightHeader != "" {
+ lines := strings.Split(g.copyrightHeader, "\n")
+ for _, line := range lines {
+ g.p("// %s", line)
+ }
+ g.p("")
+ }
+
+ g.p("// Code generated by MockGen. DO NOT EDIT.")
+ if g.filename != "" {
+ g.p("// Source: %v", g.filename)
+ } else {
+ g.p("// Source: %v (interfaces: %v)", g.srcPackage, g.srcInterfaces)
+ }
+ g.p("")
+
+ // Get all required imports, and generate unique names for them all.
+ im := pkg.Imports()
+ im[gomockImportPath] = true
+
+ // Only import reflect if it's used. We only use reflect in mocked methods
+ // so only import if any of the mocked interfaces have methods.
+ for _, intf := range pkg.Interfaces {
+ if len(intf.Methods) > 0 {
+ im["reflect"] = true
+ break
+ }
+ }
+
+ // Sort keys to make import alias generation predictable
+ sortedPaths := make([]string, len(im))
+ x := 0
+ for pth := range im {
+ sortedPaths[x] = pth
+ x++
+ }
+ sort.Strings(sortedPaths)
+
+ packagesName := createPackageMap(sortedPaths)
+
+ g.packageMap = make(map[string]string, len(im))
+ localNames := make(map[string]bool, len(im))
+ for _, pth := range sortedPaths {
+ base, ok := packagesName[pth]
+ if !ok {
+ base = sanitize(path.Base(pth))
+ }
+
+ // Local names for an imported package can usually be the basename of the import path.
+ // A couple of situations don't permit that, such as duplicate local names
+ // (e.g. importing "html/template" and "text/template"), or where the basename is
+ // a keyword (e.g. "foo/case").
+ // try base0, base1, ...
+ pkgName := base
+ i := 0
+ for localNames[pkgName] || token.Lookup(pkgName).IsKeyword() {
+ pkgName = base + strconv.Itoa(i)
+ i++
+ }
+
+ // Avoid importing package if source pkg == output pkg
+ if pth == pkg.PkgPath && outputPackagePath == pkg.PkgPath {
+ continue
+ }
+
+ g.packageMap[pth] = pkgName
+ localNames[pkgName] = true
+ }
+
+ if *writePkgComment {
+ g.p("// Package %v is a generated GoMock package.", outputPkgName)
+ }
+ g.p("package %v", outputPkgName)
+ g.p("")
+ g.p("import (")
+ g.in()
+ for pkgPath, pkgName := range g.packageMap {
+ if pkgPath == outputPackagePath {
+ continue
+ }
+ g.p("%v %q", pkgName, pkgPath)
+ }
+ for _, pkgPath := range pkg.DotImports {
+ g.p(". %q", pkgPath)
+ }
+ g.out()
+ g.p(")")
+
+ for _, intf := range pkg.Interfaces {
+ if err := g.GenerateMockInterface(intf, outputPackagePath); err != nil {
+ return err
+ }
+ }
+
+ return nil
+}
+
+// The name of the mock type to use for the given interface identifier.
+func (g *generator) mockName(typeName string) string {
+ if mockName, ok := g.mockNames[typeName]; ok {
+ return mockName
+ }
+
+ return "Mock" + typeName
+}
+
+func (g *generator) GenerateMockInterface(intf *model.Interface, outputPackagePath string) error {
+ mockType := g.mockName(intf.Name)
+
+ g.p("")
+ g.p("// %v is a mock of %v interface.", mockType, intf.Name)
+ g.p("type %v struct {", mockType)
+ g.in()
+ g.p("ctrl *gomock.Controller")
+ g.p("recorder *%vMockRecorder", mockType)
+ g.out()
+ g.p("}")
+ g.p("")
+
+ g.p("// %vMockRecorder is the mock recorder for %v.", mockType, mockType)
+ g.p("type %vMockRecorder struct {", mockType)
+ g.in()
+ g.p("mock *%v", mockType)
+ g.out()
+ g.p("}")
+ g.p("")
+
+ g.p("// New%v creates a new mock instance.", mockType)
+ g.p("func New%v(ctrl *gomock.Controller) *%v {", mockType, mockType)
+ g.in()
+ g.p("mock := &%v{ctrl: ctrl}", mockType)
+ g.p("mock.recorder = &%vMockRecorder{mock}", mockType)
+ g.p("return mock")
+ g.out()
+ g.p("}")
+ g.p("")
+
+ // XXX: possible name collision here if someone has EXPECT in their interface.
+ g.p("// EXPECT returns an object that allows the caller to indicate expected use.")
+ g.p("func (m *%v) EXPECT() *%vMockRecorder {", mockType, mockType)
+ g.in()
+ g.p("return m.recorder")
+ g.out()
+ g.p("}")
+
+ g.GenerateMockMethods(mockType, intf, outputPackagePath)
+
+ return nil
+}
+
+type byMethodName []*model.Method
+
+func (b byMethodName) Len() int { return len(b) }
+func (b byMethodName) Swap(i, j int) { b[i], b[j] = b[j], b[i] }
+func (b byMethodName) Less(i, j int) bool { return b[i].Name < b[j].Name }
+
+func (g *generator) GenerateMockMethods(mockType string, intf *model.Interface, pkgOverride string) {
+ sort.Sort(byMethodName(intf.Methods))
+ for _, m := range intf.Methods {
+ g.p("")
+ _ = g.GenerateMockMethod(mockType, m, pkgOverride)
+ g.p("")
+ _ = g.GenerateMockRecorderMethod(mockType, m)
+ }
+}
+
+func makeArgString(argNames, argTypes []string) string {
+ args := make([]string, len(argNames))
+ for i, name := range argNames {
+ // specify the type only once for consecutive args of the same type
+ if i+1 < len(argTypes) && argTypes[i] == argTypes[i+1] {
+ args[i] = name
+ } else {
+ args[i] = name + " " + argTypes[i]
+ }
+ }
+ return strings.Join(args, ", ")
+}
+
+// GenerateMockMethod generates a mock method implementation.
+// If non-empty, pkgOverride is the package in which unqualified types reside.
+func (g *generator) GenerateMockMethod(mockType string, m *model.Method, pkgOverride string) error {
+ argNames := g.getArgNames(m)
+ argTypes := g.getArgTypes(m, pkgOverride)
+ argString := makeArgString(argNames, argTypes)
+
+ rets := make([]string, len(m.Out))
+ for i, p := range m.Out {
+ rets[i] = p.Type.String(g.packageMap, pkgOverride)
+ }
+ retString := strings.Join(rets, ", ")
+ if len(rets) > 1 {
+ retString = "(" + retString + ")"
+ }
+ if retString != "" {
+ retString = " " + retString
+ }
+
+ ia := newIdentifierAllocator(argNames)
+ idRecv := ia.allocateIdentifier("m")
+
+ g.p("// %v mocks base method.", m.Name)
+ g.p("func (%v *%v) %v(%v)%v {", idRecv, mockType, m.Name, argString, retString)
+ g.in()
+ g.p("%s.ctrl.T.Helper()", idRecv)
+
+ var callArgs string
+ if m.Variadic == nil {
+ if len(argNames) > 0 {
+ callArgs = ", " + strings.Join(argNames, ", ")
+ }
+ } else {
+ // Non-trivial. The generated code must build a []interface{},
+ // but the variadic argument may be any type.
+ idVarArgs := ia.allocateIdentifier("varargs")
+ idVArg := ia.allocateIdentifier("a")
+ g.p("%s := []interface{}{%s}", idVarArgs, strings.Join(argNames[:len(argNames)-1], ", "))
+ g.p("for _, %s := range %s {", idVArg, argNames[len(argNames)-1])
+ g.in()
+ g.p("%s = append(%s, %s)", idVarArgs, idVarArgs, idVArg)
+ g.out()
+ g.p("}")
+ callArgs = ", " + idVarArgs + "..."
+ }
+ if len(m.Out) == 0 {
+ g.p(`%v.ctrl.Call(%v, %q%v)`, idRecv, idRecv, m.Name, callArgs)
+ } else {
+ idRet := ia.allocateIdentifier("ret")
+ g.p(`%v := %v.ctrl.Call(%v, %q%v)`, idRet, idRecv, idRecv, m.Name, callArgs)
+
+ // Go does not allow "naked" type assertions on nil values, so we use the two-value form here.
+ // The value of that is either (x.(T), true) or (Z, false), where Z is the zero value for T.
+ // Happily, this coincides with the semantics we want here.
+ retNames := make([]string, len(rets))
+ for i, t := range rets {
+ retNames[i] = ia.allocateIdentifier(fmt.Sprintf("ret%d", i))
+ g.p("%s, _ := %s[%d].(%s)", retNames[i], idRet, i, t)
+ }
+ g.p("return " + strings.Join(retNames, ", "))
+ }
+
+ g.out()
+ g.p("}")
+ return nil
+}
+
+func (g *generator) GenerateMockRecorderMethod(mockType string, m *model.Method) error {
+ argNames := g.getArgNames(m)
+
+ var argString string
+ if m.Variadic == nil {
+ argString = strings.Join(argNames, ", ")
+ } else {
+ argString = strings.Join(argNames[:len(argNames)-1], ", ")
+ }
+ if argString != "" {
+ argString += " interface{}"
+ }
+
+ if m.Variadic != nil {
+ if argString != "" {
+ argString += ", "
+ }
+ argString += fmt.Sprintf("%s ...interface{}", argNames[len(argNames)-1])
+ }
+
+ ia := newIdentifierAllocator(argNames)
+ idRecv := ia.allocateIdentifier("mr")
+
+ g.p("// %v indicates an expected call of %v.", m.Name, m.Name)
+ g.p("func (%s *%vMockRecorder) %v(%v) *gomock.Call {", idRecv, mockType, m.Name, argString)
+ g.in()
+ g.p("%s.mock.ctrl.T.Helper()", idRecv)
+
+ var callArgs string
+ if m.Variadic == nil {
+ if len(argNames) > 0 {
+ callArgs = ", " + strings.Join(argNames, ", ")
+ }
+ } else {
+ if len(argNames) == 1 {
+ // Easy: just use ... to push the arguments through.
+ callArgs = ", " + argNames[0] + "..."
+ } else {
+ // Hard: create a temporary slice.
+ idVarArgs := ia.allocateIdentifier("varargs")
+ g.p("%s := append([]interface{}{%s}, %s...)",
+ idVarArgs,
+ strings.Join(argNames[:len(argNames)-1], ", "),
+ argNames[len(argNames)-1])
+ callArgs = ", " + idVarArgs + "..."
+ }
+ }
+ g.p(`return %s.mock.ctrl.RecordCallWithMethodType(%s.mock, "%s", reflect.TypeOf((*%s)(nil).%s)%s)`, idRecv, idRecv, m.Name, mockType, m.Name, callArgs)
+
+ g.out()
+ g.p("}")
+ return nil
+}
+
+func (g *generator) getArgNames(m *model.Method) []string {
+ argNames := make([]string, len(m.In))
+ for i, p := range m.In {
+ name := p.Name
+ if name == "" || name == "_" {
+ name = fmt.Sprintf("arg%d", i)
+ }
+ argNames[i] = name
+ }
+ if m.Variadic != nil {
+ name := m.Variadic.Name
+ if name == "" {
+ name = fmt.Sprintf("arg%d", len(m.In))
+ }
+ argNames = append(argNames, name)
+ }
+ return argNames
+}
+
+func (g *generator) getArgTypes(m *model.Method, pkgOverride string) []string {
+ argTypes := make([]string, len(m.In))
+ for i, p := range m.In {
+ argTypes[i] = p.Type.String(g.packageMap, pkgOverride)
+ }
+ if m.Variadic != nil {
+ argTypes = append(argTypes, "..."+m.Variadic.Type.String(g.packageMap, pkgOverride))
+ }
+ return argTypes
+}
+
+type identifierAllocator map[string]struct{}
+
+func newIdentifierAllocator(taken []string) identifierAllocator {
+ a := make(identifierAllocator, len(taken))
+ for _, s := range taken {
+ a[s] = struct{}{}
+ }
+ return a
+}
+
+func (o identifierAllocator) allocateIdentifier(want string) string {
+ id := want
+ for i := 2; ; i++ {
+ if _, ok := o[id]; !ok {
+ o[id] = struct{}{}
+ return id
+ }
+ id = want + "_" + strconv.Itoa(i)
+ }
+}
+
+// Output returns the generator's output, formatted in the standard Go style.
+func (g *generator) Output() []byte {
+ src, err := toolsimports.Process(g.destination, g.buf.Bytes(), nil)
+ if err != nil {
+ log.Fatalf("Failed to format generated source code: %s\n%s", err, g.buf.String())
+ }
+ return src
+}
+
+// createPackageMap returns a map of import path to package name
+// for specified importPaths.
+func createPackageMap(importPaths []string) map[string]string {
+ var pkg struct {
+ Name string
+ ImportPath string
+ }
+ pkgMap := make(map[string]string)
+ b := bytes.NewBuffer(nil)
+ args := []string{"list", "-json"}
+ args = append(args, importPaths...)
+ cmd := exec.Command("go", args...)
+ cmd.Stdout = b
+ cmd.Run()
+ dec := json.NewDecoder(b)
+ for dec.More() {
+ err := dec.Decode(&pkg)
+ if err != nil {
+ log.Printf("failed to decode 'go list' output: %v", err)
+ continue
+ }
+ pkgMap[pkg.ImportPath] = pkg.Name
+ }
+ return pkgMap
+}
+
+func printVersion() {
+ if version != "" {
+ fmt.Printf("v%s\nCommit: %s\nDate: %s\n", version, commit, date)
+ } else {
+ printModuleVersion()
+ }
+}
+
+// parsePackageImport gets the package import path via the source file
+// an alternative implementation is to use:
+// cfg := &packages.Config{Mode: packages.NeedName, Tests: true, Dir: srcDir}
+// pkgs, err := packages.Load(cfg, "file="+source)
+// However, it will call "go list" and slow down the performance
+func parsePackageImport(srcDir string) (string, error) {
+ moduleMode := os.Getenv("GO111MODULE")
+ // trying to find the module
+ if moduleMode != "off" {
+ currentDir := srcDir
+ for {
+ dat, err := ioutil.ReadFile(filepath.Join(currentDir, "go.mod"))
+ if os.IsNotExist(err) {
+ if currentDir == filepath.Dir(currentDir) {
+ // at the root
+ break
+ }
+ currentDir = filepath.Dir(currentDir)
+ continue
+ } else if err != nil {
+ return "", err
+ }
+ modulePath := modfile.ModulePath(dat)
+ return filepath.ToSlash(filepath.Join(modulePath, strings.TrimPrefix(srcDir, currentDir))), nil
+ }
+ }
+ // fall back to GOPATH mode
+ goPaths := os.Getenv("GOPATH")
+ if goPaths == "" {
+ return "", fmt.Errorf("GOPATH is not set")
+ }
+ goPathList := strings.Split(goPaths, string(os.PathListSeparator))
+ for _, goPath := range goPathList {
+ sourceRoot := filepath.Join(goPath, "src") + string(os.PathSeparator)
+ if strings.HasPrefix(srcDir, sourceRoot) {
+ return filepath.ToSlash(strings.TrimPrefix(srcDir, sourceRoot)), nil
+ }
+ }
+ return "", errOutsideGoPath
+}
diff --git a/vendor/github.com/golang/mock/mockgen/model/model.go b/vendor/github.com/golang/mock/mockgen/model/model.go
new file mode 100644
index 000000000..2c6a62ceb
--- /dev/null
+++ b/vendor/github.com/golang/mock/mockgen/model/model.go
@@ -0,0 +1,495 @@
+// Copyright 2012 Google Inc.
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+// Package model contains the data model necessary for generating mock implementations.
+package model
+
+import (
+ "encoding/gob"
+ "fmt"
+ "io"
+ "reflect"
+ "strings"
+)
+
+// pkgPath is the importable path for package model
+const pkgPath = "github.com/golang/mock/mockgen/model"
+
+// Package is a Go package. It may be a subset.
+type Package struct {
+ Name string
+ PkgPath string
+ Interfaces []*Interface
+ DotImports []string
+}
+
+// Print writes the package name and its exported interfaces.
+func (pkg *Package) Print(w io.Writer) {
+ _, _ = fmt.Fprintf(w, "package %s\n", pkg.Name)
+ for _, intf := range pkg.Interfaces {
+ intf.Print(w)
+ }
+}
+
+// Imports returns the imports needed by the Package as a set of import paths.
+func (pkg *Package) Imports() map[string]bool {
+ im := make(map[string]bool)
+ for _, intf := range pkg.Interfaces {
+ intf.addImports(im)
+ }
+ return im
+}
+
+// Interface is a Go interface.
+type Interface struct {
+ Name string
+ Methods []*Method
+}
+
+// Print writes the interface name and its methods.
+func (intf *Interface) Print(w io.Writer) {
+ _, _ = fmt.Fprintf(w, "interface %s\n", intf.Name)
+ for _, m := range intf.Methods {
+ m.Print(w)
+ }
+}
+
+func (intf *Interface) addImports(im map[string]bool) {
+ for _, m := range intf.Methods {
+ m.addImports(im)
+ }
+}
+
+// AddMethod adds a new method, de-duplicating by method name.
+func (intf *Interface) AddMethod(m *Method) {
+ for _, me := range intf.Methods {
+ if me.Name == m.Name {
+ return
+ }
+ }
+ intf.Methods = append(intf.Methods, m)
+}
+
+// Method is a single method of an interface.
+type Method struct {
+ Name string
+ In, Out []*Parameter
+ Variadic *Parameter // may be nil
+}
+
+// Print writes the method name and its signature.
+func (m *Method) Print(w io.Writer) {
+ _, _ = fmt.Fprintf(w, " - method %s\n", m.Name)
+ if len(m.In) > 0 {
+ _, _ = fmt.Fprintf(w, " in:\n")
+ for _, p := range m.In {
+ p.Print(w)
+ }
+ }
+ if m.Variadic != nil {
+ _, _ = fmt.Fprintf(w, " ...:\n")
+ m.Variadic.Print(w)
+ }
+ if len(m.Out) > 0 {
+ _, _ = fmt.Fprintf(w, " out:\n")
+ for _, p := range m.Out {
+ p.Print(w)
+ }
+ }
+}
+
+func (m *Method) addImports(im map[string]bool) {
+ for _, p := range m.In {
+ p.Type.addImports(im)
+ }
+ if m.Variadic != nil {
+ m.Variadic.Type.addImports(im)
+ }
+ for _, p := range m.Out {
+ p.Type.addImports(im)
+ }
+}
+
+// Parameter is an argument or return parameter of a method.
+type Parameter struct {
+ Name string // may be empty
+ Type Type
+}
+
+// Print writes a method parameter.
+func (p *Parameter) Print(w io.Writer) {
+ n := p.Name
+ if n == "" {
+ n = `""`
+ }
+ _, _ = fmt.Fprintf(w, " - %v: %v\n", n, p.Type.String(nil, ""))
+}
+
// Type is a Go type.
//
// String renders the type as Go source, using pm to map package import paths
// to local package names and omitting the qualifier for pkgOverride.
// addImports records every package the type references into im.
type Type interface {
	String(pm map[string]string, pkgOverride string) string
	addImports(im map[string]bool)
}

func init() {
	// Register every Type implementation so values can be gob-encoded and
	// decoded across the mockgen <-> generated-program boundary.
	gob.Register(&ArrayType{})
	gob.Register(&ChanType{})
	gob.Register(&FuncType{})
	gob.Register(&MapType{})
	gob.Register(&NamedType{})
	gob.Register(&PointerType{})

	// Call gob.RegisterName to make sure it has the consistent name registered
	// for both gob decoder and encoder.
	//
	// For a non-pointer type, gob.Register will try to get package full path by
	// calling rt.PkgPath() for a name to register. If your project has vendor
	// directory, it is possible that PkgPath will get a path like this:
	//     ../../../vendor/github.com/golang/mock/mockgen/model
	gob.RegisterName(pkgPath+".PredeclaredType", PredeclaredType(""))
}
+
+// ArrayType is an array or slice type.
+type ArrayType struct {
+ Len int // -1 for slices, >= 0 for arrays
+ Type Type
+}
+
+func (at *ArrayType) String(pm map[string]string, pkgOverride string) string {
+ s := "[]"
+ if at.Len > -1 {
+ s = fmt.Sprintf("[%d]", at.Len)
+ }
+ return s + at.Type.String(pm, pkgOverride)
+}
+
+func (at *ArrayType) addImports(im map[string]bool) { at.Type.addImports(im) }
+
+// ChanType is a channel type.
+type ChanType struct {
+ Dir ChanDir // 0, 1 or 2
+ Type Type
+}
+
+func (ct *ChanType) String(pm map[string]string, pkgOverride string) string {
+ s := ct.Type.String(pm, pkgOverride)
+ if ct.Dir == RecvDir {
+ return "<-chan " + s
+ }
+ if ct.Dir == SendDir {
+ return "chan<- " + s
+ }
+ return "chan " + s
+}
+
+func (ct *ChanType) addImports(im map[string]bool) { ct.Type.addImports(im) }
+
+// ChanDir is a channel direction.
+type ChanDir int
+
+// Constants for channel directions.
+const (
+ RecvDir ChanDir = 1
+ SendDir ChanDir = 2
+)
+
+// FuncType is a function type.
+type FuncType struct {
+ In, Out []*Parameter
+ Variadic *Parameter // may be nil
+}
+
+func (ft *FuncType) String(pm map[string]string, pkgOverride string) string {
+ args := make([]string, len(ft.In))
+ for i, p := range ft.In {
+ args[i] = p.Type.String(pm, pkgOverride)
+ }
+ if ft.Variadic != nil {
+ args = append(args, "..."+ft.Variadic.Type.String(pm, pkgOverride))
+ }
+ rets := make([]string, len(ft.Out))
+ for i, p := range ft.Out {
+ rets[i] = p.Type.String(pm, pkgOverride)
+ }
+ retString := strings.Join(rets, ", ")
+ if nOut := len(ft.Out); nOut == 1 {
+ retString = " " + retString
+ } else if nOut > 1 {
+ retString = " (" + retString + ")"
+ }
+ return "func(" + strings.Join(args, ", ") + ")" + retString
+}
+
+func (ft *FuncType) addImports(im map[string]bool) {
+ for _, p := range ft.In {
+ p.Type.addImports(im)
+ }
+ if ft.Variadic != nil {
+ ft.Variadic.Type.addImports(im)
+ }
+ for _, p := range ft.Out {
+ p.Type.addImports(im)
+ }
+}
+
+// MapType is a map type.
+type MapType struct {
+ Key, Value Type
+}
+
+func (mt *MapType) String(pm map[string]string, pkgOverride string) string {
+ return "map[" + mt.Key.String(pm, pkgOverride) + "]" + mt.Value.String(pm, pkgOverride)
+}
+
+func (mt *MapType) addImports(im map[string]bool) {
+ mt.Key.addImports(im)
+ mt.Value.addImports(im)
+}
+
// NamedType is an exported type in a package.
type NamedType struct {
	Package string // may be empty
	Type    string
}

// String renders the type, qualified with the local package name from pm
// unless the type belongs to pkgOverride or has no known local name.
func (nt *NamedType) String(pm map[string]string, pkgOverride string) string {
	if pkgOverride == nt.Package {
		return nt.Type
	}
	if prefix, ok := pm[nt.Package]; ok && prefix != "" {
		return prefix + "." + nt.Type
	}
	return nt.Type
}

func (nt *NamedType) addImports(im map[string]bool) {
	if nt.Package != "" {
		im[nt.Package] = true
	}
}
+
+// PointerType is a pointer to another type.
+type PointerType struct {
+ Type Type
+}
+
+func (pt *PointerType) String(pm map[string]string, pkgOverride string) string {
+ return "*" + pt.Type.String(pm, pkgOverride)
+}
+func (pt *PointerType) addImports(im map[string]bool) { pt.Type.addImports(im) }
+
// PredeclaredType is a predeclared type such as "int".
type PredeclaredType string

// String returns the type name; predeclared types never need qualification.
func (pt PredeclaredType) String(map[string]string, string) string {
	return string(pt)
}

// addImports is a no-op: predeclared types require no imports.
func (pt PredeclaredType) addImports(map[string]bool) {}
+
+// The following code is intended to be called by the program generated by ../reflect.go.
+
+// InterfaceFromInterfaceType returns a pointer to an interface for the
+// given reflection interface type.
+func InterfaceFromInterfaceType(it reflect.Type) (*Interface, error) {
+ if it.Kind() != reflect.Interface {
+ return nil, fmt.Errorf("%v is not an interface", it)
+ }
+ intf := &Interface{}
+
+ for i := 0; i < it.NumMethod(); i++ {
+ mt := it.Method(i)
+ // TODO: need to skip unexported methods? or just raise an error?
+ m := &Method{
+ Name: mt.Name,
+ }
+
+ var err error
+ m.In, m.Variadic, m.Out, err = funcArgsFromType(mt.Type)
+ if err != nil {
+ return nil, err
+ }
+
+ intf.AddMethod(m)
+ }
+
+ return intf, nil
+}
+
+// t's Kind must be a reflect.Func.
+func funcArgsFromType(t reflect.Type) (in []*Parameter, variadic *Parameter, out []*Parameter, err error) {
+ nin := t.NumIn()
+ if t.IsVariadic() {
+ nin--
+ }
+ var p *Parameter
+ for i := 0; i < nin; i++ {
+ p, err = parameterFromType(t.In(i))
+ if err != nil {
+ return
+ }
+ in = append(in, p)
+ }
+ if t.IsVariadic() {
+ p, err = parameterFromType(t.In(nin).Elem())
+ if err != nil {
+ return
+ }
+ variadic = p
+ }
+ for i := 0; i < t.NumOut(); i++ {
+ p, err = parameterFromType(t.Out(i))
+ if err != nil {
+ return
+ }
+ out = append(out, p)
+ }
+ return
+}
+
+func parameterFromType(t reflect.Type) (*Parameter, error) {
+ tt, err := typeFromType(t)
+ if err != nil {
+ return nil, err
+ }
+ return &Parameter{Type: tt}, nil
+}
+
+var errorType = reflect.TypeOf((*error)(nil)).Elem()
+
+var byteType = reflect.TypeOf(byte(0))
+
// typeFromType converts a reflect.Type into the corresponding model Type.
// It returns an error for kinds that the model cannot represent yet
// (non-empty unnamed structs, unsafe pointers).
func typeFromType(t reflect.Type) (Type, error) {
	// Hack workaround for https://golang.org/issue/3853.
	// This explicit check should not be necessary.
	if t == byteType {
		return PredeclaredType("byte"), nil
	}

	// A non-empty PkgPath means the type is named and declared in a package.
	if imp := t.PkgPath(); imp != "" {
		return &NamedType{
			Package: impPath(imp),
			Type:    t.Name(),
		}, nil
	}

	// only unnamed or predeclared types after here

	// Lots of types have element types. Let's do the parsing and error checking for all of them.
	var elemType Type
	switch t.Kind() {
	case reflect.Array, reflect.Chan, reflect.Map, reflect.Ptr, reflect.Slice:
		var err error
		elemType, err = typeFromType(t.Elem())
		if err != nil {
			return nil, err
		}
	}

	switch t.Kind() {
	case reflect.Array:
		return &ArrayType{
			Len:  t.Len(),
			Type: elemType,
		}, nil
	case reflect.Bool, reflect.Int, reflect.Int8, reflect.Int16, reflect.Int32, reflect.Int64,
		reflect.Uint, reflect.Uint8, reflect.Uint16, reflect.Uint32, reflect.Uint64, reflect.Uintptr,
		reflect.Float32, reflect.Float64, reflect.Complex64, reflect.Complex128, reflect.String:
		return PredeclaredType(t.Kind().String()), nil
	case reflect.Chan:
		var dir ChanDir
		switch t.ChanDir() {
		case reflect.RecvDir:
			dir = RecvDir
		case reflect.SendDir:
			dir = SendDir
		}
		// A bidirectional channel leaves dir at its zero value, which
		// ChanType.String renders as a plain "chan".
		return &ChanType{
			Dir:  dir,
			Type: elemType,
		}, nil
	case reflect.Func:
		in, variadic, out, err := funcArgsFromType(t)
		if err != nil {
			return nil, err
		}
		return &FuncType{
			In:       in,
			Out:      out,
			Variadic: variadic,
		}, nil
	case reflect.Interface:
		// Two special interfaces.
		if t.NumMethod() == 0 {
			return PredeclaredType("interface{}"), nil
		}
		if t == errorType {
			return PredeclaredType("error"), nil
		}
	case reflect.Map:
		kt, err := typeFromType(t.Key())
		if err != nil {
			return nil, err
		}
		return &MapType{
			Key:   kt,
			Value: elemType,
		}, nil
	case reflect.Ptr:
		return &PointerType{
			Type: elemType,
		}, nil
	case reflect.Slice:
		return &ArrayType{
			Len:  -1,
			Type: elemType,
		}, nil
	case reflect.Struct:
		if t.NumField() == 0 {
			return PredeclaredType("struct{}"), nil
		}
	}

	// TODO: Struct, UnsafePointer
	return nil, fmt.Errorf("can't yet turn %v (%v) into a model.Type", t, t.Kind())
}
+
// impPath sanitizes the package path returned by the `PkgPath` method of a
// reflect.Type so that it is importable. PkgPath might return a path that
// includes "vendor". These paths do not compile, so we need to remove
// everything up to and including "/vendor/".
// See https://github.com/golang/go/issues/12019.
func impPath(imp string) string {
	// Normalize a leading "vendor/" so the single search below handles it.
	if strings.HasPrefix(imp, "vendor/") {
		imp = "/" + imp
	}
	if idx := strings.LastIndex(imp, "/vendor/"); idx >= 0 {
		return imp[idx+len("/vendor/"):]
	}
	return imp
}
+
// ErrorInterface represents the built-in error interface, used when an
// interface embeds "error" directly.
var ErrorInterface = Interface{
	Name: "error",
	Methods: []*Method{
		{
			Name: "Error",
			Out: []*Parameter{
				{
					Name: "",
					Type: PredeclaredType("string"),
				},
			},
		},
	},
}
diff --git a/vendor/github.com/golang/mock/mockgen/parse.go b/vendor/github.com/golang/mock/mockgen/parse.go
new file mode 100644
index 000000000..bf6902cd5
--- /dev/null
+++ b/vendor/github.com/golang/mock/mockgen/parse.go
@@ -0,0 +1,644 @@
+// Copyright 2012 Google Inc.
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+package main
+
+// This file contains the model construction by parsing source files.
+
+import (
+ "errors"
+ "flag"
+ "fmt"
+ "go/ast"
+ "go/build"
+ "go/importer"
+ "go/parser"
+ "go/token"
+ "go/types"
+ "io/ioutil"
+ "log"
+ "path"
+ "path/filepath"
+ "strconv"
+ "strings"
+
+ "github.com/golang/mock/mockgen/model"
+)
+
+var (
+ imports = flag.String("imports", "", "(source mode) Comma-separated name=path pairs of explicit imports to use.")
+ auxFiles = flag.String("aux_files", "", "(source mode) Comma-separated pkg=path pairs of auxiliary Go source files.")
+)
+
+// sourceMode generates mocks via source file.
+func sourceMode(source string) (*model.Package, error) {
+ srcDir, err := filepath.Abs(filepath.Dir(source))
+ if err != nil {
+ return nil, fmt.Errorf("failed getting source directory: %v", err)
+ }
+
+ packageImport, err := parsePackageImport(srcDir)
+ if err != nil {
+ return nil, err
+ }
+
+ fs := token.NewFileSet()
+ file, err := parser.ParseFile(fs, source, nil, 0)
+ if err != nil {
+ return nil, fmt.Errorf("failed parsing source file %v: %v", source, err)
+ }
+
+ p := &fileParser{
+ fileSet: fs,
+ imports: make(map[string]importedPackage),
+ importedInterfaces: make(map[string]map[string]*ast.InterfaceType),
+ auxInterfaces: make(map[string]map[string]*ast.InterfaceType),
+ srcDir: srcDir,
+ }
+
+ // Handle -imports.
+ dotImports := make(map[string]bool)
+ if *imports != "" {
+ for _, kv := range strings.Split(*imports, ",") {
+ eq := strings.Index(kv, "=")
+ k, v := kv[:eq], kv[eq+1:]
+ if k == "." {
+ dotImports[v] = true
+ } else {
+ p.imports[k] = importedPkg{path: v}
+ }
+ }
+ }
+
+ // Handle -aux_files.
+ if err := p.parseAuxFiles(*auxFiles); err != nil {
+ return nil, err
+ }
+ p.addAuxInterfacesFromFile(packageImport, file) // this file
+
+ pkg, err := p.parseFile(packageImport, file)
+ if err != nil {
+ return nil, err
+ }
+ for pkgPath := range dotImports {
+ pkg.DotImports = append(pkg.DotImports, pkgPath)
+ }
+ return pkg, nil
+}
+
// importedPackage stands in for a package imported by the file being parsed:
// it exposes the import path and, once the package has been parsed, the
// parser holding its interfaces.
type importedPackage interface {
	Path() string
	Parser() *fileParser
}

// importedPkg is the ordinary importedPackage implementation.
type importedPkg struct {
	path   string
	parser *fileParser
}

func (i importedPkg) Path() string        { return i.path }
func (i importedPkg) Parser() *fileParser { return i.parser }

// duplicateImport is a bit of a misnomer. Currently the parser can't
// handle cases of multi-file packages importing different packages
// under the same name. Often these imports would not be problematic,
// so this type lets us defer raising an error unless the package name
// is actually used.
type duplicateImport struct {
	name       string
	duplicates []string
}

func (d duplicateImport) Error() string {
	return fmt.Sprintf("%q is ambiguous because of duplicate imports: %v", d.name, d.duplicates)
}

func (d duplicateImport) Path() string        { log.Fatal(d.Error()); return "" }
func (d duplicateImport) Parser() *fileParser { log.Fatal(d.Error()); return nil }

// fileParser accumulates per-file parsing state: the known imports and the
// interfaces discovered in the parsed file and its auxiliary files.
type fileParser struct {
	fileSet            *token.FileSet
	imports            map[string]importedPackage               // package name => imported package
	importedInterfaces map[string]map[string]*ast.InterfaceType // package (or "") => name => interface

	auxFiles      []*ast.File
	auxInterfaces map[string]map[string]*ast.InterfaceType // package (or "") => name => interface

	srcDir string
}

// errorf builds an error message prefixed with the file position of pos.
func (p *fileParser) errorf(pos token.Pos, format string, args ...interface{}) error {
	ps := p.fileSet.Position(pos)
	return fmt.Errorf("%s:%d:%d: "+format, append([]interface{}{ps.Filename, ps.Line, ps.Column}, args...)...)
}
+
+func (p *fileParser) parseAuxFiles(auxFiles string) error {
+ auxFiles = strings.TrimSpace(auxFiles)
+ if auxFiles == "" {
+ return nil
+ }
+ for _, kv := range strings.Split(auxFiles, ",") {
+ parts := strings.SplitN(kv, "=", 2)
+ if len(parts) != 2 {
+ return fmt.Errorf("bad aux file spec: %v", kv)
+ }
+ pkg, fpath := parts[0], parts[1]
+
+ file, err := parser.ParseFile(p.fileSet, fpath, nil, 0)
+ if err != nil {
+ return err
+ }
+ p.auxFiles = append(p.auxFiles, file)
+ p.addAuxInterfacesFromFile(pkg, file)
+ }
+ return nil
+}
+
+func (p *fileParser) addAuxInterfacesFromFile(pkg string, file *ast.File) {
+ if _, ok := p.auxInterfaces[pkg]; !ok {
+ p.auxInterfaces[pkg] = make(map[string]*ast.InterfaceType)
+ }
+ for ni := range iterInterfaces(file) {
+ p.auxInterfaces[pkg][ni.name.Name] = ni.it
+ }
+}
+
// parseFile loads all file imports and auxiliary files import into the
// fileParser, parses all file interfaces and returns package model.
func (p *fileParser) parseFile(importPath string, file *ast.File) (*model.Package, error) {
	allImports, dotImports := importsOfFile(file)
	// Don't stomp imports provided by -imports. Those should take precedence.
	for pkg, pkgI := range allImports {
		if _, ok := p.imports[pkg]; !ok {
			p.imports[pkg] = pkgI
		}
	}
	// Add imports from auxiliary files, which might be needed for embedded interfaces.
	// Don't stomp any other imports.
	for _, f := range p.auxFiles {
		auxImports, _ := importsOfFile(f)
		for pkg, pkgI := range auxImports {
			if _, ok := p.imports[pkg]; !ok {
				p.imports[pkg] = pkgI
			}
		}
	}

	// Convert every interface declared in this file into a model.Interface.
	var is []*model.Interface
	for ni := range iterInterfaces(file) {
		i, err := p.parseInterface(ni.name.String(), importPath, ni.it)
		if err != nil {
			return nil, err
		}
		is = append(is, i)
	}
	return &model.Package{
		Name:       file.Name.String(),
		PkgPath:    importPath,
		Interfaces: is,
		DotImports: dotImports,
	}, nil
}
+
// parsePackage loads package specified by path, parses it and returns
// a new fileParser with the parsed imports and interfaces.
func (p *fileParser) parsePackage(path string) (*fileParser, error) {
	newP := &fileParser{
		fileSet:            token.NewFileSet(),
		imports:            make(map[string]importedPackage),
		importedInterfaces: make(map[string]map[string]*ast.InterfaceType),
		auxInterfaces:      make(map[string]map[string]*ast.InterfaceType),
		srcDir:             p.srcDir,
	}

	// Locate the package directory, then parse every Go file in it.
	var pkgs map[string]*ast.Package
	if imp, err := build.Import(path, newP.srcDir, build.FindOnly); err != nil {
		return nil, err
	} else if pkgs, err = parser.ParseDir(newP.fileSet, imp.Dir, nil, 0); err != nil {
		return nil, err
	}

	for _, pkg := range pkgs {
		// Merge the package into one synthetic file so interfaces and imports
		// can be collected with the same helpers used for single files.
		file := ast.MergePackageFiles(pkg, ast.FilterFuncDuplicates|ast.FilterUnassociatedComments|ast.FilterImportDuplicates)
		if _, ok := newP.importedInterfaces[path]; !ok {
			newP.importedInterfaces[path] = make(map[string]*ast.InterfaceType)
		}
		for ni := range iterInterfaces(file) {
			newP.importedInterfaces[path][ni.name.Name] = ni.it
		}
		imports, _ := importsOfFile(file)
		for pkgName, pkgI := range imports {
			newP.imports[pkgName] = pkgI
		}
	}
	return newP, nil
}
+
// parseInterface builds the model for the named interface declared as it,
// recursively resolving embedded interfaces from this package, from aux
// files, and from imported packages.
func (p *fileParser) parseInterface(name, pkg string, it *ast.InterfaceType) (*model.Interface, error) {
	iface := &model.Interface{Name: name}
	for _, field := range it.Methods.List {
		switch v := field.Type.(type) {
		case *ast.FuncType:
			// An ordinary method declaration.
			if nn := len(field.Names); nn != 1 {
				return nil, fmt.Errorf("expected one name for interface %v, got %d", iface.Name, nn)
			}
			m := &model.Method{
				Name: field.Names[0].String(),
			}
			var err error
			m.In, m.Variadic, m.Out, err = p.parseFunc(pkg, v)
			if err != nil {
				return nil, err
			}
			iface.AddMethod(m)
		case *ast.Ident:
			// Embedded interface in this package.
			embeddedIfaceType := p.auxInterfaces[pkg][v.String()]
			if embeddedIfaceType == nil {
				embeddedIfaceType = p.importedInterfaces[pkg][v.String()]
			}

			var embeddedIface *model.Interface
			if embeddedIfaceType != nil {
				var err error
				embeddedIface, err = p.parseInterface(v.String(), pkg, embeddedIfaceType)
				if err != nil {
					return nil, err
				}
			} else {
				// This is built-in error interface.
				if v.String() == model.ErrorInterface.Name {
					embeddedIface = &model.ErrorInterface
				} else {
					return nil, p.errorf(v.Pos(), "unknown embedded interface %s", v.String())
				}
			}
			// Copy the methods.
			for _, m := range embeddedIface.Methods {
				iface.AddMethod(m)
			}
		case *ast.SelectorExpr:
			// Embedded interface in another package.
			filePkg, sel := v.X.(*ast.Ident).String(), v.Sel.String()
			embeddedPkg, ok := p.imports[filePkg]
			if !ok {
				return nil, p.errorf(v.X.Pos(), "unknown package %s", filePkg)
			}

			var embeddedIface *model.Interface
			var err error
			embeddedIfaceType := p.auxInterfaces[filePkg][sel]
			if embeddedIfaceType != nil {
				embeddedIface, err = p.parseInterface(sel, filePkg, embeddedIfaceType)
				if err != nil {
					return nil, err
				}
			} else {
				// Lazily parse the imported package on first use, then cache
				// the resulting parser for subsequent lookups.
				path := embeddedPkg.Path()
				parser := embeddedPkg.Parser()
				if parser == nil {
					ip, err := p.parsePackage(path)
					if err != nil {
						return nil, p.errorf(v.Pos(), "could not parse package %s: %v", path, err)
					}
					parser = ip
					p.imports[filePkg] = importedPkg{
						path:   embeddedPkg.Path(),
						parser: parser,
					}
				}
				if embeddedIfaceType = parser.importedInterfaces[path][sel]; embeddedIfaceType == nil {
					return nil, p.errorf(v.Pos(), "unknown embedded interface %s.%s", path, sel)
				}
				embeddedIface, err = parser.parseInterface(sel, path, embeddedIfaceType)
				if err != nil {
					return nil, err
				}
			}
			// Copy the methods.
			// TODO: apply shadowing rules.
			for _, m := range embeddedIface.Methods {
				iface.AddMethod(m)
			}
		default:
			return nil, fmt.Errorf("don't know how to mock method of type %T", field.Type)
		}
	}
	return iface, nil
}
+
+func (p *fileParser) parseFunc(pkg string, f *ast.FuncType) (inParam []*model.Parameter, variadic *model.Parameter, outParam []*model.Parameter, err error) {
+ if f.Params != nil {
+ regParams := f.Params.List
+ if isVariadic(f) {
+ n := len(regParams)
+ varParams := regParams[n-1:]
+ regParams = regParams[:n-1]
+ vp, err := p.parseFieldList(pkg, varParams)
+ if err != nil {
+ return nil, nil, nil, p.errorf(varParams[0].Pos(), "failed parsing variadic argument: %v", err)
+ }
+ variadic = vp[0]
+ }
+ inParam, err = p.parseFieldList(pkg, regParams)
+ if err != nil {
+ return nil, nil, nil, p.errorf(f.Pos(), "failed parsing arguments: %v", err)
+ }
+ }
+ if f.Results != nil {
+ outParam, err = p.parseFieldList(pkg, f.Results.List)
+ if err != nil {
+ return nil, nil, nil, p.errorf(f.Pos(), "failed parsing returns: %v", err)
+ }
+ }
+ return
+}
+
+func (p *fileParser) parseFieldList(pkg string, fields []*ast.Field) ([]*model.Parameter, error) {
+ nf := 0
+ for _, f := range fields {
+ nn := len(f.Names)
+ if nn == 0 {
+ nn = 1 // anonymous parameter
+ }
+ nf += nn
+ }
+ if nf == 0 {
+ return nil, nil
+ }
+ ps := make([]*model.Parameter, nf)
+ i := 0 // destination index
+ for _, f := range fields {
+ t, err := p.parseType(pkg, f.Type)
+ if err != nil {
+ return nil, err
+ }
+
+ if len(f.Names) == 0 {
+ // anonymous arg
+ ps[i] = &model.Parameter{Type: t}
+ i++
+ continue
+ }
+ for _, name := range f.Names {
+ ps[i] = &model.Parameter{Name: name.Name, Type: t}
+ i++
+ }
+ }
+ return ps, nil
+}
+
// parseType converts an AST type expression into a model.Type, resolving
// package qualifiers through p.imports.
func (p *fileParser) parseType(pkg string, typ ast.Expr) (model.Type, error) {
	switch v := typ.(type) {
	case *ast.ArrayType:
		ln := -1
		if v.Len != nil {
			// The array length may be a literal, a locally declared const,
			// or a const selected from an external package.
			var value string
			switch val := v.Len.(type) {
			case (*ast.BasicLit):
				value = val.Value
			case (*ast.Ident):
				// when the length is a const defined locally
				value = val.Obj.Decl.(*ast.ValueSpec).Values[0].(*ast.BasicLit).Value
			case (*ast.SelectorExpr):
				// when the length is a const defined in an external package
				usedPkg, err := importer.Default().Import(fmt.Sprintf("%s", val.X))
				if err != nil {
					return nil, p.errorf(v.Len.Pos(), "unknown package in array length: %v", err)
				}
				ev, err := types.Eval(token.NewFileSet(), usedPkg, token.NoPos, val.Sel.Name)
				if err != nil {
					return nil, p.errorf(v.Len.Pos(), "unknown constant in array length: %v", err)
				}
				value = ev.Value.String()
			}

			x, err := strconv.Atoi(value)
			if err != nil {
				return nil, p.errorf(v.Len.Pos(), "bad array size: %v", err)
			}
			ln = x
		}
		t, err := p.parseType(pkg, v.Elt)
		if err != nil {
			return nil, err
		}
		return &model.ArrayType{Len: ln, Type: t}, nil
	case *ast.ChanType:
		t, err := p.parseType(pkg, v.Value)
		if err != nil {
			return nil, err
		}
		// A bidirectional channel leaves dir at its zero value.
		var dir model.ChanDir
		if v.Dir == ast.SEND {
			dir = model.SendDir
		}
		if v.Dir == ast.RECV {
			dir = model.RecvDir
		}
		return &model.ChanType{Dir: dir, Type: t}, nil
	case *ast.Ellipsis:
		// assume we're parsing a variadic argument
		return p.parseType(pkg, v.Elt)
	case *ast.FuncType:
		in, variadic, out, err := p.parseFunc(pkg, v)
		if err != nil {
			return nil, err
		}
		return &model.FuncType{In: in, Out: out, Variadic: variadic}, nil
	case *ast.Ident:
		if v.IsExported() {
			// `pkg` may be an aliased imported pkg
			// if so, patch the import w/ the fully qualified import
			maybeImportedPkg, ok := p.imports[pkg]
			if ok {
				pkg = maybeImportedPkg.Path()
			}
			// assume type in this package
			return &model.NamedType{Package: pkg, Type: v.Name}, nil
		}

		// assume predeclared type
		return model.PredeclaredType(v.Name), nil
	case *ast.InterfaceType:
		if v.Methods != nil && len(v.Methods.List) > 0 {
			return nil, p.errorf(v.Pos(), "can't handle non-empty unnamed interface types")
		}
		return model.PredeclaredType("interface{}"), nil
	case *ast.MapType:
		key, err := p.parseType(pkg, v.Key)
		if err != nil {
			return nil, err
		}
		value, err := p.parseType(pkg, v.Value)
		if err != nil {
			return nil, err
		}
		return &model.MapType{Key: key, Value: value}, nil
	case *ast.SelectorExpr:
		// A qualified type from an imported package.
		pkgName := v.X.(*ast.Ident).String()
		pkg, ok := p.imports[pkgName]
		if !ok {
			return nil, p.errorf(v.Pos(), "unknown package %q", pkgName)
		}
		return &model.NamedType{Package: pkg.Path(), Type: v.Sel.String()}, nil
	case *ast.StarExpr:
		t, err := p.parseType(pkg, v.X)
		if err != nil {
			return nil, err
		}
		return &model.PointerType{Type: t}, nil
	case *ast.StructType:
		if v.Fields != nil && len(v.Fields.List) > 0 {
			return nil, p.errorf(v.Pos(), "can't handle non-empty unnamed struct types")
		}
		return model.PredeclaredType("struct{}"), nil
	case *ast.ParenExpr:
		// Unwrap parenthesized types.
		return p.parseType(pkg, v.X)
	}

	return nil, fmt.Errorf("don't know how to parse type %T", typ)
}
+
// importsOfFile returns a map of package name to import path
// of the imports in file.
func importsOfFile(file *ast.File) (normalImports map[string]importedPackage, dotImports []string) {
	// Resolve the real package names of unnamed imports via createPackageMap,
	// because the last path component is only a guess at the package name.
	var importPaths []string
	for _, is := range file.Imports {
		if is.Name != nil {
			continue
		}
		importPath := is.Path.Value[1 : len(is.Path.Value)-1] // remove quotes
		importPaths = append(importPaths, importPath)
	}
	packagesName := createPackageMap(importPaths)
	normalImports = make(map[string]importedPackage)
	dotImports = make([]string, 0)
	for _, is := range file.Imports {
		var pkgName string
		importPath := is.Path.Value[1 : len(is.Path.Value)-1] // remove quotes

		if is.Name != nil {
			// Named imports are always certain.
			if is.Name.Name == "_" {
				continue
			}
			pkgName = is.Name.Name
		} else {
			pkg, ok := packagesName[importPath]
			if !ok {
				// Fallback to import path suffix. Note that this is uncertain.
				_, last := path.Split(importPath)
				// If the last path component has dots, the first dot-delimited
				// field is used as the name.
				pkgName = strings.SplitN(last, ".", 2)[0]
			} else {
				pkgName = pkg
			}
		}

		if pkgName == "." {
			dotImports = append(dotImports, importPath)
		} else {
			// Two imports resolving to the same package name are recorded as
			// a duplicateImport, which only raises an error if the ambiguous
			// name is actually used later.
			if pkg, ok := normalImports[pkgName]; ok {
				switch p := pkg.(type) {
				case duplicateImport:
					normalImports[pkgName] = duplicateImport{
						name:       p.name,
						duplicates: append([]string{importPath}, p.duplicates...),
					}
				case importedPkg:
					normalImports[pkgName] = duplicateImport{
						name:       pkgName,
						duplicates: []string{p.path, importPath},
					}
				}
			} else {
				normalImports[pkgName] = importedPkg{path: importPath}
			}
		}
	}
	return
}
+
// namedInterface pairs an interface type declaration with its name.
type namedInterface struct {
	name *ast.Ident
	it   *ast.InterfaceType
}

// iterInterfaces returns a channel producing every interface type
// declaration in file. The channel is closed after all declarations have
// been sent; callers must drain it.
func iterInterfaces(file *ast.File) <-chan namedInterface {
	ch := make(chan namedInterface)
	go func() {
		defer close(ch)
		for _, decl := range file.Decls {
			gd, ok := decl.(*ast.GenDecl)
			if !ok || gd.Tok != token.TYPE {
				continue
			}
			for _, spec := range gd.Specs {
				if ts, ok := spec.(*ast.TypeSpec); ok {
					if it, ok := ts.Type.(*ast.InterfaceType); ok {
						ch <- namedInterface{ts.Name, it}
					}
				}
			}
		}
	}()
	return ch
}
+
// isVariadic returns whether the function is variadic.
func isVariadic(f *ast.FuncType) bool {
	params := f.Params.List
	if len(params) == 0 {
		return false
	}
	_, isEllipsis := params[len(params)-1].Type.(*ast.Ellipsis)
	return isEllipsis
}
+
+// packageNameOfDir get package import path via dir
+func packageNameOfDir(srcDir string) (string, error) {
+ files, err := ioutil.ReadDir(srcDir)
+ if err != nil {
+ log.Fatal(err)
+ }
+
+ var goFilePath string
+ for _, file := range files {
+ if !file.IsDir() && strings.HasSuffix(file.Name(), ".go") {
+ goFilePath = file.Name()
+ break
+ }
+ }
+ if goFilePath == "" {
+ return "", fmt.Errorf("go source file not found %s", srcDir)
+ }
+
+ packageImport, err := parsePackageImport(srcDir)
+ if err != nil {
+ return "", err
+ }
+ return packageImport, nil
+}
+
+var errOutsideGoPath = errors.New("source directory is outside GOPATH")
diff --git a/vendor/github.com/golang/mock/mockgen/reflect.go b/vendor/github.com/golang/mock/mockgen/reflect.go
new file mode 100644
index 000000000..e24efce0b
--- /dev/null
+++ b/vendor/github.com/golang/mock/mockgen/reflect.go
@@ -0,0 +1,256 @@
+// Copyright 2012 Google Inc.
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+package main
+
+// This file contains the model construction by reflection.
+
+import (
+ "bytes"
+ "encoding/gob"
+ "flag"
+ "fmt"
+ "go/build"
+ "io"
+ "io/ioutil"
+ "log"
+ "os"
+ "os/exec"
+ "path/filepath"
+ "runtime"
+ "strings"
+ "text/template"
+
+ "github.com/golang/mock/mockgen/model"
+)
+
+var (
+ progOnly = flag.Bool("prog_only", false, "(reflect mode) Only generate the reflection program; write it to stdout and exit.")
+ execOnly = flag.String("exec_only", "", "(reflect mode) If set, execute this reflection program.")
+ buildFlags = flag.String("build_flags", "", "(reflect mode) Additional flags for go build.")
+)
+
+// reflectMode generates mocks via reflection on an interface.
+func reflectMode(importPath string, symbols []string) (*model.Package, error) {
+ if *execOnly != "" {
+ return run(*execOnly)
+ }
+
+ program, err := writeProgram(importPath, symbols)
+ if err != nil {
+ return nil, err
+ }
+
+ if *progOnly {
+ if _, err := os.Stdout.Write(program); err != nil {
+ return nil, err
+ }
+ os.Exit(0)
+ }
+
+ wd, _ := os.Getwd()
+
+ // Try to run the reflection program in the current working directory.
+ if p, err := runInDir(program, wd); err == nil {
+ return p, nil
+ }
+
+ // Try to run the program in the same directory as the input package.
+ if p, err := build.Import(importPath, wd, build.FindOnly); err == nil {
+ dir := p.Dir
+ if p, err := runInDir(program, dir); err == nil {
+ return p, nil
+ }
+ }
+
+ // Try to run it in a standard temp directory.
+ return runInDir(program, "")
+}
+
+func writeProgram(importPath string, symbols []string) ([]byte, error) {
+ var program bytes.Buffer
+ data := reflectData{
+ ImportPath: importPath,
+ Symbols: symbols,
+ }
+ if err := reflectProgram.Execute(&program, &data); err != nil {
+ return nil, err
+ }
+ return program.Bytes(), nil
+}
+
+// run the given program and parse the output as a model.Package.
+func run(program string) (*model.Package, error) {
+ f, err := ioutil.TempFile("", "")
+ if err != nil {
+ return nil, err
+ }
+
+ filename := f.Name()
+ defer os.Remove(filename)
+ if err := f.Close(); err != nil {
+ return nil, err
+ }
+
+ // Run the program.
+ cmd := exec.Command(program, "-output", filename)
+ cmd.Stdout = os.Stdout
+ cmd.Stderr = os.Stderr
+ if err := cmd.Run(); err != nil {
+ return nil, err
+ }
+
+ f, err = os.Open(filename)
+ if err != nil {
+ return nil, err
+ }
+
+ // Process output.
+ var pkg model.Package
+ if err := gob.NewDecoder(f).Decode(&pkg); err != nil {
+ return nil, err
+ }
+
+ if err := f.Close(); err != nil {
+ return nil, err
+ }
+
+ return &pkg, nil
+}
+
+// runInDir writes the given program into the given dir, runs it there, and
+// parses the output as a model.Package.
+func runInDir(program []byte, dir string) (*model.Package, error) {
+ // We use TempDir instead of TempFile so we can control the filename.
+ tmpDir, err := ioutil.TempDir(dir, "gomock_reflect_")
+ if err != nil {
+ return nil, err
+ }
+ defer func() {
+ if err := os.RemoveAll(tmpDir); err != nil {
+ log.Printf("failed to remove temp directory: %s", err)
+ }
+ }()
+ const progSource = "prog.go"
+ var progBinary = "prog.bin"
+ if runtime.GOOS == "windows" {
+ // Windows won't execute a program unless it has a ".exe" suffix.
+ progBinary += ".exe"
+ }
+
+ if err := ioutil.WriteFile(filepath.Join(tmpDir, progSource), program, 0600); err != nil {
+ return nil, err
+ }
+
+ cmdArgs := []string{}
+ cmdArgs = append(cmdArgs, "build")
+ if *buildFlags != "" {
+ cmdArgs = append(cmdArgs, strings.Split(*buildFlags, " ")...)
+ }
+ cmdArgs = append(cmdArgs, "-o", progBinary, progSource)
+
+ // Build the program.
+ buf := bytes.NewBuffer(nil)
+ cmd := exec.Command("go", cmdArgs...)
+ cmd.Dir = tmpDir
+ cmd.Stdout = os.Stdout
+ cmd.Stderr = io.MultiWriter(os.Stderr, buf)
+ if err := cmd.Run(); err != nil {
+ sErr := buf.String()
+ if strings.Contains(sErr, `cannot find package "."`) &&
+ strings.Contains(sErr, "github.com/golang/mock/mockgen/model") {
+ fmt.Fprint(os.Stderr, "Please reference the steps in the README to fix this error:\n\thttps://github.com/golang/mock#reflect-vendoring-error.")
+ return nil, err
+ }
+ return nil, err
+ }
+
+ return run(filepath.Join(tmpDir, progBinary))
+}
+
+type reflectData struct {
+ ImportPath string
+ Symbols []string
+}
+
+// This program reflects on an interface value, and prints the
+// gob encoding of a model.Package to standard output.
+// JSON doesn't work because of the model.Type interface.
+var reflectProgram = template.Must(template.New("program").Parse(`
+package main
+
+import (
+ "encoding/gob"
+ "flag"
+ "fmt"
+ "os"
+ "path"
+ "reflect"
+
+ "github.com/golang/mock/mockgen/model"
+
+ pkg_ {{printf "%q" .ImportPath}}
+)
+
+var output = flag.String("output", "", "The output file name, or empty to use stdout.")
+
+func main() {
+ flag.Parse()
+
+ its := []struct{
+ sym string
+ typ reflect.Type
+ }{
+ {{range .Symbols}}
+ { {{printf "%q" .}}, reflect.TypeOf((*pkg_.{{.}})(nil)).Elem()},
+ {{end}}
+ }
+ pkg := &model.Package{
+ // NOTE: This behaves contrary to documented behaviour if the
+ // package name is not the final component of the import path.
+ // The reflect package doesn't expose the package name, though.
+ Name: path.Base({{printf "%q" .ImportPath}}),
+ }
+
+ for _, it := range its {
+ intf, err := model.InterfaceFromInterfaceType(it.typ)
+ if err != nil {
+ fmt.Fprintf(os.Stderr, "Reflection: %v\n", err)
+ os.Exit(1)
+ }
+ intf.Name = it.sym
+ pkg.Interfaces = append(pkg.Interfaces, intf)
+ }
+
+ outfile := os.Stdout
+ if len(*output) != 0 {
+ var err error
+ outfile, err = os.Create(*output)
+ if err != nil {
+ fmt.Fprintf(os.Stderr, "failed to open output file %q", *output)
+ }
+ defer func() {
+ if err := outfile.Close(); err != nil {
+ fmt.Fprintf(os.Stderr, "failed to close output file %q", *output)
+ os.Exit(1)
+ }
+ }()
+ }
+
+ if err := gob.NewEncoder(outfile).Encode(pkg); err != nil {
+ fmt.Fprintf(os.Stderr, "gob encode: %v\n", err)
+ os.Exit(1)
+ }
+}
+`))
diff --git a/vendor/github.com/spacemonkeygo/spacelog/capture_windows.go b/vendor/github.com/golang/mock/mockgen/version.1.11.go
similarity index 68%
rename from vendor/github.com/spacemonkeygo/spacelog/capture_windows.go
rename to vendor/github.com/golang/mock/mockgen/version.1.11.go
index e9f061dcf..e6b25db23 100644
--- a/vendor/github.com/spacemonkeygo/spacelog/capture_windows.go
+++ b/vendor/github.com/golang/mock/mockgen/version.1.11.go
@@ -1,10 +1,10 @@
-// Copyright (C) 2014 Space Monkey, Inc.
+// Copyright 2019 Google LLC
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
-// http://www.apache.org/licenses/LICENSE-2.0
+// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
@@ -12,12 +12,15 @@
// See the License for the specific language governing permissions and
// limitations under the License.
-package spacelog
+// +build !go1.12
+
+package main
import (
- "fmt"
+ "log"
)
-func CaptureOutputToFd(fd int) error {
- return fmt.Errorf("CaptureOutputToFd not supported on Windows")
+func printModuleVersion() {
+ log.Printf("No version information is available for Mockgen compiled with " +
+ "version 1.11")
}
diff --git a/vendor/github.com/spacemonkeygo/spacelog/syslog_windows.go b/vendor/github.com/golang/mock/mockgen/version.1.12.go
similarity index 55%
rename from vendor/github.com/spacemonkeygo/spacelog/syslog_windows.go
rename to vendor/github.com/golang/mock/mockgen/version.1.12.go
index edba3c2a5..ad121ae63 100644
--- a/vendor/github.com/spacemonkeygo/spacelog/syslog_windows.go
+++ b/vendor/github.com/golang/mock/mockgen/version.1.12.go
@@ -1,26 +1,35 @@
-// Copyright (C) 2014 Space Monkey, Inc.
+// Copyright 2019 Google LLC
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
-// http://www.apache.org/licenses/LICENSE-2.0
+// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
+//
+
+// +build go1.12
-package spacelog
+package main
import (
"fmt"
+ "log"
+ "runtime/debug"
)
-type SyslogPriority int
+func printModuleVersion() {
+ if bi, exists := debug.ReadBuildInfo(); exists {
+ fmt.Println(bi.Main.Version)
+ } else {
+ log.Printf("No version information found. Make sure to use " +
+ "GO111MODULE=on when running 'go get' in order to use specific " +
+ "version of the binary.")
+ }
-func NewSyslogOutput(facility SyslogPriority, tag string) (
- TextOutput, error) {
- return nil, fmt.Errorf("SyslogOutput not supported on Windows")
}
diff --git a/vendor/github.com/google/go-cmp/cmp/compare.go b/vendor/github.com/google/go-cmp/cmp/compare.go
index fd2b3a42b..087320da7 100644
--- a/vendor/github.com/google/go-cmp/cmp/compare.go
+++ b/vendor/github.com/google/go-cmp/cmp/compare.go
@@ -13,21 +13,21 @@
//
// The primary features of cmp are:
//
-// • When the default behavior of equality does not suit the needs of the test,
-// custom equality functions can override the equality operation.
-// For example, an equality function may report floats as equal so long as they
-// are within some tolerance of each other.
+// - When the default behavior of equality does not suit the test's needs,
+// custom equality functions can override the equality operation.
+// For example, an equality function may report floats as equal so long as
+// they are within some tolerance of each other.
//
-// • Types that have an Equal method may use that method to determine equality.
-// This allows package authors to determine the equality operation for the types
-// that they define.
+// - Types with an Equal method may use that method to determine equality.
+// This allows package authors to determine the equality operation
+// for the types that they define.
//
-// • If no custom equality functions are used and no Equal method is defined,
-// equality is determined by recursively comparing the primitive kinds on both
-// values, much like reflect.DeepEqual. Unlike reflect.DeepEqual, unexported
-// fields are not compared by default; they result in panics unless suppressed
-// by using an Ignore option (see cmpopts.IgnoreUnexported) or explicitly
-// compared using the Exporter option.
+// - If no custom equality functions are used and no Equal method is defined,
+// equality is determined by recursively comparing the primitive kinds on
+// both values, much like reflect.DeepEqual. Unlike reflect.DeepEqual,
+// unexported fields are not compared by default; they result in panics
+// unless suppressed by using an Ignore option (see cmpopts.IgnoreUnexported)
+// or explicitly compared using the Exporter option.
package cmp
import (
@@ -45,25 +45,25 @@ import (
// Equal reports whether x and y are equal by recursively applying the
// following rules in the given order to x and y and all of their sub-values:
//
-// • Let S be the set of all Ignore, Transformer, and Comparer options that
-// remain after applying all path filters, value filters, and type filters.
-// If at least one Ignore exists in S, then the comparison is ignored.
-// If the number of Transformer and Comparer options in S is greater than one,
-// then Equal panics because it is ambiguous which option to use.
-// If S contains a single Transformer, then use that to transform the current
-// values and recursively call Equal on the output values.
-// If S contains a single Comparer, then use that to compare the current values.
-// Otherwise, evaluation proceeds to the next rule.
+// - Let S be the set of all Ignore, Transformer, and Comparer options that
+// remain after applying all path filters, value filters, and type filters.
+// If at least one Ignore exists in S, then the comparison is ignored.
+// If the number of Transformer and Comparer options in S is non-zero,
+// then Equal panics because it is ambiguous which option to use.
+// If S contains a single Transformer, then use that to transform
+// the current values and recursively call Equal on the output values.
+// If S contains a single Comparer, then use that to compare the current values.
+// Otherwise, evaluation proceeds to the next rule.
//
-// • If the values have an Equal method of the form "(T) Equal(T) bool" or
-// "(T) Equal(I) bool" where T is assignable to I, then use the result of
-// x.Equal(y) even if x or y is nil. Otherwise, no such method exists and
-// evaluation proceeds to the next rule.
+// - If the values have an Equal method of the form "(T) Equal(T) bool" or
+// "(T) Equal(I) bool" where T is assignable to I, then use the result of
+// x.Equal(y) even if x or y is nil. Otherwise, no such method exists and
+// evaluation proceeds to the next rule.
//
-// • Lastly, try to compare x and y based on their basic kinds.
-// Simple kinds like booleans, integers, floats, complex numbers, strings, and
-// channels are compared using the equivalent of the == operator in Go.
-// Functions are only equal if they are both nil, otherwise they are unequal.
+// - Lastly, try to compare x and y based on their basic kinds.
+// Simple kinds like booleans, integers, floats, complex numbers, strings,
+// and channels are compared using the equivalent of the == operator in Go.
+// Functions are only equal if they are both nil, otherwise they are unequal.
//
// Structs are equal if recursively calling Equal on all fields report equal.
// If a struct contains unexported fields, Equal panics unless an Ignore option
@@ -144,7 +144,7 @@ func rootStep(x, y interface{}) PathStep {
// so that they have the same parent type.
var t reflect.Type
if !vx.IsValid() || !vy.IsValid() || vx.Type() != vy.Type() {
- t = reflect.TypeOf((*interface{})(nil)).Elem()
+ t = anyType
if vx.IsValid() {
vvx := reflect.New(t).Elem()
vvx.Set(vx)
@@ -639,7 +639,9 @@ type dynChecker struct{ curr, next int }
// Next increments the state and reports whether a check should be performed.
//
// Checks occur every Nth function call, where N is a triangular number:
+//
// 0 1 3 6 10 15 21 28 36 45 55 66 78 91 105 120 136 153 171 190 ...
+//
// See https://en.wikipedia.org/wiki/Triangular_number
//
// This sequence ensures that the cost of checks drops significantly as
diff --git a/vendor/github.com/google/go-cmp/cmp/internal/diff/diff.go b/vendor/github.com/google/go-cmp/cmp/internal/diff/diff.go
index bc196b16c..a248e5436 100644
--- a/vendor/github.com/google/go-cmp/cmp/internal/diff/diff.go
+++ b/vendor/github.com/google/go-cmp/cmp/internal/diff/diff.go
@@ -127,9 +127,9 @@ var randBool = rand.New(rand.NewSource(time.Now().Unix())).Intn(2) == 0
// This function returns an edit-script, which is a sequence of operations
// needed to convert one list into the other. The following invariants for
// the edit-script are maintained:
-// • eq == (es.Dist()==0)
-// • nx == es.LenX()
-// • ny == es.LenY()
+// - eq == (es.Dist()==0)
+// - nx == es.LenX()
+// - ny == es.LenY()
//
// This algorithm is not guaranteed to be an optimal solution (i.e., one that
// produces an edit-script with a minimal Levenshtein distance). This algorithm
@@ -169,12 +169,13 @@ func Difference(nx, ny int, f EqualFunc) (es EditScript) {
// A diagonal edge is equivalent to a matching symbol between both X and Y.
// Invariants:
- // • 0 ≤ fwdPath.X ≤ (fwdFrontier.X, revFrontier.X) ≤ revPath.X ≤ nx
- // • 0 ≤ fwdPath.Y ≤ (fwdFrontier.Y, revFrontier.Y) ≤ revPath.Y ≤ ny
+ // - 0 ≤ fwdPath.X ≤ (fwdFrontier.X, revFrontier.X) ≤ revPath.X ≤ nx
+ // - 0 ≤ fwdPath.Y ≤ (fwdFrontier.Y, revFrontier.Y) ≤ revPath.Y ≤ ny
//
// In general:
- // • fwdFrontier.X < revFrontier.X
- // • fwdFrontier.Y < revFrontier.Y
+ // - fwdFrontier.X < revFrontier.X
+ // - fwdFrontier.Y < revFrontier.Y
+ //
// Unless, it is time for the algorithm to terminate.
fwdPath := path{+1, point{0, 0}, make(EditScript, 0, (nx+ny)/2)}
revPath := path{-1, point{nx, ny}, make(EditScript, 0)}
@@ -195,19 +196,21 @@ func Difference(nx, ny int, f EqualFunc) (es EditScript) {
// computing sub-optimal edit-scripts between two lists.
//
// The algorithm is approximately as follows:
- // • Searching for differences switches back-and-forth between
- // a search that starts at the beginning (the top-left corner), and
- // a search that starts at the end (the bottom-right corner). The goal of
- // the search is connect with the search from the opposite corner.
- // • As we search, we build a path in a greedy manner, where the first
- // match seen is added to the path (this is sub-optimal, but provides a
- // decent result in practice). When matches are found, we try the next pair
- // of symbols in the lists and follow all matches as far as possible.
- // • When searching for matches, we search along a diagonal going through
- // through the "frontier" point. If no matches are found, we advance the
- // frontier towards the opposite corner.
- // • This algorithm terminates when either the X coordinates or the
- // Y coordinates of the forward and reverse frontier points ever intersect.
+ // - Searching for differences switches back-and-forth between
+ // a search that starts at the beginning (the top-left corner), and
+ // a search that starts at the end (the bottom-right corner).
+ // The goal of the search is connect with the search
+ // from the opposite corner.
+ // - As we search, we build a path in a greedy manner,
+ // where the first match seen is added to the path (this is sub-optimal,
+ // but provides a decent result in practice). When matches are found,
+ // we try the next pair of symbols in the lists and follow all matches
+ // as far as possible.
+ // - When searching for matches, we search along a diagonal going through
+ // through the "frontier" point. If no matches are found,
+ // we advance the frontier towards the opposite corner.
+ // - This algorithm terminates when either the X coordinates or the
+ // Y coordinates of the forward and reverse frontier points ever intersect.
// This algorithm is correct even if searching only in the forward direction
// or in the reverse direction. We do both because it is commonly observed
@@ -389,6 +392,7 @@ type point struct{ X, Y int }
func (p *point) add(dx, dy int) { p.X += dx; p.Y += dy }
// zigzag maps a consecutive sequence of integers to a zig-zag sequence.
+//
// [0 1 2 3 4 5 ...] => [0 -1 +1 -2 +2 ...]
func zigzag(x int) int {
if x&1 != 0 {
diff --git a/vendor/github.com/google/go-cmp/cmp/internal/value/zero.go b/vendor/github.com/google/go-cmp/cmp/internal/value/zero.go
deleted file mode 100644
index 9147a2997..000000000
--- a/vendor/github.com/google/go-cmp/cmp/internal/value/zero.go
+++ /dev/null
@@ -1,48 +0,0 @@
-// Copyright 2017, The Go Authors. All rights reserved.
-// Use of this source code is governed by a BSD-style
-// license that can be found in the LICENSE file.
-
-package value
-
-import (
- "math"
- "reflect"
-)
-
-// IsZero reports whether v is the zero value.
-// This does not rely on Interface and so can be used on unexported fields.
-func IsZero(v reflect.Value) bool {
- switch v.Kind() {
- case reflect.Bool:
- return v.Bool() == false
- case reflect.Int, reflect.Int8, reflect.Int16, reflect.Int32, reflect.Int64:
- return v.Int() == 0
- case reflect.Uint, reflect.Uint8, reflect.Uint16, reflect.Uint32, reflect.Uint64, reflect.Uintptr:
- return v.Uint() == 0
- case reflect.Float32, reflect.Float64:
- return math.Float64bits(v.Float()) == 0
- case reflect.Complex64, reflect.Complex128:
- return math.Float64bits(real(v.Complex())) == 0 && math.Float64bits(imag(v.Complex())) == 0
- case reflect.String:
- return v.String() == ""
- case reflect.UnsafePointer:
- return v.Pointer() == 0
- case reflect.Chan, reflect.Func, reflect.Interface, reflect.Ptr, reflect.Map, reflect.Slice:
- return v.IsNil()
- case reflect.Array:
- for i := 0; i < v.Len(); i++ {
- if !IsZero(v.Index(i)) {
- return false
- }
- }
- return true
- case reflect.Struct:
- for i := 0; i < v.NumField(); i++ {
- if !IsZero(v.Field(i)) {
- return false
- }
- }
- return true
- }
- return false
-}
diff --git a/vendor/github.com/google/go-cmp/cmp/options.go b/vendor/github.com/google/go-cmp/cmp/options.go
index e57b9eb53..1f9ca9c48 100644
--- a/vendor/github.com/google/go-cmp/cmp/options.go
+++ b/vendor/github.com/google/go-cmp/cmp/options.go
@@ -33,6 +33,7 @@ type Option interface {
}
// applicableOption represents the following types:
+//
// Fundamental: ignore | validator | *comparer | *transformer
// Grouping: Options
type applicableOption interface {
@@ -43,6 +44,7 @@ type applicableOption interface {
}
// coreOption represents the following types:
+//
// Fundamental: ignore | validator | *comparer | *transformer
// Filters: *pathFilter | *valuesFilter
type coreOption interface {
@@ -336,9 +338,9 @@ func (tr transformer) String() string {
// both implement T.
//
// The equality function must be:
-// • Symmetric: equal(x, y) == equal(y, x)
-// • Deterministic: equal(x, y) == equal(x, y)
-// • Pure: equal(x, y) does not modify x or y
+// - Symmetric: equal(x, y) == equal(y, x)
+// - Deterministic: equal(x, y) == equal(x, y)
+// - Pure: equal(x, y) does not modify x or y
func Comparer(f interface{}) Option {
v := reflect.ValueOf(f)
if !function.IsType(v.Type(), function.Equal) || v.IsNil() {
@@ -430,7 +432,7 @@ func AllowUnexported(types ...interface{}) Option {
}
// Result represents the comparison result for a single node and
-// is provided by cmp when calling Result (see Reporter).
+// is provided by cmp when calling Report (see Reporter).
type Result struct {
_ [0]func() // Make Result incomparable
flags resultFlags
diff --git a/vendor/github.com/google/go-cmp/cmp/path.go b/vendor/github.com/google/go-cmp/cmp/path.go
index c71003463..a0a588502 100644
--- a/vendor/github.com/google/go-cmp/cmp/path.go
+++ b/vendor/github.com/google/go-cmp/cmp/path.go
@@ -41,13 +41,13 @@ type PathStep interface {
// The type of each valid value is guaranteed to be identical to Type.
//
// In some cases, one or both may be invalid or have restrictions:
- // • For StructField, both are not interface-able if the current field
- // is unexported and the struct type is not explicitly permitted by
- // an Exporter to traverse unexported fields.
- // • For SliceIndex, one may be invalid if an element is missing from
- // either the x or y slice.
- // • For MapIndex, one may be invalid if an entry is missing from
- // either the x or y map.
+ // - For StructField, both are not interface-able if the current field
+ // is unexported and the struct type is not explicitly permitted by
+ // an Exporter to traverse unexported fields.
+ // - For SliceIndex, one may be invalid if an element is missing from
+ // either the x or y slice.
+ // - For MapIndex, one may be invalid if an entry is missing from
+ // either the x or y map.
//
// The provided values must not be mutated.
Values() (vx, vy reflect.Value)
@@ -94,6 +94,7 @@ func (pa Path) Index(i int) PathStep {
// The simplified path only contains struct field accesses.
//
// For example:
+//
// MyMap.MySlices.MyField
func (pa Path) String() string {
var ss []string
@@ -108,6 +109,7 @@ func (pa Path) String() string {
// GoString returns the path to a specific node using Go syntax.
//
// For example:
+//
// (*root.MyMap["key"].(*mypkg.MyStruct).MySlices)[2][3].MyField
func (pa Path) GoString() string {
var ssPre, ssPost []string
@@ -159,7 +161,7 @@ func (ps pathStep) String() string {
if ps.typ == nil {
return ""
}
- s := ps.typ.String()
+ s := value.TypeString(ps.typ, false)
if s == "" || strings.ContainsAny(s, "{}\n") {
return "root" // Type too simple or complex to print
}
@@ -282,7 +284,7 @@ type typeAssertion struct {
func (ta TypeAssertion) Type() reflect.Type { return ta.typ }
func (ta TypeAssertion) Values() (vx, vy reflect.Value) { return ta.vx, ta.vy }
-func (ta TypeAssertion) String() string { return fmt.Sprintf(".(%v)", ta.typ) }
+func (ta TypeAssertion) String() string { return fmt.Sprintf(".(%v)", value.TypeString(ta.typ, false)) }
// Transform is a transformation from the parent type to the current type.
type Transform struct{ *transform }
diff --git a/vendor/github.com/google/go-cmp/cmp/report_compare.go b/vendor/github.com/google/go-cmp/cmp/report_compare.go
index 1ef65ac1d..2050bf6b4 100644
--- a/vendor/github.com/google/go-cmp/cmp/report_compare.go
+++ b/vendor/github.com/google/go-cmp/cmp/report_compare.go
@@ -7,8 +7,6 @@ package cmp
import (
"fmt"
"reflect"
-
- "github.com/google/go-cmp/cmp/internal/value"
)
// numContextRecords is the number of surrounding equal records to print.
@@ -117,7 +115,7 @@ func (opts formatOptions) FormatDiff(v *valueNode, ptrs *pointerReferences) (out
// For leaf nodes, format the value based on the reflect.Values alone.
// As a special case, treat equal []byte as a leaf nodes.
- isBytes := v.Type.Kind() == reflect.Slice && v.Type.Elem() == reflect.TypeOf(byte(0))
+ isBytes := v.Type.Kind() == reflect.Slice && v.Type.Elem() == byteType
isEqualBytes := isBytes && v.NumDiff+v.NumIgnored+v.NumTransformed == 0
if v.MaxDepth == 0 || isEqualBytes {
switch opts.DiffMode {
@@ -248,11 +246,11 @@ func (opts formatOptions) formatDiffList(recs []reportRecord, k reflect.Kind, pt
var isZero bool
switch opts.DiffMode {
case diffIdentical:
- isZero = value.IsZero(r.Value.ValueX) || value.IsZero(r.Value.ValueY)
+ isZero = r.Value.ValueX.IsZero() || r.Value.ValueY.IsZero()
case diffRemoved:
- isZero = value.IsZero(r.Value.ValueX)
+ isZero = r.Value.ValueX.IsZero()
case diffInserted:
- isZero = value.IsZero(r.Value.ValueY)
+ isZero = r.Value.ValueY.IsZero()
}
if isZero {
continue
diff --git a/vendor/github.com/google/go-cmp/cmp/report_reflect.go b/vendor/github.com/google/go-cmp/cmp/report_reflect.go
index 287b89358..2ab41fad3 100644
--- a/vendor/github.com/google/go-cmp/cmp/report_reflect.go
+++ b/vendor/github.com/google/go-cmp/cmp/report_reflect.go
@@ -16,6 +16,13 @@ import (
"github.com/google/go-cmp/cmp/internal/value"
)
+var (
+ anyType = reflect.TypeOf((*interface{})(nil)).Elem()
+ stringType = reflect.TypeOf((*string)(nil)).Elem()
+ bytesType = reflect.TypeOf((*[]byte)(nil)).Elem()
+ byteType = reflect.TypeOf((*byte)(nil)).Elem()
+)
+
type formatValueOptions struct {
// AvoidStringer controls whether to avoid calling custom stringer
// methods like error.Error or fmt.Stringer.String.
@@ -184,7 +191,7 @@ func (opts formatOptions) FormatValue(v reflect.Value, parentKind reflect.Kind,
}
for i := 0; i < v.NumField(); i++ {
vv := v.Field(i)
- if value.IsZero(vv) {
+ if vv.IsZero() {
continue // Elide fields with zero values
}
if len(list) == maxLen {
@@ -205,7 +212,7 @@ func (opts formatOptions) FormatValue(v reflect.Value, parentKind reflect.Kind,
}
// Check whether this is a []byte of text data.
- if t.Elem() == reflect.TypeOf(byte(0)) {
+ if t.Elem() == byteType {
b := v.Bytes()
isPrintSpace := func(r rune) bool { return unicode.IsPrint(r) || unicode.IsSpace(r) }
if len(b) > 0 && utf8.Valid(b) && len(bytes.TrimFunc(b, isPrintSpace)) == 0 {
diff --git a/vendor/github.com/google/go-cmp/cmp/report_slices.go b/vendor/github.com/google/go-cmp/cmp/report_slices.go
index 68b5c1ae1..23e444f62 100644
--- a/vendor/github.com/google/go-cmp/cmp/report_slices.go
+++ b/vendor/github.com/google/go-cmp/cmp/report_slices.go
@@ -104,7 +104,7 @@ func (opts formatOptions) FormatDiffSlice(v *valueNode) textNode {
case t.Kind() == reflect.String:
sx, sy = vx.String(), vy.String()
isString = true
- case t.Kind() == reflect.Slice && t.Elem() == reflect.TypeOf(byte(0)):
+ case t.Kind() == reflect.Slice && t.Elem() == byteType:
sx, sy = string(vx.Bytes()), string(vy.Bytes())
isString = true
case t.Kind() == reflect.Array:
@@ -147,7 +147,10 @@ func (opts formatOptions) FormatDiffSlice(v *valueNode) textNode {
})
efficiencyLines := float64(esLines.Dist()) / float64(len(esLines))
efficiencyBytes := float64(esBytes.Dist()) / float64(len(esBytes))
- isPureLinedText = efficiencyLines < 4*efficiencyBytes
+ quotedLength := len(strconv.Quote(sx + sy))
+ unquotedLength := len(sx) + len(sy)
+ escapeExpansionRatio := float64(quotedLength) / float64(unquotedLength)
+ isPureLinedText = efficiencyLines < 4*efficiencyBytes || escapeExpansionRatio > 1.1
}
}
@@ -171,12 +174,13 @@ func (opts formatOptions) FormatDiffSlice(v *valueNode) textNode {
// differences in a string literal. This format is more readable,
// but has edge-cases where differences are visually indistinguishable.
// This format is avoided under the following conditions:
- // • A line starts with `"""`
- // • A line starts with "..."
- // • A line contains non-printable characters
- // • Adjacent different lines differ only by whitespace
+ // - A line starts with `"""`
+ // - A line starts with "..."
+ // - A line contains non-printable characters
+ // - Adjacent different lines differ only by whitespace
//
// For example:
+ //
// """
// ... // 3 identical lines
// foo
@@ -231,7 +235,7 @@ func (opts formatOptions) FormatDiffSlice(v *valueNode) textNode {
var out textNode = &textWrap{Prefix: "(", Value: list2, Suffix: ")"}
switch t.Kind() {
case reflect.String:
- if t != reflect.TypeOf(string("")) {
+ if t != stringType {
out = opts.FormatType(t, out)
}
case reflect.Slice:
@@ -326,12 +330,12 @@ func (opts formatOptions) FormatDiffSlice(v *valueNode) textNode {
switch t.Kind() {
case reflect.String:
out = &textWrap{Prefix: "strings.Join(", Value: out, Suffix: fmt.Sprintf(", %q)", delim)}
- if t != reflect.TypeOf(string("")) {
+ if t != stringType {
out = opts.FormatType(t, out)
}
case reflect.Slice:
out = &textWrap{Prefix: "bytes.Join(", Value: out, Suffix: fmt.Sprintf(", %q)", delim)}
- if t != reflect.TypeOf([]byte(nil)) {
+ if t != bytesType {
out = opts.FormatType(t, out)
}
}
@@ -446,7 +450,6 @@ func (opts formatOptions) formatDiffSlice(
// {NumIdentical: 3},
// {NumInserted: 1},
// ]
-//
func coalesceAdjacentEdits(name string, es diff.EditScript) (groups []diffStats) {
var prevMode byte
lastStats := func(mode byte) *diffStats {
@@ -503,7 +506,6 @@ func coalesceAdjacentEdits(name string, es diff.EditScript) (groups []diffStats)
// {NumIdentical: 8, NumRemoved: 12, NumInserted: 3},
// {NumIdentical: 63},
// ]
-//
func coalesceInterveningIdentical(groups []diffStats, windowSize int) []diffStats {
groups, groupsOrig := groups[:0], groups
for i, ds := range groupsOrig {
@@ -548,7 +550,6 @@ func coalesceInterveningIdentical(groups []diffStats, windowSize int) []diffStat
// {NumRemoved: 9},
// {NumIdentical: 64}, // incremented by 10
// ]
-//
func cleanupSurroundingIdentical(groups []diffStats, eq func(i, j int) bool) []diffStats {
var ix, iy int // indexes into sequence x and y
for i, ds := range groups {
diff --git a/vendor/github.com/google/go-cmp/cmp/report_text.go b/vendor/github.com/google/go-cmp/cmp/report_text.go
index 0fd46d7ff..388fcf571 100644
--- a/vendor/github.com/google/go-cmp/cmp/report_text.go
+++ b/vendor/github.com/google/go-cmp/cmp/report_text.go
@@ -393,6 +393,7 @@ func (s diffStats) Append(ds diffStats) diffStats {
// String prints a humanly-readable summary of coalesced records.
//
// Example:
+//
// diffStats{Name: "Field", NumIgnored: 5}.String() => "5 ignored fields"
func (s diffStats) String() string {
var ss []string
diff --git a/vendor/github.com/google/pprof/AUTHORS b/vendor/github.com/google/pprof/AUTHORS
new file mode 100644
index 000000000..fd736cb1c
--- /dev/null
+++ b/vendor/github.com/google/pprof/AUTHORS
@@ -0,0 +1,7 @@
+# This is the official list of pprof authors for copyright purposes.
+# This file is distinct from the CONTRIBUTORS files.
+# See the latter for an explanation.
+# Names should be added to this file as:
+# Name or Organization
+# The email address is not required for organizations.
+Google Inc.
\ No newline at end of file
diff --git a/vendor/github.com/google/pprof/CONTRIBUTORS b/vendor/github.com/google/pprof/CONTRIBUTORS
new file mode 100644
index 000000000..8c8c37d2c
--- /dev/null
+++ b/vendor/github.com/google/pprof/CONTRIBUTORS
@@ -0,0 +1,16 @@
+# People who have agreed to one of the CLAs and can contribute patches.
+# The AUTHORS file lists the copyright holders; this file
+# lists people. For example, Google employees are listed here
+# but not in AUTHORS, because Google holds the copyright.
+#
+# https://developers.google.com/open-source/cla/individual
+# https://developers.google.com/open-source/cla/corporate
+#
+# Names should be added to this file as:
+# Name
+Raul Silvera
+Tipp Moseley
+Hyoun Kyu Cho
+Martin Spier
+Taco de Wolff
+Andrew Hunter
diff --git a/vendor/github.com/google/pprof/LICENSE b/vendor/github.com/google/pprof/LICENSE
new file mode 100644
index 000000000..d64569567
--- /dev/null
+++ b/vendor/github.com/google/pprof/LICENSE
@@ -0,0 +1,202 @@
+
+ Apache License
+ Version 2.0, January 2004
+ http://www.apache.org/licenses/
+
+ TERMS AND CONDITIONS FOR USE, REPRODUCTION, AND DISTRIBUTION
+
+ 1. Definitions.
+
+ "License" shall mean the terms and conditions for use, reproduction,
+ and distribution as defined by Sections 1 through 9 of this document.
+
+ "Licensor" shall mean the copyright owner or entity authorized by
+ the copyright owner that is granting the License.
+
+ "Legal Entity" shall mean the union of the acting entity and all
+ other entities that control, are controlled by, or are under common
+ control with that entity. For the purposes of this definition,
+ "control" means (i) the power, direct or indirect, to cause the
+ direction or management of such entity, whether by contract or
+ otherwise, or (ii) ownership of fifty percent (50%) or more of the
+ outstanding shares, or (iii) beneficial ownership of such entity.
+
+ "You" (or "Your") shall mean an individual or Legal Entity
+ exercising permissions granted by this License.
+
+ "Source" form shall mean the preferred form for making modifications,
+ including but not limited to software source code, documentation
+ source, and configuration files.
+
+ "Object" form shall mean any form resulting from mechanical
+ transformation or translation of a Source form, including but
+ not limited to compiled object code, generated documentation,
+ and conversions to other media types.
+
+ "Work" shall mean the work of authorship, whether in Source or
+ Object form, made available under the License, as indicated by a
+ copyright notice that is included in or attached to the work
+ (an example is provided in the Appendix below).
+
+ "Derivative Works" shall mean any work, whether in Source or Object
+ form, that is based on (or derived from) the Work and for which the
+ editorial revisions, annotations, elaborations, or other modifications
+ represent, as a whole, an original work of authorship. For the purposes
+ of this License, Derivative Works shall not include works that remain
+ separable from, or merely link (or bind by name) to the interfaces of,
+ the Work and Derivative Works thereof.
+
+ "Contribution" shall mean any work of authorship, including
+ the original version of the Work and any modifications or additions
+ to that Work or Derivative Works thereof, that is intentionally
+ submitted to Licensor for inclusion in the Work by the copyright owner
+ or by an individual or Legal Entity authorized to submit on behalf of
+ the copyright owner. For the purposes of this definition, "submitted"
+ means any form of electronic, verbal, or written communication sent
+ to the Licensor or its representatives, including but not limited to
+ communication on electronic mailing lists, source code control systems,
+ and issue tracking systems that are managed by, or on behalf of, the
+ Licensor for the purpose of discussing and improving the Work, but
+ excluding communication that is conspicuously marked or otherwise
+ designated in writing by the copyright owner as "Not a Contribution."
+
+ "Contributor" shall mean Licensor and any individual or Legal Entity
+ on behalf of whom a Contribution has been received by Licensor and
+ subsequently incorporated within the Work.
+
+ 2. Grant of Copyright License. Subject to the terms and conditions of
+ this License, each Contributor hereby grants to You a perpetual,
+ worldwide, non-exclusive, no-charge, royalty-free, irrevocable
+ copyright license to reproduce, prepare Derivative Works of,
+ publicly display, publicly perform, sublicense, and distribute the
+ Work and such Derivative Works in Source or Object form.
+
+ 3. Grant of Patent License. Subject to the terms and conditions of
+ this License, each Contributor hereby grants to You a perpetual,
+ worldwide, non-exclusive, no-charge, royalty-free, irrevocable
+ (except as stated in this section) patent license to make, have made,
+ use, offer to sell, sell, import, and otherwise transfer the Work,
+ where such license applies only to those patent claims licensable
+ by such Contributor that are necessarily infringed by their
+ Contribution(s) alone or by combination of their Contribution(s)
+ with the Work to which such Contribution(s) was submitted. If You
+ institute patent litigation against any entity (including a
+ cross-claim or counterclaim in a lawsuit) alleging that the Work
+ or a Contribution incorporated within the Work constitutes direct
+ or contributory patent infringement, then any patent licenses
+ granted to You under this License for that Work shall terminate
+ as of the date such litigation is filed.
+
+ 4. Redistribution. You may reproduce and distribute copies of the
+ Work or Derivative Works thereof in any medium, with or without
+ modifications, and in Source or Object form, provided that You
+ meet the following conditions:
+
+ (a) You must give any other recipients of the Work or
+ Derivative Works a copy of this License; and
+
+ (b) You must cause any modified files to carry prominent notices
+ stating that You changed the files; and
+
+ (c) You must retain, in the Source form of any Derivative Works
+ that You distribute, all copyright, patent, trademark, and
+ attribution notices from the Source form of the Work,
+ excluding those notices that do not pertain to any part of
+ the Derivative Works; and
+
+ (d) If the Work includes a "NOTICE" text file as part of its
+ distribution, then any Derivative Works that You distribute must
+ include a readable copy of the attribution notices contained
+ within such NOTICE file, excluding those notices that do not
+ pertain to any part of the Derivative Works, in at least one
+ of the following places: within a NOTICE text file distributed
+ as part of the Derivative Works; within the Source form or
+ documentation, if provided along with the Derivative Works; or,
+ within a display generated by the Derivative Works, if and
+ wherever such third-party notices normally appear. The contents
+ of the NOTICE file are for informational purposes only and
+ do not modify the License. You may add Your own attribution
+ notices within Derivative Works that You distribute, alongside
+ or as an addendum to the NOTICE text from the Work, provided
+ that such additional attribution notices cannot be construed
+ as modifying the License.
+
+ You may add Your own copyright statement to Your modifications and
+ may provide additional or different license terms and conditions
+ for use, reproduction, or distribution of Your modifications, or
+ for any such Derivative Works as a whole, provided Your use,
+ reproduction, and distribution of the Work otherwise complies with
+ the conditions stated in this License.
+
+ 5. Submission of Contributions. Unless You explicitly state otherwise,
+ any Contribution intentionally submitted for inclusion in the Work
+ by You to the Licensor shall be under the terms and conditions of
+ this License, without any additional terms or conditions.
+ Notwithstanding the above, nothing herein shall supersede or modify
+ the terms of any separate license agreement you may have executed
+ with Licensor regarding such Contributions.
+
+ 6. Trademarks. This License does not grant permission to use the trade
+ names, trademarks, service marks, or product names of the Licensor,
+ except as required for reasonable and customary use in describing the
+ origin of the Work and reproducing the content of the NOTICE file.
+
+ 7. Disclaimer of Warranty. Unless required by applicable law or
+ agreed to in writing, Licensor provides the Work (and each
+ Contributor provides its Contributions) on an "AS IS" BASIS,
+ WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or
+ implied, including, without limitation, any warranties or conditions
+ of TITLE, NON-INFRINGEMENT, MERCHANTABILITY, or FITNESS FOR A
+ PARTICULAR PURPOSE. You are solely responsible for determining the
+ appropriateness of using or redistributing the Work and assume any
+ risks associated with Your exercise of permissions under this License.
+
+ 8. Limitation of Liability. In no event and under no legal theory,
+ whether in tort (including negligence), contract, or otherwise,
+ unless required by applicable law (such as deliberate and grossly
+ negligent acts) or agreed to in writing, shall any Contributor be
+ liable to You for damages, including any direct, indirect, special,
+ incidental, or consequential damages of any character arising as a
+ result of this License or out of the use or inability to use the
+ Work (including but not limited to damages for loss of goodwill,
+ work stoppage, computer failure or malfunction, or any and all
+ other commercial damages or losses), even if such Contributor
+ has been advised of the possibility of such damages.
+
+ 9. Accepting Warranty or Additional Liability. While redistributing
+ the Work or Derivative Works thereof, You may choose to offer,
+ and charge a fee for, acceptance of support, warranty, indemnity,
+ or other liability obligations and/or rights consistent with this
+ License. However, in accepting such obligations, You may act only
+ on Your own behalf and on Your sole responsibility, not on behalf
+ of any other Contributor, and only if You agree to indemnify,
+ defend, and hold each Contributor harmless for any liability
+ incurred by, or claims asserted against, such Contributor by reason
+ of your accepting any such warranty or additional liability.
+
+ END OF TERMS AND CONDITIONS
+
+ APPENDIX: How to apply the Apache License to your work.
+
+ To apply the Apache License to your work, attach the following
+ boilerplate notice, with the fields enclosed by brackets "[]"
+ replaced with your own identifying information. (Don't include
+ the brackets!) The text should be enclosed in the appropriate
+ comment syntax for the file format. We also recommend that a
+ file or class name and description of purpose be included on the
+ same "printed page" as the copyright notice for easier
+ identification within third-party archives.
+
+ Copyright [yyyy] [name of copyright owner]
+
+ Licensed under the Apache License, Version 2.0 (the "License");
+ you may not use this file except in compliance with the License.
+ You may obtain a copy of the License at
+
+ http://www.apache.org/licenses/LICENSE-2.0
+
+ Unless required by applicable law or agreed to in writing, software
+ distributed under the License is distributed on an "AS IS" BASIS,
+ WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ See the License for the specific language governing permissions and
+ limitations under the License.
diff --git a/vendor/github.com/google/pprof/profile/encode.go b/vendor/github.com/google/pprof/profile/encode.go
new file mode 100644
index 000000000..182c926b9
--- /dev/null
+++ b/vendor/github.com/google/pprof/profile/encode.go
@@ -0,0 +1,588 @@
+// Copyright 2014 Google Inc. All Rights Reserved.
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+package profile
+
+import (
+ "errors"
+ "sort"
+ "strings"
+)
+
+func (p *Profile) decoder() []decoder {
+ return profileDecoder
+}
+
+// preEncode populates the unexported fields to be used by encode
+// (with suffix X) from the corresponding exported fields. The
+// exported fields are cleared up to facilitate testing.
+func (p *Profile) preEncode() {
+ strings := make(map[string]int)
+ addString(strings, "")
+
+ for _, st := range p.SampleType {
+ st.typeX = addString(strings, st.Type)
+ st.unitX = addString(strings, st.Unit)
+ }
+
+ for _, s := range p.Sample {
+ s.labelX = nil
+ var keys []string
+ for k := range s.Label {
+ keys = append(keys, k)
+ }
+ sort.Strings(keys)
+ for _, k := range keys {
+ vs := s.Label[k]
+ for _, v := range vs {
+ s.labelX = append(s.labelX,
+ label{
+ keyX: addString(strings, k),
+ strX: addString(strings, v),
+ },
+ )
+ }
+ }
+ var numKeys []string
+ for k := range s.NumLabel {
+ numKeys = append(numKeys, k)
+ }
+ sort.Strings(numKeys)
+ for _, k := range numKeys {
+ keyX := addString(strings, k)
+ vs := s.NumLabel[k]
+ units := s.NumUnit[k]
+ for i, v := range vs {
+ var unitX int64
+ if len(units) != 0 {
+ unitX = addString(strings, units[i])
+ }
+ s.labelX = append(s.labelX,
+ label{
+ keyX: keyX,
+ numX: v,
+ unitX: unitX,
+ },
+ )
+ }
+ }
+ s.locationIDX = make([]uint64, len(s.Location))
+ for i, loc := range s.Location {
+ s.locationIDX[i] = loc.ID
+ }
+ }
+
+ for _, m := range p.Mapping {
+ m.fileX = addString(strings, m.File)
+ m.buildIDX = addString(strings, m.BuildID)
+ }
+
+ for _, l := range p.Location {
+ for i, ln := range l.Line {
+ if ln.Function != nil {
+ l.Line[i].functionIDX = ln.Function.ID
+ } else {
+ l.Line[i].functionIDX = 0
+ }
+ }
+ if l.Mapping != nil {
+ l.mappingIDX = l.Mapping.ID
+ } else {
+ l.mappingIDX = 0
+ }
+ }
+ for _, f := range p.Function {
+ f.nameX = addString(strings, f.Name)
+ f.systemNameX = addString(strings, f.SystemName)
+ f.filenameX = addString(strings, f.Filename)
+ }
+
+ p.dropFramesX = addString(strings, p.DropFrames)
+ p.keepFramesX = addString(strings, p.KeepFrames)
+
+ if pt := p.PeriodType; pt != nil {
+ pt.typeX = addString(strings, pt.Type)
+ pt.unitX = addString(strings, pt.Unit)
+ }
+
+ p.commentX = nil
+ for _, c := range p.Comments {
+ p.commentX = append(p.commentX, addString(strings, c))
+ }
+
+ p.defaultSampleTypeX = addString(strings, p.DefaultSampleType)
+
+ p.stringTable = make([]string, len(strings))
+ for s, i := range strings {
+ p.stringTable[i] = s
+ }
+}
+
+func (p *Profile) encode(b *buffer) {
+ for _, x := range p.SampleType {
+ encodeMessage(b, 1, x)
+ }
+ for _, x := range p.Sample {
+ encodeMessage(b, 2, x)
+ }
+ for _, x := range p.Mapping {
+ encodeMessage(b, 3, x)
+ }
+ for _, x := range p.Location {
+ encodeMessage(b, 4, x)
+ }
+ for _, x := range p.Function {
+ encodeMessage(b, 5, x)
+ }
+ encodeStrings(b, 6, p.stringTable)
+ encodeInt64Opt(b, 7, p.dropFramesX)
+ encodeInt64Opt(b, 8, p.keepFramesX)
+ encodeInt64Opt(b, 9, p.TimeNanos)
+ encodeInt64Opt(b, 10, p.DurationNanos)
+ if pt := p.PeriodType; pt != nil && (pt.typeX != 0 || pt.unitX != 0) {
+ encodeMessage(b, 11, p.PeriodType)
+ }
+ encodeInt64Opt(b, 12, p.Period)
+ encodeInt64s(b, 13, p.commentX)
+ encodeInt64(b, 14, p.defaultSampleTypeX)
+}
+
+var profileDecoder = []decoder{
+ nil, // 0
+ // repeated ValueType sample_type = 1
+ func(b *buffer, m message) error {
+ x := new(ValueType)
+ pp := m.(*Profile)
+ pp.SampleType = append(pp.SampleType, x)
+ return decodeMessage(b, x)
+ },
+ // repeated Sample sample = 2
+ func(b *buffer, m message) error {
+ x := new(Sample)
+ pp := m.(*Profile)
+ pp.Sample = append(pp.Sample, x)
+ return decodeMessage(b, x)
+ },
+ // repeated Mapping mapping = 3
+ func(b *buffer, m message) error {
+ x := new(Mapping)
+ pp := m.(*Profile)
+ pp.Mapping = append(pp.Mapping, x)
+ return decodeMessage(b, x)
+ },
+ // repeated Location location = 4
+ func(b *buffer, m message) error {
+ x := new(Location)
+ x.Line = b.tmpLines[:0] // Use shared space temporarily
+ pp := m.(*Profile)
+ pp.Location = append(pp.Location, x)
+ err := decodeMessage(b, x)
+ b.tmpLines = x.Line[:0]
+ // Copy to shrink size and detach from shared space.
+ x.Line = append([]Line(nil), x.Line...)
+ return err
+ },
+ // repeated Function function = 5
+ func(b *buffer, m message) error {
+ x := new(Function)
+ pp := m.(*Profile)
+ pp.Function = append(pp.Function, x)
+ return decodeMessage(b, x)
+ },
+ // repeated string string_table = 6
+ func(b *buffer, m message) error {
+ err := decodeStrings(b, &m.(*Profile).stringTable)
+ if err != nil {
+ return err
+ }
+ if m.(*Profile).stringTable[0] != "" {
+ return errors.New("string_table[0] must be ''")
+ }
+ return nil
+ },
+ // int64 drop_frames = 7
+ func(b *buffer, m message) error { return decodeInt64(b, &m.(*Profile).dropFramesX) },
+ // int64 keep_frames = 8
+ func(b *buffer, m message) error { return decodeInt64(b, &m.(*Profile).keepFramesX) },
+ // int64 time_nanos = 9
+ func(b *buffer, m message) error {
+ if m.(*Profile).TimeNanos != 0 {
+ return errConcatProfile
+ }
+ return decodeInt64(b, &m.(*Profile).TimeNanos)
+ },
+ // int64 duration_nanos = 10
+ func(b *buffer, m message) error { return decodeInt64(b, &m.(*Profile).DurationNanos) },
+ // ValueType period_type = 11
+ func(b *buffer, m message) error {
+ x := new(ValueType)
+ pp := m.(*Profile)
+ pp.PeriodType = x
+ return decodeMessage(b, x)
+ },
+ // int64 period = 12
+ func(b *buffer, m message) error { return decodeInt64(b, &m.(*Profile).Period) },
+ // repeated int64 comment = 13
+ func(b *buffer, m message) error { return decodeInt64s(b, &m.(*Profile).commentX) },
+ // int64 defaultSampleType = 14
+ func(b *buffer, m message) error { return decodeInt64(b, &m.(*Profile).defaultSampleTypeX) },
+}
+
+// postDecode takes the unexported fields populated by decode (with
+// suffix X) and populates the corresponding exported fields.
+// The unexported fields are cleared up to facilitate testing.
+func (p *Profile) postDecode() error {
+ var err error
+ mappings := make(map[uint64]*Mapping, len(p.Mapping))
+ mappingIds := make([]*Mapping, len(p.Mapping)+1)
+ for _, m := range p.Mapping {
+ m.File, err = getString(p.stringTable, &m.fileX, err)
+ m.BuildID, err = getString(p.stringTable, &m.buildIDX, err)
+ if m.ID < uint64(len(mappingIds)) {
+ mappingIds[m.ID] = m
+ } else {
+ mappings[m.ID] = m
+ }
+
+ // If this a main linux kernel mapping with a relocation symbol suffix
+ // ("[kernel.kallsyms]_text"), extract said suffix.
+ // It is fairly hacky to handle at this level, but the alternatives appear even worse.
+ const prefix = "[kernel.kallsyms]"
+ if strings.HasPrefix(m.File, prefix) {
+ m.KernelRelocationSymbol = m.File[len(prefix):]
+ }
+ }
+
+ functions := make(map[uint64]*Function, len(p.Function))
+ functionIds := make([]*Function, len(p.Function)+1)
+ for _, f := range p.Function {
+ f.Name, err = getString(p.stringTable, &f.nameX, err)
+ f.SystemName, err = getString(p.stringTable, &f.systemNameX, err)
+ f.Filename, err = getString(p.stringTable, &f.filenameX, err)
+ if f.ID < uint64(len(functionIds)) {
+ functionIds[f.ID] = f
+ } else {
+ functions[f.ID] = f
+ }
+ }
+
+ locations := make(map[uint64]*Location, len(p.Location))
+ locationIds := make([]*Location, len(p.Location)+1)
+ for _, l := range p.Location {
+ if id := l.mappingIDX; id < uint64(len(mappingIds)) {
+ l.Mapping = mappingIds[id]
+ } else {
+ l.Mapping = mappings[id]
+ }
+ l.mappingIDX = 0
+ for i, ln := range l.Line {
+ if id := ln.functionIDX; id != 0 {
+ l.Line[i].functionIDX = 0
+ if id < uint64(len(functionIds)) {
+ l.Line[i].Function = functionIds[id]
+ } else {
+ l.Line[i].Function = functions[id]
+ }
+ }
+ }
+ if l.ID < uint64(len(locationIds)) {
+ locationIds[l.ID] = l
+ } else {
+ locations[l.ID] = l
+ }
+ }
+
+ for _, st := range p.SampleType {
+ st.Type, err = getString(p.stringTable, &st.typeX, err)
+ st.Unit, err = getString(p.stringTable, &st.unitX, err)
+ }
+
+ // Pre-allocate space for all locations.
+ numLocations := 0
+ for _, s := range p.Sample {
+ numLocations += len(s.locationIDX)
+ }
+ locBuffer := make([]*Location, numLocations)
+
+ for _, s := range p.Sample {
+ if len(s.labelX) > 0 {
+ labels := make(map[string][]string, len(s.labelX))
+ numLabels := make(map[string][]int64, len(s.labelX))
+ numUnits := make(map[string][]string, len(s.labelX))
+ for _, l := range s.labelX {
+ var key, value string
+ key, err = getString(p.stringTable, &l.keyX, err)
+ if l.strX != 0 {
+ value, err = getString(p.stringTable, &l.strX, err)
+ labels[key] = append(labels[key], value)
+ } else if l.numX != 0 || l.unitX != 0 {
+ numValues := numLabels[key]
+ units := numUnits[key]
+ if l.unitX != 0 {
+ var unit string
+ unit, err = getString(p.stringTable, &l.unitX, err)
+ units = padStringArray(units, len(numValues))
+ numUnits[key] = append(units, unit)
+ }
+ numLabels[key] = append(numLabels[key], l.numX)
+ }
+ }
+ if len(labels) > 0 {
+ s.Label = labels
+ }
+ if len(numLabels) > 0 {
+ s.NumLabel = numLabels
+ for key, units := range numUnits {
+ if len(units) > 0 {
+ numUnits[key] = padStringArray(units, len(numLabels[key]))
+ }
+ }
+ s.NumUnit = numUnits
+ }
+ }
+
+ s.Location = locBuffer[:len(s.locationIDX)]
+ locBuffer = locBuffer[len(s.locationIDX):]
+ for i, lid := range s.locationIDX {
+ if lid < uint64(len(locationIds)) {
+ s.Location[i] = locationIds[lid]
+ } else {
+ s.Location[i] = locations[lid]
+ }
+ }
+ s.locationIDX = nil
+ }
+
+ p.DropFrames, err = getString(p.stringTable, &p.dropFramesX, err)
+ p.KeepFrames, err = getString(p.stringTable, &p.keepFramesX, err)
+
+ if pt := p.PeriodType; pt == nil {
+ p.PeriodType = &ValueType{}
+ }
+
+ if pt := p.PeriodType; pt != nil {
+ pt.Type, err = getString(p.stringTable, &pt.typeX, err)
+ pt.Unit, err = getString(p.stringTable, &pt.unitX, err)
+ }
+
+ for _, i := range p.commentX {
+ var c string
+ c, err = getString(p.stringTable, &i, err)
+ p.Comments = append(p.Comments, c)
+ }
+
+ p.commentX = nil
+ p.DefaultSampleType, err = getString(p.stringTable, &p.defaultSampleTypeX, err)
+ p.stringTable = nil
+ return err
+}
+
+// padStringArray pads arr with enough empty strings to make arr
+// length l when arr's length is less than l.
+func padStringArray(arr []string, l int) []string {
+ if l <= len(arr) {
+ return arr
+ }
+ return append(arr, make([]string, l-len(arr))...)
+}
+
+func (p *ValueType) decoder() []decoder {
+ return valueTypeDecoder
+}
+
+func (p *ValueType) encode(b *buffer) {
+ encodeInt64Opt(b, 1, p.typeX)
+ encodeInt64Opt(b, 2, p.unitX)
+}
+
+var valueTypeDecoder = []decoder{
+ nil, // 0
+ // optional int64 type = 1
+ func(b *buffer, m message) error { return decodeInt64(b, &m.(*ValueType).typeX) },
+ // optional int64 unit = 2
+ func(b *buffer, m message) error { return decodeInt64(b, &m.(*ValueType).unitX) },
+}
+
+func (p *Sample) decoder() []decoder {
+ return sampleDecoder
+}
+
+func (p *Sample) encode(b *buffer) {
+ encodeUint64s(b, 1, p.locationIDX)
+ encodeInt64s(b, 2, p.Value)
+ for _, x := range p.labelX {
+ encodeMessage(b, 3, x)
+ }
+}
+
+var sampleDecoder = []decoder{
+ nil, // 0
+ // repeated uint64 location = 1
+ func(b *buffer, m message) error { return decodeUint64s(b, &m.(*Sample).locationIDX) },
+ // repeated int64 value = 2
+ func(b *buffer, m message) error { return decodeInt64s(b, &m.(*Sample).Value) },
+ // repeated Label label = 3
+ func(b *buffer, m message) error {
+ s := m.(*Sample)
+ n := len(s.labelX)
+ s.labelX = append(s.labelX, label{})
+ return decodeMessage(b, &s.labelX[n])
+ },
+}
+
+func (p label) decoder() []decoder {
+ return labelDecoder
+}
+
+func (p label) encode(b *buffer) {
+ encodeInt64Opt(b, 1, p.keyX)
+ encodeInt64Opt(b, 2, p.strX)
+ encodeInt64Opt(b, 3, p.numX)
+ encodeInt64Opt(b, 4, p.unitX)
+}
+
+var labelDecoder = []decoder{
+ nil, // 0
+ // optional int64 key = 1
+ func(b *buffer, m message) error { return decodeInt64(b, &m.(*label).keyX) },
+ // optional int64 str = 2
+ func(b *buffer, m message) error { return decodeInt64(b, &m.(*label).strX) },
+ // optional int64 num = 3
+ func(b *buffer, m message) error { return decodeInt64(b, &m.(*label).numX) },
+ // optional int64 num = 4
+ func(b *buffer, m message) error { return decodeInt64(b, &m.(*label).unitX) },
+}
+
+func (p *Mapping) decoder() []decoder {
+ return mappingDecoder
+}
+
+func (p *Mapping) encode(b *buffer) {
+ encodeUint64Opt(b, 1, p.ID)
+ encodeUint64Opt(b, 2, p.Start)
+ encodeUint64Opt(b, 3, p.Limit)
+ encodeUint64Opt(b, 4, p.Offset)
+ encodeInt64Opt(b, 5, p.fileX)
+ encodeInt64Opt(b, 6, p.buildIDX)
+ encodeBoolOpt(b, 7, p.HasFunctions)
+ encodeBoolOpt(b, 8, p.HasFilenames)
+ encodeBoolOpt(b, 9, p.HasLineNumbers)
+ encodeBoolOpt(b, 10, p.HasInlineFrames)
+}
+
+var mappingDecoder = []decoder{
+ nil, // 0
+ func(b *buffer, m message) error { return decodeUint64(b, &m.(*Mapping).ID) }, // optional uint64 id = 1
+ func(b *buffer, m message) error { return decodeUint64(b, &m.(*Mapping).Start) }, // optional uint64 memory_offset = 2
+ func(b *buffer, m message) error { return decodeUint64(b, &m.(*Mapping).Limit) }, // optional uint64 memory_limit = 3
+ func(b *buffer, m message) error { return decodeUint64(b, &m.(*Mapping).Offset) }, // optional uint64 file_offset = 4
+ func(b *buffer, m message) error { return decodeInt64(b, &m.(*Mapping).fileX) }, // optional int64 filename = 5
+ func(b *buffer, m message) error { return decodeInt64(b, &m.(*Mapping).buildIDX) }, // optional int64 build_id = 6
+ func(b *buffer, m message) error { return decodeBool(b, &m.(*Mapping).HasFunctions) }, // optional bool has_functions = 7
+ func(b *buffer, m message) error { return decodeBool(b, &m.(*Mapping).HasFilenames) }, // optional bool has_filenames = 8
+ func(b *buffer, m message) error { return decodeBool(b, &m.(*Mapping).HasLineNumbers) }, // optional bool has_line_numbers = 9
+ func(b *buffer, m message) error { return decodeBool(b, &m.(*Mapping).HasInlineFrames) }, // optional bool has_inline_frames = 10
+}
+
+func (p *Location) decoder() []decoder {
+ return locationDecoder
+}
+
+func (p *Location) encode(b *buffer) {
+ encodeUint64Opt(b, 1, p.ID)
+ encodeUint64Opt(b, 2, p.mappingIDX)
+ encodeUint64Opt(b, 3, p.Address)
+ for i := range p.Line {
+ encodeMessage(b, 4, &p.Line[i])
+ }
+ encodeBoolOpt(b, 5, p.IsFolded)
+}
+
+var locationDecoder = []decoder{
+ nil, // 0
+ func(b *buffer, m message) error { return decodeUint64(b, &m.(*Location).ID) }, // optional uint64 id = 1;
+ func(b *buffer, m message) error { return decodeUint64(b, &m.(*Location).mappingIDX) }, // optional uint64 mapping_id = 2;
+ func(b *buffer, m message) error { return decodeUint64(b, &m.(*Location).Address) }, // optional uint64 address = 3;
+ func(b *buffer, m message) error { // repeated Line line = 4
+ pp := m.(*Location)
+ n := len(pp.Line)
+ pp.Line = append(pp.Line, Line{})
+ return decodeMessage(b, &pp.Line[n])
+ },
+ func(b *buffer, m message) error { return decodeBool(b, &m.(*Location).IsFolded) }, // optional bool is_folded = 5;
+}
+
+func (p *Line) decoder() []decoder {
+ return lineDecoder
+}
+
+func (p *Line) encode(b *buffer) {
+ encodeUint64Opt(b, 1, p.functionIDX)
+ encodeInt64Opt(b, 2, p.Line)
+}
+
+var lineDecoder = []decoder{
+ nil, // 0
+ // optional uint64 function_id = 1
+ func(b *buffer, m message) error { return decodeUint64(b, &m.(*Line).functionIDX) },
+ // optional int64 line = 2
+ func(b *buffer, m message) error { return decodeInt64(b, &m.(*Line).Line) },
+}
+
+func (p *Function) decoder() []decoder {
+ return functionDecoder
+}
+
+func (p *Function) encode(b *buffer) {
+ encodeUint64Opt(b, 1, p.ID)
+ encodeInt64Opt(b, 2, p.nameX)
+ encodeInt64Opt(b, 3, p.systemNameX)
+ encodeInt64Opt(b, 4, p.filenameX)
+ encodeInt64Opt(b, 5, p.StartLine)
+}
+
+var functionDecoder = []decoder{
+ nil, // 0
+ // optional uint64 id = 1
+ func(b *buffer, m message) error { return decodeUint64(b, &m.(*Function).ID) },
+ // optional int64 function_name = 2
+ func(b *buffer, m message) error { return decodeInt64(b, &m.(*Function).nameX) },
+ // optional int64 function_system_name = 3
+ func(b *buffer, m message) error { return decodeInt64(b, &m.(*Function).systemNameX) },
+ // repeated int64 filename = 4
+ func(b *buffer, m message) error { return decodeInt64(b, &m.(*Function).filenameX) },
+ // optional int64 start_line = 5
+ func(b *buffer, m message) error { return decodeInt64(b, &m.(*Function).StartLine) },
+}
+
+func addString(strings map[string]int, s string) int64 {
+ i, ok := strings[s]
+ if !ok {
+ i = len(strings)
+ strings[s] = i
+ }
+ return int64(i)
+}
+
+func getString(strings []string, strng *int64, err error) (string, error) {
+ if err != nil {
+ return "", err
+ }
+ s := int(*strng)
+ if s < 0 || s >= len(strings) {
+ return "", errMalformed
+ }
+ *strng = 0
+ return strings[s], nil
+}
diff --git a/vendor/github.com/google/pprof/profile/filter.go b/vendor/github.com/google/pprof/profile/filter.go
new file mode 100644
index 000000000..c794b9390
--- /dev/null
+++ b/vendor/github.com/google/pprof/profile/filter.go
@@ -0,0 +1,274 @@
+// Copyright 2014 Google Inc. All Rights Reserved.
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+package profile
+
+// Implements methods to filter samples from profiles.
+
+import "regexp"
+
+// FilterSamplesByName filters the samples in a profile and only keeps
+// samples where at least one frame matches focus but none match ignore.
+// Returns true if the corresponding regexp matched at least one sample.
+func (p *Profile) FilterSamplesByName(focus, ignore, hide, show *regexp.Regexp) (fm, im, hm, hnm bool) {
+ if focus == nil && ignore == nil && hide == nil && show == nil {
+ fm = true // Missing focus implies a match
+ return
+ }
+ focusOrIgnore := make(map[uint64]bool)
+ hidden := make(map[uint64]bool)
+ for _, l := range p.Location {
+ if ignore != nil && l.matchesName(ignore) {
+ im = true
+ focusOrIgnore[l.ID] = false
+ } else if focus == nil || l.matchesName(focus) {
+ fm = true
+ focusOrIgnore[l.ID] = true
+ }
+
+ if hide != nil && l.matchesName(hide) {
+ hm = true
+ l.Line = l.unmatchedLines(hide)
+ if len(l.Line) == 0 {
+ hidden[l.ID] = true
+ }
+ }
+ if show != nil {
+ l.Line = l.matchedLines(show)
+ if len(l.Line) == 0 {
+ hidden[l.ID] = true
+ } else {
+ hnm = true
+ }
+ }
+ }
+
+ s := make([]*Sample, 0, len(p.Sample))
+ for _, sample := range p.Sample {
+ if focusedAndNotIgnored(sample.Location, focusOrIgnore) {
+ if len(hidden) > 0 {
+ var locs []*Location
+ for _, loc := range sample.Location {
+ if !hidden[loc.ID] {
+ locs = append(locs, loc)
+ }
+ }
+ if len(locs) == 0 {
+ // Remove sample with no locations (by not adding it to s).
+ continue
+ }
+ sample.Location = locs
+ }
+ s = append(s, sample)
+ }
+ }
+ p.Sample = s
+
+ return
+}
+
+// ShowFrom drops all stack frames above the highest matching frame and returns
+// whether a match was found. If showFrom is nil it returns false and does not
+// modify the profile.
+//
+// Example: consider a sample with frames [A, B, C, B], where A is the root.
+// ShowFrom(nil) returns false and has frames [A, B, C, B].
+// ShowFrom(A) returns true and has frames [A, B, C, B].
+// ShowFrom(B) returns true and has frames [B, C, B].
+// ShowFrom(C) returns true and has frames [C, B].
+// ShowFrom(D) returns false and drops the sample because no frames remain.
+func (p *Profile) ShowFrom(showFrom *regexp.Regexp) (matched bool) {
+ if showFrom == nil {
+ return false
+ }
+ // showFromLocs stores location IDs that matched ShowFrom.
+ showFromLocs := make(map[uint64]bool)
+ // Apply to locations.
+ for _, loc := range p.Location {
+ if filterShowFromLocation(loc, showFrom) {
+ showFromLocs[loc.ID] = true
+ matched = true
+ }
+ }
+ // For all samples, strip locations after the highest matching one.
+ s := make([]*Sample, 0, len(p.Sample))
+ for _, sample := range p.Sample {
+ for i := len(sample.Location) - 1; i >= 0; i-- {
+ if showFromLocs[sample.Location[i].ID] {
+ sample.Location = sample.Location[:i+1]
+ s = append(s, sample)
+ break
+ }
+ }
+ }
+ p.Sample = s
+ return matched
+}
+
+// filterShowFromLocation tests a showFrom regex against a location, removes
+// lines after the last match and returns whether a match was found. If the
+// mapping is matched, then all lines are kept.
+func filterShowFromLocation(loc *Location, showFrom *regexp.Regexp) bool {
+ if m := loc.Mapping; m != nil && showFrom.MatchString(m.File) {
+ return true
+ }
+ if i := loc.lastMatchedLineIndex(showFrom); i >= 0 {
+ loc.Line = loc.Line[:i+1]
+ return true
+ }
+ return false
+}
+
+// lastMatchedLineIndex returns the index of the last line that matches a regex,
+// or -1 if no match is found.
+func (loc *Location) lastMatchedLineIndex(re *regexp.Regexp) int {
+ for i := len(loc.Line) - 1; i >= 0; i-- {
+ if fn := loc.Line[i].Function; fn != nil {
+ if re.MatchString(fn.Name) || re.MatchString(fn.Filename) {
+ return i
+ }
+ }
+ }
+ return -1
+}
+
+// FilterTagsByName filters the tags in a profile and only keeps
+// tags that match show and not hide.
+func (p *Profile) FilterTagsByName(show, hide *regexp.Regexp) (sm, hm bool) {
+ matchRemove := func(name string) bool {
+ matchShow := show == nil || show.MatchString(name)
+ matchHide := hide != nil && hide.MatchString(name)
+
+ if matchShow {
+ sm = true
+ }
+ if matchHide {
+ hm = true
+ }
+ return !matchShow || matchHide
+ }
+ for _, s := range p.Sample {
+ for lab := range s.Label {
+ if matchRemove(lab) {
+ delete(s.Label, lab)
+ }
+ }
+ for lab := range s.NumLabel {
+ if matchRemove(lab) {
+ delete(s.NumLabel, lab)
+ }
+ }
+ }
+ return
+}
+
+// matchesName returns whether the location matches the regular
+// expression. It checks any available function names, file names, and
+// mapping object filename.
+func (loc *Location) matchesName(re *regexp.Regexp) bool {
+ for _, ln := range loc.Line {
+ if fn := ln.Function; fn != nil {
+ if re.MatchString(fn.Name) || re.MatchString(fn.Filename) {
+ return true
+ }
+ }
+ }
+ if m := loc.Mapping; m != nil && re.MatchString(m.File) {
+ return true
+ }
+ return false
+}
+
+// unmatchedLines returns the lines in the location that do not match
+// the regular expression.
+func (loc *Location) unmatchedLines(re *regexp.Regexp) []Line {
+ if m := loc.Mapping; m != nil && re.MatchString(m.File) {
+ return nil
+ }
+ var lines []Line
+ for _, ln := range loc.Line {
+ if fn := ln.Function; fn != nil {
+ if re.MatchString(fn.Name) || re.MatchString(fn.Filename) {
+ continue
+ }
+ }
+ lines = append(lines, ln)
+ }
+ return lines
+}
+
+// matchedLines returns the lines in the location that match
+// the regular expression.
+func (loc *Location) matchedLines(re *regexp.Regexp) []Line {
+ if m := loc.Mapping; m != nil && re.MatchString(m.File) {
+ return loc.Line
+ }
+ var lines []Line
+ for _, ln := range loc.Line {
+ if fn := ln.Function; fn != nil {
+ if !re.MatchString(fn.Name) && !re.MatchString(fn.Filename) {
+ continue
+ }
+ }
+ lines = append(lines, ln)
+ }
+ return lines
+}
+
+// focusedAndNotIgnored looks up a slice of ids against a map of
+// focused/ignored locations. The map only contains locations that are
+// explicitly focused or ignored. Returns whether there is at least
+// one focused location but no ignored locations.
+func focusedAndNotIgnored(locs []*Location, m map[uint64]bool) bool {
+ var f bool
+ for _, loc := range locs {
+ if focus, focusOrIgnore := m[loc.ID]; focusOrIgnore {
+ if focus {
+ // Found focused location. Must keep searching in case there
+ // is an ignored one as well.
+ f = true
+ } else {
+ // Found ignored location. Can return false right away.
+ return false
+ }
+ }
+ }
+ return f
+}
+
+// TagMatch selects tags for filtering
+type TagMatch func(s *Sample) bool
+
+// FilterSamplesByTag removes all samples from the profile, except
+// those that match the focus predicate and do not match the ignore
+// predicate.
+func (p *Profile) FilterSamplesByTag(focus, ignore TagMatch) (fm, im bool) {
+ samples := make([]*Sample, 0, len(p.Sample))
+ for _, s := range p.Sample {
+ focused, ignored := true, false
+ if focus != nil {
+ focused = focus(s)
+ }
+ if ignore != nil {
+ ignored = ignore(s)
+ }
+ fm = fm || focused
+ im = im || ignored
+ if focused && !ignored {
+ samples = append(samples, s)
+ }
+ }
+ p.Sample = samples
+ return
+}
diff --git a/vendor/github.com/google/pprof/profile/index.go b/vendor/github.com/google/pprof/profile/index.go
new file mode 100644
index 000000000..bef1d6046
--- /dev/null
+++ b/vendor/github.com/google/pprof/profile/index.go
@@ -0,0 +1,64 @@
+// Copyright 2016 Google Inc. All Rights Reserved.
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+package profile
+
+import (
+ "fmt"
+ "strconv"
+ "strings"
+)
+
+// SampleIndexByName returns the appropriate index for a value of sample index.
+// If numeric, it returns the number, otherwise it looks up the text in the
+// profile sample types.
+func (p *Profile) SampleIndexByName(sampleIndex string) (int, error) {
+ if sampleIndex == "" {
+ if dst := p.DefaultSampleType; dst != "" {
+ for i, t := range sampleTypes(p) {
+ if t == dst {
+ return i, nil
+ }
+ }
+ }
+ // By default select the last sample value
+ return len(p.SampleType) - 1, nil
+ }
+ if i, err := strconv.Atoi(sampleIndex); err == nil {
+ if i < 0 || i >= len(p.SampleType) {
+ return 0, fmt.Errorf("sample_index %s is outside the range [0..%d]", sampleIndex, len(p.SampleType)-1)
+ }
+ return i, nil
+ }
+
+ // Remove the inuse_ prefix to support legacy pprof options
+ // "inuse_space" and "inuse_objects" for profiles containing types
+ // "space" and "objects".
+ noInuse := strings.TrimPrefix(sampleIndex, "inuse_")
+ for i, t := range p.SampleType {
+ if t.Type == sampleIndex || t.Type == noInuse {
+ return i, nil
+ }
+ }
+
+ return 0, fmt.Errorf("sample_index %q must be one of: %v", sampleIndex, sampleTypes(p))
+}
+
+func sampleTypes(p *Profile) []string {
+ types := make([]string, len(p.SampleType))
+ for i, t := range p.SampleType {
+ types[i] = t.Type
+ }
+ return types
+}
diff --git a/vendor/github.com/google/pprof/profile/legacy_java_profile.go b/vendor/github.com/google/pprof/profile/legacy_java_profile.go
new file mode 100644
index 000000000..91f45e53c
--- /dev/null
+++ b/vendor/github.com/google/pprof/profile/legacy_java_profile.go
@@ -0,0 +1,315 @@
+// Copyright 2014 Google Inc. All Rights Reserved.
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+// This file implements parsers to convert java legacy profiles into
+// the profile.proto format.
+
+package profile
+
+import (
+ "bytes"
+ "fmt"
+ "io"
+ "path/filepath"
+ "regexp"
+ "strconv"
+ "strings"
+)
+
+var (
+ attributeRx = regexp.MustCompile(`([\w ]+)=([\w ]+)`)
+ javaSampleRx = regexp.MustCompile(` *(\d+) +(\d+) +@ +([ x0-9a-f]*)`)
+ javaLocationRx = regexp.MustCompile(`^\s*0x([[:xdigit:]]+)\s+(.*)\s*$`)
+ javaLocationFileLineRx = regexp.MustCompile(`^(.*)\s+\((.+):(-?[[:digit:]]+)\)$`)
+ javaLocationPathRx = regexp.MustCompile(`^(.*)\s+\((.*)\)$`)
+)
+
+// javaCPUProfile returns a new Profile from profilez data.
+// b is the profile bytes after the header, period is the profiling
+// period, and parse is a function to parse 8-byte chunks from the
+// profile in its native endianness.
+func javaCPUProfile(b []byte, period int64, parse func(b []byte) (uint64, []byte)) (*Profile, error) {
+ p := &Profile{
+ Period: period * 1000,
+ PeriodType: &ValueType{Type: "cpu", Unit: "nanoseconds"},
+ SampleType: []*ValueType{{Type: "samples", Unit: "count"}, {Type: "cpu", Unit: "nanoseconds"}},
+ }
+ var err error
+ var locs map[uint64]*Location
+ if b, locs, err = parseCPUSamples(b, parse, false, p); err != nil {
+ return nil, err
+ }
+
+ if err = parseJavaLocations(b, locs, p); err != nil {
+ return nil, err
+ }
+
+ // Strip out addresses for better merge.
+ if err = p.Aggregate(true, true, true, true, false); err != nil {
+ return nil, err
+ }
+
+ return p, nil
+}
+
+// parseJavaProfile returns a new profile from heapz or contentionz
+// data. b is the profile bytes after the header.
+func parseJavaProfile(b []byte) (*Profile, error) {
+ h := bytes.SplitAfterN(b, []byte("\n"), 2)
+ if len(h) < 2 {
+ return nil, errUnrecognized
+ }
+
+ p := &Profile{
+ PeriodType: &ValueType{},
+ }
+ header := string(bytes.TrimSpace(h[0]))
+
+ var err error
+ var pType string
+ switch header {
+ case "--- heapz 1 ---":
+ pType = "heap"
+ case "--- contentionz 1 ---":
+ pType = "contention"
+ default:
+ return nil, errUnrecognized
+ }
+
+ if b, err = parseJavaHeader(pType, h[1], p); err != nil {
+ return nil, err
+ }
+ var locs map[uint64]*Location
+ if b, locs, err = parseJavaSamples(pType, b, p); err != nil {
+ return nil, err
+ }
+ if err = parseJavaLocations(b, locs, p); err != nil {
+ return nil, err
+ }
+
+ // Strip out addresses for better merge.
+ if err = p.Aggregate(true, true, true, true, false); err != nil {
+ return nil, err
+ }
+
+ return p, nil
+}
+
+// parseJavaHeader parses the attribute section on a java profile and
+// populates a profile. Returns the remainder of the buffer after all
+// attributes.
+func parseJavaHeader(pType string, b []byte, p *Profile) ([]byte, error) {
+ nextNewLine := bytes.IndexByte(b, byte('\n'))
+ for nextNewLine != -1 {
+ line := string(bytes.TrimSpace(b[0:nextNewLine]))
+ if line != "" {
+ h := attributeRx.FindStringSubmatch(line)
+ if h == nil {
+ // Not a valid attribute, exit.
+ return b, nil
+ }
+
+ attribute, value := strings.TrimSpace(h[1]), strings.TrimSpace(h[2])
+ var err error
+ switch pType + "/" + attribute {
+ case "heap/format", "cpu/format", "contention/format":
+ if value != "java" {
+ return nil, errUnrecognized
+ }
+ case "heap/resolution":
+ p.SampleType = []*ValueType{
+ {Type: "inuse_objects", Unit: "count"},
+ {Type: "inuse_space", Unit: value},
+ }
+ case "contention/resolution":
+ p.SampleType = []*ValueType{
+ {Type: "contentions", Unit: "count"},
+ {Type: "delay", Unit: value},
+ }
+ case "contention/sampling period":
+ p.PeriodType = &ValueType{
+ Type: "contentions", Unit: "count",
+ }
+ if p.Period, err = strconv.ParseInt(value, 0, 64); err != nil {
+ return nil, fmt.Errorf("failed to parse attribute %s: %v", line, err)
+ }
+ case "contention/ms since reset":
+ millis, err := strconv.ParseInt(value, 0, 64)
+ if err != nil {
+ return nil, fmt.Errorf("failed to parse attribute %s: %v", line, err)
+ }
+ p.DurationNanos = millis * 1000 * 1000
+ default:
+ return nil, errUnrecognized
+ }
+ }
+ // Grab next line.
+ b = b[nextNewLine+1:]
+ nextNewLine = bytes.IndexByte(b, byte('\n'))
+ }
+ return b, nil
+}
+
+// parseJavaSamples parses the samples from a java profile and
+// populates the Samples in a profile. Returns the remainder of the
+// buffer after the samples.
+func parseJavaSamples(pType string, b []byte, p *Profile) ([]byte, map[uint64]*Location, error) {
+ nextNewLine := bytes.IndexByte(b, byte('\n'))
+ locs := make(map[uint64]*Location)
+ for nextNewLine != -1 {
+ line := string(bytes.TrimSpace(b[0:nextNewLine]))
+ if line != "" {
+ sample := javaSampleRx.FindStringSubmatch(line)
+ if sample == nil {
+ // Not a valid sample, exit.
+ return b, locs, nil
+ }
+
+ // Java profiles have data/fields inverted compared to other
+ // profile types.
+ var err error
+ value1, value2, value3 := sample[2], sample[1], sample[3]
+ addrs, err := parseHexAddresses(value3)
+ if err != nil {
+ return nil, nil, fmt.Errorf("malformed sample: %s: %v", line, err)
+ }
+
+ var sloc []*Location
+ for _, addr := range addrs {
+ loc := locs[addr]
+ if locs[addr] == nil {
+ loc = &Location{
+ Address: addr,
+ }
+ p.Location = append(p.Location, loc)
+ locs[addr] = loc
+ }
+ sloc = append(sloc, loc)
+ }
+ s := &Sample{
+ Value: make([]int64, 2),
+ Location: sloc,
+ }
+
+ if s.Value[0], err = strconv.ParseInt(value1, 0, 64); err != nil {
+ return nil, nil, fmt.Errorf("parsing sample %s: %v", line, err)
+ }
+ if s.Value[1], err = strconv.ParseInt(value2, 0, 64); err != nil {
+ return nil, nil, fmt.Errorf("parsing sample %s: %v", line, err)
+ }
+
+ switch pType {
+ case "heap":
+ const javaHeapzSamplingRate = 524288 // 512K
+ if s.Value[0] == 0 {
+ return nil, nil, fmt.Errorf("parsing sample %s: second value must be non-zero", line)
+ }
+ s.NumLabel = map[string][]int64{"bytes": {s.Value[1] / s.Value[0]}}
+ s.Value[0], s.Value[1] = scaleHeapSample(s.Value[0], s.Value[1], javaHeapzSamplingRate)
+ case "contention":
+ if period := p.Period; period != 0 {
+ s.Value[0] = s.Value[0] * p.Period
+ s.Value[1] = s.Value[1] * p.Period
+ }
+ }
+ p.Sample = append(p.Sample, s)
+ }
+ // Grab next line.
+ b = b[nextNewLine+1:]
+ nextNewLine = bytes.IndexByte(b, byte('\n'))
+ }
+ return b, locs, nil
+}
+
+// parseJavaLocations parses the location information in a java
+// profile and populates the Locations in a profile. It uses the
+// location addresses from the profile as the ID of each
+// location.
+func parseJavaLocations(b []byte, locs map[uint64]*Location, p *Profile) error {
+ r := bytes.NewBuffer(b)
+ fns := make(map[string]*Function)
+ for {
+ line, err := r.ReadString('\n')
+ if err != nil {
+ if err != io.EOF {
+ return err
+ }
+ if line == "" {
+ break
+ }
+ }
+
+ if line = strings.TrimSpace(line); line == "" {
+ continue
+ }
+
+ jloc := javaLocationRx.FindStringSubmatch(line)
+ if len(jloc) != 3 {
+ continue
+ }
+ addr, err := strconv.ParseUint(jloc[1], 16, 64)
+ if err != nil {
+ return fmt.Errorf("parsing sample %s: %v", line, err)
+ }
+ loc := locs[addr]
+ if loc == nil {
+ // Unused/unseen
+ continue
+ }
+ var lineFunc, lineFile string
+ var lineNo int64
+
+ if fileLine := javaLocationFileLineRx.FindStringSubmatch(jloc[2]); len(fileLine) == 4 {
+ // Found a line of the form: "function (file:line)"
+ lineFunc, lineFile = fileLine[1], fileLine[2]
+ if n, err := strconv.ParseInt(fileLine[3], 10, 64); err == nil && n > 0 {
+ lineNo = n
+ }
+ } else if filePath := javaLocationPathRx.FindStringSubmatch(jloc[2]); len(filePath) == 3 {
+ // If there's not a file:line, it's a shared library path.
+ // The path isn't interesting, so just give the .so.
+ lineFunc, lineFile = filePath[1], filepath.Base(filePath[2])
+ } else if strings.Contains(jloc[2], "generated stub/JIT") {
+ lineFunc = "STUB"
+ } else {
+ // Treat whole line as the function name. This is used by the
+ // java agent for internal states such as "GC" or "VM".
+ lineFunc = jloc[2]
+ }
+ fn := fns[lineFunc]
+
+ if fn == nil {
+ fn = &Function{
+ Name: lineFunc,
+ SystemName: lineFunc,
+ Filename: lineFile,
+ }
+ fns[lineFunc] = fn
+ p.Function = append(p.Function, fn)
+ }
+ loc.Line = []Line{
+ {
+ Function: fn,
+ Line: lineNo,
+ },
+ }
+ loc.Address = 0
+ }
+
+ p.remapLocationIDs()
+ p.remapFunctionIDs()
+ p.remapMappingIDs()
+
+ return nil
+}
diff --git a/vendor/github.com/google/pprof/profile/legacy_profile.go b/vendor/github.com/google/pprof/profile/legacy_profile.go
new file mode 100644
index 000000000..8d07fd6c2
--- /dev/null
+++ b/vendor/github.com/google/pprof/profile/legacy_profile.go
@@ -0,0 +1,1228 @@
+// Copyright 2014 Google Inc. All Rights Reserved.
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+// This file implements parsers to convert legacy profiles into the
+// profile.proto format.
+
+package profile
+
+import (
+ "bufio"
+ "bytes"
+ "fmt"
+ "io"
+ "math"
+ "regexp"
+ "strconv"
+ "strings"
+)
+
+var (
+ countStartRE = regexp.MustCompile(`\A(\S+) profile: total \d+\z`)
+ countRE = regexp.MustCompile(`\A(\d+) @(( 0x[0-9a-f]+)+)\z`)
+
+ heapHeaderRE = regexp.MustCompile(`heap profile: *(\d+): *(\d+) *\[ *(\d+): *(\d+) *\] *@ *(heap[_a-z0-9]*)/?(\d*)`)
+ heapSampleRE = regexp.MustCompile(`(-?\d+): *(-?\d+) *\[ *(\d+): *(\d+) *] @([ x0-9a-f]*)`)
+
+ contentionSampleRE = regexp.MustCompile(`(\d+) *(\d+) @([ x0-9a-f]*)`)
+
+ hexNumberRE = regexp.MustCompile(`0x[0-9a-f]+`)
+
+ growthHeaderRE = regexp.MustCompile(`heap profile: *(\d+): *(\d+) *\[ *(\d+): *(\d+) *\] @ growthz?`)
+
+ fragmentationHeaderRE = regexp.MustCompile(`heap profile: *(\d+): *(\d+) *\[ *(\d+): *(\d+) *\] @ fragmentationz?`)
+
+ threadzStartRE = regexp.MustCompile(`--- threadz \d+ ---`)
+ threadStartRE = regexp.MustCompile(`--- Thread ([[:xdigit:]]+) \(name: (.*)/(\d+)\) stack: ---`)
+
+ // Regular expressions to parse process mappings. Support the format used by Linux /proc/.../maps and other tools.
+ // Recommended format:
+ // Start End object file name offset(optional) linker build id
+ // 0x40000-0x80000 /path/to/binary (@FF00) abc123456
+ spaceDigits = `\s+[[:digit:]]+`
+ hexPair = `\s+[[:xdigit:]]+:[[:xdigit:]]+`
+ oSpace = `\s*`
+ // Capturing expressions.
+ cHex = `(?:0x)?([[:xdigit:]]+)`
+ cHexRange = `\s*` + cHex + `[\s-]?` + oSpace + cHex + `:?`
+ cSpaceString = `(?:\s+(\S+))?`
+ cSpaceHex = `(?:\s+([[:xdigit:]]+))?`
+ cSpaceAtOffset = `(?:\s+\(@([[:xdigit:]]+)\))?`
+ cPerm = `(?:\s+([-rwxp]+))?`
+
+ procMapsRE = regexp.MustCompile(`^` + cHexRange + cPerm + cSpaceHex + hexPair + spaceDigits + cSpaceString)
+ briefMapsRE = regexp.MustCompile(`^` + cHexRange + cPerm + cSpaceString + cSpaceAtOffset + cSpaceHex)
+
+ // Regular expression to parse log data, of the form:
+ // ... file:line] msg...
+ logInfoRE = regexp.MustCompile(`^[^\[\]]+:[0-9]+]\s`)
+)
+
+func isSpaceOrComment(line string) bool {
+ trimmed := strings.TrimSpace(line)
+ return len(trimmed) == 0 || trimmed[0] == '#'
+}
+
+// parseGoCount parses a Go count profile (e.g., threadcreate or
+// goroutine) and returns a new Profile.
+func parseGoCount(b []byte) (*Profile, error) {
+ s := bufio.NewScanner(bytes.NewBuffer(b))
+ // Skip comments at the beginning of the file.
+ for s.Scan() && isSpaceOrComment(s.Text()) {
+ }
+ if err := s.Err(); err != nil {
+ return nil, err
+ }
+ m := countStartRE.FindStringSubmatch(s.Text())
+ if m == nil {
+ return nil, errUnrecognized
+ }
+ profileType := m[1]
+ p := &Profile{
+ PeriodType: &ValueType{Type: profileType, Unit: "count"},
+ Period: 1,
+ SampleType: []*ValueType{{Type: profileType, Unit: "count"}},
+ }
+ locations := make(map[uint64]*Location)
+ for s.Scan() {
+ line := s.Text()
+ if isSpaceOrComment(line) {
+ continue
+ }
+ if strings.HasPrefix(line, "---") {
+ break
+ }
+ m := countRE.FindStringSubmatch(line)
+ if m == nil {
+ return nil, errMalformed
+ }
+ n, err := strconv.ParseInt(m[1], 0, 64)
+ if err != nil {
+ return nil, errMalformed
+ }
+ fields := strings.Fields(m[2])
+ locs := make([]*Location, 0, len(fields))
+ for _, stk := range fields {
+ addr, err := strconv.ParseUint(stk, 0, 64)
+ if err != nil {
+ return nil, errMalformed
+ }
+ // Adjust all frames by -1 to land on top of the call instruction.
+ addr--
+ loc := locations[addr]
+ if loc == nil {
+ loc = &Location{
+ Address: addr,
+ }
+ locations[addr] = loc
+ p.Location = append(p.Location, loc)
+ }
+ locs = append(locs, loc)
+ }
+ p.Sample = append(p.Sample, &Sample{
+ Location: locs,
+ Value: []int64{n},
+ })
+ }
+ if err := s.Err(); err != nil {
+ return nil, err
+ }
+
+ if err := parseAdditionalSections(s, p); err != nil {
+ return nil, err
+ }
+ return p, nil
+}
+
+// remapLocationIDs ensures there is a location for each address
+// referenced by a sample, and remaps the samples to point to the new
+// location ids.
+func (p *Profile) remapLocationIDs() {
+ seen := make(map[*Location]bool, len(p.Location))
+ var locs []*Location
+
+ for _, s := range p.Sample {
+ for _, l := range s.Location {
+ if seen[l] {
+ continue
+ }
+ l.ID = uint64(len(locs) + 1)
+ locs = append(locs, l)
+ seen[l] = true
+ }
+ }
+ p.Location = locs
+}
+
+func (p *Profile) remapFunctionIDs() {
+ seen := make(map[*Function]bool, len(p.Function))
+ var fns []*Function
+
+ for _, l := range p.Location {
+ for _, ln := range l.Line {
+ fn := ln.Function
+ if fn == nil || seen[fn] {
+ continue
+ }
+ fn.ID = uint64(len(fns) + 1)
+ fns = append(fns, fn)
+ seen[fn] = true
+ }
+ }
+ p.Function = fns
+}
+
+// remapMappingIDs matches location addresses with existing mappings
+// and updates them appropriately. This is O(N*M), if this ever shows
+// up as a bottleneck, evaluate sorting the mappings and doing a
+// binary search, which would make it O(N*log(M)).
+func (p *Profile) remapMappingIDs() {
+ // Some profile handlers will incorrectly set regions for the main
+ // executable if its section is remapped. Fix them through heuristics.
+
+ if len(p.Mapping) > 0 {
+ // Remove the initial mapping if named '/anon_hugepage' and has a
+ // consecutive adjacent mapping.
+ if m := p.Mapping[0]; strings.HasPrefix(m.File, "/anon_hugepage") {
+ if len(p.Mapping) > 1 && m.Limit == p.Mapping[1].Start {
+ p.Mapping = p.Mapping[1:]
+ }
+ }
+ }
+
+ // Subtract the offset from the start of the main mapping if it
+ // ends up at a recognizable start address.
+ if len(p.Mapping) > 0 {
+ const expectedStart = 0x400000
+ if m := p.Mapping[0]; m.Start-m.Offset == expectedStart {
+ m.Start = expectedStart
+ m.Offset = 0
+ }
+ }
+
+ // Associate each location with an address to the corresponding
+ // mapping. Create fake mapping if a suitable one isn't found.
+ var fake *Mapping
+nextLocation:
+ for _, l := range p.Location {
+ a := l.Address
+ if l.Mapping != nil || a == 0 {
+ continue
+ }
+ for _, m := range p.Mapping {
+ if m.Start <= a && a < m.Limit {
+ l.Mapping = m
+ continue nextLocation
+ }
+ }
+ // Work around legacy handlers failing to encode the first
+ // part of mappings split into adjacent ranges.
+ for _, m := range p.Mapping {
+ if m.Offset != 0 && m.Start-m.Offset <= a && a < m.Start {
+ m.Start -= m.Offset
+ m.Offset = 0
+ l.Mapping = m
+ continue nextLocation
+ }
+ }
+ // If there is still no mapping, create a fake one.
+ // This is important for the Go legacy handler, which produced
+ // no mappings.
+ if fake == nil {
+ fake = &Mapping{
+ ID: 1,
+ Limit: ^uint64(0),
+ }
+ p.Mapping = append(p.Mapping, fake)
+ }
+ l.Mapping = fake
+ }
+
+ // Reset all mapping IDs.
+ for i, m := range p.Mapping {
+ m.ID = uint64(i + 1)
+ }
+}
+
+var cpuInts = []func([]byte) (uint64, []byte){
+ get32l,
+ get32b,
+ get64l,
+ get64b,
+}
+
+func get32l(b []byte) (uint64, []byte) {
+ if len(b) < 4 {
+ return 0, nil
+ }
+ return uint64(b[0]) | uint64(b[1])<<8 | uint64(b[2])<<16 | uint64(b[3])<<24, b[4:]
+}
+
+func get32b(b []byte) (uint64, []byte) {
+ if len(b) < 4 {
+ return 0, nil
+ }
+ return uint64(b[3]) | uint64(b[2])<<8 | uint64(b[1])<<16 | uint64(b[0])<<24, b[4:]
+}
+
+func get64l(b []byte) (uint64, []byte) {
+ if len(b) < 8 {
+ return 0, nil
+ }
+ return uint64(b[0]) | uint64(b[1])<<8 | uint64(b[2])<<16 | uint64(b[3])<<24 | uint64(b[4])<<32 | uint64(b[5])<<40 | uint64(b[6])<<48 | uint64(b[7])<<56, b[8:]
+}
+
+func get64b(b []byte) (uint64, []byte) {
+ if len(b) < 8 {
+ return 0, nil
+ }
+ return uint64(b[7]) | uint64(b[6])<<8 | uint64(b[5])<<16 | uint64(b[4])<<24 | uint64(b[3])<<32 | uint64(b[2])<<40 | uint64(b[1])<<48 | uint64(b[0])<<56, b[8:]
+}
+
+// parseCPU parses a profilez legacy profile and returns a newly
+// populated Profile.
+//
+// The general format for profilez samples is a sequence of words in
+// binary format. The first words are a header with the following data:
+//
+// 1st word -- 0
+// 2nd word -- 3
+// 3rd word -- 0 if a c++ application, 1 if a java application.
+// 4th word -- Sampling period (in microseconds).
+// 5th word -- Padding.
+func parseCPU(b []byte) (*Profile, error) {
+ var parse func([]byte) (uint64, []byte)
+ var n1, n2, n3, n4, n5 uint64
+ for _, parse = range cpuInts {
+ var tmp []byte
+ n1, tmp = parse(b)
+ n2, tmp = parse(tmp)
+ n3, tmp = parse(tmp)
+ n4, tmp = parse(tmp)
+ n5, tmp = parse(tmp)
+
+ if tmp != nil && n1 == 0 && n2 == 3 && n3 == 0 && n4 > 0 && n5 == 0 {
+ b = tmp
+ return cpuProfile(b, int64(n4), parse)
+ }
+ if tmp != nil && n1 == 0 && n2 == 3 && n3 == 1 && n4 > 0 && n5 == 0 {
+ b = tmp
+ return javaCPUProfile(b, int64(n4), parse)
+ }
+ }
+ return nil, errUnrecognized
+}
+
+// cpuProfile returns a new Profile from C++ profilez data.
+// b is the profile bytes after the header, period is the profiling
+// period, and parse is a function to parse 8-byte chunks from the
+// profile in its native endianness.
+func cpuProfile(b []byte, period int64, parse func(b []byte) (uint64, []byte)) (*Profile, error) {
+ p := &Profile{
+ Period: period * 1000,
+ PeriodType: &ValueType{Type: "cpu", Unit: "nanoseconds"},
+ SampleType: []*ValueType{
+ {Type: "samples", Unit: "count"},
+ {Type: "cpu", Unit: "nanoseconds"},
+ },
+ }
+ var err error
+ if b, _, err = parseCPUSamples(b, parse, true, p); err != nil {
+ return nil, err
+ }
+
+ // If *most* samples have the same second-to-the-bottom frame, it
+ // strongly suggests that it is an uninteresting artifact of
+ // measurement -- a stack frame pushed by the signal handler. The
+ // bottom frame is always correct as it is picked up from the signal
+ // structure, not the stack. Check if this is the case and if so,
+ // remove.
+
+ // Remove up to two frames.
+ maxiter := 2
+ // Allow one different sample for this many samples with the same
+ // second-to-last frame.
+ similarSamples := 32
+ margin := len(p.Sample) / similarSamples
+
+ for iter := 0; iter < maxiter; iter++ {
+ addr1 := make(map[uint64]int)
+ for _, s := range p.Sample {
+ if len(s.Location) > 1 {
+ a := s.Location[1].Address
+ addr1[a] = addr1[a] + 1
+ }
+ }
+
+ for id1, count := range addr1 {
+ if count >= len(p.Sample)-margin {
+ // Found uninteresting frame, strip it out from all samples
+ for _, s := range p.Sample {
+ if len(s.Location) > 1 && s.Location[1].Address == id1 {
+ s.Location = append(s.Location[:1], s.Location[2:]...)
+ }
+ }
+ break
+ }
+ }
+ }
+
+ if err := p.ParseMemoryMap(bytes.NewBuffer(b)); err != nil {
+ return nil, err
+ }
+
+ cleanupDuplicateLocations(p)
+ return p, nil
+}
+
+func cleanupDuplicateLocations(p *Profile) {
+ // The profile handler may duplicate the leaf frame, because it gets
+ // its address both from stack unwinding and from the signal
+ // context. Detect this and delete the duplicate, which has been
+ // adjusted by -1. The leaf address should not be adjusted as it is
+ // not a call.
+ for _, s := range p.Sample {
+ if len(s.Location) > 1 && s.Location[0].Address == s.Location[1].Address+1 {
+ s.Location = append(s.Location[:1], s.Location[2:]...)
+ }
+ }
+}
+
// parseCPUSamples parses a collection of profilez samples from a
// profile.
//
// profilez samples are a repeated sequence of stack frames of the
// form:
//
// 1st word -- The number of times this stack was encountered.
// 2nd word -- The size of the stack (StackSize).
// 3rd word -- The first address on the stack.
// ...
// StackSize + 2 -- The last address on the stack
//
// The last stack trace is of the form:
//
// 1st word -- 0
// 2nd word -- 1
// 3rd word -- 0
//
// Addresses from stack traces may point to the next instruction after
// each call. Optionally adjust by -1 to land somewhere on the actual
// call (except for the leaf, which is not a call).
//
// parse extracts one machine word from b and returns the remaining
// bytes; a nil remainder signals a malformed input. The returned map
// indexes the Locations created, keyed by (possibly adjusted) address,
// so the caller can reuse or annotate them.
func parseCPUSamples(b []byte, parse func(b []byte) (uint64, []byte), adjust bool, p *Profile) ([]byte, map[uint64]*Location, error) {
	locs := make(map[uint64]*Location)
	for len(b) > 0 {
		var count, nstk uint64
		count, b = parse(b)
		nstk, b = parse(b)
		// A word is at least 4 bytes, so a stack longer than len(b)/4
		// words cannot fit in the remaining input.
		if b == nil || nstk > uint64(len(b)/4) {
			return nil, nil, errUnrecognized
		}
		var sloc []*Location
		addrs := make([]uint64, nstk)
		for i := 0; i < int(nstk); i++ {
			addrs[i], b = parse(b)
		}

		if count == 0 && nstk == 1 && addrs[0] == 0 {
			// End of data marker
			break
		}
		for i, addr := range addrs {
			// The leaf (i == 0) is not a return address and is never adjusted.
			if adjust && i > 0 {
				addr--
			}
			loc := locs[addr]
			if loc == nil {
				loc = &Location{
					Address: addr,
				}
				locs[addr] = loc
				p.Location = append(p.Location, loc)
			}
			sloc = append(sloc, loc)
		}
		// Record both the raw count and the sampled CPU time
		// (count * sampling period).
		p.Sample = append(p.Sample,
			&Sample{
				Value:    []int64{int64(count), int64(count) * p.Period},
				Location: sloc,
			})
	}
	// Reached the end without finding the EOD marker.
	return b, locs, nil
}
+
// parseHeap parses a heapz legacy or a growthz profile and
// returns a newly populated Profile.
func parseHeap(b []byte) (p *Profile, err error) {
	s := bufio.NewScanner(bytes.NewBuffer(b))
	if !s.Scan() {
		if err := s.Err(); err != nil {
			return nil, err
		}
		return nil, errUnrecognized
	}
	p = &Profile{}

	sampling := ""
	hasAlloc := false

	// The first line identifies the profile flavor (heapz, growthz or
	// fragmentationz); only heapz headers carry a sampling period.
	line := s.Text()
	p.PeriodType = &ValueType{Type: "space", Unit: "bytes"}
	if header := heapHeaderRE.FindStringSubmatch(line); header != nil {
		sampling, p.Period, hasAlloc, err = parseHeapHeader(line)
		if err != nil {
			return nil, err
		}
	} else if header = growthHeaderRE.FindStringSubmatch(line); header != nil {
		p.Period = 1
	} else if header = fragmentationHeaderRE.FindStringSubmatch(line); header != nil {
		p.Period = 1
	} else {
		return nil, errUnrecognized
	}

	if hasAlloc {
		// Put alloc before inuse so that default pprof selection
		// will prefer inuse_space.
		p.SampleType = []*ValueType{
			{Type: "alloc_objects", Unit: "count"},
			{Type: "alloc_space", Unit: "bytes"},
			{Type: "inuse_objects", Unit: "count"},
			{Type: "inuse_space", Unit: "bytes"},
		}
	} else {
		p.SampleType = []*ValueType{
			{Type: "objects", Unit: "count"},
			{Type: "space", Unit: "bytes"},
		}
	}

	// One Location per distinct (adjusted) address, shared across samples.
	locs := make(map[uint64]*Location)
	for s.Scan() {
		line := strings.TrimSpace(s.Text())

		if isSpaceOrComment(line) {
			continue
		}

		if isMemoryMapSentinel(line) {
			break
		}

		value, blocksize, addrs, err := parseHeapSample(line, p.Period, sampling, hasAlloc)
		if err != nil {
			return nil, err
		}

		var sloc []*Location
		for _, addr := range addrs {
			// Addresses from stack traces point to the next instruction after
			// each call. Adjust by -1 to land somewhere on the actual call.
			addr--
			loc := locs[addr]
			if locs[addr] == nil {
				loc = &Location{
					Address: addr,
				}
				p.Location = append(p.Location, loc)
				locs[addr] = loc
			}
			sloc = append(sloc, loc)
		}

		p.Sample = append(p.Sample, &Sample{
			Value:    value,
			Location: sloc,
			NumLabel: map[string][]int64{"bytes": {blocksize}},
		})
	}
	if err := s.Err(); err != nil {
		return nil, err
	}
	// Whatever follows the samples (e.g. the memory map) is parsed here.
	if err := parseAdditionalSections(s, p); err != nil {
		return nil, err
	}
	return p, nil
}
+
// parseHeapHeader parses the first line of a heapz profile. It returns
// the sampling scheme ("v2" or "" for legacy), the sampling period in
// bytes, whether the profile carries separate alloc_* columns, and
// errUnrecognized for headers that do not match heapHeaderRE.
func parseHeapHeader(line string) (sampling string, period int64, hasAlloc bool, err error) {
	header := heapHeaderRE.FindStringSubmatch(line)
	if header == nil {
		return "", 0, false, errUnrecognized
	}

	// Group 6 is the optional sampling period embedded in the header.
	if len(header[6]) > 0 {
		if period, err = strconv.ParseInt(header[6], 10, 64); err != nil {
			return "", 0, false, errUnrecognized
		}
	}

	// If the alloc totals (groups 3/4) differ from the inuse totals
	// (groups 1/2) and are nonzero, alloc values are tracked separately.
	if (header[3] != header[1] && header[3] != "0") || (header[4] != header[2] && header[4] != "0") {
		hasAlloc = true
	}

	switch header[5] {
	case "heapz_v2", "heap_v2":
		return "v2", period, hasAlloc, nil
	case "heapprofile":
		return "", 1, hasAlloc, nil
	case "heap":
		// NOTE(review): the "heap" header apparently encodes twice the
		// effective sampling period, hence the halving — confirm upstream.
		return "v2", period / 2, hasAlloc, nil
	default:
		return "", 0, false, errUnrecognized
	}
}
+
// parseHeapSample parses a single row from a heap profile into a new Sample.
// rate is the profile's sampling period, sampling selects the "v2" scaling
// scheme, and includeAlloc prepends the allocation columns (regex groups 3
// and 4) ahead of the inuse columns (groups 1 and 2). It returns the sample
// values, the average block size, and the raw stack addresses.
func parseHeapSample(line string, rate int64, sampling string, includeAlloc bool) (value []int64, blocksize int64, addrs []uint64, err error) {
	sampleData := heapSampleRE.FindStringSubmatch(line)
	if len(sampleData) != 6 {
		return nil, 0, nil, fmt.Errorf("unexpected number of sample values: got %d, want 6", len(sampleData))
	}

	// This is a local-scoped helper function to avoid needing to pass
	// around rate, sampling and many return parameters.
	addValues := func(countString, sizeString string, label string) error {
		count, err := strconv.ParseInt(countString, 10, 64)
		if err != nil {
			return fmt.Errorf("malformed sample: %s: %v", line, err)
		}
		size, err := strconv.ParseInt(sizeString, 10, 64)
		if err != nil {
			return fmt.Errorf("malformed sample: %s: %v", line, err)
		}
		if count == 0 && size != 0 {
			return fmt.Errorf("%s count was 0 but %s bytes was %d", label, label, size)
		}
		if count != 0 {
			// Average block size for this stack, computed from the raw
			// (pre-scaling) values.
			blocksize = size / count
			if sampling == "v2" {
				count, size = scaleHeapSample(count, size, rate)
			}
		}
		value = append(value, count, size)
		return nil
	}

	if includeAlloc {
		if err := addValues(sampleData[3], sampleData[4], "allocation"); err != nil {
			return nil, 0, nil, err
		}
	}

	if err := addValues(sampleData[1], sampleData[2], "inuse"); err != nil {
		return nil, 0, nil, err
	}

	// Group 5 holds the stack as a space-separated list of hex addresses.
	addrs, err = parseHexAddresses(sampleData[5])
	if err != nil {
		return nil, 0, nil, fmt.Errorf("malformed sample: %s: %v", line, err)
	}

	return value, blocksize, addrs, nil
}
+
+// parseHexAddresses extracts hex numbers from a string, attempts to convert
+// each to an unsigned 64-bit number and returns the resulting numbers as a
+// slice, or an error if the string contains hex numbers which are too large to
+// handle (which means a malformed profile).
+func parseHexAddresses(s string) ([]uint64, error) {
+ hexStrings := hexNumberRE.FindAllString(s, -1)
+ var addrs []uint64
+ for _, s := range hexStrings {
+ if addr, err := strconv.ParseUint(s, 0, 64); err == nil {
+ addrs = append(addrs, addr)
+ } else {
+ return nil, fmt.Errorf("failed to parse as hex 64-bit number: %s", s)
+ }
+ }
+ return addrs, nil
+}
+
// scaleHeapSample adjusts the data from a heapz Sample to
// account for its probability of appearing in the collected
// data. heapz profiles are a sampling of the memory allocations
// requests in a program. We estimate the unsampled value by dividing
// each collected sample by its probability of appearing in the
// profile. heapz v2 profiles rely on a poisson process to determine
// which samples to collect, based on the desired average collection
// rate R. The probability of a sample of size S to appear in that
// profile is 1-exp(-S/R).
func scaleHeapSample(count, size, rate int64) (int64, int64) {
	switch {
	case count == 0 || size == 0:
		return 0, 0
	case rate <= 1:
		// rate==1 means every allocation was sampled, so no scaling is
		// needed; a rate below 1 is treated as unknown and left as-is.
		return count, size
	}

	avgSize := float64(size) / float64(count)
	scale := 1 / (1 - math.Exp(-avgSize/float64(rate)))
	return int64(float64(count) * scale), int64(float64(size) * scale)
}
+
// parseContention parses a mutex or contention profile. There are 2 cases:
// "--- contentionz " for legacy C++ profiles (and backwards compatibility)
// "--- mutex:" or "--- contention:" for profiles generated by the Go runtime.
func parseContention(b []byte) (*Profile, error) {
	s := bufio.NewScanner(bytes.NewBuffer(b))
	if !s.Scan() {
		if err := s.Err(); err != nil {
			return nil, err
		}
		return nil, errUnrecognized
	}

	// Accept any of the known header lines; anything else is not a
	// contention profile.
	switch l := s.Text(); {
	case strings.HasPrefix(l, "--- contentionz "):
	case strings.HasPrefix(l, "--- mutex:"):
	case strings.HasPrefix(l, "--- contention:"):
	default:
		return nil, errUnrecognized
	}

	p := &Profile{
		PeriodType: &ValueType{Type: "contentions", Unit: "count"},
		Period:     1,
		SampleType: []*ValueType{
			{Type: "contentions", Unit: "count"},
			{Type: "delay", Unit: "nanoseconds"},
		},
	}

	var cpuHz int64
	// Parse text of the form "attribute = value" before the samples.
	const delimiter = "="
	for s.Scan() {
		line := s.Text()
		if line = strings.TrimSpace(line); isSpaceOrComment(line) {
			continue
		}
		if strings.HasPrefix(line, "---") {
			break
		}
		attr := strings.SplitN(line, delimiter, 2)
		if len(attr) != 2 {
			break
		}
		key, val := strings.TrimSpace(attr[0]), strings.TrimSpace(attr[1])
		var err error
		switch key {
		case "cycles/second":
			if cpuHz, err = strconv.ParseInt(val, 0, 64); err != nil {
				return nil, errUnrecognized
			}
		case "sampling period":
			if p.Period, err = strconv.ParseInt(val, 0, 64); err != nil {
				return nil, errUnrecognized
			}
		case "ms since reset":
			ms, err := strconv.ParseInt(val, 0, 64)
			if err != nil {
				return nil, errUnrecognized
			}
			p.DurationNanos = ms * 1000 * 1000
		case "format":
			// CPP contentionz profiles don't have format.
			return nil, errUnrecognized
		case "resolution":
			// CPP contentionz profiles don't have resolution.
			return nil, errUnrecognized
		case "discarded samples":
		default:
			return nil, errUnrecognized
		}
	}
	if err := s.Err(); err != nil {
		return nil, err
	}

	// Parse sample rows until the next "---" section header. Note that
	// the current line (read by the attribute loop above) is processed
	// before the next Scan.
	locs := make(map[uint64]*Location)
	for {
		line := strings.TrimSpace(s.Text())
		if strings.HasPrefix(line, "---") {
			break
		}
		if !isSpaceOrComment(line) {
			value, addrs, err := parseContentionSample(line, p.Period, cpuHz)
			if err != nil {
				return nil, err
			}
			var sloc []*Location
			for _, addr := range addrs {
				// Addresses from stack traces point to the next instruction after
				// each call. Adjust by -1 to land somewhere on the actual call.
				addr--
				loc := locs[addr]
				if locs[addr] == nil {
					loc = &Location{
						Address: addr,
					}
					p.Location = append(p.Location, loc)
					locs[addr] = loc
				}
				sloc = append(sloc, loc)
			}
			p.Sample = append(p.Sample, &Sample{
				Value:    value,
				Location: sloc,
			})
		}
		if !s.Scan() {
			break
		}
	}
	if err := s.Err(); err != nil {
		return nil, err
	}

	if err := parseAdditionalSections(s, p); err != nil {
		return nil, err
	}

	return p, nil
}
+
// parseContentionSample parses a single row from a contention profile
// into a new Sample. period and cpuHz come from the profile header and
// are used to unsample the raw values. It returns the sample values
// ([contentions, delay]) and the raw stack addresses.
func parseContentionSample(line string, period, cpuHz int64) (value []int64, addrs []uint64, err error) {
	sampleData := contentionSampleRE.FindStringSubmatch(line)
	if sampleData == nil {
		return nil, nil, errUnrecognized
	}

	// Group 1 is the delay, group 2 the contention count (see the value
	// ordering below).
	v1, err := strconv.ParseInt(sampleData[1], 10, 64)
	if err != nil {
		return nil, nil, fmt.Errorf("malformed sample: %s: %v", line, err)
	}
	v2, err := strconv.ParseInt(sampleData[2], 10, 64)
	if err != nil {
		return nil, nil, fmt.Errorf("malformed sample: %s: %v", line, err)
	}

	// Unsample values if period and cpuHz are available.
	// - Delays are scaled to cycles and then to nanoseconds.
	// - Contentions are scaled to cycles.
	if period > 0 {
		if cpuHz > 0 {
			cpuGHz := float64(cpuHz) / 1e9
			v1 = int64(float64(v1) * float64(period) / cpuGHz)
		}
		v2 = v2 * period
	}

	// Value order matches SampleType: contentions first, then delay.
	value = []int64{v2, v1}
	addrs, err = parseHexAddresses(sampleData[3])
	if err != nil {
		return nil, nil, fmt.Errorf("malformed sample: %s: %v", line, err)
	}

	return value, addrs, nil
}
+
// parseThread parses a Threadz profile and returns a new Profile.
func parseThread(b []byte) (*Profile, error) {
	s := bufio.NewScanner(bytes.NewBuffer(b))
	// Skip past comments and empty lines seeking a real header.
	for s.Scan() && isSpaceOrComment(s.Text()) {
	}

	line := s.Text()
	if m := threadzStartRE.FindStringSubmatch(line); m != nil {
		// Advance over initial comments until first stack trace.
		for s.Scan() {
			if line = s.Text(); isMemoryMapSentinel(line) || strings.HasPrefix(line, "-") {
				break
			}
		}
	} else if t := threadStartRE.FindStringSubmatch(line); len(t) != 4 {
		return nil, errUnrecognized
	}

	p := &Profile{
		SampleType: []*ValueType{{Type: "thread", Unit: "count"}},
		PeriodType: &ValueType{Type: "thread", Unit: "count"},
		Period:     1,
	}

	// One Location per distinct (adjusted) address, shared across samples.
	locs := make(map[uint64]*Location)
	// Recognize each thread and populate profile samples.
	for !isMemoryMapSentinel(line) {
		if strings.HasPrefix(line, "---- no stack trace for") {
			break
		}
		if t := threadStartRE.FindStringSubmatch(line); len(t) != 4 {
			return nil, errUnrecognized
		}

		var addrs []uint64
		var err error
		line, addrs, err = parseThreadSample(s)
		if err != nil {
			return nil, err
		}
		if len(addrs) == 0 {
			// We got a --same as previous threads--. Bump counters.
			if len(p.Sample) > 0 {
				s := p.Sample[len(p.Sample)-1]
				s.Value[0]++
			}
			continue
		}

		var sloc []*Location
		for i, addr := range addrs {
			// Addresses from stack traces point to the next instruction after
			// each call. Adjust by -1 to land somewhere on the actual call
			// (except for the leaf, which is not a call).
			if i > 0 {
				addr--
			}
			loc := locs[addr]
			if locs[addr] == nil {
				loc = &Location{
					Address: addr,
				}
				p.Location = append(p.Location, loc)
				locs[addr] = loc
			}
			sloc = append(sloc, loc)
		}

		// Each distinct stack contributes one thread to the count.
		p.Sample = append(p.Sample, &Sample{
			Value:    []int64{1},
			Location: sloc,
		})
	}

	if err := parseAdditionalSections(s, p); err != nil {
		return nil, err
	}

	cleanupDuplicateLocations(p)
	return p, nil
}
+
+// parseThreadSample parses a symbolized or unsymbolized stack trace.
+// Returns the first line after the traceback, the sample (or nil if
+// it hits a 'same-as-previous' marker) and an error.
+func parseThreadSample(s *bufio.Scanner) (nextl string, addrs []uint64, err error) {
+ var line string
+ sameAsPrevious := false
+ for s.Scan() {
+ line = strings.TrimSpace(s.Text())
+ if line == "" {
+ continue
+ }
+
+ if strings.HasPrefix(line, "---") {
+ break
+ }
+ if strings.Contains(line, "same as previous thread") {
+ sameAsPrevious = true
+ continue
+ }
+
+ curAddrs, err := parseHexAddresses(line)
+ if err != nil {
+ return "", nil, fmt.Errorf("malformed sample: %s: %v", line, err)
+ }
+ addrs = append(addrs, curAddrs...)
+ }
+ if err := s.Err(); err != nil {
+ return "", nil, err
+ }
+ if sameAsPrevious {
+ return line, nil, nil
+ }
+ return line, addrs, nil
+}
+
+// parseAdditionalSections parses any additional sections in the
+// profile, ignoring any unrecognized sections.
+func parseAdditionalSections(s *bufio.Scanner, p *Profile) error {
+ for !isMemoryMapSentinel(s.Text()) && s.Scan() {
+ }
+ if err := s.Err(); err != nil {
+ return err
+ }
+ return p.ParseMemoryMapFromScanner(s)
+}
+
+// ParseProcMaps parses a memory map in the format of /proc/self/maps.
+// ParseMemoryMap should be called after setting on a profile to
+// associate locations to the corresponding mapping based on their
+// address.
+func ParseProcMaps(rd io.Reader) ([]*Mapping, error) {
+ s := bufio.NewScanner(rd)
+ return parseProcMapsFromScanner(s)
+}
+
// parseProcMapsFromScanner parses /proc/self/maps-style lines from s,
// silently skipping unrecognized entries. Lines of the form
// "attr=value" register a substitution: "$attr" in later lines is
// replaced by value before parsing.
func parseProcMapsFromScanner(s *bufio.Scanner) ([]*Mapping, error) {
	var mapping []*Mapping

	var attrs []string
	const delimiter = "="
	// Starts as a no-op replacer; rebuilt whenever a new attr=value
	// assignment is seen.
	r := strings.NewReplacer()
	for s.Scan() {
		line := r.Replace(removeLoggingInfo(s.Text()))
		m, err := parseMappingEntry(line)
		if err != nil {
			if err == errUnrecognized {
				// Recognize assignments of the form: attr=value, and replace
				// $attr with value on subsequent mappings.
				if attr := strings.SplitN(line, delimiter, 2); len(attr) == 2 {
					attrs = append(attrs, "$"+strings.TrimSpace(attr[0]), strings.TrimSpace(attr[1]))
					r = strings.NewReplacer(attrs...)
				}
				// Ignore any unrecognized entries
				continue
			}
			return nil, err
		}
		// parseMappingEntry returns (nil, nil) for skippable entries
		// such as non-executable mappings.
		if m == nil {
			continue
		}
		mapping = append(mapping, m)
	}
	if err := s.Err(); err != nil {
		return nil, err
	}
	return mapping, nil
}
+
+// removeLoggingInfo detects and removes log prefix entries generated
+// by the glog package. If no logging prefix is detected, the string
+// is returned unmodified.
+func removeLoggingInfo(line string) string {
+ if match := logInfoRE.FindStringIndex(line); match != nil {
+ return line[match[1]:]
+ }
+ return line
+}
+
// ParseMemoryMap parses a memory map in the format of
// /proc/self/maps, and overrides the mappings in the current profile.
// It renumbers the samples and locations in the profile correspondingly.
func (p *Profile) ParseMemoryMap(rd io.Reader) error {
	// Delegate to the scanner-based variant.
	return p.ParseMemoryMapFromScanner(bufio.NewScanner(rd))
}
+
// ParseMemoryMapFromScanner parses a memory map in the format of
// /proc/self/maps or a variety of legacy format, and overrides the
// mappings in the current profile. It renumbers the samples and
// locations in the profile correspondingly.
func (p *Profile) ParseMemoryMapFromScanner(s *bufio.Scanner) error {
	mapping, err := parseProcMapsFromScanner(s)
	if err != nil {
		return err
	}
	p.Mapping = append(p.Mapping, mapping...)
	// Normalize the mappings, then renumber location/function/mapping
	// IDs so cross-references in the profile stay consistent.
	p.massageMappings()
	p.remapLocationIDs()
	p.remapFunctionIDs()
	p.remapMappingIDs()
	return nil
}
+
// parseMappingEntry parses one memory-map line. It accepts both the
// /proc/self/maps format (procMapsRE) and a briefer legacy format
// (briefMapsRE). It returns (nil, nil) for mappings without execute
// permission, and errUnrecognized for lines matching neither format.
func parseMappingEntry(l string) (*Mapping, error) {
	var start, end, perm, file, offset, buildID string
	if me := procMapsRE.FindStringSubmatch(l); len(me) == 6 {
		start, end, perm, offset, file = me[1], me[2], me[3], me[4], me[5]
	} else if me := briefMapsRE.FindStringSubmatch(l); len(me) == 7 {
		start, end, perm, file, offset, buildID = me[1], me[2], me[3], me[4], me[5], me[6]
	} else {
		return nil, errUnrecognized
	}

	var err error
	mapping := &Mapping{
		File:    file,
		BuildID: buildID,
	}
	if perm != "" && !strings.Contains(perm, "x") {
		// Skip non-executable entries.
		return nil, nil
	}
	// All numeric fields are hexadecimal without a 0x prefix.
	if mapping.Start, err = strconv.ParseUint(start, 16, 64); err != nil {
		return nil, errUnrecognized
	}
	if mapping.Limit, err = strconv.ParseUint(end, 16, 64); err != nil {
		return nil, errUnrecognized
	}
	if offset != "" {
		if mapping.Offset, err = strconv.ParseUint(offset, 16, 64); err != nil {
			return nil, errUnrecognized
		}
	}
	return mapping, nil
}
+
// memoryMapSentinels are the markers that introduce the memory-map
// section of a legacy profile.
var memoryMapSentinels = []string{
	"--- Memory map: ---",
	"MAPPED_LIBRARIES:",
}

// isMemoryMapSentinel returns true if the string contains one of the
// known sentinels for memory map information.
func isMemoryMapSentinel(line string) bool {
	for _, sentinel := range memoryMapSentinels {
		if strings.Contains(line, sentinel) {
			return true
		}
	}
	return false
}
+
// addLegacyFrameInfo sets DropFrames/KeepFrames regexps matching the
// detected legacy profile type (heap, contention, or CPU by default),
// so that known profiler-internal frames can be pruned later.
func (p *Profile) addLegacyFrameInfo() {
	switch {
	case isProfileType(p, heapzSampleTypes):
		p.DropFrames, p.KeepFrames = allocRxStr, allocSkipRxStr
	case isProfileType(p, contentionzSampleTypes):
		p.DropFrames, p.KeepFrames = lockRxStr, ""
	default:
		// Anything else is assumed to be a CPU profile.
		p.DropFrames, p.KeepFrames = cpuProfilerRxStr, ""
	}
}
+
// heapzSampleTypes lists the sample-type name sets that identify a heap
// profile; contentionzSampleTypes does the same for contention
// profiles. Both are consumed by addLegacyFrameInfo via isProfileType.
var heapzSampleTypes = [][]string{
	{"allocations", "size"}, // early Go pprof profiles
	{"objects", "space"},
	{"inuse_objects", "inuse_space"},
	{"alloc_objects", "alloc_space"},
	{"alloc_objects", "alloc_space", "inuse_objects", "inuse_space"}, // Go pprof legacy profiles
}
var contentionzSampleTypes = [][]string{
	{"contentions", "delay"},
}
+
+func isProfileType(p *Profile, types [][]string) bool {
+ st := p.SampleType
+nextType:
+ for _, t := range types {
+ if len(st) != len(t) {
+ continue
+ }
+
+ for i := range st {
+ if st[i].Type != t[i] {
+ continue nextType
+ }
+ }
+ return true
+ }
+ return false
+}
+
// allocRxStr matches allocator entry points whose frames are dropped
// from heap profiles (used as DropFrames by addLegacyFrameInfo).
var allocRxStr = strings.Join([]string{
	// POSIX entry points.
	`calloc`,
	`cfree`,
	`malloc`,
	`free`,
	`memalign`,
	`do_memalign`,
	`(__)?posix_memalign`,
	`pvalloc`,
	`valloc`,
	`realloc`,

	// TC malloc.
	`tcmalloc::.*`,
	`tc_calloc`,
	`tc_cfree`,
	`tc_malloc`,
	`tc_free`,
	`tc_memalign`,
	`tc_posix_memalign`,
	`tc_pvalloc`,
	`tc_valloc`,
	`tc_realloc`,
	`tc_new`,
	`tc_delete`,
	`tc_newarray`,
	`tc_deletearray`,
	`tc_new_nothrow`,
	`tc_newarray_nothrow`,

	// Memory-allocation routines on OS X.
	`malloc_zone_malloc`,
	`malloc_zone_calloc`,
	`malloc_zone_valloc`,
	`malloc_zone_realloc`,
	`malloc_zone_memalign`,
	`malloc_zone_free`,

	// Go runtime
	`runtime\..*`,

	// Other misc. memory allocation routines
	`BaseArena::.*`,
	`(::)?do_malloc_no_errno`,
	`(::)?do_malloc_pages`,
	`(::)?do_malloc`,
	`DoSampledAllocation`,
	`MallocedMemBlock::MallocedMemBlock`,
	`_M_allocate`,
	`__builtin_(vec_)?delete`,
	`__builtin_(vec_)?new`,
	`__gnu_cxx::new_allocator::allocate`,
	`__libc_malloc`,
	`__malloc_alloc_template::allocate`,
	`allocate`,
	`cpp_alloc`,
	`operator new(\[\])?`,
	`simple_alloc::allocate`,
}, `|`)

// allocSkipRxStr matches frames that must be kept (KeepFrames) even
// when allocRxStr would drop them.
var allocSkipRxStr = strings.Join([]string{
	// Preserve Go runtime frames that appear in the middle/bottom of
	// the stack.
	`runtime\.panic`,
	`runtime\.reflectcall`,
	`runtime\.call[0-9]*`,
}, `|`)

// cpuProfilerRxStr matches CPU-profiler handler frames dropped from
// CPU profiles.
var cpuProfilerRxStr = strings.Join([]string{
	`ProfileData::Add`,
	`ProfileData::prof_handler`,
	`CpuProfiler::prof_handler`,
	`__pthread_sighandler`,
	`__restore`,
}, `|`)

// lockRxStr matches locking-infrastructure frames dropped from
// contention profiles.
var lockRxStr = strings.Join([]string{
	`RecordLockProfileData`,
	`(base::)?RecordLockProfileData.*`,
	`(base::)?SubmitMutexProfileData.*`,
	`(base::)?SubmitSpinLockProfileData.*`,
	`(base::Mutex::)?AwaitCommon.*`,
	`(base::Mutex::)?Unlock.*`,
	`(base::Mutex::)?UnlockSlow.*`,
	`(base::Mutex::)?ReaderUnlock.*`,
	`(base::MutexLock::)?~MutexLock.*`,
	`(Mutex::)?AwaitCommon.*`,
	`(Mutex::)?Unlock.*`,
	`(Mutex::)?UnlockSlow.*`,
	`(Mutex::)?ReaderUnlock.*`,
	`(MutexLock::)?~MutexLock.*`,
	`(SpinLock::)?Unlock.*`,
	`(SpinLock::)?SlowUnlock.*`,
	`(SpinLockHolder::)?~SpinLockHolder.*`,
}, `|`)
diff --git a/vendor/github.com/google/pprof/profile/merge.go b/vendor/github.com/google/pprof/profile/merge.go
new file mode 100644
index 000000000..4b66282cb
--- /dev/null
+++ b/vendor/github.com/google/pprof/profile/merge.go
@@ -0,0 +1,667 @@
+// Copyright 2014 Google Inc. All Rights Reserved.
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+package profile
+
+import (
+ "encoding/binary"
+ "fmt"
+ "sort"
+ "strconv"
+ "strings"
+)
+
// Compact performs garbage collection on a profile to remove any
// unreferenced fields. This is useful to reduce the size of a profile
// after samples or locations have been removed.
func (p *Profile) Compact() *Profile {
	// A single-profile merge rebuilds the profile and, per Merge's
	// contract, drops unused samples, locations, functions and mappings.
	p, _ = Merge([]*Profile{p})
	return p
}
+
// Merge merges all the profiles in profs into a single Profile.
// Returns a new profile independent of the input profiles. The merged
// profile is compacted to eliminate unused samples, locations,
// functions and mappings. Profiles must have identical profile sample
// and period types or the merge will fail. profile.Period of the
// resulting profile will be the maximum of all profiles, and
// profile.TimeNanos will be the earliest nonzero one. Merges are
// associative with the caveat of the first profile having some
// specialization in how headers are combined. There may be other
// subtleties now or in the future regarding associativity.
func Merge(srcs []*Profile) (*Profile, error) {
	if len(srcs) == 0 {
		return nil, fmt.Errorf("no profiles to merge")
	}
	p, err := combineHeaders(srcs)
	if err != nil {
		return nil, err
	}

	pm := &profileMerger{
		p:         p,
		samples:   make(map[sampleKey]*Sample, len(srcs[0].Sample)),
		locations: make(map[locationKey]*Location, len(srcs[0].Location)),
		functions: make(map[functionKey]*Function, len(srcs[0].Function)),
		mappings:  make(map[mappingKey]*Mapping, len(srcs[0].Mapping)),
	}

	for _, src := range srcs {
		// Clear the profile-specific hash tables
		pm.locationsByID = makeLocationIDMap(len(src.Location))
		pm.functionsByID = make(map[uint64]*Function, len(src.Function))
		pm.mappingsByID = make(map[uint64]mapInfo, len(src.Mapping))

		if len(pm.mappings) == 0 && len(src.Mapping) > 0 {
			// The Mapping list has the property that the first mapping
			// represents the main binary. Take the first Mapping we see,
			// otherwise the operations below will add mappings in an
			// arbitrary order.
			pm.mapMapping(src.Mapping[0])
		}

		// Zero-valued samples are dropped rather than merged.
		for _, s := range src.Sample {
			if !isZeroSample(s) {
				pm.mapSample(s)
			}
		}
	}

	for _, s := range p.Sample {
		if isZeroSample(s) {
			// If there are any zero samples, re-merge the profile to GC
			// them.
			return Merge([]*Profile{p})
		}
	}

	return p, nil
}
+
+// Normalize normalizes the source profile by multiplying each value in profile by the
+// ratio of the sum of the base profile's values of that sample type to the sum of the
+// source profile's value of that sample type.
+func (p *Profile) Normalize(pb *Profile) error {
+
+ if err := p.compatible(pb); err != nil {
+ return err
+ }
+
+ baseVals := make([]int64, len(p.SampleType))
+ for _, s := range pb.Sample {
+ for i, v := range s.Value {
+ baseVals[i] += v
+ }
+ }
+
+ srcVals := make([]int64, len(p.SampleType))
+ for _, s := range p.Sample {
+ for i, v := range s.Value {
+ srcVals[i] += v
+ }
+ }
+
+ normScale := make([]float64, len(baseVals))
+ for i := range baseVals {
+ if srcVals[i] == 0 {
+ normScale[i] = 0.0
+ } else {
+ normScale[i] = float64(baseVals[i]) / float64(srcVals[i])
+ }
+ }
+ p.ScaleN(normScale)
+ return nil
+}
+
+func isZeroSample(s *Sample) bool {
+ for _, v := range s.Value {
+ if v != 0 {
+ return false
+ }
+ }
+ return true
+}
+
// profileMerger holds the state used while folding source profiles
// into the output profile p.
type profileMerger struct {
	p *Profile

	// Memoization tables within a profile. These are reset for each
	// source profile, since IDs are only unique within one profile.
	locationsByID locationIDMap
	functionsByID map[uint64]*Function
	mappingsByID  map[uint64]mapInfo

	// Memoization tables for profile entities. Keyed by content, so
	// they deduplicate across all source profiles.
	samples   map[sampleKey]*Sample
	locations map[locationKey]*Location
	functions map[functionKey]*Function
	mappings  map[mappingKey]*Mapping
}

// mapInfo pairs a merged mapping with the offset to add to a source
// address to relocate it into that mapping.
type mapInfo struct {
	m      *Mapping
	offset int64
}
+
// mapSample returns the merged-profile sample corresponding to src.
// When a sample with the same key (stack + labels) already exists, the
// values are summed into it; otherwise a deep copy of src is added.
func (pm *profileMerger) mapSample(src *Sample) *Sample {
	// Check memoization table
	k := pm.sampleKey(src)
	if ss, ok := pm.samples[k]; ok {
		for i, v := range src.Value {
			ss.Value[i] += v
		}
		return ss
	}

	// Make new sample.
	s := &Sample{
		Location: make([]*Location, len(src.Location)),
		Value:    make([]int64, len(src.Value)),
		Label:    make(map[string][]string, len(src.Label)),
		NumLabel: make(map[string][]int64, len(src.NumLabel)),
		// NumUnit keys parallel NumLabel keys, so size from NumLabel.
		NumUnit: make(map[string][]string, len(src.NumLabel)),
	}
	for i, l := range src.Location {
		s.Location[i] = pm.mapLocation(l)
	}
	// Copy label slices so the new sample shares no storage with src.
	for k, v := range src.Label {
		vv := make([]string, len(v))
		copy(vv, v)
		s.Label[k] = vv
	}
	for k, v := range src.NumLabel {
		u := src.NumUnit[k]
		vv := make([]int64, len(v))
		uu := make([]string, len(u))
		copy(vv, v)
		copy(uu, u)
		s.NumLabel[k] = vv
		s.NumUnit[k] = uu
	}
	copy(s.Value, src.Value)
	pm.samples[k] = s
	pm.p.Sample = append(pm.p.Sample, s)
	return s
}
+
// sampleKey builds a compact byte-string fingerprint of a sample from
// its remapped stack and its sorted string/numeric labels, used to
// deduplicate samples in the merged profile.
func (pm *profileMerger) sampleKey(sample *Sample) sampleKey {
	// Accumulate contents into a string.
	var buf strings.Builder
	buf.Grow(64) // Heuristic to avoid extra allocs

	// encode a number
	putNumber := func(v uint64) {
		var num [binary.MaxVarintLen64]byte
		n := binary.PutUvarint(num[:], v)
		buf.Write(num[:n])
	}

	// encode a string prefixed with its length.
	putDelimitedString := func(s string) {
		putNumber(uint64(len(s)))
		buf.WriteString(s)
	}

	for _, l := range sample.Location {
		// Get the location in the merged profile, which may have a different ID.
		if loc := pm.mapLocation(l); loc != nil {
			putNumber(loc.ID)
		}
	}
	putNumber(0) // Delimiter

	// Labels are iterated in sorted key order so equivalent samples
	// produce identical keys regardless of map iteration order.
	for _, l := range sortedKeys1(sample.Label) {
		putDelimitedString(l)
		values := sample.Label[l]
		putNumber(uint64(len(values)))
		for _, v := range values {
			putDelimitedString(v)
		}
	}

	for _, l := range sortedKeys2(sample.NumLabel) {
		putDelimitedString(l)
		values := sample.NumLabel[l]
		putNumber(uint64(len(values)))
		for _, v := range values {
			putNumber(uint64(v))
		}
		units := sample.NumUnit[l]
		putNumber(uint64(len(units)))
		for _, v := range units {
			putDelimitedString(v)
		}
	}

	return sampleKey(buf.String())
}

// sampleKey is the encoded fingerprint produced by
// profileMerger.sampleKey.
type sampleKey string
+
// sortedKeys1 returns the sorted keys found in a string->[]string map.
//
// Note: this is currently non-generic since github pprof runs golint,
// which does not support generics. When that issue is fixed, it can
// be merged with sortedKeys2 and made into a generic function.
func sortedKeys1(m map[string][]string) []string {
	if len(m) == 0 {
		return nil
	}
	out := make([]string, 0, len(m))
	for key := range m {
		out = append(out, key)
	}
	sort.Strings(out)
	return out
}
+
// sortedKeys2 returns the sorted keys found in a string->[]int64 map.
//
// Note: this is currently non-generic since github pprof runs golint,
// which does not support generics. When that issue is fixed, it can
// be merged with sortedKeys1 and made into a generic function.
func sortedKeys2(m map[string][]int64) []string {
	if len(m) == 0 {
		return nil
	}
	out := make([]string, 0, len(m))
	for key := range m {
		out = append(out, key)
	}
	sort.Strings(out)
	return out
}
+
// mapLocation returns the merged-profile Location corresponding to
// src, creating it (with remapped mapping, address and lines) on first
// use. Results are memoized both by source ID and by content key.
func (pm *profileMerger) mapLocation(src *Location) *Location {
	if src == nil {
		return nil
	}

	if l := pm.locationsByID.get(src.ID); l != nil {
		return l
	}

	mi := pm.mapMapping(src.Mapping)
	l := &Location{
		// IDs are dense: one past the current number of locations.
		ID:      uint64(len(pm.p.Location) + 1),
		Mapping: mi.m,
		// Relocate the address into the merged mapping.
		Address:  uint64(int64(src.Address) + mi.offset),
		Line:     make([]Line, len(src.Line)),
		IsFolded: src.IsFolded,
	}
	for i, ln := range src.Line {
		l.Line[i] = pm.mapLine(ln)
	}
	// Check memoization table. Must be done on the remapped location to
	// account for the remapped mapping ID.
	k := l.key()
	if ll, ok := pm.locations[k]; ok {
		pm.locationsByID.set(src.ID, ll)
		return ll
	}
	pm.locationsByID.set(src.ID, l)
	pm.locations[k] = l
	pm.p.Location = append(pm.p.Location, l)
	return l
}
+
// key generates locationKey to be used as a key for maps.
func (l *Location) key() locationKey {
	key := locationKey{
		addr:     l.Address,
		isFolded: l.IsFolded,
	}
	if l.Mapping != nil {
		// Normalizes address to handle address space randomization.
		key.addr -= l.Mapping.Start
		key.mappingID = l.Mapping.ID
	}
	// Encode the line table compactly: function ID and line number in
	// hex, "|"-separated, so the whole table fits in one string field.
	lines := make([]string, len(l.Line)*2)
	for i, line := range l.Line {
		if line.Function != nil {
			lines[i*2] = strconv.FormatUint(line.Function.ID, 16)
		}
		lines[i*2+1] = strconv.FormatInt(line.Line, 16)
	}
	key.lines = strings.Join(lines, "|")
	return key
}

// locationKey identifies a location by mapping-relative address,
// encoded line table and folded flag.
type locationKey struct {
	addr, mappingID uint64
	lines           string
	isFolded        bool
}
+
// mapMapping returns the merged-profile mapping corresponding to src
// along with the address offset to apply when relocating src's
// addresses into it. Memoized by source ID and by content key.
func (pm *profileMerger) mapMapping(src *Mapping) mapInfo {
	if src == nil {
		return mapInfo{}
	}

	if mi, ok := pm.mappingsByID[src.ID]; ok {
		return mi
	}

	// Check memoization tables.
	mk := src.key()
	if m, ok := pm.mappings[mk]; ok {
		// Same mapping loaded at a (possibly) different base address;
		// the offset translates between the two address spaces.
		mi := mapInfo{m, int64(m.Start) - int64(src.Start)}
		pm.mappingsByID[src.ID] = mi
		return mi
	}
	m := &Mapping{
		ID:                     uint64(len(pm.p.Mapping) + 1),
		Start:                  src.Start,
		Limit:                  src.Limit,
		Offset:                 src.Offset,
		File:                   src.File,
		KernelRelocationSymbol: src.KernelRelocationSymbol,
		BuildID:                src.BuildID,
		HasFunctions:           src.HasFunctions,
		HasFilenames:           src.HasFilenames,
		HasLineNumbers:         src.HasLineNumbers,
		HasInlineFrames:        src.HasInlineFrames,
	}
	pm.p.Mapping = append(pm.p.Mapping, m)

	// Update memoization tables.
	pm.mappings[mk] = m
	mi := mapInfo{m, 0}
	pm.mappingsByID[src.ID] = mi
	return mi
}
+
// key generates encoded strings of Mapping to be used as a key for
// maps.
func (m *Mapping) key() mappingKey {
	// Normalize addresses to handle address space randomization.
	// Round up to next 4K boundary to avoid minor discrepancies.
	const mapsizeRounding = 0x1000

	size := m.Limit - m.Start
	size = size + mapsizeRounding - 1
	size = size - (size % mapsizeRounding)
	key := mappingKey{
		size:   size,
		offset: m.Offset,
	}

	switch {
	case m.BuildID != "":
		// Build ID is the strongest identity when present.
		key.buildIDOrFile = m.BuildID
	case m.File != "":
		key.buildIDOrFile = m.File
	default:
		// A mapping containing neither build ID nor file name is a fake mapping. A
		// key with empty buildIDOrFile is used for fake mappings so that they are
		// treated as the same mapping during merging.
	}
	return key
}

// mappingKey identifies a mapping by rounded size, file offset and
// build ID (or file name when no build ID is present).
type mappingKey struct {
	size, offset  uint64
	buildIDOrFile string
}
+
+func (pm *profileMerger) mapLine(src Line) Line {
+ ln := Line{
+ Function: pm.mapFunction(src.Function),
+ Line: src.Line,
+ }
+ return ln
+}
+
// mapFunction returns the merged-profile function corresponding to src,
// creating it on first use. Results are memoized both by source function
// ID and by content key, so equivalent functions are merged.
func (pm *profileMerger) mapFunction(src *Function) *Function {
	if src == nil {
		return nil
	}
	if f, ok := pm.functionsByID[src.ID]; ok {
		return f
	}
	k := src.key()
	if f, ok := pm.functions[k]; ok {
		pm.functionsByID[src.ID] = f
		return f
	}
	// First time this function content is seen: assign the next dense ID
	// and add it to the merged profile.
	f := &Function{
		ID:         uint64(len(pm.p.Function) + 1),
		Name:       src.Name,
		SystemName: src.SystemName,
		Filename:   src.Filename,
		StartLine:  src.StartLine,
	}
	pm.functions[k] = f
	pm.functionsByID[src.ID] = f
	pm.p.Function = append(pm.p.Function, f)
	return f
}
+
+// key generates a struct to be used as a key for maps.
+func (f *Function) key() functionKey {
+ return functionKey{
+ f.StartLine,
+ f.Name,
+ f.SystemName,
+ f.Filename,
+ }
+}
+
+type functionKey struct {
+ startLine int64
+ name, systemName, fileName string
+}
+
// combineHeaders checks that all profiles can be merged and returns
// their combined profile.
func combineHeaders(srcs []*Profile) (*Profile, error) {
	// Every profile must be compatible with the first one.
	for _, s := range srcs[1:] {
		if err := srcs[0].compatible(s); err != nil {
			return nil, err
		}
	}

	var timeNanos, durationNanos, period int64
	var comments []string
	seenComments := map[string]bool{}
	var defaultSampleType string
	for _, s := range srcs {
		// Earliest non-zero start time wins.
		if timeNanos == 0 || s.TimeNanos < timeNanos {
			timeNanos = s.TimeNanos
		}
		// Durations accumulate.
		durationNanos += s.DurationNanos
		// Largest period wins.
		if period == 0 || period < s.Period {
			period = s.Period
		}
		// Deduplicate comments, preserving first-seen order.
		for _, c := range s.Comments {
			if seen := seenComments[c]; !seen {
				comments = append(comments, c)
				seenComments[c] = true
			}
		}
		// First non-empty default sample type wins.
		if defaultSampleType == "" {
			defaultSampleType = s.DefaultSampleType
		}
	}

	// Frame filters and period/sample typing come from the first profile.
	p := &Profile{
		SampleType: make([]*ValueType, len(srcs[0].SampleType)),

		DropFrames: srcs[0].DropFrames,
		KeepFrames: srcs[0].KeepFrames,

		TimeNanos:     timeNanos,
		DurationNanos: durationNanos,
		PeriodType:    srcs[0].PeriodType,
		Period:        period,

		Comments:          comments,
		DefaultSampleType: defaultSampleType,
	}
	copy(p.SampleType, srcs[0].SampleType)
	return p, nil
}
+
+// compatible determines if two profiles can be compared/merged.
+// returns nil if the profiles are compatible; otherwise an error with
+// details on the incompatibility.
+func (p *Profile) compatible(pb *Profile) error {
+ if !equalValueType(p.PeriodType, pb.PeriodType) {
+ return fmt.Errorf("incompatible period types %v and %v", p.PeriodType, pb.PeriodType)
+ }
+
+ if len(p.SampleType) != len(pb.SampleType) {
+ return fmt.Errorf("incompatible sample types %v and %v", p.SampleType, pb.SampleType)
+ }
+
+ for i := range p.SampleType {
+ if !equalValueType(p.SampleType[i], pb.SampleType[i]) {
+ return fmt.Errorf("incompatible sample types %v and %v", p.SampleType, pb.SampleType)
+ }
+ }
+ return nil
+}
+
+// equalValueType returns true if the two value types are semantically
+// equal. It ignores the internal fields used during encode/decode.
+func equalValueType(st1, st2 *ValueType) bool {
+ return st1.Type == st2.Type && st1.Unit == st2.Unit
+}
+
+// locationIDMap is like a map[uint64]*Location, but provides efficiency for
+// ids that are densely numbered, which is often the case.
+type locationIDMap struct {
+ dense []*Location // indexed by id for id < len(dense)
+ sparse map[uint64]*Location // indexed by id for id >= len(dense)
+}
+
+func makeLocationIDMap(n int) locationIDMap {
+ return locationIDMap{
+ dense: make([]*Location, n),
+ sparse: map[uint64]*Location{},
+ }
+}
+
+func (lm locationIDMap) get(id uint64) *Location {
+ if id < uint64(len(lm.dense)) {
+ return lm.dense[int(id)]
+ }
+ return lm.sparse[id]
+}
+
+func (lm locationIDMap) set(id uint64, loc *Location) {
+ if id < uint64(len(lm.dense)) {
+ lm.dense[id] = loc
+ return
+ }
+ lm.sparse[id] = loc
+}
+
+// CompatibilizeSampleTypes makes profiles compatible to be compared/merged. It
+// keeps sample types that appear in all profiles only and drops/reorders the
+// sample types as necessary.
+//
+// In the case of sample types order is not the same for given profiles the
+// order is derived from the first profile.
+//
+// Profiles are modified in-place.
+//
+// It returns an error if the sample type's intersection is empty.
+func CompatibilizeSampleTypes(ps []*Profile) error {
+ sTypes := commonSampleTypes(ps)
+ if len(sTypes) == 0 {
+ return fmt.Errorf("profiles have empty common sample type list")
+ }
+ for _, p := range ps {
+ if err := compatibilizeSampleTypes(p, sTypes); err != nil {
+ return err
+ }
+ }
+ return nil
+}
+
+// commonSampleTypes returns sample types that appear in all profiles in the
+// order how they ordered in the first profile.
+func commonSampleTypes(ps []*Profile) []string {
+ if len(ps) == 0 {
+ return nil
+ }
+ sTypes := map[string]int{}
+ for _, p := range ps {
+ for _, st := range p.SampleType {
+ sTypes[st.Type]++
+ }
+ }
+ var res []string
+ for _, st := range ps[0].SampleType {
+ if sTypes[st.Type] == len(ps) {
+ res = append(res, st.Type)
+ }
+ }
+ return res
+}
+
// compatibilizeSampleTypes drops sample types that are not present in the
// sTypes list and reorders the remaining ones to match it.
//
// It sets DefaultSampleType to sTypes[0] if the profile's current default
// is not in the list.
//
// It assumes that all sample types from the sTypes list are present in the
// given profile otherwise it returns an error.
func compatibilizeSampleTypes(p *Profile, sTypes []string) error {
	if len(sTypes) == 0 {
		return fmt.Errorf("sample type list is empty")
	}
	defaultSampleType := sTypes[0]
	// reMap[i] is the index in p.SampleType of the i-th requested type.
	reMap, needToModify := make([]int, len(sTypes)), false
	for i, st := range sTypes {
		if st == p.DefaultSampleType {
			// Keep the profile's own default if it survives the filter.
			defaultSampleType = p.DefaultSampleType
		}
		idx := searchValueType(p.SampleType, st)
		if idx < 0 {
			return fmt.Errorf("%q sample type is not found in profile", st)
		}
		reMap[i] = idx
		if idx != i {
			needToModify = true
		}
	}
	// Fast path: the profile already has exactly the requested types,
	// in the requested order.
	if !needToModify && len(sTypes) == len(p.SampleType) {
		return nil
	}
	p.DefaultSampleType = defaultSampleType
	oldSampleTypes := p.SampleType
	p.SampleType = make([]*ValueType, len(sTypes))
	for i, idx := range reMap {
		p.SampleType[i] = oldSampleTypes[idx]
	}
	// Permute each sample's values through a scratch slice, then shrink
	// the sample's own slice and copy the reordered values back in place.
	values := make([]int64, len(sTypes))
	for _, s := range p.Sample {
		for i, idx := range reMap {
			values[i] = s.Value[idx]
		}
		s.Value = s.Value[:len(values)]
		copy(s.Value, values)
	}
	return nil
}
+
+func searchValueType(vts []*ValueType, s string) int {
+ for i, vt := range vts {
+ if vt.Type == s {
+ return i
+ }
+ }
+ return -1
+}
diff --git a/vendor/github.com/google/pprof/profile/profile.go b/vendor/github.com/google/pprof/profile/profile.go
new file mode 100644
index 000000000..60ef7e926
--- /dev/null
+++ b/vendor/github.com/google/pprof/profile/profile.go
@@ -0,0 +1,856 @@
+// Copyright 2014 Google Inc. All Rights Reserved.
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+// Package profile provides a representation of profile.proto and
+// methods to encode/decode profiles in this format.
+package profile
+
+import (
+ "bytes"
+ "compress/gzip"
+ "fmt"
+ "io"
+ "math"
+ "path/filepath"
+ "regexp"
+ "sort"
+ "strings"
+ "sync"
+ "time"
+)
+
// Profile is an in-memory representation of profile.proto.
type Profile struct {
	SampleType        []*ValueType
	DefaultSampleType string
	Sample            []*Sample
	Mapping           []*Mapping
	Location          []*Location
	Function          []*Function
	Comments          []string

	DropFrames string
	KeepFrames string

	TimeNanos     int64
	DurationNanos int64
	PeriodType    *ValueType
	Period        int64

	// The following fields are modified during encoding and copying,
	// so are protected by a Mutex.
	encodeMu sync.Mutex

	// Fields with an X suffix hold the raw proto representation (string
	// table indices and the like) used while encoding/decoding; they are
	// not part of the public API.
	commentX           []int64
	dropFramesX        int64
	keepFramesX        int64
	stringTable        []string
	defaultSampleTypeX int64
}

// ValueType corresponds to Profile.ValueType
type ValueType struct {
	Type string // cpu, wall, inuse_space, etc
	Unit string // seconds, nanoseconds, bytes, etc

	// String-table indices for Type and Unit, used during encode/decode.
	typeX int64
	unitX int64
}

// Sample corresponds to Profile.Sample
type Sample struct {
	Location []*Location
	Value    []int64
	// Label is a per-label-key map to values for string labels.
	//
	// In general, having multiple values for the given label key is strongly
	// discouraged - see docs for the sample label field in profile.proto. The
	// main reason this unlikely state is tracked here is to make the
	// decoding->encoding roundtrip not lossy. But we expect that the value
	// slices present in this map are always of length 1.
	Label map[string][]string
	// NumLabel is a per-label-key map to values for numeric labels. See a note
	// above on handling multiple values for a label.
	NumLabel map[string][]int64
	// NumUnit is a per-label-key map to the unit names of corresponding numeric
	// label values. The unit info may be missing even if the label is in
	// NumLabel, see the docs in profile.proto for details. When the value is
	// slice is present and not nil, its length must be equal to the length of
	// the corresponding value slice in NumLabel.
	NumUnit map[string][]string

	// Raw proto representation used during encode/decode.
	locationIDX []uint64
	labelX      []label
}

// label corresponds to Profile.Label
type label struct {
	keyX int64
	// Exactly one of the two following values must be set
	strX int64
	numX int64 // Integer value for this label
	// can be set if numX has value
	unitX int64
}

// Mapping corresponds to Profile.Mapping
type Mapping struct {
	ID              uint64
	Start           uint64
	Limit           uint64
	Offset          uint64
	File            string
	BuildID         string
	HasFunctions    bool
	HasFilenames    bool
	HasLineNumbers  bool
	HasInlineFrames bool

	// String-table indices for File and BuildID, used during encode/decode.
	fileX    int64
	buildIDX int64

	// Name of the kernel relocation symbol ("_text" or "_stext"), extracted from File.
	// For linux kernel mappings generated by some tools, correct symbolization depends
	// on knowing which of the two possible relocation symbols was used for `Start`.
	// This is given to us as a suffix in `File` (e.g. "[kernel.kallsyms]_stext").
	//
	// Note, this public field is not persisted in the proto. For the purposes of
	// copying / merging / hashing profiles, it is considered subsumed by `File`.
	KernelRelocationSymbol string
}

// Location corresponds to Profile.Location
type Location struct {
	ID       uint64
	Mapping  *Mapping
	Address  uint64
	Line     []Line
	IsFolded bool

	// Mapping ID as read from the proto, used during encode/decode.
	mappingIDX uint64
}

// Line corresponds to Profile.Line
type Line struct {
	Function *Function
	Line     int64

	// Function ID as read from the proto, used during encode/decode.
	functionIDX uint64
}

// Function corresponds to Profile.Function
type Function struct {
	ID         uint64
	Name       string
	SystemName string
	Filename   string
	StartLine  int64

	// String-table indices used during encode/decode.
	nameX       int64
	systemNameX int64
	filenameX   int64
}
+
+// Parse parses a profile and checks for its validity. The input
+// may be a gzip-compressed encoded protobuf or one of many legacy
+// profile formats which may be unsupported in the future.
+func Parse(r io.Reader) (*Profile, error) {
+ data, err := io.ReadAll(r)
+ if err != nil {
+ return nil, err
+ }
+ return ParseData(data)
+}
+
// ParseData parses a profile from a buffer and checks for its
// validity.
func ParseData(data []byte) (*Profile, error) {
	var p *Profile
	var err error
	// Transparently decompress gzip input (magic bytes 0x1f 0x8b).
	if len(data) >= 2 && data[0] == 0x1f && data[1] == 0x8b {
		// Note: this err (declared by :=) shadows the outer err; only
		// the decompression result is checked inside this branch.
		gz, err := gzip.NewReader(bytes.NewBuffer(data))
		if err == nil {
			data, err = io.ReadAll(gz)
		}
		if err != nil {
			return nil, fmt.Errorf("decompressing profile: %v", err)
		}
	}
	// Try the protobuf format first; fall back to the legacy text formats
	// unless the input was empty or a concatenation of profiles.
	if p, err = ParseUncompressed(data); err != nil && err != errNoData && err != errConcatProfile {
		p, err = parseLegacy(data)
	}

	if err != nil {
		return nil, fmt.Errorf("parsing profile: %v", err)
	}

	if err := p.CheckValid(); err != nil {
		return nil, fmt.Errorf("malformed profile: %v", err)
	}
	return p, nil
}
+
+var errUnrecognized = fmt.Errorf("unrecognized profile format")
+var errMalformed = fmt.Errorf("malformed profile format")
+var errNoData = fmt.Errorf("empty input file")
+var errConcatProfile = fmt.Errorf("concatenated profiles detected")
+
+func parseLegacy(data []byte) (*Profile, error) {
+ parsers := []func([]byte) (*Profile, error){
+ parseCPU,
+ parseHeap,
+ parseGoCount, // goroutine, threadcreate
+ parseThread,
+ parseContention,
+ parseJavaProfile,
+ }
+
+ for _, parser := range parsers {
+ p, err := parser(data)
+ if err == nil {
+ p.addLegacyFrameInfo()
+ return p, nil
+ }
+ if err != errUnrecognized {
+ return nil, err
+ }
+ }
+ return nil, errUnrecognized
+}
+
+// ParseUncompressed parses an uncompressed protobuf into a profile.
+func ParseUncompressed(data []byte) (*Profile, error) {
+ if len(data) == 0 {
+ return nil, errNoData
+ }
+ p := &Profile{}
+ if err := unmarshal(data, p); err != nil {
+ return nil, err
+ }
+
+ if err := p.postDecode(); err != nil {
+ return nil, err
+ }
+
+ return p, nil
+}
+
+var libRx = regexp.MustCompile(`([.]so$|[.]so[._][0-9]+)`)
+
// massageMappings applies heuristic-based changes to the profile
// mappings to account for quirks of some environments.
func (p *Profile) massageMappings() {
	// Merge adjacent regions with matching names, checking that the offsets match
	if len(p.Mapping) > 1 {
		mappings := []*Mapping{p.Mapping[0]}
		for _, m := range p.Mapping[1:] {
			lm := mappings[len(mappings)-1]
			if adjacent(lm, m) {
				// Extend the surviving mapping; non-empty metadata from
				// the absorbed mapping wins. Locations are repointed at
				// the survivor.
				lm.Limit = m.Limit
				if m.File != "" {
					lm.File = m.File
				}
				if m.BuildID != "" {
					lm.BuildID = m.BuildID
				}
				p.updateLocationMapping(m, lm)
				continue
			}
			mappings = append(mappings, m)
		}
		p.Mapping = mappings
	}

	// Use heuristics to identify main binary and move it to the top of the list of mappings
	for i, m := range p.Mapping {
		file := strings.TrimSpace(strings.Replace(m.File, "(deleted)", "", -1))
		if len(file) == 0 {
			continue
		}
		// Skip shared libraries ("*.so", "*.so.N") ...
		if len(libRx.FindStringSubmatch(file)) > 0 {
			continue
		}
		// ... and pseudo-mappings such as "[vdso]".
		if file[0] == '[' {
			continue
		}
		// Swap what we guess is main to position 0.
		p.Mapping[0], p.Mapping[i] = p.Mapping[i], p.Mapping[0]
		break
	}

	// Keep the mapping IDs neatly sorted
	for i, m := range p.Mapping {
		m.ID = uint64(i + 1)
	}
}
+
+// adjacent returns whether two mapping entries represent the same
+// mapping that has been split into two. Check that their addresses are adjacent,
+// and if the offsets match, if they are available.
+func adjacent(m1, m2 *Mapping) bool {
+ if m1.File != "" && m2.File != "" {
+ if m1.File != m2.File {
+ return false
+ }
+ }
+ if m1.BuildID != "" && m2.BuildID != "" {
+ if m1.BuildID != m2.BuildID {
+ return false
+ }
+ }
+ if m1.Limit != m2.Start {
+ return false
+ }
+ if m1.Offset != 0 && m2.Offset != 0 {
+ offset := m1.Offset + (m1.Limit - m1.Start)
+ if offset != m2.Offset {
+ return false
+ }
+ }
+ return true
+}
+
+func (p *Profile) updateLocationMapping(from, to *Mapping) {
+ for _, l := range p.Location {
+ if l.Mapping == from {
+ l.Mapping = to
+ }
+ }
+}
+
+func serialize(p *Profile) []byte {
+ p.encodeMu.Lock()
+ p.preEncode()
+ b := marshal(p)
+ p.encodeMu.Unlock()
+ return b
+}
+
+// Write writes the profile as a gzip-compressed marshaled protobuf.
+func (p *Profile) Write(w io.Writer) error {
+ zw := gzip.NewWriter(w)
+ defer zw.Close()
+ _, err := zw.Write(serialize(p))
+ return err
+}
+
+// WriteUncompressed writes the profile as a marshaled protobuf.
+func (p *Profile) WriteUncompressed(w io.Writer) error {
+ _, err := w.Write(serialize(p))
+ return err
+}
+
// CheckValid tests whether the profile is valid. Checks include, but are
// not limited to:
//   - len(Profile.Sample[n].value) == len(Profile.value_unit)
//   - Sample.id has a corresponding Profile.Location
func (p *Profile) CheckValid() error {
	// Check that sample values are consistent
	sampleLen := len(p.SampleType)
	if sampleLen == 0 && len(p.Sample) != 0 {
		return fmt.Errorf("missing sample type information")
	}
	for _, s := range p.Sample {
		if s == nil {
			return fmt.Errorf("profile has nil sample")
		}
		if len(s.Value) != sampleLen {
			return fmt.Errorf("mismatch: sample has %d values vs. %d types", len(s.Value), len(p.SampleType))
		}
		for _, l := range s.Location {
			if l == nil {
				return fmt.Errorf("sample has nil location")
			}
		}
	}

	// Check that all mappings/locations/functions are in the tables
	// Check that there are no duplicate ids
	mappings := make(map[uint64]*Mapping, len(p.Mapping))
	for _, m := range p.Mapping {
		if m == nil {
			return fmt.Errorf("profile has nil mapping")
		}
		if m.ID == 0 {
			return fmt.Errorf("found mapping with reserved ID=0")
		}
		if mappings[m.ID] != nil {
			return fmt.Errorf("multiple mappings with same id: %d", m.ID)
		}
		mappings[m.ID] = m
	}
	functions := make(map[uint64]*Function, len(p.Function))
	for _, f := range p.Function {
		if f == nil {
			return fmt.Errorf("profile has nil function")
		}
		if f.ID == 0 {
			return fmt.Errorf("found function with reserved ID=0")
		}
		if functions[f.ID] != nil {
			return fmt.Errorf("multiple functions with same id: %d", f.ID)
		}
		functions[f.ID] = f
	}
	locations := make(map[uint64]*Location, len(p.Location))
	for _, l := range p.Location {
		if l == nil {
			return fmt.Errorf("profile has nil location")
		}
		if l.ID == 0 {
			return fmt.Errorf("found location with reserved id=0")
		}
		if locations[l.ID] != nil {
			return fmt.Errorf("multiple locations with same id: %d", l.ID)
		}
		locations[l.ID] = l
		// A location's mapping must be the exact object registered in
		// p.Mapping under the same ID (pointer identity, not just ID match).
		if m := l.Mapping; m != nil {
			if m.ID == 0 || mappings[m.ID] != m {
				return fmt.Errorf("inconsistent mapping %p: %d", m, m.ID)
			}
		}
		// Likewise each line's function must be registered in p.Function.
		for _, ln := range l.Line {
			f := ln.Function
			if f == nil {
				return fmt.Errorf("location id: %d has a line with nil function", l.ID)
			}
			if f.ID == 0 || functions[f.ID] != f {
				return fmt.Errorf("inconsistent function %p: %d", f, f.ID)
			}
		}
	}
	return nil
}
+
// Aggregate merges the locations in the profile into equivalence
// classes preserving the request attributes. It also updates the
// samples to point to the merged locations.
func (p *Profile) Aggregate(inlineFrame, function, filename, linenumber, address bool) error {
	// Downgrade each mapping's symbolization flags to the requested level.
	for _, m := range p.Mapping {
		m.HasInlineFrames = m.HasInlineFrames && inlineFrame
		m.HasFunctions = m.HasFunctions && function
		m.HasFilenames = m.HasFilenames && filename
		m.HasLineNumbers = m.HasLineNumbers && linenumber
	}

	// Aggregate functions
	if !function || !filename {
		for _, f := range p.Function {
			if !function {
				f.Name = ""
				f.SystemName = ""
			}
			if !filename {
				f.Filename = ""
			}
		}
	}

	// Aggregate locations
	if !inlineFrame || !address || !linenumber {
		for _, l := range p.Location {
			// When dropping inline frames, keep only the last (outermost)
			// entry of the line slice.
			if !inlineFrame && len(l.Line) > 1 {
				l.Line = l.Line[len(l.Line)-1:]
			}
			if !linenumber {
				for i := range l.Line {
					l.Line[i].Line = 0
				}
			}
			if !address {
				l.Address = 0
			}
		}
	}

	return p.CheckValid()
}
+
// NumLabelUnits returns a map of numeric label keys to the units
// associated with those keys and a map of those keys to any units
// that were encountered but not used.
// Unit for a given key is the first encountered unit for that key. If multiple
// units are encountered for values paired with a particular key, then the first
// unit encountered is used and all other units are returned in sorted order
// in map of ignored units.
// If no units are encountered for a particular key, the unit is then inferred
// based on the key.
func (p *Profile) NumLabelUnits() (map[string]string, map[string][]string) {
	numLabelUnits := map[string]string{}
	ignoredUnits := map[string]map[string]bool{}
	encounteredKeys := map[string]bool{}

	// Determine units based on numeric tags for each sample.
	for _, s := range p.Sample {
		for k := range s.NumLabel {
			encounteredKeys[k] = true
			for _, unit := range s.NumUnit[k] {
				if unit == "" {
					continue
				}
				if wantUnit, ok := numLabelUnits[k]; !ok {
					// First non-empty unit seen for this key wins.
					numLabelUnits[k] = unit
				} else if wantUnit != unit {
					// Conflicting unit: remember it as ignored.
					if v, ok := ignoredUnits[k]; ok {
						v[unit] = true
					} else {
						ignoredUnits[k] = map[string]bool{unit: true}
					}
				}
			}
		}
	}
	// Infer units for keys without any units associated with
	// numeric tag values.
	for key := range encounteredKeys {
		unit := numLabelUnits[key]
		if unit == "" {
			switch key {
			case "alignment", "request":
				numLabelUnits[key] = "bytes"
			default:
				// Fall back to using the key itself as the unit name.
				numLabelUnits[key] = key
			}
		}
	}

	// Copy ignored units into more readable format
	unitsIgnored := make(map[string][]string, len(ignoredUnits))
	for key, values := range ignoredUnits {
		units := make([]string, len(values))
		i := 0
		for unit := range values {
			units[i] = unit
			i++
		}
		sort.Strings(units)
		unitsIgnored[key] = units
	}

	return numLabelUnits, unitsIgnored
}
+
// String dumps a text representation of a profile. Intended mainly
// for debugging purposes.
func (p *Profile) String() string {
	ss := make([]string, 0, len(p.Comments)+len(p.Sample)+len(p.Mapping)+len(p.Location))
	for _, c := range p.Comments {
		ss = append(ss, "Comment: "+c)
	}
	if pt := p.PeriodType; pt != nil {
		ss = append(ss, fmt.Sprintf("PeriodType: %s %s", pt.Type, pt.Unit))
	}
	ss = append(ss, fmt.Sprintf("Period: %d", p.Period))
	if p.TimeNanos != 0 {
		ss = append(ss, fmt.Sprintf("Time: %v", time.Unix(0, p.TimeNanos)))
	}
	if p.DurationNanos != 0 {
		ss = append(ss, fmt.Sprintf("Duration: %.4v", time.Duration(p.DurationNanos)))
	}

	ss = append(ss, "Samples:")
	// Header line: one "type/unit" column per sample type, marking the default.
	var sh1 string
	for _, s := range p.SampleType {
		dflt := ""
		if s.Type == p.DefaultSampleType {
			dflt = "[dflt]"
		}
		sh1 = sh1 + fmt.Sprintf("%s/%s%s ", s.Type, s.Unit, dflt)
	}
	ss = append(ss, strings.TrimSpace(sh1))
	for _, s := range p.Sample {
		ss = append(ss, s.string())
	}

	ss = append(ss, "Locations")
	for _, l := range p.Location {
		ss = append(ss, l.string())
	}

	ss = append(ss, "Mappings")
	for _, m := range p.Mapping {
		ss = append(ss, m.string())
	}

	return strings.Join(ss, "\n") + "\n"
}

// string dumps a text representation of a mapping. Intended mainly
// for debugging purposes.
func (m *Mapping) string() string {
	// bits summarizes which kinds of symbol information the mapping has.
	bits := ""
	if m.HasFunctions {
		bits = bits + "[FN]"
	}
	if m.HasFilenames {
		bits = bits + "[FL]"
	}
	if m.HasLineNumbers {
		bits = bits + "[LN]"
	}
	if m.HasInlineFrames {
		bits = bits + "[IN]"
	}
	return fmt.Sprintf("%d: %#x/%#x/%#x %s %s %s",
		m.ID,
		m.Start, m.Limit, m.Offset,
		m.File,
		m.BuildID,
		bits)
}

// string dumps a text representation of a location. Intended mainly
// for debugging purposes.
func (l *Location) string() string {
	ss := []string{}
	locStr := fmt.Sprintf("%6d: %#x ", l.ID, l.Address)
	if m := l.Mapping; m != nil {
		locStr = locStr + fmt.Sprintf("M=%d ", m.ID)
	}
	if l.IsFolded {
		locStr = locStr + "[F] "
	}
	if len(l.Line) == 0 {
		ss = append(ss, locStr)
	}
	// One output line per Line entry, all sharing the location prefix.
	for li := range l.Line {
		lnStr := "??"
		if fn := l.Line[li].Function; fn != nil {
			lnStr = fmt.Sprintf("%s %s:%d s=%d",
				fn.Name,
				fn.Filename,
				l.Line[li].Line,
				fn.StartLine)
			if fn.Name != fn.SystemName {
				lnStr = lnStr + "(" + fn.SystemName + ")"
			}
		}
		ss = append(ss, locStr+lnStr)
		// Do not print location details past the first line
		locStr = " "
	}
	return strings.Join(ss, "\n")
}

// string dumps a text representation of a sample. Intended mainly
// for debugging purposes.
func (s *Sample) string() string {
	ss := []string{}
	// First line: the sample values followed by the location IDs.
	var sv string
	for _, v := range s.Value {
		sv = fmt.Sprintf("%s %10d", sv, v)
	}
	sv = sv + ": "
	for _, l := range s.Location {
		sv = sv + fmt.Sprintf("%d ", l.ID)
	}
	ss = append(ss, sv)
	const labelHeader = " "
	if len(s.Label) > 0 {
		ss = append(ss, labelHeader+labelsToString(s.Label))
	}
	if len(s.NumLabel) > 0 {
		ss = append(ss, labelHeader+numLabelsToString(s.NumLabel, s.NumUnit))
	}
	return strings.Join(ss, "\n")
}
+
// labelsToString returns a sorted, space-separated "key:[values]"
// rendering of a string-label map.
func labelsToString(labels map[string][]string) string {
	pairs := make([]string, 0, len(labels))
	for key, vals := range labels {
		pairs = append(pairs, fmt.Sprintf("%s:%v", key, vals))
	}
	sort.Strings(pairs)
	return strings.Join(pairs, " ")
}

// numLabelsToString returns a sorted, space-separated rendering of a
// numeric-label map. When a unit slice of matching length exists, each
// value is printed together with its unit.
func numLabelsToString(numLabels map[string][]int64, numUnits map[string][]string) string {
	entries := make([]string, 0, len(numLabels))
	for key, vals := range numLabels {
		units := numUnits[key]
		var entry string
		if len(units) == len(vals) {
			withUnits := make([]string, len(vals))
			for i, v := range vals {
				withUnits[i] = fmt.Sprintf("%d %s", v, units[i])
			}
			entry = fmt.Sprintf("%s:%v", key, withUnits)
		} else {
			entry = fmt.Sprintf("%s:%v", key, vals)
		}
		entries = append(entries, entry)
	}
	sort.Strings(entries)
	return strings.Join(entries, " ")
}
+
+// SetLabel sets the specified key to the specified value for all samples in the
+// profile.
+func (p *Profile) SetLabel(key string, value []string) {
+ for _, sample := range p.Sample {
+ if sample.Label == nil {
+ sample.Label = map[string][]string{key: value}
+ } else {
+ sample.Label[key] = value
+ }
+ }
+}
+
+// RemoveLabel removes all labels associated with the specified key for all
+// samples in the profile.
+func (p *Profile) RemoveLabel(key string) {
+ for _, sample := range p.Sample {
+ delete(sample.Label, key)
+ }
+}
+
+// HasLabel returns true if a sample has a label with indicated key and value.
+func (s *Sample) HasLabel(key, value string) bool {
+ for _, v := range s.Label[key] {
+ if v == value {
+ return true
+ }
+ }
+ return false
+}
+
+// SetNumLabel sets the specified key to the specified value for all samples in the
+// profile. "unit" is a slice that describes the units that each corresponding member
+// of "values" is measured in (e.g. bytes or seconds). If there is no relevant
+// unit for a given value, that member of "unit" should be the empty string.
+// "unit" must either have the same length as "value", or be nil.
+func (p *Profile) SetNumLabel(key string, value []int64, unit []string) {
+ for _, sample := range p.Sample {
+ if sample.NumLabel == nil {
+ sample.NumLabel = map[string][]int64{key: value}
+ } else {
+ sample.NumLabel[key] = value
+ }
+ if sample.NumUnit == nil {
+ sample.NumUnit = map[string][]string{key: unit}
+ } else {
+ sample.NumUnit[key] = unit
+ }
+ }
+}
+
+// RemoveNumLabel removes all numerical labels associated with the specified key for all
+// samples in the profile.
+func (p *Profile) RemoveNumLabel(key string) {
+ for _, sample := range p.Sample {
+ delete(sample.NumLabel, key)
+ delete(sample.NumUnit, key)
+ }
+}
+
+// DiffBaseSample returns true if a sample belongs to the diff base and false
+// otherwise.
+func (s *Sample) DiffBaseSample() bool {
+ return s.HasLabel("pprof::base", "true")
+}
+
+// Scale multiplies all sample values in a profile by a constant and keeps
+// only samples that have at least one non-zero value.
+func (p *Profile) Scale(ratio float64) {
+ if ratio == 1 {
+ return
+ }
+ ratios := make([]float64, len(p.SampleType))
+ for i := range p.SampleType {
+ ratios[i] = ratio
+ }
+ p.ScaleN(ratios)
+}
+
// ScaleN multiplies each sample values in a sample by a different amount
// and keeps only samples that have at least one non-zero value.
func (p *Profile) ScaleN(ratios []float64) error {
	if len(p.SampleType) != len(ratios) {
		return fmt.Errorf("mismatched scale ratios, got %d, want %d", len(ratios), len(p.SampleType))
	}
	allOnes := true
	for _, r := range ratios {
		if r != 1 {
			allOnes = false
			break
		}
	}
	if allOnes {
		// Scaling everything by 1 is a no-op.
		return nil
	}
	// Scale in place, compacting surviving samples to the front of p.Sample.
	fillIdx := 0
	for _, s := range p.Sample {
		keepSample := false
		for i, v := range s.Value {
			if ratios[i] != 1 {
				val := int64(math.Round(float64(v) * ratios[i]))
				s.Value[i] = val
				// NOTE(review): only values whose ratio != 1 count toward
				// keepSample, so a sample whose sole non-zero values sit at
				// ratio-1 indices is dropped — verify this matches upstream
				// intent before changing.
				keepSample = keepSample || val != 0
			}
		}
		if keepSample {
			p.Sample[fillIdx] = s
			fillIdx++
		}
	}
	p.Sample = p.Sample[:fillIdx]
	return nil
}
+
+// HasFunctions determines if all locations in this profile have
+// symbolized function information.
+func (p *Profile) HasFunctions() bool {
+ for _, l := range p.Location {
+ if l.Mapping != nil && !l.Mapping.HasFunctions {
+ return false
+ }
+ }
+ return true
+}
+
+// HasFileLines determines if all locations in this profile have
+// symbolized file and line number information.
+func (p *Profile) HasFileLines() bool {
+ for _, l := range p.Location {
+ if l.Mapping != nil && (!l.Mapping.HasFilenames || !l.Mapping.HasLineNumbers) {
+ return false
+ }
+ }
+ return true
+}
+
+// Unsymbolizable returns true if a mapping points to a binary for which
+// locations can't be symbolized in principle, at least now. Examples are
+// "[vdso]", [vsyscall]" and some others, see the code.
+func (m *Mapping) Unsymbolizable() bool {
+ name := filepath.Base(m.File)
+ return strings.HasPrefix(name, "[") || strings.HasPrefix(name, "linux-vdso") || strings.HasPrefix(m.File, "/dev/dri/")
+}
+
// Copy makes a fully independent copy of a profile.
func (p *Profile) Copy() *Profile {
	pp := &Profile{}
	// Round-trip through the wire format: serialize produces a deep,
	// self-contained encoding of p, so decoding it yields an independent
	// copy. Failures here can only come from an internal encode/decode
	// bug, hence the panic rather than an error return.
	if err := unmarshal(serialize(p), pp); err != nil {
		panic(err)
	}
	if err := pp.postDecode(); err != nil {
		panic(err)
	}

	return pp
}
diff --git a/vendor/github.com/google/pprof/profile/proto.go b/vendor/github.com/google/pprof/profile/proto.go
new file mode 100644
index 000000000..a15696ba1
--- /dev/null
+++ b/vendor/github.com/google/pprof/profile/proto.go
@@ -0,0 +1,367 @@
+// Copyright 2014 Google Inc. All Rights Reserved.
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+// This file is a simple protocol buffer encoder and decoder.
+// The format is described at
+// https://developers.google.com/protocol-buffers/docs/encoding
+//
+// A protocol message must implement the message interface:
+// decoder() []decoder
+// encode(*buffer)
+//
+// The decode method returns a slice indexed by field number that gives the
+// function to decode that field.
+// The encode method encodes its receiver into the given buffer.
+//
+// The two methods are simple enough to be implemented by hand rather than
+// by using a protocol compiler.
+//
+// See profile.go for examples of messages implementing this interface.
+//
+// There is no support for groups, message sets, or "has" bits.
+
+package profile
+
+import (
+ "errors"
+ "fmt"
+)
+
+// buffer carries encoder output and decoder state: the most recently
+// read field tag and wire type, the raw bytes, and scratch storage.
+type buffer struct {
+	field    int // field tag
+	typ      int // proto wire type code for field
+	u64      uint64
+	data     []byte
+	tmp      [16]byte
+	tmpLines []Line // temporary storage used while decoding "repeated Line".
+}
+
+// decoder decodes the current field in b into message m.
+type decoder func(*buffer, message) error
+
+// message is implemented by every protocol message in this package;
+// see the file comment for the contract of decoder() and encode().
+type message interface {
+	decoder() []decoder
+	encode(*buffer)
+}
+
+// marshal encodes m into a freshly allocated byte slice.
+func marshal(m message) []byte {
+	var b buffer
+	m.encode(&b)
+	return b.data
+}
+
+// encodeVarint appends x to b.data in base-128 varint encoding,
+// least-significant 7-bit group first.
+func encodeVarint(b *buffer, x uint64) {
+	for x >= 128 {
+		b.data = append(b.data, byte(x)|0x80) // 0x80 flags a continuation byte
+		x >>= 7
+	}
+	b.data = append(b.data, byte(x))
+}
+
+// encodeLength appends the key (tag, wire type 2) and byte length of a
+// length-delimited field.
+func encodeLength(b *buffer, tag int, len int) {
+	encodeVarint(b, uint64(tag)<<3|2)
+	encodeVarint(b, uint64(len))
+}
+
+// encodeUint64 appends a single varint field (wire type 0) with the
+// given tag and value.
+func encodeUint64(b *buffer, tag int, x uint64) {
+	// append varint to b.data
+	encodeVarint(b, uint64(tag)<<3)
+	encodeVarint(b, x)
+}
+
+// encodeUint64s appends a repeated uint64 field. More than two values
+// are written in packed form (one length-delimited field); otherwise
+// each value becomes its own varint field.
+func encodeUint64s(b *buffer, tag int, x []uint64) {
+	if len(x) > 2 {
+		// Use packed encoding
+		n1 := len(b.data)
+		for _, u := range x {
+			encodeVarint(b, u)
+		}
+		n2 := len(b.data)
+		encodeLength(b, tag, n2-n1)
+		n3 := len(b.data)
+		// The header was written after the values; rotate it in front of
+		// them using b.tmp as scratch (a header fits in 16 bytes).
+		copy(b.tmp[:], b.data[n2:n3])
+		copy(b.data[n1+(n3-n2):], b.data[n1:n2])
+		copy(b.data[n1:], b.tmp[:n3-n2])
+		return
+	}
+	for _, u := range x {
+		encodeUint64(b, tag, u)
+	}
+}
+
+// encodeUint64Opt appends the field only when x is nonzero (proto3
+// default values are omitted from the wire).
+func encodeUint64Opt(b *buffer, tag int, x uint64) {
+	if x == 0 {
+		return
+	}
+	encodeUint64(b, tag, x)
+}
+
+// encodeInt64 appends a varint field holding x reinterpreted as uint64
+// (plain two's-complement, not zigzag encoding).
+func encodeInt64(b *buffer, tag int, x int64) {
+	u := uint64(x)
+	encodeUint64(b, tag, u)
+}
+
+// encodeInt64s appends a repeated int64 field; like encodeUint64s, more
+// than two values are written in packed form.
+func encodeInt64s(b *buffer, tag int, x []int64) {
+	if len(x) > 2 {
+		// Use packed encoding
+		n1 := len(b.data)
+		for _, u := range x {
+			encodeVarint(b, uint64(u))
+		}
+		n2 := len(b.data)
+		encodeLength(b, tag, n2-n1)
+		n3 := len(b.data)
+		// Rotate the trailing header in front of the packed values.
+		copy(b.tmp[:], b.data[n2:n3])
+		copy(b.data[n1+(n3-n2):], b.data[n1:n2])
+		copy(b.data[n1:], b.tmp[:n3-n2])
+		return
+	}
+	for _, u := range x {
+		encodeInt64(b, tag, u)
+	}
+}
+
+// encodeInt64Opt appends the field only when x is nonzero.
+func encodeInt64Opt(b *buffer, tag int, x int64) {
+	if x == 0 {
+		return
+	}
+	encodeInt64(b, tag, x)
+}
+
+// encodeString appends a length-delimited string field.
+func encodeString(b *buffer, tag int, x string) {
+	encodeLength(b, tag, len(x))
+	b.data = append(b.data, x...)
+}
+
+// encodeStrings appends a repeated string field, one field per element
+// (strings cannot be packed).
+func encodeStrings(b *buffer, tag int, x []string) {
+	for _, s := range x {
+		encodeString(b, tag, s)
+	}
+}
+
+// encodeBool appends a bool as a varint field holding 0 or 1.
+func encodeBool(b *buffer, tag int, x bool) {
+	if x {
+		encodeUint64(b, tag, 1)
+	} else {
+		encodeUint64(b, tag, 0)
+	}
+}
+
+// encodeBoolOpt appends the field only when x is true (false is the
+// default and is omitted).
+func encodeBoolOpt(b *buffer, tag int, x bool) {
+	if x {
+		encodeBool(b, tag, x)
+	}
+}
+
+// encodeMessage appends m as a length-delimited embedded message. The
+// message body is encoded first, then the header is written and rotated
+// in front of the body via b.tmp (headers fit in 16 bytes).
+func encodeMessage(b *buffer, tag int, m message) {
+	n1 := len(b.data)
+	m.encode(b)
+	n2 := len(b.data)
+	encodeLength(b, tag, n2-n1)
+	n3 := len(b.data)
+	copy(b.tmp[:], b.data[n2:n3])
+	copy(b.data[n1+(n3-n2):], b.data[n1:n2])
+	copy(b.data[n1:], b.tmp[:n3-n2])
+}
+
+// unmarshal decodes data into m. The buffer is primed with wire type 2
+// so decodeMessage accepts the whole input as a length-delimited payload.
+func unmarshal(data []byte, m message) (err error) {
+	b := buffer{data: data, typ: 2}
+	return decodeMessage(&b, m)
+}
+
+// le64 decodes a little-endian 64-bit value from the first 8 bytes of p.
+func le64(p []byte) uint64 {
+	return uint64(p[0]) | uint64(p[1])<<8 | uint64(p[2])<<16 | uint64(p[3])<<24 | uint64(p[4])<<32 | uint64(p[5])<<40 | uint64(p[6])<<48 | uint64(p[7])<<56
+}
+
+// le32 decodes a little-endian 32-bit value from the first 4 bytes of p.
+func le32(p []byte) uint32 {
+	return uint32(p[0]) | uint32(p[1])<<8 | uint32(p[2])<<16 | uint32(p[3])<<24
+}
+
+// decodeVarint reads one base-128 varint from the front of data and
+// returns the value and the remaining bytes. Varints longer than
+// 10 bytes (the maximum for a uint64) or truncated input are rejected.
+func decodeVarint(data []byte) (uint64, []byte, error) {
+	var u uint64
+	for i := 0; ; i++ {
+		if i >= 10 || i >= len(data) {
+			return 0, nil, errors.New("bad varint")
+		}
+		u |= uint64(data[i]&0x7F) << uint(7*i)
+		if data[i]&0x80 == 0 {
+			return u, data[i+1:], nil
+		}
+	}
+}
+
+// decodeField reads one field (key plus payload) from the front of
+// data, stores the tag/type and value in b, and returns the remaining
+// bytes. Varint and fixed-width values land in b.u64; length-delimited
+// payloads land in b.data.
+func decodeField(b *buffer, data []byte) ([]byte, error) {
+	x, data, err := decodeVarint(data)
+	if err != nil {
+		return nil, err
+	}
+	b.field = int(x >> 3)
+	b.typ = int(x & 7)
+	b.data = nil
+	b.u64 = 0
+	switch b.typ {
+	case 0:
+		// Varint.
+		b.u64, data, err = decodeVarint(data)
+		if err != nil {
+			return nil, err
+		}
+	case 1:
+		// Fixed 64-bit.
+		if len(data) < 8 {
+			return nil, errors.New("not enough data")
+		}
+		b.u64 = le64(data[:8])
+		data = data[8:]
+	case 2:
+		// Length-delimited: strings, bytes, embedded messages, packed fields.
+		var n uint64
+		n, data, err = decodeVarint(data)
+		if err != nil {
+			return nil, err
+		}
+		if n > uint64(len(data)) {
+			return nil, errors.New("too much data")
+		}
+		b.data = data[:n]
+		data = data[n:]
+	case 5:
+		// Fixed 32-bit.
+		if len(data) < 4 {
+			return nil, errors.New("not enough data")
+		}
+		b.u64 = uint64(le32(data[:4]))
+		data = data[4:]
+	default:
+		// Wire types 3/4 (groups) and anything unknown are unsupported.
+		return nil, fmt.Errorf("unknown wire type: %d", b.typ)
+	}
+
+	return data, nil
+}
+
+// checkType reports an error unless the current field in b has the
+// expected wire type.
+func checkType(b *buffer, typ int) error {
+	if b.typ != typ {
+		return errors.New("type mismatch")
+	}
+	return nil
+}
+
+// decodeMessage decodes the length-delimited payload currently in b
+// into m, dispatching each field to m's decoder table. Fields with no
+// registered decoder are silently skipped.
+func decodeMessage(b *buffer, m message) error {
+	if err := checkType(b, 2); err != nil {
+		return err
+	}
+	dec := m.decoder()
+	data := b.data
+	for len(data) > 0 {
+		// pull varint field# + type
+		var err error
+		data, err = decodeField(b, data)
+		if err != nil {
+			return err
+		}
+		// Unknown field numbers are ignored for forward compatibility.
+		if b.field >= len(dec) || dec[b.field] == nil {
+			continue
+		}
+		if err := dec[b.field](b, m); err != nil {
+			return err
+		}
+	}
+	return nil
+}
+
+// decodeInt64 stores the current varint field of b into *x.
+func decodeInt64(b *buffer, x *int64) error {
+	if err := checkType(b, 0); err != nil {
+		return err
+	}
+	*x = int64(b.u64)
+	return nil
+}
+
+// decodeInt64s appends the current field to *x, accepting either
+// packed encoding (wire type 2) or a single unpacked varint.
+func decodeInt64s(b *buffer, x *[]int64) error {
+	if b.typ == 2 {
+		// Packed encoding
+		data := b.data
+		for len(data) > 0 {
+			var u uint64
+			var err error
+
+			if u, data, err = decodeVarint(data); err != nil {
+				return err
+			}
+			*x = append(*x, int64(u))
+		}
+		return nil
+	}
+	var i int64
+	if err := decodeInt64(b, &i); err != nil {
+		return err
+	}
+	*x = append(*x, i)
+	return nil
+}
+
+// decodeUint64 stores the current varint field of b into *x.
+func decodeUint64(b *buffer, x *uint64) error {
+	if err := checkType(b, 0); err != nil {
+		return err
+	}
+	*x = b.u64
+	return nil
+}
+
+// decodeUint64s appends the current field to *x, accepting either
+// packed encoding (wire type 2) or a single unpacked varint.
+func decodeUint64s(b *buffer, x *[]uint64) error {
+	if b.typ == 2 {
+		data := b.data
+		// Packed encoding
+		for len(data) > 0 {
+			var u uint64
+			var err error
+
+			if u, data, err = decodeVarint(data); err != nil {
+				return err
+			}
+			*x = append(*x, u)
+		}
+		return nil
+	}
+	var u uint64
+	if err := decodeUint64(b, &u); err != nil {
+		return err
+	}
+	*x = append(*x, u)
+	return nil
+}
+
+// decodeString stores the current length-delimited field of b into *x.
+// The conversion copies, so *x stays valid after b.data is reused.
+func decodeString(b *buffer, x *string) error {
+	if err := checkType(b, 2); err != nil {
+		return err
+	}
+	*x = string(b.data)
+	return nil
+}
+
+// decodeStrings appends the current string field to *x.
+func decodeStrings(b *buffer, x *[]string) error {
+	var s string
+	if err := decodeString(b, &s); err != nil {
+		return err
+	}
+	*x = append(*x, s)
+	return nil
+}
+
+// decodeBool stores the current varint field of b into *x; any nonzero
+// value decodes as true.
+func decodeBool(b *buffer, x *bool) error {
+	if err := checkType(b, 0); err != nil {
+		return err
+	}
+	if int64(b.u64) == 0 {
+		*x = false
+	} else {
+		*x = true
+	}
+	return nil
+}
diff --git a/vendor/github.com/google/pprof/profile/prune.go b/vendor/github.com/google/pprof/profile/prune.go
new file mode 100644
index 000000000..b2f9fd546
--- /dev/null
+++ b/vendor/github.com/google/pprof/profile/prune.go
@@ -0,0 +1,194 @@
+// Copyright 2014 Google Inc. All Rights Reserved.
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+// Implements methods to remove frames from profiles.
+
+package profile
+
+import (
+ "fmt"
+ "regexp"
+ "strings"
+)
+
+var (
+	// reservedNames lists function-name substrings that contain '(' but
+	// must not be mistaken for the start of an argument list.
+	reservedNames = []string{"(anonymous namespace)", "operator()"}
+	// bracketRx matches either a reserved name or a bare '(', so that
+	// simplifyFunc can find the first real argument-list paren.
+	bracketRx = func() *regexp.Regexp {
+		var quotedNames []string
+		for _, name := range append(reservedNames, "(") {
+			quotedNames = append(quotedNames, regexp.QuoteMeta(name))
+		}
+		return regexp.MustCompile(strings.Join(quotedNames, "|"))
+	}()
+)
+
+// simplifyFunc does some primitive simplification of function names.
+func simplifyFunc(f string) string {
+	// Account for leading '.' on the PPC ELF v1 ABI.
+	funcName := strings.TrimPrefix(f, ".")
+	// Account for unsimplified names -- try to remove the argument list by trimming
+	// starting from the first '(', but skipping reserved names that have '('.
+	for _, ind := range bracketRx.FindAllStringSubmatchIndex(funcName, -1) {
+		foundReserved := false
+		for _, res := range reservedNames {
+			if funcName[ind[0]:ind[1]] == res {
+				foundReserved = true
+				break
+			}
+		}
+		// The first match that is not a reserved name is the start of the
+		// argument list; everything from there on is dropped.
+		if !foundReserved {
+			funcName = funcName[:ind[0]]
+			break
+		}
+	}
+	return funcName
+}
+
+// Prune removes all nodes beneath a node matching dropRx, and not
+// matching keepRx. If the root node of a Sample matches, the sample
+// will have an empty stack.
+func (p *Profile) Prune(dropRx, keepRx *regexp.Regexp) {
+	prune := make(map[uint64]bool)        // location IDs to drop entirely
+	pruneBeneath := make(map[uint64]bool) // location IDs whose callees are dropped
+
+	// simplifyFunc can be expensive, so cache results.
+	// Note that the same function name can be encountered many times due
+	// to different lines and addresses in the same function.
+	pruneCache := map[string]bool{} // Map from function to whether or not to prune
+	pruneFromHere := func(s string) bool {
+		if r, ok := pruneCache[s]; ok {
+			return r
+		}
+		funcName := simplifyFunc(s)
+		// keepRx overrides dropRx: a name matching both is kept.
+		if dropRx.MatchString(funcName) {
+			if keepRx == nil || !keepRx.MatchString(funcName) {
+				pruneCache[s] = true
+				return true
+			}
+		}
+		pruneCache[s] = false
+		return false
+	}
+
+	for _, loc := range p.Location {
+		var i int
+		// Scan inlined frames from outermost (last) to innermost.
+		for i = len(loc.Line) - 1; i >= 0; i-- {
+			if fn := loc.Line[i].Function; fn != nil && fn.Name != "" {
+				if pruneFromHere(fn.Name) {
+					break
+				}
+			}
+		}
+
+		if i >= 0 {
+			// Found matching entry to prune.
+			pruneBeneath[loc.ID] = true
+
+			// Remove the matching location.
+			if i == len(loc.Line)-1 {
+				// Matched the top entry: prune the whole location.
+				prune[loc.ID] = true
+			} else {
+				loc.Line = loc.Line[i+1:]
+			}
+		}
+	}
+
+	// Prune locs from each Sample
+	for _, sample := range p.Sample {
+		// Scan from the root to the leaves to find the prune location.
+		// Do not prune frames before the first user frame, to avoid
+		// pruning everything.
+		foundUser := false
+		for i := len(sample.Location) - 1; i >= 0; i-- {
+			id := sample.Location[i].ID
+			if !prune[id] && !pruneBeneath[id] {
+				foundUser = true
+				continue
+			}
+			if !foundUser {
+				continue
+			}
+			if prune[id] {
+				// Drop this frame and everything beneath it.
+				sample.Location = sample.Location[i+1:]
+				break
+			}
+			if pruneBeneath[id] {
+				// Keep this frame, drop everything beneath it.
+				sample.Location = sample.Location[i:]
+				break
+			}
+		}
+	}
+}
+
+// RemoveUninteresting prunes and elides profiles using built-in
+// tables of uninteresting function names. The patterns come from the
+// profile's own DropFrames/KeepFrames fields and are anchored to match
+// whole function names.
+func (p *Profile) RemoveUninteresting() error {
+	var keep, drop *regexp.Regexp
+	var err error
+
+	if p.DropFrames != "" {
+		if drop, err = regexp.Compile("^(" + p.DropFrames + ")$"); err != nil {
+			return fmt.Errorf("failed to compile regexp %s: %v", p.DropFrames, err)
+		}
+		// KeepFrames is only meaningful together with DropFrames.
+		if p.KeepFrames != "" {
+			if keep, err = regexp.Compile("^(" + p.KeepFrames + ")$"); err != nil {
+				return fmt.Errorf("failed to compile regexp %s: %v", p.KeepFrames, err)
+			}
+		}
+		p.Prune(drop, keep)
+	}
+	return nil
+}
+
+// PruneFrom removes all nodes beneath the lowest node matching dropRx, not including itself.
+//
+// Please see the example below to understand this method as well as
+// the difference from Prune method.
+//
+// A sample contains Location of [A,B,C,B,D] where D is the top frame and there's no inline.
+//
+// PruneFrom(A) returns [A,B,C,B,D] because there's no node beneath A.
+// Prune(A, nil) returns [B,C,B,D] by removing A itself.
+//
+// PruneFrom(B) returns [B,C,B,D] by removing all nodes beneath the first B when scanning from the bottom.
+// Prune(B, nil) returns [D] because a matching node is found by scanning from the root.
+func (p *Profile) PruneFrom(dropRx *regexp.Regexp) {
+	pruneBeneath := make(map[uint64]bool)
+
+	for _, loc := range p.Location {
+		// Scan inlined frames innermost-first; the first match marks
+		// this location as a prune point.
+		for i := 0; i < len(loc.Line); i++ {
+			if fn := loc.Line[i].Function; fn != nil && fn.Name != "" {
+				funcName := simplifyFunc(fn.Name)
+				if dropRx.MatchString(funcName) {
+					// Found matching entry to prune.
+					pruneBeneath[loc.ID] = true
+					loc.Line = loc.Line[i:]
+					break
+				}
+			}
+		}
+	}
+
+	// Prune locs from each Sample
+	for _, sample := range p.Sample {
+		// Scan from the bottom leaf to the root to find the prune location.
+		for i, loc := range sample.Location {
+			if pruneBeneath[loc.ID] {
+				// Keep the matching frame and everything above it.
+				sample.Location = sample.Location[i:]
+				break
+			}
+		}
+	}
+}
diff --git a/vendor/github.com/hashicorp/errwrap/errwrap.go b/vendor/github.com/hashicorp/errwrap/errwrap.go
index a733bef18..44e368e56 100644
--- a/vendor/github.com/hashicorp/errwrap/errwrap.go
+++ b/vendor/github.com/hashicorp/errwrap/errwrap.go
@@ -44,6 +44,8 @@ func Wrap(outer, inner error) error {
//
// format is the format of the error message. The string '{{err}}' will
// be replaced with the original error message.
+//
+// Deprecated: Use fmt.Errorf()
func Wrapf(format string, err error) error {
outerMsg := ""
if err != nil {
@@ -148,6 +150,9 @@ func Walk(err error, cb WalkFunc) {
for _, err := range e.WrappedErrors() {
Walk(err, cb)
}
+ case interface{ Unwrap() error }:
+ cb(err)
+ Walk(e.Unwrap(), cb)
default:
cb(err)
}
@@ -167,3 +172,7 @@ func (w *wrappedError) Error() string {
func (w *wrappedError) WrappedErrors() []error {
return []error{w.Outer, w.Inner}
}
+
+func (w *wrappedError) Unwrap() error {
+ return w.Inner
+}
diff --git a/vendor/github.com/huin/goupnp/README.md b/vendor/github.com/huin/goupnp/README.md
index cd837978e..49bd03888 100644
--- a/vendor/github.com/huin/goupnp/README.md
+++ b/vendor/github.com/huin/goupnp/README.md
@@ -63,3 +63,14 @@ func init() {
goupnp.CharsetReaderFault = charset.NewReaderLabel
}
```
+
+## `v2alpha`
+
+The `v2alpha` subdirectory contains experimental work on a version 2 API. The plan is to eventually
+create a `v2` subdirectory with a stable version of the version 2 API. The v1 API will stay where
+it currently is.
+
+> NOTE:
+>
+> * `v2alpha` will be deleted one day, so don't rely on it always existing.
+> * `v2alpha` will have API breaking changes, even with itself.
diff --git a/vendor/github.com/huin/goupnp/dcps/internetgateway1/gen.go b/vendor/github.com/huin/goupnp/dcps/internetgateway1/gen.go
index 2b146a345..e6af2bb13 100644
--- a/vendor/github.com/huin/goupnp/dcps/internetgateway1/gen.go
+++ b/vendor/github.com/huin/goupnp/dcps/internetgateway1/gen.go
@@ -1,2 +1,2 @@
-//go:generate goupnpdcpgen -dcp_name internetgateway1
+//go:generate goupnpdcpgen -dcp_name internetgateway1 -code_tmpl_file ../dcps.gotemplate
package internetgateway1
diff --git a/vendor/github.com/huin/goupnp/dcps/internetgateway1/internetgateway1.go b/vendor/github.com/huin/goupnp/dcps/internetgateway1/internetgateway1.go
index 942bb0921..098083b02 100644
--- a/vendor/github.com/huin/goupnp/dcps/internetgateway1/internetgateway1.go
+++ b/vendor/github.com/huin/goupnp/dcps/internetgateway1/internetgateway1.go
@@ -49,35 +49,47 @@ type LANHostConfigManagement1 struct {
goupnp.ServiceClient
}
-// NewLANHostConfigManagement1Clients discovers instances of the service on the network,
+// NewLANHostConfigManagement1ClientsCtx discovers instances of the service on the network,
// and returns clients to any that are found. errors will contain an error for
// any devices that replied but which could not be queried, and err will be set
// if the discovery process failed outright.
//
// This is a typical entry calling point into this package.
-func NewLANHostConfigManagement1Clients() (clients []*LANHostConfigManagement1, errors []error, err error) {
+func NewLANHostConfigManagement1ClientsCtx(ctx context.Context) (clients []*LANHostConfigManagement1, errors []error, err error) {
var genericClients []goupnp.ServiceClient
- if genericClients, errors, err = goupnp.NewServiceClients(URN_LANHostConfigManagement_1); err != nil {
+ if genericClients, errors, err = goupnp.NewServiceClientsCtx(ctx, URN_LANHostConfigManagement_1); err != nil {
return
}
clients = newLANHostConfigManagement1ClientsFromGenericClients(genericClients)
return
}
-// NewLANHostConfigManagement1ClientsByURL discovers instances of the service at the given
+// NewLANHostConfigManagement1Clients is the legacy version of NewLANHostConfigManagement1ClientsCtx, but uses
+// context.Background() as the context.
+func NewLANHostConfigManagement1Clients() (clients []*LANHostConfigManagement1, errors []error, err error) {
+ return NewLANHostConfigManagement1ClientsCtx(context.Background())
+}
+
+// NewLANHostConfigManagement1ClientsByURLCtx discovers instances of the service at the given
// URL, and returns clients to any that are found. An error is returned if
// there was an error probing the service.
//
// This is a typical entry calling point into this package when reusing an
// previously discovered service URL.
-func NewLANHostConfigManagement1ClientsByURL(loc *url.URL) ([]*LANHostConfigManagement1, error) {
- genericClients, err := goupnp.NewServiceClientsByURL(loc, URN_LANHostConfigManagement_1)
+func NewLANHostConfigManagement1ClientsByURLCtx(ctx context.Context, loc *url.URL) ([]*LANHostConfigManagement1, error) {
+ genericClients, err := goupnp.NewServiceClientsByURLCtx(ctx, loc, URN_LANHostConfigManagement_1)
if err != nil {
return nil, err
}
return newLANHostConfigManagement1ClientsFromGenericClients(genericClients), nil
}
+// NewLANHostConfigManagement1ClientsByURL is the legacy version of NewLANHostConfigManagement1ClientsByURLCtx, but uses
+// context.Background() as the context.
+func NewLANHostConfigManagement1ClientsByURL(loc *url.URL) ([]*LANHostConfigManagement1, error) {
+ return NewLANHostConfigManagement1ClientsByURLCtx(context.Background(), loc)
+}
+
// NewLANHostConfigManagement1ClientsFromRootDevice discovers instances of the service in
// a given root device, and returns clients to any that are found. An error is
// returned if there was not at least one instance of the service within the
@@ -798,35 +810,47 @@ type Layer3Forwarding1 struct {
goupnp.ServiceClient
}
-// NewLayer3Forwarding1Clients discovers instances of the service on the network,
+// NewLayer3Forwarding1ClientsCtx discovers instances of the service on the network,
// and returns clients to any that are found. errors will contain an error for
// any devices that replied but which could not be queried, and err will be set
// if the discovery process failed outright.
//
// This is a typical entry calling point into this package.
-func NewLayer3Forwarding1Clients() (clients []*Layer3Forwarding1, errors []error, err error) {
+func NewLayer3Forwarding1ClientsCtx(ctx context.Context) (clients []*Layer3Forwarding1, errors []error, err error) {
var genericClients []goupnp.ServiceClient
- if genericClients, errors, err = goupnp.NewServiceClients(URN_Layer3Forwarding_1); err != nil {
+ if genericClients, errors, err = goupnp.NewServiceClientsCtx(ctx, URN_Layer3Forwarding_1); err != nil {
return
}
clients = newLayer3Forwarding1ClientsFromGenericClients(genericClients)
return
}
-// NewLayer3Forwarding1ClientsByURL discovers instances of the service at the given
+// NewLayer3Forwarding1Clients is the legacy version of NewLayer3Forwarding1ClientsCtx, but uses
+// context.Background() as the context.
+func NewLayer3Forwarding1Clients() (clients []*Layer3Forwarding1, errors []error, err error) {
+ return NewLayer3Forwarding1ClientsCtx(context.Background())
+}
+
+// NewLayer3Forwarding1ClientsByURLCtx discovers instances of the service at the given
// URL, and returns clients to any that are found. An error is returned if
// there was an error probing the service.
//
// This is a typical entry calling point into this package when reusing an
// previously discovered service URL.
-func NewLayer3Forwarding1ClientsByURL(loc *url.URL) ([]*Layer3Forwarding1, error) {
- genericClients, err := goupnp.NewServiceClientsByURL(loc, URN_Layer3Forwarding_1)
+func NewLayer3Forwarding1ClientsByURLCtx(ctx context.Context, loc *url.URL) ([]*Layer3Forwarding1, error) {
+ genericClients, err := goupnp.NewServiceClientsByURLCtx(ctx, loc, URN_Layer3Forwarding_1)
if err != nil {
return nil, err
}
return newLayer3Forwarding1ClientsFromGenericClients(genericClients), nil
}
+// NewLayer3Forwarding1ClientsByURL is the legacy version of NewLayer3Forwarding1ClientsByURLCtx, but uses
+// context.Background() as the context.
+func NewLayer3Forwarding1ClientsByURL(loc *url.URL) ([]*Layer3Forwarding1, error) {
+ return NewLayer3Forwarding1ClientsByURLCtx(context.Background(), loc)
+}
+
// NewLayer3Forwarding1ClientsFromRootDevice discovers instances of the service in
// a given root device, and returns clients to any that are found. An error is
// returned if there was not at least one instance of the service within the
@@ -929,35 +953,47 @@ type WANCableLinkConfig1 struct {
goupnp.ServiceClient
}
-// NewWANCableLinkConfig1Clients discovers instances of the service on the network,
+// NewWANCableLinkConfig1ClientsCtx discovers instances of the service on the network,
// and returns clients to any that are found. errors will contain an error for
// any devices that replied but which could not be queried, and err will be set
// if the discovery process failed outright.
//
// This is a typical entry calling point into this package.
-func NewWANCableLinkConfig1Clients() (clients []*WANCableLinkConfig1, errors []error, err error) {
+func NewWANCableLinkConfig1ClientsCtx(ctx context.Context) (clients []*WANCableLinkConfig1, errors []error, err error) {
var genericClients []goupnp.ServiceClient
- if genericClients, errors, err = goupnp.NewServiceClients(URN_WANCableLinkConfig_1); err != nil {
+ if genericClients, errors, err = goupnp.NewServiceClientsCtx(ctx, URN_WANCableLinkConfig_1); err != nil {
return
}
clients = newWANCableLinkConfig1ClientsFromGenericClients(genericClients)
return
}
-// NewWANCableLinkConfig1ClientsByURL discovers instances of the service at the given
+// NewWANCableLinkConfig1Clients is the legacy version of NewWANCableLinkConfig1ClientsCtx, but uses
+// context.Background() as the context.
+func NewWANCableLinkConfig1Clients() (clients []*WANCableLinkConfig1, errors []error, err error) {
+ return NewWANCableLinkConfig1ClientsCtx(context.Background())
+}
+
+// NewWANCableLinkConfig1ClientsByURLCtx discovers instances of the service at the given
// URL, and returns clients to any that are found. An error is returned if
// there was an error probing the service.
//
// This is a typical entry calling point into this package when reusing an
// previously discovered service URL.
-func NewWANCableLinkConfig1ClientsByURL(loc *url.URL) ([]*WANCableLinkConfig1, error) {
- genericClients, err := goupnp.NewServiceClientsByURL(loc, URN_WANCableLinkConfig_1)
+func NewWANCableLinkConfig1ClientsByURLCtx(ctx context.Context, loc *url.URL) ([]*WANCableLinkConfig1, error) {
+ genericClients, err := goupnp.NewServiceClientsByURLCtx(ctx, loc, URN_WANCableLinkConfig_1)
if err != nil {
return nil, err
}
return newWANCableLinkConfig1ClientsFromGenericClients(genericClients), nil
}
+// NewWANCableLinkConfig1ClientsByURL is the legacy version of NewWANCableLinkConfig1ClientsByURLCtx, but uses
+// context.Background() as the context.
+func NewWANCableLinkConfig1ClientsByURL(loc *url.URL) ([]*WANCableLinkConfig1, error) {
+ return NewWANCableLinkConfig1ClientsByURLCtx(context.Background(), loc)
+}
+
// NewWANCableLinkConfig1ClientsFromRootDevice discovers instances of the service in
// a given root device, and returns clients to any that are found. An error is
// returned if there was not at least one instance of the service within the
@@ -1347,35 +1383,47 @@ type WANCommonInterfaceConfig1 struct {
goupnp.ServiceClient
}
-// NewWANCommonInterfaceConfig1Clients discovers instances of the service on the network,
+// NewWANCommonInterfaceConfig1ClientsCtx discovers instances of the service on the network,
// and returns clients to any that are found. errors will contain an error for
// any devices that replied but which could not be queried, and err will be set
// if the discovery process failed outright.
//
// This is a typical entry calling point into this package.
-func NewWANCommonInterfaceConfig1Clients() (clients []*WANCommonInterfaceConfig1, errors []error, err error) {
+func NewWANCommonInterfaceConfig1ClientsCtx(ctx context.Context) (clients []*WANCommonInterfaceConfig1, errors []error, err error) {
var genericClients []goupnp.ServiceClient
- if genericClients, errors, err = goupnp.NewServiceClients(URN_WANCommonInterfaceConfig_1); err != nil {
+ if genericClients, errors, err = goupnp.NewServiceClientsCtx(ctx, URN_WANCommonInterfaceConfig_1); err != nil {
return
}
clients = newWANCommonInterfaceConfig1ClientsFromGenericClients(genericClients)
return
}
-// NewWANCommonInterfaceConfig1ClientsByURL discovers instances of the service at the given
+// NewWANCommonInterfaceConfig1Clients is the legacy version of NewWANCommonInterfaceConfig1ClientsCtx, but uses
+// context.Background() as the context.
+func NewWANCommonInterfaceConfig1Clients() (clients []*WANCommonInterfaceConfig1, errors []error, err error) {
+ return NewWANCommonInterfaceConfig1ClientsCtx(context.Background())
+}
+
+// NewWANCommonInterfaceConfig1ClientsByURLCtx discovers instances of the service at the given
// URL, and returns clients to any that are found. An error is returned if
// there was an error probing the service.
//
// This is a typical entry calling point into this package when reusing an
// previously discovered service URL.
-func NewWANCommonInterfaceConfig1ClientsByURL(loc *url.URL) ([]*WANCommonInterfaceConfig1, error) {
- genericClients, err := goupnp.NewServiceClientsByURL(loc, URN_WANCommonInterfaceConfig_1)
+func NewWANCommonInterfaceConfig1ClientsByURLCtx(ctx context.Context, loc *url.URL) ([]*WANCommonInterfaceConfig1, error) {
+ genericClients, err := goupnp.NewServiceClientsByURLCtx(ctx, loc, URN_WANCommonInterfaceConfig_1)
if err != nil {
return nil, err
}
return newWANCommonInterfaceConfig1ClientsFromGenericClients(genericClients), nil
}
+// NewWANCommonInterfaceConfig1ClientsByURL is the legacy version of NewWANCommonInterfaceConfig1ClientsByURLCtx, but uses
+// context.Background() as the context.
+func NewWANCommonInterfaceConfig1ClientsByURL(loc *url.URL) ([]*WANCommonInterfaceConfig1, error) {
+ return NewWANCommonInterfaceConfig1ClientsByURLCtx(context.Background(), loc)
+}
+
// NewWANCommonInterfaceConfig1ClientsFromRootDevice discovers instances of the service in
// a given root device, and returns clients to any that are found. An error is
// returned if there was not at least one instance of the service within the
@@ -1784,35 +1832,47 @@ type WANDSLLinkConfig1 struct {
goupnp.ServiceClient
}
-// NewWANDSLLinkConfig1Clients discovers instances of the service on the network,
+// NewWANDSLLinkConfig1ClientsCtx discovers instances of the service on the network,
// and returns clients to any that are found. errors will contain an error for
// any devices that replied but which could not be queried, and err will be set
// if the discovery process failed outright.
//
// This is a typical entry calling point into this package.
-func NewWANDSLLinkConfig1Clients() (clients []*WANDSLLinkConfig1, errors []error, err error) {
+func NewWANDSLLinkConfig1ClientsCtx(ctx context.Context) (clients []*WANDSLLinkConfig1, errors []error, err error) {
var genericClients []goupnp.ServiceClient
- if genericClients, errors, err = goupnp.NewServiceClients(URN_WANDSLLinkConfig_1); err != nil {
+ if genericClients, errors, err = goupnp.NewServiceClientsCtx(ctx, URN_WANDSLLinkConfig_1); err != nil {
return
}
clients = newWANDSLLinkConfig1ClientsFromGenericClients(genericClients)
return
}
-// NewWANDSLLinkConfig1ClientsByURL discovers instances of the service at the given
+// NewWANDSLLinkConfig1Clients is the legacy version of NewWANDSLLinkConfig1ClientsCtx, but uses
+// context.Background() as the context.
+func NewWANDSLLinkConfig1Clients() (clients []*WANDSLLinkConfig1, errors []error, err error) {
+ return NewWANDSLLinkConfig1ClientsCtx(context.Background())
+}
+
+// NewWANDSLLinkConfig1ClientsByURLCtx discovers instances of the service at the given
// URL, and returns clients to any that are found. An error is returned if
// there was an error probing the service.
//
// This is a typical entry calling point into this package when reusing an
// previously discovered service URL.
-func NewWANDSLLinkConfig1ClientsByURL(loc *url.URL) ([]*WANDSLLinkConfig1, error) {
- genericClients, err := goupnp.NewServiceClientsByURL(loc, URN_WANDSLLinkConfig_1)
+func NewWANDSLLinkConfig1ClientsByURLCtx(ctx context.Context, loc *url.URL) ([]*WANDSLLinkConfig1, error) {
+ genericClients, err := goupnp.NewServiceClientsByURLCtx(ctx, loc, URN_WANDSLLinkConfig_1)
if err != nil {
return nil, err
}
return newWANDSLLinkConfig1ClientsFromGenericClients(genericClients), nil
}
+// NewWANDSLLinkConfig1ClientsByURL is the legacy version of NewWANDSLLinkConfig1ClientsByURLCtx, but uses
+// context.Background() as the context.
+func NewWANDSLLinkConfig1ClientsByURL(loc *url.URL) ([]*WANDSLLinkConfig1, error) {
+ return NewWANDSLLinkConfig1ClientsByURLCtx(context.Background(), loc)
+}
+
// NewWANDSLLinkConfig1ClientsFromRootDevice discovers instances of the service in
// a given root device, and returns clients to any that are found. An error is
// returned if there was not at least one instance of the service within the
@@ -2204,35 +2264,47 @@ type WANEthernetLinkConfig1 struct {
goupnp.ServiceClient
}
-// NewWANEthernetLinkConfig1Clients discovers instances of the service on the network,
+// NewWANEthernetLinkConfig1ClientsCtx discovers instances of the service on the network,
// and returns clients to any that are found. errors will contain an error for
// any devices that replied but which could not be queried, and err will be set
// if the discovery process failed outright.
//
// This is a typical entry calling point into this package.
-func NewWANEthernetLinkConfig1Clients() (clients []*WANEthernetLinkConfig1, errors []error, err error) {
+func NewWANEthernetLinkConfig1ClientsCtx(ctx context.Context) (clients []*WANEthernetLinkConfig1, errors []error, err error) {
var genericClients []goupnp.ServiceClient
- if genericClients, errors, err = goupnp.NewServiceClients(URN_WANEthernetLinkConfig_1); err != nil {
+ if genericClients, errors, err = goupnp.NewServiceClientsCtx(ctx, URN_WANEthernetLinkConfig_1); err != nil {
return
}
clients = newWANEthernetLinkConfig1ClientsFromGenericClients(genericClients)
return
}
-// NewWANEthernetLinkConfig1ClientsByURL discovers instances of the service at the given
+// NewWANEthernetLinkConfig1Clients is the legacy version of NewWANEthernetLinkConfig1ClientsCtx, but uses
+// context.Background() as the context.
+func NewWANEthernetLinkConfig1Clients() (clients []*WANEthernetLinkConfig1, errors []error, err error) {
+ return NewWANEthernetLinkConfig1ClientsCtx(context.Background())
+}
+
+// NewWANEthernetLinkConfig1ClientsByURLCtx discovers instances of the service at the given
// URL, and returns clients to any that are found. An error is returned if
// there was an error probing the service.
//
// This is a typical entry calling point into this package when reusing an
// previously discovered service URL.
-func NewWANEthernetLinkConfig1ClientsByURL(loc *url.URL) ([]*WANEthernetLinkConfig1, error) {
- genericClients, err := goupnp.NewServiceClientsByURL(loc, URN_WANEthernetLinkConfig_1)
+func NewWANEthernetLinkConfig1ClientsByURLCtx(ctx context.Context, loc *url.URL) ([]*WANEthernetLinkConfig1, error) {
+ genericClients, err := goupnp.NewServiceClientsByURLCtx(ctx, loc, URN_WANEthernetLinkConfig_1)
if err != nil {
return nil, err
}
return newWANEthernetLinkConfig1ClientsFromGenericClients(genericClients), nil
}
+// NewWANEthernetLinkConfig1ClientsByURL is the legacy version of NewWANEthernetLinkConfig1ClientsByURLCtx, but uses
+// context.Background() as the context.
+func NewWANEthernetLinkConfig1ClientsByURL(loc *url.URL) ([]*WANEthernetLinkConfig1, error) {
+ return NewWANEthernetLinkConfig1ClientsByURLCtx(context.Background(), loc)
+}
+
// NewWANEthernetLinkConfig1ClientsFromRootDevice discovers instances of the service in
// a given root device, and returns clients to any that are found. An error is
// returned if there was not at least one instance of the service within the
@@ -2302,35 +2374,47 @@ type WANIPConnection1 struct {
goupnp.ServiceClient
}
-// NewWANIPConnection1Clients discovers instances of the service on the network,
+// NewWANIPConnection1ClientsCtx discovers instances of the service on the network,
// and returns clients to any that are found. errors will contain an error for
// any devices that replied but which could not be queried, and err will be set
// if the discovery process failed outright.
//
// This is a typical entry calling point into this package.
-func NewWANIPConnection1Clients() (clients []*WANIPConnection1, errors []error, err error) {
+func NewWANIPConnection1ClientsCtx(ctx context.Context) (clients []*WANIPConnection1, errors []error, err error) {
var genericClients []goupnp.ServiceClient
- if genericClients, errors, err = goupnp.NewServiceClients(URN_WANIPConnection_1); err != nil {
+ if genericClients, errors, err = goupnp.NewServiceClientsCtx(ctx, URN_WANIPConnection_1); err != nil {
return
}
clients = newWANIPConnection1ClientsFromGenericClients(genericClients)
return
}
-// NewWANIPConnection1ClientsByURL discovers instances of the service at the given
+// NewWANIPConnection1Clients is the legacy version of NewWANIPConnection1ClientsCtx, but uses
+// context.Background() as the context.
+func NewWANIPConnection1Clients() (clients []*WANIPConnection1, errors []error, err error) {
+ return NewWANIPConnection1ClientsCtx(context.Background())
+}
+
+// NewWANIPConnection1ClientsByURLCtx discovers instances of the service at the given
// URL, and returns clients to any that are found. An error is returned if
// there was an error probing the service.
//
// This is a typical entry calling point into this package when reusing an
// previously discovered service URL.
-func NewWANIPConnection1ClientsByURL(loc *url.URL) ([]*WANIPConnection1, error) {
- genericClients, err := goupnp.NewServiceClientsByURL(loc, URN_WANIPConnection_1)
+func NewWANIPConnection1ClientsByURLCtx(ctx context.Context, loc *url.URL) ([]*WANIPConnection1, error) {
+ genericClients, err := goupnp.NewServiceClientsByURLCtx(ctx, loc, URN_WANIPConnection_1)
if err != nil {
return nil, err
}
return newWANIPConnection1ClientsFromGenericClients(genericClients), nil
}
+// NewWANIPConnection1ClientsByURL is the legacy version of NewWANIPConnection1ClientsByURLCtx, but uses
+// context.Background() as the context.
+func NewWANIPConnection1ClientsByURL(loc *url.URL) ([]*WANIPConnection1, error) {
+ return NewWANIPConnection1ClientsByURLCtx(context.Background(), loc)
+}
+
// NewWANIPConnection1ClientsFromRootDevice discovers instances of the service in
// a given root device, and returns clients to any that are found. An error is
// returned if there was not at least one instance of the service within the
@@ -3148,35 +3232,47 @@ type WANPOTSLinkConfig1 struct {
goupnp.ServiceClient
}
-// NewWANPOTSLinkConfig1Clients discovers instances of the service on the network,
+// NewWANPOTSLinkConfig1ClientsCtx discovers instances of the service on the network,
// and returns clients to any that are found. errors will contain an error for
// any devices that replied but which could not be queried, and err will be set
// if the discovery process failed outright.
//
// This is a typical entry calling point into this package.
-func NewWANPOTSLinkConfig1Clients() (clients []*WANPOTSLinkConfig1, errors []error, err error) {
+func NewWANPOTSLinkConfig1ClientsCtx(ctx context.Context) (clients []*WANPOTSLinkConfig1, errors []error, err error) {
var genericClients []goupnp.ServiceClient
- if genericClients, errors, err = goupnp.NewServiceClients(URN_WANPOTSLinkConfig_1); err != nil {
+ if genericClients, errors, err = goupnp.NewServiceClientsCtx(ctx, URN_WANPOTSLinkConfig_1); err != nil {
return
}
clients = newWANPOTSLinkConfig1ClientsFromGenericClients(genericClients)
return
}
-// NewWANPOTSLinkConfig1ClientsByURL discovers instances of the service at the given
+// NewWANPOTSLinkConfig1Clients is the legacy version of NewWANPOTSLinkConfig1ClientsCtx, but uses
+// context.Background() as the context.
+func NewWANPOTSLinkConfig1Clients() (clients []*WANPOTSLinkConfig1, errors []error, err error) {
+ return NewWANPOTSLinkConfig1ClientsCtx(context.Background())
+}
+
+// NewWANPOTSLinkConfig1ClientsByURLCtx discovers instances of the service at the given
// URL, and returns clients to any that are found. An error is returned if
// there was an error probing the service.
//
// This is a typical entry calling point into this package when reusing an
// previously discovered service URL.
-func NewWANPOTSLinkConfig1ClientsByURL(loc *url.URL) ([]*WANPOTSLinkConfig1, error) {
- genericClients, err := goupnp.NewServiceClientsByURL(loc, URN_WANPOTSLinkConfig_1)
+func NewWANPOTSLinkConfig1ClientsByURLCtx(ctx context.Context, loc *url.URL) ([]*WANPOTSLinkConfig1, error) {
+ genericClients, err := goupnp.NewServiceClientsByURLCtx(ctx, loc, URN_WANPOTSLinkConfig_1)
if err != nil {
return nil, err
}
return newWANPOTSLinkConfig1ClientsFromGenericClients(genericClients), nil
}
+// NewWANPOTSLinkConfig1ClientsByURL is the legacy version of NewWANPOTSLinkConfig1ClientsByURLCtx, but uses
+// context.Background() as the context.
+func NewWANPOTSLinkConfig1ClientsByURL(loc *url.URL) ([]*WANPOTSLinkConfig1, error) {
+ return NewWANPOTSLinkConfig1ClientsByURLCtx(context.Background(), loc)
+}
+
// NewWANPOTSLinkConfig1ClientsFromRootDevice discovers instances of the service in
// a given root device, and returns clients to any that are found. An error is
// returned if there was not at least one instance of the service within the
@@ -3559,35 +3655,47 @@ type WANPPPConnection1 struct {
goupnp.ServiceClient
}
-// NewWANPPPConnection1Clients discovers instances of the service on the network,
+// NewWANPPPConnection1ClientsCtx discovers instances of the service on the network,
// and returns clients to any that are found. errors will contain an error for
// any devices that replied but which could not be queried, and err will be set
// if the discovery process failed outright.
//
// This is a typical entry calling point into this package.
-func NewWANPPPConnection1Clients() (clients []*WANPPPConnection1, errors []error, err error) {
+func NewWANPPPConnection1ClientsCtx(ctx context.Context) (clients []*WANPPPConnection1, errors []error, err error) {
var genericClients []goupnp.ServiceClient
- if genericClients, errors, err = goupnp.NewServiceClients(URN_WANPPPConnection_1); err != nil {
+ if genericClients, errors, err = goupnp.NewServiceClientsCtx(ctx, URN_WANPPPConnection_1); err != nil {
return
}
clients = newWANPPPConnection1ClientsFromGenericClients(genericClients)
return
}
-// NewWANPPPConnection1ClientsByURL discovers instances of the service at the given
+// NewWANPPPConnection1Clients is the legacy version of NewWANPPPConnection1ClientsCtx, but uses
+// context.Background() as the context.
+func NewWANPPPConnection1Clients() (clients []*WANPPPConnection1, errors []error, err error) {
+ return NewWANPPPConnection1ClientsCtx(context.Background())
+}
+
+// NewWANPPPConnection1ClientsByURLCtx discovers instances of the service at the given
// URL, and returns clients to any that are found. An error is returned if
// there was an error probing the service.
//
// This is a typical entry calling point into this package when reusing an
// previously discovered service URL.
-func NewWANPPPConnection1ClientsByURL(loc *url.URL) ([]*WANPPPConnection1, error) {
- genericClients, err := goupnp.NewServiceClientsByURL(loc, URN_WANPPPConnection_1)
+func NewWANPPPConnection1ClientsByURLCtx(ctx context.Context, loc *url.URL) ([]*WANPPPConnection1, error) {
+ genericClients, err := goupnp.NewServiceClientsByURLCtx(ctx, loc, URN_WANPPPConnection_1)
if err != nil {
return nil, err
}
return newWANPPPConnection1ClientsFromGenericClients(genericClients), nil
}
+// NewWANPPPConnection1ClientsByURL is the legacy version of NewWANPPPConnection1ClientsByURLCtx, but uses
+// context.Background() as the context.
+func NewWANPPPConnection1ClientsByURL(loc *url.URL) ([]*WANPPPConnection1, error) {
+ return NewWANPPPConnection1ClientsByURLCtx(context.Background(), loc)
+}
+
// NewWANPPPConnection1ClientsFromRootDevice discovers instances of the service in
// a given root device, and returns clients to any that are found. An error is
// returned if there was not at least one instance of the service within the
diff --git a/vendor/github.com/huin/goupnp/dcps/internetgateway2/gen.go b/vendor/github.com/huin/goupnp/dcps/internetgateway2/gen.go
index 752058b41..88f8d77ad 100644
--- a/vendor/github.com/huin/goupnp/dcps/internetgateway2/gen.go
+++ b/vendor/github.com/huin/goupnp/dcps/internetgateway2/gen.go
@@ -1,2 +1,2 @@
-//go:generate goupnpdcpgen -dcp_name internetgateway2
+//go:generate goupnpdcpgen -dcp_name internetgateway2 -code_tmpl_file ../dcps.gotemplate
package internetgateway2
diff --git a/vendor/github.com/huin/goupnp/dcps/internetgateway2/internetgateway2.go b/vendor/github.com/huin/goupnp/dcps/internetgateway2/internetgateway2.go
index e79d7824f..42a157869 100644
--- a/vendor/github.com/huin/goupnp/dcps/internetgateway2/internetgateway2.go
+++ b/vendor/github.com/huin/goupnp/dcps/internetgateway2/internetgateway2.go
@@ -54,35 +54,47 @@ type DeviceProtection1 struct {
goupnp.ServiceClient
}
-// NewDeviceProtection1Clients discovers instances of the service on the network,
+// NewDeviceProtection1ClientsCtx discovers instances of the service on the network,
// and returns clients to any that are found. errors will contain an error for
// any devices that replied but which could not be queried, and err will be set
// if the discovery process failed outright.
//
// This is a typical entry calling point into this package.
-func NewDeviceProtection1Clients() (clients []*DeviceProtection1, errors []error, err error) {
+func NewDeviceProtection1ClientsCtx(ctx context.Context) (clients []*DeviceProtection1, errors []error, err error) {
var genericClients []goupnp.ServiceClient
- if genericClients, errors, err = goupnp.NewServiceClients(URN_DeviceProtection_1); err != nil {
+ if genericClients, errors, err = goupnp.NewServiceClientsCtx(ctx, URN_DeviceProtection_1); err != nil {
return
}
clients = newDeviceProtection1ClientsFromGenericClients(genericClients)
return
}
-// NewDeviceProtection1ClientsByURL discovers instances of the service at the given
+// NewDeviceProtection1Clients is the legacy version of NewDeviceProtection1ClientsCtx, but uses
+// context.Background() as the context.
+func NewDeviceProtection1Clients() (clients []*DeviceProtection1, errors []error, err error) {
+ return NewDeviceProtection1ClientsCtx(context.Background())
+}
+
+// NewDeviceProtection1ClientsByURLCtx discovers instances of the service at the given
// URL, and returns clients to any that are found. An error is returned if
// there was an error probing the service.
//
// This is a typical entry calling point into this package when reusing an
// previously discovered service URL.
-func NewDeviceProtection1ClientsByURL(loc *url.URL) ([]*DeviceProtection1, error) {
- genericClients, err := goupnp.NewServiceClientsByURL(loc, URN_DeviceProtection_1)
+func NewDeviceProtection1ClientsByURLCtx(ctx context.Context, loc *url.URL) ([]*DeviceProtection1, error) {
+ genericClients, err := goupnp.NewServiceClientsByURLCtx(ctx, loc, URN_DeviceProtection_1)
if err != nil {
return nil, err
}
return newDeviceProtection1ClientsFromGenericClients(genericClients), nil
}
+// NewDeviceProtection1ClientsByURL is the legacy version of NewDeviceProtection1ClientsByURLCtx, but uses
+// context.Background() as the context.
+func NewDeviceProtection1ClientsByURL(loc *url.URL) ([]*DeviceProtection1, error) {
+ return NewDeviceProtection1ClientsByURLCtx(context.Background(), loc)
+}
+
// NewDeviceProtection1ClientsFromRootDevice discovers instances of the service in
// a given root device, and returns clients to any that are found. An error is
// returned if there was not at least one instance of the service within the
@@ -672,35 +684,47 @@ type LANHostConfigManagement1 struct {
goupnp.ServiceClient
}
-// NewLANHostConfigManagement1Clients discovers instances of the service on the network,
+// NewLANHostConfigManagement1ClientsCtx discovers instances of the service on the network,
// and returns clients to any that are found. errors will contain an error for
// any devices that replied but which could not be queried, and err will be set
// if the discovery process failed outright.
//
// This is a typical entry calling point into this package.
-func NewLANHostConfigManagement1Clients() (clients []*LANHostConfigManagement1, errors []error, err error) {
+func NewLANHostConfigManagement1ClientsCtx(ctx context.Context) (clients []*LANHostConfigManagement1, errors []error, err error) {
var genericClients []goupnp.ServiceClient
- if genericClients, errors, err = goupnp.NewServiceClients(URN_LANHostConfigManagement_1); err != nil {
+ if genericClients, errors, err = goupnp.NewServiceClientsCtx(ctx, URN_LANHostConfigManagement_1); err != nil {
return
}
clients = newLANHostConfigManagement1ClientsFromGenericClients(genericClients)
return
}
-// NewLANHostConfigManagement1ClientsByURL discovers instances of the service at the given
+// NewLANHostConfigManagement1Clients is the legacy version of NewLANHostConfigManagement1ClientsCtx, but uses
+// context.Background() as the context.
+func NewLANHostConfigManagement1Clients() (clients []*LANHostConfigManagement1, errors []error, err error) {
+ return NewLANHostConfigManagement1ClientsCtx(context.Background())
+}
+
+// NewLANHostConfigManagement1ClientsByURLCtx discovers instances of the service at the given
// URL, and returns clients to any that are found. An error is returned if
// there was an error probing the service.
//
// This is a typical entry calling point into this package when reusing an
// previously discovered service URL.
-func NewLANHostConfigManagement1ClientsByURL(loc *url.URL) ([]*LANHostConfigManagement1, error) {
- genericClients, err := goupnp.NewServiceClientsByURL(loc, URN_LANHostConfigManagement_1)
+func NewLANHostConfigManagement1ClientsByURLCtx(ctx context.Context, loc *url.URL) ([]*LANHostConfigManagement1, error) {
+ genericClients, err := goupnp.NewServiceClientsByURLCtx(ctx, loc, URN_LANHostConfigManagement_1)
if err != nil {
return nil, err
}
return newLANHostConfigManagement1ClientsFromGenericClients(genericClients), nil
}
+// NewLANHostConfigManagement1ClientsByURL is the legacy version of NewLANHostConfigManagement1ClientsByURLCtx, but uses
+// context.Background() as the context.
+func NewLANHostConfigManagement1ClientsByURL(loc *url.URL) ([]*LANHostConfigManagement1, error) {
+ return NewLANHostConfigManagement1ClientsByURLCtx(context.Background(), loc)
+}
+
// NewLANHostConfigManagement1ClientsFromRootDevice discovers instances of the service in
// a given root device, and returns clients to any that are found. An error is
// returned if there was not at least one instance of the service within the
@@ -1421,35 +1445,47 @@ type Layer3Forwarding1 struct {
goupnp.ServiceClient
}
-// NewLayer3Forwarding1Clients discovers instances of the service on the network,
+// NewLayer3Forwarding1ClientsCtx discovers instances of the service on the network,
// and returns clients to any that are found. errors will contain an error for
// any devices that replied but which could not be queried, and err will be set
// if the discovery process failed outright.
//
// This is a typical entry calling point into this package.
-func NewLayer3Forwarding1Clients() (clients []*Layer3Forwarding1, errors []error, err error) {
+func NewLayer3Forwarding1ClientsCtx(ctx context.Context) (clients []*Layer3Forwarding1, errors []error, err error) {
var genericClients []goupnp.ServiceClient
- if genericClients, errors, err = goupnp.NewServiceClients(URN_Layer3Forwarding_1); err != nil {
+ if genericClients, errors, err = goupnp.NewServiceClientsCtx(ctx, URN_Layer3Forwarding_1); err != nil {
return
}
clients = newLayer3Forwarding1ClientsFromGenericClients(genericClients)
return
}
-// NewLayer3Forwarding1ClientsByURL discovers instances of the service at the given
+// NewLayer3Forwarding1Clients is the legacy version of NewLayer3Forwarding1ClientsCtx, but uses
+// context.Background() as the context.
+func NewLayer3Forwarding1Clients() (clients []*Layer3Forwarding1, errors []error, err error) {
+ return NewLayer3Forwarding1ClientsCtx(context.Background())
+}
+
+// NewLayer3Forwarding1ClientsByURLCtx discovers instances of the service at the given
// URL, and returns clients to any that are found. An error is returned if
// there was an error probing the service.
//
// This is a typical entry calling point into this package when reusing an
// previously discovered service URL.
-func NewLayer3Forwarding1ClientsByURL(loc *url.URL) ([]*Layer3Forwarding1, error) {
- genericClients, err := goupnp.NewServiceClientsByURL(loc, URN_Layer3Forwarding_1)
+func NewLayer3Forwarding1ClientsByURLCtx(ctx context.Context, loc *url.URL) ([]*Layer3Forwarding1, error) {
+ genericClients, err := goupnp.NewServiceClientsByURLCtx(ctx, loc, URN_Layer3Forwarding_1)
if err != nil {
return nil, err
}
return newLayer3Forwarding1ClientsFromGenericClients(genericClients), nil
}
+// NewLayer3Forwarding1ClientsByURL is the legacy version of NewLayer3Forwarding1ClientsByURLCtx, but uses
+// context.Background() as the context.
+func NewLayer3Forwarding1ClientsByURL(loc *url.URL) ([]*Layer3Forwarding1, error) {
+ return NewLayer3Forwarding1ClientsByURLCtx(context.Background(), loc)
+}
+
// NewLayer3Forwarding1ClientsFromRootDevice discovers instances of the service in
// a given root device, and returns clients to any that are found. An error is
// returned if there was not at least one instance of the service within the
@@ -1552,35 +1588,47 @@ type WANCableLinkConfig1 struct {
goupnp.ServiceClient
}
-// NewWANCableLinkConfig1Clients discovers instances of the service on the network,
+// NewWANCableLinkConfig1ClientsCtx discovers instances of the service on the network,
// and returns clients to any that are found. errors will contain an error for
// any devices that replied but which could not be queried, and err will be set
// if the discovery process failed outright.
//
// This is a typical entry calling point into this package.
-func NewWANCableLinkConfig1Clients() (clients []*WANCableLinkConfig1, errors []error, err error) {
+func NewWANCableLinkConfig1ClientsCtx(ctx context.Context) (clients []*WANCableLinkConfig1, errors []error, err error) {
var genericClients []goupnp.ServiceClient
- if genericClients, errors, err = goupnp.NewServiceClients(URN_WANCableLinkConfig_1); err != nil {
+ if genericClients, errors, err = goupnp.NewServiceClientsCtx(ctx, URN_WANCableLinkConfig_1); err != nil {
return
}
clients = newWANCableLinkConfig1ClientsFromGenericClients(genericClients)
return
}
-// NewWANCableLinkConfig1ClientsByURL discovers instances of the service at the given
+// NewWANCableLinkConfig1Clients is the legacy version of NewWANCableLinkConfig1ClientsCtx, but uses
+// context.Background() as the context.
+func NewWANCableLinkConfig1Clients() (clients []*WANCableLinkConfig1, errors []error, err error) {
+ return NewWANCableLinkConfig1ClientsCtx(context.Background())
+}
+
+// NewWANCableLinkConfig1ClientsByURLCtx discovers instances of the service at the given
// URL, and returns clients to any that are found. An error is returned if
// there was an error probing the service.
//
// This is a typical entry calling point into this package when reusing an
// previously discovered service URL.
-func NewWANCableLinkConfig1ClientsByURL(loc *url.URL) ([]*WANCableLinkConfig1, error) {
- genericClients, err := goupnp.NewServiceClientsByURL(loc, URN_WANCableLinkConfig_1)
+func NewWANCableLinkConfig1ClientsByURLCtx(ctx context.Context, loc *url.URL) ([]*WANCableLinkConfig1, error) {
+ genericClients, err := goupnp.NewServiceClientsByURLCtx(ctx, loc, URN_WANCableLinkConfig_1)
if err != nil {
return nil, err
}
return newWANCableLinkConfig1ClientsFromGenericClients(genericClients), nil
}
+// NewWANCableLinkConfig1ClientsByURL is the legacy version of NewWANCableLinkConfig1ClientsByURLCtx, but uses
+// context.Background() as the context.
+func NewWANCableLinkConfig1ClientsByURL(loc *url.URL) ([]*WANCableLinkConfig1, error) {
+ return NewWANCableLinkConfig1ClientsByURLCtx(context.Background(), loc)
+}
+
// NewWANCableLinkConfig1ClientsFromRootDevice discovers instances of the service in
// a given root device, and returns clients to any that are found. An error is
// returned if there was not at least one instance of the service within the
@@ -1970,35 +2018,47 @@ type WANCommonInterfaceConfig1 struct {
goupnp.ServiceClient
}
-// NewWANCommonInterfaceConfig1Clients discovers instances of the service on the network,
+// NewWANCommonInterfaceConfig1ClientsCtx discovers instances of the service on the network,
// and returns clients to any that are found. errors will contain an error for
// any devices that replied but which could not be queried, and err will be set
// if the discovery process failed outright.
//
// This is a typical entry calling point into this package.
-func NewWANCommonInterfaceConfig1Clients() (clients []*WANCommonInterfaceConfig1, errors []error, err error) {
+func NewWANCommonInterfaceConfig1ClientsCtx(ctx context.Context) (clients []*WANCommonInterfaceConfig1, errors []error, err error) {
var genericClients []goupnp.ServiceClient
- if genericClients, errors, err = goupnp.NewServiceClients(URN_WANCommonInterfaceConfig_1); err != nil {
+ if genericClients, errors, err = goupnp.NewServiceClientsCtx(ctx, URN_WANCommonInterfaceConfig_1); err != nil {
return
}
clients = newWANCommonInterfaceConfig1ClientsFromGenericClients(genericClients)
return
}
-// NewWANCommonInterfaceConfig1ClientsByURL discovers instances of the service at the given
+// NewWANCommonInterfaceConfig1Clients is the legacy version of NewWANCommonInterfaceConfig1ClientsCtx, but uses
+// context.Background() as the context.
+func NewWANCommonInterfaceConfig1Clients() (clients []*WANCommonInterfaceConfig1, errors []error, err error) {
+ return NewWANCommonInterfaceConfig1ClientsCtx(context.Background())
+}
+
+// NewWANCommonInterfaceConfig1ClientsByURLCtx discovers instances of the service at the given
// URL, and returns clients to any that are found. An error is returned if
// there was an error probing the service.
//
// This is a typical entry calling point into this package when reusing an
// previously discovered service URL.
-func NewWANCommonInterfaceConfig1ClientsByURL(loc *url.URL) ([]*WANCommonInterfaceConfig1, error) {
- genericClients, err := goupnp.NewServiceClientsByURL(loc, URN_WANCommonInterfaceConfig_1)
+func NewWANCommonInterfaceConfig1ClientsByURLCtx(ctx context.Context, loc *url.URL) ([]*WANCommonInterfaceConfig1, error) {
+ genericClients, err := goupnp.NewServiceClientsByURLCtx(ctx, loc, URN_WANCommonInterfaceConfig_1)
if err != nil {
return nil, err
}
return newWANCommonInterfaceConfig1ClientsFromGenericClients(genericClients), nil
}
+// NewWANCommonInterfaceConfig1ClientsByURL is the legacy version of NewWANCommonInterfaceConfig1ClientsByURLCtx, but uses
+// context.Background() as the context.
+func NewWANCommonInterfaceConfig1ClientsByURL(loc *url.URL) ([]*WANCommonInterfaceConfig1, error) {
+ return NewWANCommonInterfaceConfig1ClientsByURLCtx(context.Background(), loc)
+}
+
// NewWANCommonInterfaceConfig1ClientsFromRootDevice discovers instances of the service in
// a given root device, and returns clients to any that are found. An error is
// returned if there was not at least one instance of the service within the
@@ -2407,35 +2467,47 @@ type WANDSLLinkConfig1 struct {
goupnp.ServiceClient
}
-// NewWANDSLLinkConfig1Clients discovers instances of the service on the network,
+// NewWANDSLLinkConfig1ClientsCtx discovers instances of the service on the network,
// and returns clients to any that are found. errors will contain an error for
// any devices that replied but which could not be queried, and err will be set
// if the discovery process failed outright.
//
// This is a typical entry calling point into this package.
-func NewWANDSLLinkConfig1Clients() (clients []*WANDSLLinkConfig1, errors []error, err error) {
+func NewWANDSLLinkConfig1ClientsCtx(ctx context.Context) (clients []*WANDSLLinkConfig1, errors []error, err error) {
var genericClients []goupnp.ServiceClient
- if genericClients, errors, err = goupnp.NewServiceClients(URN_WANDSLLinkConfig_1); err != nil {
+ if genericClients, errors, err = goupnp.NewServiceClientsCtx(ctx, URN_WANDSLLinkConfig_1); err != nil {
return
}
clients = newWANDSLLinkConfig1ClientsFromGenericClients(genericClients)
return
}
-// NewWANDSLLinkConfig1ClientsByURL discovers instances of the service at the given
+// NewWANDSLLinkConfig1Clients is the legacy version of NewWANDSLLinkConfig1ClientsCtx, but uses
+// context.Background() as the context.
+func NewWANDSLLinkConfig1Clients() (clients []*WANDSLLinkConfig1, errors []error, err error) {
+ return NewWANDSLLinkConfig1ClientsCtx(context.Background())
+}
+
+// NewWANDSLLinkConfig1ClientsByURLCtx discovers instances of the service at the given
// URL, and returns clients to any that are found. An error is returned if
// there was an error probing the service.
//
// This is a typical entry calling point into this package when reusing an
// previously discovered service URL.
-func NewWANDSLLinkConfig1ClientsByURL(loc *url.URL) ([]*WANDSLLinkConfig1, error) {
- genericClients, err := goupnp.NewServiceClientsByURL(loc, URN_WANDSLLinkConfig_1)
+func NewWANDSLLinkConfig1ClientsByURLCtx(ctx context.Context, loc *url.URL) ([]*WANDSLLinkConfig1, error) {
+ genericClients, err := goupnp.NewServiceClientsByURLCtx(ctx, loc, URN_WANDSLLinkConfig_1)
if err != nil {
return nil, err
}
return newWANDSLLinkConfig1ClientsFromGenericClients(genericClients), nil
}
+// NewWANDSLLinkConfig1ClientsByURL is the legacy version of NewWANDSLLinkConfig1ClientsByURLCtx, but uses
+// context.Background() as the context.
+func NewWANDSLLinkConfig1ClientsByURL(loc *url.URL) ([]*WANDSLLinkConfig1, error) {
+ return NewWANDSLLinkConfig1ClientsByURLCtx(context.Background(), loc)
+}
+
// NewWANDSLLinkConfig1ClientsFromRootDevice discovers instances of the service in
// a given root device, and returns clients to any that are found. An error is
// returned if there was not at least one instance of the service within the
@@ -2827,35 +2899,47 @@ type WANEthernetLinkConfig1 struct {
goupnp.ServiceClient
}
-// NewWANEthernetLinkConfig1Clients discovers instances of the service on the network,
+// NewWANEthernetLinkConfig1ClientsCtx discovers instances of the service on the network,
// and returns clients to any that are found. errors will contain an error for
// any devices that replied but which could not be queried, and err will be set
// if the discovery process failed outright.
//
// This is a typical entry calling point into this package.
-func NewWANEthernetLinkConfig1Clients() (clients []*WANEthernetLinkConfig1, errors []error, err error) {
+func NewWANEthernetLinkConfig1ClientsCtx(ctx context.Context) (clients []*WANEthernetLinkConfig1, errors []error, err error) {
var genericClients []goupnp.ServiceClient
- if genericClients, errors, err = goupnp.NewServiceClients(URN_WANEthernetLinkConfig_1); err != nil {
+ if genericClients, errors, err = goupnp.NewServiceClientsCtx(ctx, URN_WANEthernetLinkConfig_1); err != nil {
return
}
clients = newWANEthernetLinkConfig1ClientsFromGenericClients(genericClients)
return
}
-// NewWANEthernetLinkConfig1ClientsByURL discovers instances of the service at the given
+// NewWANEthernetLinkConfig1Clients is the legacy version of NewWANEthernetLinkConfig1ClientsCtx, but uses
+// context.Background() as the context.
+func NewWANEthernetLinkConfig1Clients() (clients []*WANEthernetLinkConfig1, errors []error, err error) {
+ return NewWANEthernetLinkConfig1ClientsCtx(context.Background())
+}
+
+// NewWANEthernetLinkConfig1ClientsByURLCtx discovers instances of the service at the given
// URL, and returns clients to any that are found. An error is returned if
// there was an error probing the service.
//
// This is a typical entry calling point into this package when reusing an
// previously discovered service URL.
-func NewWANEthernetLinkConfig1ClientsByURL(loc *url.URL) ([]*WANEthernetLinkConfig1, error) {
- genericClients, err := goupnp.NewServiceClientsByURL(loc, URN_WANEthernetLinkConfig_1)
+func NewWANEthernetLinkConfig1ClientsByURLCtx(ctx context.Context, loc *url.URL) ([]*WANEthernetLinkConfig1, error) {
+ genericClients, err := goupnp.NewServiceClientsByURLCtx(ctx, loc, URN_WANEthernetLinkConfig_1)
if err != nil {
return nil, err
}
return newWANEthernetLinkConfig1ClientsFromGenericClients(genericClients), nil
}
+// NewWANEthernetLinkConfig1ClientsByURL is the legacy version of NewWANEthernetLinkConfig1ClientsByURLCtx, but uses
+// context.Background() as the context.
+func NewWANEthernetLinkConfig1ClientsByURL(loc *url.URL) ([]*WANEthernetLinkConfig1, error) {
+ return NewWANEthernetLinkConfig1ClientsByURLCtx(context.Background(), loc)
+}
+
// NewWANEthernetLinkConfig1ClientsFromRootDevice discovers instances of the service in
// a given root device, and returns clients to any that are found. An error is
// returned if there was not at least one instance of the service within the
@@ -2925,35 +3009,47 @@ type WANIPConnection1 struct {
goupnp.ServiceClient
}
-// NewWANIPConnection1Clients discovers instances of the service on the network,
+// NewWANIPConnection1ClientsCtx discovers instances of the service on the network,
// and returns clients to any that are found. errors will contain an error for
// any devices that replied but which could not be queried, and err will be set
// if the discovery process failed outright.
//
// This is a typical entry calling point into this package.
-func NewWANIPConnection1Clients() (clients []*WANIPConnection1, errors []error, err error) {
+func NewWANIPConnection1ClientsCtx(ctx context.Context) (clients []*WANIPConnection1, errors []error, err error) {
var genericClients []goupnp.ServiceClient
- if genericClients, errors, err = goupnp.NewServiceClients(URN_WANIPConnection_1); err != nil {
+ if genericClients, errors, err = goupnp.NewServiceClientsCtx(ctx, URN_WANIPConnection_1); err != nil {
return
}
clients = newWANIPConnection1ClientsFromGenericClients(genericClients)
return
}
-// NewWANIPConnection1ClientsByURL discovers instances of the service at the given
+// NewWANIPConnection1Clients is the legacy version of NewWANIPConnection1ClientsCtx, but uses
+// context.Background() as the context.
+func NewWANIPConnection1Clients() (clients []*WANIPConnection1, errors []error, err error) {
+ return NewWANIPConnection1ClientsCtx(context.Background())
+}
+
+// NewWANIPConnection1ClientsByURLCtx discovers instances of the service at the given
// URL, and returns clients to any that are found. An error is returned if
// there was an error probing the service.
//
// This is a typical entry calling point into this package when reusing an
// previously discovered service URL.
-func NewWANIPConnection1ClientsByURL(loc *url.URL) ([]*WANIPConnection1, error) {
- genericClients, err := goupnp.NewServiceClientsByURL(loc, URN_WANIPConnection_1)
+func NewWANIPConnection1ClientsByURLCtx(ctx context.Context, loc *url.URL) ([]*WANIPConnection1, error) {
+ genericClients, err := goupnp.NewServiceClientsByURLCtx(ctx, loc, URN_WANIPConnection_1)
if err != nil {
return nil, err
}
return newWANIPConnection1ClientsFromGenericClients(genericClients), nil
}
+// NewWANIPConnection1ClientsByURL is the legacy version of NewWANIPConnection1ClientsByURLCtx, but uses
+// context.Background() as the context.
+func NewWANIPConnection1ClientsByURL(loc *url.URL) ([]*WANIPConnection1, error) {
+ return NewWANIPConnection1ClientsByURLCtx(context.Background(), loc)
+}
+
// NewWANIPConnection1ClientsFromRootDevice discovers instances of the service in
// a given root device, and returns clients to any that are found. An error is
// returned if there was not at least one instance of the service within the
@@ -3771,35 +3867,47 @@ type WANIPConnection2 struct {
goupnp.ServiceClient
}
-// NewWANIPConnection2Clients discovers instances of the service on the network,
+// NewWANIPConnection2ClientsCtx discovers instances of the service on the network,
// and returns clients to any that are found. errors will contain an error for
// any devices that replied but which could not be queried, and err will be set
// if the discovery process failed outright.
//
// This is a typical entry calling point into this package.
-func NewWANIPConnection2Clients() (clients []*WANIPConnection2, errors []error, err error) {
+func NewWANIPConnection2ClientsCtx(ctx context.Context) (clients []*WANIPConnection2, errors []error, err error) {
var genericClients []goupnp.ServiceClient
- if genericClients, errors, err = goupnp.NewServiceClients(URN_WANIPConnection_2); err != nil {
+ if genericClients, errors, err = goupnp.NewServiceClientsCtx(ctx, URN_WANIPConnection_2); err != nil {
return
}
clients = newWANIPConnection2ClientsFromGenericClients(genericClients)
return
}
-// NewWANIPConnection2ClientsByURL discovers instances of the service at the given
+// NewWANIPConnection2Clients is the legacy version of NewWANIPConnection2ClientsCtx, but uses
+// context.Background() as the context.
+func NewWANIPConnection2Clients() (clients []*WANIPConnection2, errors []error, err error) {
+ return NewWANIPConnection2ClientsCtx(context.Background())
+}
+
+// NewWANIPConnection2ClientsByURLCtx discovers instances of the service at the given
// URL, and returns clients to any that are found. An error is returned if
// there was an error probing the service.
//
// This is a typical entry calling point into this package when reusing an
// previously discovered service URL.
-func NewWANIPConnection2ClientsByURL(loc *url.URL) ([]*WANIPConnection2, error) {
- genericClients, err := goupnp.NewServiceClientsByURL(loc, URN_WANIPConnection_2)
+func NewWANIPConnection2ClientsByURLCtx(ctx context.Context, loc *url.URL) ([]*WANIPConnection2, error) {
+ genericClients, err := goupnp.NewServiceClientsByURLCtx(ctx, loc, URN_WANIPConnection_2)
if err != nil {
return nil, err
}
return newWANIPConnection2ClientsFromGenericClients(genericClients), nil
}
+// NewWANIPConnection2ClientsByURL is the legacy version of NewWANIPConnection2ClientsByURLCtx, but uses
+// context.Background() as the context.
+func NewWANIPConnection2ClientsByURL(loc *url.URL) ([]*WANIPConnection2, error) {
+ return NewWANIPConnection2ClientsByURLCtx(context.Background(), loc)
+}
+
// NewWANIPConnection2ClientsFromRootDevice discovers instances of the service in
// a given root device, and returns clients to any that are found. An error is
// returned if there was not at least one instance of the service within the
@@ -4833,35 +4941,47 @@ type WANIPv6FirewallControl1 struct {
goupnp.ServiceClient
}
-// NewWANIPv6FirewallControl1Clients discovers instances of the service on the network,
+// NewWANIPv6FirewallControl1ClientsCtx discovers instances of the service on the network,
// and returns clients to any that are found. errors will contain an error for
// any devices that replied but which could not be queried, and err will be set
// if the discovery process failed outright.
//
// This is a typical entry calling point into this package.
-func NewWANIPv6FirewallControl1Clients() (clients []*WANIPv6FirewallControl1, errors []error, err error) {
+func NewWANIPv6FirewallControl1ClientsCtx(ctx context.Context) (clients []*WANIPv6FirewallControl1, errors []error, err error) {
var genericClients []goupnp.ServiceClient
- if genericClients, errors, err = goupnp.NewServiceClients(URN_WANIPv6FirewallControl_1); err != nil {
+ if genericClients, errors, err = goupnp.NewServiceClientsCtx(ctx, URN_WANIPv6FirewallControl_1); err != nil {
return
}
clients = newWANIPv6FirewallControl1ClientsFromGenericClients(genericClients)
return
}
-// NewWANIPv6FirewallControl1ClientsByURL discovers instances of the service at the given
+// NewWANIPv6FirewallControl1Clients is the legacy version of NewWANIPv6FirewallControl1ClientsCtx, but uses
+// context.Background() as the context.
+func NewWANIPv6FirewallControl1Clients() (clients []*WANIPv6FirewallControl1, errors []error, err error) {
+ return NewWANIPv6FirewallControl1ClientsCtx(context.Background())
+}
+
+// NewWANIPv6FirewallControl1ClientsByURLCtx discovers instances of the service at the given
// URL, and returns clients to any that are found. An error is returned if
// there was an error probing the service.
//
// This is a typical entry calling point into this package when reusing an
// previously discovered service URL.
-func NewWANIPv6FirewallControl1ClientsByURL(loc *url.URL) ([]*WANIPv6FirewallControl1, error) {
- genericClients, err := goupnp.NewServiceClientsByURL(loc, URN_WANIPv6FirewallControl_1)
+func NewWANIPv6FirewallControl1ClientsByURLCtx(ctx context.Context, loc *url.URL) ([]*WANIPv6FirewallControl1, error) {
+ genericClients, err := goupnp.NewServiceClientsByURLCtx(ctx, loc, URN_WANIPv6FirewallControl_1)
if err != nil {
return nil, err
}
return newWANIPv6FirewallControl1ClientsFromGenericClients(genericClients), nil
}
+// NewWANIPv6FirewallControl1ClientsByURL is the legacy version of NewWANIPv6FirewallControl1ClientsByURLCtx, but uses
+// context.Background() as the context.
+func NewWANIPv6FirewallControl1ClientsByURL(loc *url.URL) ([]*WANIPv6FirewallControl1, error) {
+ return NewWANIPv6FirewallControl1ClientsByURLCtx(context.Background(), loc)
+}
+
// NewWANIPv6FirewallControl1ClientsFromRootDevice discovers instances of the service in
// a given root device, and returns clients to any that are found. An error is
// returned if there was not at least one instance of the service within the
@@ -5243,35 +5363,47 @@ type WANPOTSLinkConfig1 struct {
goupnp.ServiceClient
}
-// NewWANPOTSLinkConfig1Clients discovers instances of the service on the network,
+// NewWANPOTSLinkConfig1ClientsCtx discovers instances of the service on the network,
// and returns clients to any that are found. errors will contain an error for
// any devices that replied but which could not be queried, and err will be set
// if the discovery process failed outright.
//
// This is a typical entry calling point into this package.
-func NewWANPOTSLinkConfig1Clients() (clients []*WANPOTSLinkConfig1, errors []error, err error) {
+func NewWANPOTSLinkConfig1ClientsCtx(ctx context.Context) (clients []*WANPOTSLinkConfig1, errors []error, err error) {
var genericClients []goupnp.ServiceClient
- if genericClients, errors, err = goupnp.NewServiceClients(URN_WANPOTSLinkConfig_1); err != nil {
+ if genericClients, errors, err = goupnp.NewServiceClientsCtx(ctx, URN_WANPOTSLinkConfig_1); err != nil {
return
}
clients = newWANPOTSLinkConfig1ClientsFromGenericClients(genericClients)
return
}
-// NewWANPOTSLinkConfig1ClientsByURL discovers instances of the service at the given
+// NewWANPOTSLinkConfig1Clients is the legacy version of NewWANPOTSLinkConfig1ClientsCtx, but uses
+// context.Background() as the context.
+func NewWANPOTSLinkConfig1Clients() (clients []*WANPOTSLinkConfig1, errors []error, err error) {
+ return NewWANPOTSLinkConfig1ClientsCtx(context.Background())
+}
+
+// NewWANPOTSLinkConfig1ClientsByURLCtx discovers instances of the service at the given
// URL, and returns clients to any that are found. An error is returned if
// there was an error probing the service.
//
// This is a typical entry calling point into this package when reusing an
// previously discovered service URL.
-func NewWANPOTSLinkConfig1ClientsByURL(loc *url.URL) ([]*WANPOTSLinkConfig1, error) {
- genericClients, err := goupnp.NewServiceClientsByURL(loc, URN_WANPOTSLinkConfig_1)
+func NewWANPOTSLinkConfig1ClientsByURLCtx(ctx context.Context, loc *url.URL) ([]*WANPOTSLinkConfig1, error) {
+ genericClients, err := goupnp.NewServiceClientsByURLCtx(ctx, loc, URN_WANPOTSLinkConfig_1)
if err != nil {
return nil, err
}
return newWANPOTSLinkConfig1ClientsFromGenericClients(genericClients), nil
}
+// NewWANPOTSLinkConfig1ClientsByURL is the legacy version of NewWANPOTSLinkConfig1ClientsByURLCtx, but uses
+// context.Background() as the context.
+func NewWANPOTSLinkConfig1ClientsByURL(loc *url.URL) ([]*WANPOTSLinkConfig1, error) {
+ return NewWANPOTSLinkConfig1ClientsByURLCtx(context.Background(), loc)
+}
+
// NewWANPOTSLinkConfig1ClientsFromRootDevice discovers instances of the service in
// a given root device, and returns clients to any that are found. An error is
// returned if there was not at least one instance of the service within the
@@ -5654,35 +5786,47 @@ type WANPPPConnection1 struct {
goupnp.ServiceClient
}
-// NewWANPPPConnection1Clients discovers instances of the service on the network,
+// NewWANPPPConnection1ClientsCtx discovers instances of the service on the network,
// and returns clients to any that are found. errors will contain an error for
// any devices that replied but which could not be queried, and err will be set
// if the discovery process failed outright.
//
// This is a typical entry calling point into this package.
-func NewWANPPPConnection1Clients() (clients []*WANPPPConnection1, errors []error, err error) {
+func NewWANPPPConnection1ClientsCtx(ctx context.Context) (clients []*WANPPPConnection1, errors []error, err error) {
var genericClients []goupnp.ServiceClient
- if genericClients, errors, err = goupnp.NewServiceClients(URN_WANPPPConnection_1); err != nil {
+ if genericClients, errors, err = goupnp.NewServiceClientsCtx(ctx, URN_WANPPPConnection_1); err != nil {
return
}
clients = newWANPPPConnection1ClientsFromGenericClients(genericClients)
return
}
-// NewWANPPPConnection1ClientsByURL discovers instances of the service at the given
+// NewWANPPPConnection1Clients is the legacy version of NewWANPPPConnection1ClientsCtx, but uses
+// context.Background() as the context.
+func NewWANPPPConnection1Clients() (clients []*WANPPPConnection1, errors []error, err error) {
+ return NewWANPPPConnection1ClientsCtx(context.Background())
+}
+
+// NewWANPPPConnection1ClientsByURLCtx discovers instances of the service at the given
// URL, and returns clients to any that are found. An error is returned if
// there was an error probing the service.
//
// This is a typical entry calling point into this package when reusing an
// previously discovered service URL.
-func NewWANPPPConnection1ClientsByURL(loc *url.URL) ([]*WANPPPConnection1, error) {
- genericClients, err := goupnp.NewServiceClientsByURL(loc, URN_WANPPPConnection_1)
+func NewWANPPPConnection1ClientsByURLCtx(ctx context.Context, loc *url.URL) ([]*WANPPPConnection1, error) {
+ genericClients, err := goupnp.NewServiceClientsByURLCtx(ctx, loc, URN_WANPPPConnection_1)
if err != nil {
return nil, err
}
return newWANPPPConnection1ClientsFromGenericClients(genericClients), nil
}
+// NewWANPPPConnection1ClientsByURL is the legacy version of NewWANPPPConnection1ClientsByURLCtx, but uses
+// context.Background() as the context.
+func NewWANPPPConnection1ClientsByURL(loc *url.URL) ([]*WANPPPConnection1, error) {
+ return NewWANPPPConnection1ClientsByURLCtx(context.Background(), loc)
+}
+
// NewWANPPPConnection1ClientsFromRootDevice discovers instances of the service in
// a given root device, and returns clients to any that are found. An error is
// returned if there was not at least one instance of the service within the
diff --git a/vendor/github.com/huin/goupnp/device.go b/vendor/github.com/huin/goupnp/device.go
index 334e787ca..65f5635a7 100644
--- a/vendor/github.com/huin/goupnp/device.go
+++ b/vendor/github.com/huin/goupnp/device.go
@@ -3,6 +3,7 @@
package goupnp
import (
+ "context"
"encoding/xml"
"errors"
"fmt"
@@ -51,6 +52,7 @@ type Device struct {
ModelDescription string `xml:"modelDescription"`
ModelName string `xml:"modelName"`
ModelNumber string `xml:"modelNumber"`
+ ModelType string `xml:"modelType"`
ModelURL URLField `xml:"modelURL"`
SerialNumber string `xml:"serialNumber"`
UDN string `xml:"UDN"`
@@ -148,19 +150,25 @@ func (srv *Service) String() string {
return fmt.Sprintf("Service ID %s : %s", srv.ServiceId, srv.ServiceType)
}
-// RequestSCPD requests the SCPD (soap actions and state variables description)
+// RequestSCPDCtx requests the SCPD (soap actions and state variables description)
// for the service.
-func (srv *Service) RequestSCPD() (*scpd.SCPD, error) {
+func (srv *Service) RequestSCPDCtx(ctx context.Context) (*scpd.SCPD, error) {
if !srv.SCPDURL.Ok {
return nil, errors.New("bad/missing SCPD URL, or no URLBase has been set")
}
s := new(scpd.SCPD)
- if err := requestXml(srv.SCPDURL.URL.String(), scpd.SCPDXMLNamespace, s); err != nil {
+ if err := requestXml(ctx, srv.SCPDURL.URL.String(), scpd.SCPDXMLNamespace, s); err != nil {
return nil, err
}
return s, nil
}
+// RequestSCPD is the legacy version of RequestSCPDCtx, but uses
+// context.Background() as the context.
+func (srv *Service) RequestSCPD() (*scpd.SCPD, error) {
+ return srv.RequestSCPDCtx(context.Background())
+}
+
// RequestSCDP is for compatibility only, prefer RequestSCPD. This was a
// misspelling of RequestSCDP.
func (srv *Service) RequestSCDP() (*scpd.SCPD, error) {
diff --git a/vendor/github.com/huin/goupnp/go.work b/vendor/github.com/huin/goupnp/go.work
new file mode 100644
index 000000000..9b7d1ff71
--- /dev/null
+++ b/vendor/github.com/huin/goupnp/go.work
@@ -0,0 +1,6 @@
+go 1.18
+
+use (
+ .
+ ./v2alpha
+)
diff --git a/vendor/github.com/huin/goupnp/goupnp.go b/vendor/github.com/huin/goupnp/goupnp.go
index 51963de3b..93c588b03 100644
--- a/vendor/github.com/huin/goupnp/goupnp.go
+++ b/vendor/github.com/huin/goupnp/goupnp.go
@@ -15,6 +15,7 @@
package goupnp
import (
+ "context"
"encoding/xml"
"fmt"
"io"
@@ -72,19 +73,19 @@ type MaybeRootDevice struct {
Err error
}
-// DiscoverDevices attempts to find targets of the given type. This is
+// DiscoverDevicesCtx attempts to find targets of the given type. This is
// typically the entry-point for this package. searchTarget is typically a URN
// in the form "urn:schemas-upnp-org:device:..." or
// "urn:schemas-upnp-org:service:...". A single error is returned for errors
// while attempting to send the query. An error or RootDevice is returned for
// each discovered RootDevice.
-func DiscoverDevices(searchTarget string) ([]MaybeRootDevice, error) {
+func DiscoverDevicesCtx(ctx context.Context, searchTarget string) ([]MaybeRootDevice, error) {
hc, hcCleanup, err := httpuClient()
if err != nil {
return nil, err
}
defer hcCleanup()
- responses, err := ssdp.SSDPRawSearch(hc, string(searchTarget), 2, 3)
+ responses, err := ssdp.SSDPRawSearchCtx(ctx, hc, string(searchTarget), 2, 3)
if err != nil {
return nil, err
}
@@ -99,7 +100,7 @@ func DiscoverDevices(searchTarget string) ([]MaybeRootDevice, error) {
continue
}
maybe.Location = loc
- if root, err := DeviceByURL(loc); err != nil {
+ if root, err := DeviceByURLCtx(ctx, loc); err != nil {
maybe.Err = err
} else {
maybe.Root = root
@@ -112,10 +113,16 @@ func DiscoverDevices(searchTarget string) ([]MaybeRootDevice, error) {
return results, nil
}
-func DeviceByURL(loc *url.URL) (*RootDevice, error) {
+// DiscoverDevices is the legacy version of DiscoverDevicesCtx, but uses
+// context.Background() as the context.
+func DiscoverDevices(searchTarget string) ([]MaybeRootDevice, error) {
+ return DiscoverDevicesCtx(context.Background(), searchTarget)
+}
+
+func DeviceByURLCtx(ctx context.Context, loc *url.URL) (*RootDevice, error) {
locStr := loc.String()
root := new(RootDevice)
- if err := requestXml(locStr, DeviceXMLNamespace, root); err != nil {
+ if err := requestXml(ctx, locStr, DeviceXMLNamespace, root); err != nil {
return nil, ContextError{fmt.Sprintf("error requesting root device details from %q", locStr), err}
}
var urlBaseStr string
@@ -132,17 +139,29 @@ func DeviceByURL(loc *url.URL) (*RootDevice, error) {
return root, nil
}
+func DeviceByURL(loc *url.URL) (*RootDevice, error) {
+ return DeviceByURLCtx(context.Background(), loc)
+}
+
// CharsetReaderDefault specifies the charset reader used while decoding the output
// from a UPnP server. It can be modified in an init function to allow for non-utf8 encodings,
// but should not be changed after requesting clients.
var CharsetReaderDefault func(charset string, input io.Reader) (io.Reader, error)
-func requestXml(url string, defaultSpace string, doc interface{}) error {
- timeout := time.Duration(3 * time.Second)
- client := http.Client{
- Timeout: timeout,
+// HTTPClientDefault specifies the http.Client object used when fetching the XML from the UPnP server.
+// HTTPClientDefault defaults to http.DefaultClient. This may be overridden by the importing application.
+var HTTPClientDefault = http.DefaultClient
+
+func requestXml(ctx context.Context, url string, defaultSpace string, doc interface{}) error {
+ ctx, cancel := context.WithTimeout(ctx, 3*time.Second)
+ defer cancel()
+
+ req, err := http.NewRequestWithContext(ctx, http.MethodGet, url, nil)
+ if err != nil {
+ return err
}
- resp, err := client.Get(url)
+
+ resp, err := HTTPClientDefault.Do(req)
if err != nil {
return err
}
diff --git a/vendor/github.com/huin/goupnp/httpu/serve.go b/vendor/github.com/huin/goupnp/httpu/serve.go
index 9f67af85b..bac3296fd 100644
--- a/vendor/github.com/huin/goupnp/httpu/serve.go
+++ b/vendor/github.com/huin/goupnp/httpu/serve.go
@@ -7,6 +7,7 @@ import (
"net"
"net/http"
"regexp"
+ "sync"
)
const (
@@ -73,20 +74,25 @@ func (srv *Server) Serve(l net.PacketConn) error {
if srv.MaxMessageBytes != 0 {
maxMessageBytes = srv.MaxMessageBytes
}
+
+ bufPool := &sync.Pool{
+ New: func() interface{} {
+ return make([]byte, maxMessageBytes)
+ },
+ }
for {
- buf := make([]byte, maxMessageBytes)
+ buf := bufPool.Get().([]byte)
n, peerAddr, err := l.ReadFrom(buf)
if err != nil {
return err
}
- buf = buf[:n]
-
- go func(buf []byte, peerAddr net.Addr) {
+ go func() {
+ defer bufPool.Put(buf)
// At least one router's UPnP implementation has added a trailing space
// after "HTTP/1.1" - trim it.
- buf = trailingWhitespaceRx.ReplaceAllLiteral(buf, crlf)
+ reqBuf := trailingWhitespaceRx.ReplaceAllLiteral(buf[:n], crlf)
- req, err := http.ReadRequest(bufio.NewReader(bytes.NewBuffer(buf)))
+ req, err := http.ReadRequest(bufio.NewReader(bytes.NewBuffer(reqBuf)))
if err != nil {
log.Printf("httpu: Failed to parse request: %v", err)
return
@@ -94,7 +100,7 @@ func (srv *Server) Serve(l net.PacketConn) error {
req.RemoteAddr = peerAddr.String()
srv.Handler.ServeMessage(req)
// No need to call req.Body.Close - underlying reader is bytes.Buffer.
- }(buf, peerAddr)
+ }()
}
}
diff --git a/vendor/github.com/huin/goupnp/service_client.go b/vendor/github.com/huin/goupnp/service_client.go
index 79a375d5c..cb65c19ee 100644
--- a/vendor/github.com/huin/goupnp/service_client.go
+++ b/vendor/github.com/huin/goupnp/service_client.go
@@ -1,6 +1,7 @@
package goupnp
import (
+ "context"
"fmt"
"net"
"net/url"
@@ -21,12 +22,12 @@ type ServiceClient struct {
localAddr net.IP
}
-// NewServiceClients discovers services, and returns clients for them. err will
+// NewServiceClientsCtx discovers services, and returns clients for them. err will
// report any error with the discovery process (blocking any device/service
// discovery), errors reports errors on a per-root-device basis.
-func NewServiceClients(searchTarget string) (clients []ServiceClient, errors []error, err error) {
+func NewServiceClientsCtx(ctx context.Context, searchTarget string) (clients []ServiceClient, errors []error, err error) {
var maybeRootDevices []MaybeRootDevice
- if maybeRootDevices, err = DiscoverDevices(searchTarget); err != nil {
+ if maybeRootDevices, err = DiscoverDevicesCtx(ctx, searchTarget); err != nil {
return
}
@@ -49,16 +50,28 @@ func NewServiceClients(searchTarget string) (clients []ServiceClient, errors []e
return
}
-// NewServiceClientsByURL creates client(s) for the given service URN, for a
+// NewServiceClients is the legacy version of NewServiceClientsCtx, but uses
+// context.Background() as the context.
+func NewServiceClients(searchTarget string) (clients []ServiceClient, errors []error, err error) {
+ return NewServiceClientsCtx(context.Background(), searchTarget)
+}
+
+// NewServiceClientsByURLCtx creates client(s) for the given service URN, for a
// root device at the given URL.
-func NewServiceClientsByURL(loc *url.URL, searchTarget string) ([]ServiceClient, error) {
- rootDevice, err := DeviceByURL(loc)
+func NewServiceClientsByURLCtx(ctx context.Context, loc *url.URL, searchTarget string) ([]ServiceClient, error) {
+ rootDevice, err := DeviceByURLCtx(ctx, loc)
if err != nil {
return nil, err
}
return NewServiceClientsFromRootDevice(rootDevice, loc, searchTarget)
}
+// NewServiceClientsByURL is the legacy version of NewServiceClientsByURLCtx, but uses
+// context.Background() as the context.
+func NewServiceClientsByURL(loc *url.URL, searchTarget string) ([]ServiceClient, error) {
+ return NewServiceClientsByURLCtx(context.Background(), loc, searchTarget)
+}
+
// NewServiceClientsFromDevice creates client(s) for the given service URN, in
// a given root device. The loc parameter is simply assigned to the
// Location attribute of the returned ServiceClient(s).
diff --git a/vendor/github.com/huin/goupnp/soap/soap.go b/vendor/github.com/huin/goupnp/soap/soap.go
index 0d7a7582f..689f2a43d 100644
--- a/vendor/github.com/huin/goupnp/soap/soap.go
+++ b/vendor/github.com/huin/goupnp/soap/soap.go
@@ -194,9 +194,13 @@ type soapBody struct {
// SOAPFaultError implements error, and contains SOAP fault information.
type SOAPFaultError struct {
- FaultCode string `xml:"faultCode"`
- FaultString string `xml:"faultString"`
+ FaultCode string `xml:"faultcode"`
+ FaultString string `xml:"faultstring"`
Detail struct {
+ UPnPError struct {
+ Errorcode int `xml:"errorCode"`
+ ErrorDescription string `xml:"errorDescription"`
+ } `xml:"UPnPError"`
Raw []byte `xml:",innerxml"`
} `xml:"detail"`
}
diff --git a/vendor/github.com/huin/goupnp/soap/types.go b/vendor/github.com/huin/goupnp/soap/types.go
index 3e73d99d9..b54b21688 100644
--- a/vendor/github.com/huin/goupnp/soap/types.go
+++ b/vendor/github.com/huin/goupnp/soap/types.go
@@ -526,3 +526,53 @@ func MarshalURI(v *url.URL) (string, error) {
func UnmarshalURI(s string) (*url.URL, error) {
return url.Parse(s)
}
+
+// TypeData provides metadata for marshalling and unmarshalling a SOAP
+// type.
+type TypeData struct {
+ funcSuffix string
+ goType string
+}
+
+// GoTypeName returns the name of the Go type.
+func (td TypeData) GoTypeName() string {
+ return td.goType
+}
+
+// MarshalFunc returns the name of the function that marshals the type.
+func (td TypeData) MarshalFunc() string {
+ return fmt.Sprintf("Marshal%s", td.funcSuffix)
+}
+
+// UnmarshalFunc returns the name of the function that unmarshals the type.
+func (td TypeData) UnmarshalFunc() string {
+ return fmt.Sprintf("Unmarshal%s", td.funcSuffix)
+}
+
+// TypeDataMap maps from a SOAP type (e.g "fixed.14.4") to its type data.
+var TypeDataMap = map[string]TypeData{
+ "ui1": {"Ui1", "uint8"},
+ "ui2": {"Ui2", "uint16"},
+ "ui4": {"Ui4", "uint32"},
+ "ui8": {"Ui8", "uint64"},
+ "i1": {"I1", "int8"},
+ "i2": {"I2", "int16"},
+ "i4": {"I4", "int32"},
+ "int": {"Int", "int64"},
+ "r4": {"R4", "float32"},
+ "r8": {"R8", "float64"},
+ "number": {"R8", "float64"}, // Alias for r8.
+ "fixed.14.4": {"Fixed14_4", "float64"},
+ "float": {"R8", "float64"},
+ "char": {"Char", "rune"},
+ "string": {"String", "string"},
+ "date": {"Date", "time.Time"},
+ "dateTime": {"DateTime", "time.Time"},
+ "dateTime.tz": {"DateTimeTz", "time.Time"},
+ "time": {"TimeOfDay", "soap.TimeOfDay"},
+ "time.tz": {"TimeOfDayTz", "soap.TimeOfDay"},
+ "boolean": {"Boolean", "bool"},
+ "bin.base64": {"BinBase64", "[]byte"},
+ "bin.hex": {"BinHex", "[]byte"},
+ "uri": {"URI", "*url.URL"},
+}
diff --git a/vendor/github.com/huin/goupnp/ssdp/ssdp.go b/vendor/github.com/huin/goupnp/ssdp/ssdp.go
index 85e106cb3..240dfa73d 100644
--- a/vendor/github.com/huin/goupnp/ssdp/ssdp.go
+++ b/vendor/github.com/huin/goupnp/ssdp/ssdp.go
@@ -1,6 +1,7 @@
package ssdp
import (
+ "context"
"errors"
"log"
"net/http"
@@ -34,14 +35,15 @@ type HTTPUClient interface {
) ([]*http.Response, error)
}
-// SSDPRawSearch performs a fairly raw SSDP search request, and returns the
+// SSDPRawSearchCtx performs a fairly raw SSDP search request, and returns the
// unique response(s) that it receives. Each response has the requested
// searchTarget, a USN, and a valid location. maxWaitSeconds states how long to
// wait for responses in seconds, and must be a minimum of 1 (the
// implementation waits an additional 100ms for responses to arrive), 2 is a
// reasonable value for this. numSends is the number of requests to send - 3 is
// a reasonable value for this.
-func SSDPRawSearch(
+func SSDPRawSearchCtx(
+ ctx context.Context,
httpu HTTPUClient,
searchTarget string,
maxWaitSeconds int,
@@ -51,7 +53,7 @@ func SSDPRawSearch(
return nil, errors.New("ssdp: maxWaitSeconds must be >= 1")
}
- req := http.Request{
+ req := (&http.Request{
Method: methodSearch,
// TODO: Support both IPv4 and IPv6.
Host: ssdpUDP4Addr,
@@ -64,8 +66,8 @@ func SSDPRawSearch(
"MAN": []string{ssdpDiscover},
"ST": []string{searchTarget},
},
- }
- allResponses, err := httpu.Do(&req, time.Duration(maxWaitSeconds)*time.Second+100*time.Millisecond, numSends)
+ }).WithContext(ctx)
+ allResponses, err := httpu.Do(req, time.Duration(maxWaitSeconds)*time.Second+100*time.Millisecond, numSends)
if err != nil {
return nil, err
}
@@ -97,3 +99,9 @@ func SSDPRawSearch(
return responses, nil
}
+
+// SSDPRawSearch is the legacy version of SSDPRawSearchCtx, but uses
+// context.Background() as the context.
+func SSDPRawSearch(httpu HTTPUClient, searchTarget string, maxWaitSeconds int, numSends int) ([]*http.Response, error) {
+ return SSDPRawSearchCtx(context.Background(), httpu, searchTarget, maxWaitSeconds, numSends)
+}
diff --git a/vendor/github.com/huin/goupnp/workspace.code-workspace b/vendor/github.com/huin/goupnp/workspace.code-workspace
new file mode 100644
index 000000000..7d337cad8
--- /dev/null
+++ b/vendor/github.com/huin/goupnp/workspace.code-workspace
@@ -0,0 +1,11 @@
+{
+ "folders": [
+ {
+ "path": "."
+ },
+ {
+ "path": "v2alpha"
+ }
+ ],
+ "settings": {}
+}
diff --git a/vendor/github.com/ipfs/go-cid/README.md b/vendor/github.com/ipfs/go-cid/README.md
index 89da04128..70c3e5c7d 100644
--- a/vendor/github.com/ipfs/go-cid/README.md
+++ b/vendor/github.com/ipfs/go-cid/README.md
@@ -69,7 +69,7 @@ import (
// Create a cid manually by specifying the 'prefix' parameters
pref := cid.Prefix{
Version: 1,
- Codec: mc.Raw,
+ Codec: uint64(mc.Raw),
MhType: mh.SHA2_256,
MhLength: -1, // default length
}
diff --git a/vendor/github.com/ipfs/go-cid/cid.go b/vendor/github.com/ipfs/go-cid/cid.go
index bc5704aa2..f1824248f 100644
--- a/vendor/github.com/ipfs/go-cid/cid.go
+++ b/vendor/github.com/ipfs/go-cid/cid.go
@@ -10,7 +10,7 @@
//
// A CIDv1 has four parts:
//
-// ::=
+// ::=
//
// As shown above, the CID implementation relies heavily on Multiformats,
// particularly Multibase
@@ -37,10 +37,32 @@ import (
// UnsupportedVersionString just holds an error message
const UnsupportedVersionString = ""
+// ErrInvalidCid is an error that indicates that a CID is invalid.
+type ErrInvalidCid struct {
+ Err error
+}
+
+func (e ErrInvalidCid) Error() string {
+ return fmt.Sprintf("invalid cid: %s", e.Err)
+}
+
+func (e ErrInvalidCid) Unwrap() error {
+ return e.Err
+}
+
+func (e ErrInvalidCid) Is(err error) bool {
+ switch err.(type) {
+ case ErrInvalidCid, *ErrInvalidCid:
+ return true
+ default:
+ return false
+ }
+}
+
var (
// ErrCidTooShort means that the cid passed to decode was not long
// enough to be a valid Cid
- ErrCidTooShort = errors.New("cid too short")
+ ErrCidTooShort = ErrInvalidCid{errors.New("cid too short")}
// ErrInvalidEncoding means that selected encoding is not supported
// by this Cid version
@@ -90,10 +112,10 @@ func tryNewCidV0(mhash mh.Multihash) (Cid, error) {
// incorrectly detect it as CidV1 in the Version() method
dec, err := mh.Decode(mhash)
if err != nil {
- return Undef, err
+ return Undef, ErrInvalidCid{err}
}
if dec.Code != mh.SHA2_256 || dec.Length != 32 {
- return Undef, fmt.Errorf("invalid hash for cidv0 %d-%d", dec.Code, dec.Length)
+ return Undef, ErrInvalidCid{fmt.Errorf("invalid hash for cidv0 %d-%d", dec.Code, dec.Length)}
}
return Cid{string(mhash)}, nil
}
@@ -177,14 +199,23 @@ func Parse(v interface{}) (Cid, error) {
case Cid:
return v2, nil
default:
- return Undef, fmt.Errorf("can't parse %+v as Cid", v2)
+ return Undef, ErrInvalidCid{fmt.Errorf("can't parse %+v as Cid", v2)}
}
}
+// MustParse calls Parse but will panic on error.
+func MustParse(v interface{}) Cid {
+ c, err := Parse(v)
+ if err != nil {
+ panic(err)
+ }
+ return c
+}
+
// Decode parses a Cid-encoded string and returns a Cid object.
// For CidV1, a Cid-encoded string is primarily a multibase string:
//
-//
+//
//
// The base-encoded string represents a:
//
@@ -201,7 +232,7 @@ func Decode(v string) (Cid, error) {
if len(v) == 46 && v[:2] == "Qm" {
hash, err := mh.FromB58String(v)
if err != nil {
- return Undef, err
+ return Undef, ErrInvalidCid{err}
}
return tryNewCidV0(hash)
@@ -209,7 +240,7 @@ func Decode(v string) (Cid, error) {
_, data, err := mbase.Decode(v)
if err != nil {
- return Undef, err
+ return Undef, ErrInvalidCid{err}
}
return Cast(data)
@@ -231,7 +262,7 @@ func ExtractEncoding(v string) (mbase.Encoding, error) {
// check encoding is valid
_, err := mbase.NewEncoder(encoding)
if err != nil {
- return -1, err
+ return -1, ErrInvalidCid{err}
}
return encoding, nil
@@ -240,7 +271,7 @@ func ExtractEncoding(v string) (mbase.Encoding, error) {
// Cast takes a Cid data slice, parses it and returns a Cid.
// For CidV1, the data buffer is in the form:
//
-//
+//
//
// CidV0 are also supported. In particular, data buffers starting
// with length 34 bytes, which starts with bytes [18,32...] are considered
@@ -251,11 +282,11 @@ func ExtractEncoding(v string) (mbase.Encoding, error) {
func Cast(data []byte) (Cid, error) {
nr, c, err := CidFromBytes(data)
if err != nil {
- return Undef, err
+ return Undef, ErrInvalidCid{err}
}
if nr != len(data) {
- return Undef, fmt.Errorf("trailing bytes in data buffer passed to cid Cast")
+ return Undef, ErrInvalidCid{fmt.Errorf("trailing bytes in data buffer passed to cid Cast")}
}
return c, nil
@@ -369,7 +400,13 @@ func (c Cid) Hash() mh.Multihash {
// Bytes returns the byte representation of a Cid.
// The output of bytes can be parsed back into a Cid
// with Cast().
+//
+// If c.Defined() == false, it return a nil slice and may not
+// be parsable with Cast().
func (c Cid) Bytes() []byte {
+ if !c.Defined() {
+ return nil
+ }
return []byte(c.str)
}
@@ -419,7 +456,7 @@ func (c Cid) Equals(o Cid) bool {
// UnmarshalJSON parses the JSON representation of a Cid.
func (c *Cid) UnmarshalJSON(b []byte) error {
if len(b) < 2 {
- return fmt.Errorf("invalid cid json blob")
+ return ErrInvalidCid{fmt.Errorf("invalid cid json blob")}
}
obj := struct {
CidTarget string `json:"/"`
@@ -427,7 +464,7 @@ func (c *Cid) UnmarshalJSON(b []byte) error {
objptr := &obj
err := json.Unmarshal(b, &objptr)
if err != nil {
- return err
+ return ErrInvalidCid{err}
}
if objptr == nil {
*c = Cid{}
@@ -435,12 +472,12 @@ func (c *Cid) UnmarshalJSON(b []byte) error {
}
if obj.CidTarget == "" {
- return fmt.Errorf("cid was incorrectly formatted")
+ return ErrInvalidCid{fmt.Errorf("cid was incorrectly formatted")}
}
out, err := Decode(obj.CidTarget)
if err != nil {
- return err
+ return ErrInvalidCid{err}
}
*c = out
@@ -450,7 +487,7 @@ func (c *Cid) UnmarshalJSON(b []byte) error {
// MarshalJSON procudes a JSON representation of a Cid, which looks as follows:
//
-// { "/": "" }
+// { "/": "" }
//
// Note that this formatting comes from the IPLD specification
// (https://github.com/ipld/specs/tree/master/ipld)
@@ -507,7 +544,8 @@ func (c Cid) Prefix() Prefix {
// and the Multihash length. It does not contains
// any actual content information.
// NOTE: The use -1 in MhLength to mean default length is deprecated,
-// use the V0Builder or V1Builder structures instead
+//
+// use the V0Builder or V1Builder structures instead
type Prefix struct {
Version uint64
Codec uint64
@@ -526,12 +564,12 @@ func (p Prefix) Sum(data []byte) (Cid, error) {
if p.Version == 0 && (p.MhType != mh.SHA2_256 ||
(p.MhLength != 32 && p.MhLength != -1)) {
- return Undef, fmt.Errorf("invalid v0 prefix")
+ return Undef, ErrInvalidCid{fmt.Errorf("invalid v0 prefix")}
}
hash, err := mh.Sum(data, p.MhType, length)
if err != nil {
- return Undef, err
+ return Undef, ErrInvalidCid{err}
}
switch p.Version {
@@ -540,13 +578,13 @@ func (p Prefix) Sum(data []byte) (Cid, error) {
case 1:
return NewCidV1(p.Codec, hash), nil
default:
- return Undef, fmt.Errorf("invalid cid version")
+ return Undef, ErrInvalidCid{fmt.Errorf("invalid cid version")}
}
}
// Bytes returns a byte representation of a Prefix. It looks like:
//
-//
+//
func (p Prefix) Bytes() []byte {
size := varint.UvarintSize(p.Version)
size += varint.UvarintSize(p.Codec)
@@ -570,22 +608,22 @@ func PrefixFromBytes(buf []byte) (Prefix, error) {
r := bytes.NewReader(buf)
vers, err := varint.ReadUvarint(r)
if err != nil {
- return Prefix{}, err
+ return Prefix{}, ErrInvalidCid{err}
}
codec, err := varint.ReadUvarint(r)
if err != nil {
- return Prefix{}, err
+ return Prefix{}, ErrInvalidCid{err}
}
mhtype, err := varint.ReadUvarint(r)
if err != nil {
- return Prefix{}, err
+ return Prefix{}, ErrInvalidCid{err}
}
mhlen, err := varint.ReadUvarint(r)
if err != nil {
- return Prefix{}, err
+ return Prefix{}, ErrInvalidCid{err}
}
return Prefix{
@@ -599,12 +637,12 @@ func PrefixFromBytes(buf []byte) (Prefix, error) {
func CidFromBytes(data []byte) (int, Cid, error) {
if len(data) > 2 && data[0] == mh.SHA2_256 && data[1] == 32 {
if len(data) < 34 {
- return 0, Undef, fmt.Errorf("not enough bytes for cid v0")
+ return 0, Undef, ErrInvalidCid{fmt.Errorf("not enough bytes for cid v0")}
}
h, err := mh.Cast(data[:34])
if err != nil {
- return 0, Undef, err
+ return 0, Undef, ErrInvalidCid{err}
}
return 34, Cid{string(h)}, nil
@@ -612,21 +650,21 @@ func CidFromBytes(data []byte) (int, Cid, error) {
vers, n, err := varint.FromUvarint(data)
if err != nil {
- return 0, Undef, err
+ return 0, Undef, ErrInvalidCid{err}
}
if vers != 1 {
- return 0, Undef, fmt.Errorf("expected 1 as the cid version number, got: %d", vers)
+ return 0, Undef, ErrInvalidCid{fmt.Errorf("expected 1 as the cid version number, got: %d", vers)}
}
_, cn, err := varint.FromUvarint(data[n:])
if err != nil {
- return 0, Undef, err
+ return 0, Undef, ErrInvalidCid{err}
}
mhnr, _, err := mh.MHFromBytes(data[n+cn:])
if err != nil {
- return 0, Undef, err
+ return 0, Undef, ErrInvalidCid{err}
}
l := n + cn + mhnr
@@ -679,6 +717,9 @@ func (r *bufByteReader) ReadByte() (byte, error) {
// It's recommended to supply a reader that buffers and implements io.ByteReader,
// as CidFromReader has to do many single-byte reads to decode varints.
// If the argument only implements io.Reader, single-byte Read calls are used instead.
+//
+// If the Reader is found to yield zero bytes, an io.EOF error is returned directly, in all
+// other error cases, an ErrInvalidCid, wrapping the original error, is returned.
func CidFromReader(r io.Reader) (int, Cid, error) {
// 64 bytes is enough for any CIDv0,
// and it's enough for most CIDv1s in practice.
@@ -689,32 +730,37 @@ func CidFromReader(r io.Reader) (int, Cid, error) {
// The varint package wants a io.ByteReader, so we must wrap our io.Reader.
vers, err := varint.ReadUvarint(br)
if err != nil {
- return len(br.dst), Undef, err
+ if err == io.EOF {
+ // First-byte read in ReadUvarint errors with io.EOF, so reader has no data.
+ // Subsequent reads with an EOF will return io.ErrUnexpectedEOF and be wrapped here.
+ return 0, Undef, err
+ }
+ return len(br.dst), Undef, ErrInvalidCid{err}
}
// If we have a CIDv0, read the rest of the bytes and cast the buffer.
if vers == mh.SHA2_256 {
if n, err := io.ReadFull(r, br.dst[1:34]); err != nil {
- return len(br.dst) + n, Undef, err
+ return len(br.dst) + n, Undef, ErrInvalidCid{err}
}
br.dst = br.dst[:34]
h, err := mh.Cast(br.dst)
if err != nil {
- return len(br.dst), Undef, err
+ return len(br.dst), Undef, ErrInvalidCid{err}
}
return len(br.dst), Cid{string(h)}, nil
}
if vers != 1 {
- return len(br.dst), Undef, fmt.Errorf("expected 1 as the cid version number, got: %d", vers)
+ return len(br.dst), Undef, ErrInvalidCid{fmt.Errorf("expected 1 as the cid version number, got: %d", vers)}
}
// CID block encoding multicodec.
_, err = varint.ReadUvarint(br)
if err != nil {
- return len(br.dst), Undef, err
+ return len(br.dst), Undef, ErrInvalidCid{err}
}
// We could replace most of the code below with go-multihash's ReadMultihash.
@@ -725,19 +771,19 @@ func CidFromReader(r io.Reader) (int, Cid, error) {
// Multihash hash function code.
_, err = varint.ReadUvarint(br)
if err != nil {
- return len(br.dst), Undef, err
+ return len(br.dst), Undef, ErrInvalidCid{err}
}
// Multihash digest length.
mhl, err := varint.ReadUvarint(br)
if err != nil {
- return len(br.dst), Undef, err
+ return len(br.dst), Undef, ErrInvalidCid{err}
}
// Refuse to make large allocations to prevent OOMs due to bugs.
const maxDigestAlloc = 32 << 20 // 32MiB
if mhl > maxDigestAlloc {
- return len(br.dst), Undef, fmt.Errorf("refusing to allocate %d bytes for a digest", mhl)
+ return len(br.dst), Undef, ErrInvalidCid{fmt.Errorf("refusing to allocate %d bytes for a digest", mhl)}
}
// Fine to convert mhl to int, given maxDigestAlloc.
@@ -756,7 +802,7 @@ func CidFromReader(r io.Reader) (int, Cid, error) {
if n, err := io.ReadFull(r, br.dst[prefixLength:cidLength]); err != nil {
// We can't use len(br.dst) here,
// as we've only read n bytes past prefixLength.
- return prefixLength + n, Undef, err
+ return prefixLength + n, Undef, ErrInvalidCid{err}
}
// This simply ensures the multihash is valid.
@@ -764,7 +810,7 @@ func CidFromReader(r io.Reader) (int, Cid, error) {
// for now, it helps ensure consistency with CidFromBytes.
_, _, err = mh.MHFromBytes(br.dst[mhStart:])
if err != nil {
- return len(br.dst), Undef, err
+ return len(br.dst), Undef, ErrInvalidCid{err}
}
return len(br.dst), Cid{string(br.dst)}, nil
diff --git a/vendor/github.com/ipfs/go-cid/cid_fuzz.go b/vendor/github.com/ipfs/go-cid/cid_fuzz.go
index 0b0408c07..af1ab46b5 100644
--- a/vendor/github.com/ipfs/go-cid/cid_fuzz.go
+++ b/vendor/github.com/ipfs/go-cid/cid_fuzz.go
@@ -1,5 +1,4 @@
//go:build gofuzz
-// +build gofuzz
package cid
diff --git a/vendor/github.com/ipfs/go-cid/version.json b/vendor/github.com/ipfs/go-cid/version.json
index 1437d5b73..26a7d4785 100644
--- a/vendor/github.com/ipfs/go-cid/version.json
+++ b/vendor/github.com/ipfs/go-cid/version.json
@@ -1,3 +1,3 @@
{
- "version": "v0.2.0"
+ "version": "v0.4.1"
}
diff --git a/vendor/github.com/ipfs/go-datastore/basic_ds.go b/vendor/github.com/ipfs/go-datastore/basic_ds.go
index 95f03fe8c..22cfd70d2 100644
--- a/vendor/github.com/ipfs/go-datastore/basic_ds.go
+++ b/vendor/github.com/ipfs/go-datastore/basic_ds.go
@@ -89,62 +89,6 @@ func (d *MapDatastore) Close() error {
return nil
}
-// NullDatastore stores nothing, but conforms to the API.
-// Useful to test with.
-type NullDatastore struct {
-}
-
-var _ Datastore = (*NullDatastore)(nil)
-var _ Batching = (*NullDatastore)(nil)
-
-// NewNullDatastore constructs a null datastoe
-func NewNullDatastore() *NullDatastore {
- return &NullDatastore{}
-}
-
-// Put implements Datastore.Put
-func (d *NullDatastore) Put(ctx context.Context, key Key, value []byte) (err error) {
- return nil
-}
-
-// Sync implements Datastore.Sync
-func (d *NullDatastore) Sync(ctx context.Context, prefix Key) error {
- return nil
-}
-
-// Get implements Datastore.Get
-func (d *NullDatastore) Get(ctx context.Context, key Key) (value []byte, err error) {
- return nil, ErrNotFound
-}
-
-// Has implements Datastore.Has
-func (d *NullDatastore) Has(ctx context.Context, key Key) (exists bool, err error) {
- return false, nil
-}
-
-// Has implements Datastore.GetSize
-func (d *NullDatastore) GetSize(ctx context.Context, key Key) (size int, err error) {
- return -1, ErrNotFound
-}
-
-// Delete implements Datastore.Delete
-func (d *NullDatastore) Delete(ctx context.Context, key Key) (err error) {
- return nil
-}
-
-// Query implements Datastore.Query
-func (d *NullDatastore) Query(ctx context.Context, q dsq.Query) (dsq.Results, error) {
- return dsq.ResultsWithEntries(q, nil), nil
-}
-
-func (d *NullDatastore) Batch(ctx context.Context) (Batch, error) {
- return NewBasicBatch(d), nil
-}
-
-func (d *NullDatastore) Close() error {
- return nil
-}
-
// LogDatastore logs all accesses through the datastore.
type LogDatastore struct {
Name string
diff --git a/vendor/github.com/ipfs/go-datastore/datastore.go b/vendor/github.com/ipfs/go-datastore/datastore.go
index 0d075df62..8926bb449 100644
--- a/vendor/github.com/ipfs/go-datastore/datastore.go
+++ b/vendor/github.com/ipfs/go-datastore/datastore.go
@@ -4,7 +4,6 @@ import (
"context"
"errors"
"io"
- "time"
query "github.com/ipfs/go-datastore/query"
)
@@ -103,8 +102,7 @@ type Read interface {
// capabilities of a `Batch`, but the reverse is NOT true.
type Batching interface {
Datastore
-
- Batch(ctx context.Context) (Batch, error)
+ BatchingFeature
}
// ErrBatchUnsupported is returned if the by Batch if the Datastore doesn't
@@ -115,8 +113,7 @@ var ErrBatchUnsupported = errors.New("this datastore does not support batching")
// which may need checking on-disk data integrity.
type CheckedDatastore interface {
Datastore
-
- Check(ctx context.Context) error
+ CheckedFeature
}
// ScrubbedDatastore is an interface that should be implemented by datastores
@@ -124,25 +121,21 @@ type CheckedDatastore interface {
// error correction.
type ScrubbedDatastore interface {
Datastore
-
- Scrub(ctx context.Context) error
+ ScrubbedFeature
}
// GCDatastore is an interface that should be implemented by datastores which
// don't free disk space by just removing data from them.
type GCDatastore interface {
Datastore
-
- CollectGarbage(ctx context.Context) error
+ GCFeature
}
// PersistentDatastore is an interface that should be implemented by datastores
// which can report disk usage.
type PersistentDatastore interface {
Datastore
-
- // DiskUsage returns the space used by a datastore, in bytes.
- DiskUsage(ctx context.Context) (uint64, error)
+ PersistentFeature
}
// DiskUsage checks if a Datastore is a
@@ -163,13 +156,6 @@ type TTLDatastore interface {
TTL
}
-// TTL encapulates the methods that deal with entries with time-to-live.
-type TTL interface {
- PutWithTTL(ctx context.Context, key Key, value []byte, ttl time.Duration) error
- SetTTL(ctx context.Context, key Key, ttl time.Duration) error
- GetExpiration(ctx context.Context, key Key) (time.Time, error)
-}
-
// Txn extends the Datastore type. Txns allow users to batch queries and
// mutations to the Datastore into atomic groups, or transactions. Actions
// performed on a transaction will not take hold until a successful call to
@@ -194,8 +180,7 @@ type Txn interface {
// support transactions.
type TxnDatastore interface {
Datastore
-
- NewTransaction(ctx context.Context, readOnly bool) (Txn, error)
+ TxnFeature
}
// Errors
diff --git a/vendor/github.com/ipfs/go-datastore/features.go b/vendor/github.com/ipfs/go-datastore/features.go
new file mode 100644
index 000000000..09abc3f99
--- /dev/null
+++ b/vendor/github.com/ipfs/go-datastore/features.go
@@ -0,0 +1,132 @@
+package datastore
+
+import (
+ "context"
+ "reflect"
+ "time"
+)
+
+const (
+ FeatureNameBatching = "Batching"
+ FeatureNameChecked = "Checked"
+ FeatureNameGC = "GC"
+ FeatureNamePersistent = "Persistent"
+ FeatureNameScrubbed = "Scrubbed"
+ FeatureNameTTL = "TTL"
+ FeatureNameTransaction = "Transaction"
+)
+
+type BatchingFeature interface {
+ Batch(ctx context.Context) (Batch, error)
+}
+
+type CheckedFeature interface {
+ Check(ctx context.Context) error
+}
+
+type ScrubbedFeature interface {
+ Scrub(ctx context.Context) error
+}
+
+type GCFeature interface {
+ CollectGarbage(ctx context.Context) error
+}
+
+type PersistentFeature interface {
+ // DiskUsage returns the space used by a datastore, in bytes.
+ DiskUsage(ctx context.Context) (uint64, error)
+}
+
+// TTL encapulates the methods that deal with entries with time-to-live.
+type TTL interface {
+ PutWithTTL(ctx context.Context, key Key, value []byte, ttl time.Duration) error
+ SetTTL(ctx context.Context, key Key, ttl time.Duration) error
+ GetExpiration(ctx context.Context, key Key) (time.Time, error)
+}
+
+type TxnFeature interface {
+ NewTransaction(ctx context.Context, readOnly bool) (Txn, error)
+}
+
+// Feature contains metadata about a datastore Feature.
+type Feature struct {
+ Name string
+ // Interface is the nil interface of the feature.
+ Interface interface{}
+ // DatastoreInterface is the nil interface of the feature's corresponding datastore interface.
+ DatastoreInterface interface{}
+}
+
+var featuresByName map[string]Feature
+
+func init() {
+ featuresByName = map[string]Feature{}
+ for _, f := range Features() {
+ featuresByName[f.Name] = f
+ }
+}
+
+// Features returns a list of all known datastore features.
+// This serves both to provide an authoritative list of features,
+// and to define a canonical ordering of features.
+func Features() []Feature {
+ // for backwards compatibility, only append to this list
+ return []Feature{
+ {
+ Name: FeatureNameBatching,
+ Interface: (*BatchingFeature)(nil),
+ DatastoreInterface: (*Batching)(nil),
+ },
+ {
+ Name: FeatureNameChecked,
+ Interface: (*CheckedFeature)(nil),
+ DatastoreInterface: (*CheckedDatastore)(nil),
+ },
+ {
+ Name: FeatureNameGC,
+ Interface: (*GCFeature)(nil),
+ DatastoreInterface: (*GCDatastore)(nil),
+ },
+ {
+ Name: FeatureNamePersistent,
+ Interface: (*PersistentFeature)(nil),
+ DatastoreInterface: (*PersistentDatastore)(nil),
+ },
+ {
+ Name: FeatureNameScrubbed,
+ Interface: (*ScrubbedFeature)(nil),
+ DatastoreInterface: (*ScrubbedDatastore)(nil),
+ },
+ {
+ Name: FeatureNameTTL,
+ Interface: (*TTL)(nil),
+ DatastoreInterface: (*TTLDatastore)(nil),
+ },
+ {
+ Name: FeatureNameTransaction,
+ Interface: (*TxnFeature)(nil),
+ DatastoreInterface: (*TxnDatastore)(nil),
+ },
+ }
+}
+
+// FeatureByName returns the feature with the given name, if known.
+func FeatureByName(name string) (Feature, bool) {
+ feat, known := featuresByName[name]
+ return feat, known
+}
+
+// FeaturesForDatastore returns the features supported by the given datastore.
+func FeaturesForDatastore(dstore Datastore) (features []Feature) {
+ if dstore == nil {
+ return nil
+ }
+ dstoreType := reflect.TypeOf(dstore)
+ for _, f := range Features() {
+ fType := reflect.TypeOf(f.Interface).Elem()
+ if dstoreType.Implements(fType) {
+ features = append(features, f)
+ }
+ }
+ return
+}
diff --git a/vendor/github.com/ipfs/go-datastore/null_ds.go b/vendor/github.com/ipfs/go-datastore/null_ds.go
new file mode 100644
index 000000000..0fd15015f
--- /dev/null
+++ b/vendor/github.com/ipfs/go-datastore/null_ds.go
@@ -0,0 +1,120 @@
+package datastore
+
+import (
+ "context"
+
+ dsq "github.com/ipfs/go-datastore/query"
+)
+
+// NullDatastore stores nothing, but conforms to the API.
+// Useful to test with.
+type NullDatastore struct {
+}
+
+var _ Datastore = (*NullDatastore)(nil)
+var _ Batching = (*NullDatastore)(nil)
+var _ ScrubbedDatastore = (*NullDatastore)(nil)
+var _ CheckedDatastore = (*NullDatastore)(nil)
+var _ PersistentDatastore = (*NullDatastore)(nil)
+var _ GCDatastore = (*NullDatastore)(nil)
+var _ TxnDatastore = (*NullDatastore)(nil)
+
+// NewNullDatastore constructs a null datastoe
+func NewNullDatastore() *NullDatastore {
+ return &NullDatastore{}
+}
+
+// Put implements Datastore.Put
+func (d *NullDatastore) Put(ctx context.Context, key Key, value []byte) (err error) {
+ return nil
+}
+
+// Sync implements Datastore.Sync
+func (d *NullDatastore) Sync(ctx context.Context, prefix Key) error {
+ return nil
+}
+
+// Get implements Datastore.Get
+func (d *NullDatastore) Get(ctx context.Context, key Key) (value []byte, err error) {
+ return nil, ErrNotFound
+}
+
+// Has implements Datastore.Has
+func (d *NullDatastore) Has(ctx context.Context, key Key) (exists bool, err error) {
+ return false, nil
+}
+
+// Has implements Datastore.GetSize
+func (d *NullDatastore) GetSize(ctx context.Context, key Key) (size int, err error) {
+ return -1, ErrNotFound
+}
+
+// Delete implements Datastore.Delete
+func (d *NullDatastore) Delete(ctx context.Context, key Key) (err error) {
+ return nil
+}
+
+func (d *NullDatastore) Scrub(ctx context.Context) error {
+ return nil
+}
+
+func (d *NullDatastore) Check(ctx context.Context) error {
+ return nil
+}
+
+// Query implements Datastore.Query
+func (d *NullDatastore) Query(ctx context.Context, q dsq.Query) (dsq.Results, error) {
+ return dsq.ResultsWithEntries(q, nil), nil
+}
+
+func (d *NullDatastore) Batch(ctx context.Context) (Batch, error) {
+ return NewBasicBatch(d), nil
+}
+
+func (d *NullDatastore) CollectGarbage(ctx context.Context) error {
+ return nil
+}
+
+func (d *NullDatastore) DiskUsage(ctx context.Context) (uint64, error) {
+ return 0, nil
+}
+
+func (d *NullDatastore) Close() error {
+ return nil
+}
+
+func (d *NullDatastore) NewTransaction(ctx context.Context, readOnly bool) (Txn, error) {
+ return &nullTxn{}, nil
+}
+
+type nullTxn struct{}
+
+func (t *nullTxn) Get(ctx context.Context, key Key) (value []byte, err error) {
+ return nil, nil
+}
+
+func (t *nullTxn) Has(ctx context.Context, key Key) (exists bool, err error) {
+ return false, nil
+}
+
+func (t *nullTxn) GetSize(ctx context.Context, key Key) (size int, err error) {
+ return 0, nil
+}
+
+func (t *nullTxn) Query(ctx context.Context, q dsq.Query) (dsq.Results, error) {
+ return dsq.ResultsWithEntries(q, nil), nil
+}
+
+func (t *nullTxn) Put(ctx context.Context, key Key, value []byte) error {
+ return nil
+}
+
+func (t *nullTxn) Delete(ctx context.Context, key Key) error {
+ return nil
+}
+
+func (t *nullTxn) Commit(ctx context.Context) error {
+ return nil
+}
+
+func (t *nullTxn) Discard(ctx context.Context) {}
diff --git a/vendor/github.com/ipfs/go-datastore/version.json b/vendor/github.com/ipfs/go-datastore/version.json
index 4ab5086b7..42c14d1be 100644
--- a/vendor/github.com/ipfs/go-datastore/version.json
+++ b/vendor/github.com/ipfs/go-datastore/version.json
@@ -1,3 +1,3 @@
{
- "version": "v0.5.1"
+ "version": "v0.6.0"
}
diff --git a/vendor/github.com/klauspost/compress/.gitignore b/vendor/github.com/klauspost/compress/.gitignore
index b35f8449b..d31b37815 100644
--- a/vendor/github.com/klauspost/compress/.gitignore
+++ b/vendor/github.com/klauspost/compress/.gitignore
@@ -23,3 +23,10 @@ _testmain.go
*.test
*.prof
/s2/cmd/_s2sx/sfx-exe
+
+# Linux perf files
+perf.data
+perf.data.old
+
+# gdb history
+.gdb_history
diff --git a/vendor/github.com/klauspost/compress/.goreleaser.yml b/vendor/github.com/klauspost/compress/.goreleaser.yml
index 0af08e65e..7a008a4d2 100644
--- a/vendor/github.com/klauspost/compress/.goreleaser.yml
+++ b/vendor/github.com/klauspost/compress/.goreleaser.yml
@@ -3,7 +3,7 @@
before:
hooks:
- ./gen.sh
- - go install mvdan.cc/garble@latest
+ - go install mvdan.cc/garble@v0.9.3
builds:
-
diff --git a/vendor/github.com/klauspost/compress/README.md b/vendor/github.com/klauspost/compress/README.md
index 0e2dc116a..efab55e65 100644
--- a/vendor/github.com/klauspost/compress/README.md
+++ b/vendor/github.com/klauspost/compress/README.md
@@ -9,7 +9,6 @@ This package provides various compression algorithms.
* [huff0](https://github.com/klauspost/compress/tree/master/huff0) and [FSE](https://github.com/klauspost/compress/tree/master/fse) implementations for raw entropy encoding.
* [gzhttp](https://github.com/klauspost/compress/tree/master/gzhttp) Provides client and server wrappers for handling gzipped requests efficiently.
* [pgzip](https://github.com/klauspost/pgzip) is a separate package that provides a very fast parallel gzip implementation.
-* [fuzz package](https://github.com/klauspost/compress-fuzz) for fuzz testing all compressors/decompressors here.
[![Go Reference](https://pkg.go.dev/badge/klauspost/compress.svg)](https://pkg.go.dev/github.com/klauspost/compress?tab=subdirectories)
[![Go](https://github.com/klauspost/compress/actions/workflows/go.yml/badge.svg)](https://github.com/klauspost/compress/actions/workflows/go.yml)
@@ -17,6 +16,142 @@ This package provides various compression algorithms.
# changelog
+* Apr 5, 2023 - [v1.16.4](https://github.com/klauspost/compress/releases/tag/v1.16.4)
+ * zstd: Improve zstd best efficiency by @greatroar and @klauspost in https://github.com/klauspost/compress/pull/784
+ * zstd: Respect WithAllLitEntropyCompression https://github.com/klauspost/compress/pull/792
+ * zstd: Fix amd64 not always detecting corrupt data https://github.com/klauspost/compress/pull/785
+ * zstd: Various minor improvements by @greatroar in https://github.com/klauspost/compress/pull/788 https://github.com/klauspost/compress/pull/794 https://github.com/klauspost/compress/pull/795
+ * s2: Fix huge block overflow https://github.com/klauspost/compress/pull/779
+ * s2: Allow CustomEncoder fallback https://github.com/klauspost/compress/pull/780
+ * gzhttp: Suppport ResponseWriter Unwrap() in gzhttp handler by @jgimenez in https://github.com/klauspost/compress/pull/799
+
+* Mar 13, 2023 - [v1.16.1](https://github.com/klauspost/compress/releases/tag/v1.16.1)
+ * zstd: Speed up + improve best encoder by @greatroar in https://github.com/klauspost/compress/pull/776
+ * gzhttp: Add optional [BREACH mitigation](https://github.com/klauspost/compress/tree/master/gzhttp#breach-mitigation). https://github.com/klauspost/compress/pull/762 https://github.com/klauspost/compress/pull/768 https://github.com/klauspost/compress/pull/769 https://github.com/klauspost/compress/pull/770 https://github.com/klauspost/compress/pull/767
+ * s2: Add Intel LZ4s converter https://github.com/klauspost/compress/pull/766
+ * zstd: Minor bug fixes https://github.com/klauspost/compress/pull/771 https://github.com/klauspost/compress/pull/772 https://github.com/klauspost/compress/pull/773
+ * huff0: Speed up compress1xDo by @greatroar in https://github.com/klauspost/compress/pull/774
+
+* Feb 26, 2023 - [v1.16.0](https://github.com/klauspost/compress/releases/tag/v1.16.0)
+ * s2: Add [Dictionary](https://github.com/klauspost/compress/tree/master/s2#dictionaries) support. https://github.com/klauspost/compress/pull/685
+ * s2: Add Compression Size Estimate. https://github.com/klauspost/compress/pull/752
+ * s2: Add support for custom stream encoder. https://github.com/klauspost/compress/pull/755
+ * s2: Add LZ4 block converter. https://github.com/klauspost/compress/pull/748
+ * s2: Support io.ReaderAt in ReadSeeker. https://github.com/klauspost/compress/pull/747
+ * s2c/s2sx: Use concurrent decoding. https://github.com/klauspost/compress/pull/746
+
+* Jan 21st, 2023 (v1.15.15)
+ * deflate: Improve level 7-9 by @klauspost in https://github.com/klauspost/compress/pull/739
+ * zstd: Add delta encoding support by @greatroar in https://github.com/klauspost/compress/pull/728
+ * zstd: Various speed improvements by @greatroar https://github.com/klauspost/compress/pull/741 https://github.com/klauspost/compress/pull/734 https://github.com/klauspost/compress/pull/736 https://github.com/klauspost/compress/pull/744 https://github.com/klauspost/compress/pull/743 https://github.com/klauspost/compress/pull/745
+ * gzhttp: Add SuffixETag() and DropETag() options to prevent ETag collisions on compressed responses by @willbicks in https://github.com/klauspost/compress/pull/740
+
+* Jan 3rd, 2023 (v1.15.14)
+
+ * flate: Improve speed in big stateless blocks https://github.com/klauspost/compress/pull/718
+ * zstd: Minor speed tweaks by @greatroar in https://github.com/klauspost/compress/pull/716 https://github.com/klauspost/compress/pull/720
+ * export NoGzipResponseWriter for custom ResponseWriter wrappers by @harshavardhana in https://github.com/klauspost/compress/pull/722
+ * s2: Add example for indexing and existing stream https://github.com/klauspost/compress/pull/723
+
+* Dec 11, 2022 (v1.15.13)
+ * zstd: Add [MaxEncodedSize](https://pkg.go.dev/github.com/klauspost/compress@v1.15.13/zstd#Encoder.MaxEncodedSize) to encoder https://github.com/klauspost/compress/pull/691
+ * zstd: Various tweaks and improvements https://github.com/klauspost/compress/pull/693 https://github.com/klauspost/compress/pull/695 https://github.com/klauspost/compress/pull/696 https://github.com/klauspost/compress/pull/701 https://github.com/klauspost/compress/pull/702 https://github.com/klauspost/compress/pull/703 https://github.com/klauspost/compress/pull/704 https://github.com/klauspost/compress/pull/705 https://github.com/klauspost/compress/pull/706 https://github.com/klauspost/compress/pull/707 https://github.com/klauspost/compress/pull/708
+
+* Oct 26, 2022 (v1.15.12)
+
+ * zstd: Tweak decoder allocs. https://github.com/klauspost/compress/pull/680
+ * gzhttp: Always delete `HeaderNoCompression` https://github.com/klauspost/compress/pull/683
+
+* Sept 26, 2022 (v1.15.11)
+
+ * flate: Improve level 1-3 compression https://github.com/klauspost/compress/pull/678
+ * zstd: Improve "best" compression by @nightwolfz in https://github.com/klauspost/compress/pull/677
+ * zstd: Fix+reduce decompression allocations https://github.com/klauspost/compress/pull/668
+ * zstd: Fix non-effective noescape tag https://github.com/klauspost/compress/pull/667
+
+* Sept 16, 2022 (v1.15.10)
+
+ * zstd: Add [WithDecodeAllCapLimit](https://pkg.go.dev/github.com/klauspost/compress@v1.15.10/zstd#WithDecodeAllCapLimit) https://github.com/klauspost/compress/pull/649
+ * Add Go 1.19 - deprecate Go 1.16 https://github.com/klauspost/compress/pull/651
+ * flate: Improve level 5+6 compression https://github.com/klauspost/compress/pull/656
+ * zstd: Improve "better" compresssion https://github.com/klauspost/compress/pull/657
+ * s2: Improve "best" compression https://github.com/klauspost/compress/pull/658
+ * s2: Improve "better" compression. https://github.com/klauspost/compress/pull/635
+ * s2: Slightly faster non-assembly decompression https://github.com/klauspost/compress/pull/646
+ * Use arrays for constant size copies https://github.com/klauspost/compress/pull/659
+
+* July 21, 2022 (v1.15.9)
+
+ * zstd: Fix decoder crash on amd64 (no BMI) on invalid input https://github.com/klauspost/compress/pull/645
+ * zstd: Disable decoder extended memory copies (amd64) due to possible crashes https://github.com/klauspost/compress/pull/644
+ * zstd: Allow single segments up to "max decoded size" by @klauspost in https://github.com/klauspost/compress/pull/643
+
+* July 13, 2022 (v1.15.8)
+
+ * gzip: fix stack exhaustion bug in Reader.Read https://github.com/klauspost/compress/pull/641
+ * s2: Add Index header trim/restore https://github.com/klauspost/compress/pull/638
+ * zstd: Optimize seqdeq amd64 asm by @greatroar in https://github.com/klauspost/compress/pull/636
+ * zstd: Improve decoder memcopy https://github.com/klauspost/compress/pull/637
+ * huff0: Pass a single bitReader pointer to asm by @greatroar in https://github.com/klauspost/compress/pull/634
+ * zstd: Branchless getBits for amd64 w/o BMI2 by @greatroar in https://github.com/klauspost/compress/pull/640
+ * gzhttp: Remove header before writing https://github.com/klauspost/compress/pull/639
+
+* June 29, 2022 (v1.15.7)
+
+ * s2: Fix absolute forward seeks https://github.com/klauspost/compress/pull/633
+ * zip: Merge upstream https://github.com/klauspost/compress/pull/631
+ * zip: Re-add zip64 fix https://github.com/klauspost/compress/pull/624
+ * zstd: translate fseDecoder.buildDtable into asm by @WojciechMula in https://github.com/klauspost/compress/pull/598
+ * flate: Faster histograms https://github.com/klauspost/compress/pull/620
+ * deflate: Use compound hcode https://github.com/klauspost/compress/pull/622
+
+* June 3, 2022 (v1.15.6)
+ * s2: Improve coding for long, close matches https://github.com/klauspost/compress/pull/613
+ * s2c: Add Snappy/S2 stream recompression https://github.com/klauspost/compress/pull/611
+ * zstd: Always use configured block size https://github.com/klauspost/compress/pull/605
+ * zstd: Fix incorrect hash table placement for dict encoding in default https://github.com/klauspost/compress/pull/606
+ * zstd: Apply default config to ZipDecompressor without options https://github.com/klauspost/compress/pull/608
+ * gzhttp: Exclude more common archive formats https://github.com/klauspost/compress/pull/612
+ * s2: Add ReaderIgnoreCRC https://github.com/klauspost/compress/pull/609
+ * s2: Remove sanity load on index creation https://github.com/klauspost/compress/pull/607
+ * snappy: Use dedicated function for scoring https://github.com/klauspost/compress/pull/614
+ * s2c+s2d: Use official snappy framed extension https://github.com/klauspost/compress/pull/610
+
+* May 25, 2022 (v1.15.5)
+ * s2: Add concurrent stream decompression https://github.com/klauspost/compress/pull/602
+ * s2: Fix final emit oob read crash on amd64 https://github.com/klauspost/compress/pull/601
+ * huff0: asm implementation of Decompress1X by @WojciechMula https://github.com/klauspost/compress/pull/596
+ * zstd: Use 1 less goroutine for stream decoding https://github.com/klauspost/compress/pull/588
+ * zstd: Copy literal in 16 byte blocks when possible https://github.com/klauspost/compress/pull/592
+ * zstd: Speed up when WithDecoderLowmem(false) https://github.com/klauspost/compress/pull/599
+ * zstd: faster next state update in BMI2 version of decode by @WojciechMula in https://github.com/klauspost/compress/pull/593
+ * huff0: Do not check max size when reading table. https://github.com/klauspost/compress/pull/586
+ * flate: Inplace hashing for level 7-9 by @klauspost in https://github.com/klauspost/compress/pull/590
+
+
+* May 11, 2022 (v1.15.4)
+ * huff0: decompress directly into output by @WojciechMula in [#577](https://github.com/klauspost/compress/pull/577)
+ * inflate: Keep dict on stack [#581](https://github.com/klauspost/compress/pull/581)
+ * zstd: Faster decoding memcopy in asm [#583](https://github.com/klauspost/compress/pull/583)
+ * zstd: Fix ignored crc [#580](https://github.com/klauspost/compress/pull/580)
+
+* May 5, 2022 (v1.15.3)
+ * zstd: Allow to ignore checksum checking by @WojciechMula [#572](https://github.com/klauspost/compress/pull/572)
+ * s2: Fix incorrect seek for io.SeekEnd in [#575](https://github.com/klauspost/compress/pull/575)
+
+* Apr 26, 2022 (v1.15.2)
+ * zstd: Add x86-64 assembly for decompression on streams and blocks. Contributed by [@WojciechMula](https://github.com/WojciechMula). Typically 2x faster. [#528](https://github.com/klauspost/compress/pull/528) [#531](https://github.com/klauspost/compress/pull/531) [#545](https://github.com/klauspost/compress/pull/545) [#537](https://github.com/klauspost/compress/pull/537)
+ * zstd: Add options to ZipDecompressor and fixes [#539](https://github.com/klauspost/compress/pull/539)
+ * s2: Use sorted search for index [#555](https://github.com/klauspost/compress/pull/555)
+ * Minimum version is Go 1.16, added CI test on 1.18.
+
+* Mar 11, 2022 (v1.15.1)
+ * huff0: Add x86 assembly of Decode4X by @WojciechMula in [#512](https://github.com/klauspost/compress/pull/512)
+ * zstd: Reuse zip decoders in [#514](https://github.com/klauspost/compress/pull/514)
+ * zstd: Detect extra block data and report as corrupted in [#520](https://github.com/klauspost/compress/pull/520)
+ * zstd: Handle zero sized frame content size stricter in [#521](https://github.com/klauspost/compress/pull/521)
+ * zstd: Add stricter block size checks in [#523](https://github.com/klauspost/compress/pull/523)
+
* Mar 3, 2022 (v1.15.0)
* zstd: Refactor decoder by @klauspost in [#498](https://github.com/klauspost/compress/pull/498)
* zstd: Add stream encoding without goroutines by @klauspost in [#505](https://github.com/klauspost/compress/pull/505)
@@ -25,15 +160,15 @@ This package provides various compression algorithms.
* gzhttp: Add zstd to transport by @klauspost in [#400](https://github.com/klauspost/compress/pull/400)
* gzhttp: Make content-type optional by @klauspost in [#510](https://github.com/klauspost/compress/pull/510)
-
- See Details
Both compression and decompression now supports "synchronous" stream operations. This means that whenever "concurrency" is set to 1, they will operate without spawning goroutines.
Stream decompression is now faster on asynchronous, since the goroutine allocation much more effectively splits the workload. On typical streams this will typically use 2 cores fully for decompression. When a stream has finished decoding no goroutines will be left over, so decoders can now safely be pooled and still be garbage collected.
While the release has been extensively tested, it is recommended to testing when upgrading.
-
+
+ See changes to v1.14.x
+
* Feb 22, 2022 (v1.14.4)
* flate: Fix rare huffman only (-2) corruption. [#503](https://github.com/klauspost/compress/pull/503)
* zip: Update deprecated CreateHeaderRaw to correctly call CreateRaw by @saracen in [#502](https://github.com/klauspost/compress/pull/502)
@@ -59,7 +194,11 @@ While the release has been extensively tested, it is recommended to testing when
* zstd: Performance improvement in [#420]( https://github.com/klauspost/compress/pull/420) [#456](https://github.com/klauspost/compress/pull/456) [#437](https://github.com/klauspost/compress/pull/437) [#467](https://github.com/klauspost/compress/pull/467) [#468](https://github.com/klauspost/compress/pull/468)
* zstd: add arm64 xxhash assembly in [#464](https://github.com/klauspost/compress/pull/464)
* Add garbled for binaries for s2 in [#445](https://github.com/klauspost/compress/pull/445)
+
+
+ See changes to v1.13.x
+
* Aug 30, 2021 (v1.13.5)
* gz/zlib/flate: Alias stdlib errors [#425](https://github.com/klauspost/compress/pull/425)
* s2: Add block support to commandline tools [#413](https://github.com/klauspost/compress/pull/413)
@@ -88,6 +227,8 @@ While the release has been extensively tested, it is recommended to testing when
* Added [gzhttp](https://github.com/klauspost/compress/tree/master/gzhttp#gzip-handler) which allows wrapping HTTP servers and clients with GZIP compressors.
* zstd: Detect short invalid signatures [#382](https://github.com/klauspost/compress/pull/382)
* zstd: Spawn decoder goroutine only if needed. [#380](https://github.com/klauspost/compress/pull/380)
+
+
See changes to v1.12.x
@@ -483,6 +624,8 @@ Here are other packages of good quality and pure Go (no cgo wrappers or autoconv
* [github.com/pierrec/lz4](https://github.com/pierrec/lz4) - strong multithreaded LZ4 compression.
* [github.com/cosnicolaou/pbzip2](https://github.com/cosnicolaou/pbzip2) - multithreaded bzip2 decompression.
* [github.com/dsnet/compress](https://github.com/dsnet/compress) - brotli decompression, bzip2 writer.
+* [github.com/ronanh/intcomp](https://github.com/ronanh/intcomp) - Integer compression.
+* [github.com/spenczar/fpc](https://github.com/spenczar/fpc) - Float compression.
# license
diff --git a/vendor/github.com/klauspost/compress/fse/compress.go b/vendor/github.com/klauspost/compress/fse/compress.go
index 6f341914c..dac97e58a 100644
--- a/vendor/github.com/klauspost/compress/fse/compress.go
+++ b/vendor/github.com/klauspost/compress/fse/compress.go
@@ -146,54 +146,51 @@ func (s *Scratch) compress(src []byte) error {
c1.encodeZero(tt[src[ip-2]])
ip -= 2
}
+ src = src[:ip]
// Main compression loop.
switch {
case !s.zeroBits && s.actualTableLog <= 8:
// We can encode 4 symbols without requiring a flush.
// We do not need to check if any output is 0 bits.
- for ip >= 4 {
+ for ; len(src) >= 4; src = src[:len(src)-4] {
s.bw.flush32()
- v3, v2, v1, v0 := src[ip-4], src[ip-3], src[ip-2], src[ip-1]
+ v3, v2, v1, v0 := src[len(src)-4], src[len(src)-3], src[len(src)-2], src[len(src)-1]
c2.encode(tt[v0])
c1.encode(tt[v1])
c2.encode(tt[v2])
c1.encode(tt[v3])
- ip -= 4
}
case !s.zeroBits:
// We do not need to check if any output is 0 bits.
- for ip >= 4 {
+ for ; len(src) >= 4; src = src[:len(src)-4] {
s.bw.flush32()
- v3, v2, v1, v0 := src[ip-4], src[ip-3], src[ip-2], src[ip-1]
+ v3, v2, v1, v0 := src[len(src)-4], src[len(src)-3], src[len(src)-2], src[len(src)-1]
c2.encode(tt[v0])
c1.encode(tt[v1])
s.bw.flush32()
c2.encode(tt[v2])
c1.encode(tt[v3])
- ip -= 4
}
case s.actualTableLog <= 8:
// We can encode 4 symbols without requiring a flush
- for ip >= 4 {
+ for ; len(src) >= 4; src = src[:len(src)-4] {
s.bw.flush32()
- v3, v2, v1, v0 := src[ip-4], src[ip-3], src[ip-2], src[ip-1]
+ v3, v2, v1, v0 := src[len(src)-4], src[len(src)-3], src[len(src)-2], src[len(src)-1]
c2.encodeZero(tt[v0])
c1.encodeZero(tt[v1])
c2.encodeZero(tt[v2])
c1.encodeZero(tt[v3])
- ip -= 4
}
default:
- for ip >= 4 {
+ for ; len(src) >= 4; src = src[:len(src)-4] {
s.bw.flush32()
- v3, v2, v1, v0 := src[ip-4], src[ip-3], src[ip-2], src[ip-1]
+ v3, v2, v1, v0 := src[len(src)-4], src[len(src)-3], src[len(src)-2], src[len(src)-1]
c2.encodeZero(tt[v0])
c1.encodeZero(tt[v1])
s.bw.flush32()
c2.encodeZero(tt[v2])
c1.encodeZero(tt[v3])
- ip -= 4
}
}
@@ -459,15 +456,17 @@ func (s *Scratch) countSimple(in []byte) (max int) {
for _, v := range in {
s.count[v]++
}
- m := uint32(0)
+ m, symlen := uint32(0), s.symbolLen
for i, v := range s.count[:] {
+ if v == 0 {
+ continue
+ }
if v > m {
m = v
}
- if v > 0 {
- s.symbolLen = uint16(i) + 1
- }
+ symlen = uint16(i) + 1
}
+ s.symbolLen = symlen
return int(m)
}
diff --git a/vendor/github.com/klauspost/compress/fse/decompress.go b/vendor/github.com/klauspost/compress/fse/decompress.go
index 926f5f153..cc05d0f7e 100644
--- a/vendor/github.com/klauspost/compress/fse/decompress.go
+++ b/vendor/github.com/klauspost/compress/fse/decompress.go
@@ -260,7 +260,9 @@ func (s *Scratch) buildDtable() error {
// If the buffer is over-read an error is returned.
func (s *Scratch) decompress() error {
br := &s.bits
- br.init(s.br.unread())
+ if err := br.init(s.br.unread()); err != nil {
+ return err
+ }
var s1, s2 decoder
// Initialize and decode first state and symbol.
diff --git a/vendor/github.com/klauspost/compress/huff0/autogen.go b/vendor/github.com/klauspost/compress/huff0/autogen.go
deleted file mode 100644
index ff2c69d60..000000000
--- a/vendor/github.com/klauspost/compress/huff0/autogen.go
+++ /dev/null
@@ -1,5 +0,0 @@
-package huff0
-
-//go:generate go run generate.go
-//go:generate asmfmt -w decompress_amd64.s
-//go:generate asmfmt -w decompress_8b_amd64.s
diff --git a/vendor/github.com/klauspost/compress/huff0/bitreader.go b/vendor/github.com/klauspost/compress/huff0/bitreader.go
index 451160edd..e36d9742f 100644
--- a/vendor/github.com/klauspost/compress/huff0/bitreader.go
+++ b/vendor/github.com/klauspost/compress/huff0/bitreader.go
@@ -67,7 +67,6 @@ func (b *bitReaderBytes) fillFast() {
// 2 bounds checks.
v := b.in[b.off-4 : b.off]
- v = v[:4]
low := (uint32(v[0])) | (uint32(v[1]) << 8) | (uint32(v[2]) << 16) | (uint32(v[3]) << 24)
b.value |= uint64(low) << (b.bitsRead - 32)
b.bitsRead -= 32
@@ -88,8 +87,7 @@ func (b *bitReaderBytes) fill() {
return
}
if b.off > 4 {
- v := b.in[b.off-4:]
- v = v[:4]
+ v := b.in[b.off-4 : b.off]
low := (uint32(v[0])) | (uint32(v[1]) << 8) | (uint32(v[2]) << 16) | (uint32(v[3]) << 24)
b.value |= uint64(low) << (b.bitsRead - 32)
b.bitsRead -= 32
@@ -165,11 +163,6 @@ func (b *bitReaderShifted) peekBitsFast(n uint8) uint16 {
return uint16(b.value >> ((64 - n) & 63))
}
-// peekTopBits(n) is equvialent to peekBitFast(64 - n)
-func (b *bitReaderShifted) peekTopBits(n uint8) uint16 {
- return uint16(b.value >> n)
-}
-
func (b *bitReaderShifted) advance(n uint8) {
b.bitsRead += n
b.value <<= n & 63
@@ -184,7 +177,6 @@ func (b *bitReaderShifted) fillFast() {
// 2 bounds checks.
v := b.in[b.off-4 : b.off]
- v = v[:4]
low := (uint32(v[0])) | (uint32(v[1]) << 8) | (uint32(v[2]) << 16) | (uint32(v[3]) << 24)
b.value |= uint64(low) << ((b.bitsRead - 32) & 63)
b.bitsRead -= 32
@@ -205,8 +197,7 @@ func (b *bitReaderShifted) fill() {
return
}
if b.off > 4 {
- v := b.in[b.off-4:]
- v = v[:4]
+ v := b.in[b.off-4 : b.off]
low := (uint32(v[0])) | (uint32(v[1]) << 8) | (uint32(v[2]) << 16) | (uint32(v[3]) << 24)
b.value |= uint64(low) << ((b.bitsRead - 32) & 63)
b.bitsRead -= 32
@@ -220,11 +211,6 @@ func (b *bitReaderShifted) fill() {
}
}
-// finished returns true if all bits have been read from the bit stream.
-func (b *bitReaderShifted) finished() bool {
- return b.off == 0 && b.bitsRead >= 64
-}
-
func (b *bitReaderShifted) remaining() uint {
return b.off*8 + uint(64-b.bitsRead)
}
diff --git a/vendor/github.com/klauspost/compress/huff0/bitwriter.go b/vendor/github.com/klauspost/compress/huff0/bitwriter.go
index 6bce4e87d..aed2347ce 100644
--- a/vendor/github.com/klauspost/compress/huff0/bitwriter.go
+++ b/vendor/github.com/klauspost/compress/huff0/bitwriter.go
@@ -5,8 +5,6 @@
package huff0
-import "fmt"
-
// bitWriter will write bits.
// First bit will be LSB of the first byte of output.
type bitWriter struct {
@@ -23,14 +21,6 @@ var bitMask16 = [32]uint16{
0xFFFF, 0xFFFF, 0xFFFF, 0xFFFF, 0xFFFF, 0xFFFF,
0xFFFF, 0xFFFF} /* up to 16 bits */
-// addBits16NC will add up to 16 bits.
-// It will not check if there is space for them,
-// so the caller must ensure that it has flushed recently.
-func (b *bitWriter) addBits16NC(value uint16, bits uint8) {
- b.bitContainer |= uint64(value&bitMask16[bits&31]) << (b.nBits & 63)
- b.nBits += bits
-}
-
// addBits16Clean will add up to 16 bits. value may not contain more set bits than indicated.
// It will not check if there is space for them, so the caller must ensure that it has flushed recently.
func (b *bitWriter) addBits16Clean(value uint16, bits uint8) {
@@ -70,102 +60,20 @@ func (b *bitWriter) encTwoSymbols(ct cTable, av, bv byte) {
b.nBits += encA.nBits + encB.nBits
}
-// addBits16ZeroNC will add up to 16 bits.
+// encFourSymbols adds up to 32 bits from four symbols.
// It will not check if there is space for them,
-// so the caller must ensure that it has flushed recently.
-// This is fastest if bits can be zero.
-func (b *bitWriter) addBits16ZeroNC(value uint16, bits uint8) {
- if bits == 0 {
- return
- }
- value <<= (16 - bits) & 15
- value >>= (16 - bits) & 15
- b.bitContainer |= uint64(value) << (b.nBits & 63)
- b.nBits += bits
-}
-
-// flush will flush all pending full bytes.
-// There will be at least 56 bits available for writing when this has been called.
-// Using flush32 is faster, but leaves less space for writing.
-func (b *bitWriter) flush() {
- v := b.nBits >> 3
- switch v {
- case 0:
- return
- case 1:
- b.out = append(b.out,
- byte(b.bitContainer),
- )
- b.bitContainer >>= 1 << 3
- case 2:
- b.out = append(b.out,
- byte(b.bitContainer),
- byte(b.bitContainer>>8),
- )
- b.bitContainer >>= 2 << 3
- case 3:
- b.out = append(b.out,
- byte(b.bitContainer),
- byte(b.bitContainer>>8),
- byte(b.bitContainer>>16),
- )
- b.bitContainer >>= 3 << 3
- case 4:
- b.out = append(b.out,
- byte(b.bitContainer),
- byte(b.bitContainer>>8),
- byte(b.bitContainer>>16),
- byte(b.bitContainer>>24),
- )
- b.bitContainer >>= 4 << 3
- case 5:
- b.out = append(b.out,
- byte(b.bitContainer),
- byte(b.bitContainer>>8),
- byte(b.bitContainer>>16),
- byte(b.bitContainer>>24),
- byte(b.bitContainer>>32),
- )
- b.bitContainer >>= 5 << 3
- case 6:
- b.out = append(b.out,
- byte(b.bitContainer),
- byte(b.bitContainer>>8),
- byte(b.bitContainer>>16),
- byte(b.bitContainer>>24),
- byte(b.bitContainer>>32),
- byte(b.bitContainer>>40),
- )
- b.bitContainer >>= 6 << 3
- case 7:
- b.out = append(b.out,
- byte(b.bitContainer),
- byte(b.bitContainer>>8),
- byte(b.bitContainer>>16),
- byte(b.bitContainer>>24),
- byte(b.bitContainer>>32),
- byte(b.bitContainer>>40),
- byte(b.bitContainer>>48),
- )
- b.bitContainer >>= 7 << 3
- case 8:
- b.out = append(b.out,
- byte(b.bitContainer),
- byte(b.bitContainer>>8),
- byte(b.bitContainer>>16),
- byte(b.bitContainer>>24),
- byte(b.bitContainer>>32),
- byte(b.bitContainer>>40),
- byte(b.bitContainer>>48),
- byte(b.bitContainer>>56),
- )
- b.bitContainer = 0
- b.nBits = 0
- return
- default:
- panic(fmt.Errorf("bits (%d) > 64", b.nBits))
- }
- b.nBits &= 7
+// so the caller must ensure that b has been flushed recently.
+func (b *bitWriter) encFourSymbols(encA, encB, encC, encD cTableEntry) {
+ bitsA := encA.nBits
+ bitsB := bitsA + encB.nBits
+ bitsC := bitsB + encC.nBits
+ bitsD := bitsC + encD.nBits
+ combined := uint64(encA.val) |
+ (uint64(encB.val) << (bitsA & 63)) |
+ (uint64(encC.val) << (bitsB & 63)) |
+ (uint64(encD.val) << (bitsC & 63))
+ b.bitContainer |= combined << (b.nBits & 63)
+ b.nBits += bitsD
}
// flush32 will flush out, so there are at least 32 bits available for writing.
@@ -201,10 +109,3 @@ func (b *bitWriter) close() error {
b.flushAlign()
return nil
}
-
-// reset and continue writing by appending to out.
-func (b *bitWriter) reset(out []byte) {
- b.bitContainer = 0
- b.nBits = 0
- b.out = out
-}
diff --git a/vendor/github.com/klauspost/compress/huff0/bytereader.go b/vendor/github.com/klauspost/compress/huff0/bytereader.go
index 50bcdf6ea..4dcab8d23 100644
--- a/vendor/github.com/klauspost/compress/huff0/bytereader.go
+++ b/vendor/github.com/klauspost/compress/huff0/bytereader.go
@@ -20,11 +20,6 @@ func (b *byteReader) init(in []byte) {
b.off = 0
}
-// advance the stream b n bytes.
-func (b *byteReader) advance(n uint) {
- b.off += int(n)
-}
-
// Int32 returns a little endian int32 starting at current offset.
func (b byteReader) Int32() int32 {
v3 := int32(b.b[b.off+3])
@@ -43,11 +38,6 @@ func (b byteReader) Uint32() uint32 {
return (v3 << 24) | (v2 << 16) | (v1 << 8) | v0
}
-// unread returns the unread portion of the input.
-func (b byteReader) unread() []byte {
- return b.b[b.off:]
-}
-
// remain will return the number of bytes remaining.
func (b byteReader) remain() int {
return len(b.b) - b.off
diff --git a/vendor/github.com/klauspost/compress/huff0/compress.go b/vendor/github.com/klauspost/compress/huff0/compress.go
index bc95ac623..4ee4fa18d 100644
--- a/vendor/github.com/klauspost/compress/huff0/compress.go
+++ b/vendor/github.com/klauspost/compress/huff0/compress.go
@@ -248,8 +248,7 @@ func (s *Scratch) compress1xDo(dst, src []byte) ([]byte, error) {
tmp := src[n : n+4]
// tmp should be len 4
bw.flush32()
- bw.encTwoSymbols(cTable, tmp[3], tmp[2])
- bw.encTwoSymbols(cTable, tmp[1], tmp[0])
+ bw.encFourSymbols(cTable[tmp[3]], cTable[tmp[2]], cTable[tmp[1]], cTable[tmp[0]])
}
} else {
for ; n >= 0; n -= 4 {
@@ -365,29 +364,29 @@ func (s *Scratch) countSimple(in []byte) (max int, reuse bool) {
m := uint32(0)
if len(s.prevTable) > 0 {
for i, v := range s.count[:] {
+ if v == 0 {
+ continue
+ }
if v > m {
m = v
}
- if v > 0 {
- s.symbolLen = uint16(i) + 1
- if i >= len(s.prevTable) {
- reuse = false
- } else {
- if s.prevTable[i].nBits == 0 {
- reuse = false
- }
- }
+ s.symbolLen = uint16(i) + 1
+ if i >= len(s.prevTable) {
+ reuse = false
+ } else if s.prevTable[i].nBits == 0 {
+ reuse = false
}
}
return int(m), reuse
}
for i, v := range s.count[:] {
+ if v == 0 {
+ continue
+ }
if v > m {
m = v
}
- if v > 0 {
- s.symbolLen = uint16(i) + 1
- }
+ s.symbolLen = uint16(i) + 1
}
return int(m), false
}
@@ -404,6 +403,7 @@ func (s *Scratch) canUseTable(c cTable) bool {
return true
}
+//lint:ignore U1000 used for debugging
func (s *Scratch) validateTable(c cTable) bool {
if len(c) < int(s.symbolLen) {
return false
@@ -483,34 +483,35 @@ func (s *Scratch) buildCTable() error {
// Different from reference implementation.
huffNode0 := s.nodes[0 : huffNodesLen+1]
- for huffNode[nonNullRank].count == 0 {
+ for huffNode[nonNullRank].count() == 0 {
nonNullRank--
}
lowS := int16(nonNullRank)
nodeRoot := nodeNb + lowS - 1
lowN := nodeNb
- huffNode[nodeNb].count = huffNode[lowS].count + huffNode[lowS-1].count
- huffNode[lowS].parent, huffNode[lowS-1].parent = uint16(nodeNb), uint16(nodeNb)
+ huffNode[nodeNb].setCount(huffNode[lowS].count() + huffNode[lowS-1].count())
+ huffNode[lowS].setParent(nodeNb)
+ huffNode[lowS-1].setParent(nodeNb)
nodeNb++
lowS -= 2
for n := nodeNb; n <= nodeRoot; n++ {
- huffNode[n].count = 1 << 30
+ huffNode[n].setCount(1 << 30)
}
// fake entry, strong barrier
- huffNode0[0].count = 1 << 31
+ huffNode0[0].setCount(1 << 31)
// create parents
for nodeNb <= nodeRoot {
var n1, n2 int16
- if huffNode0[lowS+1].count < huffNode0[lowN+1].count {
+ if huffNode0[lowS+1].count() < huffNode0[lowN+1].count() {
n1 = lowS
lowS--
} else {
n1 = lowN
lowN++
}
- if huffNode0[lowS+1].count < huffNode0[lowN+1].count {
+ if huffNode0[lowS+1].count() < huffNode0[lowN+1].count() {
n2 = lowS
lowS--
} else {
@@ -518,18 +519,19 @@ func (s *Scratch) buildCTable() error {
lowN++
}
- huffNode[nodeNb].count = huffNode0[n1+1].count + huffNode0[n2+1].count
- huffNode0[n1+1].parent, huffNode0[n2+1].parent = uint16(nodeNb), uint16(nodeNb)
+ huffNode[nodeNb].setCount(huffNode0[n1+1].count() + huffNode0[n2+1].count())
+ huffNode0[n1+1].setParent(nodeNb)
+ huffNode0[n2+1].setParent(nodeNb)
nodeNb++
}
// distribute weights (unlimited tree height)
- huffNode[nodeRoot].nbBits = 0
+ huffNode[nodeRoot].setNbBits(0)
for n := nodeRoot - 1; n >= startNode; n-- {
- huffNode[n].nbBits = huffNode[huffNode[n].parent].nbBits + 1
+ huffNode[n].setNbBits(huffNode[huffNode[n].parent()].nbBits() + 1)
}
for n := uint16(0); n <= nonNullRank; n++ {
- huffNode[n].nbBits = huffNode[huffNode[n].parent].nbBits + 1
+ huffNode[n].setNbBits(huffNode[huffNode[n].parent()].nbBits() + 1)
}
s.actualTableLog = s.setMaxHeight(int(nonNullRank))
maxNbBits := s.actualTableLog
@@ -541,7 +543,7 @@ func (s *Scratch) buildCTable() error {
var nbPerRank [tableLogMax + 1]uint16
var valPerRank [16]uint16
for _, v := range huffNode[:nonNullRank+1] {
- nbPerRank[v.nbBits]++
+ nbPerRank[v.nbBits()]++
}
// determine stating value per rank
{
@@ -556,7 +558,7 @@ func (s *Scratch) buildCTable() error {
// push nbBits per symbol, symbol order
for _, v := range huffNode[:nonNullRank+1] {
- s.cTable[v.symbol].nBits = v.nbBits
+ s.cTable[v.symbol()].nBits = v.nbBits()
}
// assign value within rank, symbol order
@@ -602,12 +604,12 @@ func (s *Scratch) huffSort() {
pos := rank[r].current
rank[r].current++
prev := nodes[(pos-1)&huffNodesMask]
- for pos > rank[r].base && c > prev.count {
+ for pos > rank[r].base && c > prev.count() {
nodes[pos&huffNodesMask] = prev
pos--
prev = nodes[(pos-1)&huffNodesMask]
}
- nodes[pos&huffNodesMask] = nodeElt{count: c, symbol: byte(n)}
+ nodes[pos&huffNodesMask] = makeNodeElt(c, byte(n))
}
}
@@ -616,7 +618,7 @@ func (s *Scratch) setMaxHeight(lastNonNull int) uint8 {
huffNode := s.nodes[1 : huffNodesLen+1]
//huffNode = huffNode[: huffNodesLen]
- largestBits := huffNode[lastNonNull].nbBits
+ largestBits := huffNode[lastNonNull].nbBits()
// early exit : no elt > maxNbBits
if largestBits <= maxNbBits {
@@ -626,14 +628,14 @@ func (s *Scratch) setMaxHeight(lastNonNull int) uint8 {
baseCost := int(1) << (largestBits - maxNbBits)
n := uint32(lastNonNull)
- for huffNode[n].nbBits > maxNbBits {
- totalCost += baseCost - (1 << (largestBits - huffNode[n].nbBits))
- huffNode[n].nbBits = maxNbBits
+ for huffNode[n].nbBits() > maxNbBits {
+ totalCost += baseCost - (1 << (largestBits - huffNode[n].nbBits()))
+ huffNode[n].setNbBits(maxNbBits)
n--
}
// n stops at huffNode[n].nbBits <= maxNbBits
- for huffNode[n].nbBits == maxNbBits {
+ for huffNode[n].nbBits() == maxNbBits {
n--
}
// n end at index of smallest symbol using < maxNbBits
@@ -654,10 +656,10 @@ func (s *Scratch) setMaxHeight(lastNonNull int) uint8 {
{
currentNbBits := maxNbBits
for pos := int(n); pos >= 0; pos-- {
- if huffNode[pos].nbBits >= currentNbBits {
+ if huffNode[pos].nbBits() >= currentNbBits {
continue
}
- currentNbBits = huffNode[pos].nbBits // < maxNbBits
+ currentNbBits = huffNode[pos].nbBits() // < maxNbBits
rankLast[maxNbBits-currentNbBits] = uint32(pos)
}
}
@@ -674,8 +676,8 @@ func (s *Scratch) setMaxHeight(lastNonNull int) uint8 {
if lowPos == noSymbol {
break
}
- highTotal := huffNode[highPos].count
- lowTotal := 2 * huffNode[lowPos].count
+ highTotal := huffNode[highPos].count()
+ lowTotal := 2 * huffNode[lowPos].count()
if highTotal <= lowTotal {
break
}
@@ -691,13 +693,14 @@ func (s *Scratch) setMaxHeight(lastNonNull int) uint8 {
// this rank is no longer empty
rankLast[nBitsToDecrease-1] = rankLast[nBitsToDecrease]
}
- huffNode[rankLast[nBitsToDecrease]].nbBits++
+ huffNode[rankLast[nBitsToDecrease]].setNbBits(1 +
+ huffNode[rankLast[nBitsToDecrease]].nbBits())
if rankLast[nBitsToDecrease] == 0 {
/* special case, reached largest symbol */
rankLast[nBitsToDecrease] = noSymbol
} else {
rankLast[nBitsToDecrease]--
- if huffNode[rankLast[nBitsToDecrease]].nbBits != maxNbBits-nBitsToDecrease {
+ if huffNode[rankLast[nBitsToDecrease]].nbBits() != maxNbBits-nBitsToDecrease {
rankLast[nBitsToDecrease] = noSymbol /* this rank is now empty */
}
}
@@ -705,15 +708,15 @@ func (s *Scratch) setMaxHeight(lastNonNull int) uint8 {
for totalCost < 0 { /* Sometimes, cost correction overshoot */
if rankLast[1] == noSymbol { /* special case : no rank 1 symbol (using maxNbBits-1); let's create one from largest rank 0 (using maxNbBits) */
- for huffNode[n].nbBits == maxNbBits {
+ for huffNode[n].nbBits() == maxNbBits {
n--
}
- huffNode[n+1].nbBits--
+ huffNode[n+1].setNbBits(huffNode[n+1].nbBits() - 1)
rankLast[1] = n + 1
totalCost++
continue
}
- huffNode[rankLast[1]+1].nbBits--
+ huffNode[rankLast[1]+1].setNbBits(huffNode[rankLast[1]+1].nbBits() - 1)
rankLast[1]++
totalCost++
}
@@ -721,9 +724,26 @@ func (s *Scratch) setMaxHeight(lastNonNull int) uint8 {
return maxNbBits
}
-type nodeElt struct {
- count uint32
- parent uint16
- symbol byte
- nbBits uint8
+// A nodeElt is the fields
+//
+// count uint32
+// parent uint16
+// symbol byte
+// nbBits uint8
+//
+// in some order, all squashed into an integer so that the compiler
+// always loads and stores entire nodeElts instead of separate fields.
+type nodeElt uint64
+
+func makeNodeElt(count uint32, symbol byte) nodeElt {
+ return nodeElt(count) | nodeElt(symbol)<<48
}
+
+func (e *nodeElt) count() uint32 { return uint32(*e) }
+func (e *nodeElt) parent() uint16 { return uint16(*e >> 32) }
+func (e *nodeElt) symbol() byte { return byte(*e >> 48) }
+func (e *nodeElt) nbBits() uint8 { return uint8(*e >> 56) }
+
+func (e *nodeElt) setCount(c uint32) { *e = (*e)&0xffffffff00000000 | nodeElt(c) }
+func (e *nodeElt) setParent(p int16) { *e = (*e)&0xffff0000ffffffff | nodeElt(uint16(p))<<32 }
+func (e *nodeElt) setNbBits(n uint8) { *e = (*e)&0x00ffffffffffffff | nodeElt(n)<<56 }
diff --git a/vendor/github.com/klauspost/compress/huff0/decompress.go b/vendor/github.com/klauspost/compress/huff0/decompress.go
index 04f652995..3c0b398c7 100644
--- a/vendor/github.com/klauspost/compress/huff0/decompress.go
+++ b/vendor/github.com/klauspost/compress/huff0/decompress.go
@@ -11,7 +11,6 @@ import (
type dTable struct {
single []dEntrySingle
- double []dEntryDouble
}
// single-symbols decoding
@@ -19,13 +18,6 @@ type dEntrySingle struct {
entry uint16
}
-// double-symbols decoding
-type dEntryDouble struct {
- seq [4]byte
- nBits uint8
- len uint8
-}
-
// Uses special code for all tables that are < 8 bits.
const use8BitTables = true
@@ -35,7 +27,7 @@ const use8BitTables = true
// If no Scratch is provided a new one is allocated.
// The returned Scratch can be used for encoding or decoding input using this table.
func ReadTable(in []byte, s *Scratch) (s2 *Scratch, remain []byte, err error) {
- s, err = s.prepare(in)
+ s, err = s.prepare(nil)
if err != nil {
return s, nil, err
}
@@ -69,7 +61,7 @@ func ReadTable(in []byte, s *Scratch) (s2 *Scratch, remain []byte, err error) {
b, err := fse.Decompress(in[:iSize], s.fse)
s.fse.Out = nil
if err != nil {
- return s, nil, err
+ return s, nil, fmt.Errorf("fse decompress returned: %w", err)
}
if len(b) > 255 {
return s, nil, errors.New("corrupt input: output table too large")
@@ -236,108 +228,6 @@ func (d *Decoder) buffer() *[4][256]byte {
return &[4][256]byte{}
}
-// Decompress1X will decompress a 1X encoded stream.
-// The cap of the output buffer will be the maximum decompressed size.
-// The length of the supplied input must match the end of a block exactly.
-func (d *Decoder) Decompress1X(dst, src []byte) ([]byte, error) {
- if len(d.dt.single) == 0 {
- return nil, errors.New("no table loaded")
- }
- if use8BitTables && d.actualTableLog <= 8 {
- return d.decompress1X8Bit(dst, src)
- }
- var br bitReaderShifted
- err := br.init(src)
- if err != nil {
- return dst, err
- }
- maxDecodedSize := cap(dst)
- dst = dst[:0]
-
- // Avoid bounds check by always having full sized table.
- const tlSize = 1 << tableLogMax
- const tlMask = tlSize - 1
- dt := d.dt.single[:tlSize]
-
- // Use temp table to avoid bound checks/append penalty.
- bufs := d.buffer()
- buf := &bufs[0]
- var off uint8
-
- for br.off >= 8 {
- br.fillFast()
- v := dt[br.peekBitsFast(d.actualTableLog)&tlMask]
- br.advance(uint8(v.entry))
- buf[off+0] = uint8(v.entry >> 8)
-
- v = dt[br.peekBitsFast(d.actualTableLog)&tlMask]
- br.advance(uint8(v.entry))
- buf[off+1] = uint8(v.entry >> 8)
-
- // Refill
- br.fillFast()
-
- v = dt[br.peekBitsFast(d.actualTableLog)&tlMask]
- br.advance(uint8(v.entry))
- buf[off+2] = uint8(v.entry >> 8)
-
- v = dt[br.peekBitsFast(d.actualTableLog)&tlMask]
- br.advance(uint8(v.entry))
- buf[off+3] = uint8(v.entry >> 8)
-
- off += 4
- if off == 0 {
- if len(dst)+256 > maxDecodedSize {
- br.close()
- d.bufs.Put(bufs)
- return nil, ErrMaxDecodedSizeExceeded
- }
- dst = append(dst, buf[:]...)
- }
- }
-
- if len(dst)+int(off) > maxDecodedSize {
- d.bufs.Put(bufs)
- br.close()
- return nil, ErrMaxDecodedSizeExceeded
- }
- dst = append(dst, buf[:off]...)
-
- // br < 8, so uint8 is fine
- bitsLeft := uint8(br.off)*8 + 64 - br.bitsRead
- for bitsLeft > 0 {
- br.fill()
- if false && br.bitsRead >= 32 {
- if br.off >= 4 {
- v := br.in[br.off-4:]
- v = v[:4]
- low := (uint32(v[0])) | (uint32(v[1]) << 8) | (uint32(v[2]) << 16) | (uint32(v[3]) << 24)
- br.value = (br.value << 32) | uint64(low)
- br.bitsRead -= 32
- br.off -= 4
- } else {
- for br.off > 0 {
- br.value = (br.value << 8) | uint64(br.in[br.off-1])
- br.bitsRead -= 8
- br.off--
- }
- }
- }
- if len(dst) >= maxDecodedSize {
- d.bufs.Put(bufs)
- br.close()
- return nil, ErrMaxDecodedSizeExceeded
- }
- v := d.dt.single[br.peekBitsFast(d.actualTableLog)&tlMask]
- nBits := uint8(v.entry)
- br.advance(nBits)
- bitsLeft -= nBits
- dst = append(dst, uint8(v.entry>>8))
- }
- d.bufs.Put(bufs)
- return dst, br.close()
-}
-
// decompress1X8Bit will decompress a 1X encoded stream with tablelog <= 8.
// The cap of the output buffer will be the maximum decompressed size.
// The length of the supplied input must match the end of a block exactly.
@@ -873,17 +763,20 @@ func (d *Decoder) decompress4X8bit(dst, src []byte) ([]byte, error) {
d.bufs.Put(buf)
return nil, errors.New("corruption detected: stream overrun 1")
}
- copy(out, buf[0][:])
- copy(out[dstEvery:], buf[1][:])
- copy(out[dstEvery*2:], buf[2][:])
- copy(out[dstEvery*3:], buf[3][:])
- out = out[bufoff:]
- decoded += bufoff * 4
// There must at least be 3 buffers left.
- if len(out) < dstEvery*3 {
+ if len(out)-bufoff < dstEvery*3 {
d.bufs.Put(buf)
return nil, errors.New("corruption detected: stream overrun 2")
}
+ //copy(out, buf[0][:])
+ //copy(out[dstEvery:], buf[1][:])
+ //copy(out[dstEvery*2:], buf[2][:])
+ *(*[bufoff]byte)(out) = buf[0]
+ *(*[bufoff]byte)(out[dstEvery:]) = buf[1]
+ *(*[bufoff]byte)(out[dstEvery*2:]) = buf[2]
+ *(*[bufoff]byte)(out[dstEvery*3:]) = buf[3]
+ out = out[bufoff:]
+ decoded += bufoff * 4
}
}
if off > 0 {
@@ -995,7 +888,6 @@ func (d *Decoder) decompress4X8bitExactly(dst, src []byte) ([]byte, error) {
const shift = 56
const tlSize = 1 << 8
- const tlMask = tlSize - 1
single := d.dt.single[:tlSize]
// Use temp table to avoid bound checks/append penalty.
@@ -1108,17 +1000,22 @@ func (d *Decoder) decompress4X8bitExactly(dst, src []byte) ([]byte, error) {
d.bufs.Put(buf)
return nil, errors.New("corruption detected: stream overrun 1")
}
- copy(out, buf[0][:])
- copy(out[dstEvery:], buf[1][:])
- copy(out[dstEvery*2:], buf[2][:])
- copy(out[dstEvery*3:], buf[3][:])
- out = out[bufoff:]
- decoded += bufoff * 4
// There must at least be 3 buffers left.
- if len(out) < dstEvery*3 {
+ if len(out)-bufoff < dstEvery*3 {
d.bufs.Put(buf)
return nil, errors.New("corruption detected: stream overrun 2")
}
+
+ //copy(out, buf[0][:])
+ //copy(out[dstEvery:], buf[1][:])
+ //copy(out[dstEvery*2:], buf[2][:])
+ // copy(out[dstEvery*3:], buf[3][:])
+ *(*[bufoff]byte)(out) = buf[0]
+ *(*[bufoff]byte)(out[dstEvery:]) = buf[1]
+ *(*[bufoff]byte)(out[dstEvery*2:]) = buf[2]
+ *(*[bufoff]byte)(out[dstEvery*3:]) = buf[3]
+ out = out[bufoff:]
+ decoded += bufoff * 4
}
}
if off > 0 {
diff --git a/vendor/github.com/klauspost/compress/huff0/decompress_8b_amd64.s b/vendor/github.com/klauspost/compress/huff0/decompress_8b_amd64.s
deleted file mode 100644
index 0d6cb1a96..000000000
--- a/vendor/github.com/klauspost/compress/huff0/decompress_8b_amd64.s
+++ /dev/null
@@ -1,488 +0,0 @@
-// +build !appengine
-// +build gc
-// +build !noasm
-
-#include "textflag.h"
-#include "funcdata.h"
-#include "go_asm.h"
-
-#define bufoff 256 // see decompress.go, we're using [4][256]byte table
-
-// func decompress4x_main_loop_x86(pbr0, pbr1, pbr2, pbr3 *bitReaderShifted,
-// peekBits uint8, buf *byte, tbl *dEntrySingle) (int, bool)
-TEXT ·decompress4x_8b_loop_x86(SB), NOSPLIT, $8
-#define off R8
-#define buffer DI
-#define table SI
-
-#define br_bits_read R9
-#define br_value R10
-#define br_offset R11
-#define peek_bits R12
-#define exhausted DX
-
-#define br0 R13
-#define br1 R14
-#define br2 R15
-#define br3 BP
-
- MOVQ BP, 0(SP)
-
- XORQ exhausted, exhausted // exhausted = false
- XORQ off, off // off = 0
-
- MOVBQZX peekBits+32(FP), peek_bits
- MOVQ buf+40(FP), buffer
- MOVQ tbl+48(FP), table
-
- MOVQ pbr0+0(FP), br0
- MOVQ pbr1+8(FP), br1
- MOVQ pbr2+16(FP), br2
- MOVQ pbr3+24(FP), br3
-
-main_loop:
-
- // const stream = 0
- // br0.fillFast()
- MOVBQZX bitReaderShifted_bitsRead(br0), br_bits_read
- MOVQ bitReaderShifted_value(br0), br_value
- MOVQ bitReaderShifted_off(br0), br_offset
-
- // if b.bitsRead >= 32 {
- CMPQ br_bits_read, $32
- JB skip_fill0
-
- SUBQ $32, br_bits_read // b.bitsRead -= 32
- SUBQ $4, br_offset // b.off -= 4
-
- // v := b.in[b.off-4 : b.off]
- // v = v[:4]
- // low := (uint32(v[0])) | (uint32(v[1]) << 8) | (uint32(v[2]) << 16) | (uint32(v[3]) << 24)
- MOVQ bitReaderShifted_in(br0), AX
- MOVL 0(br_offset)(AX*1), AX // AX = uint32(b.in[b.off:b.off+4])
-
- // b.value |= uint64(low) << (b.bitsRead & 63)
- MOVQ br_bits_read, CX
- SHLQ CL, AX
- ORQ AX, br_value
-
- // exhausted = exhausted || (br0.off < 4)
- CMPQ br_offset, $4
- SETLT DL
- ORB DL, DH
-
- // }
-skip_fill0:
-
- // val0 := br0.peekTopBits(peekBits)
- MOVQ br_value, AX
- MOVQ peek_bits, CX
- SHRQ CL, AX // AX = (value >> peek_bits) & mask
-
- // v0 := table[val0&mask]
- MOVW 0(table)(AX*2), AX // AX - v0
-
- // br0.advance(uint8(v0.entry))
- MOVB AH, BL // BL = uint8(v0.entry >> 8)
- MOVBQZX AL, CX
- SHLQ CL, br_value // value <<= n
- ADDQ CX, br_bits_read // bits_read += n
-
- // val1 := br0.peekTopBits(peekBits)
- MOVQ peek_bits, CX
- MOVQ br_value, AX
- SHRQ CL, AX // AX = (value >> peek_bits) & mask
-
- // v1 := table[val1&mask]
- MOVW 0(table)(AX*2), AX // AX - v1
-
- // br0.advance(uint8(v1.entry))
- MOVB AH, BH // BH = uint8(v1.entry >> 8)
- MOVBQZX AL, CX
- SHLQ CX, br_value // value <<= n
- ADDQ CX, br_bits_read // bits_read += n
-
- // these two writes get coalesced
- // buf[stream][off] = uint8(v0.entry >> 8)
- // buf[stream][off+1] = uint8(v1.entry >> 8)
- MOVW BX, 0(buffer)(off*1)
-
- // SECOND PART:
- // val2 := br0.peekTopBits(peekBits)
- MOVQ br_value, AX
- MOVQ peek_bits, CX
- SHRQ CL, AX // AX = (value >> peek_bits) & mask
-
- // v2 := table[val0&mask]
- MOVW 0(table)(AX*2), AX // AX - v0
-
- // br0.advance(uint8(v0.entry))
- MOVB AH, BL // BL = uint8(v0.entry >> 8)
- MOVBQZX AL, CX
- SHLQ CL, br_value // value <<= n
- ADDQ CX, br_bits_read // bits_read += n
-
- // val3 := br0.peekTopBits(peekBits)
- MOVQ peek_bits, CX
- MOVQ br_value, AX
- SHRQ CL, AX // AX = (value >> peek_bits) & mask
-
- // v3 := table[val1&mask]
- MOVW 0(table)(AX*2), AX // AX - v1
-
- // br0.advance(uint8(v1.entry))
- MOVB AH, BH // BH = uint8(v1.entry >> 8)
- MOVBQZX AL, CX
- SHLQ CX, br_value // value <<= n
- ADDQ CX, br_bits_read // bits_read += n
-
- // these two writes get coalesced
- // buf[stream][off+2] = uint8(v2.entry >> 8)
- // buf[stream][off+3] = uint8(v3.entry >> 8)
- MOVW BX, 0+2(buffer)(off*1)
-
- // update the bitrader reader structure
- MOVB br_bits_read, bitReaderShifted_bitsRead(br0)
- MOVQ br_value, bitReaderShifted_value(br0)
- MOVQ br_offset, bitReaderShifted_off(br0)
-
- // const stream = 1
- // br1.fillFast()
- MOVBQZX bitReaderShifted_bitsRead(br1), br_bits_read
- MOVQ bitReaderShifted_value(br1), br_value
- MOVQ bitReaderShifted_off(br1), br_offset
-
- // if b.bitsRead >= 32 {
- CMPQ br_bits_read, $32
- JB skip_fill1
-
- SUBQ $32, br_bits_read // b.bitsRead -= 32
- SUBQ $4, br_offset // b.off -= 4
-
- // v := b.in[b.off-4 : b.off]
- // v = v[:4]
- // low := (uint32(v[0])) | (uint32(v[1]) << 8) | (uint32(v[2]) << 16) | (uint32(v[3]) << 24)
- MOVQ bitReaderShifted_in(br1), AX
- MOVL 0(br_offset)(AX*1), AX // AX = uint32(b.in[b.off:b.off+4])
-
- // b.value |= uint64(low) << (b.bitsRead & 63)
- MOVQ br_bits_read, CX
- SHLQ CL, AX
- ORQ AX, br_value
-
- // exhausted = exhausted || (br1.off < 4)
- CMPQ br_offset, $4
- SETLT DL
- ORB DL, DH
-
- // }
-skip_fill1:
-
- // val0 := br1.peekTopBits(peekBits)
- MOVQ br_value, AX
- MOVQ peek_bits, CX
- SHRQ CL, AX // AX = (value >> peek_bits) & mask
-
- // v0 := table[val0&mask]
- MOVW 0(table)(AX*2), AX // AX - v0
-
- // br1.advance(uint8(v0.entry))
- MOVB AH, BL // BL = uint8(v0.entry >> 8)
- MOVBQZX AL, CX
- SHLQ CL, br_value // value <<= n
- ADDQ CX, br_bits_read // bits_read += n
-
- // val1 := br1.peekTopBits(peekBits)
- MOVQ peek_bits, CX
- MOVQ br_value, AX
- SHRQ CL, AX // AX = (value >> peek_bits) & mask
-
- // v1 := table[val1&mask]
- MOVW 0(table)(AX*2), AX // AX - v1
-
- // br1.advance(uint8(v1.entry))
- MOVB AH, BH // BH = uint8(v1.entry >> 8)
- MOVBQZX AL, CX
- SHLQ CX, br_value // value <<= n
- ADDQ CX, br_bits_read // bits_read += n
-
- // these two writes get coalesced
- // buf[stream][off] = uint8(v0.entry >> 8)
- // buf[stream][off+1] = uint8(v1.entry >> 8)
- MOVW BX, 256(buffer)(off*1)
-
- // SECOND PART:
- // val2 := br1.peekTopBits(peekBits)
- MOVQ br_value, AX
- MOVQ peek_bits, CX
- SHRQ CL, AX // AX = (value >> peek_bits) & mask
-
- // v2 := table[val0&mask]
- MOVW 0(table)(AX*2), AX // AX - v0
-
- // br1.advance(uint8(v0.entry))
- MOVB AH, BL // BL = uint8(v0.entry >> 8)
- MOVBQZX AL, CX
- SHLQ CL, br_value // value <<= n
- ADDQ CX, br_bits_read // bits_read += n
-
- // val3 := br1.peekTopBits(peekBits)
- MOVQ peek_bits, CX
- MOVQ br_value, AX
- SHRQ CL, AX // AX = (value >> peek_bits) & mask
-
- // v3 := table[val1&mask]
- MOVW 0(table)(AX*2), AX // AX - v1
-
- // br1.advance(uint8(v1.entry))
- MOVB AH, BH // BH = uint8(v1.entry >> 8)
- MOVBQZX AL, CX
- SHLQ CX, br_value // value <<= n
- ADDQ CX, br_bits_read // bits_read += n
-
- // these two writes get coalesced
- // buf[stream][off+2] = uint8(v2.entry >> 8)
- // buf[stream][off+3] = uint8(v3.entry >> 8)
- MOVW BX, 256+2(buffer)(off*1)
-
- // update the bitrader reader structure
- MOVB br_bits_read, bitReaderShifted_bitsRead(br1)
- MOVQ br_value, bitReaderShifted_value(br1)
- MOVQ br_offset, bitReaderShifted_off(br1)
-
- // const stream = 2
- // br2.fillFast()
- MOVBQZX bitReaderShifted_bitsRead(br2), br_bits_read
- MOVQ bitReaderShifted_value(br2), br_value
- MOVQ bitReaderShifted_off(br2), br_offset
-
- // if b.bitsRead >= 32 {
- CMPQ br_bits_read, $32
- JB skip_fill2
-
- SUBQ $32, br_bits_read // b.bitsRead -= 32
- SUBQ $4, br_offset // b.off -= 4
-
- // v := b.in[b.off-4 : b.off]
- // v = v[:4]
- // low := (uint32(v[0])) | (uint32(v[1]) << 8) | (uint32(v[2]) << 16) | (uint32(v[3]) << 24)
- MOVQ bitReaderShifted_in(br2), AX
- MOVL 0(br_offset)(AX*1), AX // AX = uint32(b.in[b.off:b.off+4])
-
- // b.value |= uint64(low) << (b.bitsRead & 63)
- MOVQ br_bits_read, CX
- SHLQ CL, AX
- ORQ AX, br_value
-
- // exhausted = exhausted || (br2.off < 4)
- CMPQ br_offset, $4
- SETLT DL
- ORB DL, DH
-
- // }
-skip_fill2:
-
- // val0 := br2.peekTopBits(peekBits)
- MOVQ br_value, AX
- MOVQ peek_bits, CX
- SHRQ CL, AX // AX = (value >> peek_bits) & mask
-
- // v0 := table[val0&mask]
- MOVW 0(table)(AX*2), AX // AX - v0
-
- // br2.advance(uint8(v0.entry))
- MOVB AH, BL // BL = uint8(v0.entry >> 8)
- MOVBQZX AL, CX
- SHLQ CL, br_value // value <<= n
- ADDQ CX, br_bits_read // bits_read += n
-
- // val1 := br2.peekTopBits(peekBits)
- MOVQ peek_bits, CX
- MOVQ br_value, AX
- SHRQ CL, AX // AX = (value >> peek_bits) & mask
-
- // v1 := table[val1&mask]
- MOVW 0(table)(AX*2), AX // AX - v1
-
- // br2.advance(uint8(v1.entry))
- MOVB AH, BH // BH = uint8(v1.entry >> 8)
- MOVBQZX AL, CX
- SHLQ CX, br_value // value <<= n
- ADDQ CX, br_bits_read // bits_read += n
-
- // these two writes get coalesced
- // buf[stream][off] = uint8(v0.entry >> 8)
- // buf[stream][off+1] = uint8(v1.entry >> 8)
- MOVW BX, 512(buffer)(off*1)
-
- // SECOND PART:
- // val2 := br2.peekTopBits(peekBits)
- MOVQ br_value, AX
- MOVQ peek_bits, CX
- SHRQ CL, AX // AX = (value >> peek_bits) & mask
-
- // v2 := table[val0&mask]
- MOVW 0(table)(AX*2), AX // AX - v0
-
- // br2.advance(uint8(v0.entry))
- MOVB AH, BL // BL = uint8(v0.entry >> 8)
- MOVBQZX AL, CX
- SHLQ CL, br_value // value <<= n
- ADDQ CX, br_bits_read // bits_read += n
-
- // val3 := br2.peekTopBits(peekBits)
- MOVQ peek_bits, CX
- MOVQ br_value, AX
- SHRQ CL, AX // AX = (value >> peek_bits) & mask
-
- // v3 := table[val1&mask]
- MOVW 0(table)(AX*2), AX // AX - v1
-
- // br2.advance(uint8(v1.entry))
- MOVB AH, BH // BH = uint8(v1.entry >> 8)
- MOVBQZX AL, CX
- SHLQ CX, br_value // value <<= n
- ADDQ CX, br_bits_read // bits_read += n
-
- // these two writes get coalesced
- // buf[stream][off+2] = uint8(v2.entry >> 8)
- // buf[stream][off+3] = uint8(v3.entry >> 8)
- MOVW BX, 512+2(buffer)(off*1)
-
- // update the bitrader reader structure
- MOVB br_bits_read, bitReaderShifted_bitsRead(br2)
- MOVQ br_value, bitReaderShifted_value(br2)
- MOVQ br_offset, bitReaderShifted_off(br2)
-
- // const stream = 3
- // br3.fillFast()
- MOVBQZX bitReaderShifted_bitsRead(br3), br_bits_read
- MOVQ bitReaderShifted_value(br3), br_value
- MOVQ bitReaderShifted_off(br3), br_offset
-
- // if b.bitsRead >= 32 {
- CMPQ br_bits_read, $32
- JB skip_fill3
-
- SUBQ $32, br_bits_read // b.bitsRead -= 32
- SUBQ $4, br_offset // b.off -= 4
-
- // v := b.in[b.off-4 : b.off]
- // v = v[:4]
- // low := (uint32(v[0])) | (uint32(v[1]) << 8) | (uint32(v[2]) << 16) | (uint32(v[3]) << 24)
- MOVQ bitReaderShifted_in(br3), AX
- MOVL 0(br_offset)(AX*1), AX // AX = uint32(b.in[b.off:b.off+4])
-
- // b.value |= uint64(low) << (b.bitsRead & 63)
- MOVQ br_bits_read, CX
- SHLQ CL, AX
- ORQ AX, br_value
-
- // exhausted = exhausted || (br3.off < 4)
- CMPQ br_offset, $4
- SETLT DL
- ORB DL, DH
-
- // }
-skip_fill3:
-
- // val0 := br3.peekTopBits(peekBits)
- MOVQ br_value, AX
- MOVQ peek_bits, CX
- SHRQ CL, AX // AX = (value >> peek_bits) & mask
-
- // v0 := table[val0&mask]
- MOVW 0(table)(AX*2), AX // AX - v0
-
- // br3.advance(uint8(v0.entry))
- MOVB AH, BL // BL = uint8(v0.entry >> 8)
- MOVBQZX AL, CX
- SHLQ CL, br_value // value <<= n
- ADDQ CX, br_bits_read // bits_read += n
-
- // val1 := br3.peekTopBits(peekBits)
- MOVQ peek_bits, CX
- MOVQ br_value, AX
- SHRQ CL, AX // AX = (value >> peek_bits) & mask
-
- // v1 := table[val1&mask]
- MOVW 0(table)(AX*2), AX // AX - v1
-
- // br3.advance(uint8(v1.entry))
- MOVB AH, BH // BH = uint8(v1.entry >> 8)
- MOVBQZX AL, CX
- SHLQ CX, br_value // value <<= n
- ADDQ CX, br_bits_read // bits_read += n
-
- // these two writes get coalesced
- // buf[stream][off] = uint8(v0.entry >> 8)
- // buf[stream][off+1] = uint8(v1.entry >> 8)
- MOVW BX, 768(buffer)(off*1)
-
- // SECOND PART:
- // val2 := br3.peekTopBits(peekBits)
- MOVQ br_value, AX
- MOVQ peek_bits, CX
- SHRQ CL, AX // AX = (value >> peek_bits) & mask
-
- // v2 := table[val0&mask]
- MOVW 0(table)(AX*2), AX // AX - v0
-
- // br3.advance(uint8(v0.entry))
- MOVB AH, BL // BL = uint8(v0.entry >> 8)
- MOVBQZX AL, CX
- SHLQ CL, br_value // value <<= n
- ADDQ CX, br_bits_read // bits_read += n
-
- // val3 := br3.peekTopBits(peekBits)
- MOVQ peek_bits, CX
- MOVQ br_value, AX
- SHRQ CL, AX // AX = (value >> peek_bits) & mask
-
- // v3 := table[val1&mask]
- MOVW 0(table)(AX*2), AX // AX - v1
-
- // br3.advance(uint8(v1.entry))
- MOVB AH, BH // BH = uint8(v1.entry >> 8)
- MOVBQZX AL, CX
- SHLQ CX, br_value // value <<= n
- ADDQ CX, br_bits_read // bits_read += n
-
- // these two writes get coalesced
- // buf[stream][off+2] = uint8(v2.entry >> 8)
- // buf[stream][off+3] = uint8(v3.entry >> 8)
- MOVW BX, 768+2(buffer)(off*1)
-
- // update the bitrader reader structure
- MOVB br_bits_read, bitReaderShifted_bitsRead(br3)
- MOVQ br_value, bitReaderShifted_value(br3)
- MOVQ br_offset, bitReaderShifted_off(br3)
-
- ADDQ $4, off // off += 2
-
- TESTB DH, DH // any br[i].ofs < 4?
- JNZ end
-
- CMPQ off, $bufoff
- JL main_loop
-
-end:
- MOVQ 0(SP), BP
-
- MOVB off, ret+56(FP)
- RET
-
-#undef off
-#undef buffer
-#undef table
-
-#undef br_bits_read
-#undef br_value
-#undef br_offset
-#undef peek_bits
-#undef exhausted
-
-#undef br0
-#undef br1
-#undef br2
-#undef br3
diff --git a/vendor/github.com/klauspost/compress/huff0/decompress_8b_amd64.s.in b/vendor/github.com/klauspost/compress/huff0/decompress_8b_amd64.s.in
deleted file mode 100644
index 6d477a2c1..000000000
--- a/vendor/github.com/klauspost/compress/huff0/decompress_8b_amd64.s.in
+++ /dev/null
@@ -1,197 +0,0 @@
-// +build !appengine
-// +build gc
-// +build !noasm
-
-#include "textflag.h"
-#include "funcdata.h"
-#include "go_asm.h"
-
-
-#define bufoff 256 // see decompress.go, we're using [4][256]byte table
-
-//func decompress4x_main_loop_x86(pbr0, pbr1, pbr2, pbr3 *bitReaderShifted,
-// peekBits uint8, buf *byte, tbl *dEntrySingle) (int, bool)
-TEXT ·decompress4x_8b_loop_x86(SB), NOSPLIT, $8
-#define off R8
-#define buffer DI
-#define table SI
-
-#define br_bits_read R9
-#define br_value R10
-#define br_offset R11
-#define peek_bits R12
-#define exhausted DX
-
-#define br0 R13
-#define br1 R14
-#define br2 R15
-#define br3 BP
-
- MOVQ BP, 0(SP)
-
- XORQ exhausted, exhausted // exhausted = false
- XORQ off, off // off = 0
-
- MOVBQZX peekBits+32(FP), peek_bits
- MOVQ buf+40(FP), buffer
- MOVQ tbl+48(FP), table
-
- MOVQ pbr0+0(FP), br0
- MOVQ pbr1+8(FP), br1
- MOVQ pbr2+16(FP), br2
- MOVQ pbr3+24(FP), br3
-
-main_loop:
-{{ define "decode_2_values_x86" }}
- // const stream = {{ var "id" }}
- // br{{ var "id"}}.fillFast()
- MOVBQZX bitReaderShifted_bitsRead(br{{ var "id" }}), br_bits_read
- MOVQ bitReaderShifted_value(br{{ var "id" }}), br_value
- MOVQ bitReaderShifted_off(br{{ var "id" }}), br_offset
-
- // if b.bitsRead >= 32 {
- CMPQ br_bits_read, $32
- JB skip_fill{{ var "id" }}
-
- SUBQ $32, br_bits_read // b.bitsRead -= 32
- SUBQ $4, br_offset // b.off -= 4
-
- // v := b.in[b.off-4 : b.off]
- // v = v[:4]
- // low := (uint32(v[0])) | (uint32(v[1]) << 8) | (uint32(v[2]) << 16) | (uint32(v[3]) << 24)
- MOVQ bitReaderShifted_in(br{{ var "id" }}), AX
- MOVL 0(br_offset)(AX*1), AX // AX = uint32(b.in[b.off:b.off+4])
-
- // b.value |= uint64(low) << (b.bitsRead & 63)
- MOVQ br_bits_read, CX
- SHLQ CL, AX
- ORQ AX, br_value
-
- // exhausted = exhausted || (br{{ var "id"}}.off < 4)
- CMPQ br_offset, $4
- SETLT DL
- ORB DL, DH
- // }
-skip_fill{{ var "id" }}:
-
- // val0 := br{{ var "id"}}.peekTopBits(peekBits)
- MOVQ br_value, AX
- MOVQ peek_bits, CX
- SHRQ CL, AX // AX = (value >> peek_bits) & mask
-
- // v0 := table[val0&mask]
- MOVW 0(table)(AX*2), AX // AX - v0
-
- // br{{ var "id"}}.advance(uint8(v0.entry))
- MOVB AH, BL // BL = uint8(v0.entry >> 8)
- MOVBQZX AL, CX
- SHLQ CL, br_value // value <<= n
- ADDQ CX, br_bits_read // bits_read += n
-
- // val1 := br{{ var "id"}}.peekTopBits(peekBits)
- MOVQ peek_bits, CX
- MOVQ br_value, AX
- SHRQ CL, AX // AX = (value >> peek_bits) & mask
-
- // v1 := table[val1&mask]
- MOVW 0(table)(AX*2), AX // AX - v1
-
- // br{{ var "id"}}.advance(uint8(v1.entry))
- MOVB AH, BH // BH = uint8(v1.entry >> 8)
- MOVBQZX AL, CX
- SHLQ CX, br_value // value <<= n
- ADDQ CX, br_bits_read // bits_read += n
-
-
- // these two writes get coalesced
- // buf[stream][off] = uint8(v0.entry >> 8)
- // buf[stream][off+1] = uint8(v1.entry >> 8)
- MOVW BX, {{ var "bufofs" }}(buffer)(off*1)
-
- // SECOND PART:
- // val2 := br{{ var "id"}}.peekTopBits(peekBits)
- MOVQ br_value, AX
- MOVQ peek_bits, CX
- SHRQ CL, AX // AX = (value >> peek_bits) & mask
-
- // v2 := table[val0&mask]
- MOVW 0(table)(AX*2), AX // AX - v0
-
- // br{{ var "id"}}.advance(uint8(v0.entry))
- MOVB AH, BL // BL = uint8(v0.entry >> 8)
- MOVBQZX AL, CX
- SHLQ CL, br_value // value <<= n
- ADDQ CX, br_bits_read // bits_read += n
-
- // val3 := br{{ var "id"}}.peekTopBits(peekBits)
- MOVQ peek_bits, CX
- MOVQ br_value, AX
- SHRQ CL, AX // AX = (value >> peek_bits) & mask
-
- // v3 := table[val1&mask]
- MOVW 0(table)(AX*2), AX // AX - v1
-
- // br{{ var "id"}}.advance(uint8(v1.entry))
- MOVB AH, BH // BH = uint8(v1.entry >> 8)
- MOVBQZX AL, CX
- SHLQ CX, br_value // value <<= n
- ADDQ CX, br_bits_read // bits_read += n
-
-
- // these two writes get coalesced
- // buf[stream][off+2] = uint8(v2.entry >> 8)
- // buf[stream][off+3] = uint8(v3.entry >> 8)
- MOVW BX, {{ var "bufofs" }}+2(buffer)(off*1)
-
- // update the bitrader reader structure
- MOVB br_bits_read, bitReaderShifted_bitsRead(br{{ var "id" }})
- MOVQ br_value, bitReaderShifted_value(br{{ var "id" }})
- MOVQ br_offset, bitReaderShifted_off(br{{ var "id" }})
-{{ end }}
-
- {{ set "id" "0" }}
- {{ set "ofs" "0" }}
- {{ set "bufofs" "0" }} {{/* id * bufoff */}}
- {{ template "decode_2_values_x86" . }}
-
- {{ set "id" "1" }}
- {{ set "ofs" "8" }}
- {{ set "bufofs" "256" }}
- {{ template "decode_2_values_x86" . }}
-
- {{ set "id" "2" }}
- {{ set "ofs" "16" }}
- {{ set "bufofs" "512" }}
- {{ template "decode_2_values_x86" . }}
-
- {{ set "id" "3" }}
- {{ set "ofs" "24" }}
- {{ set "bufofs" "768" }}
- {{ template "decode_2_values_x86" . }}
-
- ADDQ $4, off // off += 2
-
- TESTB DH, DH // any br[i].ofs < 4?
- JNZ end
-
- CMPQ off, $bufoff
- JL main_loop
-end:
- MOVQ 0(SP), BP
-
- MOVB off, ret+56(FP)
- RET
-#undef off
-#undef buffer
-#undef table
-
-#undef br_bits_read
-#undef br_value
-#undef br_offset
-#undef peek_bits
-#undef exhausted
-
-#undef br0
-#undef br1
-#undef br2
-#undef br3
diff --git a/vendor/github.com/klauspost/compress/huff0/decompress_amd64.go b/vendor/github.com/klauspost/compress/huff0/decompress_amd64.go
index d47f6644f..ba7e8e6b0 100644
--- a/vendor/github.com/klauspost/compress/huff0/decompress_amd64.go
+++ b/vendor/github.com/klauspost/compress/huff0/decompress_amd64.go
@@ -2,30 +2,42 @@
// +build amd64,!appengine,!noasm,gc
// This file contains the specialisation of Decoder.Decompress4X
-// that uses an asm implementation of its main loop.
+// and Decoder.Decompress1X that use an asm implementation of thir main loops.
package huff0
import (
"errors"
"fmt"
+
+ "github.com/klauspost/compress/internal/cpuinfo"
)
// decompress4x_main_loop_x86 is an x86 assembler implementation
// of Decompress4X when tablelog > 8.
-// go:noescape
-func decompress4x_main_loop_x86(pbr0, pbr1, pbr2, pbr3 *bitReaderShifted,
- peekBits uint8, buf *byte, tbl *dEntrySingle) uint8
+//
+//go:noescape
+func decompress4x_main_loop_amd64(ctx *decompress4xContext)
// decompress4x_8b_loop_x86 is an x86 assembler implementation
// of Decompress4X when tablelog <= 8 which decodes 4 entries
// per loop.
-// go:noescape
-func decompress4x_8b_loop_x86(pbr0, pbr1, pbr2, pbr3 *bitReaderShifted,
- peekBits uint8, buf *byte, tbl *dEntrySingle) uint8
+//
+//go:noescape
+func decompress4x_8b_main_loop_amd64(ctx *decompress4xContext)
// fallback8BitSize is the size where using Go version is faster.
const fallback8BitSize = 800
+type decompress4xContext struct {
+ pbr *[4]bitReaderShifted
+ peekBits uint8
+ out *byte
+ dstEvery int
+ tbl *dEntrySingle
+ decoded int
+ limit *byte
+}
+
// Decompress4X will decompress a 4X encoded stream.
// The length of the supplied input must match the end of a block exactly.
// The *capacity* of the dst slice must match the destination size of
@@ -42,6 +54,7 @@ func (d *Decoder) Decompress4X(dst, src []byte) ([]byte, error) {
if cap(dst) < fallback8BitSize && use8BitTables {
return d.decompress4X8bit(dst, src)
}
+
var br [4]bitReaderShifted
// Decode "jump table"
start := 6
@@ -71,70 +84,25 @@ func (d *Decoder) Decompress4X(dst, src []byte) ([]byte, error) {
const tlMask = tlSize - 1
single := d.dt.single[:tlSize]
- // Use temp table to avoid bound checks/append penalty.
- buf := d.buffer()
- var off uint8
var decoded int
- const debug = false
-
- // see: bitReaderShifted.peekBitsFast()
- peekBits := uint8((64 - d.actualTableLog) & 63)
-
- // Decode 2 values from each decoder/loop.
- const bufoff = 256
- for {
- if br[0].off < 4 || br[1].off < 4 || br[2].off < 4 || br[3].off < 4 {
- break
+ if len(out) > 4*4 && !(br[0].off < 4 || br[1].off < 4 || br[2].off < 4 || br[3].off < 4) {
+ ctx := decompress4xContext{
+ pbr: &br,
+ peekBits: uint8((64 - d.actualTableLog) & 63), // see: bitReaderShifted.peekBitsFast()
+ out: &out[0],
+ dstEvery: dstEvery,
+ tbl: &single[0],
+ limit: &out[dstEvery-4], // Always stop decoding when first buffer gets here to avoid writing OOB on last.
}
-
if use8BitTables {
- off = decompress4x_8b_loop_x86(&br[0], &br[1], &br[2], &br[3], peekBits, &buf[0][0], &single[0])
+ decompress4x_8b_main_loop_amd64(&ctx)
} else {
- off = decompress4x_main_loop_x86(&br[0], &br[1], &br[2], &br[3], peekBits, &buf[0][0], &single[0])
- }
- if debug {
- fmt.Print("DEBUG: ")
- fmt.Printf("off=%d,", off)
- for i := 0; i < 4; i++ {
- fmt.Printf(" br[%d]={bitsRead=%d, value=%x, off=%d}",
- i, br[i].bitsRead, br[i].value, br[i].off)
- }
- fmt.Println("")
- }
-
- if off != 0 {
- break
+ decompress4x_main_loop_amd64(&ctx)
}
- if bufoff > dstEvery {
- d.bufs.Put(buf)
- return nil, errors.New("corruption detected: stream overrun 1")
- }
- copy(out, buf[0][:])
- copy(out[dstEvery:], buf[1][:])
- copy(out[dstEvery*2:], buf[2][:])
- copy(out[dstEvery*3:], buf[3][:])
- out = out[bufoff:]
- decoded += bufoff * 4
- // There must at least be 3 buffers left.
- if len(out) < dstEvery*3 {
- d.bufs.Put(buf)
- return nil, errors.New("corruption detected: stream overrun 2")
- }
- }
- if off > 0 {
- ioff := int(off)
- if len(out) < dstEvery*3+ioff {
- d.bufs.Put(buf)
- return nil, errors.New("corruption detected: stream overrun 3")
- }
- copy(out, buf[0][:off])
- copy(out[dstEvery:], buf[1][:off])
- copy(out[dstEvery*2:], buf[2][:off])
- copy(out[dstEvery*3:], buf[3][:off])
- decoded += int(off) * 4
- out = out[off:]
+ decoded = ctx.decoded
+ out = out[decoded/4:]
}
// Decode remaining.
@@ -150,7 +118,6 @@ func (d *Decoder) Decompress4X(dst, src []byte) ([]byte, error) {
for bitsLeft > 0 {
br.fill()
if offset >= endsAt {
- d.bufs.Put(buf)
return nil, errors.New("corruption detected: stream overrun 4")
}
@@ -164,7 +131,6 @@ func (d *Decoder) Decompress4X(dst, src []byte) ([]byte, error) {
offset++
}
if offset != endsAt {
- d.bufs.Put(buf)
return nil, fmt.Errorf("corruption detected: short output block %d, end %d != %d", i, offset, endsAt)
}
decoded += offset - dstEvery*i
@@ -173,9 +139,88 @@ func (d *Decoder) Decompress4X(dst, src []byte) ([]byte, error) {
return nil, err
}
}
- d.bufs.Put(buf)
if dstSize != decoded {
return nil, errors.New("corruption detected: short output block")
}
return dst, nil
}
+
+// decompress4x_main_loop_x86 is an x86 assembler implementation
+// of Decompress1X when tablelog > 8.
+//
+//go:noescape
+func decompress1x_main_loop_amd64(ctx *decompress1xContext)
+
+// decompress4x_main_loop_x86 is an x86 with BMI2 assembler implementation
+// of Decompress1X when tablelog > 8.
+//
+//go:noescape
+func decompress1x_main_loop_bmi2(ctx *decompress1xContext)
+
+type decompress1xContext struct {
+ pbr *bitReaderShifted
+ peekBits uint8
+ out *byte
+ outCap int
+ tbl *dEntrySingle
+ decoded int
+}
+
+// Error reported by asm implementations
+const error_max_decoded_size_exeeded = -1
+
+// Decompress1X will decompress a 1X encoded stream.
+// The cap of the output buffer will be the maximum decompressed size.
+// The length of the supplied input must match the end of a block exactly.
+func (d *Decoder) Decompress1X(dst, src []byte) ([]byte, error) {
+ if len(d.dt.single) == 0 {
+ return nil, errors.New("no table loaded")
+ }
+ var br bitReaderShifted
+ err := br.init(src)
+ if err != nil {
+ return dst, err
+ }
+ maxDecodedSize := cap(dst)
+ dst = dst[:maxDecodedSize]
+
+ const tlSize = 1 << tableLogMax
+ const tlMask = tlSize - 1
+
+ if maxDecodedSize >= 4 {
+ ctx := decompress1xContext{
+ pbr: &br,
+ out: &dst[0],
+ outCap: maxDecodedSize,
+ peekBits: uint8((64 - d.actualTableLog) & 63), // see: bitReaderShifted.peekBitsFast()
+ tbl: &d.dt.single[0],
+ }
+
+ if cpuinfo.HasBMI2() {
+ decompress1x_main_loop_bmi2(&ctx)
+ } else {
+ decompress1x_main_loop_amd64(&ctx)
+ }
+ if ctx.decoded == error_max_decoded_size_exeeded {
+ return nil, ErrMaxDecodedSizeExceeded
+ }
+
+ dst = dst[:ctx.decoded]
+ }
+
+ // br < 8, so uint8 is fine
+ bitsLeft := uint8(br.off)*8 + 64 - br.bitsRead
+ for bitsLeft > 0 {
+ br.fill()
+ if len(dst) >= maxDecodedSize {
+ br.close()
+ return nil, ErrMaxDecodedSizeExceeded
+ }
+ v := d.dt.single[br.peekBitsFast(d.actualTableLog)&tlMask]
+ nBits := uint8(v.entry)
+ br.advance(nBits)
+ bitsLeft -= nBits
+ dst = append(dst, uint8(v.entry>>8))
+ }
+ return dst, br.close()
+}
diff --git a/vendor/github.com/klauspost/compress/huff0/decompress_amd64.s b/vendor/github.com/klauspost/compress/huff0/decompress_amd64.s
index 2edad3ea5..c4c7ab2d1 100644
--- a/vendor/github.com/klauspost/compress/huff0/decompress_amd64.s
+++ b/vendor/github.com/klauspost/compress/huff0/decompress_amd64.s
@@ -1,506 +1,830 @@
-// +build !appengine
-// +build gc
-// +build !noasm
-
-#include "textflag.h"
-#include "funcdata.h"
-#include "go_asm.h"
-
-#ifdef GOAMD64_v4
-#ifndef GOAMD64_v3
-#define GOAMD64_v3
-#endif
-#endif
-
-#define bufoff 256 // see decompress.go, we're using [4][256]byte table
-
-// func decompress4x_main_loop_x86(pbr0, pbr1, pbr2, pbr3 *bitReaderShifted,
-// peekBits uint8, buf *byte, tbl *dEntrySingle) (int, bool)
-TEXT ·decompress4x_main_loop_x86(SB), NOSPLIT, $8
-#define off R8
-#define buffer DI
-#define table SI
-
-#define br_bits_read R9
-#define br_value R10
-#define br_offset R11
-#define peek_bits R12
-#define exhausted DX
-
-#define br0 R13
-#define br1 R14
-#define br2 R15
-#define br3 BP
-
- MOVQ BP, 0(SP)
-
- XORQ exhausted, exhausted // exhausted = false
- XORQ off, off // off = 0
-
- MOVBQZX peekBits+32(FP), peek_bits
- MOVQ buf+40(FP), buffer
- MOVQ tbl+48(FP), table
-
- MOVQ pbr0+0(FP), br0
- MOVQ pbr1+8(FP), br1
- MOVQ pbr2+16(FP), br2
- MOVQ pbr3+24(FP), br3
-
+// Code generated by command: go run gen.go -out ../decompress_amd64.s -pkg=huff0. DO NOT EDIT.
+
+//go:build amd64 && !appengine && !noasm && gc
+
+// func decompress4x_main_loop_amd64(ctx *decompress4xContext)
+TEXT ·decompress4x_main_loop_amd64(SB), $0-8
+ // Preload values
+ MOVQ ctx+0(FP), AX
+ MOVBQZX 8(AX), DI
+ MOVQ 16(AX), BX
+ MOVQ 48(AX), SI
+ MOVQ 24(AX), R8
+ MOVQ 32(AX), R9
+ MOVQ (AX), R10
+
+ // Main loop
main_loop:
-
- // const stream = 0
- // br0.fillFast()
- MOVBQZX bitReaderShifted_bitsRead(br0), br_bits_read
- MOVQ bitReaderShifted_value(br0), br_value
- MOVQ bitReaderShifted_off(br0), br_offset
-
- // We must have at least 2 * max tablelog left
- CMPQ br_bits_read, $64-22
- JBE skip_fill0
-
- SUBQ $32, br_bits_read // b.bitsRead -= 32
- SUBQ $4, br_offset // b.off -= 4
-
- // v := b.in[b.off-4 : b.off]
- // v = v[:4]
- // low := (uint32(v[0])) | (uint32(v[1]) << 8) | (uint32(v[2]) << 16) | (uint32(v[3]) << 24)
- MOVQ bitReaderShifted_in(br0), AX
+ XORL DX, DX
+ CMPQ BX, SI
+ SETGE DL
+
+ // br0.fillFast32()
+ MOVQ 32(R10), R11
+ MOVBQZX 40(R10), R12
+ CMPQ R12, $0x20
+ JBE skip_fill0
+ MOVQ 24(R10), AX
+ SUBQ $0x20, R12
+ SUBQ $0x04, AX
+ MOVQ (R10), R13
// b.value |= uint64(low) << (b.bitsRead & 63)
-#ifdef GOAMD64_v3
- SHLXQ br_bits_read, 0(br_offset)(AX*1), AX // AX = uint32(b.in[b.off:b.off+4]) << (b.bitsRead & 63)
-
-#else
- MOVL 0(br_offset)(AX*1), AX // AX = uint32(b.in[b.off:b.off+4])
- MOVQ br_bits_read, CX
- SHLQ CL, AX
-
-#endif
+ MOVL (AX)(R13*1), R13
+ MOVQ R12, CX
+ SHLQ CL, R13
+ MOVQ AX, 24(R10)
+ ORQ R13, R11
- ORQ AX, br_value
+ // exhausted += (br0.off < 4)
+ CMPQ AX, $0x04
+ ADCB $+0, DL
- // exhausted = exhausted || (br0.off < 4)
- CMPQ br_offset, $4
- SETLT DL
- ORB DL, DH
-
- // }
skip_fill0:
-
// val0 := br0.peekTopBits(peekBits)
-#ifdef GOAMD64_v3
- SHRXQ peek_bits, br_value, AX // AX = (value >> peek_bits) & mask
-
-#else
- MOVQ br_value, AX
- MOVQ peek_bits, CX
- SHRQ CL, AX // AX = (value >> peek_bits) & mask
-
-#endif
+ MOVQ R11, R13
+ MOVQ DI, CX
+ SHRQ CL, R13
// v0 := table[val0&mask]
- MOVW 0(table)(AX*2), AX // AX - v0
-
- // br0.advance(uint8(v0.entry))
- MOVB AH, BL // BL = uint8(v0.entry >> 8)
-
-#ifdef GOAMD64_v3
- MOVBQZX AL, CX
- SHLXQ AX, br_value, br_value // value <<= n
-
-#else
- MOVBQZX AL, CX
- SHLQ CL, br_value // value <<= n
+ MOVW (R9)(R13*2), CX
-#endif
+ // br0.advance(uint8(v0.entry)
+ MOVB CH, AL
+ SHLQ CL, R11
+ ADDB CL, R12
- ADDQ CX, br_bits_read // bits_read += n
-
-#ifdef GOAMD64_v3
- SHRXQ peek_bits, br_value, AX // AX = (value >> peek_bits) & mask
-
-#else
// val1 := br0.peekTopBits(peekBits)
- MOVQ peek_bits, CX
- MOVQ br_value, AX
- SHRQ CL, AX // AX = (value >> peek_bits) & mask
-
-#endif
+ MOVQ DI, CX
+ MOVQ R11, R13
+ SHRQ CL, R13
// v1 := table[val1&mask]
- MOVW 0(table)(AX*2), AX // AX - v1
+ MOVW (R9)(R13*2), CX
// br0.advance(uint8(v1.entry))
- MOVB AH, BH // BH = uint8(v1.entry >> 8)
-
-#ifdef GOAMD64_v3
- MOVBQZX AL, CX
- SHLXQ AX, br_value, br_value // value <<= n
-
-#else
- MOVBQZX AL, CX
- SHLQ CL, br_value // value <<= n
-
-#endif
-
- ADDQ CX, br_bits_read // bits_read += n
+ MOVB CH, AH
+ SHLQ CL, R11
+ ADDB CL, R12
// these two writes get coalesced
- // buf[stream][off] = uint8(v0.entry >> 8)
- // buf[stream][off+1] = uint8(v1.entry >> 8)
- MOVW BX, 0(buffer)(off*1)
-
- // update the bitrader reader structure
- MOVB br_bits_read, bitReaderShifted_bitsRead(br0)
- MOVQ br_value, bitReaderShifted_value(br0)
- MOVQ br_offset, bitReaderShifted_off(br0)
-
- // const stream = 1
- // br1.fillFast()
- MOVBQZX bitReaderShifted_bitsRead(br1), br_bits_read
- MOVQ bitReaderShifted_value(br1), br_value
- MOVQ bitReaderShifted_off(br1), br_offset
-
- // We must have at least 2 * max tablelog left
- CMPQ br_bits_read, $64-22
- JBE skip_fill1
-
- SUBQ $32, br_bits_read // b.bitsRead -= 32
- SUBQ $4, br_offset // b.off -= 4
-
- // v := b.in[b.off-4 : b.off]
- // v = v[:4]
- // low := (uint32(v[0])) | (uint32(v[1]) << 8) | (uint32(v[2]) << 16) | (uint32(v[3]) << 24)
- MOVQ bitReaderShifted_in(br1), AX
+ // out[id * dstEvery + 0] = uint8(v0.entry >> 8)
+ // out[id * dstEvery + 1] = uint8(v1.entry >> 8)
+ MOVW AX, (BX)
+
+ // update the bitreader structure
+ MOVQ R11, 32(R10)
+ MOVB R12, 40(R10)
+
+ // br1.fillFast32()
+ MOVQ 80(R10), R11
+ MOVBQZX 88(R10), R12
+ CMPQ R12, $0x20
+ JBE skip_fill1
+ MOVQ 72(R10), AX
+ SUBQ $0x20, R12
+ SUBQ $0x04, AX
+ MOVQ 48(R10), R13
// b.value |= uint64(low) << (b.bitsRead & 63)
-#ifdef GOAMD64_v3
- SHLXQ br_bits_read, 0(br_offset)(AX*1), AX // AX = uint32(b.in[b.off:b.off+4]) << (b.bitsRead & 63)
-
-#else
- MOVL 0(br_offset)(AX*1), AX // AX = uint32(b.in[b.off:b.off+4])
- MOVQ br_bits_read, CX
- SHLQ CL, AX
+ MOVL (AX)(R13*1), R13
+ MOVQ R12, CX
+ SHLQ CL, R13
+ MOVQ AX, 72(R10)
+ ORQ R13, R11
-#endif
+ // exhausted += (br1.off < 4)
+ CMPQ AX, $0x04
+ ADCB $+0, DL
- ORQ AX, br_value
-
- // exhausted = exhausted || (br1.off < 4)
- CMPQ br_offset, $4
- SETLT DL
- ORB DL, DH
-
- // }
skip_fill1:
-
// val0 := br1.peekTopBits(peekBits)
-#ifdef GOAMD64_v3
- SHRXQ peek_bits, br_value, AX // AX = (value >> peek_bits) & mask
-
-#else
- MOVQ br_value, AX
- MOVQ peek_bits, CX
- SHRQ CL, AX // AX = (value >> peek_bits) & mask
-
-#endif
+ MOVQ R11, R13
+ MOVQ DI, CX
+ SHRQ CL, R13
// v0 := table[val0&mask]
- MOVW 0(table)(AX*2), AX // AX - v0
-
- // br1.advance(uint8(v0.entry))
- MOVB AH, BL // BL = uint8(v0.entry >> 8)
-
-#ifdef GOAMD64_v3
- MOVBQZX AL, CX
- SHLXQ AX, br_value, br_value // value <<= n
+ MOVW (R9)(R13*2), CX
-#else
- MOVBQZX AL, CX
- SHLQ CL, br_value // value <<= n
+ // br1.advance(uint8(v0.entry)
+ MOVB CH, AL
+ SHLQ CL, R11
+ ADDB CL, R12
-#endif
-
- ADDQ CX, br_bits_read // bits_read += n
-
-#ifdef GOAMD64_v3
- SHRXQ peek_bits, br_value, AX // AX = (value >> peek_bits) & mask
-
-#else
// val1 := br1.peekTopBits(peekBits)
- MOVQ peek_bits, CX
- MOVQ br_value, AX
- SHRQ CL, AX // AX = (value >> peek_bits) & mask
-
-#endif
+ MOVQ DI, CX
+ MOVQ R11, R13
+ SHRQ CL, R13
// v1 := table[val1&mask]
- MOVW 0(table)(AX*2), AX // AX - v1
+ MOVW (R9)(R13*2), CX
// br1.advance(uint8(v1.entry))
- MOVB AH, BH // BH = uint8(v1.entry >> 8)
-
-#ifdef GOAMD64_v3
- MOVBQZX AL, CX
- SHLXQ AX, br_value, br_value // value <<= n
-
-#else
- MOVBQZX AL, CX
- SHLQ CL, br_value // value <<= n
-
-#endif
-
- ADDQ CX, br_bits_read // bits_read += n
+ MOVB CH, AH
+ SHLQ CL, R11
+ ADDB CL, R12
// these two writes get coalesced
- // buf[stream][off] = uint8(v0.entry >> 8)
- // buf[stream][off+1] = uint8(v1.entry >> 8)
- MOVW BX, 256(buffer)(off*1)
-
- // update the bitrader reader structure
- MOVB br_bits_read, bitReaderShifted_bitsRead(br1)
- MOVQ br_value, bitReaderShifted_value(br1)
- MOVQ br_offset, bitReaderShifted_off(br1)
-
- // const stream = 2
- // br2.fillFast()
- MOVBQZX bitReaderShifted_bitsRead(br2), br_bits_read
- MOVQ bitReaderShifted_value(br2), br_value
- MOVQ bitReaderShifted_off(br2), br_offset
-
- // We must have at least 2 * max tablelog left
- CMPQ br_bits_read, $64-22
- JBE skip_fill2
-
- SUBQ $32, br_bits_read // b.bitsRead -= 32
- SUBQ $4, br_offset // b.off -= 4
-
- // v := b.in[b.off-4 : b.off]
- // v = v[:4]
- // low := (uint32(v[0])) | (uint32(v[1]) << 8) | (uint32(v[2]) << 16) | (uint32(v[3]) << 24)
- MOVQ bitReaderShifted_in(br2), AX
+ // out[id * dstEvery + 0] = uint8(v0.entry >> 8)
+ // out[id * dstEvery + 1] = uint8(v1.entry >> 8)
+ MOVW AX, (BX)(R8*1)
+
+ // update the bitreader structure
+ MOVQ R11, 80(R10)
+ MOVB R12, 88(R10)
+
+ // br2.fillFast32()
+ MOVQ 128(R10), R11
+ MOVBQZX 136(R10), R12
+ CMPQ R12, $0x20
+ JBE skip_fill2
+ MOVQ 120(R10), AX
+ SUBQ $0x20, R12
+ SUBQ $0x04, AX
+ MOVQ 96(R10), R13
// b.value |= uint64(low) << (b.bitsRead & 63)
-#ifdef GOAMD64_v3
- SHLXQ br_bits_read, 0(br_offset)(AX*1), AX // AX = uint32(b.in[b.off:b.off+4]) << (b.bitsRead & 63)
+ MOVL (AX)(R13*1), R13
+ MOVQ R12, CX
+ SHLQ CL, R13
+ MOVQ AX, 120(R10)
+ ORQ R13, R11
-#else
- MOVL 0(br_offset)(AX*1), AX // AX = uint32(b.in[b.off:b.off+4])
- MOVQ br_bits_read, CX
- SHLQ CL, AX
+ // exhausted += (br2.off < 4)
+ CMPQ AX, $0x04
+ ADCB $+0, DL
-#endif
-
- ORQ AX, br_value
-
- // exhausted = exhausted || (br2.off < 4)
- CMPQ br_offset, $4
- SETLT DL
- ORB DL, DH
-
- // }
skip_fill2:
-
// val0 := br2.peekTopBits(peekBits)
-#ifdef GOAMD64_v3
- SHRXQ peek_bits, br_value, AX // AX = (value >> peek_bits) & mask
-
-#else
- MOVQ br_value, AX
- MOVQ peek_bits, CX
- SHRQ CL, AX // AX = (value >> peek_bits) & mask
-
-#endif
+ MOVQ R11, R13
+ MOVQ DI, CX
+ SHRQ CL, R13
// v0 := table[val0&mask]
- MOVW 0(table)(AX*2), AX // AX - v0
+ MOVW (R9)(R13*2), CX
- // br2.advance(uint8(v0.entry))
- MOVB AH, BL // BL = uint8(v0.entry >> 8)
+ // br2.advance(uint8(v0.entry)
+ MOVB CH, AL
+ SHLQ CL, R11
+ ADDB CL, R12
-#ifdef GOAMD64_v3
- MOVBQZX AL, CX
- SHLXQ AX, br_value, br_value // value <<= n
-
-#else
- MOVBQZX AL, CX
- SHLQ CL, br_value // value <<= n
-
-#endif
-
- ADDQ CX, br_bits_read // bits_read += n
-
-#ifdef GOAMD64_v3
- SHRXQ peek_bits, br_value, AX // AX = (value >> peek_bits) & mask
-
-#else
// val1 := br2.peekTopBits(peekBits)
- MOVQ peek_bits, CX
- MOVQ br_value, AX
- SHRQ CL, AX // AX = (value >> peek_bits) & mask
-
-#endif
+ MOVQ DI, CX
+ MOVQ R11, R13
+ SHRQ CL, R13
// v1 := table[val1&mask]
- MOVW 0(table)(AX*2), AX // AX - v1
+ MOVW (R9)(R13*2), CX
// br2.advance(uint8(v1.entry))
- MOVB AH, BH // BH = uint8(v1.entry >> 8)
+ MOVB CH, AH
+ SHLQ CL, R11
+ ADDB CL, R12
-#ifdef GOAMD64_v3
- MOVBQZX AL, CX
- SHLXQ AX, br_value, br_value // value <<= n
+ // these two writes get coalesced
+ // out[id * dstEvery + 0] = uint8(v0.entry >> 8)
+ // out[id * dstEvery + 1] = uint8(v1.entry >> 8)
+ MOVW AX, (BX)(R8*2)
+
+ // update the bitreader structure
+ MOVQ R11, 128(R10)
+ MOVB R12, 136(R10)
+
+ // br3.fillFast32()
+ MOVQ 176(R10), R11
+ MOVBQZX 184(R10), R12
+ CMPQ R12, $0x20
+ JBE skip_fill3
+ MOVQ 168(R10), AX
+ SUBQ $0x20, R12
+ SUBQ $0x04, AX
+ MOVQ 144(R10), R13
-#else
- MOVBQZX AL, CX
- SHLQ CL, br_value // value <<= n
+ // b.value |= uint64(low) << (b.bitsRead & 63)
+ MOVL (AX)(R13*1), R13
+ MOVQ R12, CX
+ SHLQ CL, R13
+ MOVQ AX, 168(R10)
+ ORQ R13, R11
-#endif
+ // exhausted += (br3.off < 4)
+ CMPQ AX, $0x04
+ ADCB $+0, DL
- ADDQ CX, br_bits_read // bits_read += n
+skip_fill3:
+ // val0 := br3.peekTopBits(peekBits)
+ MOVQ R11, R13
+ MOVQ DI, CX
+ SHRQ CL, R13
- // these two writes get coalesced
- // buf[stream][off] = uint8(v0.entry >> 8)
- // buf[stream][off+1] = uint8(v1.entry >> 8)
- MOVW BX, 512(buffer)(off*1)
+ // v0 := table[val0&mask]
+ MOVW (R9)(R13*2), CX
- // update the bitrader reader structure
- MOVB br_bits_read, bitReaderShifted_bitsRead(br2)
- MOVQ br_value, bitReaderShifted_value(br2)
- MOVQ br_offset, bitReaderShifted_off(br2)
+ // br3.advance(uint8(v0.entry)
+ MOVB CH, AL
+ SHLQ CL, R11
+ ADDB CL, R12
- // const stream = 3
- // br3.fillFast()
- MOVBQZX bitReaderShifted_bitsRead(br3), br_bits_read
- MOVQ bitReaderShifted_value(br3), br_value
- MOVQ bitReaderShifted_off(br3), br_offset
+ // val1 := br3.peekTopBits(peekBits)
+ MOVQ DI, CX
+ MOVQ R11, R13
+ SHRQ CL, R13
- // We must have at least 2 * max tablelog left
- CMPQ br_bits_read, $64-22
- JBE skip_fill3
+ // v1 := table[val1&mask]
+ MOVW (R9)(R13*2), CX
- SUBQ $32, br_bits_read // b.bitsRead -= 32
- SUBQ $4, br_offset // b.off -= 4
+ // br3.advance(uint8(v1.entry))
+ MOVB CH, AH
+ SHLQ CL, R11
+ ADDB CL, R12
- // v := b.in[b.off-4 : b.off]
- // v = v[:4]
- // low := (uint32(v[0])) | (uint32(v[1]) << 8) | (uint32(v[2]) << 16) | (uint32(v[3]) << 24)
- MOVQ bitReaderShifted_in(br3), AX
+ // these two writes get coalesced
+ // out[id * dstEvery + 0] = uint8(v0.entry >> 8)
+ // out[id * dstEvery + 1] = uint8(v1.entry >> 8)
+ LEAQ (R8)(R8*2), CX
+ MOVW AX, (BX)(CX*1)
+
+ // update the bitreader structure
+ MOVQ R11, 176(R10)
+ MOVB R12, 184(R10)
+ ADDQ $0x02, BX
+ TESTB DL, DL
+ JZ main_loop
+ MOVQ ctx+0(FP), AX
+ SUBQ 16(AX), BX
+ SHLQ $0x02, BX
+ MOVQ BX, 40(AX)
+ RET
+
+// func decompress4x_8b_main_loop_amd64(ctx *decompress4xContext)
+TEXT ·decompress4x_8b_main_loop_amd64(SB), $0-8
+ // Preload values
+ MOVQ ctx+0(FP), CX
+ MOVBQZX 8(CX), DI
+ MOVQ 16(CX), BX
+ MOVQ 48(CX), SI
+ MOVQ 24(CX), R8
+ MOVQ 32(CX), R9
+ MOVQ (CX), R10
+
+ // Main loop
+main_loop:
+ XORL DX, DX
+ CMPQ BX, SI
+ SETGE DL
+
+ // br0.fillFast32()
+ MOVQ 32(R10), R11
+ MOVBQZX 40(R10), R12
+ CMPQ R12, $0x20
+ JBE skip_fill0
+ MOVQ 24(R10), R13
+ SUBQ $0x20, R12
+ SUBQ $0x04, R13
+ MOVQ (R10), R14
// b.value |= uint64(low) << (b.bitsRead & 63)
-#ifdef GOAMD64_v3
- SHLXQ br_bits_read, 0(br_offset)(AX*1), AX // AX = uint32(b.in[b.off:b.off+4]) << (b.bitsRead & 63)
+ MOVL (R13)(R14*1), R14
+ MOVQ R12, CX
+ SHLQ CL, R14
+ MOVQ R13, 24(R10)
+ ORQ R14, R11
-#else
- MOVL 0(br_offset)(AX*1), AX // AX = uint32(b.in[b.off:b.off+4])
- MOVQ br_bits_read, CX
- SHLQ CL, AX
+ // exhausted += (br0.off < 4)
+ CMPQ R13, $0x04
+ ADCB $+0, DL
-#endif
+skip_fill0:
+ // val0 := br0.peekTopBits(peekBits)
+ MOVQ R11, R13
+ MOVQ DI, CX
+ SHRQ CL, R13
- ORQ AX, br_value
+ // v0 := table[val0&mask]
+ MOVW (R9)(R13*2), CX
- // exhausted = exhausted || (br3.off < 4)
- CMPQ br_offset, $4
- SETLT DL
- ORB DL, DH
+ // br0.advance(uint8(v0.entry)
+ MOVB CH, AL
+ SHLQ CL, R11
+ ADDB CL, R12
- // }
-skip_fill3:
+ // val1 := br0.peekTopBits(peekBits)
+ MOVQ R11, R13
+ MOVQ DI, CX
+ SHRQ CL, R13
+
+ // v1 := table[val0&mask]
+ MOVW (R9)(R13*2), CX
+
+ // br0.advance(uint8(v1.entry)
+ MOVB CH, AH
+ SHLQ CL, R11
+ ADDB CL, R12
+ BSWAPL AX
+
+ // val2 := br0.peekTopBits(peekBits)
+ MOVQ R11, R13
+ MOVQ DI, CX
+ SHRQ CL, R13
+
+ // v2 := table[val0&mask]
+ MOVW (R9)(R13*2), CX
+
+ // br0.advance(uint8(v2.entry)
+ MOVB CH, AH
+ SHLQ CL, R11
+ ADDB CL, R12
+
+ // val3 := br0.peekTopBits(peekBits)
+ MOVQ R11, R13
+ MOVQ DI, CX
+ SHRQ CL, R13
+
+ // v3 := table[val0&mask]
+ MOVW (R9)(R13*2), CX
+
+ // br0.advance(uint8(v3.entry)
+ MOVB CH, AL
+ SHLQ CL, R11
+ ADDB CL, R12
+ BSWAPL AX
+
+ // these four writes get coalesced
+ // out[id * dstEvery + 0] = uint8(v0.entry >> 8)
+ // out[id * dstEvery + 1] = uint8(v1.entry >> 8)
+ // out[id * dstEvery + 3] = uint8(v2.entry >> 8)
+ // out[id * dstEvery + 4] = uint8(v3.entry >> 8)
+ MOVL AX, (BX)
+
+ // update the bitreader structure
+ MOVQ R11, 32(R10)
+ MOVB R12, 40(R10)
+
+ // br1.fillFast32()
+ MOVQ 80(R10), R11
+ MOVBQZX 88(R10), R12
+ CMPQ R12, $0x20
+ JBE skip_fill1
+ MOVQ 72(R10), R13
+ SUBQ $0x20, R12
+ SUBQ $0x04, R13
+ MOVQ 48(R10), R14
- // val0 := br3.peekTopBits(peekBits)
-#ifdef GOAMD64_v3
- SHRXQ peek_bits, br_value, AX // AX = (value >> peek_bits) & mask
+ // b.value |= uint64(low) << (b.bitsRead & 63)
+ MOVL (R13)(R14*1), R14
+ MOVQ R12, CX
+ SHLQ CL, R14
+ MOVQ R13, 72(R10)
+ ORQ R14, R11
-#else
- MOVQ br_value, AX
- MOVQ peek_bits, CX
- SHRQ CL, AX // AX = (value >> peek_bits) & mask
+ // exhausted += (br1.off < 4)
+ CMPQ R13, $0x04
+ ADCB $+0, DL
-#endif
+skip_fill1:
+ // val0 := br1.peekTopBits(peekBits)
+ MOVQ R11, R13
+ MOVQ DI, CX
+ SHRQ CL, R13
// v0 := table[val0&mask]
- MOVW 0(table)(AX*2), AX // AX - v0
-
- // br3.advance(uint8(v0.entry))
- MOVB AH, BL // BL = uint8(v0.entry >> 8)
-
-#ifdef GOAMD64_v3
- MOVBQZX AL, CX
- SHLXQ AX, br_value, br_value // value <<= n
-
-#else
- MOVBQZX AL, CX
- SHLQ CL, br_value // value <<= n
+ MOVW (R9)(R13*2), CX
-#endif
+ // br1.advance(uint8(v0.entry)
+ MOVB CH, AL
+ SHLQ CL, R11
+ ADDB CL, R12
- ADDQ CX, br_bits_read // bits_read += n
-
-#ifdef GOAMD64_v3
- SHRXQ peek_bits, br_value, AX // AX = (value >> peek_bits) & mask
+ // val1 := br1.peekTopBits(peekBits)
+ MOVQ R11, R13
+ MOVQ DI, CX
+ SHRQ CL, R13
+
+ // v1 := table[val0&mask]
+ MOVW (R9)(R13*2), CX
+
+ // br1.advance(uint8(v1.entry)
+ MOVB CH, AH
+ SHLQ CL, R11
+ ADDB CL, R12
+ BSWAPL AX
+
+ // val2 := br1.peekTopBits(peekBits)
+ MOVQ R11, R13
+ MOVQ DI, CX
+ SHRQ CL, R13
+
+ // v2 := table[val0&mask]
+ MOVW (R9)(R13*2), CX
+
+ // br1.advance(uint8(v2.entry)
+ MOVB CH, AH
+ SHLQ CL, R11
+ ADDB CL, R12
+
+ // val3 := br1.peekTopBits(peekBits)
+ MOVQ R11, R13
+ MOVQ DI, CX
+ SHRQ CL, R13
+
+ // v3 := table[val0&mask]
+ MOVW (R9)(R13*2), CX
+
+ // br1.advance(uint8(v3.entry)
+ MOVB CH, AL
+ SHLQ CL, R11
+ ADDB CL, R12
+ BSWAPL AX
+
+ // these four writes get coalesced
+ // out[id * dstEvery + 0] = uint8(v0.entry >> 8)
+ // out[id * dstEvery + 1] = uint8(v1.entry >> 8)
+ // out[id * dstEvery + 3] = uint8(v2.entry >> 8)
+ // out[id * dstEvery + 4] = uint8(v3.entry >> 8)
+ MOVL AX, (BX)(R8*1)
+
+ // update the bitreader structure
+ MOVQ R11, 80(R10)
+ MOVB R12, 88(R10)
+
+ // br2.fillFast32()
+ MOVQ 128(R10), R11
+ MOVBQZX 136(R10), R12
+ CMPQ R12, $0x20
+ JBE skip_fill2
+ MOVQ 120(R10), R13
+ SUBQ $0x20, R12
+ SUBQ $0x04, R13
+ MOVQ 96(R10), R14
-#else
- // val1 := br3.peekTopBits(peekBits)
- MOVQ peek_bits, CX
- MOVQ br_value, AX
- SHRQ CL, AX // AX = (value >> peek_bits) & mask
+ // b.value |= uint64(low) << (b.bitsRead & 63)
+ MOVL (R13)(R14*1), R14
+ MOVQ R12, CX
+ SHLQ CL, R14
+ MOVQ R13, 120(R10)
+ ORQ R14, R11
-#endif
+ // exhausted += (br2.off < 4)
+ CMPQ R13, $0x04
+ ADCB $+0, DL
- // v1 := table[val1&mask]
- MOVW 0(table)(AX*2), AX // AX - v1
+skip_fill2:
+ // val0 := br2.peekTopBits(peekBits)
+ MOVQ R11, R13
+ MOVQ DI, CX
+ SHRQ CL, R13
- // br3.advance(uint8(v1.entry))
- MOVB AH, BH // BH = uint8(v1.entry >> 8)
+ // v0 := table[val0&mask]
+ MOVW (R9)(R13*2), CX
-#ifdef GOAMD64_v3
- MOVBQZX AL, CX
- SHLXQ AX, br_value, br_value // value <<= n
+ // br2.advance(uint8(v0.entry)
+ MOVB CH, AL
+ SHLQ CL, R11
+ ADDB CL, R12
-#else
- MOVBQZX AL, CX
- SHLQ CL, br_value // value <<= n
+ // val1 := br2.peekTopBits(peekBits)
+ MOVQ R11, R13
+ MOVQ DI, CX
+ SHRQ CL, R13
+
+ // v1 := table[val0&mask]
+ MOVW (R9)(R13*2), CX
+
+ // br2.advance(uint8(v1.entry)
+ MOVB CH, AH
+ SHLQ CL, R11
+ ADDB CL, R12
+ BSWAPL AX
+
+ // val2 := br2.peekTopBits(peekBits)
+ MOVQ R11, R13
+ MOVQ DI, CX
+ SHRQ CL, R13
+
+ // v2 := table[val0&mask]
+ MOVW (R9)(R13*2), CX
+
+ // br2.advance(uint8(v2.entry)
+ MOVB CH, AH
+ SHLQ CL, R11
+ ADDB CL, R12
+
+ // val3 := br2.peekTopBits(peekBits)
+ MOVQ R11, R13
+ MOVQ DI, CX
+ SHRQ CL, R13
+
+ // v3 := table[val0&mask]
+ MOVW (R9)(R13*2), CX
+
+ // br2.advance(uint8(v3.entry)
+ MOVB CH, AL
+ SHLQ CL, R11
+ ADDB CL, R12
+ BSWAPL AX
+
+ // these four writes get coalesced
+ // out[id * dstEvery + 0] = uint8(v0.entry >> 8)
+ // out[id * dstEvery + 1] = uint8(v1.entry >> 8)
+ // out[id * dstEvery + 3] = uint8(v2.entry >> 8)
+ // out[id * dstEvery + 4] = uint8(v3.entry >> 8)
+ MOVL AX, (BX)(R8*2)
+
+ // update the bitreader structure
+ MOVQ R11, 128(R10)
+ MOVB R12, 136(R10)
+
+ // br3.fillFast32()
+ MOVQ 176(R10), R11
+ MOVBQZX 184(R10), R12
+ CMPQ R12, $0x20
+ JBE skip_fill3
+ MOVQ 168(R10), R13
+ SUBQ $0x20, R12
+ SUBQ $0x04, R13
+ MOVQ 144(R10), R14
-#endif
+ // b.value |= uint64(low) << (b.bitsRead & 63)
+ MOVL (R13)(R14*1), R14
+ MOVQ R12, CX
+ SHLQ CL, R14
+ MOVQ R13, 168(R10)
+ ORQ R14, R11
- ADDQ CX, br_bits_read // bits_read += n
+ // exhausted += (br3.off < 4)
+ CMPQ R13, $0x04
+ ADCB $+0, DL
- // these two writes get coalesced
- // buf[stream][off] = uint8(v0.entry >> 8)
- // buf[stream][off+1] = uint8(v1.entry >> 8)
- MOVW BX, 768(buffer)(off*1)
+skip_fill3:
+ // val0 := br3.peekTopBits(peekBits)
+ MOVQ R11, R13
+ MOVQ DI, CX
+ SHRQ CL, R13
- // update the bitrader reader structure
- MOVB br_bits_read, bitReaderShifted_bitsRead(br3)
- MOVQ br_value, bitReaderShifted_value(br3)
- MOVQ br_offset, bitReaderShifted_off(br3)
+ // v0 := table[val0&mask]
+ MOVW (R9)(R13*2), CX
- ADDQ $2, off // off += 2
+ // br3.advance(uint8(v0.entry)
+ MOVB CH, AL
+ SHLQ CL, R11
+ ADDB CL, R12
- TESTB DH, DH // any br[i].ofs < 4?
- JNZ end
+ // val1 := br3.peekTopBits(peekBits)
+ MOVQ R11, R13
+ MOVQ DI, CX
+ SHRQ CL, R13
+
+ // v1 := table[val0&mask]
+ MOVW (R9)(R13*2), CX
+
+ // br3.advance(uint8(v1.entry)
+ MOVB CH, AH
+ SHLQ CL, R11
+ ADDB CL, R12
+ BSWAPL AX
+
+ // val2 := br3.peekTopBits(peekBits)
+ MOVQ R11, R13
+ MOVQ DI, CX
+ SHRQ CL, R13
+
+ // v2 := table[val0&mask]
+ MOVW (R9)(R13*2), CX
+
+ // br3.advance(uint8(v2.entry)
+ MOVB CH, AH
+ SHLQ CL, R11
+ ADDB CL, R12
+
+ // val3 := br3.peekTopBits(peekBits)
+ MOVQ R11, R13
+ MOVQ DI, CX
+ SHRQ CL, R13
+
+ // v3 := table[val0&mask]
+ MOVW (R9)(R13*2), CX
+
+ // br3.advance(uint8(v3.entry)
+ MOVB CH, AL
+ SHLQ CL, R11
+ ADDB CL, R12
+ BSWAPL AX
+
+ // these four writes get coalesced
+ // out[id * dstEvery + 0] = uint8(v0.entry >> 8)
+ // out[id * dstEvery + 1] = uint8(v1.entry >> 8)
+ // out[id * dstEvery + 3] = uint8(v2.entry >> 8)
+ // out[id * dstEvery + 4] = uint8(v3.entry >> 8)
+ LEAQ (R8)(R8*2), CX
+ MOVL AX, (BX)(CX*1)
+
+ // update the bitreader structure
+ MOVQ R11, 176(R10)
+ MOVB R12, 184(R10)
+ ADDQ $0x04, BX
+ TESTB DL, DL
+ JZ main_loop
+ MOVQ ctx+0(FP), AX
+ SUBQ 16(AX), BX
+ SHLQ $0x02, BX
+ MOVQ BX, 40(AX)
+ RET
- CMPQ off, $bufoff
- JL main_loop
+// func decompress1x_main_loop_amd64(ctx *decompress1xContext)
+TEXT ·decompress1x_main_loop_amd64(SB), $0-8
+ MOVQ ctx+0(FP), CX
+ MOVQ 16(CX), DX
+ MOVQ 24(CX), BX
+ CMPQ BX, $0x04
+ JB error_max_decoded_size_exceeded
+ LEAQ (DX)(BX*1), BX
+ MOVQ (CX), SI
+ MOVQ (SI), R8
+ MOVQ 24(SI), R9
+ MOVQ 32(SI), R10
+ MOVBQZX 40(SI), R11
+ MOVQ 32(CX), SI
+ MOVBQZX 8(CX), DI
+ JMP loop_condition
-end:
- MOVQ 0(SP), BP
+main_loop:
+ // Check if we have room for 4 bytes in the output buffer
+ LEAQ 4(DX), CX
+ CMPQ CX, BX
+ JGE error_max_decoded_size_exceeded
+
+ // Decode 4 values
+ CMPQ R11, $0x20
+ JL bitReader_fillFast_1_end
+ SUBQ $0x20, R11
+ SUBQ $0x04, R9
+ MOVL (R8)(R9*1), R12
+ MOVQ R11, CX
+ SHLQ CL, R12
+ ORQ R12, R10
+
+bitReader_fillFast_1_end:
+ MOVQ DI, CX
+ MOVQ R10, R12
+ SHRQ CL, R12
+ MOVW (SI)(R12*2), CX
+ MOVB CH, AL
+ MOVBQZX CL, CX
+ ADDQ CX, R11
+ SHLQ CL, R10
+ MOVQ DI, CX
+ MOVQ R10, R12
+ SHRQ CL, R12
+ MOVW (SI)(R12*2), CX
+ MOVB CH, AH
+ MOVBQZX CL, CX
+ ADDQ CX, R11
+ SHLQ CL, R10
+ BSWAPL AX
+ CMPQ R11, $0x20
+ JL bitReader_fillFast_2_end
+ SUBQ $0x20, R11
+ SUBQ $0x04, R9
+ MOVL (R8)(R9*1), R12
+ MOVQ R11, CX
+ SHLQ CL, R12
+ ORQ R12, R10
+
+bitReader_fillFast_2_end:
+ MOVQ DI, CX
+ MOVQ R10, R12
+ SHRQ CL, R12
+ MOVW (SI)(R12*2), CX
+ MOVB CH, AH
+ MOVBQZX CL, CX
+ ADDQ CX, R11
+ SHLQ CL, R10
+ MOVQ DI, CX
+ MOVQ R10, R12
+ SHRQ CL, R12
+ MOVW (SI)(R12*2), CX
+ MOVB CH, AL
+ MOVBQZX CL, CX
+ ADDQ CX, R11
+ SHLQ CL, R10
+ BSWAPL AX
+
+ // Store the decoded values
+ MOVL AX, (DX)
+ ADDQ $0x04, DX
+
+loop_condition:
+ CMPQ R9, $0x08
+ JGE main_loop
+
+ // Update ctx structure
+ MOVQ ctx+0(FP), AX
+ SUBQ 16(AX), DX
+ MOVQ DX, 40(AX)
+ MOVQ (AX), AX
+ MOVQ R9, 24(AX)
+ MOVQ R10, 32(AX)
+ MOVB R11, 40(AX)
+ RET
- MOVB off, ret+56(FP)
+ // Report error
+error_max_decoded_size_exceeded:
+ MOVQ ctx+0(FP), AX
+ MOVQ $-1, CX
+ MOVQ CX, 40(AX)
RET
-#undef off
-#undef buffer
-#undef table
+// func decompress1x_main_loop_bmi2(ctx *decompress1xContext)
+// Requires: BMI2
+TEXT ·decompress1x_main_loop_bmi2(SB), $0-8
+ MOVQ ctx+0(FP), CX
+ MOVQ 16(CX), DX
+ MOVQ 24(CX), BX
+ CMPQ BX, $0x04
+ JB error_max_decoded_size_exceeded
+ LEAQ (DX)(BX*1), BX
+ MOVQ (CX), SI
+ MOVQ (SI), R8
+ MOVQ 24(SI), R9
+ MOVQ 32(SI), R10
+ MOVBQZX 40(SI), R11
+ MOVQ 32(CX), SI
+ MOVBQZX 8(CX), DI
+ JMP loop_condition
-#undef br_bits_read
-#undef br_value
-#undef br_offset
-#undef peek_bits
-#undef exhausted
+main_loop:
+ // Check if we have room for 4 bytes in the output buffer
+ LEAQ 4(DX), CX
+ CMPQ CX, BX
+ JGE error_max_decoded_size_exceeded
+
+ // Decode 4 values
+ CMPQ R11, $0x20
+ JL bitReader_fillFast_1_end
+ SUBQ $0x20, R11
+ SUBQ $0x04, R9
+ MOVL (R8)(R9*1), CX
+ SHLXQ R11, CX, CX
+ ORQ CX, R10
+
+bitReader_fillFast_1_end:
+ SHRXQ DI, R10, CX
+ MOVW (SI)(CX*2), CX
+ MOVB CH, AL
+ MOVBQZX CL, CX
+ ADDQ CX, R11
+ SHLXQ CX, R10, R10
+ SHRXQ DI, R10, CX
+ MOVW (SI)(CX*2), CX
+ MOVB CH, AH
+ MOVBQZX CL, CX
+ ADDQ CX, R11
+ SHLXQ CX, R10, R10
+ BSWAPL AX
+ CMPQ R11, $0x20
+ JL bitReader_fillFast_2_end
+ SUBQ $0x20, R11
+ SUBQ $0x04, R9
+ MOVL (R8)(R9*1), CX
+ SHLXQ R11, CX, CX
+ ORQ CX, R10
+
+bitReader_fillFast_2_end:
+ SHRXQ DI, R10, CX
+ MOVW (SI)(CX*2), CX
+ MOVB CH, AH
+ MOVBQZX CL, CX
+ ADDQ CX, R11
+ SHLXQ CX, R10, R10
+ SHRXQ DI, R10, CX
+ MOVW (SI)(CX*2), CX
+ MOVB CH, AL
+ MOVBQZX CL, CX
+ ADDQ CX, R11
+ SHLXQ CX, R10, R10
+ BSWAPL AX
+
+ // Store the decoded values
+ MOVL AX, (DX)
+ ADDQ $0x04, DX
+
+loop_condition:
+ CMPQ R9, $0x08
+ JGE main_loop
+
+ // Update ctx structure
+ MOVQ ctx+0(FP), AX
+ SUBQ 16(AX), DX
+ MOVQ DX, 40(AX)
+ MOVQ (AX), AX
+ MOVQ R9, 24(AX)
+ MOVQ R10, 32(AX)
+ MOVB R11, 40(AX)
+ RET
-#undef br0
-#undef br1
-#undef br2
-#undef br3
+ // Report error
+error_max_decoded_size_exceeded:
+ MOVQ ctx+0(FP), AX
+ MOVQ $-1, CX
+ MOVQ CX, 40(AX)
+ RET
diff --git a/vendor/github.com/klauspost/compress/huff0/decompress_amd64.s.in b/vendor/github.com/klauspost/compress/huff0/decompress_amd64.s.in
deleted file mode 100644
index 330d86ae1..000000000
--- a/vendor/github.com/klauspost/compress/huff0/decompress_amd64.s.in
+++ /dev/null
@@ -1,195 +0,0 @@
-// +build !appengine
-// +build gc
-// +build !noasm
-
-#include "textflag.h"
-#include "funcdata.h"
-#include "go_asm.h"
-
-#ifdef GOAMD64_v4
-#ifndef GOAMD64_v3
-#define GOAMD64_v3
-#endif
-#endif
-
-#define bufoff 256 // see decompress.go, we're using [4][256]byte table
-
-//func decompress4x_main_loop_x86(pbr0, pbr1, pbr2, pbr3 *bitReaderShifted,
-// peekBits uint8, buf *byte, tbl *dEntrySingle) (int, bool)
-TEXT ·decompress4x_main_loop_x86(SB), NOSPLIT, $8
-#define off R8
-#define buffer DI
-#define table SI
-
-#define br_bits_read R9
-#define br_value R10
-#define br_offset R11
-#define peek_bits R12
-#define exhausted DX
-
-#define br0 R13
-#define br1 R14
-#define br2 R15
-#define br3 BP
-
- MOVQ BP, 0(SP)
-
- XORQ exhausted, exhausted // exhausted = false
- XORQ off, off // off = 0
-
- MOVBQZX peekBits+32(FP), peek_bits
- MOVQ buf+40(FP), buffer
- MOVQ tbl+48(FP), table
-
- MOVQ pbr0+0(FP), br0
- MOVQ pbr1+8(FP), br1
- MOVQ pbr2+16(FP), br2
- MOVQ pbr3+24(FP), br3
-
-main_loop:
-{{ define "decode_2_values_x86" }}
- // const stream = {{ var "id" }}
- // br{{ var "id"}}.fillFast()
- MOVBQZX bitReaderShifted_bitsRead(br{{ var "id" }}), br_bits_read
- MOVQ bitReaderShifted_value(br{{ var "id" }}), br_value
- MOVQ bitReaderShifted_off(br{{ var "id" }}), br_offset
-
- // We must have at least 2 * max tablelog left
- CMPQ br_bits_read, $64-22
- JBE skip_fill{{ var "id" }}
-
- SUBQ $32, br_bits_read // b.bitsRead -= 32
- SUBQ $4, br_offset // b.off -= 4
-
- // v := b.in[b.off-4 : b.off]
- // v = v[:4]
- // low := (uint32(v[0])) | (uint32(v[1]) << 8) | (uint32(v[2]) << 16) | (uint32(v[3]) << 24)
- MOVQ bitReaderShifted_in(br{{ var "id" }}), AX
-
- // b.value |= uint64(low) << (b.bitsRead & 63)
-#ifdef GOAMD64_v3
- SHLXQ br_bits_read, 0(br_offset)(AX*1), AX // AX = uint32(b.in[b.off:b.off+4]) << (b.bitsRead & 63)
-#else
- MOVL 0(br_offset)(AX*1), AX // AX = uint32(b.in[b.off:b.off+4])
- MOVQ br_bits_read, CX
- SHLQ CL, AX
-#endif
-
- ORQ AX, br_value
-
- // exhausted = exhausted || (br{{ var "id"}}.off < 4)
- CMPQ br_offset, $4
- SETLT DL
- ORB DL, DH
- // }
-skip_fill{{ var "id" }}:
-
- // val0 := br{{ var "id"}}.peekTopBits(peekBits)
-#ifdef GOAMD64_v3
- SHRXQ peek_bits, br_value, AX // AX = (value >> peek_bits) & mask
-#else
- MOVQ br_value, AX
- MOVQ peek_bits, CX
- SHRQ CL, AX // AX = (value >> peek_bits) & mask
-#endif
-
- // v0 := table[val0&mask]
- MOVW 0(table)(AX*2), AX // AX - v0
-
- // br{{ var "id"}}.advance(uint8(v0.entry))
- MOVB AH, BL // BL = uint8(v0.entry >> 8)
-
-#ifdef GOAMD64_v3
- MOVBQZX AL, CX
- SHLXQ AX, br_value, br_value // value <<= n
-#else
- MOVBQZX AL, CX
- SHLQ CL, br_value // value <<= n
-#endif
-
- ADDQ CX, br_bits_read // bits_read += n
-
-
-#ifdef GOAMD64_v3
- SHRXQ peek_bits, br_value, AX // AX = (value >> peek_bits) & mask
-#else
- // val1 := br{{ var "id"}}.peekTopBits(peekBits)
- MOVQ peek_bits, CX
- MOVQ br_value, AX
- SHRQ CL, AX // AX = (value >> peek_bits) & mask
-#endif
-
- // v1 := table[val1&mask]
- MOVW 0(table)(AX*2), AX // AX - v1
-
- // br{{ var "id"}}.advance(uint8(v1.entry))
- MOVB AH, BH // BH = uint8(v1.entry >> 8)
-
-#ifdef GOAMD64_v3
- MOVBQZX AL, CX
- SHLXQ AX, br_value, br_value // value <<= n
-#else
- MOVBQZX AL, CX
- SHLQ CL, br_value // value <<= n
-#endif
-
- ADDQ CX, br_bits_read // bits_read += n
-
-
- // these two writes get coalesced
- // buf[stream][off] = uint8(v0.entry >> 8)
- // buf[stream][off+1] = uint8(v1.entry >> 8)
- MOVW BX, {{ var "bufofs" }}(buffer)(off*1)
-
- // update the bitrader reader structure
- MOVB br_bits_read, bitReaderShifted_bitsRead(br{{ var "id" }})
- MOVQ br_value, bitReaderShifted_value(br{{ var "id" }})
- MOVQ br_offset, bitReaderShifted_off(br{{ var "id" }})
-{{ end }}
-
- {{ set "id" "0" }}
- {{ set "ofs" "0" }}
- {{ set "bufofs" "0" }} {{/* id * bufoff */}}
- {{ template "decode_2_values_x86" . }}
-
- {{ set "id" "1" }}
- {{ set "ofs" "8" }}
- {{ set "bufofs" "256" }}
- {{ template "decode_2_values_x86" . }}
-
- {{ set "id" "2" }}
- {{ set "ofs" "16" }}
- {{ set "bufofs" "512" }}
- {{ template "decode_2_values_x86" . }}
-
- {{ set "id" "3" }}
- {{ set "ofs" "24" }}
- {{ set "bufofs" "768" }}
- {{ template "decode_2_values_x86" . }}
-
- ADDQ $2, off // off += 2
-
- TESTB DH, DH // any br[i].ofs < 4?
- JNZ end
-
- CMPQ off, $bufoff
- JL main_loop
-end:
- MOVQ 0(SP), BP
-
- MOVB off, ret+56(FP)
- RET
-#undef off
-#undef buffer
-#undef table
-
-#undef br_bits_read
-#undef br_value
-#undef br_offset
-#undef peek_bits
-#undef exhausted
-
-#undef br0
-#undef br1
-#undef br2
-#undef br3
diff --git a/vendor/github.com/klauspost/compress/huff0/decompress_generic.go b/vendor/github.com/klauspost/compress/huff0/decompress_generic.go
index 126b4d68a..908c17de6 100644
--- a/vendor/github.com/klauspost/compress/huff0/decompress_generic.go
+++ b/vendor/github.com/klauspost/compress/huff0/decompress_generic.go
@@ -122,17 +122,21 @@ func (d *Decoder) Decompress4X(dst, src []byte) ([]byte, error) {
d.bufs.Put(buf)
return nil, errors.New("corruption detected: stream overrun 1")
}
- copy(out, buf[0][:])
- copy(out[dstEvery:], buf[1][:])
- copy(out[dstEvery*2:], buf[2][:])
- copy(out[dstEvery*3:], buf[3][:])
- out = out[bufoff:]
- decoded += bufoff * 4
// There must at least be 3 buffers left.
- if len(out) < dstEvery*3 {
+ if len(out)-bufoff < dstEvery*3 {
d.bufs.Put(buf)
return nil, errors.New("corruption detected: stream overrun 2")
}
+ //copy(out, buf[0][:])
+ //copy(out[dstEvery:], buf[1][:])
+ //copy(out[dstEvery*2:], buf[2][:])
+ //copy(out[dstEvery*3:], buf[3][:])
+ *(*[bufoff]byte)(out) = buf[0]
+ *(*[bufoff]byte)(out[dstEvery:]) = buf[1]
+ *(*[bufoff]byte)(out[dstEvery*2:]) = buf[2]
+ *(*[bufoff]byte)(out[dstEvery*3:]) = buf[3]
+ out = out[bufoff:]
+ decoded += bufoff * 4
}
}
if off > 0 {
@@ -191,3 +195,105 @@ func (d *Decoder) Decompress4X(dst, src []byte) ([]byte, error) {
}
return dst, nil
}
+
+// Decompress1X will decompress a 1X encoded stream.
+// The cap of the output buffer will be the maximum decompressed size.
+// The length of the supplied input must match the end of a block exactly.
+func (d *Decoder) Decompress1X(dst, src []byte) ([]byte, error) {
+ if len(d.dt.single) == 0 {
+ return nil, errors.New("no table loaded")
+ }
+ if use8BitTables && d.actualTableLog <= 8 {
+ return d.decompress1X8Bit(dst, src)
+ }
+ var br bitReaderShifted
+ err := br.init(src)
+ if err != nil {
+ return dst, err
+ }
+ maxDecodedSize := cap(dst)
+ dst = dst[:0]
+
+ // Avoid bounds check by always having full sized table.
+ const tlSize = 1 << tableLogMax
+ const tlMask = tlSize - 1
+ dt := d.dt.single[:tlSize]
+
+ // Use temp table to avoid bound checks/append penalty.
+ bufs := d.buffer()
+ buf := &bufs[0]
+ var off uint8
+
+ for br.off >= 8 {
+ br.fillFast()
+ v := dt[br.peekBitsFast(d.actualTableLog)&tlMask]
+ br.advance(uint8(v.entry))
+ buf[off+0] = uint8(v.entry >> 8)
+
+ v = dt[br.peekBitsFast(d.actualTableLog)&tlMask]
+ br.advance(uint8(v.entry))
+ buf[off+1] = uint8(v.entry >> 8)
+
+ // Refill
+ br.fillFast()
+
+ v = dt[br.peekBitsFast(d.actualTableLog)&tlMask]
+ br.advance(uint8(v.entry))
+ buf[off+2] = uint8(v.entry >> 8)
+
+ v = dt[br.peekBitsFast(d.actualTableLog)&tlMask]
+ br.advance(uint8(v.entry))
+ buf[off+3] = uint8(v.entry >> 8)
+
+ off += 4
+ if off == 0 {
+ if len(dst)+256 > maxDecodedSize {
+ br.close()
+ d.bufs.Put(bufs)
+ return nil, ErrMaxDecodedSizeExceeded
+ }
+ dst = append(dst, buf[:]...)
+ }
+ }
+
+ if len(dst)+int(off) > maxDecodedSize {
+ d.bufs.Put(bufs)
+ br.close()
+ return nil, ErrMaxDecodedSizeExceeded
+ }
+ dst = append(dst, buf[:off]...)
+
+ // br < 8, so uint8 is fine
+ bitsLeft := uint8(br.off)*8 + 64 - br.bitsRead
+ for bitsLeft > 0 {
+ br.fill()
+ if false && br.bitsRead >= 32 {
+ if br.off >= 4 {
+ v := br.in[br.off-4:]
+ v = v[:4]
+ low := (uint32(v[0])) | (uint32(v[1]) << 8) | (uint32(v[2]) << 16) | (uint32(v[3]) << 24)
+ br.value = (br.value << 32) | uint64(low)
+ br.bitsRead -= 32
+ br.off -= 4
+ } else {
+ for br.off > 0 {
+ br.value = (br.value << 8) | uint64(br.in[br.off-1])
+ br.bitsRead -= 8
+ br.off--
+ }
+ }
+ }
+ if len(dst) >= maxDecodedSize {
+ d.bufs.Put(bufs)
+ br.close()
+ return nil, ErrMaxDecodedSizeExceeded
+ }
+ v := d.dt.single[br.peekBitsFast(d.actualTableLog)&tlMask]
+ nBits := uint8(v.entry)
+ br.advance(nBits)
+ bitsLeft -= nBits
+ dst = append(dst, uint8(v.entry>>8))
+ }
+ d.bufs.Put(bufs)
+ return dst, br.close()
+}
diff --git a/vendor/github.com/klauspost/compress/internal/cpuinfo/cpuinfo.go b/vendor/github.com/klauspost/compress/internal/cpuinfo/cpuinfo.go
new file mode 100644
index 000000000..3954c5121
--- /dev/null
+++ b/vendor/github.com/klauspost/compress/internal/cpuinfo/cpuinfo.go
@@ -0,0 +1,34 @@
+// Package cpuinfo gives runtime info about the current CPU.
+//
+// This is a very limited module meant for use internally
+// in this project. For more versatile solution check
+// https://github.com/klauspost/cpuid.
+package cpuinfo
+
+// HasBMI1 checks whether an x86 CPU supports the BMI1 extension.
+func HasBMI1() bool {
+ return hasBMI1
+}
+
+// HasBMI2 checks whether an x86 CPU supports the BMI2 extension.
+func HasBMI2() bool {
+ return hasBMI2
+}
+
+// DisableBMI2 will disable BMI2, for testing purposes.
+// Call returned function to restore previous state.
+func DisableBMI2() func() {
+ old := hasBMI2
+ hasBMI2 = false
+ return func() {
+ hasBMI2 = old
+ }
+}
+
+// HasBMI checks whether an x86 CPU supports both BMI1 and BMI2 extensions.
+func HasBMI() bool {
+ return HasBMI1() && HasBMI2()
+}
+
+var hasBMI1 bool
+var hasBMI2 bool
diff --git a/vendor/github.com/klauspost/compress/internal/cpuinfo/cpuinfo_amd64.go b/vendor/github.com/klauspost/compress/internal/cpuinfo/cpuinfo_amd64.go
new file mode 100644
index 000000000..e802579c4
--- /dev/null
+++ b/vendor/github.com/klauspost/compress/internal/cpuinfo/cpuinfo_amd64.go
@@ -0,0 +1,11 @@
+//go:build amd64 && !appengine && !noasm && gc
+// +build amd64,!appengine,!noasm,gc
+
+package cpuinfo
+
+// go:noescape
+func x86extensions() (bmi1, bmi2 bool)
+
+func init() {
+ hasBMI1, hasBMI2 = x86extensions()
+}
diff --git a/vendor/github.com/klauspost/compress/internal/cpuinfo/cpuinfo_amd64.s b/vendor/github.com/klauspost/compress/internal/cpuinfo/cpuinfo_amd64.s
new file mode 100644
index 000000000..4465fbe9e
--- /dev/null
+++ b/vendor/github.com/klauspost/compress/internal/cpuinfo/cpuinfo_amd64.s
@@ -0,0 +1,36 @@
+// +build !appengine
+// +build gc
+// +build !noasm
+
+#include "textflag.h"
+#include "funcdata.h"
+#include "go_asm.h"
+
+TEXT ·x86extensions(SB), NOSPLIT, $0
+ // 1. determine max EAX value
+ XORQ AX, AX
+ CPUID
+
+ CMPQ AX, $7
+ JB unsupported
+
+ // 2. EAX = 7, ECX = 0 --- see Table 3-8 "Information Returned by CPUID Instruction"
+ MOVQ $7, AX
+ MOVQ $0, CX
+ CPUID
+
+ BTQ $3, BX // bit 3 = BMI1
+ SETCS AL
+
+ BTQ $8, BX // bit 8 = BMI2
+ SETCS AH
+
+ MOVB AL, bmi1+0(FP)
+ MOVB AH, bmi2+1(FP)
+ RET
+
+unsupported:
+ XORQ AX, AX
+ MOVB AL, bmi1+0(FP)
+ MOVB AL, bmi2+1(FP)
+ RET
diff --git a/vendor/github.com/klauspost/compress/internal/snapref/encode_other.go b/vendor/github.com/klauspost/compress/internal/snapref/encode_other.go
index 511bba65d..05db94d39 100644
--- a/vendor/github.com/klauspost/compress/internal/snapref/encode_other.go
+++ b/vendor/github.com/klauspost/compress/internal/snapref/encode_other.go
@@ -18,6 +18,7 @@ func load64(b []byte, i int) uint64 {
// emitLiteral writes a literal chunk and returns the number of bytes written.
//
// It assumes that:
+//
// dst is long enough to hold the encoded bytes
// 1 <= len(lit) && len(lit) <= 65536
func emitLiteral(dst, lit []byte) int {
@@ -42,6 +43,7 @@ func emitLiteral(dst, lit []byte) int {
// emitCopy writes a copy chunk and returns the number of bytes written.
//
// It assumes that:
+//
// dst is long enough to hold the encoded bytes
// 1 <= offset && offset <= 65535
// 4 <= length && length <= 65535
@@ -89,6 +91,7 @@ func emitCopy(dst []byte, offset, length int) int {
// src[i:i+k-j] and src[j:k] have the same contents.
//
// It assumes that:
+//
// 0 <= i && i < j && j <= len(src)
func extendMatch(src []byte, i, j int) int {
for ; j < len(src) && src[i] == src[j]; i, j = i+1, j+1 {
@@ -100,13 +103,36 @@ func hash(u, shift uint32) uint32 {
return (u * 0x1e35a7bd) >> shift
}
+// EncodeBlockInto exposes encodeBlock but checks dst size.
+func EncodeBlockInto(dst, src []byte) (d int) {
+ if MaxEncodedLen(len(src)) > len(dst) {
+ return 0
+ }
+
+ // encodeBlock breaks on too big blocks, so split.
+ for len(src) > 0 {
+ p := src
+ src = nil
+ if len(p) > maxBlockSize {
+ p, src = p[:maxBlockSize], p[maxBlockSize:]
+ }
+ if len(p) < minNonLiteralBlockSize {
+ d += emitLiteral(dst[d:], p)
+ } else {
+ d += encodeBlock(dst[d:], p)
+ }
+ }
+ return d
+}
+
// encodeBlock encodes a non-empty src to a guaranteed-large-enough dst. It
// assumes that the varint-encoded length of the decompressed bytes has already
// been written.
//
// It also assumes that:
+//
// len(dst) >= MaxEncodedLen(len(src)) &&
-// minNonLiteralBlockSize <= len(src) && len(src) <= maxBlockSize
+// minNonLiteralBlockSize <= len(src) && len(src) <= maxBlockSize
func encodeBlock(dst, src []byte) (d int) {
// Initialize the hash table. Its size ranges from 1<<8 to 1<<14 inclusive.
// The table element type is uint16, as s < sLimit and sLimit < len(src)
diff --git a/vendor/github.com/klauspost/compress/zstd/README.md b/vendor/github.com/klauspost/compress/zstd/README.md
index e3445ac19..65b38abed 100644
--- a/vendor/github.com/klauspost/compress/zstd/README.md
+++ b/vendor/github.com/klauspost/compress/zstd/README.md
@@ -12,6 +12,8 @@ The `zstd` package is provided as open source software using a Go standard licen
Currently the package is heavily optimized for 64 bit processors and will be significantly slower on 32 bit processors.
+For seekable zstd streams, see [this excellent package](https://github.com/SaveTheRbtz/zstd-seekable-format-go).
+
## Installation
Install using `go get -u github.com/klauspost/compress`. The package is located in `github.com/klauspost/compress/zstd`.
@@ -386,47 +388,31 @@ In practice this means that concurrency is often limited to utilizing about 3 co
### Benchmarks
-These are some examples of performance compared to [datadog cgo library](https://github.com/DataDog/zstd).
-
The first two are streaming decodes and the last are smaller inputs.
-
+
+Running on AMD Ryzen 9 3950X 16-Core Processor. AMD64 assembly used.
+
```
-BenchmarkDecoderSilesia-8 3 385000067 ns/op 550.51 MB/s 5498 B/op 8 allocs/op
-BenchmarkDecoderSilesiaCgo-8 6 197666567 ns/op 1072.25 MB/s 270672 B/op 8 allocs/op
-
-BenchmarkDecoderEnwik9-8 1 2027001600 ns/op 493.34 MB/s 10496 B/op 18 allocs/op
-BenchmarkDecoderEnwik9Cgo-8 2 979499200 ns/op 1020.93 MB/s 270672 B/op 8 allocs/op
-
-Concurrent performance:
-
-BenchmarkDecoder_DecodeAllParallel/kppkn.gtb.zst-16 28915 42469 ns/op 4340.07 MB/s 114 B/op 0 allocs/op
-BenchmarkDecoder_DecodeAllParallel/geo.protodata.zst-16 116505 9965 ns/op 11900.16 MB/s 16 B/op 0 allocs/op
-BenchmarkDecoder_DecodeAllParallel/plrabn12.txt.zst-16 8952 134272 ns/op 3588.70 MB/s 915 B/op 0 allocs/op
-BenchmarkDecoder_DecodeAllParallel/lcet10.txt.zst-16 11820 102538 ns/op 4161.90 MB/s 594 B/op 0 allocs/op
-BenchmarkDecoder_DecodeAllParallel/asyoulik.txt.zst-16 34782 34184 ns/op 3661.88 MB/s 60 B/op 0 allocs/op
-BenchmarkDecoder_DecodeAllParallel/alice29.txt.zst-16 27712 43447 ns/op 3500.58 MB/s 99 B/op 0 allocs/op
-BenchmarkDecoder_DecodeAllParallel/html_x_4.zst-16 62826 18750 ns/op 21845.10 MB/s 104 B/op 0 allocs/op
-BenchmarkDecoder_DecodeAllParallel/paper-100k.pdf.zst-16 631545 1794 ns/op 57078.74 MB/s 2 B/op 0 allocs/op
-BenchmarkDecoder_DecodeAllParallel/fireworks.jpeg.zst-16 1690140 712 ns/op 172938.13 MB/s 1 B/op 0 allocs/op
-BenchmarkDecoder_DecodeAllParallel/urls.10K.zst-16 10432 113593 ns/op 6180.73 MB/s 1143 B/op 0 allocs/op
-BenchmarkDecoder_DecodeAllParallel/html.zst-16 113206 10671 ns/op 9596.27 MB/s 15 B/op 0 allocs/op
-BenchmarkDecoder_DecodeAllParallel/comp-data.bin.zst-16 1530615 779 ns/op 5229.49 MB/s 0 B/op 0 allocs/op
-
-BenchmarkDecoder_DecodeAllParallelCgo/kppkn.gtb.zst-16 65217 16192 ns/op 11383.34 MB/s 46 B/op 0 allocs/op
-BenchmarkDecoder_DecodeAllParallelCgo/geo.protodata.zst-16 292671 4039 ns/op 29363.19 MB/s 6 B/op 0 allocs/op
-BenchmarkDecoder_DecodeAllParallelCgo/plrabn12.txt.zst-16 26314 46021 ns/op 10470.43 MB/s 293 B/op 0 allocs/op
-BenchmarkDecoder_DecodeAllParallelCgo/lcet10.txt.zst-16 33897 34900 ns/op 12227.96 MB/s 205 B/op 0 allocs/op
-BenchmarkDecoder_DecodeAllParallelCgo/asyoulik.txt.zst-16 104348 11433 ns/op 10949.01 MB/s 20 B/op 0 allocs/op
-BenchmarkDecoder_DecodeAllParallelCgo/alice29.txt.zst-16 75949 15510 ns/op 9805.60 MB/s 32 B/op 0 allocs/op
-BenchmarkDecoder_DecodeAllParallelCgo/html_x_4.zst-16 173910 6756 ns/op 60624.29 MB/s 37 B/op 0 allocs/op
-BenchmarkDecoder_DecodeAllParallelCgo/paper-100k.pdf.zst-16 923076 1339 ns/op 76474.87 MB/s 1 B/op 0 allocs/op
-BenchmarkDecoder_DecodeAllParallelCgo/fireworks.jpeg.zst-16 922920 1351 ns/op 91102.57 MB/s 2 B/op 0 allocs/op
-BenchmarkDecoder_DecodeAllParallelCgo/urls.10K.zst-16 27649 43618 ns/op 16096.19 MB/s 407 B/op 0 allocs/op
-BenchmarkDecoder_DecodeAllParallelCgo/html.zst-16 279073 4160 ns/op 24614.18 MB/s 6 B/op 0 allocs/op
-BenchmarkDecoder_DecodeAllParallelCgo/comp-data.bin.zst-16 749938 1579 ns/op 2581.71 MB/s 0 B/op 0 allocs/op
+BenchmarkDecoderSilesia-32 5 206878840 ns/op 1024.50 MB/s 49808 B/op 43 allocs/op
+BenchmarkDecoderEnwik9-32 1 1271809000 ns/op 786.28 MB/s 72048 B/op 52 allocs/op
+
+Concurrent blocks, performance:
+
+BenchmarkDecoder_DecodeAllParallel/kppkn.gtb.zst-32 67356 17857 ns/op 10321.96 MB/s 22.48 pct 102 B/op 0 allocs/op
+BenchmarkDecoder_DecodeAllParallel/geo.protodata.zst-32 266656 4421 ns/op 26823.21 MB/s 11.89 pct 19 B/op 0 allocs/op
+BenchmarkDecoder_DecodeAllParallel/plrabn12.txt.zst-32 20992 56842 ns/op 8477.17 MB/s 39.90 pct 754 B/op 0 allocs/op
+BenchmarkDecoder_DecodeAllParallel/lcet10.txt.zst-32 27456 43932 ns/op 9714.01 MB/s 33.27 pct 524 B/op 0 allocs/op
+BenchmarkDecoder_DecodeAllParallel/asyoulik.txt.zst-32 78432 15047 ns/op 8319.15 MB/s 40.34 pct 66 B/op 0 allocs/op
+BenchmarkDecoder_DecodeAllParallel/alice29.txt.zst-32 65800 18436 ns/op 8249.63 MB/s 37.75 pct 88 B/op 0 allocs/op
+BenchmarkDecoder_DecodeAllParallel/html_x_4.zst-32 102993 11523 ns/op 35546.09 MB/s 3.637 pct 143 B/op 0 allocs/op
+BenchmarkDecoder_DecodeAllParallel/paper-100k.pdf.zst-32 1000000 1070 ns/op 95720.98 MB/s 80.53 pct 3 B/op 0 allocs/op
+BenchmarkDecoder_DecodeAllParallel/fireworks.jpeg.zst-32 749802 1752 ns/op 70272.35 MB/s 100.0 pct 5 B/op 0 allocs/op
+BenchmarkDecoder_DecodeAllParallel/urls.10K.zst-32 22640 52934 ns/op 13263.37 MB/s 26.25 pct 1014 B/op 0 allocs/op
+BenchmarkDecoder_DecodeAllParallel/html.zst-32 226412 5232 ns/op 19572.27 MB/s 14.49 pct 20 B/op 0 allocs/op
+BenchmarkDecoder_DecodeAllParallel/comp-data.bin.zst-32 923041 1276 ns/op 3194.71 MB/s 31.26 pct 0 B/op 0 allocs/op
```
-This reflects the performance around May 2020, but this may be out of date.
+This reflects the performance around May 2022, but this may be out of date.
## Zstd inside ZIP files
diff --git a/vendor/github.com/klauspost/compress/zstd/bitreader.go b/vendor/github.com/klauspost/compress/zstd/bitreader.go
index d7cd15ba2..97299d499 100644
--- a/vendor/github.com/klauspost/compress/zstd/bitreader.go
+++ b/vendor/github.com/klauspost/compress/zstd/bitreader.go
@@ -63,13 +63,6 @@ func (b *bitReader) get32BitsFast(n uint8) uint32 {
return v
}
-func (b *bitReader) get16BitsFast(n uint8) uint16 {
- const regMask = 64 - 1
- v := uint16((b.value << (b.bitsRead & regMask)) >> ((regMask + 1 - n) & regMask))
- b.bitsRead += n
- return v
-}
-
// fillFast() will make sure at least 32 bits are available.
// There must be at least 4 bytes available.
func (b *bitReader) fillFast() {
diff --git a/vendor/github.com/klauspost/compress/zstd/bitwriter.go b/vendor/github.com/klauspost/compress/zstd/bitwriter.go
index b36618285..78b3c61be 100644
--- a/vendor/github.com/klauspost/compress/zstd/bitwriter.go
+++ b/vendor/github.com/klauspost/compress/zstd/bitwriter.go
@@ -5,8 +5,6 @@
package zstd
-import "fmt"
-
// bitWriter will write bits.
// First bit will be LSB of the first byte of output.
type bitWriter struct {
@@ -73,80 +71,6 @@ func (b *bitWriter) addBits16Clean(value uint16, bits uint8) {
b.nBits += bits
}
-// flush will flush all pending full bytes.
-// There will be at least 56 bits available for writing when this has been called.
-// Using flush32 is faster, but leaves less space for writing.
-func (b *bitWriter) flush() {
- v := b.nBits >> 3
- switch v {
- case 0:
- case 1:
- b.out = append(b.out,
- byte(b.bitContainer),
- )
- case 2:
- b.out = append(b.out,
- byte(b.bitContainer),
- byte(b.bitContainer>>8),
- )
- case 3:
- b.out = append(b.out,
- byte(b.bitContainer),
- byte(b.bitContainer>>8),
- byte(b.bitContainer>>16),
- )
- case 4:
- b.out = append(b.out,
- byte(b.bitContainer),
- byte(b.bitContainer>>8),
- byte(b.bitContainer>>16),
- byte(b.bitContainer>>24),
- )
- case 5:
- b.out = append(b.out,
- byte(b.bitContainer),
- byte(b.bitContainer>>8),
- byte(b.bitContainer>>16),
- byte(b.bitContainer>>24),
- byte(b.bitContainer>>32),
- )
- case 6:
- b.out = append(b.out,
- byte(b.bitContainer),
- byte(b.bitContainer>>8),
- byte(b.bitContainer>>16),
- byte(b.bitContainer>>24),
- byte(b.bitContainer>>32),
- byte(b.bitContainer>>40),
- )
- case 7:
- b.out = append(b.out,
- byte(b.bitContainer),
- byte(b.bitContainer>>8),
- byte(b.bitContainer>>16),
- byte(b.bitContainer>>24),
- byte(b.bitContainer>>32),
- byte(b.bitContainer>>40),
- byte(b.bitContainer>>48),
- )
- case 8:
- b.out = append(b.out,
- byte(b.bitContainer),
- byte(b.bitContainer>>8),
- byte(b.bitContainer>>16),
- byte(b.bitContainer>>24),
- byte(b.bitContainer>>32),
- byte(b.bitContainer>>40),
- byte(b.bitContainer>>48),
- byte(b.bitContainer>>56),
- )
- default:
- panic(fmt.Errorf("bits (%d) > 64", b.nBits))
- }
- b.bitContainer >>= v << 3
- b.nBits &= 7
-}
-
// flush32 will flush out, so there are at least 32 bits available for writing.
func (b *bitWriter) flush32() {
if b.nBits < 32 {
diff --git a/vendor/github.com/klauspost/compress/zstd/blockdec.go b/vendor/github.com/klauspost/compress/zstd/blockdec.go
index 7d567a54a..5f272d87f 100644
--- a/vendor/github.com/klauspost/compress/zstd/blockdec.go
+++ b/vendor/github.com/klauspost/compress/zstd/blockdec.go
@@ -5,9 +5,14 @@
package zstd
import (
+ "bytes"
+ "encoding/binary"
"errors"
"fmt"
+ "hash/crc32"
"io"
+ "os"
+ "path/filepath"
"sync"
"github.com/klauspost/compress/huff0"
@@ -38,14 +43,14 @@ const (
// maxCompressedBlockSize is the biggest allowed compressed block size (128KB)
maxCompressedBlockSize = 128 << 10
+ compressedBlockOverAlloc = 16
+ maxCompressedBlockSizeAlloc = 128<<10 + compressedBlockOverAlloc
+
// Maximum possible block size (all Raw+Uncompressed).
maxBlockSize = (1 << 21) - 1
- // https://github.com/facebook/zstd/blob/dev/doc/zstd_compression_format.md#literals_section_header
- maxCompressedLiteralSize = 1 << 18
- maxRLELiteralSize = 1 << 20
- maxMatchLen = 131074
- maxSequences = 0x7f00 + 0xffff
+ maxMatchLen = 131074
+ maxSequences = 0x7f00 + 0xffff
// We support slightly less than the reference decoder to be able to
// use ints on 32 bit archs.
@@ -78,8 +83,9 @@ type blockDec struct {
err error
- // Check against this crc
- checkCRC []byte
+ // Check against this crc, if hasCRC is true.
+ checkCRC uint32
+ hasCRC bool
// Frame to use for singlethreaded decoding.
// Should not be used by the decoder itself since parent may be another frame.
@@ -97,7 +103,6 @@ type blockDec struct {
// Block is RLE, this is the size.
RLESize uint32
- tmp [4]byte
Type blockType
@@ -136,7 +141,7 @@ func (b *blockDec) reset(br byteBuffer, windowSize uint64) error {
b.Type = blockType((bh >> 1) & 3)
// find size.
cSize := int(bh >> 3)
- maxSize := maxBlockSize
+ maxSize := maxCompressedBlockSizeAlloc
switch b.Type {
case blockTypeReserved:
return ErrReservedBlockType
@@ -157,9 +162,9 @@ func (b *blockDec) reset(br byteBuffer, windowSize uint64) error {
println("Data size on stream:", cSize)
}
b.RLESize = 0
- maxSize = maxCompressedBlockSize
+ maxSize = maxCompressedBlockSizeAlloc
if windowSize < maxCompressedBlockSize && b.lowMem {
- maxSize = int(windowSize)
+ maxSize = int(windowSize) + compressedBlockOverAlloc
}
if cSize > maxCompressedBlockSize || uint64(cSize) > b.WindowSize {
if debugDecoder {
@@ -188,16 +193,14 @@ func (b *blockDec) reset(br byteBuffer, windowSize uint64) error {
}
// Read block data.
- if cap(b.dataStorage) < cSize {
+ if _, ok := br.(*byteBuf); !ok && cap(b.dataStorage) < cSize {
+ // byteBuf doesn't need a destination buffer.
if b.lowMem || cSize > maxCompressedBlockSize {
- b.dataStorage = make([]byte, 0, cSize)
+ b.dataStorage = make([]byte, 0, cSize+compressedBlockOverAlloc)
} else {
- b.dataStorage = make([]byte, 0, maxCompressedBlockSize)
+ b.dataStorage = make([]byte, 0, maxCompressedBlockSizeAlloc)
}
}
- if cap(b.dst) <= maxSize {
- b.dst = make([]byte, 0, maxSize+1)
- }
b.data, err = br.readBig(cSize, b.dataStorage)
if err != nil {
if debugDecoder {
@@ -206,6 +209,9 @@ func (b *blockDec) reset(br byteBuffer, windowSize uint64) error {
}
return err
}
+ if cap(b.dst) <= maxSize {
+ b.dst = make([]byte, 0, maxSize+1)
+ }
return nil
}
@@ -229,7 +235,7 @@ func (b *blockDec) decodeBuf(hist *history) error {
if b.lowMem {
b.dst = make([]byte, b.RLESize)
} else {
- b.dst = make([]byte, maxBlockSize)
+ b.dst = make([]byte, maxCompressedBlockSize)
}
}
b.dst = b.dst[:b.RLESize]
@@ -360,14 +366,9 @@ func (b *blockDec) decodeLiterals(in []byte, hist *history) (remain []byte, err
}
if cap(b.literalBuf) < litRegenSize {
if b.lowMem {
- b.literalBuf = make([]byte, litRegenSize)
+ b.literalBuf = make([]byte, litRegenSize, litRegenSize+compressedBlockOverAlloc)
} else {
- if litRegenSize > maxCompressedLiteralSize {
- // Exceptional
- b.literalBuf = make([]byte, litRegenSize)
- } else {
- b.literalBuf = make([]byte, litRegenSize, maxCompressedLiteralSize)
- }
+ b.literalBuf = make([]byte, litRegenSize, maxCompressedBlockSize+compressedBlockOverAlloc)
}
}
literals = b.literalBuf[:litRegenSize]
@@ -397,14 +398,14 @@ func (b *blockDec) decodeLiterals(in []byte, hist *history) (remain []byte, err
// Ensure we have space to store it.
if cap(b.literalBuf) < litRegenSize {
if b.lowMem {
- b.literalBuf = make([]byte, 0, litRegenSize)
+ b.literalBuf = make([]byte, 0, litRegenSize+compressedBlockOverAlloc)
} else {
- b.literalBuf = make([]byte, 0, maxCompressedLiteralSize)
+ b.literalBuf = make([]byte, 0, maxCompressedBlockSize+compressedBlockOverAlloc)
}
}
var err error
// Use our out buffer.
- huff.MaxDecodedSize = maxCompressedBlockSize
+ huff.MaxDecodedSize = litRegenSize
if fourStreams {
literals, err = huff.Decoder().Decompress4X(b.literalBuf[:0:litRegenSize], literals)
} else {
@@ -429,9 +430,9 @@ func (b *blockDec) decodeLiterals(in []byte, hist *history) (remain []byte, err
// Ensure we have space to store it.
if cap(b.literalBuf) < litRegenSize {
if b.lowMem {
- b.literalBuf = make([]byte, 0, litRegenSize)
+ b.literalBuf = make([]byte, 0, litRegenSize+compressedBlockOverAlloc)
} else {
- b.literalBuf = make([]byte, 0, maxCompressedBlockSize)
+ b.literalBuf = make([]byte, 0, maxCompressedBlockSize+compressedBlockOverAlloc)
}
}
huff := hist.huffTree
@@ -442,13 +443,16 @@ func (b *blockDec) decodeLiterals(in []byte, hist *history) (remain []byte, err
}
}
var err error
+ if debugDecoder {
+ println("huff table input:", len(literals), "CRC:", crc32.ChecksumIEEE(literals))
+ }
huff, literals, err = huff0.ReadTable(literals, huff)
if err != nil {
println("reading huffman table:", err)
return in, err
}
hist.huffTree = huff
- huff.MaxDecodedSize = maxCompressedBlockSize
+ huff.MaxDecodedSize = litRegenSize
// Use our out buffer.
if fourStreams {
literals, err = huff.Decoder().Decompress4X(b.literalBuf[:0:litRegenSize], literals)
@@ -463,6 +467,8 @@ func (b *blockDec) decodeLiterals(in []byte, hist *history) (remain []byte, err
if len(literals) != litRegenSize {
return in, fmt.Errorf("literal output size mismatch want %d, got %d", litRegenSize, len(literals))
}
+ // Re-cap to get extra size.
+ literals = b.literalBuf[:len(literals)]
if debugDecoder {
printf("Decompressed %d literals into %d bytes\n", litCompSize, litRegenSize)
}
@@ -486,10 +492,15 @@ func (b *blockDec) decodeCompressed(hist *history) error {
b.dst = append(b.dst, hist.decoders.literals...)
return nil
}
- err = hist.decoders.decodeSync(hist)
+ before := len(hist.decoders.out)
+ err = hist.decoders.decodeSync(hist.b[hist.ignoreBuffer:])
if err != nil {
return err
}
+ if hist.decoders.maxSyncLen > 0 {
+ hist.decoders.maxSyncLen += uint64(before)
+ hist.decoders.maxSyncLen -= uint64(len(hist.decoders.out))
+ }
b.dst = hist.decoders.out
hist.recentOffsets = hist.decoders.prevOffset
return nil
@@ -632,6 +643,22 @@ func (b *blockDec) prepareSequences(in []byte, hist *history) (err error) {
println("initializing sequences:", err)
return err
}
+ // Extract blocks...
+ if false && hist.dict == nil {
+ fatalErr := func(err error) {
+ if err != nil {
+ panic(err)
+ }
+ }
+ fn := fmt.Sprintf("n-%d-lits-%d-prev-%d-%d-%d-win-%d.blk", hist.decoders.nSeqs, len(hist.decoders.literals), hist.recentOffsets[0], hist.recentOffsets[1], hist.recentOffsets[2], hist.windowSize)
+ var buf bytes.Buffer
+ fatalErr(binary.Write(&buf, binary.LittleEndian, hist.decoders.litLengths.fse))
+ fatalErr(binary.Write(&buf, binary.LittleEndian, hist.decoders.matchLengths.fse))
+ fatalErr(binary.Write(&buf, binary.LittleEndian, hist.decoders.offsets.fse))
+ buf.Write(in)
+ os.WriteFile(filepath.Join("testdata", "seqs", fn), buf.Bytes(), os.ModePerm)
+ }
+
return nil
}
@@ -650,6 +677,7 @@ func (b *blockDec) decodeSequences(hist *history) error {
}
hist.decoders.windowSize = hist.windowSize
hist.decoders.prevOffset = hist.recentOffsets
+
err := hist.decoders.decode(b.sequence)
hist.recentOffsets = hist.decoders.prevOffset
return err
diff --git a/vendor/github.com/klauspost/compress/zstd/blockenc.go b/vendor/github.com/klauspost/compress/zstd/blockenc.go
index 12e8f6f0b..fd4a36f73 100644
--- a/vendor/github.com/klauspost/compress/zstd/blockenc.go
+++ b/vendor/github.com/klauspost/compress/zstd/blockenc.go
@@ -473,7 +473,7 @@ func (b *blockEnc) encode(org []byte, raw, rawAllLits bool) error {
return b.encodeLits(b.literals, rawAllLits)
}
// We want some difference to at least account for the headers.
- saved := b.size - len(b.literals) - (b.size >> 5)
+ saved := b.size - len(b.literals) - (b.size >> 6)
if saved < 16 {
if org == nil {
return errIncompressible
@@ -779,10 +779,13 @@ func (b *blockEnc) encode(org []byte, raw, rawAllLits bool) error {
}
b.output = wr.out
+ // Maybe even add a bigger margin.
if len(b.output)-3-bhOffset >= b.size {
- // Maybe even add a bigger margin.
+ // Discard and encode as raw block.
+ b.output = b.encodeRawTo(b.output[:bhOffset], org)
+ b.popOffsets()
b.litEnc.Reuse = huff0.ReusePolicyNone
- return errIncompressible
+ return nil
}
// Size is output minus block header.
diff --git a/vendor/github.com/klauspost/compress/zstd/bytebuf.go b/vendor/github.com/klauspost/compress/zstd/bytebuf.go
index b80191e4b..55a388553 100644
--- a/vendor/github.com/klauspost/compress/zstd/bytebuf.go
+++ b/vendor/github.com/klauspost/compress/zstd/bytebuf.go
@@ -7,7 +7,6 @@ package zstd
import (
"fmt"
"io"
- "io/ioutil"
)
type byteBuffer interface {
@@ -23,7 +22,7 @@ type byteBuffer interface {
readByte() (byte, error)
// Skip n bytes.
- skipN(n int) error
+ skipN(n int64) error
}
// in-memory buffer
@@ -52,23 +51,22 @@ func (b *byteBuf) readBig(n int, dst []byte) ([]byte, error) {
return r, nil
}
-func (b *byteBuf) remain() []byte {
- return *b
-}
-
func (b *byteBuf) readByte() (byte, error) {
bb := *b
if len(bb) < 1 {
- return 0, nil
+ return 0, io.ErrUnexpectedEOF
}
r := bb[0]
*b = bb[1:]
return r, nil
}
-func (b *byteBuf) skipN(n int) error {
+func (b *byteBuf) skipN(n int64) error {
bb := *b
- if len(bb) < n {
+ if n < 0 {
+ return fmt.Errorf("negative skip (%d) requested", n)
+ }
+ if int64(len(bb)) < n {
return io.ErrUnexpectedEOF
}
*b = bb[n:]
@@ -111,7 +109,7 @@ func (r *readerWrapper) readBig(n int, dst []byte) ([]byte, error) {
}
func (r *readerWrapper) readByte() (byte, error) {
- n2, err := r.r.Read(r.tmp[:1])
+ n2, err := io.ReadFull(r.r, r.tmp[:1])
if err != nil {
if err == io.EOF {
err = io.ErrUnexpectedEOF
@@ -124,9 +122,9 @@ func (r *readerWrapper) readByte() (byte, error) {
return r.tmp[0], nil
}
-func (r *readerWrapper) skipN(n int) error {
- n2, err := io.CopyN(ioutil.Discard, r.r, int64(n))
- if n2 != int64(n) {
+func (r *readerWrapper) skipN(n int64) error {
+ n2, err := io.CopyN(io.Discard, r.r, n)
+ if n2 != n {
err = io.ErrUnexpectedEOF
}
return err
diff --git a/vendor/github.com/klauspost/compress/zstd/bytereader.go b/vendor/github.com/klauspost/compress/zstd/bytereader.go
index 2c4fca17f..0e59a242d 100644
--- a/vendor/github.com/klauspost/compress/zstd/bytereader.go
+++ b/vendor/github.com/klauspost/compress/zstd/bytereader.go
@@ -13,12 +13,6 @@ type byteReader struct {
off int
}
-// init will initialize the reader and set the input.
-func (b *byteReader) init(in []byte) {
- b.b = in
- b.off = 0
-}
-
// advance the stream b n bytes.
func (b *byteReader) advance(n uint) {
b.off += int(n)
diff --git a/vendor/github.com/klauspost/compress/zstd/decodeheader.go b/vendor/github.com/klauspost/compress/zstd/decodeheader.go
index 5022e71c8..f6a240970 100644
--- a/vendor/github.com/klauspost/compress/zstd/decodeheader.go
+++ b/vendor/github.com/klauspost/compress/zstd/decodeheader.go
@@ -4,7 +4,6 @@
package zstd
import (
- "bytes"
"encoding/binary"
"errors"
"io"
@@ -102,8 +101,8 @@ func (h *Header) Decode(in []byte) error {
}
h.HeaderSize += 4
b, in := in[:4], in[4:]
- if !bytes.Equal(b, frameMagic) {
- if !bytes.Equal(b[1:4], skippableFrameMagic) || b[0]&0xf0 != 0x50 {
+ if string(b) != frameMagic {
+ if string(b[1:4]) != skippableFrameMagic || b[0]&0xf0 != 0x50 {
return ErrMagicMismatch
}
if len(in) < 4 {
@@ -153,7 +152,7 @@ func (h *Header) Decode(in []byte) error {
}
b, in = in[:size], in[size:]
h.HeaderSize += int(size)
- switch size {
+ switch len(b) {
case 1:
h.DictionaryID = uint32(b[0])
case 2:
@@ -183,7 +182,7 @@ func (h *Header) Decode(in []byte) error {
}
b, in = in[:fcsSize], in[fcsSize:]
h.HeaderSize += int(fcsSize)
- switch fcsSize {
+ switch len(b) {
case 1:
h.FrameContentSize = uint64(b[0])
case 2:
diff --git a/vendor/github.com/klauspost/compress/zstd/decoder.go b/vendor/github.com/klauspost/compress/zstd/decoder.go
index 9fcdaac1d..f04aaa21e 100644
--- a/vendor/github.com/klauspost/compress/zstd/decoder.go
+++ b/vendor/github.com/klauspost/compress/zstd/decoder.go
@@ -5,7 +5,6 @@
package zstd
import (
- "bytes"
"context"
"encoding/binary"
"io"
@@ -35,13 +34,13 @@ type Decoder struct {
br readerWrapper
enabled bool
inFrame bool
+ dstBuf []byte
}
frame *frameDec
// Custom dictionaries.
- // Always uses copies.
- dicts map[uint32]dict
+ dicts map[uint32]*dict
// streamWg is the waitgroup for all streams
streamWg sync.WaitGroup
@@ -103,7 +102,7 @@ func NewReader(r io.Reader, opts ...DOption) (*Decoder, error) {
}
// Transfer option dicts.
- d.dicts = make(map[uint32]dict, len(d.o.dicts))
+ d.dicts = make(map[uint32]*dict, len(d.o.dicts))
for _, dc := range d.o.dicts {
d.dicts[dc.id] = dc
}
@@ -187,21 +186,23 @@ func (d *Decoder) Reset(r io.Reader) error {
}
// If bytes buffer and < 5MB, do sync decoding anyway.
- if bb, ok := r.(byter); ok && bb.Len() < 5<<20 {
+ if bb, ok := r.(byter); ok && bb.Len() < d.o.decodeBufsBelow && !d.o.limitToCap {
bb2 := bb
if debugDecoder {
println("*bytes.Buffer detected, doing sync decode, len:", bb.Len())
}
b := bb2.Bytes()
var dst []byte
- if cap(d.current.b) > 0 {
- dst = d.current.b
+ if cap(d.syncStream.dstBuf) > 0 {
+ dst = d.syncStream.dstBuf[:0]
}
- dst, err := d.DecodeAll(b, dst[:0])
+ dst, err := d.DecodeAll(b, dst)
if err == nil {
err = io.EOF
}
+ // Save output buffer
+ d.syncStream.dstBuf = dst
d.current.b = dst
d.current.err = err
d.current.flushed = true
@@ -216,6 +217,7 @@ func (d *Decoder) Reset(r io.Reader) error {
d.current.err = nil
d.current.flushed = false
d.current.d = nil
+ d.syncStream.dstBuf = nil
// Ensure no-one else is still running...
d.streamWg.Wait()
@@ -312,6 +314,7 @@ func (d *Decoder) DecodeAll(input, dst []byte) ([]byte, error) {
// Grab a block decoder and frame decoder.
block := <-d.decoders
frame := block.localFrame
+ initialSize := len(dst)
defer func() {
if debugDecoder {
printf("re-adding decoder: %p", block)
@@ -337,29 +340,36 @@ func (d *Decoder) DecodeAll(input, dst []byte) ([]byte, error) {
}
return dst, err
}
- if frame.DictionaryID != nil {
- dict, ok := d.dicts[*frame.DictionaryID]
- if !ok {
- return nil, ErrUnknownDictionary
- }
+ if err = d.setDict(frame); err != nil {
+ return nil, err
+ }
+ if frame.WindowSize > d.o.maxWindowSize {
if debugDecoder {
- println("setting dict", frame.DictionaryID)
+ println("window size exceeded:", frame.WindowSize, ">", d.o.maxWindowSize)
}
- frame.history.setDict(&dict)
+ return dst, ErrWindowSizeExceeded
}
-
- if frame.FrameContentSize != fcsUnknown && frame.FrameContentSize > d.o.maxDecodedSize-uint64(len(dst)) {
- return dst, ErrDecoderSizeExceeded
- }
- if frame.FrameContentSize < 1<<30 {
- // Never preallocate more than 1 GB up front.
+ if frame.FrameContentSize != fcsUnknown {
+ if frame.FrameContentSize > d.o.maxDecodedSize-uint64(len(dst)-initialSize) {
+ if debugDecoder {
+ println("decoder size exceeded; fcs:", frame.FrameContentSize, "> mcs:", d.o.maxDecodedSize-uint64(len(dst)-initialSize), "len:", len(dst))
+ }
+ return dst, ErrDecoderSizeExceeded
+ }
+ if d.o.limitToCap && frame.FrameContentSize > uint64(cap(dst)-len(dst)) {
+ if debugDecoder {
+ println("decoder size exceeded; fcs:", frame.FrameContentSize, "> (cap-len)", cap(dst)-len(dst))
+ }
+ return dst, ErrDecoderSizeExceeded
+ }
if cap(dst)-len(dst) < int(frame.FrameContentSize) {
- dst2 := make([]byte, len(dst), len(dst)+int(frame.FrameContentSize))
+ dst2 := make([]byte, len(dst), len(dst)+int(frame.FrameContentSize)+compressedBlockOverAlloc)
copy(dst2, dst)
dst = dst2
}
}
- if cap(dst) == 0 {
+
+ if cap(dst) == 0 && !d.o.limitToCap {
// Allocate len(input) * 2 by default if nothing is provided
// and we didn't get frame content size.
size := len(input) * 2
@@ -377,6 +387,9 @@ func (d *Decoder) DecodeAll(input, dst []byte) ([]byte, error) {
if err != nil {
return dst, err
}
+ if uint64(len(dst)-initialSize) > d.o.maxDecodedSize {
+ return dst, ErrDecoderSizeExceeded
+ }
if len(frame.bBuf) == 0 {
if debugDecoder {
println("frame dbuf empty")
@@ -437,26 +450,23 @@ func (d *Decoder) nextBlock(blocking bool) (ok bool) {
println("got", len(d.current.b), "bytes, error:", d.current.err, "data crc:", tmp)
}
+ if d.o.ignoreChecksum {
+ return true
+ }
+
if len(next.b) > 0 {
- n, err := d.current.crc.Write(next.b)
- if err == nil {
- if n != len(next.b) {
- d.current.err = io.ErrShortWrite
- }
- }
+ d.current.crc.Write(next.b)
}
- if next.err == nil && next.d != nil && len(next.d.checkCRC) != 0 {
- got := d.current.crc.Sum64()
- var tmp [4]byte
- binary.LittleEndian.PutUint32(tmp[:], uint32(got))
- if !bytes.Equal(tmp[:], next.d.checkCRC) && !ignoreCRC {
+ if next.err == nil && next.d != nil && next.d.hasCRC {
+ got := uint32(d.current.crc.Sum64())
+ if got != next.d.checkCRC {
if debugDecoder {
- println("CRC Check Failed:", tmp[:], " (got) !=", next.d.checkCRC, "(on stream)")
+ printf("CRC Check Failed: %08x (got) != %08x (on stream)\n", got, next.d.checkCRC)
}
d.current.err = ErrCRCMismatch
} else {
if debugDecoder {
- println("CRC ok", tmp[:])
+ printf("CRC ok %08x\n", got)
}
}
}
@@ -472,18 +482,12 @@ func (d *Decoder) nextBlockSync() (ok bool) {
if !d.syncStream.inFrame {
d.frame.history.reset()
d.current.err = d.frame.reset(&d.syncStream.br)
+ if d.current.err == nil {
+ d.current.err = d.setDict(d.frame)
+ }
if d.current.err != nil {
return false
}
- if d.frame.DictionaryID != nil {
- dict, ok := d.dicts[*d.frame.DictionaryID]
- if !ok {
- d.current.err = ErrUnknownDictionary
- return false
- } else {
- d.frame.history.setDict(&dict)
- }
- }
if d.frame.WindowSize > d.o.maxDecodedSize || d.frame.WindowSize > d.o.maxWindowSize {
d.current.err = ErrDecoderSizeExceeded
return false
@@ -533,9 +537,15 @@ func (d *Decoder) nextBlockSync() (ok bool) {
// Update/Check CRC
if d.frame.HasCheckSum {
- d.frame.crc.Write(d.current.b)
+ if !d.o.ignoreChecksum {
+ d.frame.crc.Write(d.current.b)
+ }
if d.current.d.Last {
- d.current.err = d.frame.checkCRC()
+ if !d.o.ignoreChecksum {
+ d.current.err = d.frame.checkCRC()
+ } else {
+ d.current.err = d.frame.consumeCRC()
+ }
if d.current.err != nil {
println("CRC error:", d.current.err)
return false
@@ -629,60 +639,18 @@ func (d *Decoder) startSyncDecoder(r io.Reader) error {
// Create Decoder:
// ASYNC:
-// Spawn 4 go routines.
-// 0: Read frames and decode blocks.
-// 1: Decode block and literals. Receives hufftree and seqdecs, returns seqdecs and huff tree.
-// 2: Wait for recentOffsets if needed. Decode sequences, send recentOffsets.
-// 3: Wait for stream history, execute sequences, send stream history.
+// Spawn 3 go routines.
+// 0: Read frames and decode block literals.
+// 1: Decode sequences.
+// 2: Execute sequences, send to output.
func (d *Decoder) startStreamDecoder(ctx context.Context, r io.Reader, output chan decodeOutput) {
defer d.streamWg.Done()
br := readerWrapper{r: r}
- var seqPrepare = make(chan *blockDec, d.o.concurrent)
var seqDecode = make(chan *blockDec, d.o.concurrent)
var seqExecute = make(chan *blockDec, d.o.concurrent)
- // Async 1: Prepare blocks...
- go func() {
- var hist history
- var hasErr bool
- for block := range seqPrepare {
- if hasErr {
- if block != nil {
- seqDecode <- block
- }
- continue
- }
- if block.async.newHist != nil {
- if debugDecoder {
- println("Async 1: new history")
- }
- hist.reset()
- if block.async.newHist.dict != nil {
- hist.setDict(block.async.newHist.dict)
- }
- }
- if block.err != nil || block.Type != blockTypeCompressed {
- hasErr = block.err != nil
- seqDecode <- block
- continue
- }
-
- remain, err := block.decodeLiterals(block.data, &hist)
- block.err = err
- hasErr = block.err != nil
- if err == nil {
- block.async.literals = hist.decoders.literals
- block.async.seqData = remain
- } else if debugDecoder {
- println("decodeLiterals error:", err)
- }
- seqDecode <- block
- }
- close(seqDecode)
- }()
-
- // Async 2: Decode sequences...
+ // Async 1: Decode sequences...
go func() {
var hist history
var hasErr bool
@@ -696,8 +664,9 @@ func (d *Decoder) startStreamDecoder(ctx context.Context, r io.Reader, output ch
}
if block.async.newHist != nil {
if debugDecoder {
- println("Async 2: new history, recent:", block.async.newHist.recentOffsets)
+ println("Async 1: new history, recent:", block.async.newHist.recentOffsets)
}
+ hist.reset()
hist.decoders = block.async.newHist.decoders
hist.recentOffsets = block.async.newHist.recentOffsets
hist.windowSize = block.async.newHist.windowSize
@@ -729,6 +698,7 @@ func (d *Decoder) startStreamDecoder(ctx context.Context, r io.Reader, output ch
seqExecute <- block
}
close(seqExecute)
+ hist.reset()
}()
var wg sync.WaitGroup
@@ -750,8 +720,9 @@ func (d *Decoder) startStreamDecoder(ctx context.Context, r io.Reader, output ch
}
if block.async.newHist != nil {
if debugDecoder {
- println("Async 3: new history")
+ println("Async 2: new history")
}
+ hist.reset()
hist.windowSize = block.async.newHist.windowSize
hist.allocFrameBuffer = block.async.newHist.allocFrameBuffer
if block.async.newHist.dict != nil {
@@ -781,7 +752,7 @@ func (d *Decoder) startStreamDecoder(ctx context.Context, r io.Reader, output ch
if block.lowMem {
block.dst = make([]byte, block.RLESize)
} else {
- block.dst = make([]byte, maxBlockSize)
+ block.dst = make([]byte, maxCompressedBlockSize)
}
}
block.dst = block.dst[:block.RLESize]
@@ -833,10 +804,38 @@ func (d *Decoder) startStreamDecoder(ctx context.Context, r io.Reader, output ch
if debugDecoder {
println("decoder goroutines finished")
}
+ hist.reset()
}()
+ var hist history
decodeStream:
for {
+ var hasErr bool
+ hist.reset()
+ decodeBlock := func(block *blockDec) {
+ if hasErr {
+ if block != nil {
+ seqDecode <- block
+ }
+ return
+ }
+ if block.err != nil || block.Type != blockTypeCompressed {
+ hasErr = block.err != nil
+ seqDecode <- block
+ return
+ }
+
+ remain, err := block.decodeLiterals(block.data, &hist)
+ block.err = err
+ hasErr = block.err != nil
+ if err == nil {
+ block.async.literals = hist.decoders.literals
+ block.async.seqData = remain
+ } else if debugDecoder {
+ println("decodeLiterals error:", err)
+ }
+ seqDecode <- block
+ }
frame := d.frame
if debugDecoder {
println("New frame...")
@@ -847,15 +846,14 @@ decodeStream:
if debugDecoder && err != nil {
println("Frame decoder returned", err)
}
- if err == nil && frame.DictionaryID != nil {
- dict, ok := d.dicts[*frame.DictionaryID]
- if !ok {
- err = ErrUnknownDictionary
- } else {
- frame.history.setDict(&dict)
- }
+ if err == nil {
+ err = d.setDict(frame)
}
if err == nil && d.frame.WindowSize > d.o.maxWindowSize {
+ if debugDecoder {
+ println("decoder size exceeded, fws:", d.frame.WindowSize, "> mws:", d.o.maxWindowSize)
+ }
+
err = ErrDecoderSizeExceeded
}
if err != nil {
@@ -863,7 +861,7 @@ decodeStream:
case <-ctx.Done():
case dec := <-d.decoders:
dec.sendErr(err)
- seqPrepare <- dec
+ decodeBlock(dec)
}
break decodeStream
}
@@ -883,6 +881,10 @@ decodeStream:
if debugDecoder {
println("Alloc History:", h.allocFrameBuffer)
}
+ hist.reset()
+ if h.dict != nil {
+ hist.setDict(h.dict)
+ }
dec.async.newHist = &h
dec.async.fcs = frame.FrameContentSize
historySent = true
@@ -893,23 +895,27 @@ decodeStream:
println("next block returned error:", err)
}
dec.err = err
- dec.checkCRC = nil
+ dec.hasCRC = false
if dec.Last && frame.HasCheckSum && err == nil {
crc, err := frame.rawInput.readSmall(4)
- if err != nil {
+ if len(crc) < 4 {
+ if err == nil {
+ err = io.ErrUnexpectedEOF
+
+ }
println("CRC missing?", err)
dec.err = err
- }
- var tmp [4]byte
- copy(tmp[:], crc)
- dec.checkCRC = tmp[:]
- if debugDecoder {
- println("found crc to check:", dec.checkCRC)
+ } else {
+ dec.checkCRC = binary.LittleEndian.Uint32(crc)
+ dec.hasCRC = true
+ if debugDecoder {
+ printf("found crc to check: %08x\n", dec.checkCRC)
+ }
}
}
err = dec.err
last := dec.Last
- seqPrepare <- dec
+ decodeBlock(dec)
if err != nil {
break decodeStream
}
@@ -918,7 +924,25 @@ decodeStream:
}
}
}
- close(seqPrepare)
+ close(seqDecode)
wg.Wait()
+ hist.reset()
d.frame.history.b = frameHistCache
}
+
+func (d *Decoder) setDict(frame *frameDec) (err error) {
+ dict, ok := d.dicts[frame.DictionaryID]
+ if ok {
+ if debugDecoder {
+ println("setting dict", frame.DictionaryID)
+ }
+ frame.history.setDict(dict)
+ } else if frame.DictionaryID != 0 {
+ // A zero or missing dictionary id is ambiguous:
+ // either dictionary zero, or no dictionary. In particular,
+ // zstd --patch-from uses this id for the source file,
+ // so only return an error if the dictionary id is not zero.
+ err = ErrUnknownDictionary
+ }
+ return err
+}
diff --git a/vendor/github.com/klauspost/compress/zstd/decoder_options.go b/vendor/github.com/klauspost/compress/zstd/decoder_options.go
index fd05c9bb0..07a90dd7a 100644
--- a/vendor/github.com/klauspost/compress/zstd/decoder_options.go
+++ b/vendor/github.com/klauspost/compress/zstd/decoder_options.go
@@ -6,6 +6,8 @@ package zstd
import (
"errors"
+ "fmt"
+ "math/bits"
"runtime"
)
@@ -14,24 +16,28 @@ type DOption func(*decoderOptions) error
// options retains accumulated state of multiple options.
type decoderOptions struct {
- lowMem bool
- concurrent int
- maxDecodedSize uint64
- maxWindowSize uint64
- dicts []dict
+ lowMem bool
+ concurrent int
+ maxDecodedSize uint64
+ maxWindowSize uint64
+ dicts []*dict
+ ignoreChecksum bool
+ limitToCap bool
+ decodeBufsBelow int
}
func (o *decoderOptions) setDefault() {
*o = decoderOptions{
// use less ram: true for now, but may change.
- lowMem: true,
- concurrent: runtime.GOMAXPROCS(0),
- maxWindowSize: MaxWindowSize,
+ lowMem: true,
+ concurrent: runtime.GOMAXPROCS(0),
+ maxWindowSize: MaxWindowSize,
+ decodeBufsBelow: 128 << 10,
}
if o.concurrent > 4 {
o.concurrent = 4
}
- o.maxDecodedSize = 1 << 63
+ o.maxDecodedSize = 64 << 30
}
// WithDecoderLowmem will set whether to use a lower amount of memory,
@@ -66,7 +72,7 @@ func WithDecoderConcurrency(n int) DOption {
// WithDecoderMaxMemory allows to set a maximum decoded size for in-memory
// non-streaming operations or maximum window size for streaming operations.
// This can be used to control memory usage of potentially hostile content.
-// Maximum and default is 1 << 63 bytes.
+// Maximum is 1 << 63 bytes. Default is 64GiB.
func WithDecoderMaxMemory(n uint64) DOption {
return func(o *decoderOptions) error {
if n == 0 {
@@ -81,7 +87,13 @@ func WithDecoderMaxMemory(n uint64) DOption {
}
// WithDecoderDicts allows to register one or more dictionaries for the decoder.
-// If several dictionaries with the same ID is provided the last one will be used.
+//
+// Each slice in dict must be in the [dictionary format] produced by
+// "zstd --train" from the Zstandard reference implementation.
+//
+// If several dictionaries with the same ID are provided, the last one will be used.
+//
+// [dictionary format]: https://github.com/facebook/zstd/blob/dev/doc/zstd_compression_format.md#dictionary-format
func WithDecoderDicts(dicts ...[]byte) DOption {
return func(o *decoderOptions) error {
for _, b := range dicts {
@@ -89,12 +101,24 @@ func WithDecoderDicts(dicts ...[]byte) DOption {
if err != nil {
return err
}
- o.dicts = append(o.dicts, *d)
+ o.dicts = append(o.dicts, d)
}
return nil
}
}
+// WithEncoderDictRaw registers a dictionary that may be used by the decoder.
+// The slice content can be arbitrary data.
+func WithDecoderDictRaw(id uint32, content []byte) DOption {
+ return func(o *decoderOptions) error {
+ if bits.UintSize > 32 && uint(len(content)) > dictMaxLength {
+ return fmt.Errorf("dictionary of size %d > 2GiB too large", len(content))
+ }
+ o.dicts = append(o.dicts, &dict{id: id, content: content, offsets: [3]int{1, 4, 8}})
+ return nil
+ }
+}
+
// WithDecoderMaxWindow allows to set a maximum window size for decodes.
// This allows rejecting packets that will cause big memory usage.
// The Decoder will likely allocate more memory based on the WithDecoderLowmem setting.
@@ -112,3 +136,34 @@ func WithDecoderMaxWindow(size uint64) DOption {
return nil
}
}
+
+// WithDecodeAllCapLimit will limit DecodeAll to decoding cap(dst)-len(dst) bytes,
+// or any size set in WithDecoderMaxMemory.
+// This can be used to limit decoding to a specific maximum output size.
+// Disabled by default.
+func WithDecodeAllCapLimit(b bool) DOption {
+ return func(o *decoderOptions) error {
+ o.limitToCap = b
+ return nil
+ }
+}
+
+// WithDecodeBuffersBelow will fully decode readers that have a
+// `Bytes() []byte` and `Len() int` interface similar to bytes.Buffer.
+// This typically uses less allocations but will have the full decompressed object in memory.
+// Note that DecodeAllCapLimit will disable this, as well as giving a size of 0 or less.
+// Default is 128KiB.
+func WithDecodeBuffersBelow(size int) DOption {
+ return func(o *decoderOptions) error {
+ o.decodeBufsBelow = size
+ return nil
+ }
+}
+
+// IgnoreChecksum allows to forcibly ignore checksum checking.
+func IgnoreChecksum(b bool) DOption {
+ return func(o *decoderOptions) error {
+ o.ignoreChecksum = b
+ return nil
+ }
+}
diff --git a/vendor/github.com/klauspost/compress/zstd/dict.go b/vendor/github.com/klauspost/compress/zstd/dict.go
index a36ae83ef..ca0951452 100644
--- a/vendor/github.com/klauspost/compress/zstd/dict.go
+++ b/vendor/github.com/klauspost/compress/zstd/dict.go
@@ -1,7 +1,6 @@
package zstd
import (
- "bytes"
"encoding/binary"
"errors"
"fmt"
@@ -20,7 +19,10 @@ type dict struct {
content []byte
}
-var dictMagic = [4]byte{0x37, 0xa4, 0x30, 0xec}
+const dictMagic = "\x37\xa4\x30\xec"
+
+// Maximum dictionary size for the reference implementation (1.5.3) is 2 GiB.
+const dictMaxLength = 1 << 31
// ID returns the dictionary id or 0 if d is nil.
func (d *dict) ID() uint32 {
@@ -30,14 +32,38 @@ func (d *dict) ID() uint32 {
return d.id
}
-// DictContentSize returns the dictionary content size or 0 if d is nil.
-func (d *dict) DictContentSize() int {
+// ContentSize returns the dictionary content size or 0 if d is nil.
+func (d *dict) ContentSize() int {
if d == nil {
return 0
}
return len(d.content)
}
+// Content returns the dictionary content.
+func (d *dict) Content() []byte {
+ if d == nil {
+ return nil
+ }
+ return d.content
+}
+
+// Offsets returns the initial offsets.
+func (d *dict) Offsets() [3]int {
+ if d == nil {
+ return [3]int{}
+ }
+ return d.offsets
+}
+
+// LitEncoder returns the literal encoder.
+func (d *dict) LitEncoder() *huff0.Scratch {
+ if d == nil {
+ return nil
+ }
+ return d.litEnc
+}
+
// Load a dictionary as described in
// https://github.com/facebook/zstd/blob/master/doc/zstd_compression_format.md#dictionary-format
func loadDict(b []byte) (*dict, error) {
@@ -50,7 +76,7 @@ func loadDict(b []byte) (*dict, error) {
ofDec: sequenceDec{fse: &fseDecoder{}},
mlDec: sequenceDec{fse: &fseDecoder{}},
}
- if !bytes.Equal(b[:4], dictMagic[:]) {
+ if string(b[:4]) != dictMagic {
return nil, ErrMagicMismatch
}
d.id = binary.LittleEndian.Uint32(b[4:8])
@@ -62,7 +88,7 @@ func loadDict(b []byte) (*dict, error) {
var err error
d.litEnc, b, err = huff0.ReadTable(b[8:], nil)
if err != nil {
- return nil, err
+ return nil, fmt.Errorf("loading literal table: %w", err)
}
d.litEnc.Reuse = huff0.ReusePolicyMust
@@ -120,3 +146,16 @@ func loadDict(b []byte) (*dict, error) {
return &d, nil
}
+
+// InspectDictionary loads a zstd dictionary and provides functions to inspect the content.
+func InspectDictionary(b []byte) (interface {
+ ID() uint32
+ ContentSize() int
+ Content() []byte
+ Offsets() [3]int
+ LitEncoder() *huff0.Scratch
+}, error) {
+ initPredefined()
+ d, err := loadDict(b)
+ return d, err
+}
diff --git a/vendor/github.com/klauspost/compress/zstd/enc_base.go b/vendor/github.com/klauspost/compress/zstd/enc_base.go
index 15ae8ee80..e008b9929 100644
--- a/vendor/github.com/klauspost/compress/zstd/enc_base.go
+++ b/vendor/github.com/klauspost/compress/zstd/enc_base.go
@@ -16,6 +16,7 @@ type fastBase struct {
cur int32
// maximum offset. Should be at least 2x block size.
maxMatchOff int32
+ bufferReset int32
hist []byte
crc *xxhash.Digest
tmp [8]byte
@@ -56,8 +57,8 @@ func (e *fastBase) Block() *blockEnc {
}
func (e *fastBase) addBlock(src []byte) int32 {
- if debugAsserts && e.cur > bufferReset {
- panic(fmt.Sprintf("ecur (%d) > buffer reset (%d)", e.cur, bufferReset))
+ if debugAsserts && e.cur > e.bufferReset {
+ panic(fmt.Sprintf("ecur (%d) > buffer reset (%d)", e.cur, e.bufferReset))
}
// check if we have space already
if len(e.hist)+len(src) > cap(e.hist) {
@@ -126,24 +127,7 @@ func (e *fastBase) matchlen(s, t int32, src []byte) int32 {
panic(fmt.Sprintf("len(src)-s (%d) > maxCompressedBlockSize (%d)", len(src)-int(s), maxCompressedBlockSize))
}
}
- a := src[s:]
- b := src[t:]
- b = b[:len(a)]
- end := int32((len(a) >> 3) << 3)
- for i := int32(0); i < end; i += 8 {
- if diff := load6432(a, i) ^ load6432(b, i); diff != 0 {
- return i + int32(bits.TrailingZeros64(diff)>>3)
- }
- }
-
- a = a[end:]
- b = b[end:]
- for i := range a {
- if a[i] != b[i] {
- return int32(i) + end
- }
- }
- return int32(len(a)) + end
+ return int32(matchLen(src[s:], src[t:]))
}
// Reset the encoding table.
@@ -165,13 +149,13 @@ func (e *fastBase) resetBase(d *dict, singleBlock bool) {
if singleBlock {
e.lowMem = true
}
- e.ensureHist(d.DictContentSize() + maxCompressedBlockSize)
+ e.ensureHist(d.ContentSize() + maxCompressedBlockSize)
e.lowMem = low
}
// We offset current position so everything will be out of reach.
// If above reset line, history will be purged.
- if e.cur < bufferReset {
+ if e.cur < e.bufferReset {
e.cur += e.maxMatchOff + int32(len(e.hist))
}
e.hist = e.hist[:0]
diff --git a/vendor/github.com/klauspost/compress/zstd/enc_best.go b/vendor/github.com/klauspost/compress/zstd/enc_best.go
index 96028ecd8..9819d4145 100644
--- a/vendor/github.com/klauspost/compress/zstd/enc_best.go
+++ b/vendor/github.com/klauspost/compress/zstd/enc_best.go
@@ -34,7 +34,7 @@ type match struct {
est int32
}
-const highScore = 25000
+const highScore = maxMatchLen * 8
// estBits will estimate output bits from predefined tables.
func (m *match) estBits(bitsPerByte int32) {
@@ -84,14 +84,10 @@ func (e *bestFastEncoder) Encode(blk *blockEnc, src []byte) {
)
// Protect against e.cur wraparound.
- for e.cur >= bufferReset {
+ for e.cur >= e.bufferReset-int32(len(e.hist)) {
if len(e.hist) == 0 {
- for i := range e.table[:] {
- e.table[i] = prevEntry{}
- }
- for i := range e.longTable[:] {
- e.longTable[i] = prevEntry{}
- }
+ e.table = [bestShortTableSize]prevEntry{}
+ e.longTable = [bestLongTableSize]prevEntry{}
e.cur = e.maxMatchOff
break
}
@@ -163,7 +159,6 @@ func (e *bestFastEncoder) Encode(blk *blockEnc, src []byte) {
// nextEmit is where in src the next emitLiteral should start from.
nextEmit := s
- cv := load6432(src, s)
// Relative offsets
offset1 := int32(blk.recentOffsets[0])
@@ -177,7 +172,6 @@ func (e *bestFastEncoder) Encode(blk *blockEnc, src []byte) {
blk.literals = append(blk.literals, src[nextEmit:until]...)
s.litLen = uint32(until - nextEmit)
}
- _ = addLiterals
if debugEncoder {
println("recent offsets:", blk.recentOffsets)
@@ -192,49 +186,96 @@ encodeLoop:
panic("offset0 was 0")
}
- bestOf := func(a, b match) match {
- if a.est+(a.s-b.s)*bitsPerByte>>10 < b.est+(b.s-a.s)*bitsPerByte>>10 {
- return a
- }
- return b
- }
- const goodEnough = 100
+ const goodEnough = 250
+
+ cv := load6432(src, s)
nextHashL := hashLen(cv, bestLongTableBits, bestLongLen)
nextHashS := hashLen(cv, bestShortTableBits, bestShortLen)
candidateL := e.longTable[nextHashL]
candidateS := e.table[nextHashS]
- matchAt := func(offset int32, s int32, first uint32, rep int32) match {
+ // Set m to a match at offset if it looks like that will improve compression.
+ improve := func(m *match, offset int32, s int32, first uint32, rep int32) {
if s-offset >= e.maxMatchOff || load3232(src, offset) != first {
- return match{s: s, est: highScore}
+ return
}
if debugAsserts {
+ if offset <= 0 {
+ panic(offset)
+ }
if !bytes.Equal(src[s:s+4], src[offset:offset+4]) {
panic(fmt.Sprintf("first match mismatch: %v != %v, first: %08x", src[s:s+4], src[offset:offset+4], first))
}
}
- m := match{offset: offset, s: s, length: 4 + e.matchlen(s+4, offset+4, src), rep: rep}
- m.estBits(bitsPerByte)
- return m
+ // Try to quick reject if we already have a long match.
+ if m.length > 16 {
+ left := len(src) - int(m.s+m.length)
+ // If we are too close to the end, keep as is.
+ if left <= 0 {
+ return
+ }
+ checkLen := m.length - (s - m.s) - 8
+ if left > 2 && checkLen > 4 {
+ // Check 4 bytes, 4 bytes from the end of the current match.
+ a := load3232(src, offset+checkLen)
+ b := load3232(src, s+checkLen)
+ if a != b {
+ return
+ }
+ }
+ }
+ l := 4 + e.matchlen(s+4, offset+4, src)
+ if rep < 0 {
+ // Extend candidate match backwards as far as possible.
+ tMin := s - e.maxMatchOff
+ if tMin < 0 {
+ tMin = 0
+ }
+ for offset > tMin && s > nextEmit && src[offset-1] == src[s-1] && l < maxMatchLength {
+ s--
+ offset--
+ l++
+ }
+ }
+
+ cand := match{offset: offset, s: s, length: l, rep: rep}
+ cand.estBits(bitsPerByte)
+ if m.est >= highScore || cand.est-m.est+(cand.s-m.s)*bitsPerByte>>10 < 0 {
+ *m = cand
+ }
}
- best := bestOf(matchAt(candidateL.offset-e.cur, s, uint32(cv), -1), matchAt(candidateL.prev-e.cur, s, uint32(cv), -1))
- best = bestOf(best, matchAt(candidateS.offset-e.cur, s, uint32(cv), -1))
- best = bestOf(best, matchAt(candidateS.prev-e.cur, s, uint32(cv), -1))
+ best := match{s: s, est: highScore}
+ improve(&best, candidateL.offset-e.cur, s, uint32(cv), -1)
+ improve(&best, candidateL.prev-e.cur, s, uint32(cv), -1)
+ improve(&best, candidateS.offset-e.cur, s, uint32(cv), -1)
+ improve(&best, candidateS.prev-e.cur, s, uint32(cv), -1)
if canRepeat && best.length < goodEnough {
- cv32 := uint32(cv >> 8)
- spp := s + 1
- best = bestOf(best, matchAt(spp-offset1, spp, cv32, 1))
- best = bestOf(best, matchAt(spp-offset2, spp, cv32, 2))
- best = bestOf(best, matchAt(spp-offset3, spp, cv32, 3))
- if best.length > 0 {
- cv32 = uint32(cv >> 24)
- spp += 2
- best = bestOf(best, matchAt(spp-offset1, spp, cv32, 1))
- best = bestOf(best, matchAt(spp-offset2, spp, cv32, 2))
- best = bestOf(best, matchAt(spp-offset3, spp, cv32, 3))
+ if s == nextEmit {
+ // Check repeats straight after a match.
+ improve(&best, s-offset2, s, uint32(cv), 1|4)
+ improve(&best, s-offset3, s, uint32(cv), 2|4)
+ if offset1 > 1 {
+ improve(&best, s-(offset1-1), s, uint32(cv), 3|4)
+ }
+ }
+
+ // If either no match or a non-repeat match, check at + 1
+ if best.rep <= 0 {
+ cv32 := uint32(cv >> 8)
+ spp := s + 1
+ improve(&best, spp-offset1, spp, cv32, 1)
+ improve(&best, spp-offset2, spp, cv32, 2)
+ improve(&best, spp-offset3, spp, cv32, 3)
+ if best.rep < 0 {
+ cv32 = uint32(cv >> 24)
+ spp += 2
+ improve(&best, spp-offset1, spp, cv32, 1)
+ improve(&best, spp-offset2, spp, cv32, 2)
+ improve(&best, spp-offset3, spp, cv32, 3)
+ }
}
}
// Load next and check...
@@ -249,40 +290,45 @@ encodeLoop:
if s >= sLimit {
break encodeLoop
}
- cv = load6432(src, s)
continue
}
- s++
candidateS = e.table[hashLen(cv>>8, bestShortTableBits, bestShortLen)]
- cv = load6432(src, s)
- cv2 := load6432(src, s+1)
+ cv = load6432(src, s+1)
+ cv2 := load6432(src, s+2)
candidateL = e.longTable[hashLen(cv, bestLongTableBits, bestLongLen)]
candidateL2 := e.longTable[hashLen(cv2, bestLongTableBits, bestLongLen)]
// Short at s+1
- best = bestOf(best, matchAt(candidateS.offset-e.cur, s, uint32(cv), -1))
+ improve(&best, candidateS.offset-e.cur, s+1, uint32(cv), -1)
// Long at s+1, s+2
- best = bestOf(best, matchAt(candidateL.offset-e.cur, s, uint32(cv), -1))
- best = bestOf(best, matchAt(candidateL.prev-e.cur, s, uint32(cv), -1))
- best = bestOf(best, matchAt(candidateL2.offset-e.cur, s+1, uint32(cv2), -1))
- best = bestOf(best, matchAt(candidateL2.prev-e.cur, s+1, uint32(cv2), -1))
+ improve(&best, candidateL.offset-e.cur, s+1, uint32(cv), -1)
+ improve(&best, candidateL.prev-e.cur, s+1, uint32(cv), -1)
+ improve(&best, candidateL2.offset-e.cur, s+2, uint32(cv2), -1)
+ improve(&best, candidateL2.prev-e.cur, s+2, uint32(cv2), -1)
if false {
// Short at s+3.
// Too often worse...
- best = bestOf(best, matchAt(e.table[hashLen(cv2>>8, bestShortTableBits, bestShortLen)].offset-e.cur, s+2, uint32(cv2>>8), -1))
+ improve(&best, e.table[hashLen(cv2>>8, bestShortTableBits, bestShortLen)].offset-e.cur, s+3, uint32(cv2>>8), -1)
}
- // See if we can find a better match by checking where the current best ends.
- // Use that offset to see if we can find a better full match.
- if sAt := best.s + best.length; sAt < sLimit {
- nextHashL := hashLen(load6432(src, sAt), bestLongTableBits, bestLongLen)
- candidateEnd := e.longTable[nextHashL]
- if pos := candidateEnd.offset - e.cur - best.length; pos >= 0 {
- bestEnd := bestOf(best, matchAt(pos, best.s, load3232(src, best.s), -1))
- if pos := candidateEnd.prev - e.cur - best.length; pos >= 0 {
- bestEnd = bestOf(bestEnd, matchAt(pos, best.s, load3232(src, best.s), -1))
+
+ // Start check at a fixed offset to allow for a few mismatches.
+ // For this compression level 2 yields the best results.
+ // We cannot do this if we have already indexed this position.
+ const skipBeginning = 2
+ if best.s > s-skipBeginning {
+ // See if we can find a better match by checking where the current best ends.
+ // Use that offset to see if we can find a better full match.
+ if sAt := best.s + best.length; sAt < sLimit {
+ nextHashL := hashLen(load6432(src, sAt), bestLongTableBits, bestLongLen)
+ candidateEnd := e.longTable[nextHashL]
+
+ if off := candidateEnd.offset - e.cur - best.length + skipBeginning; off >= 0 {
+ improve(&best, off, best.s+skipBeginning, load3232(src, best.s+skipBeginning), -1)
+ if off := candidateEnd.prev - e.cur - best.length + skipBeginning; off >= 0 {
+ improve(&best, off, best.s+skipBeginning, load3232(src, best.s+skipBeginning), -1)
+ }
}
- best = bestEnd
}
}
}
@@ -295,51 +341,34 @@ encodeLoop:
// We have a match, we can store the forward value
if best.rep > 0 {
- s = best.s
var seq seq
seq.matchLen = uint32(best.length - zstdMinMatch)
-
- // We might be able to match backwards.
- // Extend as long as we can.
- start := best.s
- // We end the search early, so we don't risk 0 literals
- // and have to do special offset treatment.
- startLimit := nextEmit + 1
-
- tMin := s - e.maxMatchOff
- if tMin < 0 {
- tMin = 0
- }
- repIndex := best.offset
- for repIndex > tMin && start > startLimit && src[repIndex-1] == src[start-1] && seq.matchLen < maxMatchLength-zstdMinMatch-1 {
- repIndex--
- start--
- seq.matchLen++
+ if debugAsserts && s <= nextEmit {
+ panic("s <= nextEmit")
}
- addLiterals(&seq, start)
+ addLiterals(&seq, best.s)
- // rep 0
- seq.offset = uint32(best.rep)
+ // Repeat. If bit 4 is set, this is a non-lit repeat.
+ seq.offset = uint32(best.rep & 3)
if debugSequences {
println("repeat sequence", seq, "next s:", s)
}
blk.sequences = append(blk.sequences, seq)
- // Index match start+1 (long) -> s - 1
- index0 := s
+ // Index old s + 1 -> s - 1
+ index0 := s + 1
s = best.s + best.length
nextEmit = s
if s >= sLimit {
if debugEncoder {
println("repeat ended", s, best.length)
-
}
break encodeLoop
}
// Index skipped...
off := index0 + e.cur
- for index0 < s-1 {
+ for index0 < s {
cv0 := load6432(src, index0)
h0 := hashLen(cv0, bestLongTableBits, bestLongLen)
h1 := hashLen(cv0, bestShortTableBits, bestShortLen)
@@ -349,17 +378,19 @@ encodeLoop:
index0++
}
switch best.rep {
- case 2:
+ case 2, 4 | 1:
offset1, offset2 = offset2, offset1
- case 3:
+ case 3, 4 | 2:
offset1, offset2, offset3 = offset3, offset1, offset2
+ case 4 | 3:
+ offset1, offset2, offset3 = offset1-1, offset1, offset2
}
- cv = load6432(src, s)
continue
}
// A 4-byte match has been found. Update recent offsets.
// We'll later see if more than 4 bytes.
+ index0 := s + 1
s = best.s
t := best.offset
offset1, offset2, offset3 = s-t, offset1, offset2
@@ -372,22 +403,9 @@ encodeLoop:
panic("invalid offset")
}
- // Extend the n-byte match as long as possible.
- l := best.length
-
- // Extend backwards
- tMin := s - e.maxMatchOff
- if tMin < 0 {
- tMin = 0
- }
- for t > tMin && s > nextEmit && src[t-1] == src[s-1] && l < maxMatchLength {
- s--
- t--
- l++
- }
-
// Write our sequence
var seq seq
+ l := best.length
seq.litLen = uint32(s - nextEmit)
seq.matchLen = uint32(l - zstdMinMatch)
if seq.litLen > 0 {
@@ -404,10 +422,8 @@ encodeLoop:
break encodeLoop
}
- // Index match start+1 (long) -> s - 1
- index0 := s - l + 1
- // every entry
- for index0 < s-1 {
+ // Index old s + 1 -> s - 1
+ for index0 < s {
cv0 := load6432(src, index0)
h0 := hashLen(cv0, bestLongTableBits, bestLongLen)
h1 := hashLen(cv0, bestShortTableBits, bestShortLen)
@@ -416,50 +432,6 @@ encodeLoop:
e.table[h1] = prevEntry{offset: off, prev: e.table[h1].offset}
index0++
}
-
- cv = load6432(src, s)
- if !canRepeat {
- continue
- }
-
- // Check offset 2
- for {
- o2 := s - offset2
- if load3232(src, o2) != uint32(cv) {
- // Do regular search
- break
- }
-
- // Store this, since we have it.
- nextHashS := hashLen(cv, bestShortTableBits, bestShortLen)
- nextHashL := hashLen(cv, bestLongTableBits, bestLongLen)
-
- // We have at least 4 byte match.
- // No need to check backwards. We come straight from a match
- l := 4 + e.matchlen(s+4, o2+4, src)
-
- e.longTable[nextHashL] = prevEntry{offset: s + e.cur, prev: e.longTable[nextHashL].offset}
- e.table[nextHashS] = prevEntry{offset: s + e.cur, prev: e.table[nextHashS].offset}
- seq.matchLen = uint32(l) - zstdMinMatch
- seq.litLen = 0
-
- // Since litlen is always 0, this is offset 1.
- seq.offset = 1
- s += l
- nextEmit = s
- if debugSequences {
- println("sequence", seq, "next s:", s)
- }
- blk.sequences = append(blk.sequences, seq)
-
- // Swap offset 1 and 2.
- offset1, offset2 = offset2, offset1
- if s >= sLimit {
- // Finished
- break encodeLoop
- }
- cv = load6432(src, s)
- }
}
if int(nextEmit) < len(src) {
diff --git a/vendor/github.com/klauspost/compress/zstd/enc_better.go b/vendor/github.com/klauspost/compress/zstd/enc_better.go
index 602c05ee0..8582f31a7 100644
--- a/vendor/github.com/klauspost/compress/zstd/enc_better.go
+++ b/vendor/github.com/klauspost/compress/zstd/enc_better.go
@@ -62,14 +62,10 @@ func (e *betterFastEncoder) Encode(blk *blockEnc, src []byte) {
)
// Protect against e.cur wraparound.
- for e.cur >= bufferReset {
+ for e.cur >= e.bufferReset-int32(len(e.hist)) {
if len(e.hist) == 0 {
- for i := range e.table[:] {
- e.table[i] = tableEntry{}
- }
- for i := range e.longTable[:] {
- e.longTable[i] = prevEntry{}
- }
+ e.table = [betterShortTableSize]tableEntry{}
+ e.longTable = [betterLongTableSize]prevEntry{}
e.cur = e.maxMatchOff
break
}
@@ -156,8 +152,8 @@ encodeLoop:
panic("offset0 was 0")
}
- nextHashS := hashLen(cv, betterShortTableBits, betterShortLen)
nextHashL := hashLen(cv, betterLongTableBits, betterLongLen)
+ nextHashS := hashLen(cv, betterShortTableBits, betterShortLen)
candidateL := e.longTable[nextHashL]
candidateS := e.table[nextHashS]
@@ -416,15 +412,23 @@ encodeLoop:
// Try to find a better match by searching for a long match at the end of the current best match
if s+matched < sLimit {
+ // Allow some bytes at the beginning to mismatch.
+ // Sweet spot is around 3 bytes, but depends on input.
+ // The skipped bytes are tested in Extend backwards,
+ // and still picked up as part of the match if they do.
+ const skipBeginning = 3
+
nextHashL := hashLen(load6432(src, s+matched), betterLongTableBits, betterLongLen)
- cv := load3232(src, s)
+ s2 := s + skipBeginning
+ cv := load3232(src, s2)
candidateL := e.longTable[nextHashL]
- coffsetL := candidateL.offset - e.cur - matched
- if coffsetL >= 0 && coffsetL < s && s-coffsetL < e.maxMatchOff && cv == load3232(src, coffsetL) {
+ coffsetL := candidateL.offset - e.cur - matched + skipBeginning
+ if coffsetL >= 0 && coffsetL < s2 && s2-coffsetL < e.maxMatchOff && cv == load3232(src, coffsetL) {
// Found a long match, at least 4 bytes.
- matchedNext := e.matchlen(s+4, coffsetL+4, src) + 4
+ matchedNext := e.matchlen(s2+4, coffsetL+4, src) + 4
if matchedNext > matched {
t = coffsetL
+ s = s2
matched = matchedNext
if debugMatches {
println("long match at end-of-match")
@@ -434,12 +438,13 @@ encodeLoop:
// Check prev long...
if true {
- coffsetL = candidateL.prev - e.cur - matched
- if coffsetL >= 0 && coffsetL < s && s-coffsetL < e.maxMatchOff && cv == load3232(src, coffsetL) {
+ coffsetL = candidateL.prev - e.cur - matched + skipBeginning
+ if coffsetL >= 0 && coffsetL < s2 && s2-coffsetL < e.maxMatchOff && cv == load3232(src, coffsetL) {
// Found a long match, at least 4 bytes.
- matchedNext := e.matchlen(s+4, coffsetL+4, src) + 4
+ matchedNext := e.matchlen(s2+4, coffsetL+4, src) + 4
if matchedNext > matched {
t = coffsetL
+ s = s2
matched = matchedNext
if debugMatches {
println("prev long match at end-of-match")
@@ -518,8 +523,8 @@ encodeLoop:
}
// Store this, since we have it.
- nextHashS := hashLen(cv, betterShortTableBits, betterShortLen)
nextHashL := hashLen(cv, betterLongTableBits, betterLongLen)
+ nextHashS := hashLen(cv, betterShortTableBits, betterShortLen)
// We have at least 4 byte match.
// No need to check backwards. We come straight from a match
@@ -578,7 +583,7 @@ func (e *betterFastEncoderDict) Encode(blk *blockEnc, src []byte) {
)
// Protect against e.cur wraparound.
- for e.cur >= bufferReset {
+ for e.cur >= e.bufferReset-int32(len(e.hist)) {
if len(e.hist) == 0 {
for i := range e.table[:] {
e.table[i] = tableEntry{}
@@ -674,8 +679,8 @@ encodeLoop:
panic("offset0 was 0")
}
- nextHashS := hashLen(cv, betterShortTableBits, betterShortLen)
nextHashL := hashLen(cv, betterLongTableBits, betterLongLen)
+ nextHashS := hashLen(cv, betterShortTableBits, betterShortLen)
candidateL := e.longTable[nextHashL]
candidateS := e.table[nextHashS]
@@ -1047,8 +1052,8 @@ encodeLoop:
}
// Store this, since we have it.
- nextHashS := hashLen(cv, betterShortTableBits, betterShortLen)
nextHashL := hashLen(cv, betterLongTableBits, betterLongLen)
+ nextHashS := hashLen(cv, betterShortTableBits, betterShortLen)
// We have at least 4 byte match.
// No need to check backwards. We come straight from a match
diff --git a/vendor/github.com/klauspost/compress/zstd/enc_dfast.go b/vendor/github.com/klauspost/compress/zstd/enc_dfast.go
index d6b310424..7d425109a 100644
--- a/vendor/github.com/klauspost/compress/zstd/enc_dfast.go
+++ b/vendor/github.com/klauspost/compress/zstd/enc_dfast.go
@@ -44,14 +44,10 @@ func (e *doubleFastEncoder) Encode(blk *blockEnc, src []byte) {
)
// Protect against e.cur wraparound.
- for e.cur >= bufferReset {
+ for e.cur >= e.bufferReset-int32(len(e.hist)) {
if len(e.hist) == 0 {
- for i := range e.table[:] {
- e.table[i] = tableEntry{}
- }
- for i := range e.longTable[:] {
- e.longTable[i] = tableEntry{}
- }
+ e.table = [dFastShortTableSize]tableEntry{}
+ e.longTable = [dFastLongTableSize]tableEntry{}
e.cur = e.maxMatchOff
break
}
@@ -127,8 +123,8 @@ encodeLoop:
panic("offset0 was 0")
}
- nextHashS := hashLen(cv, dFastShortTableBits, dFastShortLen)
nextHashL := hashLen(cv, dFastLongTableBits, dFastLongLen)
+ nextHashS := hashLen(cv, dFastShortTableBits, dFastShortLen)
candidateL := e.longTable[nextHashL]
candidateS := e.table[nextHashS]
@@ -388,7 +384,7 @@ func (e *doubleFastEncoder) EncodeNoHist(blk *blockEnc, src []byte) {
)
// Protect against e.cur wraparound.
- if e.cur >= bufferReset {
+ if e.cur >= e.bufferReset {
for i := range e.table[:] {
e.table[i] = tableEntry{}
}
@@ -439,8 +435,8 @@ encodeLoop:
var t int32
for {
- nextHashS := hashLen(cv, dFastShortTableBits, dFastShortLen)
nextHashL := hashLen(cv, dFastLongTableBits, dFastLongLen)
+ nextHashS := hashLen(cv, dFastShortTableBits, dFastShortLen)
candidateL := e.longTable[nextHashL]
candidateS := e.table[nextHashS]
@@ -685,7 +681,7 @@ encodeLoop:
}
// We do not store history, so we must offset e.cur to avoid false matches for next user.
- if e.cur < bufferReset {
+ if e.cur < e.bufferReset {
e.cur += int32(len(src))
}
}
@@ -700,7 +696,7 @@ func (e *doubleFastEncoderDict) Encode(blk *blockEnc, src []byte) {
)
// Protect against e.cur wraparound.
- for e.cur >= bufferReset {
+ for e.cur >= e.bufferReset-int32(len(e.hist)) {
if len(e.hist) == 0 {
for i := range e.table[:] {
e.table[i] = tableEntry{}
@@ -785,8 +781,8 @@ encodeLoop:
panic("offset0 was 0")
}
- nextHashS := hashLen(cv, dFastShortTableBits, dFastShortLen)
nextHashL := hashLen(cv, dFastLongTableBits, dFastLongLen)
+ nextHashS := hashLen(cv, dFastShortTableBits, dFastShortLen)
candidateL := e.longTable[nextHashL]
candidateS := e.table[nextHashS]
@@ -969,7 +965,7 @@ encodeLoop:
te0 := tableEntry{offset: index0 + e.cur, val: uint32(cv0)}
te1 := tableEntry{offset: index1 + e.cur, val: uint32(cv1)}
longHash1 := hashLen(cv0, dFastLongTableBits, dFastLongLen)
- longHash2 := hashLen(cv0, dFastLongTableBits, dFastLongLen)
+ longHash2 := hashLen(cv1, dFastLongTableBits, dFastLongLen)
e.longTable[longHash1] = te0
e.longTable[longHash2] = te1
e.markLongShardDirty(longHash1)
@@ -1002,8 +998,8 @@ encodeLoop:
}
// Store this, since we have it.
- nextHashS := hashLen(cv, dFastShortTableBits, dFastShortLen)
nextHashL := hashLen(cv, dFastLongTableBits, dFastLongLen)
+ nextHashS := hashLen(cv, dFastShortTableBits, dFastShortLen)
// We have at least 4 byte match.
// No need to check backwards. We come straight from a match
@@ -1103,7 +1099,8 @@ func (e *doubleFastEncoderDict) Reset(d *dict, singleBlock bool) {
}
if allDirty || dirtyShardCnt > dLongTableShardCnt/2 {
- copy(e.longTable[:], e.dictLongTable)
+ //copy(e.longTable[:], e.dictLongTable)
+ e.longTable = *(*[dFastLongTableSize]tableEntry)(e.dictLongTable)
for i := range e.longTableShardDirty {
e.longTableShardDirty[i] = false
}
@@ -1114,7 +1111,9 @@ func (e *doubleFastEncoderDict) Reset(d *dict, singleBlock bool) {
continue
}
- copy(e.longTable[i*dLongTableShardSize:(i+1)*dLongTableShardSize], e.dictLongTable[i*dLongTableShardSize:(i+1)*dLongTableShardSize])
+ // copy(e.longTable[i*dLongTableShardSize:(i+1)*dLongTableShardSize], e.dictLongTable[i*dLongTableShardSize:(i+1)*dLongTableShardSize])
+ *(*[dLongTableShardSize]tableEntry)(e.longTable[i*dLongTableShardSize:]) = *(*[dLongTableShardSize]tableEntry)(e.dictLongTable[i*dLongTableShardSize:])
+
e.longTableShardDirty[i] = false
}
}
diff --git a/vendor/github.com/klauspost/compress/zstd/enc_fast.go b/vendor/github.com/klauspost/compress/zstd/enc_fast.go
index f51ab529a..315b1a8f2 100644
--- a/vendor/github.com/klauspost/compress/zstd/enc_fast.go
+++ b/vendor/github.com/klauspost/compress/zstd/enc_fast.go
@@ -43,7 +43,7 @@ func (e *fastEncoder) Encode(blk *blockEnc, src []byte) {
)
// Protect against e.cur wraparound.
- for e.cur >= bufferReset {
+ for e.cur >= e.bufferReset-int32(len(e.hist)) {
if len(e.hist) == 0 {
for i := range e.table[:] {
e.table[i] = tableEntry{}
@@ -304,13 +304,13 @@ func (e *fastEncoder) EncodeNoHist(blk *blockEnc, src []byte) {
minNonLiteralBlockSize = 1 + 1 + inputMargin
)
if debugEncoder {
- if len(src) > maxBlockSize {
+ if len(src) > maxCompressedBlockSize {
panic("src too big")
}
}
// Protect against e.cur wraparound.
- if e.cur >= bufferReset {
+ if e.cur >= e.bufferReset {
for i := range e.table[:] {
e.table[i] = tableEntry{}
}
@@ -538,7 +538,7 @@ encodeLoop:
println("returning, recent offsets:", blk.recentOffsets, "extra literals:", blk.extraLits)
}
// We do not store history, so we must offset e.cur to avoid false matches for next user.
- if e.cur < bufferReset {
+ if e.cur < e.bufferReset {
e.cur += int32(len(src))
}
}
@@ -555,11 +555,9 @@ func (e *fastEncoderDict) Encode(blk *blockEnc, src []byte) {
return
}
// Protect against e.cur wraparound.
- for e.cur >= bufferReset {
+ for e.cur >= e.bufferReset-int32(len(e.hist)) {
if len(e.hist) == 0 {
- for i := range e.table[:] {
- e.table[i] = tableEntry{}
- }
+ e.table = [tableSize]tableEntry{}
e.cur = e.maxMatchOff
break
}
@@ -871,7 +869,8 @@ func (e *fastEncoderDict) Reset(d *dict, singleBlock bool) {
const shardCnt = tableShardCnt
const shardSize = tableShardSize
if e.allDirty || dirtyShardCnt > shardCnt*4/6 {
- copy(e.table[:], e.dictTable)
+ //copy(e.table[:], e.dictTable)
+ e.table = *(*[tableSize]tableEntry)(e.dictTable)
for i := range e.tableShardDirty {
e.tableShardDirty[i] = false
}
@@ -883,7 +882,8 @@ func (e *fastEncoderDict) Reset(d *dict, singleBlock bool) {
continue
}
- copy(e.table[i*shardSize:(i+1)*shardSize], e.dictTable[i*shardSize:(i+1)*shardSize])
+ //copy(e.table[i*shardSize:(i+1)*shardSize], e.dictTable[i*shardSize:(i+1)*shardSize])
+ *(*[shardSize]tableEntry)(e.table[i*shardSize:]) = *(*[shardSize]tableEntry)(e.dictTable[i*shardSize:])
e.tableShardDirty[i] = false
}
e.allDirty = false
diff --git a/vendor/github.com/klauspost/compress/zstd/encoder.go b/vendor/github.com/klauspost/compress/zstd/encoder.go
index dcc987a7c..4de0aed0d 100644
--- a/vendor/github.com/klauspost/compress/zstd/encoder.go
+++ b/vendor/github.com/klauspost/compress/zstd/encoder.go
@@ -8,6 +8,7 @@ import (
"crypto/rand"
"fmt"
"io"
+ "math"
rdebug "runtime/debug"
"sync"
@@ -276,23 +277,9 @@ func (e *Encoder) nextBlock(final bool) error {
s.eofWritten = true
}
- err := errIncompressible
- // If we got the exact same number of literals as input,
- // assume the literals cannot be compressed.
- if len(src) != len(blk.literals) || len(src) != e.o.blockSize {
- err = blk.encode(src, e.o.noEntropy, !e.o.allLitEntropy)
- }
- switch err {
- case errIncompressible:
- if debugEncoder {
- println("Storing incompressible block as raw")
- }
- blk.encodeRaw(src)
- // In fast mode, we do not transfer offsets, so we don't have to deal with changing the.
- case nil:
- default:
- s.err = err
- return err
+ s.err = blk.encode(src, e.o.noEntropy, !e.o.allLitEntropy)
+ if s.err != nil {
+ return s.err
}
_, s.err = s.w.Write(blk.output)
s.nWritten += int64(len(blk.output))
@@ -342,22 +329,8 @@ func (e *Encoder) nextBlock(final bool) error {
}
s.wWg.Done()
}()
- err := errIncompressible
- // If we got the exact same number of literals as input,
- // assume the literals cannot be compressed.
- if len(src) != len(blk.literals) || len(src) != e.o.blockSize {
- err = blk.encode(src, e.o.noEntropy, !e.o.allLitEntropy)
- }
- switch err {
- case errIncompressible:
- if debugEncoder {
- println("Storing incompressible block as raw")
- }
- blk.encodeRaw(src)
- // In fast mode, we do not transfer offsets, so we don't have to deal with changing the.
- case nil:
- default:
- s.writeErr = err
+ s.writeErr = blk.encode(src, e.o.noEntropy, !e.o.allLitEntropy)
+ if s.writeErr != nil {
return
}
_, s.writeErr = s.w.Write(blk.output)
@@ -528,8 +501,8 @@ func (e *Encoder) EncodeAll(src, dst []byte) []byte {
// If a non-single block is needed the encoder will reset again.
e.encoders <- enc
}()
- // Use single segments when above minimum window and below 1MB.
- single := len(src) < 1<<20 && len(src) > MinWindowSize
+ // Use single segments when above minimum window and below window size.
+ single := len(src) <= e.o.windowSize && len(src) > MinWindowSize
if e.o.single != nil {
single = *e.o.single
}
@@ -551,7 +524,7 @@ func (e *Encoder) EncodeAll(src, dst []byte) []byte {
}
// If we can do everything in one block, prefer that.
- if len(src) <= maxCompressedBlockSize {
+ if len(src) <= e.o.blockSize {
enc.Reset(e.o.dict, true)
// Slightly faster with no history and everything in one block.
if e.o.crc {
@@ -567,25 +540,15 @@ func (e *Encoder) EncodeAll(src, dst []byte) []byte {
// If we got the exact same number of literals as input,
// assume the literals cannot be compressed.
- err := errIncompressible
oldout := blk.output
- if len(blk.literals) != len(src) || len(src) != e.o.blockSize {
- // Output directly to dst
- blk.output = dst
- err = blk.encode(src, e.o.noEntropy, !e.o.allLitEntropy)
- }
+ // Output directly to dst
+ blk.output = dst
- switch err {
- case errIncompressible:
- if debugEncoder {
- println("Storing incompressible block as raw")
- }
- dst = blk.encodeRawTo(dst, src)
- case nil:
- dst = blk.output
- default:
+ err := blk.encode(src, e.o.noEntropy, !e.o.allLitEntropy)
+ if err != nil {
panic(err)
}
+ dst = blk.output
blk.output = oldout
} else {
enc.Reset(e.o.dict, false)
@@ -604,25 +567,11 @@ func (e *Encoder) EncodeAll(src, dst []byte) []byte {
if len(src) == 0 {
blk.last = true
}
- err := errIncompressible
- // If we got the exact same number of literals as input,
- // assume the literals cannot be compressed.
- if len(blk.literals) != len(todo) || len(todo) != e.o.blockSize {
- err = blk.encode(todo, e.o.noEntropy, !e.o.allLitEntropy)
- }
-
- switch err {
- case errIncompressible:
- if debugEncoder {
- println("Storing incompressible block as raw")
- }
- dst = blk.encodeRawTo(dst, todo)
- blk.popOffsets()
- case nil:
- dst = append(dst, blk.output...)
- default:
+ err := blk.encode(todo, e.o.noEntropy, !e.o.allLitEntropy)
+ if err != nil {
panic(err)
}
+ dst = append(dst, blk.output...)
blk.reset(nil)
}
}
@@ -639,3 +588,37 @@ func (e *Encoder) EncodeAll(src, dst []byte) []byte {
}
return dst
}
+
+// MaxEncodedSize returns the expected maximum
+// size of an encoded block or stream.
+func (e *Encoder) MaxEncodedSize(size int) int {
+ frameHeader := 4 + 2 // magic + frame header & window descriptor
+ if e.o.dict != nil {
+ frameHeader += 4
+ }
+ // Frame content size:
+ if size < 256 {
+ frameHeader++
+ } else if size < 65536+256 {
+ frameHeader += 2
+ } else if size < math.MaxInt32 {
+ frameHeader += 4
+ } else {
+ frameHeader += 8
+ }
+ // Final crc
+ if e.o.crc {
+ frameHeader += 4
+ }
+
+ // Max overhead is 3 bytes/block.
+ // There cannot be 0 blocks.
+ blocks := (size + e.o.blockSize) / e.o.blockSize
+
+ // Combine, add padding.
+ maxSz := frameHeader + 3*blocks + size
+ if e.o.pad > 1 {
+ maxSz += calcSkippableFrame(int64(maxSz), int64(e.o.pad))
+ }
+ return maxSz
+}
diff --git a/vendor/github.com/klauspost/compress/zstd/encoder_options.go b/vendor/github.com/klauspost/compress/zstd/encoder_options.go
index 44d8dbd19..50f70533b 100644
--- a/vendor/github.com/klauspost/compress/zstd/encoder_options.go
+++ b/vendor/github.com/klauspost/compress/zstd/encoder_options.go
@@ -3,6 +3,8 @@ package zstd
import (
"errors"
"fmt"
+ "math"
+ "math/bits"
"runtime"
"strings"
)
@@ -37,7 +39,7 @@ func (o *encoderOptions) setDefault() {
blockSize: maxCompressedBlockSize,
windowSize: 8 << 20,
level: SpeedDefault,
- allLitEntropy: true,
+ allLitEntropy: false,
lowMem: false,
}
}
@@ -47,22 +49,22 @@ func (o encoderOptions) encoder() encoder {
switch o.level {
case SpeedFastest:
if o.dict != nil {
- return &fastEncoderDict{fastEncoder: fastEncoder{fastBase: fastBase{maxMatchOff: int32(o.windowSize), lowMem: o.lowMem}}}
+ return &fastEncoderDict{fastEncoder: fastEncoder{fastBase: fastBase{maxMatchOff: int32(o.windowSize), bufferReset: math.MaxInt32 - int32(o.windowSize*2), lowMem: o.lowMem}}}
}
- return &fastEncoder{fastBase: fastBase{maxMatchOff: int32(o.windowSize), lowMem: o.lowMem}}
+ return &fastEncoder{fastBase: fastBase{maxMatchOff: int32(o.windowSize), bufferReset: math.MaxInt32 - int32(o.windowSize*2), lowMem: o.lowMem}}
case SpeedDefault:
if o.dict != nil {
- return &doubleFastEncoderDict{fastEncoderDict: fastEncoderDict{fastEncoder: fastEncoder{fastBase: fastBase{maxMatchOff: int32(o.windowSize), lowMem: o.lowMem}}}}
+ return &doubleFastEncoderDict{fastEncoderDict: fastEncoderDict{fastEncoder: fastEncoder{fastBase: fastBase{maxMatchOff: int32(o.windowSize), bufferReset: math.MaxInt32 - int32(o.windowSize*2), lowMem: o.lowMem}}}}
}
- return &doubleFastEncoder{fastEncoder: fastEncoder{fastBase: fastBase{maxMatchOff: int32(o.windowSize), lowMem: o.lowMem}}}
+ return &doubleFastEncoder{fastEncoder: fastEncoder{fastBase: fastBase{maxMatchOff: int32(o.windowSize), bufferReset: math.MaxInt32 - int32(o.windowSize*2), lowMem: o.lowMem}}}
case SpeedBetterCompression:
if o.dict != nil {
- return &betterFastEncoderDict{betterFastEncoder: betterFastEncoder{fastBase: fastBase{maxMatchOff: int32(o.windowSize), lowMem: o.lowMem}}}
+ return &betterFastEncoderDict{betterFastEncoder: betterFastEncoder{fastBase: fastBase{maxMatchOff: int32(o.windowSize), bufferReset: math.MaxInt32 - int32(o.windowSize*2), lowMem: o.lowMem}}}
}
- return &betterFastEncoder{fastBase: fastBase{maxMatchOff: int32(o.windowSize), lowMem: o.lowMem}}
+ return &betterFastEncoder{fastBase: fastBase{maxMatchOff: int32(o.windowSize), bufferReset: math.MaxInt32 - int32(o.windowSize*2), lowMem: o.lowMem}}
case SpeedBestCompression:
- return &bestFastEncoder{fastBase: fastBase{maxMatchOff: int32(o.windowSize), lowMem: o.lowMem}}
+ return &bestFastEncoder{fastBase: fastBase{maxMatchOff: int32(o.windowSize), bufferReset: math.MaxInt32 - int32(o.windowSize*2), lowMem: o.lowMem}}
}
panic("unknown compression level")
}
@@ -236,7 +238,7 @@ func WithEncoderLevel(l EncoderLevel) EOption {
}
}
if !o.customALEntropy {
- o.allLitEntropy = l > SpeedFastest
+ o.allLitEntropy = l > SpeedDefault
}
return nil
@@ -283,7 +285,7 @@ func WithNoEntropyCompression(b bool) EOption {
// a decoder is allowed to reject a compressed frame which requests a memory size beyond decoder's authorized range.
// For broader compatibility, decoders are recommended to support memory sizes of at least 8 MB.
// This is only a recommendation, each decoder is free to support higher or lower limits, depending on local limitations.
-// If this is not specified, block encodes will automatically choose this based on the input size.
+// If this is not specified, block encodes will automatically choose this based on the input size and the window size.
// This setting has no effect on streamed encodes.
func WithSingleSegment(b bool) EOption {
return func(o *encoderOptions) error {
@@ -304,7 +306,13 @@ func WithLowerEncoderMem(b bool) EOption {
}
// WithEncoderDict allows to register a dictionary that will be used for the encode.
+//
+// The slice dict must be in the [dictionary format] produced by
+// "zstd --train" from the Zstandard reference implementation.
+//
// The encoder *may* choose to use no dictionary instead for certain payloads.
+//
+// [dictionary format]: https://github.com/facebook/zstd/blob/dev/doc/zstd_compression_format.md#dictionary-format
func WithEncoderDict(dict []byte) EOption {
return func(o *encoderOptions) error {
d, err := loadDict(dict)
@@ -315,3 +323,17 @@ func WithEncoderDict(dict []byte) EOption {
return nil
}
}
+
+// WithEncoderDictRaw registers a dictionary that may be used by the encoder.
+//
+// The slice content may contain arbitrary data. It will be used as an initial
+// history.
+func WithEncoderDictRaw(id uint32, content []byte) EOption {
+ return func(o *encoderOptions) error {
+ if bits.UintSize > 32 && uint(len(content)) > dictMaxLength {
+ return fmt.Errorf("dictionary of size %d > 2GiB too large", len(content))
+ }
+ o.dict = &dict{id: id, content: content, offsets: [3]int{1, 4, 8}}
+ return nil
+ }
+}
diff --git a/vendor/github.com/klauspost/compress/zstd/framedec.go b/vendor/github.com/klauspost/compress/zstd/framedec.go
index 11089d223..cc0aa2274 100644
--- a/vendor/github.com/klauspost/compress/zstd/framedec.go
+++ b/vendor/github.com/klauspost/compress/zstd/framedec.go
@@ -5,7 +5,7 @@
package zstd
import (
- "bytes"
+ "encoding/binary"
"encoding/hex"
"errors"
"io"
@@ -29,7 +29,7 @@ type frameDec struct {
FrameContentSize uint64
- DictionaryID *uint32
+ DictionaryID uint32
HasCheckSum bool
SingleSegment bool
}
@@ -43,9 +43,9 @@ const (
MaxWindowSize = 1 << 29
)
-var (
- frameMagic = []byte{0x28, 0xb5, 0x2f, 0xfd}
- skippableFrameMagic = []byte{0x2a, 0x4d, 0x18}
+const (
+ frameMagic = "\x28\xb5\x2f\xfd"
+ skippableFrameMagic = "\x2a\x4d\x18"
)
func newFrameDec(o decoderOptions) *frameDec {
@@ -89,9 +89,9 @@ func (d *frameDec) reset(br byteBuffer) error {
copy(signature[1:], b)
}
- if !bytes.Equal(signature[1:4], skippableFrameMagic) || signature[0]&0xf0 != 0x50 {
+ if string(signature[1:4]) != skippableFrameMagic || signature[0]&0xf0 != 0x50 {
if debugDecoder {
- println("Not skippable", hex.EncodeToString(signature[:]), hex.EncodeToString(skippableFrameMagic))
+ println("Not skippable", hex.EncodeToString(signature[:]), hex.EncodeToString([]byte(skippableFrameMagic)))
}
// Break if not skippable frame.
break
@@ -106,7 +106,7 @@ func (d *frameDec) reset(br byteBuffer) error {
}
n := uint32(b[0]) | (uint32(b[1]) << 8) | (uint32(b[2]) << 16) | (uint32(b[3]) << 24)
println("Skipping frame with", n, "bytes.")
- err = br.skipN(int(n))
+ err = br.skipN(int64(n))
if err != nil {
if debugDecoder {
println("Reading discarded frame", err)
@@ -114,9 +114,9 @@ func (d *frameDec) reset(br byteBuffer) error {
return err
}
}
- if !bytes.Equal(signature[:], frameMagic) {
+ if string(signature[:]) != frameMagic {
if debugDecoder {
- println("Got magic numbers: ", signature, "want:", frameMagic)
+ println("Got magic numbers: ", signature, "want:", []byte(frameMagic))
}
return ErrMagicMismatch
}
@@ -155,7 +155,7 @@ func (d *frameDec) reset(br byteBuffer) error {
// Read Dictionary_ID
// https://github.com/facebook/zstd/blob/dev/doc/zstd_compression_format.md#dictionary_id
- d.DictionaryID = nil
+ d.DictionaryID = 0
if size := fhd & 3; size != 0 {
if size == 3 {
size = 4
@@ -167,7 +167,7 @@ func (d *frameDec) reset(br byteBuffer) error {
return err
}
var id uint32
- switch size {
+ switch len(b) {
case 1:
id = uint32(b[0])
case 2:
@@ -178,11 +178,7 @@ func (d *frameDec) reset(br byteBuffer) error {
if debugDecoder {
println("Dict size", size, "ID:", id)
}
- if id > 0 {
- // ID 0 means "sorry, no dictionary anyway".
- // https://github.com/facebook/zstd/blob/dev/doc/zstd_compression_format.md#dictionary-format
- d.DictionaryID = &id
- }
+ d.DictionaryID = id
}
// Read Frame_Content_Size
@@ -204,7 +200,7 @@ func (d *frameDec) reset(br byteBuffer) error {
println("Reading Frame content", err)
return err
}
- switch fcsSize {
+ switch len(b) {
case 1:
d.FrameContentSize = uint64(b[0])
case 2:
@@ -231,20 +227,27 @@ func (d *frameDec) reset(br byteBuffer) error {
d.crc.Reset()
}
+ if d.WindowSize > d.o.maxWindowSize {
+ if debugDecoder {
+ printf("window size %d > max %d\n", d.WindowSize, d.o.maxWindowSize)
+ }
+ return ErrWindowSizeExceeded
+ }
+
if d.WindowSize == 0 && d.SingleSegment {
// We may not need window in this case.
d.WindowSize = d.FrameContentSize
if d.WindowSize < MinWindowSize {
d.WindowSize = MinWindowSize
}
- }
-
- if d.WindowSize > uint64(d.o.maxWindowSize) {
- if debugDecoder {
- printf("window size %d > max %d\n", d.WindowSize, d.o.maxWindowSize)
+ if d.WindowSize > d.o.maxDecodedSize {
+ if debugDecoder {
+ printf("window size %d > max %d\n", d.WindowSize, d.o.maxWindowSize)
+ }
+ return ErrDecoderSizeExceeded
}
- return ErrWindowSizeExceeded
}
+
// The minimum Window_Size is 1 KB.
if d.WindowSize < MinWindowSize {
if debugDecoder {
@@ -253,11 +256,17 @@ func (d *frameDec) reset(br byteBuffer) error {
return ErrWindowSizeTooSmall
}
d.history.windowSize = int(d.WindowSize)
- if d.o.lowMem && d.history.windowSize < maxBlockSize {
+ if !d.o.lowMem || d.history.windowSize < maxBlockSize {
+ // Alloc 2x window size if not low-mem, or window size below 2MB.
d.history.allocFrameBuffer = d.history.windowSize * 2
- // TODO: Maybe use FrameContent size
} else {
- d.history.allocFrameBuffer = d.history.windowSize + maxBlockSize
+ if d.o.lowMem {
+ // Alloc with 1MB extra.
+ d.history.allocFrameBuffer = d.history.windowSize + maxBlockSize/2
+ } else {
+ // Alloc with 2MB extra.
+ d.history.allocFrameBuffer = d.history.windowSize + maxBlockSize
+ }
}
if debugDecoder {
@@ -284,40 +293,41 @@ func (d *frameDec) next(block *blockDec) error {
return nil
}
-// checkCRC will check the checksum if the frame has one.
+// checkCRC will check the checksum, assuming the frame has one.
// Will return ErrCRCMismatch if crc check failed, otherwise nil.
func (d *frameDec) checkCRC() error {
- if !d.HasCheckSum {
- return nil
- }
- var tmp [4]byte
- got := d.crc.Sum64()
- // Flip to match file order.
- tmp[0] = byte(got >> 0)
- tmp[1] = byte(got >> 8)
- tmp[2] = byte(got >> 16)
- tmp[3] = byte(got >> 24)
-
// We can overwrite upper tmp now
- want, err := d.rawInput.readSmall(4)
+ buf, err := d.rawInput.readSmall(4)
if err != nil {
println("CRC missing?", err)
return err
}
- if !bytes.Equal(tmp[:], want) && !ignoreCRC {
+ want := binary.LittleEndian.Uint32(buf[:4])
+ got := uint32(d.crc.Sum64())
+
+ if got != want {
if debugDecoder {
- println("CRC Check Failed:", tmp[:], "!=", want)
+ printf("CRC check failed: got %08x, want %08x\n", got, want)
}
return ErrCRCMismatch
}
if debugDecoder {
- println("CRC ok", tmp[:])
+ printf("CRC ok %08x\n", got)
}
return nil
}
-// runDecoder will create a sync decoder that will decode a block of data.
+// consumeCRC skips over the checksum, assuming the frame has one.
+func (d *frameDec) consumeCRC() error {
+ _, err := d.rawInput.readSmall(4)
+ if err != nil {
+ println("CRC missing?", err)
+ }
+ return err
+}
+
+// runDecoder will run the decoder for the remainder of the frame.
func (d *frameDec) runDecoder(dst []byte, dec *blockDec) ([]byte, error) {
saved := d.history.b
@@ -326,6 +336,30 @@ func (d *frameDec) runDecoder(dst []byte, dec *blockDec) ([]byte, error) {
d.history.ignoreBuffer = len(dst)
// Store input length, so we only check new data.
crcStart := len(dst)
+ d.history.decoders.maxSyncLen = 0
+ if d.o.limitToCap {
+ d.history.decoders.maxSyncLen = uint64(cap(dst) - len(dst))
+ }
+ if d.FrameContentSize != fcsUnknown {
+ if !d.o.limitToCap || d.FrameContentSize+uint64(len(dst)) < d.history.decoders.maxSyncLen {
+ d.history.decoders.maxSyncLen = d.FrameContentSize + uint64(len(dst))
+ }
+ if d.history.decoders.maxSyncLen > d.o.maxDecodedSize {
+ if debugDecoder {
+ println("maxSyncLen:", d.history.decoders.maxSyncLen, "> maxDecodedSize:", d.o.maxDecodedSize)
+ }
+ return dst, ErrDecoderSizeExceeded
+ }
+ if debugDecoder {
+ println("maxSyncLen:", d.history.decoders.maxSyncLen)
+ }
+ if !d.o.limitToCap && uint64(cap(dst)) < d.history.decoders.maxSyncLen {
+ // Alloc for output
+ dst2 := make([]byte, len(dst), d.history.decoders.maxSyncLen+compressedBlockOverAlloc)
+ copy(dst2, dst)
+ dst = dst2
+ }
+ }
var err error
for {
err = dec.reset(d.rawInput, d.WindowSize)
@@ -339,7 +373,13 @@ func (d *frameDec) runDecoder(dst []byte, dec *blockDec) ([]byte, error) {
if err != nil {
break
}
- if uint64(len(d.history.b)) > d.o.maxDecodedSize {
+ if uint64(len(d.history.b)-crcStart) > d.o.maxDecodedSize {
+ println("runDecoder: maxDecodedSize exceeded", uint64(len(d.history.b)-crcStart), ">", d.o.maxDecodedSize)
+ err = ErrDecoderSizeExceeded
+ break
+ }
+ if d.o.limitToCap && len(d.history.b) > cap(dst) {
+ println("runDecoder: cap exceeded", uint64(len(d.history.b)), ">", cap(dst))
err = ErrDecoderSizeExceeded
break
}
@@ -360,14 +400,11 @@ func (d *frameDec) runDecoder(dst []byte, dec *blockDec) ([]byte, error) {
if d.FrameContentSize != fcsUnknown && uint64(len(d.history.b)-crcStart) != d.FrameContentSize {
err = ErrFrameSizeMismatch
} else if d.HasCheckSum {
- var n int
- n, err = d.crc.Write(dst[crcStart:])
- if err == nil {
- if n != len(dst)-crcStart {
- err = io.ErrShortWrite
- } else {
- err = d.checkCRC()
- }
+ if d.o.ignoreChecksum {
+ err = d.consumeCRC()
+ } else {
+ d.crc.Write(dst[crcStart:])
+ err = d.checkCRC()
}
}
}
diff --git a/vendor/github.com/klauspost/compress/zstd/fse_decoder.go b/vendor/github.com/klauspost/compress/zstd/fse_decoder.go
index bb3d4fd6c..2f8860a72 100644
--- a/vendor/github.com/klauspost/compress/zstd/fse_decoder.go
+++ b/vendor/github.com/klauspost/compress/zstd/fse_decoder.go
@@ -5,8 +5,10 @@
package zstd
import (
+ "encoding/binary"
"errors"
"fmt"
+ "io"
)
const (
@@ -178,10 +180,32 @@ func (s *fseDecoder) readNCount(b *byteReader, maxSymbol uint16) error {
return fmt.Errorf("corruption detected (total %d != %d)", gotTotal, 1<> 3)
- // println(s.norm[:s.symbolLen], s.symbolLen)
return s.buildDtable()
}
+func (s *fseDecoder) mustReadFrom(r io.Reader) {
+ fatalErr := func(err error) {
+ if err != nil {
+ panic(err)
+ }
+ }
+ // dt [maxTablesize]decSymbol // Decompression table.
+ // symbolLen uint16 // Length of active part of the symbol table.
+ // actualTableLog uint8 // Selected tablelog.
+ // maxBits uint8 // Maximum number of additional bits
+ // // used for table creation to avoid allocations.
+ // stateTable [256]uint16
+ // norm [maxSymbolValue + 1]int16
+ // preDefined bool
+ fatalErr(binary.Read(r, binary.LittleEndian, &s.dt))
+ fatalErr(binary.Read(r, binary.LittleEndian, &s.symbolLen))
+ fatalErr(binary.Read(r, binary.LittleEndian, &s.actualTableLog))
+ fatalErr(binary.Read(r, binary.LittleEndian, &s.maxBits))
+ fatalErr(binary.Read(r, binary.LittleEndian, &s.stateTable))
+ fatalErr(binary.Read(r, binary.LittleEndian, &s.norm))
+ fatalErr(binary.Read(r, binary.LittleEndian, &s.preDefined))
+}
+
// decSymbol contains information about a state entry,
// Including the state offset base, the output symbol and
// the number of bits to read for the low part of the destination state.
@@ -204,18 +228,10 @@ func (d decSymbol) newState() uint16 {
return uint16(d >> 16)
}
-func (d decSymbol) baseline() uint32 {
- return uint32(d >> 32)
-}
-
func (d decSymbol) baselineInt() int {
return int(d >> 32)
}
-func (d *decSymbol) set(nbits, addBits uint8, newState uint16, baseline uint32) {
- *d = decSymbol(nbits) | (decSymbol(addBits) << 8) | (decSymbol(newState) << 16) | (decSymbol(baseline) << 32)
-}
-
func (d *decSymbol) setNBits(nBits uint8) {
const mask = 0xffffffffffffff00
*d = (*d & mask) | decSymbol(nBits)
@@ -231,11 +247,6 @@ func (d *decSymbol) setNewState(state uint16) {
*d = (*d & mask) | decSymbol(state)<<16
}
-func (d *decSymbol) setBaseline(baseline uint32) {
- const mask = 0xffffffff
- *d = (*d & mask) | decSymbol(baseline)<<32
-}
-
func (d *decSymbol) setExt(addBits uint8, baseline uint32) {
const mask = 0xffff00ff
*d = (*d & mask) | (decSymbol(addBits) << 8) | (decSymbol(baseline) << 32)
@@ -257,68 +268,6 @@ func (s *fseDecoder) setRLE(symbol decSymbol) {
s.dt[0] = symbol
}
-// buildDtable will build the decoding table.
-func (s *fseDecoder) buildDtable() error {
- tableSize := uint32(1 << s.actualTableLog)
- highThreshold := tableSize - 1
- symbolNext := s.stateTable[:256]
-
- // Init, lay down lowprob symbols
- {
- for i, v := range s.norm[:s.symbolLen] {
- if v == -1 {
- s.dt[highThreshold].setAddBits(uint8(i))
- highThreshold--
- symbolNext[i] = 1
- } else {
- symbolNext[i] = uint16(v)
- }
- }
- }
- // Spread symbols
- {
- tableMask := tableSize - 1
- step := tableStep(tableSize)
- position := uint32(0)
- for ss, v := range s.norm[:s.symbolLen] {
- for i := 0; i < int(v); i++ {
- s.dt[position].setAddBits(uint8(ss))
- position = (position + step) & tableMask
- for position > highThreshold {
- // lowprob area
- position = (position + step) & tableMask
- }
- }
- }
- if position != 0 {
- // position must reach all cells once, otherwise normalizedCounter is incorrect
- return errors.New("corrupted input (position != 0)")
- }
- }
-
- // Build Decoding table
- {
- tableSize := uint16(1 << s.actualTableLog)
- for u, v := range s.dt[:tableSize] {
- symbol := v.addBits()
- nextState := symbolNext[symbol]
- symbolNext[symbol] = nextState + 1
- nBits := s.actualTableLog - byte(highBits(uint32(nextState)))
- s.dt[u&maxTableMask].setNBits(nBits)
- newState := (nextState << nBits) - tableSize
- if newState > tableSize {
- return fmt.Errorf("newState (%d) outside table size (%d)", newState, tableSize)
- }
- if newState == uint16(u) && nBits == 0 {
- // Seems weird that this is possible with nbits > 0.
- return fmt.Errorf("newState (%d) == oldState (%d) and no bits", newState, u)
- }
- s.dt[u&maxTableMask].setNewState(newState)
- }
- }
- return nil
-}
-
// transform will transform the decoder table into a table usable for
// decoding without having to apply the transformation while decoding.
// The state will contain the base value and the number of bits to read.
@@ -352,34 +301,7 @@ func (s *fseState) init(br *bitReader, tableLog uint8, dt []decSymbol) {
s.state = dt[br.getBits(tableLog)]
}
-// next returns the current symbol and sets the next state.
-// At least tablelog bits must be available in the bit reader.
-func (s *fseState) next(br *bitReader) {
- lowBits := uint16(br.getBits(s.state.nbBits()))
- s.state = s.dt[s.state.newState()+lowBits]
-}
-
-// finished returns true if all bits have been read from the bitstream
-// and the next state would require reading bits from the input.
-func (s *fseState) finished(br *bitReader) bool {
- return br.finished() && s.state.nbBits() > 0
-}
-
-// final returns the current state symbol without decoding the next.
-func (s *fseState) final() (int, uint8) {
- return s.state.baselineInt(), s.state.addBits()
-}
-
// final returns the current state symbol without decoding the next.
func (s decSymbol) final() (int, uint8) {
return s.baselineInt(), s.addBits()
}
-
-// nextFast returns the next symbol and sets the next state.
-// This can only be used if no symbols are 0 bits.
-// At least tablelog bits must be available in the bit reader.
-func (s *fseState) nextFast(br *bitReader) (uint32, uint8) {
- lowBits := br.get16BitsFast(s.state.nbBits())
- s.state = s.dt[s.state.newState()+lowBits]
- return s.state.baseline(), s.state.addBits()
-}
diff --git a/vendor/github.com/klauspost/compress/zstd/fse_decoder_amd64.go b/vendor/github.com/klauspost/compress/zstd/fse_decoder_amd64.go
new file mode 100644
index 000000000..d04a829b0
--- /dev/null
+++ b/vendor/github.com/klauspost/compress/zstd/fse_decoder_amd64.go
@@ -0,0 +1,65 @@
+//go:build amd64 && !appengine && !noasm && gc
+// +build amd64,!appengine,!noasm,gc
+
+package zstd
+
+import (
+ "fmt"
+)
+
+type buildDtableAsmContext struct {
+ // inputs
+ stateTable *uint16
+ norm *int16
+ dt *uint64
+
+ // outputs --- set by the procedure in the case of error;
+ // for interpretation please see the error handling part below
+ errParam1 uint64
+ errParam2 uint64
+}
+
+// buildDtable_asm is an x86 assembly implementation of fseDecoder.buildDtable.
+// Function returns non-zero exit code on error.
+//
+//go:noescape
+func buildDtable_asm(s *fseDecoder, ctx *buildDtableAsmContext) int
+
+// please keep in sync with _generate/gen_fse.go
+const (
+ errorCorruptedNormalizedCounter = 1
+ errorNewStateTooBig = 2
+ errorNewStateNoBits = 3
+)
+
+// buildDtable will build the decoding table.
+func (s *fseDecoder) buildDtable() error {
+ ctx := buildDtableAsmContext{
+ stateTable: &s.stateTable[0],
+ norm: &s.norm[0],
+ dt: (*uint64)(&s.dt[0]),
+ }
+ code := buildDtable_asm(s, &ctx)
+
+ if code != 0 {
+ switch code {
+ case errorCorruptedNormalizedCounter:
+ position := ctx.errParam1
+ return fmt.Errorf("corrupted input (position=%d, expected 0)", position)
+
+ case errorNewStateTooBig:
+ newState := decSymbol(ctx.errParam1)
+ size := ctx.errParam2
+ return fmt.Errorf("newState (%d) outside table size (%d)", newState, size)
+
+ case errorNewStateNoBits:
+ newState := decSymbol(ctx.errParam1)
+ oldState := decSymbol(ctx.errParam2)
+ return fmt.Errorf("newState (%d) == oldState (%d) and no bits", newState, oldState)
+
+ default:
+ return fmt.Errorf("buildDtable_asm returned unhandled nonzero code = %d", code)
+ }
+ }
+ return nil
+}
diff --git a/vendor/github.com/klauspost/compress/zstd/fse_decoder_amd64.s b/vendor/github.com/klauspost/compress/zstd/fse_decoder_amd64.s
new file mode 100644
index 000000000..bcde39869
--- /dev/null
+++ b/vendor/github.com/klauspost/compress/zstd/fse_decoder_amd64.s
@@ -0,0 +1,126 @@
+// Code generated by command: go run gen_fse.go -out ../fse_decoder_amd64.s -pkg=zstd. DO NOT EDIT.
+
+//go:build !appengine && !noasm && gc && !noasm
+
+// func buildDtable_asm(s *fseDecoder, ctx *buildDtableAsmContext) int
+TEXT ·buildDtable_asm(SB), $0-24
+ MOVQ ctx+8(FP), CX
+ MOVQ s+0(FP), DI
+
+ // Load values
+ MOVBQZX 4098(DI), DX
+ XORQ AX, AX
+ BTSQ DX, AX
+ MOVQ (CX), BX
+ MOVQ 16(CX), SI
+ LEAQ -1(AX), R8
+ MOVQ 8(CX), CX
+ MOVWQZX 4096(DI), DI
+
+ // End load values
+ // Init, lay down lowprob symbols
+ XORQ R9, R9
+ JMP init_main_loop_condition
+
+init_main_loop:
+ MOVWQSX (CX)(R9*2), R10
+ CMPW R10, $-1
+ JNE do_not_update_high_threshold
+ MOVB R9, 1(SI)(R8*8)
+ DECQ R8
+ MOVQ $0x0000000000000001, R10
+
+do_not_update_high_threshold:
+ MOVW R10, (BX)(R9*2)
+ INCQ R9
+
+init_main_loop_condition:
+ CMPQ R9, DI
+ JL init_main_loop
+
+ // Spread symbols
+ // Calculate table step
+ MOVQ AX, R9
+ SHRQ $0x01, R9
+ MOVQ AX, R10
+ SHRQ $0x03, R10
+ LEAQ 3(R9)(R10*1), R9
+
+ // Fill add bits values
+ LEAQ -1(AX), R10
+ XORQ R11, R11
+ XORQ R12, R12
+ JMP spread_main_loop_condition
+
+spread_main_loop:
+ XORQ R13, R13
+ MOVWQSX (CX)(R12*2), R14
+ JMP spread_inner_loop_condition
+
+spread_inner_loop:
+ MOVB R12, 1(SI)(R11*8)
+
+adjust_position:
+ ADDQ R9, R11
+ ANDQ R10, R11
+ CMPQ R11, R8
+ JG adjust_position
+ INCQ R13
+
+spread_inner_loop_condition:
+ CMPQ R13, R14
+ JL spread_inner_loop
+ INCQ R12
+
+spread_main_loop_condition:
+ CMPQ R12, DI
+ JL spread_main_loop
+ TESTQ R11, R11
+ JZ spread_check_ok
+ MOVQ ctx+8(FP), AX
+ MOVQ R11, 24(AX)
+ MOVQ $+1, ret+16(FP)
+ RET
+
+spread_check_ok:
+ // Build Decoding table
+ XORQ DI, DI
+
+build_table_main_table:
+ MOVBQZX 1(SI)(DI*8), CX
+ MOVWQZX (BX)(CX*2), R8
+ LEAQ 1(R8), R9
+ MOVW R9, (BX)(CX*2)
+ MOVQ R8, R9
+ BSRQ R9, R9
+ MOVQ DX, CX
+ SUBQ R9, CX
+ SHLQ CL, R8
+ SUBQ AX, R8
+ MOVB CL, (SI)(DI*8)
+ MOVW R8, 2(SI)(DI*8)
+ CMPQ R8, AX
+ JLE build_table_check1_ok
+ MOVQ ctx+8(FP), CX
+ MOVQ R8, 24(CX)
+ MOVQ AX, 32(CX)
+ MOVQ $+2, ret+16(FP)
+ RET
+
+build_table_check1_ok:
+ TESTB CL, CL
+ JNZ build_table_check2_ok
+ CMPW R8, DI
+ JNE build_table_check2_ok
+ MOVQ ctx+8(FP), AX
+ MOVQ R8, 24(AX)
+ MOVQ DI, 32(AX)
+ MOVQ $+3, ret+16(FP)
+ RET
+
+build_table_check2_ok:
+ INCQ DI
+ CMPQ DI, AX
+ JL build_table_main_table
+ MOVQ $+0, ret+16(FP)
+ RET
diff --git a/vendor/github.com/klauspost/compress/zstd/fse_decoder_generic.go b/vendor/github.com/klauspost/compress/zstd/fse_decoder_generic.go
new file mode 100644
index 000000000..332e51fe4
--- /dev/null
+++ b/vendor/github.com/klauspost/compress/zstd/fse_decoder_generic.go
@@ -0,0 +1,72 @@
+//go:build !amd64 || appengine || !gc || noasm
+// +build !amd64 appengine !gc noasm
+
+package zstd
+
+import (
+ "errors"
+ "fmt"
+)
+
+// buildDtable will build the decoding table.
+func (s *fseDecoder) buildDtable() error {
+ tableSize := uint32(1 << s.actualTableLog)
+ highThreshold := tableSize - 1
+ symbolNext := s.stateTable[:256]
+
+ // Init, lay down lowprob symbols
+ {
+ for i, v := range s.norm[:s.symbolLen] {
+ if v == -1 {
+ s.dt[highThreshold].setAddBits(uint8(i))
+ highThreshold--
+ symbolNext[i] = 1
+ } else {
+ symbolNext[i] = uint16(v)
+ }
+ }
+ }
+
+ // Spread symbols
+ {
+ tableMask := tableSize - 1
+ step := tableStep(tableSize)
+ position := uint32(0)
+ for ss, v := range s.norm[:s.symbolLen] {
+ for i := 0; i < int(v); i++ {
+ s.dt[position].setAddBits(uint8(ss))
+ position = (position + step) & tableMask
+ for position > highThreshold {
+ // lowprob area
+ position = (position + step) & tableMask
+ }
+ }
+ }
+ if position != 0 {
+ // position must reach all cells once, otherwise normalizedCounter is incorrect
+ return errors.New("corrupted input (position != 0)")
+ }
+ }
+
+ // Build Decoding table
+ {
+ tableSize := uint16(1 << s.actualTableLog)
+ for u, v := range s.dt[:tableSize] {
+ symbol := v.addBits()
+ nextState := symbolNext[symbol]
+ symbolNext[symbol] = nextState + 1
+ nBits := s.actualTableLog - byte(highBits(uint32(nextState)))
+ s.dt[u&maxTableMask].setNBits(nBits)
+ newState := (nextState << nBits) - tableSize
+ if newState > tableSize {
+ return fmt.Errorf("newState (%d) outside table size (%d)", newState, tableSize)
+ }
+ if newState == uint16(u) && nBits == 0 {
+ // Seems weird that this is possible with nbits > 0.
+ return fmt.Errorf("newState (%d) == oldState (%d) and no bits", newState, u)
+ }
+ s.dt[u&maxTableMask].setNewState(newState)
+ }
+ }
+ return nil
+}
diff --git a/vendor/github.com/klauspost/compress/zstd/fse_encoder.go b/vendor/github.com/klauspost/compress/zstd/fse_encoder.go
index 5442061b1..ab26326a8 100644
--- a/vendor/github.com/klauspost/compress/zstd/fse_encoder.go
+++ b/vendor/github.com/klauspost/compress/zstd/fse_encoder.go
@@ -76,21 +76,6 @@ func (s *fseEncoder) HistogramFinished(maxSymbol uint8, maxCount int) {
s.clearCount = maxCount != 0
}
-// prepare will prepare and allocate scratch tables used for both compression and decompression.
-func (s *fseEncoder) prepare() (*fseEncoder, error) {
- if s == nil {
- s = &fseEncoder{}
- }
- s.useRLE = false
- if s.clearCount && s.maxCount == 0 {
- for i := range s.count {
- s.count[i] = 0
- }
- s.clearCount = false
- }
- return s, nil
-}
-
// allocCtable will allocate tables needed for compression.
// If existing tables a re big enough, they are simply re-used.
func (s *fseEncoder) allocCtable() {
@@ -709,14 +694,6 @@ func (c *cState) init(bw *bitWriter, ct *cTable, first symbolTransform) {
c.state = c.stateTable[lu]
}
-// encode the output symbol provided and write it to the bitstream.
-func (c *cState) encode(symbolTT symbolTransform) {
- nbBitsOut := (uint32(c.state) + symbolTT.deltaNbBits) >> 16
- dstState := int32(c.state>>(nbBitsOut&15)) + int32(symbolTT.deltaFindState)
- c.bw.addBits16NC(c.state, uint8(nbBitsOut))
- c.state = c.stateTable[dstState]
-}
-
// flush will write the tablelog to the output and flush the remaining full bytes.
func (c *cState) flush(tableLog uint8) {
c.bw.flush32()
diff --git a/vendor/github.com/klauspost/compress/zstd/fuzz.go b/vendor/github.com/klauspost/compress/zstd/fuzz.go
deleted file mode 100644
index 7f2210e05..000000000
--- a/vendor/github.com/klauspost/compress/zstd/fuzz.go
+++ /dev/null
@@ -1,11 +0,0 @@
-//go:build ignorecrc
-// +build ignorecrc
-
-// Copyright 2019+ Klaus Post. All rights reserved.
-// License information can be found in the LICENSE file.
-// Based on work by Yann Collet, released under BSD License.
-
-package zstd
-
-// ignoreCRC can be used for fuzz testing to ignore CRC values...
-const ignoreCRC = true
diff --git a/vendor/github.com/klauspost/compress/zstd/fuzz_none.go b/vendor/github.com/klauspost/compress/zstd/fuzz_none.go
deleted file mode 100644
index 6811c68a8..000000000
--- a/vendor/github.com/klauspost/compress/zstd/fuzz_none.go
+++ /dev/null
@@ -1,11 +0,0 @@
-//go:build !ignorecrc
-// +build !ignorecrc
-
-// Copyright 2019+ Klaus Post. All rights reserved.
-// License information can be found in the LICENSE file.
-// Based on work by Yann Collet, released under BSD License.
-
-package zstd
-
-// ignoreCRC can be used for fuzz testing to ignore CRC values...
-const ignoreCRC = false
diff --git a/vendor/github.com/klauspost/compress/zstd/hash.go b/vendor/github.com/klauspost/compress/zstd/hash.go
index cf33f29a1..5d73c21eb 100644
--- a/vendor/github.com/klauspost/compress/zstd/hash.go
+++ b/vendor/github.com/klauspost/compress/zstd/hash.go
@@ -33,9 +33,3 @@ func hashLen(u uint64, length, mls uint8) uint32 {
return (uint32(u) * prime4bytes) >> (32 - length)
}
}
-
-// hash3 returns the hash of the lower 3 bytes of u to fit in a hash table with h bits.
-// Preferably h should be a constant and should always be <32.
-func hash3(u uint32, h uint8) uint32 {
- return ((u << (32 - 24)) * prime3bytes) >> ((32 - h) & 31)
-}
diff --git a/vendor/github.com/klauspost/compress/zstd/history.go b/vendor/github.com/klauspost/compress/zstd/history.go
index 28b40153c..09164856d 100644
--- a/vendor/github.com/klauspost/compress/zstd/history.go
+++ b/vendor/github.com/klauspost/compress/zstd/history.go
@@ -37,24 +37,21 @@ func (h *history) reset() {
h.ignoreBuffer = 0
h.error = false
h.recentOffsets = [3]int{1, 4, 8}
- if f := h.decoders.litLengths.fse; f != nil && !f.preDefined {
- fseDecoderPool.Put(f)
- }
- if f := h.decoders.offsets.fse; f != nil && !f.preDefined {
- fseDecoderPool.Put(f)
- }
- if f := h.decoders.matchLengths.fse; f != nil && !f.preDefined {
- fseDecoderPool.Put(f)
- }
+ h.decoders.freeDecoders()
h.decoders = sequenceDecs{br: h.decoders.br}
+ h.freeHuffDecoder()
+ h.huffTree = nil
+ h.dict = nil
+ //printf("history created: %+v (l: %d, c: %d)", *h, len(h.b), cap(h.b))
+}
+
+func (h *history) freeHuffDecoder() {
if h.huffTree != nil {
if h.dict == nil || h.dict.litEnc != h.huffTree {
huffDecoderPool.Put(h.huffTree)
+ h.huffTree = nil
}
}
- h.huffTree = nil
- h.dict = nil
- //printf("history created: %+v (l: %d, c: %d)", *h, len(h.b), cap(h.b))
}
func (h *history) setDict(dict *dict) {
diff --git a/vendor/github.com/klauspost/compress/zstd/internal/xxhash/README.md b/vendor/github.com/klauspost/compress/zstd/internal/xxhash/README.md
index 69aa3bb58..777290d44 100644
--- a/vendor/github.com/klauspost/compress/zstd/internal/xxhash/README.md
+++ b/vendor/github.com/klauspost/compress/zstd/internal/xxhash/README.md
@@ -2,12 +2,7 @@
VENDORED: Go to [github.com/cespare/xxhash](https://github.com/cespare/xxhash) for original package.
-
-[![GoDoc](https://godoc.org/github.com/cespare/xxhash?status.svg)](https://godoc.org/github.com/cespare/xxhash)
-[![Build Status](https://travis-ci.org/cespare/xxhash.svg?branch=master)](https://travis-ci.org/cespare/xxhash)
-
-xxhash is a Go implementation of the 64-bit
-[xxHash](http://cyan4973.github.io/xxHash/) algorithm, XXH64. This is a
+xxhash is a Go implementation of the 64-bit [xxHash] algorithm, XXH64. This is a
high-quality hashing algorithm that is much faster than anything in the Go
standard library.
@@ -28,31 +23,49 @@ func (*Digest) WriteString(string) (int, error)
func (*Digest) Sum64() uint64
```
-This implementation provides a fast pure-Go implementation and an even faster
-assembly implementation for amd64.
+The package is written with optimized pure Go and also contains even faster
+assembly implementations for amd64 and arm64. If desired, the `purego` build tag
+opts into using the Go code even on those architectures.
+
+[xxHash]: http://cyan4973.github.io/xxHash/
+
+## Compatibility
+
+This package is in a module and the latest code is in version 2 of the module.
+You need a version of Go with at least "minimal module compatibility" to use
+github.com/cespare/xxhash/v2:
+
+* 1.9.7+ for Go 1.9
+* 1.10.3+ for Go 1.10
+* Go 1.11 or later
+
+I recommend using the latest release of Go.
## Benchmarks
Here are some quick benchmarks comparing the pure-Go and assembly
implementations of Sum64.
-| input size | purego | asm |
-| --- | --- | --- |
-| 5 B | 979.66 MB/s | 1291.17 MB/s |
-| 100 B | 7475.26 MB/s | 7973.40 MB/s |
-| 4 KB | 17573.46 MB/s | 17602.65 MB/s |
-| 10 MB | 17131.46 MB/s | 17142.16 MB/s |
+| input size | purego | asm |
+| ---------- | --------- | --------- |
+| 4 B | 1.3 GB/s | 1.2 GB/s |
+| 16 B | 2.9 GB/s | 3.5 GB/s |
+| 100 B | 6.9 GB/s | 8.1 GB/s |
+| 4 KB | 11.7 GB/s | 16.7 GB/s |
+| 10 MB | 12.0 GB/s | 17.3 GB/s |
-These numbers were generated on Ubuntu 18.04 with an Intel i7-8700K CPU using
-the following commands under Go 1.11.2:
+These numbers were generated on Ubuntu 20.04 with an Intel Xeon Platinum 8252C
+CPU using the following commands under Go 1.19.2:
```
-$ go test -tags purego -benchtime 10s -bench '/xxhash,direct,bytes'
-$ go test -benchtime 10s -bench '/xxhash,direct,bytes'
+benchstat <(go test -tags purego -benchtime 500ms -count 15 -bench 'Sum64$')
+benchstat <(go test -benchtime 500ms -count 15 -bench 'Sum64$')
```
## Projects using this package
- [InfluxDB](https://github.com/influxdata/influxdb)
- [Prometheus](https://github.com/prometheus/prometheus)
+- [VictoriaMetrics](https://github.com/VictoriaMetrics/VictoriaMetrics)
- [FreeCache](https://github.com/coocood/freecache)
+- [FastCache](https://github.com/VictoriaMetrics/fastcache)
diff --git a/vendor/github.com/klauspost/compress/zstd/internal/xxhash/xxhash.go b/vendor/github.com/klauspost/compress/zstd/internal/xxhash/xxhash.go
index 2c112a0ab..fc40c8200 100644
--- a/vendor/github.com/klauspost/compress/zstd/internal/xxhash/xxhash.go
+++ b/vendor/github.com/klauspost/compress/zstd/internal/xxhash/xxhash.go
@@ -18,19 +18,11 @@ const (
prime5 uint64 = 2870177450012600261
)
-// NOTE(caleb): I'm using both consts and vars of the primes. Using consts where
-// possible in the Go code is worth a small (but measurable) performance boost
-// by avoiding some MOVQs. Vars are needed for the asm and also are useful for
-// convenience in the Go code in a few places where we need to intentionally
-// avoid constant arithmetic (e.g., v1 := prime1 + prime2 fails because the
-// result overflows a uint64).
-var (
- prime1v = prime1
- prime2v = prime2
- prime3v = prime3
- prime4v = prime4
- prime5v = prime5
-)
+// Store the primes in an array as well.
+//
+// The consts are used when possible in Go code to avoid MOVs but we need a
+// contiguous array of the assembly code.
+var primes = [...]uint64{prime1, prime2, prime3, prime4, prime5}
// Digest implements hash.Hash64.
type Digest struct {
@@ -52,10 +44,10 @@ func New() *Digest {
// Reset clears the Digest's state so that it can be reused.
func (d *Digest) Reset() {
- d.v1 = prime1v + prime2
+ d.v1 = primes[0] + prime2
d.v2 = prime2
d.v3 = 0
- d.v4 = -prime1v
+ d.v4 = -primes[0]
d.total = 0
d.n = 0
}
@@ -71,21 +63,23 @@ func (d *Digest) Write(b []byte) (n int, err error) {
n = len(b)
d.total += uint64(n)
+ memleft := d.mem[d.n&(len(d.mem)-1):]
+
if d.n+n < 32 {
// This new data doesn't even fill the current block.
- copy(d.mem[d.n:], b)
+ copy(memleft, b)
d.n += n
return
}
if d.n > 0 {
// Finish off the partial block.
- copy(d.mem[d.n:], b)
+ c := copy(memleft, b)
d.v1 = round(d.v1, u64(d.mem[0:8]))
d.v2 = round(d.v2, u64(d.mem[8:16]))
d.v3 = round(d.v3, u64(d.mem[16:24]))
d.v4 = round(d.v4, u64(d.mem[24:32]))
- b = b[32-d.n:]
+ b = b[c:]
d.n = 0
}
@@ -135,21 +129,20 @@ func (d *Digest) Sum64() uint64 {
h += d.total
- i, end := 0, d.n
- for ; i+8 <= end; i += 8 {
- k1 := round(0, u64(d.mem[i:i+8]))
+ b := d.mem[:d.n&(len(d.mem)-1)]
+ for ; len(b) >= 8; b = b[8:] {
+ k1 := round(0, u64(b[:8]))
h ^= k1
h = rol27(h)*prime1 + prime4
}
- if i+4 <= end {
- h ^= uint64(u32(d.mem[i:i+4])) * prime1
+ if len(b) >= 4 {
+ h ^= uint64(u32(b[:4])) * prime1
h = rol23(h)*prime2 + prime3
- i += 4
+ b = b[4:]
}
- for i < end {
- h ^= uint64(d.mem[i]) * prime5
+ for ; len(b) > 0; b = b[1:] {
+ h ^= uint64(b[0]) * prime5
h = rol11(h) * prime1
- i++
}
h ^= h >> 33
diff --git a/vendor/github.com/klauspost/compress/zstd/internal/xxhash/xxhash_amd64.s b/vendor/github.com/klauspost/compress/zstd/internal/xxhash/xxhash_amd64.s
index cea178561..ddb63aa91 100644
--- a/vendor/github.com/klauspost/compress/zstd/internal/xxhash/xxhash_amd64.s
+++ b/vendor/github.com/klauspost/compress/zstd/internal/xxhash/xxhash_amd64.s
@@ -1,3 +1,4 @@
+//go:build !appengine && gc && !purego && !noasm
// +build !appengine
// +build gc
// +build !purego
@@ -5,212 +6,205 @@
#include "textflag.h"
-// Register allocation:
-// AX h
-// SI pointer to advance through b
-// DX n
-// BX loop end
-// R8 v1, k1
-// R9 v2
-// R10 v3
-// R11 v4
-// R12 tmp
-// R13 prime1v
-// R14 prime2v
-// DI prime4v
-
-// round reads from and advances the buffer pointer in SI.
-// It assumes that R13 has prime1v and R14 has prime2v.
-#define round(r) \
- MOVQ (SI), R12 \
- ADDQ $8, SI \
- IMULQ R14, R12 \
- ADDQ R12, r \
- ROLQ $31, r \
- IMULQ R13, r
-
-// mergeRound applies a merge round on the two registers acc and val.
-// It assumes that R13 has prime1v, R14 has prime2v, and DI has prime4v.
-#define mergeRound(acc, val) \
- IMULQ R14, val \
- ROLQ $31, val \
- IMULQ R13, val \
- XORQ val, acc \
- IMULQ R13, acc \
- ADDQ DI, acc
+// Registers:
+#define h AX
+#define d AX
+#define p SI // pointer to advance through b
+#define n DX
+#define end BX // loop end
+#define v1 R8
+#define v2 R9
+#define v3 R10
+#define v4 R11
+#define x R12
+#define prime1 R13
+#define prime2 R14
+#define prime4 DI
+
+#define round(acc, x) \
+ IMULQ prime2, x \
+ ADDQ x, acc \
+ ROLQ $31, acc \
+ IMULQ prime1, acc
+
+// round0 performs the operation x = round(0, x).
+#define round0(x) \
+ IMULQ prime2, x \
+ ROLQ $31, x \
+ IMULQ prime1, x
+
+// mergeRound applies a merge round on the two registers acc and x.
+// It assumes that prime1, prime2, and prime4 have been loaded.
+#define mergeRound(acc, x) \
+ round0(x) \
+ XORQ x, acc \
+ IMULQ prime1, acc \
+ ADDQ prime4, acc
+
+// blockLoop processes as many 32-byte blocks as possible,
+// updating v1, v2, v3, and v4. It assumes that there is at least one block
+// to process.
+#define blockLoop() \
+loop: \
+ MOVQ +0(p), x \
+ round(v1, x) \
+ MOVQ +8(p), x \
+ round(v2, x) \
+ MOVQ +16(p), x \
+ round(v3, x) \
+ MOVQ +24(p), x \
+ round(v4, x) \
+ ADDQ $32, p \
+ CMPQ p, end \
+ JLE loop
// func Sum64(b []byte) uint64
-TEXT ·Sum64(SB), NOSPLIT, $0-32
+TEXT ·Sum64(SB), NOSPLIT|NOFRAME, $0-32
// Load fixed primes.
- MOVQ ·prime1v(SB), R13
- MOVQ ·prime2v(SB), R14
- MOVQ ·prime4v(SB), DI
+ MOVQ ·primes+0(SB), prime1
+ MOVQ ·primes+8(SB), prime2
+ MOVQ ·primes+24(SB), prime4
// Load slice.
- MOVQ b_base+0(FP), SI
- MOVQ b_len+8(FP), DX
- LEAQ (SI)(DX*1), BX
+ MOVQ b_base+0(FP), p
+ MOVQ b_len+8(FP), n
+ LEAQ (p)(n*1), end
// The first loop limit will be len(b)-32.
- SUBQ $32, BX
+ SUBQ $32, end
// Check whether we have at least one block.
- CMPQ DX, $32
+ CMPQ n, $32
JLT noBlocks
// Set up initial state (v1, v2, v3, v4).
- MOVQ R13, R8
- ADDQ R14, R8
- MOVQ R14, R9
- XORQ R10, R10
- XORQ R11, R11
- SUBQ R13, R11
-
- // Loop until SI > BX.
-blockLoop:
- round(R8)
- round(R9)
- round(R10)
- round(R11)
-
- CMPQ SI, BX
- JLE blockLoop
-
- MOVQ R8, AX
- ROLQ $1, AX
- MOVQ R9, R12
- ROLQ $7, R12
- ADDQ R12, AX
- MOVQ R10, R12
- ROLQ $12, R12
- ADDQ R12, AX
- MOVQ R11, R12
- ROLQ $18, R12
- ADDQ R12, AX
-
- mergeRound(AX, R8)
- mergeRound(AX, R9)
- mergeRound(AX, R10)
- mergeRound(AX, R11)
+ MOVQ prime1, v1
+ ADDQ prime2, v1
+ MOVQ prime2, v2
+ XORQ v3, v3
+ XORQ v4, v4
+ SUBQ prime1, v4
+
+ blockLoop()
+
+ MOVQ v1, h
+ ROLQ $1, h
+ MOVQ v2, x
+ ROLQ $7, x
+ ADDQ x, h
+ MOVQ v3, x
+ ROLQ $12, x
+ ADDQ x, h
+ MOVQ v4, x
+ ROLQ $18, x
+ ADDQ x, h
+
+ mergeRound(h, v1)
+ mergeRound(h, v2)
+ mergeRound(h, v3)
+ mergeRound(h, v4)
JMP afterBlocks
noBlocks:
- MOVQ ·prime5v(SB), AX
+ MOVQ ·primes+32(SB), h
afterBlocks:
- ADDQ DX, AX
-
- // Right now BX has len(b)-32, and we want to loop until SI > len(b)-8.
- ADDQ $24, BX
-
- CMPQ SI, BX
- JG fourByte
-
-wordLoop:
- // Calculate k1.
- MOVQ (SI), R8
- ADDQ $8, SI
- IMULQ R14, R8
- ROLQ $31, R8
- IMULQ R13, R8
-
- XORQ R8, AX
- ROLQ $27, AX
- IMULQ R13, AX
- ADDQ DI, AX
-
- CMPQ SI, BX
- JLE wordLoop
-
-fourByte:
- ADDQ $4, BX
- CMPQ SI, BX
- JG singles
-
- MOVL (SI), R8
- ADDQ $4, SI
- IMULQ R13, R8
- XORQ R8, AX
-
- ROLQ $23, AX
- IMULQ R14, AX
- ADDQ ·prime3v(SB), AX
-
-singles:
- ADDQ $4, BX
- CMPQ SI, BX
+ ADDQ n, h
+
+ ADDQ $24, end
+ CMPQ p, end
+ JG try4
+
+loop8:
+ MOVQ (p), x
+ ADDQ $8, p
+ round0(x)
+ XORQ x, h
+ ROLQ $27, h
+ IMULQ prime1, h
+ ADDQ prime4, h
+
+ CMPQ p, end
+ JLE loop8
+
+try4:
+ ADDQ $4, end
+ CMPQ p, end
+ JG try1
+
+ MOVL (p), x
+ ADDQ $4, p
+ IMULQ prime1, x
+ XORQ x, h
+
+ ROLQ $23, h
+ IMULQ prime2, h
+ ADDQ ·primes+16(SB), h
+
+try1:
+ ADDQ $4, end
+ CMPQ p, end
JGE finalize
-singlesLoop:
- MOVBQZX (SI), R12
- ADDQ $1, SI
- IMULQ ·prime5v(SB), R12
- XORQ R12, AX
+loop1:
+ MOVBQZX (p), x
+ ADDQ $1, p
+ IMULQ ·primes+32(SB), x
+ XORQ x, h
+ ROLQ $11, h
+ IMULQ prime1, h
- ROLQ $11, AX
- IMULQ R13, AX
-
- CMPQ SI, BX
- JL singlesLoop
+ CMPQ p, end
+ JL loop1
finalize:
- MOVQ AX, R12
- SHRQ $33, R12
- XORQ R12, AX
- IMULQ R14, AX
- MOVQ AX, R12
- SHRQ $29, R12
- XORQ R12, AX
- IMULQ ·prime3v(SB), AX
- MOVQ AX, R12
- SHRQ $32, R12
- XORQ R12, AX
-
- MOVQ AX, ret+24(FP)
+ MOVQ h, x
+ SHRQ $33, x
+ XORQ x, h
+ IMULQ prime2, h
+ MOVQ h, x
+ SHRQ $29, x
+ XORQ x, h
+ IMULQ ·primes+16(SB), h
+ MOVQ h, x
+ SHRQ $32, x
+ XORQ x, h
+
+ MOVQ h, ret+24(FP)
RET
-// writeBlocks uses the same registers as above except that it uses AX to store
-// the d pointer.
-
// func writeBlocks(d *Digest, b []byte) int
-TEXT ·writeBlocks(SB), NOSPLIT, $0-40
+TEXT ·writeBlocks(SB), NOSPLIT|NOFRAME, $0-40
// Load fixed primes needed for round.
- MOVQ ·prime1v(SB), R13
- MOVQ ·prime2v(SB), R14
+ MOVQ ·primes+0(SB), prime1
+ MOVQ ·primes+8(SB), prime2
// Load slice.
- MOVQ b_base+8(FP), SI
- MOVQ b_len+16(FP), DX
- LEAQ (SI)(DX*1), BX
- SUBQ $32, BX
+ MOVQ b_base+8(FP), p
+ MOVQ b_len+16(FP), n
+ LEAQ (p)(n*1), end
+ SUBQ $32, end
// Load vN from d.
- MOVQ d+0(FP), AX
- MOVQ 0(AX), R8 // v1
- MOVQ 8(AX), R9 // v2
- MOVQ 16(AX), R10 // v3
- MOVQ 24(AX), R11 // v4
+ MOVQ s+0(FP), d
+ MOVQ 0(d), v1
+ MOVQ 8(d), v2
+ MOVQ 16(d), v3
+ MOVQ 24(d), v4
// We don't need to check the loop condition here; this function is
// always called with at least one block of data to process.
-blockLoop:
- round(R8)
- round(R9)
- round(R10)
- round(R11)
-
- CMPQ SI, BX
- JLE blockLoop
+ blockLoop()
// Copy vN back to d.
- MOVQ R8, 0(AX)
- MOVQ R9, 8(AX)
- MOVQ R10, 16(AX)
- MOVQ R11, 24(AX)
-
- // The number of bytes written is SI minus the old base pointer.
- SUBQ b_base+8(FP), SI
- MOVQ SI, ret+32(FP)
+ MOVQ v1, 0(d)
+ MOVQ v2, 8(d)
+ MOVQ v3, 16(d)
+ MOVQ v4, 24(d)
+
+ // The number of bytes written is p minus the old base pointer.
+ SUBQ b_base+8(FP), p
+ MOVQ p, ret+32(FP)
RET
diff --git a/vendor/github.com/klauspost/compress/zstd/internal/xxhash/xxhash_arm64.s b/vendor/github.com/klauspost/compress/zstd/internal/xxhash/xxhash_arm64.s
index 4d64a17d6..17901e080 100644
--- a/vendor/github.com/klauspost/compress/zstd/internal/xxhash/xxhash_arm64.s
+++ b/vendor/github.com/klauspost/compress/zstd/internal/xxhash/xxhash_arm64.s
@@ -1,13 +1,17 @@
-// +build gc,!purego,!noasm
+//go:build !appengine && gc && !purego && !noasm
+// +build !appengine
+// +build gc
+// +build !purego
+// +build !noasm
#include "textflag.h"
-// Register allocation.
+// Registers:
#define digest R1
-#define h R2 // Return value.
-#define p R3 // Input pointer.
-#define len R4
-#define nblocks R5 // len / 32.
+#define h R2 // return value
+#define p R3 // input pointer
+#define n R4 // input length
+#define nblocks R5 // n / 32
#define prime1 R7
#define prime2 R8
#define prime3 R9
@@ -25,60 +29,52 @@
#define round(acc, x) \
MADD prime2, acc, x, acc \
ROR $64-31, acc \
- MUL prime1, acc \
+ MUL prime1, acc
-// x = round(0, x).
+// round0 performs the operation x = round(0, x).
#define round0(x) \
MUL prime2, x \
ROR $64-31, x \
- MUL prime1, x \
-
-#define mergeRound(x) \
- round0(x) \
- EOR x, h \
- MADD h, prime4, prime1, h \
-
-// Update v[1-4] with 32-byte blocks. Assumes len >= 32.
-#define blocksLoop() \
- LSR $5, len, nblocks \
- PCALIGN $16 \
- loop: \
- LDP.P 32(p), (x1, x2) \
- round(v1, x1) \
- LDP -16(p), (x3, x4) \
- round(v2, x2) \
- SUB $1, nblocks \
- round(v3, x3) \
- round(v4, x4) \
- CBNZ nblocks, loop \
-
-// The primes are repeated here to ensure that they're stored
-// in a contiguous array, so we can load them with LDP.
-DATA primes<> +0(SB)/8, $11400714785074694791
-DATA primes<> +8(SB)/8, $14029467366897019727
-DATA primes<>+16(SB)/8, $1609587929392839161
-DATA primes<>+24(SB)/8, $9650029242287828579
-DATA primes<>+32(SB)/8, $2870177450012600261
-GLOBL primes<>(SB), NOPTR+RODATA, $40
+ MUL prime1, x
+
+#define mergeRound(acc, x) \
+ round0(x) \
+ EOR x, acc \
+ MADD acc, prime4, prime1, acc
+
+// blockLoop processes as many 32-byte blocks as possible,
+// updating v1, v2, v3, and v4. It assumes that n >= 32.
+#define blockLoop() \
+ LSR $5, n, nblocks \
+ PCALIGN $16 \
+ loop: \
+ LDP.P 16(p), (x1, x2) \
+ LDP.P 16(p), (x3, x4) \
+ round(v1, x1) \
+ round(v2, x2) \
+ round(v3, x3) \
+ round(v4, x4) \
+ SUB $1, nblocks \
+ CBNZ nblocks, loop
// func Sum64(b []byte) uint64
-TEXT ·Sum64(SB), NOFRAME+NOSPLIT, $0-32
- LDP b_base+0(FP), (p, len)
+TEXT ·Sum64(SB), NOSPLIT|NOFRAME, $0-32
+ LDP b_base+0(FP), (p, n)
- LDP primes<> +0(SB), (prime1, prime2)
- LDP primes<>+16(SB), (prime3, prime4)
- MOVD primes<>+32(SB), prime5
+ LDP ·primes+0(SB), (prime1, prime2)
+ LDP ·primes+16(SB), (prime3, prime4)
+ MOVD ·primes+32(SB), prime5
- CMP $32, len
- CSEL LO, prime5, ZR, h // if len < 32 { h = prime5 } else { h = 0 }
- BLO afterLoop
+ CMP $32, n
+ CSEL LT, prime5, ZR, h // if n < 32 { h = prime5 } else { h = 0 }
+ BLT afterLoop
ADD prime1, prime2, v1
MOVD prime2, v2
MOVD $0, v3
NEG prime1, v4
- blocksLoop()
+ blockLoop()
ROR $64-1, v1, x1
ROR $64-7, v2, x2
@@ -88,71 +84,75 @@ TEXT ·Sum64(SB), NOFRAME+NOSPLIT, $0-32
ADD x3, x4
ADD x2, x4, h
- mergeRound(v1)
- mergeRound(v2)
- mergeRound(v3)
- mergeRound(v4)
+ mergeRound(h, v1)
+ mergeRound(h, v2)
+ mergeRound(h, v3)
+ mergeRound(h, v4)
afterLoop:
- ADD len, h
+ ADD n, h
- TBZ $4, len, try8
+ TBZ $4, n, try8
LDP.P 16(p), (x1, x2)
round0(x1)
+
+ // NOTE: here and below, sequencing the EOR after the ROR (using a
+ // rotated register) is worth a small but measurable speedup for small
+ // inputs.
ROR $64-27, h
EOR x1 @> 64-27, h, h
MADD h, prime4, prime1, h
round0(x2)
ROR $64-27, h
- EOR x2 @> 64-27, h
+ EOR x2 @> 64-27, h, h
MADD h, prime4, prime1, h
try8:
- TBZ $3, len, try4
+ TBZ $3, n, try4
MOVD.P 8(p), x1
round0(x1)
ROR $64-27, h
- EOR x1 @> 64-27, h
+ EOR x1 @> 64-27, h, h
MADD h, prime4, prime1, h
try4:
- TBZ $2, len, try2
+ TBZ $2, n, try2
MOVWU.P 4(p), x2
MUL prime1, x2
ROR $64-23, h
- EOR x2 @> 64-23, h
+ EOR x2 @> 64-23, h, h
MADD h, prime3, prime2, h
try2:
- TBZ $1, len, try1
+ TBZ $1, n, try1
MOVHU.P 2(p), x3
AND $255, x3, x1
LSR $8, x3, x2
MUL prime5, x1
ROR $64-11, h
- EOR x1 @> 64-11, h
+ EOR x1 @> 64-11, h, h
MUL prime1, h
MUL prime5, x2
ROR $64-11, h
- EOR x2 @> 64-11, h
+ EOR x2 @> 64-11, h, h
MUL prime1, h
try1:
- TBZ $0, len, end
+ TBZ $0, n, finalize
MOVBU (p), x4
MUL prime5, x4
ROR $64-11, h
- EOR x4 @> 64-11, h
+ EOR x4 @> 64-11, h, h
MUL prime1, h
-end:
+finalize:
EOR h >> 33, h
MUL prime2, h
EOR h >> 29, h
@@ -163,24 +163,22 @@ end:
RET
// func writeBlocks(d *Digest, b []byte) int
-//
-// Assumes len(b) >= 32.
-TEXT ·writeBlocks(SB), NOFRAME+NOSPLIT, $0-40
- LDP primes<>(SB), (prime1, prime2)
+TEXT ·writeBlocks(SB), NOSPLIT|NOFRAME, $0-40
+ LDP ·primes+0(SB), (prime1, prime2)
// Load state. Assume v[1-4] are stored contiguously.
MOVD d+0(FP), digest
LDP 0(digest), (v1, v2)
LDP 16(digest), (v3, v4)
- LDP b_base+8(FP), (p, len)
+ LDP b_base+8(FP), (p, n)
- blocksLoop()
+ blockLoop()
// Store updated state.
STP (v1, v2), 0(digest)
STP (v3, v4), 16(digest)
- BIC $31, len
- MOVD len, ret+32(FP)
+ BIC $31, n
+ MOVD n, ret+32(FP)
RET
diff --git a/vendor/github.com/klauspost/compress/zstd/internal/xxhash/xxhash_asm.go b/vendor/github.com/klauspost/compress/zstd/internal/xxhash/xxhash_asm.go
index 1a1fac9c2..d4221edf4 100644
--- a/vendor/github.com/klauspost/compress/zstd/internal/xxhash/xxhash_asm.go
+++ b/vendor/github.com/klauspost/compress/zstd/internal/xxhash/xxhash_asm.go
@@ -13,4 +13,4 @@ package xxhash
func Sum64(b []byte) uint64
//go:noescape
-func writeBlocks(d *Digest, b []byte) int
+func writeBlocks(s *Digest, b []byte) int
diff --git a/vendor/github.com/klauspost/compress/zstd/internal/xxhash/xxhash_other.go b/vendor/github.com/klauspost/compress/zstd/internal/xxhash/xxhash_other.go
index 209cb4a99..0be16cefc 100644
--- a/vendor/github.com/klauspost/compress/zstd/internal/xxhash/xxhash_other.go
+++ b/vendor/github.com/klauspost/compress/zstd/internal/xxhash/xxhash_other.go
@@ -15,10 +15,10 @@ func Sum64(b []byte) uint64 {
var h uint64
if n >= 32 {
- v1 := prime1v + prime2
+ v1 := primes[0] + prime2
v2 := prime2
v3 := uint64(0)
- v4 := -prime1v
+ v4 := -primes[0]
for len(b) >= 32 {
v1 = round(v1, u64(b[0:8:len(b)]))
v2 = round(v2, u64(b[8:16:len(b)]))
@@ -37,19 +37,18 @@ func Sum64(b []byte) uint64 {
h += uint64(n)
- i, end := 0, len(b)
- for ; i+8 <= end; i += 8 {
- k1 := round(0, u64(b[i:i+8:len(b)]))
+ for ; len(b) >= 8; b = b[8:] {
+ k1 := round(0, u64(b[:8]))
h ^= k1
h = rol27(h)*prime1 + prime4
}
- if i+4 <= end {
- h ^= uint64(u32(b[i:i+4:len(b)])) * prime1
+ if len(b) >= 4 {
+ h ^= uint64(u32(b[:4])) * prime1
h = rol23(h)*prime2 + prime3
- i += 4
+ b = b[4:]
}
- for ; i < end; i++ {
- h ^= uint64(b[i]) * prime5
+ for ; len(b) > 0; b = b[1:] {
+ h ^= uint64(b[0]) * prime5
h = rol11(h) * prime1
}
diff --git a/vendor/github.com/klauspost/compress/zstd/seqdec.go b/vendor/github.com/klauspost/compress/zstd/seqdec.go
index 819f1461b..9405fcf10 100644
--- a/vendor/github.com/klauspost/compress/zstd/seqdec.go
+++ b/vendor/github.com/klauspost/compress/zstd/seqdec.go
@@ -73,6 +73,7 @@ type sequenceDecs struct {
seqSize int
windowSize int
maxBits uint8
+ maxSyncLen uint64
}
// initialize all 3 decoders from the stream input.
@@ -98,153 +99,28 @@ func (s *sequenceDecs) initialize(br *bitReader, hist *history, out []byte) erro
return nil
}
-// decode sequences from the stream with the provided history.
-func (s *sequenceDecs) decode(seqs []seqVals) error {
- br := s.br
-
- // Grab full sizes tables, to avoid bounds checks.
- llTable, mlTable, ofTable := s.litLengths.fse.dt[:maxTablesize], s.matchLengths.fse.dt[:maxTablesize], s.offsets.fse.dt[:maxTablesize]
- llState, mlState, ofState := s.litLengths.state.state, s.matchLengths.state.state, s.offsets.state.state
- s.seqSize = 0
- litRemain := len(s.literals)
- maxBlockSize := maxCompressedBlockSize
- if s.windowSize < maxBlockSize {
- maxBlockSize = s.windowSize
- }
- for i := range seqs {
- var ll, mo, ml int
- if br.off > 4+((maxOffsetBits+16+16)>>3) {
- // inlined function:
- // ll, mo, ml = s.nextFast(br, llState, mlState, ofState)
-
- // Final will not read from stream.
- var llB, mlB, moB uint8
- ll, llB = llState.final()
- ml, mlB = mlState.final()
- mo, moB = ofState.final()
-
- // extra bits are stored in reverse order.
- br.fillFast()
- mo += br.getBits(moB)
- if s.maxBits > 32 {
- br.fillFast()
- }
- ml += br.getBits(mlB)
- ll += br.getBits(llB)
-
- if moB > 1 {
- s.prevOffset[2] = s.prevOffset[1]
- s.prevOffset[1] = s.prevOffset[0]
- s.prevOffset[0] = mo
- } else {
- // mo = s.adjustOffset(mo, ll, moB)
- // Inlined for rather big speedup
- if ll == 0 {
- // There is an exception though, when current sequence's literals_length = 0.
- // In this case, repeated offsets are shifted by one, so an offset_value of 1 means Repeated_Offset2,
- // an offset_value of 2 means Repeated_Offset3, and an offset_value of 3 means Repeated_Offset1 - 1_byte.
- mo++
- }
-
- if mo == 0 {
- mo = s.prevOffset[0]
- } else {
- var temp int
- if mo == 3 {
- temp = s.prevOffset[0] - 1
- } else {
- temp = s.prevOffset[mo]
- }
-
- if temp == 0 {
- // 0 is not valid; input is corrupted; force offset to 1
- println("WARNING: temp was 0")
- temp = 1
- }
-
- if mo != 1 {
- s.prevOffset[2] = s.prevOffset[1]
- }
- s.prevOffset[1] = s.prevOffset[0]
- s.prevOffset[0] = temp
- mo = temp
- }
- }
- br.fillFast()
- } else {
- if br.overread() {
- if debugDecoder {
- printf("reading sequence %d, exceeded available data\n", i)
- }
- return io.ErrUnexpectedEOF
- }
- ll, mo, ml = s.next(br, llState, mlState, ofState)
- br.fill()
- }
-
- if debugSequences {
- println("Seq", i, "Litlen:", ll, "mo:", mo, "(abs) ml:", ml)
- }
- // Evaluate.
- // We might be doing this async, so do it early.
- if mo == 0 && ml > 0 {
- return fmt.Errorf("zero matchoff and matchlen (%d) > 0", ml)
- }
- if ml > maxMatchLen {
- return fmt.Errorf("match len (%d) bigger than max allowed length", ml)
- }
- s.seqSize += ll + ml
- if s.seqSize > maxBlockSize {
- return fmt.Errorf("output (%d) bigger than max block size (%d)", s.seqSize, maxBlockSize)
- }
- litRemain -= ll
- if litRemain < 0 {
- return fmt.Errorf("unexpected literal count, want %d bytes, but only %d is available", ll, litRemain+ll)
- }
- seqs[i] = seqVals{
- ll: ll,
- ml: ml,
- mo: mo,
- }
- if i == len(seqs)-1 {
- // This is the last sequence, so we shouldn't update state.
- break
- }
-
- // Manually inlined, ~ 5-20% faster
- // Update all 3 states at once. Approx 20% faster.
- nBits := llState.nbBits() + mlState.nbBits() + ofState.nbBits()
- if nBits == 0 {
- llState = llTable[llState.newState()&maxTableMask]
- mlState = mlTable[mlState.newState()&maxTableMask]
- ofState = ofTable[ofState.newState()&maxTableMask]
- } else {
- bits := br.get32BitsFast(nBits)
- lowBits := uint16(bits >> ((ofState.nbBits() + mlState.nbBits()) & 31))
- llState = llTable[(llState.newState()+lowBits)&maxTableMask]
-
- lowBits = uint16(bits >> (ofState.nbBits() & 31))
- lowBits &= bitMask[mlState.nbBits()&15]
- mlState = mlTable[(mlState.newState()+lowBits)&maxTableMask]
-
- lowBits = uint16(bits) & bitMask[ofState.nbBits()&15]
- ofState = ofTable[(ofState.newState()+lowBits)&maxTableMask]
- }
+func (s *sequenceDecs) freeDecoders() {
+ if f := s.litLengths.fse; f != nil && !f.preDefined {
+ fseDecoderPool.Put(f)
+ s.litLengths.fse = nil
}
- s.seqSize += litRemain
- if s.seqSize > maxBlockSize {
- return fmt.Errorf("output (%d) bigger than max block size (%d)", s.seqSize, maxBlockSize)
+ if f := s.offsets.fse; f != nil && !f.preDefined {
+ fseDecoderPool.Put(f)
+ s.offsets.fse = nil
}
- err := br.close()
- if err != nil {
- printf("Closing sequences: %v, %+v\n", err, *br)
+ if f := s.matchLengths.fse; f != nil && !f.preDefined {
+ fseDecoderPool.Put(f)
+ s.matchLengths.fse = nil
}
- return err
}
// execute will execute the decoded sequence with the provided history.
// The sequence must be evaluated before being sent.
func (s *sequenceDecs) execute(seqs []seqVals, hist []byte) error {
+ if len(s.dict) == 0 {
+ return s.executeSimple(seqs, hist)
+ }
+
// Ensure we have enough output size...
if len(s.out)+s.seqSize > cap(s.out) {
addBytes := s.seqSize + len(s.out)
@@ -327,6 +203,7 @@ func (s *sequenceDecs) execute(seqs []seqVals, hist []byte) error {
}
}
}
+
// Add final literals
copy(out[t:], s.literals)
if debugDecoder {
@@ -341,23 +218,30 @@ func (s *sequenceDecs) execute(seqs []seqVals, hist []byte) error {
}
// decode sequences from the stream with the provided history.
-func (s *sequenceDecs) decodeSync(history *history) error {
+func (s *sequenceDecs) decodeSync(hist []byte) error {
+ supported, err := s.decodeSyncSimple(hist)
+ if supported {
+ return err
+ }
+
br := s.br
seqs := s.nSeqs
startSize := len(s.out)
// Grab full sizes tables, to avoid bounds checks.
llTable, mlTable, ofTable := s.litLengths.fse.dt[:maxTablesize], s.matchLengths.fse.dt[:maxTablesize], s.offsets.fse.dt[:maxTablesize]
llState, mlState, ofState := s.litLengths.state.state, s.matchLengths.state.state, s.offsets.state.state
- hist := history.b[history.ignoreBuffer:]
out := s.out
maxBlockSize := maxCompressedBlockSize
if s.windowSize < maxBlockSize {
maxBlockSize = s.windowSize
}
+ if debugDecoder {
+ println("decodeSync: decoding", seqs, "sequences", br.remain(), "bits remain on stream")
+ }
for i := seqs - 1; i >= 0; i-- {
if br.overread() {
- printf("reading sequence %d, exceeded available data\n", seqs-i)
+ printf("reading sequence %d, exceeded available data. Overread by %d\n", seqs-i, -br.remain())
return io.ErrUnexpectedEOF
}
var ll, mo, ml int
@@ -433,7 +317,7 @@ func (s *sequenceDecs) decodeSync(history *history) error {
}
size := ll + ml + len(out)
if size-startSize > maxBlockSize {
- return fmt.Errorf("output (%d) bigger than max block size (%d)", size, maxBlockSize)
+ return fmt.Errorf("output bigger than max block size (%d)", maxBlockSize)
}
if size > cap(out) {
// Not enough size, which can happen under high volume block streaming conditions
@@ -463,13 +347,13 @@ func (s *sequenceDecs) decodeSync(history *history) error {
if mo > len(out)+len(hist) || mo > s.windowSize {
if len(s.dict) == 0 {
- return fmt.Errorf("match offset (%d) bigger than current history (%d)", mo, len(out)+len(hist))
+ return fmt.Errorf("match offset (%d) bigger than current history (%d)", mo, len(out)+len(hist)-startSize)
}
// we may be in dictionary.
dictO := len(s.dict) - (mo - (len(out) + len(hist)))
if dictO < 0 || dictO >= len(s.dict) {
- return fmt.Errorf("match offset (%d) bigger than current history (%d)", mo, len(out)+len(hist))
+ return fmt.Errorf("match offset (%d) bigger than current history (%d)", mo, len(out)+len(hist)-startSize)
}
end := dictO + ml
if end > len(s.dict) {
@@ -530,6 +414,7 @@ func (s *sequenceDecs) decodeSync(history *history) error {
ofState = ofTable[ofState.newState()&maxTableMask]
} else {
bits := br.get32BitsFast(nBits)
+
lowBits := uint16(bits >> ((ofState.nbBits() + mlState.nbBits()) & 31))
llState = llTable[(llState.newState()+lowBits)&maxTableMask]
@@ -542,9 +427,8 @@ func (s *sequenceDecs) decodeSync(history *history) error {
}
}
- // Check if space for literals
- if len(s.literals)+len(s.out)-startSize > maxBlockSize {
- return fmt.Errorf("output (%d) bigger than max block size (%d)", len(s.out), maxBlockSize)
+ if size := len(s.literals) + len(out) - startSize; size > maxBlockSize {
+ return fmt.Errorf("output bigger than max block size (%d)", maxBlockSize)
}
// Add final literals
@@ -552,16 +436,6 @@ func (s *sequenceDecs) decodeSync(history *history) error {
return br.close()
}
-// update states, at least 27 bits must be available.
-func (s *sequenceDecs) update(br *bitReader) {
- // Max 8 bits
- s.litLengths.state.next(br)
- // Max 9 bits
- s.matchLengths.state.next(br)
- // Max 8 bits
- s.offsets.state.next(br)
-}
-
var bitMask [16]uint16
func init() {
@@ -570,87 +444,6 @@ func init() {
}
}
-// update states, at least 27 bits must be available.
-func (s *sequenceDecs) updateAlt(br *bitReader) {
- // Update all 3 states at once. Approx 20% faster.
- a, b, c := s.litLengths.state.state, s.matchLengths.state.state, s.offsets.state.state
-
- nBits := a.nbBits() + b.nbBits() + c.nbBits()
- if nBits == 0 {
- s.litLengths.state.state = s.litLengths.state.dt[a.newState()]
- s.matchLengths.state.state = s.matchLengths.state.dt[b.newState()]
- s.offsets.state.state = s.offsets.state.dt[c.newState()]
- return
- }
- bits := br.get32BitsFast(nBits)
- lowBits := uint16(bits >> ((c.nbBits() + b.nbBits()) & 31))
- s.litLengths.state.state = s.litLengths.state.dt[a.newState()+lowBits]
-
- lowBits = uint16(bits >> (c.nbBits() & 31))
- lowBits &= bitMask[b.nbBits()&15]
- s.matchLengths.state.state = s.matchLengths.state.dt[b.newState()+lowBits]
-
- lowBits = uint16(bits) & bitMask[c.nbBits()&15]
- s.offsets.state.state = s.offsets.state.dt[c.newState()+lowBits]
-}
-
-// nextFast will return new states when there are at least 4 unused bytes left on the stream when done.
-func (s *sequenceDecs) nextFast(br *bitReader, llState, mlState, ofState decSymbol) (ll, mo, ml int) {
- // Final will not read from stream.
- ll, llB := llState.final()
- ml, mlB := mlState.final()
- mo, moB := ofState.final()
-
- // extra bits are stored in reverse order.
- br.fillFast()
- mo += br.getBits(moB)
- if s.maxBits > 32 {
- br.fillFast()
- }
- ml += br.getBits(mlB)
- ll += br.getBits(llB)
-
- if moB > 1 {
- s.prevOffset[2] = s.prevOffset[1]
- s.prevOffset[1] = s.prevOffset[0]
- s.prevOffset[0] = mo
- return
- }
- // mo = s.adjustOffset(mo, ll, moB)
- // Inlined for rather big speedup
- if ll == 0 {
- // There is an exception though, when current sequence's literals_length = 0.
- // In this case, repeated offsets are shifted by one, so an offset_value of 1 means Repeated_Offset2,
- // an offset_value of 2 means Repeated_Offset3, and an offset_value of 3 means Repeated_Offset1 - 1_byte.
- mo++
- }
-
- if mo == 0 {
- mo = s.prevOffset[0]
- return
- }
- var temp int
- if mo == 3 {
- temp = s.prevOffset[0] - 1
- } else {
- temp = s.prevOffset[mo]
- }
-
- if temp == 0 {
- // 0 is not valid; input is corrupted; force offset to 1
- println("temp was 0")
- temp = 1
- }
-
- if mo != 1 {
- s.prevOffset[2] = s.prevOffset[1]
- }
- s.prevOffset[1] = s.prevOffset[0]
- s.prevOffset[0] = temp
- mo = temp
- return
-}
-
func (s *sequenceDecs) next(br *bitReader, llState, mlState, ofState decSymbol) (ll, mo, ml int) {
// Final will not read from stream.
ll, llB := llState.final()
diff --git a/vendor/github.com/klauspost/compress/zstd/seqdec_amd64.go b/vendor/github.com/klauspost/compress/zstd/seqdec_amd64.go
new file mode 100644
index 000000000..8adabd828
--- /dev/null
+++ b/vendor/github.com/klauspost/compress/zstd/seqdec_amd64.go
@@ -0,0 +1,394 @@
+//go:build amd64 && !appengine && !noasm && gc
+// +build amd64,!appengine,!noasm,gc
+
+package zstd
+
+import (
+ "fmt"
+ "io"
+
+ "github.com/klauspost/compress/internal/cpuinfo"
+)
+
+type decodeSyncAsmContext struct {
+ llTable []decSymbol
+ mlTable []decSymbol
+ ofTable []decSymbol
+ llState uint64
+ mlState uint64
+ ofState uint64
+ iteration int
+ litRemain int
+ out []byte
+ outPosition int
+ literals []byte
+ litPosition int
+ history []byte
+ windowSize int
+ ll int // set on error (not for all errors, please refer to _generate/gen.go)
+ ml int // set on error (not for all errors, please refer to _generate/gen.go)
+ mo int // set on error (not for all errors, please refer to _generate/gen.go)
+}
+
+// sequenceDecs_decodeSync_amd64 implements the main loop of sequenceDecs.decodeSync in x86 asm.
+//
+// Please refer to seqdec_generic.go for the reference implementation.
+//
+//go:noescape
+func sequenceDecs_decodeSync_amd64(s *sequenceDecs, br *bitReader, ctx *decodeSyncAsmContext) int
+
+// sequenceDecs_decodeSync_bmi2 implements the main loop of sequenceDecs.decodeSync in x86 asm with BMI2 extensions.
+//
+//go:noescape
+func sequenceDecs_decodeSync_bmi2(s *sequenceDecs, br *bitReader, ctx *decodeSyncAsmContext) int
+
+// sequenceDecs_decodeSync_safe_amd64 does the same as above, but does not write more than output buffer.
+//
+//go:noescape
+func sequenceDecs_decodeSync_safe_amd64(s *sequenceDecs, br *bitReader, ctx *decodeSyncAsmContext) int
+
+// sequenceDecs_decodeSync_safe_bmi2 does the same as above, but does not write more than output buffer.
+//
+//go:noescape
+func sequenceDecs_decodeSync_safe_bmi2(s *sequenceDecs, br *bitReader, ctx *decodeSyncAsmContext) int
+
+// decode sequences from the stream with the provided history but without a dictionary.
+func (s *sequenceDecs) decodeSyncSimple(hist []byte) (bool, error) {
+ if len(s.dict) > 0 {
+ return false, nil
+ }
+ if s.maxSyncLen == 0 && cap(s.out)-len(s.out) < maxCompressedBlockSize {
+ return false, nil
+ }
+
+ // FIXME: Using unsafe memory copies leads to rare, random crashes
+ // with fuzz testing. It is therefore disabled for now.
+ const useSafe = true
+ /*
+ useSafe := false
+ if s.maxSyncLen == 0 && cap(s.out)-len(s.out) < maxCompressedBlockSizeAlloc {
+ useSafe = true
+ }
+ if s.maxSyncLen > 0 && cap(s.out)-len(s.out)-compressedBlockOverAlloc < int(s.maxSyncLen) {
+ useSafe = true
+ }
+ if cap(s.literals) < len(s.literals)+compressedBlockOverAlloc {
+ useSafe = true
+ }
+ */
+
+ br := s.br
+
+ maxBlockSize := maxCompressedBlockSize
+ if s.windowSize < maxBlockSize {
+ maxBlockSize = s.windowSize
+ }
+
+ ctx := decodeSyncAsmContext{
+ llTable: s.litLengths.fse.dt[:maxTablesize],
+ mlTable: s.matchLengths.fse.dt[:maxTablesize],
+ ofTable: s.offsets.fse.dt[:maxTablesize],
+ llState: uint64(s.litLengths.state.state),
+ mlState: uint64(s.matchLengths.state.state),
+ ofState: uint64(s.offsets.state.state),
+ iteration: s.nSeqs - 1,
+ litRemain: len(s.literals),
+ out: s.out,
+ outPosition: len(s.out),
+ literals: s.literals,
+ windowSize: s.windowSize,
+ history: hist,
+ }
+
+ s.seqSize = 0
+ startSize := len(s.out)
+
+ var errCode int
+ if cpuinfo.HasBMI2() {
+ if useSafe {
+ errCode = sequenceDecs_decodeSync_safe_bmi2(s, br, &ctx)
+ } else {
+ errCode = sequenceDecs_decodeSync_bmi2(s, br, &ctx)
+ }
+ } else {
+ if useSafe {
+ errCode = sequenceDecs_decodeSync_safe_amd64(s, br, &ctx)
+ } else {
+ errCode = sequenceDecs_decodeSync_amd64(s, br, &ctx)
+ }
+ }
+ switch errCode {
+ case noError:
+ break
+
+ case errorMatchLenOfsMismatch:
+ return true, fmt.Errorf("zero matchoff and matchlen (%d) > 0", ctx.ml)
+
+ case errorMatchLenTooBig:
+ return true, fmt.Errorf("match len (%d) bigger than max allowed length", ctx.ml)
+
+ case errorMatchOffTooBig:
+ return true, fmt.Errorf("match offset (%d) bigger than current history (%d)",
+ ctx.mo, ctx.outPosition+len(hist)-startSize)
+
+ case errorNotEnoughLiterals:
+ return true, fmt.Errorf("unexpected literal count, want %d bytes, but only %d is available",
+ ctx.ll, ctx.litRemain+ctx.ll)
+
+ case errorOverread:
+ return true, io.ErrUnexpectedEOF
+
+ case errorNotEnoughSpace:
+ size := ctx.outPosition + ctx.ll + ctx.ml
+ if debugDecoder {
+ println("msl:", s.maxSyncLen, "cap", cap(s.out), "bef:", startSize, "sz:", size-startSize, "mbs:", maxBlockSize, "outsz:", cap(s.out)-startSize)
+ }
+ return true, fmt.Errorf("output bigger than max block size (%d)", maxBlockSize)
+
+ default:
+ return true, fmt.Errorf("sequenceDecs_decode returned erronous code %d", errCode)
+ }
+
+ s.seqSize += ctx.litRemain
+ if s.seqSize > maxBlockSize {
+ return true, fmt.Errorf("output bigger than max block size (%d)", maxBlockSize)
+ }
+ err := br.close()
+ if err != nil {
+ printf("Closing sequences: %v, %+v\n", err, *br)
+ return true, err
+ }
+
+ s.literals = s.literals[ctx.litPosition:]
+ t := ctx.outPosition
+ s.out = s.out[:t]
+
+ // Add final literals
+ s.out = append(s.out, s.literals...)
+ if debugDecoder {
+ t += len(s.literals)
+ if t != len(s.out) {
+ panic(fmt.Errorf("length mismatch, want %d, got %d", len(s.out), t))
+ }
+ }
+
+ return true, nil
+}
+
+// --------------------------------------------------------------------------------
+
+type decodeAsmContext struct {
+ llTable []decSymbol
+ mlTable []decSymbol
+ ofTable []decSymbol
+ llState uint64
+ mlState uint64
+ ofState uint64
+ iteration int
+ seqs []seqVals
+ litRemain int
+}
+
+const noError = 0
+
+// error reported when mo == 0 && ml > 0
+const errorMatchLenOfsMismatch = 1
+
+// error reported when ml > maxMatchLen
+const errorMatchLenTooBig = 2
+
+// error reported when mo > available history or mo > s.windowSize
+const errorMatchOffTooBig = 3
+
+// error reported when the sum of literal lengths exeeceds the literal buffer size
+const errorNotEnoughLiterals = 4
+
+// error reported when capacity of `out` is too small
+const errorNotEnoughSpace = 5
+
+// error reported when bits are overread.
+const errorOverread = 6
+
+// sequenceDecs_decode implements the main loop of sequenceDecs in x86 asm.
+//
+// Please refer to seqdec_generic.go for the reference implementation.
+//
+//go:noescape
+func sequenceDecs_decode_amd64(s *sequenceDecs, br *bitReader, ctx *decodeAsmContext) int
+
+// sequenceDecs_decode implements the main loop of sequenceDecs in x86 asm.
+//
+// Please refer to seqdec_generic.go for the reference implementation.
+//
+//go:noescape
+func sequenceDecs_decode_56_amd64(s *sequenceDecs, br *bitReader, ctx *decodeAsmContext) int
+
+// sequenceDecs_decode implements the main loop of sequenceDecs in x86 asm with BMI2 extensions.
+//
+//go:noescape
+func sequenceDecs_decode_bmi2(s *sequenceDecs, br *bitReader, ctx *decodeAsmContext) int
+
+// sequenceDecs_decode implements the main loop of sequenceDecs in x86 asm with BMI2 extensions.
+//
+//go:noescape
+func sequenceDecs_decode_56_bmi2(s *sequenceDecs, br *bitReader, ctx *decodeAsmContext) int
+
+// decode sequences from the stream without the provided history.
+func (s *sequenceDecs) decode(seqs []seqVals) error {
+ br := s.br
+
+ maxBlockSize := maxCompressedBlockSize
+ if s.windowSize < maxBlockSize {
+ maxBlockSize = s.windowSize
+ }
+
+ ctx := decodeAsmContext{
+ llTable: s.litLengths.fse.dt[:maxTablesize],
+ mlTable: s.matchLengths.fse.dt[:maxTablesize],
+ ofTable: s.offsets.fse.dt[:maxTablesize],
+ llState: uint64(s.litLengths.state.state),
+ mlState: uint64(s.matchLengths.state.state),
+ ofState: uint64(s.offsets.state.state),
+ seqs: seqs,
+ iteration: len(seqs) - 1,
+ litRemain: len(s.literals),
+ }
+
+ if debugDecoder {
+ println("decode: decoding", len(seqs), "sequences", br.remain(), "bits remain on stream")
+ }
+
+ s.seqSize = 0
+ lte56bits := s.maxBits+s.offsets.fse.actualTableLog+s.matchLengths.fse.actualTableLog+s.litLengths.fse.actualTableLog <= 56
+ var errCode int
+ if cpuinfo.HasBMI2() {
+ if lte56bits {
+ errCode = sequenceDecs_decode_56_bmi2(s, br, &ctx)
+ } else {
+ errCode = sequenceDecs_decode_bmi2(s, br, &ctx)
+ }
+ } else {
+ if lte56bits {
+ errCode = sequenceDecs_decode_56_amd64(s, br, &ctx)
+ } else {
+ errCode = sequenceDecs_decode_amd64(s, br, &ctx)
+ }
+ }
+ if errCode != 0 {
+ i := len(seqs) - ctx.iteration - 1
+ switch errCode {
+ case errorMatchLenOfsMismatch:
+ ml := ctx.seqs[i].ml
+ return fmt.Errorf("zero matchoff and matchlen (%d) > 0", ml)
+
+ case errorMatchLenTooBig:
+ ml := ctx.seqs[i].ml
+ return fmt.Errorf("match len (%d) bigger than max allowed length", ml)
+
+ case errorNotEnoughLiterals:
+ ll := ctx.seqs[i].ll
+ return fmt.Errorf("unexpected literal count, want %d bytes, but only %d is available", ll, ctx.litRemain+ll)
+ case errorOverread:
+ return io.ErrUnexpectedEOF
+ }
+
+ return fmt.Errorf("sequenceDecs_decode_amd64 returned erronous code %d", errCode)
+ }
+
+ if ctx.litRemain < 0 {
+ return fmt.Errorf("literal count is too big: total available %d, total requested %d",
+ len(s.literals), len(s.literals)-ctx.litRemain)
+ }
+
+ s.seqSize += ctx.litRemain
+ if s.seqSize > maxBlockSize {
+ return fmt.Errorf("output bigger than max block size (%d)", maxBlockSize)
+ }
+ if debugDecoder {
+ println("decode: ", br.remain(), "bits remain on stream. code:", errCode)
+ }
+ err := br.close()
+ if err != nil {
+ printf("Closing sequences: %v, %+v\n", err, *br)
+ }
+ return err
+}
+
+// --------------------------------------------------------------------------------
+
+type executeAsmContext struct {
+ seqs []seqVals
+ seqIndex int
+ out []byte
+ history []byte
+ literals []byte
+ outPosition int
+ litPosition int
+ windowSize int
+}
+
+// sequenceDecs_executeSimple_amd64 implements the main loop of sequenceDecs.executeSimple in x86 asm.
+//
+// Returns false if a match offset is too big.
+//
+// Please refer to seqdec_generic.go for the reference implementation.
+//
+//go:noescape
+func sequenceDecs_executeSimple_amd64(ctx *executeAsmContext) bool
+
+// Same as above, but with safe memcopies
+//
+//go:noescape
+func sequenceDecs_executeSimple_safe_amd64(ctx *executeAsmContext) bool
+
+// executeSimple handles cases when dictionary is not used.
+func (s *sequenceDecs) executeSimple(seqs []seqVals, hist []byte) error {
+ // Ensure we have enough output size...
+ if len(s.out)+s.seqSize+compressedBlockOverAlloc > cap(s.out) {
+ addBytes := s.seqSize + len(s.out) + compressedBlockOverAlloc
+ s.out = append(s.out, make([]byte, addBytes)...)
+ s.out = s.out[:len(s.out)-addBytes]
+ }
+
+ if debugDecoder {
+ printf("Execute %d seqs with literals: %d into %d bytes\n", len(seqs), len(s.literals), s.seqSize)
+ }
+
+ var t = len(s.out)
+ out := s.out[:t+s.seqSize]
+
+ ctx := executeAsmContext{
+ seqs: seqs,
+ seqIndex: 0,
+ out: out,
+ history: hist,
+ outPosition: t,
+ litPosition: 0,
+ literals: s.literals,
+ windowSize: s.windowSize,
+ }
+ var ok bool
+ if cap(s.literals) < len(s.literals)+compressedBlockOverAlloc {
+ ok = sequenceDecs_executeSimple_safe_amd64(&ctx)
+ } else {
+ ok = sequenceDecs_executeSimple_amd64(&ctx)
+ }
+ if !ok {
+ return fmt.Errorf("match offset (%d) bigger than current history (%d)",
+ seqs[ctx.seqIndex].mo, ctx.outPosition+len(hist))
+ }
+ s.literals = s.literals[ctx.litPosition:]
+ t = ctx.outPosition
+
+ // Add final literals
+ copy(out[t:], s.literals)
+ if debugDecoder {
+ t += len(s.literals)
+ if t != len(out) {
+ panic(fmt.Errorf("length mismatch, want %d, got %d, ss: %d", len(out), t, s.seqSize))
+ }
+ }
+ s.out = out
+
+ return nil
+}
diff --git a/vendor/github.com/klauspost/compress/zstd/seqdec_amd64.s b/vendor/github.com/klauspost/compress/zstd/seqdec_amd64.s
new file mode 100644
index 000000000..b6f4ba6fc
--- /dev/null
+++ b/vendor/github.com/klauspost/compress/zstd/seqdec_amd64.s
@@ -0,0 +1,4175 @@
+// Code generated by command: go run gen.go -out ../seqdec_amd64.s -pkg=zstd. DO NOT EDIT.
+
+//go:build !appengine && !noasm && gc && !noasm
+
+// func sequenceDecs_decode_amd64(s *sequenceDecs, br *bitReader, ctx *decodeAsmContext) int
+// Requires: CMOV
+TEXT ·sequenceDecs_decode_amd64(SB), $8-32
+ MOVQ br+8(FP), AX
+ MOVQ 32(AX), DX
+ MOVBQZX 40(AX), BX
+ MOVQ 24(AX), SI
+ MOVQ (AX), AX
+ ADDQ SI, AX
+ MOVQ AX, (SP)
+ MOVQ ctx+16(FP), AX
+ MOVQ 72(AX), DI
+ MOVQ 80(AX), R8
+ MOVQ 88(AX), R9
+ MOVQ 104(AX), R10
+ MOVQ s+0(FP), AX
+ MOVQ 144(AX), R11
+ MOVQ 152(AX), R12
+ MOVQ 160(AX), R13
+
+sequenceDecs_decode_amd64_main_loop:
+ MOVQ (SP), R14
+
+ // Fill bitreader to have enough for the offset and match length.
+ CMPQ SI, $0x08
+ JL sequenceDecs_decode_amd64_fill_byte_by_byte
+ MOVQ BX, AX
+ SHRQ $0x03, AX
+ SUBQ AX, R14
+ MOVQ (R14), DX
+ SUBQ AX, SI
+ ANDQ $0x07, BX
+ JMP sequenceDecs_decode_amd64_fill_end
+
+sequenceDecs_decode_amd64_fill_byte_by_byte:
+ CMPQ SI, $0x00
+ JLE sequenceDecs_decode_amd64_fill_check_overread
+ CMPQ BX, $0x07
+ JLE sequenceDecs_decode_amd64_fill_end
+ SHLQ $0x08, DX
+ SUBQ $0x01, R14
+ SUBQ $0x01, SI
+ SUBQ $0x08, BX
+ MOVBQZX (R14), AX
+ ORQ AX, DX
+ JMP sequenceDecs_decode_amd64_fill_byte_by_byte
+
+sequenceDecs_decode_amd64_fill_check_overread:
+ CMPQ BX, $0x40
+ JA error_overread
+
+sequenceDecs_decode_amd64_fill_end:
+ // Update offset
+ MOVQ R9, AX
+ MOVQ BX, CX
+ MOVQ DX, R15
+ SHLQ CL, R15
+ MOVB AH, CL
+ SHRQ $0x20, AX
+ TESTQ CX, CX
+ JZ sequenceDecs_decode_amd64_of_update_zero
+ ADDQ CX, BX
+ CMPQ BX, $0x40
+ JA sequenceDecs_decode_amd64_of_update_zero
+ CMPQ CX, $0x40
+ JAE sequenceDecs_decode_amd64_of_update_zero
+ NEGQ CX
+ SHRQ CL, R15
+ ADDQ R15, AX
+
+sequenceDecs_decode_amd64_of_update_zero:
+ MOVQ AX, 16(R10)
+
+ // Update match length
+ MOVQ R8, AX
+ MOVQ BX, CX
+ MOVQ DX, R15
+ SHLQ CL, R15
+ MOVB AH, CL
+ SHRQ $0x20, AX
+ TESTQ CX, CX
+ JZ sequenceDecs_decode_amd64_ml_update_zero
+ ADDQ CX, BX
+ CMPQ BX, $0x40
+ JA sequenceDecs_decode_amd64_ml_update_zero
+ CMPQ CX, $0x40
+ JAE sequenceDecs_decode_amd64_ml_update_zero
+ NEGQ CX
+ SHRQ CL, R15
+ ADDQ R15, AX
+
+sequenceDecs_decode_amd64_ml_update_zero:
+ MOVQ AX, 8(R10)
+
+ // Fill bitreader to have enough for the remaining
+ CMPQ SI, $0x08
+ JL sequenceDecs_decode_amd64_fill_2_byte_by_byte
+ MOVQ BX, AX
+ SHRQ $0x03, AX
+ SUBQ AX, R14
+ MOVQ (R14), DX
+ SUBQ AX, SI
+ ANDQ $0x07, BX
+ JMP sequenceDecs_decode_amd64_fill_2_end
+
+sequenceDecs_decode_amd64_fill_2_byte_by_byte:
+ CMPQ SI, $0x00
+ JLE sequenceDecs_decode_amd64_fill_2_check_overread
+ CMPQ BX, $0x07
+ JLE sequenceDecs_decode_amd64_fill_2_end
+ SHLQ $0x08, DX
+ SUBQ $0x01, R14
+ SUBQ $0x01, SI
+ SUBQ $0x08, BX
+ MOVBQZX (R14), AX
+ ORQ AX, DX
+ JMP sequenceDecs_decode_amd64_fill_2_byte_by_byte
+
+sequenceDecs_decode_amd64_fill_2_check_overread:
+ CMPQ BX, $0x40
+ JA error_overread
+
+sequenceDecs_decode_amd64_fill_2_end:
+ // Update literal length
+ MOVQ DI, AX
+ MOVQ BX, CX
+ MOVQ DX, R15
+ SHLQ CL, R15
+ MOVB AH, CL
+ SHRQ $0x20, AX
+ TESTQ CX, CX
+ JZ sequenceDecs_decode_amd64_ll_update_zero
+ ADDQ CX, BX
+ CMPQ BX, $0x40
+ JA sequenceDecs_decode_amd64_ll_update_zero
+ CMPQ CX, $0x40
+ JAE sequenceDecs_decode_amd64_ll_update_zero
+ NEGQ CX
+ SHRQ CL, R15
+ ADDQ R15, AX
+
+sequenceDecs_decode_amd64_ll_update_zero:
+ MOVQ AX, (R10)
+
+ // Fill bitreader for state updates
+ MOVQ R14, (SP)
+ MOVQ R9, AX
+ SHRQ $0x08, AX
+ MOVBQZX AL, AX
+ MOVQ ctx+16(FP), CX
+ CMPQ 96(CX), $0x00
+ JZ sequenceDecs_decode_amd64_skip_update
+
+ // Update Literal Length State
+ MOVBQZX DI, R14
+ SHRQ $0x10, DI
+ MOVWQZX DI, DI
+ LEAQ (BX)(R14*1), CX
+ MOVQ DX, R15
+ MOVQ CX, BX
+ ROLQ CL, R15
+ MOVL $0x00000001, BP
+ MOVB R14, CL
+ SHLL CL, BP
+ DECL BP
+ ANDQ BP, R15
+ ADDQ R15, DI
+
+ // Load ctx.llTable
+ MOVQ ctx+16(FP), CX
+ MOVQ (CX), CX
+ MOVQ (CX)(DI*8), DI
+
+ // Update Match Length State
+ MOVBQZX R8, R14
+ SHRQ $0x10, R8
+ MOVWQZX R8, R8
+ LEAQ (BX)(R14*1), CX
+ MOVQ DX, R15
+ MOVQ CX, BX
+ ROLQ CL, R15
+ MOVL $0x00000001, BP
+ MOVB R14, CL
+ SHLL CL, BP
+ DECL BP
+ ANDQ BP, R15
+ ADDQ R15, R8
+
+ // Load ctx.mlTable
+ MOVQ ctx+16(FP), CX
+ MOVQ 24(CX), CX
+ MOVQ (CX)(R8*8), R8
+
+ // Update Offset State
+ MOVBQZX R9, R14
+ SHRQ $0x10, R9
+ MOVWQZX R9, R9
+ LEAQ (BX)(R14*1), CX
+ MOVQ DX, R15
+ MOVQ CX, BX
+ ROLQ CL, R15
+ MOVL $0x00000001, BP
+ MOVB R14, CL
+ SHLL CL, BP
+ DECL BP
+ ANDQ BP, R15
+ ADDQ R15, R9
+
+ // Load ctx.ofTable
+ MOVQ ctx+16(FP), CX
+ MOVQ 48(CX), CX
+ MOVQ (CX)(R9*8), R9
+
+sequenceDecs_decode_amd64_skip_update:
+ // Adjust offset
+ MOVQ 16(R10), CX
+ CMPQ AX, $0x01
+ JBE sequenceDecs_decode_amd64_adjust_offsetB_1_or_0
+ MOVQ R12, R13
+ MOVQ R11, R12
+ MOVQ CX, R11
+ JMP sequenceDecs_decode_amd64_after_adjust
+
+sequenceDecs_decode_amd64_adjust_offsetB_1_or_0:
+ CMPQ (R10), $0x00000000
+ JNE sequenceDecs_decode_amd64_adjust_offset_maybezero
+ INCQ CX
+ JMP sequenceDecs_decode_amd64_adjust_offset_nonzero
+
+sequenceDecs_decode_amd64_adjust_offset_maybezero:
+ TESTQ CX, CX
+ JNZ sequenceDecs_decode_amd64_adjust_offset_nonzero
+ MOVQ R11, CX
+ JMP sequenceDecs_decode_amd64_after_adjust
+
+sequenceDecs_decode_amd64_adjust_offset_nonzero:
+ CMPQ CX, $0x01
+ JB sequenceDecs_decode_amd64_adjust_zero
+ JEQ sequenceDecs_decode_amd64_adjust_one
+ CMPQ CX, $0x02
+ JA sequenceDecs_decode_amd64_adjust_three
+ JMP sequenceDecs_decode_amd64_adjust_two
+
+sequenceDecs_decode_amd64_adjust_zero:
+ MOVQ R11, AX
+ JMP sequenceDecs_decode_amd64_adjust_test_temp_valid
+
+sequenceDecs_decode_amd64_adjust_one:
+ MOVQ R12, AX
+ JMP sequenceDecs_decode_amd64_adjust_test_temp_valid
+
+sequenceDecs_decode_amd64_adjust_two:
+ MOVQ R13, AX
+ JMP sequenceDecs_decode_amd64_adjust_test_temp_valid
+
+sequenceDecs_decode_amd64_adjust_three:
+ LEAQ -1(R11), AX
+
+sequenceDecs_decode_amd64_adjust_test_temp_valid:
+ TESTQ AX, AX
+ JNZ sequenceDecs_decode_amd64_adjust_temp_valid
+ MOVQ $0x00000001, AX
+
+sequenceDecs_decode_amd64_adjust_temp_valid:
+ CMPQ CX, $0x01
+ CMOVQNE R12, R13
+ MOVQ R11, R12
+ MOVQ AX, R11
+ MOVQ AX, CX
+
+sequenceDecs_decode_amd64_after_adjust:
+ MOVQ CX, 16(R10)
+
+ // Check values
+ MOVQ 8(R10), AX
+ MOVQ (R10), R14
+ LEAQ (AX)(R14*1), R15
+ MOVQ s+0(FP), BP
+ ADDQ R15, 256(BP)
+ MOVQ ctx+16(FP), R15
+ SUBQ R14, 128(R15)
+ JS error_not_enough_literals
+ CMPQ AX, $0x00020002
+ JA sequenceDecs_decode_amd64_error_match_len_too_big
+ TESTQ CX, CX
+ JNZ sequenceDecs_decode_amd64_match_len_ofs_ok
+ TESTQ AX, AX
+ JNZ sequenceDecs_decode_amd64_error_match_len_ofs_mismatch
+
+sequenceDecs_decode_amd64_match_len_ofs_ok:
+ ADDQ $0x18, R10
+ MOVQ ctx+16(FP), AX
+ DECQ 96(AX)
+ JNS sequenceDecs_decode_amd64_main_loop
+ MOVQ s+0(FP), AX
+ MOVQ R11, 144(AX)
+ MOVQ R12, 152(AX)
+ MOVQ R13, 160(AX)
+ MOVQ br+8(FP), AX
+ MOVQ DX, 32(AX)
+ MOVB BL, 40(AX)
+ MOVQ SI, 24(AX)
+
+ // Return success
+ MOVQ $0x00000000, ret+24(FP)
+ RET
+
+ // Return with match length error
+sequenceDecs_decode_amd64_error_match_len_ofs_mismatch:
+ MOVQ $0x00000001, ret+24(FP)
+ RET
+
+ // Return with match too long error
+sequenceDecs_decode_amd64_error_match_len_too_big:
+ MOVQ $0x00000002, ret+24(FP)
+ RET
+
+ // Return with match offset too long error
+ MOVQ $0x00000003, ret+24(FP)
+ RET
+
+ // Return with not enough literals error
+error_not_enough_literals:
+ MOVQ $0x00000004, ret+24(FP)
+ RET
+
+ // Return with overread error
+error_overread:
+ MOVQ $0x00000006, ret+24(FP)
+ RET
+
+// func sequenceDecs_decode_56_amd64(s *sequenceDecs, br *bitReader, ctx *decodeAsmContext) int
+// Requires: CMOV
+TEXT ·sequenceDecs_decode_56_amd64(SB), $8-32
+ MOVQ br+8(FP), AX
+ MOVQ 32(AX), DX
+ MOVBQZX 40(AX), BX
+ MOVQ 24(AX), SI
+ MOVQ (AX), AX
+ ADDQ SI, AX
+ MOVQ AX, (SP)
+ MOVQ ctx+16(FP), AX
+ MOVQ 72(AX), DI
+ MOVQ 80(AX), R8
+ MOVQ 88(AX), R9
+ MOVQ 104(AX), R10
+ MOVQ s+0(FP), AX
+ MOVQ 144(AX), R11
+ MOVQ 152(AX), R12
+ MOVQ 160(AX), R13
+
+sequenceDecs_decode_56_amd64_main_loop:
+ MOVQ (SP), R14
+
+ // Fill bitreader to have enough for the offset and match length.
+ CMPQ SI, $0x08
+ JL sequenceDecs_decode_56_amd64_fill_byte_by_byte
+ MOVQ BX, AX
+ SHRQ $0x03, AX
+ SUBQ AX, R14
+ MOVQ (R14), DX
+ SUBQ AX, SI
+ ANDQ $0x07, BX
+ JMP sequenceDecs_decode_56_amd64_fill_end
+
+sequenceDecs_decode_56_amd64_fill_byte_by_byte:
+ CMPQ SI, $0x00
+ JLE sequenceDecs_decode_56_amd64_fill_check_overread
+ CMPQ BX, $0x07
+ JLE sequenceDecs_decode_56_amd64_fill_end
+ SHLQ $0x08, DX
+ SUBQ $0x01, R14
+ SUBQ $0x01, SI
+ SUBQ $0x08, BX
+ MOVBQZX (R14), AX
+ ORQ AX, DX
+ JMP sequenceDecs_decode_56_amd64_fill_byte_by_byte
+
+sequenceDecs_decode_56_amd64_fill_check_overread:
+ CMPQ BX, $0x40
+ JA error_overread
+
+sequenceDecs_decode_56_amd64_fill_end:
+ // Update offset
+ MOVQ R9, AX
+ MOVQ BX, CX
+ MOVQ DX, R15
+ SHLQ CL, R15
+ MOVB AH, CL
+ SHRQ $0x20, AX
+ TESTQ CX, CX
+ JZ sequenceDecs_decode_56_amd64_of_update_zero
+ ADDQ CX, BX
+ CMPQ BX, $0x40
+ JA sequenceDecs_decode_56_amd64_of_update_zero
+ CMPQ CX, $0x40
+ JAE sequenceDecs_decode_56_amd64_of_update_zero
+ NEGQ CX
+ SHRQ CL, R15
+ ADDQ R15, AX
+
+sequenceDecs_decode_56_amd64_of_update_zero:
+ MOVQ AX, 16(R10)
+
+ // Update match length
+ MOVQ R8, AX
+ MOVQ BX, CX
+ MOVQ DX, R15
+ SHLQ CL, R15
+ MOVB AH, CL
+ SHRQ $0x20, AX
+ TESTQ CX, CX
+ JZ sequenceDecs_decode_56_amd64_ml_update_zero
+ ADDQ CX, BX
+ CMPQ BX, $0x40
+ JA sequenceDecs_decode_56_amd64_ml_update_zero
+ CMPQ CX, $0x40
+ JAE sequenceDecs_decode_56_amd64_ml_update_zero
+ NEGQ CX
+ SHRQ CL, R15
+ ADDQ R15, AX
+
+sequenceDecs_decode_56_amd64_ml_update_zero:
+ MOVQ AX, 8(R10)
+
+ // Update literal length
+ MOVQ DI, AX
+ MOVQ BX, CX
+ MOVQ DX, R15
+ SHLQ CL, R15
+ MOVB AH, CL
+ SHRQ $0x20, AX
+ TESTQ CX, CX
+ JZ sequenceDecs_decode_56_amd64_ll_update_zero
+ ADDQ CX, BX
+ CMPQ BX, $0x40
+ JA sequenceDecs_decode_56_amd64_ll_update_zero
+ CMPQ CX, $0x40
+ JAE sequenceDecs_decode_56_amd64_ll_update_zero
+ NEGQ CX
+ SHRQ CL, R15
+ ADDQ R15, AX
+
+sequenceDecs_decode_56_amd64_ll_update_zero:
+ MOVQ AX, (R10)
+
+ // Fill bitreader for state updates
+ MOVQ R14, (SP)
+ MOVQ R9, AX
+ SHRQ $0x08, AX
+ MOVBQZX AL, AX
+ MOVQ ctx+16(FP), CX
+ CMPQ 96(CX), $0x00
+ JZ sequenceDecs_decode_56_amd64_skip_update
+
+ // Update Literal Length State
+ MOVBQZX DI, R14
+ SHRQ $0x10, DI
+ MOVWQZX DI, DI
+ LEAQ (BX)(R14*1), CX
+ MOVQ DX, R15
+ MOVQ CX, BX
+ ROLQ CL, R15
+ MOVL $0x00000001, BP
+ MOVB R14, CL
+ SHLL CL, BP
+ DECL BP
+ ANDQ BP, R15
+ ADDQ R15, DI
+
+ // Load ctx.llTable
+ MOVQ ctx+16(FP), CX
+ MOVQ (CX), CX
+ MOVQ (CX)(DI*8), DI
+
+ // Update Match Length State
+ MOVBQZX R8, R14
+ SHRQ $0x10, R8
+ MOVWQZX R8, R8
+ LEAQ (BX)(R14*1), CX
+ MOVQ DX, R15
+ MOVQ CX, BX
+ ROLQ CL, R15
+ MOVL $0x00000001, BP
+ MOVB R14, CL
+ SHLL CL, BP
+ DECL BP
+ ANDQ BP, R15
+ ADDQ R15, R8
+
+ // Load ctx.mlTable
+ MOVQ ctx+16(FP), CX
+ MOVQ 24(CX), CX
+ MOVQ (CX)(R8*8), R8
+
+ // Update Offset State
+ MOVBQZX R9, R14
+ SHRQ $0x10, R9
+ MOVWQZX R9, R9
+ LEAQ (BX)(R14*1), CX
+ MOVQ DX, R15
+ MOVQ CX, BX
+ ROLQ CL, R15
+ MOVL $0x00000001, BP
+ MOVB R14, CL
+ SHLL CL, BP
+ DECL BP
+ ANDQ BP, R15
+ ADDQ R15, R9
+
+ // Load ctx.ofTable
+ MOVQ ctx+16(FP), CX
+ MOVQ 48(CX), CX
+ MOVQ (CX)(R9*8), R9
+
+sequenceDecs_decode_56_amd64_skip_update:
+ // Adjust offset
+ MOVQ 16(R10), CX
+ CMPQ AX, $0x01
+ JBE sequenceDecs_decode_56_amd64_adjust_offsetB_1_or_0
+ MOVQ R12, R13
+ MOVQ R11, R12
+ MOVQ CX, R11
+ JMP sequenceDecs_decode_56_amd64_after_adjust
+
+sequenceDecs_decode_56_amd64_adjust_offsetB_1_or_0:
+ CMPQ (R10), $0x00000000
+ JNE sequenceDecs_decode_56_amd64_adjust_offset_maybezero
+ INCQ CX
+ JMP sequenceDecs_decode_56_amd64_adjust_offset_nonzero
+
+sequenceDecs_decode_56_amd64_adjust_offset_maybezero:
+ TESTQ CX, CX
+ JNZ sequenceDecs_decode_56_amd64_adjust_offset_nonzero
+ MOVQ R11, CX
+ JMP sequenceDecs_decode_56_amd64_after_adjust
+
+sequenceDecs_decode_56_amd64_adjust_offset_nonzero:
+ CMPQ CX, $0x01
+ JB sequenceDecs_decode_56_amd64_adjust_zero
+ JEQ sequenceDecs_decode_56_amd64_adjust_one
+ CMPQ CX, $0x02
+ JA sequenceDecs_decode_56_amd64_adjust_three
+ JMP sequenceDecs_decode_56_amd64_adjust_two
+
+sequenceDecs_decode_56_amd64_adjust_zero:
+ MOVQ R11, AX
+ JMP sequenceDecs_decode_56_amd64_adjust_test_temp_valid
+
+sequenceDecs_decode_56_amd64_adjust_one:
+ MOVQ R12, AX
+ JMP sequenceDecs_decode_56_amd64_adjust_test_temp_valid
+
+sequenceDecs_decode_56_amd64_adjust_two:
+ MOVQ R13, AX
+ JMP sequenceDecs_decode_56_amd64_adjust_test_temp_valid
+
+sequenceDecs_decode_56_amd64_adjust_three:
+ LEAQ -1(R11), AX
+
+sequenceDecs_decode_56_amd64_adjust_test_temp_valid:
+ TESTQ AX, AX
+ JNZ sequenceDecs_decode_56_amd64_adjust_temp_valid
+ MOVQ $0x00000001, AX
+
+sequenceDecs_decode_56_amd64_adjust_temp_valid:
+ CMPQ CX, $0x01
+ CMOVQNE R12, R13
+ MOVQ R11, R12
+ MOVQ AX, R11
+ MOVQ AX, CX
+
+sequenceDecs_decode_56_amd64_after_adjust:
+ MOVQ CX, 16(R10)
+
+ // Check values
+ MOVQ 8(R10), AX
+ MOVQ (R10), R14
+ LEAQ (AX)(R14*1), R15
+ MOVQ s+0(FP), BP
+ ADDQ R15, 256(BP)
+ MOVQ ctx+16(FP), R15
+ SUBQ R14, 128(R15)
+ JS error_not_enough_literals
+ CMPQ AX, $0x00020002
+ JA sequenceDecs_decode_56_amd64_error_match_len_too_big
+ TESTQ CX, CX
+ JNZ sequenceDecs_decode_56_amd64_match_len_ofs_ok
+ TESTQ AX, AX
+ JNZ sequenceDecs_decode_56_amd64_error_match_len_ofs_mismatch
+
+sequenceDecs_decode_56_amd64_match_len_ofs_ok:
+ ADDQ $0x18, R10
+ MOVQ ctx+16(FP), AX
+ DECQ 96(AX)
+ JNS sequenceDecs_decode_56_amd64_main_loop
+ MOVQ s+0(FP), AX
+ MOVQ R11, 144(AX)
+ MOVQ R12, 152(AX)
+ MOVQ R13, 160(AX)
+ MOVQ br+8(FP), AX
+ MOVQ DX, 32(AX)
+ MOVB BL, 40(AX)
+ MOVQ SI, 24(AX)
+
+ // Return success
+ MOVQ $0x00000000, ret+24(FP)
+ RET
+
+ // Return with match length error
+sequenceDecs_decode_56_amd64_error_match_len_ofs_mismatch:
+ MOVQ $0x00000001, ret+24(FP)
+ RET
+
+ // Return with match too long error
+sequenceDecs_decode_56_amd64_error_match_len_too_big:
+ MOVQ $0x00000002, ret+24(FP)
+ RET
+
+ // Return with match offset too long error
+ MOVQ $0x00000003, ret+24(FP)
+ RET
+
+ // Return with not enough literals error
+error_not_enough_literals:
+ MOVQ $0x00000004, ret+24(FP)
+ RET
+
+ // Return with overread error
+error_overread:
+ MOVQ $0x00000006, ret+24(FP)
+ RET
+
+// func sequenceDecs_decode_bmi2(s *sequenceDecs, br *bitReader, ctx *decodeAsmContext) int
+// Requires: BMI, BMI2, CMOV
+TEXT ·sequenceDecs_decode_bmi2(SB), $8-32
+ MOVQ br+8(FP), CX
+ MOVQ 32(CX), AX
+ MOVBQZX 40(CX), DX
+ MOVQ 24(CX), BX
+ MOVQ (CX), CX
+ ADDQ BX, CX
+ MOVQ CX, (SP)
+ MOVQ ctx+16(FP), CX
+ MOVQ 72(CX), SI
+ MOVQ 80(CX), DI
+ MOVQ 88(CX), R8
+ MOVQ 104(CX), R9
+ MOVQ s+0(FP), CX
+ MOVQ 144(CX), R10
+ MOVQ 152(CX), R11
+ MOVQ 160(CX), R12
+
+sequenceDecs_decode_bmi2_main_loop:
+ MOVQ (SP), R13
+
+ // Fill bitreader to have enough for the offset and match length.
+ CMPQ BX, $0x08
+ JL sequenceDecs_decode_bmi2_fill_byte_by_byte
+ MOVQ DX, CX
+ SHRQ $0x03, CX
+ SUBQ CX, R13
+ MOVQ (R13), AX
+ SUBQ CX, BX
+ ANDQ $0x07, DX
+ JMP sequenceDecs_decode_bmi2_fill_end
+
+sequenceDecs_decode_bmi2_fill_byte_by_byte:
+ CMPQ BX, $0x00
+ JLE sequenceDecs_decode_bmi2_fill_check_overread
+ CMPQ DX, $0x07
+ JLE sequenceDecs_decode_bmi2_fill_end
+ SHLQ $0x08, AX
+ SUBQ $0x01, R13
+ SUBQ $0x01, BX
+ SUBQ $0x08, DX
+ MOVBQZX (R13), CX
+ ORQ CX, AX
+ JMP sequenceDecs_decode_bmi2_fill_byte_by_byte
+
+sequenceDecs_decode_bmi2_fill_check_overread:
+ CMPQ DX, $0x40
+ JA error_overread
+
+sequenceDecs_decode_bmi2_fill_end:
+ // Update offset
+ MOVQ $0x00000808, CX
+ BEXTRQ CX, R8, R14
+ MOVQ AX, R15
+ LEAQ (DX)(R14*1), CX
+ ROLQ CL, R15
+ BZHIQ R14, R15, R15
+ MOVQ CX, DX
+ MOVQ R8, CX
+ SHRQ $0x20, CX
+ ADDQ R15, CX
+ MOVQ CX, 16(R9)
+
+ // Update match length
+ MOVQ $0x00000808, CX
+ BEXTRQ CX, DI, R14
+ MOVQ AX, R15
+ LEAQ (DX)(R14*1), CX
+ ROLQ CL, R15
+ BZHIQ R14, R15, R15
+ MOVQ CX, DX
+ MOVQ DI, CX
+ SHRQ $0x20, CX
+ ADDQ R15, CX
+ MOVQ CX, 8(R9)
+
+ // Fill bitreader to have enough for the remaining
+ CMPQ BX, $0x08
+ JL sequenceDecs_decode_bmi2_fill_2_byte_by_byte
+ MOVQ DX, CX
+ SHRQ $0x03, CX
+ SUBQ CX, R13
+ MOVQ (R13), AX
+ SUBQ CX, BX
+ ANDQ $0x07, DX
+ JMP sequenceDecs_decode_bmi2_fill_2_end
+
+sequenceDecs_decode_bmi2_fill_2_byte_by_byte:
+ CMPQ BX, $0x00
+ JLE sequenceDecs_decode_bmi2_fill_2_check_overread
+ CMPQ DX, $0x07
+ JLE sequenceDecs_decode_bmi2_fill_2_end
+ SHLQ $0x08, AX
+ SUBQ $0x01, R13
+ SUBQ $0x01, BX
+ SUBQ $0x08, DX
+ MOVBQZX (R13), CX
+ ORQ CX, AX
+ JMP sequenceDecs_decode_bmi2_fill_2_byte_by_byte
+
+sequenceDecs_decode_bmi2_fill_2_check_overread:
+ CMPQ DX, $0x40
+ JA error_overread
+
+sequenceDecs_decode_bmi2_fill_2_end:
+ // Update literal length
+ MOVQ $0x00000808, CX
+ BEXTRQ CX, SI, R14
+ MOVQ AX, R15
+ LEAQ (DX)(R14*1), CX
+ ROLQ CL, R15
+ BZHIQ R14, R15, R15
+ MOVQ CX, DX
+ MOVQ SI, CX
+ SHRQ $0x20, CX
+ ADDQ R15, CX
+ MOVQ CX, (R9)
+
+ // Fill bitreader for state updates
+ MOVQ R13, (SP)
+ MOVQ $0x00000808, CX
+ BEXTRQ CX, R8, R13
+ MOVQ ctx+16(FP), CX
+ CMPQ 96(CX), $0x00
+ JZ sequenceDecs_decode_bmi2_skip_update
+ LEAQ (SI)(DI*1), R14
+ ADDQ R8, R14
+ MOVBQZX R14, R14
+ LEAQ (DX)(R14*1), CX
+ MOVQ AX, R15
+ MOVQ CX, DX
+ ROLQ CL, R15
+ BZHIQ R14, R15, R15
+
+ // Update Offset State
+ BZHIQ R8, R15, CX
+ SHRXQ R8, R15, R15
+ MOVQ $0x00001010, R14
+ BEXTRQ R14, R8, R8
+ ADDQ CX, R8
+
+ // Load ctx.ofTable
+ MOVQ ctx+16(FP), CX
+ MOVQ 48(CX), CX
+ MOVQ (CX)(R8*8), R8
+
+ // Update Match Length State
+ BZHIQ DI, R15, CX
+ SHRXQ DI, R15, R15
+ MOVQ $0x00001010, R14
+ BEXTRQ R14, DI, DI
+ ADDQ CX, DI
+
+ // Load ctx.mlTable
+ MOVQ ctx+16(FP), CX
+ MOVQ 24(CX), CX
+ MOVQ (CX)(DI*8), DI
+
+ // Update Literal Length State
+ BZHIQ SI, R15, CX
+ MOVQ $0x00001010, R14
+ BEXTRQ R14, SI, SI
+ ADDQ CX, SI
+
+ // Load ctx.llTable
+ MOVQ ctx+16(FP), CX
+ MOVQ (CX), CX
+ MOVQ (CX)(SI*8), SI
+
+sequenceDecs_decode_bmi2_skip_update:
+ // Adjust offset
+ MOVQ 16(R9), CX
+ CMPQ R13, $0x01
+ JBE sequenceDecs_decode_bmi2_adjust_offsetB_1_or_0
+ MOVQ R11, R12
+ MOVQ R10, R11
+ MOVQ CX, R10
+ JMP sequenceDecs_decode_bmi2_after_adjust
+
+sequenceDecs_decode_bmi2_adjust_offsetB_1_or_0:
+ CMPQ (R9), $0x00000000
+ JNE sequenceDecs_decode_bmi2_adjust_offset_maybezero
+ INCQ CX
+ JMP sequenceDecs_decode_bmi2_adjust_offset_nonzero
+
+sequenceDecs_decode_bmi2_adjust_offset_maybezero:
+ TESTQ CX, CX
+ JNZ sequenceDecs_decode_bmi2_adjust_offset_nonzero
+ MOVQ R10, CX
+ JMP sequenceDecs_decode_bmi2_after_adjust
+
+sequenceDecs_decode_bmi2_adjust_offset_nonzero:
+ CMPQ CX, $0x01
+ JB sequenceDecs_decode_bmi2_adjust_zero
+ JEQ sequenceDecs_decode_bmi2_adjust_one
+ CMPQ CX, $0x02
+ JA sequenceDecs_decode_bmi2_adjust_three
+ JMP sequenceDecs_decode_bmi2_adjust_two
+
+sequenceDecs_decode_bmi2_adjust_zero:
+ MOVQ R10, R13
+ JMP sequenceDecs_decode_bmi2_adjust_test_temp_valid
+
+sequenceDecs_decode_bmi2_adjust_one:
+ MOVQ R11, R13
+ JMP sequenceDecs_decode_bmi2_adjust_test_temp_valid
+
+sequenceDecs_decode_bmi2_adjust_two:
+ MOVQ R12, R13
+ JMP sequenceDecs_decode_bmi2_adjust_test_temp_valid
+
+sequenceDecs_decode_bmi2_adjust_three:
+ LEAQ -1(R10), R13
+
+sequenceDecs_decode_bmi2_adjust_test_temp_valid:
+ TESTQ R13, R13
+ JNZ sequenceDecs_decode_bmi2_adjust_temp_valid
+ MOVQ $0x00000001, R13
+
+sequenceDecs_decode_bmi2_adjust_temp_valid:
+ CMPQ CX, $0x01
+ CMOVQNE R11, R12
+ MOVQ R10, R11
+ MOVQ R13, R10
+ MOVQ R13, CX
+
+sequenceDecs_decode_bmi2_after_adjust:
+ MOVQ CX, 16(R9)
+
+ // Check values
+ MOVQ 8(R9), R13
+ MOVQ (R9), R14
+ LEAQ (R13)(R14*1), R15
+ MOVQ s+0(FP), BP
+ ADDQ R15, 256(BP)
+ MOVQ ctx+16(FP), R15
+ SUBQ R14, 128(R15)
+ JS error_not_enough_literals
+ CMPQ R13, $0x00020002
+ JA sequenceDecs_decode_bmi2_error_match_len_too_big
+ TESTQ CX, CX
+ JNZ sequenceDecs_decode_bmi2_match_len_ofs_ok
+ TESTQ R13, R13
+ JNZ sequenceDecs_decode_bmi2_error_match_len_ofs_mismatch
+
+sequenceDecs_decode_bmi2_match_len_ofs_ok:
+ ADDQ $0x18, R9
+ MOVQ ctx+16(FP), CX
+ DECQ 96(CX)
+ JNS sequenceDecs_decode_bmi2_main_loop
+ MOVQ s+0(FP), CX
+ MOVQ R10, 144(CX)
+ MOVQ R11, 152(CX)
+ MOVQ R12, 160(CX)
+ MOVQ br+8(FP), CX
+ MOVQ AX, 32(CX)
+ MOVB DL, 40(CX)
+ MOVQ BX, 24(CX)
+
+ // Return success
+ MOVQ $0x00000000, ret+24(FP)
+ RET
+
+ // Return with match length error
+sequenceDecs_decode_bmi2_error_match_len_ofs_mismatch:
+ MOVQ $0x00000001, ret+24(FP)
+ RET
+
+ // Return with match too long error
+sequenceDecs_decode_bmi2_error_match_len_too_big:
+ MOVQ $0x00000002, ret+24(FP)
+ RET
+
+ // Return with match offset too long error
+ MOVQ $0x00000003, ret+24(FP)
+ RET
+
+ // Return with not enough literals error
+error_not_enough_literals:
+ MOVQ $0x00000004, ret+24(FP)
+ RET
+
+ // Return with overread error
+error_overread:
+ MOVQ $0x00000006, ret+24(FP)
+ RET
+
+// func sequenceDecs_decode_56_bmi2(s *sequenceDecs, br *bitReader, ctx *decodeAsmContext) int
+// Requires: BMI, BMI2, CMOV
+TEXT ·sequenceDecs_decode_56_bmi2(SB), $8-32
+ MOVQ br+8(FP), CX // CX = br (bitReader)
+ MOVQ 32(CX), AX // AX = bit buffer (offset 32 — confirm against bitReader layout)
+ MOVBQZX 40(CX), DX // DX = bits already consumed
+ MOVQ 24(CX), BX // BX = bytes of input remaining
+ MOVQ (CX), CX
+ ADDQ BX, CX
+ MOVQ CX, (SP) // (SP) = pointer just past the unread input
+ MOVQ ctx+16(FP), CX
+ MOVQ 72(CX), SI // SI = literal-length FSE state
+ MOVQ 80(CX), DI // DI = match-length FSE state
+ MOVQ 88(CX), R8 // R8 = offset FSE state
+ MOVQ 104(CX), R9 // R9 = output sequence pointer
+ MOVQ s+0(FP), CX
+ MOVQ 144(CX), R10 // R10/R11/R12 = offset history
+ MOVQ 152(CX), R11
+ MOVQ 160(CX), R12
+
+sequenceDecs_decode_56_bmi2_main_loop: // one iteration decodes one sequence; "_56" variant refills once per sequence (<=56 bits total)
+ MOVQ (SP), R13
+
+ // Fill bitreader to have enough for the offset and match length.
+ CMPQ BX, $0x08
+ JL sequenceDecs_decode_56_bmi2_fill_byte_by_byte
+ MOVQ DX, CX
+ SHRQ $0x03, CX
+ SUBQ CX, R13
+ MOVQ (R13), AX
+ SUBQ CX, BX
+ ANDQ $0x07, DX
+ JMP sequenceDecs_decode_56_bmi2_fill_end
+
+sequenceDecs_decode_56_bmi2_fill_byte_by_byte:
+ CMPQ BX, $0x00
+ JLE sequenceDecs_decode_56_bmi2_fill_check_overread
+ CMPQ DX, $0x07
+ JLE sequenceDecs_decode_56_bmi2_fill_end
+ SHLQ $0x08, AX
+ SUBQ $0x01, R13
+ SUBQ $0x01, BX
+ SUBQ $0x08, DX
+ MOVBQZX (R13), CX
+ ORQ CX, AX
+ JMP sequenceDecs_decode_56_bmi2_fill_byte_by_byte
+
+sequenceDecs_decode_56_bmi2_fill_check_overread:
+ CMPQ DX, $0x40 // more than 64 bits consumed => input overread
+ JA error_overread
+
+sequenceDecs_decode_56_bmi2_fill_end:
+ // Update offset
+ MOVQ $0x00000808, CX // BEXTR control: 8 bits starting at bit 8 (state's nbBits field)
+ BEXTRQ CX, R8, R14
+ MOVQ AX, R15
+ LEAQ (DX)(R14*1), CX
+ ROLQ CL, R15
+ BZHIQ R14, R15, R15
+ MOVQ CX, DX
+ MOVQ R8, CX
+ SHRQ $0x20, CX
+ ADDQ R15, CX
+ MOVQ CX, 16(R9) // seq.offset
+
+ // Update match length
+ MOVQ $0x00000808, CX
+ BEXTRQ CX, DI, R14
+ MOVQ AX, R15
+ LEAQ (DX)(R14*1), CX
+ ROLQ CL, R15
+ BZHIQ R14, R15, R15
+ MOVQ CX, DX
+ MOVQ DI, CX
+ SHRQ $0x20, CX
+ ADDQ R15, CX
+ MOVQ CX, 8(R9) // seq.matchLen
+
+ // Update literal length
+ MOVQ $0x00000808, CX
+ BEXTRQ CX, SI, R14
+ MOVQ AX, R15
+ LEAQ (DX)(R14*1), CX
+ ROLQ CL, R15
+ BZHIQ R14, R15, R15
+ MOVQ CX, DX
+ MOVQ SI, CX
+ SHRQ $0x20, CX
+ ADDQ R15, CX
+ MOVQ CX, (R9) // seq.litLen
+
+ // Fill bitreader for state updates
+ MOVQ R13, (SP)
+ MOVQ $0x00000808, CX
+ BEXTRQ CX, R8, R13 // R13 = offset code bit count (used for the repeat-offset check below)
+ MOVQ ctx+16(FP), CX
+ CMPQ 96(CX), $0x00
+ JZ sequenceDecs_decode_56_bmi2_skip_update // last sequence: no state update needed
+ LEAQ (SI)(DI*1), R14
+ ADDQ R8, R14
+ MOVBQZX R14, R14
+ LEAQ (DX)(R14*1), CX
+ MOVQ AX, R15
+ MOVQ CX, DX
+ ROLQ CL, R15
+ BZHIQ R14, R15, R15
+
+ // Update Offset State
+ BZHIQ R8, R15, CX
+ SHRXQ R8, R15, R15
+ MOVQ $0x00001010, R14 // BEXTR control: 16 bits starting at bit 16 (next-state base)
+ BEXTRQ R14, R8, R8
+ ADDQ CX, R8
+
+ // Load ctx.ofTable
+ MOVQ ctx+16(FP), CX
+ MOVQ 48(CX), CX
+ MOVQ (CX)(R8*8), R8
+
+ // Update Match Length State
+ BZHIQ DI, R15, CX
+ SHRXQ DI, R15, R15
+ MOVQ $0x00001010, R14
+ BEXTRQ R14, DI, DI
+ ADDQ CX, DI
+
+ // Load ctx.mlTable
+ MOVQ ctx+16(FP), CX
+ MOVQ 24(CX), CX
+ MOVQ (CX)(DI*8), DI
+
+ // Update Literal Length State
+ BZHIQ SI, R15, CX
+ MOVQ $0x00001010, R14
+ BEXTRQ R14, SI, SI
+ ADDQ CX, SI
+
+ // Load ctx.llTable
+ MOVQ ctx+16(FP), CX
+ MOVQ (CX), CX
+ MOVQ (CX)(SI*8), SI
+
+sequenceDecs_decode_56_bmi2_skip_update:
+ // Adjust offset
+ MOVQ 16(R9), CX
+ CMPQ R13, $0x01 // offsetB > 1: literal offset, rotate the history
+ JBE sequenceDecs_decode_56_bmi2_adjust_offsetB_1_or_0
+ MOVQ R11, R12
+ MOVQ R10, R11
+ MOVQ CX, R10
+ JMP sequenceDecs_decode_56_bmi2_after_adjust
+
+sequenceDecs_decode_56_bmi2_adjust_offsetB_1_or_0:
+ CMPQ (R9), $0x00000000 // litLen == 0 shifts the repeat-offset index by one
+ JNE sequenceDecs_decode_56_bmi2_adjust_offset_maybezero
+ INCQ CX
+ JMP sequenceDecs_decode_56_bmi2_adjust_offset_nonzero
+
+sequenceDecs_decode_56_bmi2_adjust_offset_maybezero:
+ TESTQ CX, CX
+ JNZ sequenceDecs_decode_56_bmi2_adjust_offset_nonzero
+ MOVQ R10, CX // repeat code 0: reuse previous offset unchanged
+ JMP sequenceDecs_decode_56_bmi2_after_adjust
+
+sequenceDecs_decode_56_bmi2_adjust_offset_nonzero:
+ CMPQ CX, $0x01
+ JB sequenceDecs_decode_56_bmi2_adjust_zero
+ JEQ sequenceDecs_decode_56_bmi2_adjust_one
+ CMPQ CX, $0x02
+ JA sequenceDecs_decode_56_bmi2_adjust_three
+ JMP sequenceDecs_decode_56_bmi2_adjust_two
+
+sequenceDecs_decode_56_bmi2_adjust_zero:
+ MOVQ R10, R13
+ JMP sequenceDecs_decode_56_bmi2_adjust_test_temp_valid
+
+sequenceDecs_decode_56_bmi2_adjust_one:
+ MOVQ R11, R13
+ JMP sequenceDecs_decode_56_bmi2_adjust_test_temp_valid
+
+sequenceDecs_decode_56_bmi2_adjust_two:
+ MOVQ R12, R13
+ JMP sequenceDecs_decode_56_bmi2_adjust_test_temp_valid
+
+sequenceDecs_decode_56_bmi2_adjust_three:
+ LEAQ -1(R10), R13 // repeat code 3 means prevOffset[0]-1
+
+sequenceDecs_decode_56_bmi2_adjust_test_temp_valid:
+ TESTQ R13, R13
+ JNZ sequenceDecs_decode_56_bmi2_adjust_temp_valid
+ MOVQ $0x00000001, R13 // offset must never be zero
+
+sequenceDecs_decode_56_bmi2_adjust_temp_valid:
+ CMPQ CX, $0x01
+ CMOVQNE R11, R12 // unless repeat code was 1, shift history: prev2 <- prev1
+ MOVQ R10, R11
+ MOVQ R13, R10
+ MOVQ R13, CX
+
+sequenceDecs_decode_56_bmi2_after_adjust:
+ MOVQ CX, 16(R9)
+
+ // Check values
+ MOVQ 8(R9), R13
+ MOVQ (R9), R14
+ LEAQ (R13)(R14*1), R15
+ MOVQ s+0(FP), BP
+ ADDQ R15, 256(BP) // accumulate litLen+matchLen into s (field at offset 256)
+ MOVQ ctx+16(FP), R15
+ SUBQ R14, 128(R15)
+ JS error_not_enough_literals
+ CMPQ R13, $0x00020002 // max match length sanity bound
+ JA sequenceDecs_decode_56_bmi2_error_match_len_too_big
+ TESTQ CX, CX
+ JNZ sequenceDecs_decode_56_bmi2_match_len_ofs_ok
+ TESTQ R13, R13
+ JNZ sequenceDecs_decode_56_bmi2_error_match_len_ofs_mismatch // matchLen != 0 with offset == 0 is invalid
+
+sequenceDecs_decode_56_bmi2_match_len_ofs_ok:
+ ADDQ $0x18, R9 // advance to the next 24-byte sequence slot
+ MOVQ ctx+16(FP), CX
+ DECQ 96(CX)
+ JNS sequenceDecs_decode_56_bmi2_main_loop
+ MOVQ s+0(FP), CX
+ MOVQ R10, 144(CX) // write back offset history
+ MOVQ R11, 152(CX)
+ MOVQ R12, 160(CX)
+ MOVQ br+8(FP), CX
+ MOVQ AX, 32(CX) // write back bitReader state
+ MOVB DL, 40(CX)
+ MOVQ BX, 24(CX)
+
+ // Return success
+ MOVQ $0x00000000, ret+24(FP)
+ RET
+
+ // Return with match length error
+sequenceDecs_decode_56_bmi2_error_match_len_ofs_mismatch:
+ MOVQ $0x00000001, ret+24(FP)
+ RET
+
+ // Return with match too long error
+sequenceDecs_decode_56_bmi2_error_match_len_too_big:
+ MOVQ $0x00000002, ret+24(FP)
+ RET
+
+ // Return with match offset too long error
+ MOVQ $0x00000003, ret+24(FP) // NOTE(review): unreachable — no label precedes this return; dead code kept by the generator
+ RET
+
+ // Return with not enough literals error
+error_not_enough_literals:
+ MOVQ $0x00000004, ret+24(FP)
+ RET
+
+ // Return with overread error
+error_overread:
+ MOVQ $0x00000006, ret+24(FP)
+ RET
+
+// func sequenceDecs_executeSimple_amd64(ctx *executeAsmContext) bool
+// Requires: SSE
+TEXT ·sequenceDecs_executeSimple_amd64(SB), $8-9
+ MOVQ ctx+0(FP), R10 // R10 = ctx
+ MOVQ 8(R10), CX // CX = number of sequences (ctx offset 8)
+ TESTQ CX, CX
+ JZ empty_seqs
+ MOVQ (R10), AX // AX = seqs base pointer
+ MOVQ 24(R10), DX // DX = current sequence index
+ MOVQ 32(R10), BX // BX = output base pointer
+ MOVQ 80(R10), SI // SI = literals pointer
+ MOVQ 104(R10), DI // DI = output position
+ MOVQ 120(R10), R8 // R8 = window size bound
+ MOVQ 56(R10), R9 // R9 = history base
+ MOVQ 64(R10), R10 // R10 = history length (reuses the ctx register)
+ ADDQ R10, R9 // R9 = pointer past end of history
+
+ // seqsBase += 24 * seqIndex
+ LEAQ (DX)(DX*2), R11
+ SHLQ $0x03, R11
+ ADDQ R11, AX
+
+ // outBase += outPosition
+ ADDQ DI, BX
+
+main_loop: // one iteration executes one sequence: copy literals, then copy match
+ MOVQ (AX), R11 // R11 = litLen
+ MOVQ 16(AX), R12 // R12 = match offset
+ MOVQ 8(AX), R13 // R13 = matchLen
+
+ // Copy literals
+ TESTQ R11, R11
+ JZ check_offset
+ XORQ R14, R14
+
+copy_1: // unsafe fast path: copies in 16-byte chunks, may write up to 15 bytes past litLen
+ MOVUPS (SI)(R14*1), X0
+ MOVUPS X0, (BX)(R14*1)
+ ADDQ $0x10, R14
+ CMPQ R14, R11
+ JB copy_1
+ ADDQ R11, SI
+ ADDQ R11, BX
+ ADDQ R11, DI
+
+ // Malformed input if seq.mo > t+len(hist) || seq.mo > s.windowSize)
+check_offset:
+ LEAQ (DI)(R10*1), R11
+ CMPQ R12, R11
+ JG error_match_off_too_big
+ CMPQ R12, R8
+ JG error_match_off_too_big
+
+ // Copy match from history
+ MOVQ R12, R11
+ SUBQ DI, R11 // R11 = how far the match reaches back into history
+ JLS copy_match // JLS == JBE: match lies entirely in the current buffer
+ MOVQ R9, R14
+ SUBQ R11, R14 // R14 = start of match inside history
+ CMPQ R13, R11
+ JG copy_all_from_history // match spans history and current buffer
+ MOVQ R13, R11
+ SUBQ $0x10, R11
+ JB copy_4_small
+
+copy_4_loop: // 16-byte chunked copy, exact length handled by the final unaligned pair
+ MOVUPS (R14), X0
+ MOVUPS X0, (BX)
+ ADDQ $0x10, R14
+ ADDQ $0x10, BX
+ SUBQ $0x10, R11
+ JAE copy_4_loop
+ LEAQ 16(R14)(R11*1), R14
+ LEAQ 16(BX)(R11*1), BX
+ MOVUPS -16(R14), X0
+ MOVUPS X0, -16(BX)
+ JMP copy_4_end
+
+copy_4_small:
+ CMPQ R13, $0x03
+ JE copy_4_move_3
+ CMPQ R13, $0x08
+ JB copy_4_move_4through7
+ JMP copy_4_move_8through16
+
+copy_4_move_3:
+ MOVW (R14), R11
+ MOVB 2(R14), R12
+ MOVW R11, (BX)
+ MOVB R12, 2(BX)
+ ADDQ R13, R14
+ ADDQ R13, BX
+ JMP copy_4_end
+
+copy_4_move_4through7: // two overlapping 4-byte moves cover any length 4..7
+ MOVL (R14), R11
+ MOVL -4(R14)(R13*1), R12
+ MOVL R11, (BX)
+ MOVL R12, -4(BX)(R13*1)
+ ADDQ R13, R14
+ ADDQ R13, BX
+ JMP copy_4_end
+
+copy_4_move_8through16: // two overlapping 8-byte moves cover any length 8..16
+ MOVQ (R14), R11
+ MOVQ -8(R14)(R13*1), R12
+ MOVQ R11, (BX)
+ MOVQ R12, -8(BX)(R13*1)
+ ADDQ R13, R14
+ ADDQ R13, BX
+
+copy_4_end:
+ ADDQ R13, DI
+ ADDQ $0x18, AX
+ INCQ DX
+ CMPQ DX, CX
+ JB main_loop
+ JMP loop_finished
+
+copy_all_from_history: // copy the history part (R11 bytes), then fall through to copy the rest from the current buffer
+ MOVQ R11, R15
+ SUBQ $0x10, R15
+ JB copy_5_small
+
+copy_5_loop:
+ MOVUPS (R14), X0
+ MOVUPS X0, (BX)
+ ADDQ $0x10, R14
+ ADDQ $0x10, BX
+ SUBQ $0x10, R15
+ JAE copy_5_loop
+ LEAQ 16(R14)(R15*1), R14
+ LEAQ 16(BX)(R15*1), BX
+ MOVUPS -16(R14), X0
+ MOVUPS X0, -16(BX)
+ JMP copy_5_end
+
+copy_5_small:
+ CMPQ R11, $0x03
+ JE copy_5_move_3
+ JB copy_5_move_1or2
+ CMPQ R11, $0x08
+ JB copy_5_move_4through7
+ JMP copy_5_move_8through16
+
+copy_5_move_1or2:
+ MOVB (R14), R15
+ MOVB -1(R14)(R11*1), BP
+ MOVB R15, (BX)
+ MOVB BP, -1(BX)(R11*1)
+ ADDQ R11, R14
+ ADDQ R11, BX
+ JMP copy_5_end
+
+copy_5_move_3:
+ MOVW (R14), R15
+ MOVB 2(R14), BP
+ MOVW R15, (BX)
+ MOVB BP, 2(BX)
+ ADDQ R11, R14
+ ADDQ R11, BX
+ JMP copy_5_end
+
+copy_5_move_4through7:
+ MOVL (R14), R15
+ MOVL -4(R14)(R11*1), BP
+ MOVL R15, (BX)
+ MOVL BP, -4(BX)(R11*1)
+ ADDQ R11, R14
+ ADDQ R11, BX
+ JMP copy_5_end
+
+copy_5_move_8through16:
+ MOVQ (R14), R15
+ MOVQ -8(R14)(R11*1), BP
+ MOVQ R15, (BX)
+ MOVQ BP, -8(BX)(R11*1)
+ ADDQ R11, R14
+ ADDQ R11, BX
+
+copy_5_end:
+ ADDQ R11, DI
+ SUBQ R11, R13 // remaining match bytes come from the current buffer
+
+ // Copy match from the current buffer
+copy_match:
+ MOVQ BX, R11
+ SUBQ R12, R11 // R11 = match source = out - offset
+
+ // ml <= mo
+ CMPQ R13, R12
+ JA copy_overlapping_match // source and destination overlap: must copy byte by byte
+
+ // Copy non-overlapping match
+ ADDQ R13, DI
+ MOVQ BX, R12
+ ADDQ R13, BX
+
+copy_2: // unsafe 16-byte chunked copy (may overwrite past matchLen)
+ MOVUPS (R11), X0
+ MOVUPS X0, (R12)
+ ADDQ $0x10, R11
+ ADDQ $0x10, R12
+ SUBQ $0x10, R13
+ JHI copy_2 // JHI == JA (unsigned >)
+ JMP handle_loop
+
+ // Copy overlapping match
+copy_overlapping_match:
+ ADDQ R13, DI
+
+copy_slow_3: // byte-at-a-time so repeated patterns (offset < length) replicate correctly
+ MOVB (R11), R12
+ MOVB R12, (BX)
+ INCQ R11
+ INCQ BX
+ DECQ R13
+ JNZ copy_slow_3
+
+handle_loop:
+ ADDQ $0x18, AX
+ INCQ DX
+ CMPQ DX, CX
+ JB main_loop
+
+loop_finished:
+ // Return value
+ MOVB $0x01, ret+8(FP)
+
+ // Update the context
+ MOVQ ctx+0(FP), AX
+ MOVQ DX, 24(AX) // seqIndex
+ MOVQ DI, 104(AX) // outPosition
+ SUBQ 80(AX), SI
+ MOVQ SI, 112(AX) // literals consumed
+ RET
+
+error_match_off_too_big:
+ // Return value
+ MOVB $0x00, ret+8(FP)
+
+ // Update the context
+ MOVQ ctx+0(FP), AX
+ MOVQ DX, 24(AX)
+ MOVQ DI, 104(AX)
+ SUBQ 80(AX), SI
+ MOVQ SI, 112(AX)
+ RET
+
+empty_seqs:
+ // Return value
+ MOVB $0x01, ret+8(FP)
+ RET
+
+// func sequenceDecs_executeSimple_safe_amd64(ctx *executeAsmContext) bool
+// Requires: SSE
+TEXT ·sequenceDecs_executeSimple_safe_amd64(SB), $8-9
+ MOVQ ctx+0(FP), R10 // R10 = ctx; "safe" variant never writes past the exact copy length
+ MOVQ 8(R10), CX // CX = number of sequences
+ TESTQ CX, CX
+ JZ empty_seqs
+ MOVQ (R10), AX // AX = seqs base pointer
+ MOVQ 24(R10), DX // DX = current sequence index
+ MOVQ 32(R10), BX // BX = output base pointer
+ MOVQ 80(R10), SI // SI = literals pointer
+ MOVQ 104(R10), DI // DI = output position
+ MOVQ 120(R10), R8 // R8 = window size bound
+ MOVQ 56(R10), R9 // R9 = history base
+ MOVQ 64(R10), R10 // R10 = history length
+ ADDQ R10, R9 // R9 = pointer past end of history
+
+ // seqsBase += 24 * seqIndex
+ LEAQ (DX)(DX*2), R11
+ SHLQ $0x03, R11
+ ADDQ R11, AX
+
+ // outBase += outPosition
+ ADDQ DI, BX
+
+main_loop: // one iteration executes one sequence: copy literals, then copy match
+ MOVQ (AX), R11 // R11 = litLen
+ MOVQ 16(AX), R12 // R12 = match offset
+ MOVQ 8(AX), R13 // R13 = matchLen
+
+ // Copy literals
+ TESTQ R11, R11
+ JZ check_offset
+ MOVQ R11, R14
+ SUBQ $0x10, R14
+ JB copy_1_small
+
+copy_1_loop: // 16-byte chunks; final overlapping pair makes the copy exact (no overwrite)
+ MOVUPS (SI), X0
+ MOVUPS X0, (BX)
+ ADDQ $0x10, SI
+ ADDQ $0x10, BX
+ SUBQ $0x10, R14
+ JAE copy_1_loop
+ LEAQ 16(SI)(R14*1), SI
+ LEAQ 16(BX)(R14*1), BX
+ MOVUPS -16(SI), X0
+ MOVUPS X0, -16(BX)
+ JMP copy_1_end
+
+copy_1_small:
+ CMPQ R11, $0x03
+ JE copy_1_move_3
+ JB copy_1_move_1or2
+ CMPQ R11, $0x08
+ JB copy_1_move_4through7
+ JMP copy_1_move_8through16
+
+copy_1_move_1or2:
+ MOVB (SI), R14
+ MOVB -1(SI)(R11*1), R15
+ MOVB R14, (BX)
+ MOVB R15, -1(BX)(R11*1)
+ ADDQ R11, SI
+ ADDQ R11, BX
+ JMP copy_1_end
+
+copy_1_move_3:
+ MOVW (SI), R14
+ MOVB 2(SI), R15
+ MOVW R14, (BX)
+ MOVB R15, 2(BX)
+ ADDQ R11, SI
+ ADDQ R11, BX
+ JMP copy_1_end
+
+copy_1_move_4through7: // two overlapping 4-byte moves cover any length 4..7
+ MOVL (SI), R14
+ MOVL -4(SI)(R11*1), R15
+ MOVL R14, (BX)
+ MOVL R15, -4(BX)(R11*1)
+ ADDQ R11, SI
+ ADDQ R11, BX
+ JMP copy_1_end
+
+copy_1_move_8through16: // two overlapping 8-byte moves cover any length 8..16
+ MOVQ (SI), R14
+ MOVQ -8(SI)(R11*1), R15
+ MOVQ R14, (BX)
+ MOVQ R15, -8(BX)(R11*1)
+ ADDQ R11, SI
+ ADDQ R11, BX
+
+copy_1_end:
+ ADDQ R11, DI
+
+ // Malformed input if seq.mo > t+len(hist) || seq.mo > s.windowSize)
+check_offset:
+ LEAQ (DI)(R10*1), R11
+ CMPQ R12, R11
+ JG error_match_off_too_big
+ CMPQ R12, R8
+ JG error_match_off_too_big
+
+ // Copy match from history
+ MOVQ R12, R11
+ SUBQ DI, R11 // R11 = how far the match reaches back into history
+ JLS copy_match // JLS == JBE: match lies entirely in the current buffer
+ MOVQ R9, R14
+ SUBQ R11, R14 // R14 = start of match inside history
+ CMPQ R13, R11
+ JG copy_all_from_history // match spans history and current buffer
+ MOVQ R13, R11
+ SUBQ $0x10, R11
+ JB copy_4_small
+
+copy_4_loop:
+ MOVUPS (R14), X0
+ MOVUPS X0, (BX)
+ ADDQ $0x10, R14
+ ADDQ $0x10, BX
+ SUBQ $0x10, R11
+ JAE copy_4_loop
+ LEAQ 16(R14)(R11*1), R14
+ LEAQ 16(BX)(R11*1), BX
+ MOVUPS -16(R14), X0
+ MOVUPS X0, -16(BX)
+ JMP copy_4_end
+
+copy_4_small:
+ CMPQ R13, $0x03
+ JE copy_4_move_3
+ CMPQ R13, $0x08
+ JB copy_4_move_4through7
+ JMP copy_4_move_8through16
+
+copy_4_move_3:
+ MOVW (R14), R11
+ MOVB 2(R14), R12
+ MOVW R11, (BX)
+ MOVB R12, 2(BX)
+ ADDQ R13, R14
+ ADDQ R13, BX
+ JMP copy_4_end
+
+copy_4_move_4through7:
+ MOVL (R14), R11
+ MOVL -4(R14)(R13*1), R12
+ MOVL R11, (BX)
+ MOVL R12, -4(BX)(R13*1)
+ ADDQ R13, R14
+ ADDQ R13, BX
+ JMP copy_4_end
+
+copy_4_move_8through16:
+ MOVQ (R14), R11
+ MOVQ -8(R14)(R13*1), R12
+ MOVQ R11, (BX)
+ MOVQ R12, -8(BX)(R13*1)
+ ADDQ R13, R14
+ ADDQ R13, BX
+
+copy_4_end:
+ ADDQ R13, DI
+ ADDQ $0x18, AX
+ INCQ DX
+ CMPQ DX, CX
+ JB main_loop
+ JMP loop_finished
+
+copy_all_from_history: // copy the history part (R11 bytes), then fall through to copy the rest from the current buffer
+ MOVQ R11, R15
+ SUBQ $0x10, R15
+ JB copy_5_small
+
+copy_5_loop:
+ MOVUPS (R14), X0
+ MOVUPS X0, (BX)
+ ADDQ $0x10, R14
+ ADDQ $0x10, BX
+ SUBQ $0x10, R15
+ JAE copy_5_loop
+ LEAQ 16(R14)(R15*1), R14
+ LEAQ 16(BX)(R15*1), BX
+ MOVUPS -16(R14), X0
+ MOVUPS X0, -16(BX)
+ JMP copy_5_end
+
+copy_5_small:
+ CMPQ R11, $0x03
+ JE copy_5_move_3
+ JB copy_5_move_1or2
+ CMPQ R11, $0x08
+ JB copy_5_move_4through7
+ JMP copy_5_move_8through16
+
+copy_5_move_1or2:
+ MOVB (R14), R15
+ MOVB -1(R14)(R11*1), BP
+ MOVB R15, (BX)
+ MOVB BP, -1(BX)(R11*1)
+ ADDQ R11, R14
+ ADDQ R11, BX
+ JMP copy_5_end
+
+copy_5_move_3:
+ MOVW (R14), R15
+ MOVB 2(R14), BP
+ MOVW R15, (BX)
+ MOVB BP, 2(BX)
+ ADDQ R11, R14
+ ADDQ R11, BX
+ JMP copy_5_end
+
+copy_5_move_4through7:
+ MOVL (R14), R15
+ MOVL -4(R14)(R11*1), BP
+ MOVL R15, (BX)
+ MOVL BP, -4(BX)(R11*1)
+ ADDQ R11, R14
+ ADDQ R11, BX
+ JMP copy_5_end
+
+copy_5_move_8through16:
+ MOVQ (R14), R15
+ MOVQ -8(R14)(R11*1), BP
+ MOVQ R15, (BX)
+ MOVQ BP, -8(BX)(R11*1)
+ ADDQ R11, R14
+ ADDQ R11, BX
+
+copy_5_end:
+ ADDQ R11, DI
+ SUBQ R11, R13 // remaining match bytes come from the current buffer
+
+ // Copy match from the current buffer
+copy_match:
+ MOVQ BX, R11
+ SUBQ R12, R11 // R11 = match source = out - offset
+
+ // ml <= mo
+ CMPQ R13, R12
+ JA copy_overlapping_match // source and destination overlap: must copy byte by byte
+
+ // Copy non-overlapping match
+ ADDQ R13, DI
+ MOVQ R13, R12
+ SUBQ $0x10, R12
+ JB copy_2_small
+
+copy_2_loop: // exact-length copy (safe variant): final overlapping pair, no overwrite
+ MOVUPS (R11), X0
+ MOVUPS X0, (BX)
+ ADDQ $0x10, R11
+ ADDQ $0x10, BX
+ SUBQ $0x10, R12
+ JAE copy_2_loop
+ LEAQ 16(R11)(R12*1), R11
+ LEAQ 16(BX)(R12*1), BX
+ MOVUPS -16(R11), X0
+ MOVUPS X0, -16(BX)
+ JMP copy_2_end
+
+copy_2_small:
+ CMPQ R13, $0x03
+ JE copy_2_move_3
+ JB copy_2_move_1or2
+ CMPQ R13, $0x08
+ JB copy_2_move_4through7
+ JMP copy_2_move_8through16
+
+copy_2_move_1or2:
+ MOVB (R11), R12
+ MOVB -1(R11)(R13*1), R14
+ MOVB R12, (BX)
+ MOVB R14, -1(BX)(R13*1)
+ ADDQ R13, R11
+ ADDQ R13, BX
+ JMP copy_2_end
+
+copy_2_move_3:
+ MOVW (R11), R12
+ MOVB 2(R11), R14
+ MOVW R12, (BX)
+ MOVB R14, 2(BX)
+ ADDQ R13, R11
+ ADDQ R13, BX
+ JMP copy_2_end
+
+copy_2_move_4through7:
+ MOVL (R11), R12
+ MOVL -4(R11)(R13*1), R14
+ MOVL R12, (BX)
+ MOVL R14, -4(BX)(R13*1)
+ ADDQ R13, R11
+ ADDQ R13, BX
+ JMP copy_2_end
+
+copy_2_move_8through16:
+ MOVQ (R11), R12
+ MOVQ -8(R11)(R13*1), R14
+ MOVQ R12, (BX)
+ MOVQ R14, -8(BX)(R13*1)
+ ADDQ R13, R11
+ ADDQ R13, BX
+
+copy_2_end:
+ JMP handle_loop
+
+ // Copy overlapping match
+copy_overlapping_match:
+ ADDQ R13, DI
+
+copy_slow_3: // byte-at-a-time so repeated patterns (offset < length) replicate correctly
+ MOVB (R11), R12
+ MOVB R12, (BX)
+ INCQ R11
+ INCQ BX
+ DECQ R13
+ JNZ copy_slow_3
+
+handle_loop:
+ ADDQ $0x18, AX
+ INCQ DX
+ CMPQ DX, CX
+ JB main_loop
+
+loop_finished:
+ // Return value
+ MOVB $0x01, ret+8(FP)
+
+ // Update the context
+ MOVQ ctx+0(FP), AX
+ MOVQ DX, 24(AX) // seqIndex
+ MOVQ DI, 104(AX) // outPosition
+ SUBQ 80(AX), SI
+ MOVQ SI, 112(AX) // literals consumed
+ RET
+
+error_match_off_too_big:
+ // Return value
+ MOVB $0x00, ret+8(FP)
+
+ // Update the context
+ MOVQ ctx+0(FP), AX
+ MOVQ DX, 24(AX)
+ MOVQ DI, 104(AX)
+ SUBQ 80(AX), SI
+ MOVQ SI, 112(AX)
+ RET
+
+empty_seqs:
+ // Return value
+ MOVB $0x01, ret+8(FP)
+ RET
+
+// func sequenceDecs_decodeSync_amd64(s *sequenceDecs, br *bitReader, ctx *decodeSyncAsmContext) int
+// Requires: CMOV, SSE
+TEXT ·sequenceDecs_decodeSync_amd64(SB), $64-32
+ MOVQ br+8(FP), AX
+ MOVQ 32(AX), DX
+ MOVBQZX 40(AX), BX
+ MOVQ 24(AX), SI
+ MOVQ (AX), AX
+ ADDQ SI, AX
+ MOVQ AX, (SP)
+ MOVQ ctx+16(FP), AX
+ MOVQ 72(AX), DI
+ MOVQ 80(AX), R8
+ MOVQ 88(AX), R9
+ XORQ CX, CX
+ MOVQ CX, 8(SP)
+ MOVQ CX, 16(SP)
+ MOVQ CX, 24(SP)
+ MOVQ 112(AX), R10
+ MOVQ 128(AX), CX
+ MOVQ CX, 32(SP)
+ MOVQ 144(AX), R11
+ MOVQ 136(AX), R12
+ MOVQ 200(AX), CX
+ MOVQ CX, 56(SP)
+ MOVQ 176(AX), CX
+ MOVQ CX, 48(SP)
+ MOVQ 184(AX), AX
+ MOVQ AX, 40(SP)
+ MOVQ 40(SP), AX
+ ADDQ AX, 48(SP)
+
+ // Calculate poiter to s.out[cap(s.out)] (a past-end pointer)
+ ADDQ R10, 32(SP)
+
+ // outBase += outPosition
+ ADDQ R12, R10
+
+sequenceDecs_decodeSync_amd64_main_loop:
+ MOVQ (SP), R13
+
+ // Fill bitreader to have enough for the offset and match length.
+ CMPQ SI, $0x08
+ JL sequenceDecs_decodeSync_amd64_fill_byte_by_byte
+ MOVQ BX, AX
+ SHRQ $0x03, AX
+ SUBQ AX, R13
+ MOVQ (R13), DX
+ SUBQ AX, SI
+ ANDQ $0x07, BX
+ JMP sequenceDecs_decodeSync_amd64_fill_end
+
+sequenceDecs_decodeSync_amd64_fill_byte_by_byte:
+ CMPQ SI, $0x00
+ JLE sequenceDecs_decodeSync_amd64_fill_check_overread
+ CMPQ BX, $0x07
+ JLE sequenceDecs_decodeSync_amd64_fill_end
+ SHLQ $0x08, DX
+ SUBQ $0x01, R13
+ SUBQ $0x01, SI
+ SUBQ $0x08, BX
+ MOVBQZX (R13), AX
+ ORQ AX, DX
+ JMP sequenceDecs_decodeSync_amd64_fill_byte_by_byte
+
+sequenceDecs_decodeSync_amd64_fill_check_overread:
+ CMPQ BX, $0x40
+ JA error_overread
+
+sequenceDecs_decodeSync_amd64_fill_end:
+ // Update offset
+ MOVQ R9, AX
+ MOVQ BX, CX
+ MOVQ DX, R14
+ SHLQ CL, R14
+ MOVB AH, CL
+ SHRQ $0x20, AX
+ TESTQ CX, CX
+ JZ sequenceDecs_decodeSync_amd64_of_update_zero
+ ADDQ CX, BX
+ CMPQ BX, $0x40
+ JA sequenceDecs_decodeSync_amd64_of_update_zero
+ CMPQ CX, $0x40
+ JAE sequenceDecs_decodeSync_amd64_of_update_zero
+ NEGQ CX
+ SHRQ CL, R14
+ ADDQ R14, AX
+
+sequenceDecs_decodeSync_amd64_of_update_zero:
+ MOVQ AX, 8(SP)
+
+ // Update match length
+ MOVQ R8, AX
+ MOVQ BX, CX
+ MOVQ DX, R14
+ SHLQ CL, R14
+ MOVB AH, CL
+ SHRQ $0x20, AX
+ TESTQ CX, CX
+ JZ sequenceDecs_decodeSync_amd64_ml_update_zero
+ ADDQ CX, BX
+ CMPQ BX, $0x40
+ JA sequenceDecs_decodeSync_amd64_ml_update_zero
+ CMPQ CX, $0x40
+ JAE sequenceDecs_decodeSync_amd64_ml_update_zero
+ NEGQ CX
+ SHRQ CL, R14
+ ADDQ R14, AX
+
+sequenceDecs_decodeSync_amd64_ml_update_zero:
+ MOVQ AX, 16(SP)
+
+ // Fill bitreader to have enough for the remaining
+ CMPQ SI, $0x08
+ JL sequenceDecs_decodeSync_amd64_fill_2_byte_by_byte
+ MOVQ BX, AX
+ SHRQ $0x03, AX
+ SUBQ AX, R13
+ MOVQ (R13), DX
+ SUBQ AX, SI
+ ANDQ $0x07, BX
+ JMP sequenceDecs_decodeSync_amd64_fill_2_end
+
+sequenceDecs_decodeSync_amd64_fill_2_byte_by_byte:
+ CMPQ SI, $0x00
+ JLE sequenceDecs_decodeSync_amd64_fill_2_check_overread
+ CMPQ BX, $0x07
+ JLE sequenceDecs_decodeSync_amd64_fill_2_end
+ SHLQ $0x08, DX
+ SUBQ $0x01, R13
+ SUBQ $0x01, SI
+ SUBQ $0x08, BX
+ MOVBQZX (R13), AX
+ ORQ AX, DX
+ JMP sequenceDecs_decodeSync_amd64_fill_2_byte_by_byte
+
+sequenceDecs_decodeSync_amd64_fill_2_check_overread:
+ CMPQ BX, $0x40
+ JA error_overread
+
+sequenceDecs_decodeSync_amd64_fill_2_end:
+ // Update literal length
+ MOVQ DI, AX
+ MOVQ BX, CX
+ MOVQ DX, R14
+ SHLQ CL, R14
+ MOVB AH, CL
+ SHRQ $0x20, AX
+ TESTQ CX, CX
+ JZ sequenceDecs_decodeSync_amd64_ll_update_zero
+ ADDQ CX, BX
+ CMPQ BX, $0x40
+ JA sequenceDecs_decodeSync_amd64_ll_update_zero
+ CMPQ CX, $0x40
+ JAE sequenceDecs_decodeSync_amd64_ll_update_zero
+ NEGQ CX
+ SHRQ CL, R14
+ ADDQ R14, AX
+
+sequenceDecs_decodeSync_amd64_ll_update_zero:
+ MOVQ AX, 24(SP)
+
+ // Fill bitreader for state updates
+ MOVQ R13, (SP)
+ MOVQ R9, AX
+ SHRQ $0x08, AX
+ MOVBQZX AL, AX
+ MOVQ ctx+16(FP), CX
+ CMPQ 96(CX), $0x00
+ JZ sequenceDecs_decodeSync_amd64_skip_update
+
+ // Update Literal Length State
+ MOVBQZX DI, R13
+ SHRQ $0x10, DI
+ MOVWQZX DI, DI
+ LEAQ (BX)(R13*1), CX
+ MOVQ DX, R14
+ MOVQ CX, BX
+ ROLQ CL, R14
+ MOVL $0x00000001, R15
+ MOVB R13, CL
+ SHLL CL, R15
+ DECL R15
+ ANDQ R15, R14
+ ADDQ R14, DI
+
+ // Load ctx.llTable
+ MOVQ ctx+16(FP), CX
+ MOVQ (CX), CX
+ MOVQ (CX)(DI*8), DI
+
+ // Update Match Length State
+ MOVBQZX R8, R13
+ SHRQ $0x10, R8
+ MOVWQZX R8, R8
+ LEAQ (BX)(R13*1), CX
+ MOVQ DX, R14
+ MOVQ CX, BX
+ ROLQ CL, R14
+ MOVL $0x00000001, R15
+ MOVB R13, CL
+ SHLL CL, R15
+ DECL R15
+ ANDQ R15, R14
+ ADDQ R14, R8
+
+ // Load ctx.mlTable
+ MOVQ ctx+16(FP), CX
+ MOVQ 24(CX), CX
+ MOVQ (CX)(R8*8), R8
+
+ // Update Offset State
+ MOVBQZX R9, R13
+ SHRQ $0x10, R9
+ MOVWQZX R9, R9
+ LEAQ (BX)(R13*1), CX
+ MOVQ DX, R14
+ MOVQ CX, BX
+ ROLQ CL, R14
+ MOVL $0x00000001, R15
+ MOVB R13, CL
+ SHLL CL, R15
+ DECL R15
+ ANDQ R15, R14
+ ADDQ R14, R9
+
+ // Load ctx.ofTable
+ MOVQ ctx+16(FP), CX
+ MOVQ 48(CX), CX
+ MOVQ (CX)(R9*8), R9
+
+sequenceDecs_decodeSync_amd64_skip_update:
+ // Adjust offset
+ MOVQ s+0(FP), CX
+ MOVQ 8(SP), R13
+ CMPQ AX, $0x01
+ JBE sequenceDecs_decodeSync_amd64_adjust_offsetB_1_or_0
+ MOVUPS 144(CX), X0
+ MOVQ R13, 144(CX)
+ MOVUPS X0, 152(CX)
+ JMP sequenceDecs_decodeSync_amd64_after_adjust
+
+sequenceDecs_decodeSync_amd64_adjust_offsetB_1_or_0:
+ CMPQ 24(SP), $0x00000000
+ JNE sequenceDecs_decodeSync_amd64_adjust_offset_maybezero
+ INCQ R13
+ JMP sequenceDecs_decodeSync_amd64_adjust_offset_nonzero
+
+sequenceDecs_decodeSync_amd64_adjust_offset_maybezero:
+ TESTQ R13, R13
+ JNZ sequenceDecs_decodeSync_amd64_adjust_offset_nonzero
+ MOVQ 144(CX), R13
+ JMP sequenceDecs_decodeSync_amd64_after_adjust
+
+sequenceDecs_decodeSync_amd64_adjust_offset_nonzero:
+ MOVQ R13, AX
+ XORQ R14, R14
+ MOVQ $-1, R15
+ CMPQ R13, $0x03
+ CMOVQEQ R14, AX
+ CMOVQEQ R15, R14
+ ADDQ 144(CX)(AX*8), R14
+ JNZ sequenceDecs_decodeSync_amd64_adjust_temp_valid
+ MOVQ $0x00000001, R14
+
+sequenceDecs_decodeSync_amd64_adjust_temp_valid:
+ CMPQ R13, $0x01
+ JZ sequenceDecs_decodeSync_amd64_adjust_skip
+ MOVQ 152(CX), AX
+ MOVQ AX, 160(CX)
+
+sequenceDecs_decodeSync_amd64_adjust_skip:
+ MOVQ 144(CX), AX
+ MOVQ AX, 152(CX)
+ MOVQ R14, 144(CX)
+ MOVQ R14, R13
+
+sequenceDecs_decodeSync_amd64_after_adjust:
+ MOVQ R13, 8(SP)
+
+ // Check values
+ MOVQ 16(SP), AX
+ MOVQ 24(SP), CX
+ LEAQ (AX)(CX*1), R14
+ MOVQ s+0(FP), R15
+ ADDQ R14, 256(R15)
+ MOVQ ctx+16(FP), R14
+ SUBQ CX, 104(R14)
+ JS error_not_enough_literals
+ CMPQ AX, $0x00020002
+ JA sequenceDecs_decodeSync_amd64_error_match_len_too_big
+ TESTQ R13, R13
+ JNZ sequenceDecs_decodeSync_amd64_match_len_ofs_ok
+ TESTQ AX, AX
+ JNZ sequenceDecs_decodeSync_amd64_error_match_len_ofs_mismatch
+
+sequenceDecs_decodeSync_amd64_match_len_ofs_ok:
+ MOVQ 24(SP), AX
+ MOVQ 8(SP), CX
+ MOVQ 16(SP), R13
+
+ // Check if we have enough space in s.out
+ LEAQ (AX)(R13*1), R14
+ ADDQ R10, R14
+ CMPQ R14, 32(SP)
+ JA error_not_enough_space
+
+ // Copy literals
+ TESTQ AX, AX
+ JZ check_offset
+ XORQ R14, R14
+
+copy_1:
+ MOVUPS (R11)(R14*1), X0
+ MOVUPS X0, (R10)(R14*1)
+ ADDQ $0x10, R14
+ CMPQ R14, AX
+ JB copy_1
+ ADDQ AX, R11
+ ADDQ AX, R10
+ ADDQ AX, R12
+
+ // Malformed input if seq.mo > t+len(hist) || seq.mo > s.windowSize)
+check_offset:
+ MOVQ R12, AX
+ ADDQ 40(SP), AX
+ CMPQ CX, AX
+ JG error_match_off_too_big
+ CMPQ CX, 56(SP)
+ JG error_match_off_too_big
+
+ // Copy match from history
+ MOVQ CX, AX
+ SUBQ R12, AX
+ JLS copy_match
+ MOVQ 48(SP), R14
+ SUBQ AX, R14
+ CMPQ R13, AX
+ JG copy_all_from_history
+ MOVQ R13, AX
+ SUBQ $0x10, AX
+ JB copy_4_small
+
+copy_4_loop:
+ MOVUPS (R14), X0
+ MOVUPS X0, (R10)
+ ADDQ $0x10, R14
+ ADDQ $0x10, R10
+ SUBQ $0x10, AX
+ JAE copy_4_loop
+ LEAQ 16(R14)(AX*1), R14
+ LEAQ 16(R10)(AX*1), R10
+ MOVUPS -16(R14), X0
+ MOVUPS X0, -16(R10)
+ JMP copy_4_end
+
+copy_4_small:
+ CMPQ R13, $0x03
+ JE copy_4_move_3
+ CMPQ R13, $0x08
+ JB copy_4_move_4through7
+ JMP copy_4_move_8through16
+
+copy_4_move_3:
+ MOVW (R14), AX
+ MOVB 2(R14), CL
+ MOVW AX, (R10)
+ MOVB CL, 2(R10)
+ ADDQ R13, R14
+ ADDQ R13, R10
+ JMP copy_4_end
+
+copy_4_move_4through7:
+ MOVL (R14), AX
+ MOVL -4(R14)(R13*1), CX
+ MOVL AX, (R10)
+ MOVL CX, -4(R10)(R13*1)
+ ADDQ R13, R14
+ ADDQ R13, R10
+ JMP copy_4_end
+
+copy_4_move_8through16:
+ MOVQ (R14), AX
+ MOVQ -8(R14)(R13*1), CX
+ MOVQ AX, (R10)
+ MOVQ CX, -8(R10)(R13*1)
+ ADDQ R13, R14
+ ADDQ R13, R10
+
+copy_4_end:
+ ADDQ R13, R12
+ JMP handle_loop
+ JMP loop_finished
+
+copy_all_from_history:
+ MOVQ AX, R15
+ SUBQ $0x10, R15
+ JB copy_5_small
+
+copy_5_loop:
+ MOVUPS (R14), X0
+ MOVUPS X0, (R10)
+ ADDQ $0x10, R14
+ ADDQ $0x10, R10
+ SUBQ $0x10, R15
+ JAE copy_5_loop
+ LEAQ 16(R14)(R15*1), R14
+ LEAQ 16(R10)(R15*1), R10
+ MOVUPS -16(R14), X0
+ MOVUPS X0, -16(R10)
+ JMP copy_5_end
+
+copy_5_small:
+ CMPQ AX, $0x03
+ JE copy_5_move_3
+ JB copy_5_move_1or2
+ CMPQ AX, $0x08
+ JB copy_5_move_4through7
+ JMP copy_5_move_8through16
+
+copy_5_move_1or2:
+ MOVB (R14), R15
+ MOVB -1(R14)(AX*1), BP
+ MOVB R15, (R10)
+ MOVB BP, -1(R10)(AX*1)
+ ADDQ AX, R14
+ ADDQ AX, R10
+ JMP copy_5_end
+
+copy_5_move_3:
+ MOVW (R14), R15
+ MOVB 2(R14), BP
+ MOVW R15, (R10)
+ MOVB BP, 2(R10)
+ ADDQ AX, R14
+ ADDQ AX, R10
+ JMP copy_5_end
+
+copy_5_move_4through7:
+ MOVL (R14), R15
+ MOVL -4(R14)(AX*1), BP
+ MOVL R15, (R10)
+ MOVL BP, -4(R10)(AX*1)
+ ADDQ AX, R14
+ ADDQ AX, R10
+ JMP copy_5_end
+
+copy_5_move_8through16:
+ MOVQ (R14), R15
+ MOVQ -8(R14)(AX*1), BP
+ MOVQ R15, (R10)
+ MOVQ BP, -8(R10)(AX*1)
+ ADDQ AX, R14
+ ADDQ AX, R10
+
+copy_5_end:
+ ADDQ AX, R12
+ SUBQ AX, R13
+
+ // Copy match from the current buffer
+copy_match:
+ MOVQ R10, AX
+ SUBQ CX, AX
+
+ // ml <= mo
+ CMPQ R13, CX
+ JA copy_overlapping_match
+
+ // Copy non-overlapping match
+ ADDQ R13, R12
+ MOVQ R10, CX
+ ADDQ R13, R10
+
+copy_2:
+ MOVUPS (AX), X0
+ MOVUPS X0, (CX)
+ ADDQ $0x10, AX
+ ADDQ $0x10, CX
+ SUBQ $0x10, R13
+ JHI copy_2
+ JMP handle_loop
+
+ // Copy overlapping match
+copy_overlapping_match:
+ ADDQ R13, R12
+
+copy_slow_3:
+ MOVB (AX), CL
+ MOVB CL, (R10)
+ INCQ AX
+ INCQ R10
+ DECQ R13
+ JNZ copy_slow_3
+
+handle_loop:
+ MOVQ ctx+16(FP), AX
+ DECQ 96(AX)
+ JNS sequenceDecs_decodeSync_amd64_main_loop
+
+loop_finished:
+ MOVQ br+8(FP), AX
+ MOVQ DX, 32(AX)
+ MOVB BL, 40(AX)
+ MOVQ SI, 24(AX)
+
+ // Update the context
+ MOVQ ctx+16(FP), AX
+ MOVQ R12, 136(AX)
+ MOVQ 144(AX), CX
+ SUBQ CX, R11
+ MOVQ R11, 168(AX)
+
+ // Return success
+ MOVQ $0x00000000, ret+24(FP)
+ RET
+
+ // Return with match length error
+sequenceDecs_decodeSync_amd64_error_match_len_ofs_mismatch:
+ MOVQ 16(SP), AX
+ MOVQ ctx+16(FP), CX
+ MOVQ AX, 216(CX)
+ MOVQ $0x00000001, ret+24(FP)
+ RET
+
+ // Return with match too long error
+sequenceDecs_decodeSync_amd64_error_match_len_too_big:
+ MOVQ ctx+16(FP), AX
+ MOVQ 16(SP), CX
+ MOVQ CX, 216(AX)
+ MOVQ $0x00000002, ret+24(FP)
+ RET
+
+ // Return with match offset too long error
+error_match_off_too_big:
+ MOVQ ctx+16(FP), AX
+ MOVQ 8(SP), CX
+ MOVQ CX, 224(AX)
+ MOVQ R12, 136(AX)
+ MOVQ $0x00000003, ret+24(FP)
+ RET
+
+ // Return with not enough literals error
+error_not_enough_literals:
+ MOVQ ctx+16(FP), AX
+ MOVQ 24(SP), CX
+ MOVQ CX, 208(AX)
+ MOVQ $0x00000004, ret+24(FP)
+ RET
+
+ // Return with overread error
+error_overread:
+ MOVQ $0x00000006, ret+24(FP)
+ RET
+
+ // Return with not enough output space error
+error_not_enough_space:
+ MOVQ ctx+16(FP), AX
+ MOVQ 24(SP), CX
+ MOVQ CX, 208(AX)
+ MOVQ 16(SP), CX
+ MOVQ CX, 216(AX)
+ MOVQ R12, 136(AX)
+ MOVQ $0x00000005, ret+24(FP)
+ RET
+
+// func sequenceDecs_decodeSync_bmi2(s *sequenceDecs, br *bitReader, ctx *decodeSyncAsmContext) int
+// Requires: BMI, BMI2, CMOV, SSE
+//
+// Decodes sequences from the bitstream and immediately executes them
+// (literal copy, history copy, match copy) into the output buffer, one
+// sequence per main-loop iteration. BMI2 variant: bitfield extraction is
+// done with BEXTRQ/BZHIQ/SHRXQ instead of the shift/mask sequences used
+// by the plain amd64 version. Returns 0 on success or a nonzero error
+// code (1..6) matching the error labels at the bottom.
+// NOTE(review): avo-generated vendored code - do not hand-edit; the
+// constant ctx/br field offsets below (e.g. 72/80/88, 96, 104, 136, 144)
+// must match the Go-side struct layouts; presumably defined in
+// seqdec_amd64.go - verify against that file if offsets ever change.
+TEXT ·sequenceDecs_decodeSync_bmi2(SB), $64-32
+	MOVQ br+8(FP), CX
+	MOVQ 32(CX), AX
+	MOVBQZX 40(CX), DX
+	MOVQ 24(CX), BX
+	MOVQ (CX), CX
+	ADDQ BX, CX
+	MOVQ CX, (SP)
+	MOVQ ctx+16(FP), CX
+	MOVQ 72(CX), SI
+	MOVQ 80(CX), DI
+	MOVQ 88(CX), R8
+	XORQ R9, R9
+	MOVQ R9, 8(SP)
+	MOVQ R9, 16(SP)
+	MOVQ R9, 24(SP)
+	MOVQ 112(CX), R9
+	MOVQ 128(CX), R10
+	MOVQ R10, 32(SP)
+	MOVQ 144(CX), R10
+	MOVQ 136(CX), R11
+	MOVQ 200(CX), R12
+	MOVQ R12, 56(SP)
+	MOVQ 176(CX), R12
+	MOVQ R12, 48(SP)
+	MOVQ 184(CX), CX
+	MOVQ CX, 40(SP)
+	MOVQ 40(SP), CX
+	ADDQ CX, 48(SP)
+
+	// Calculate poiter to s.out[cap(s.out)] (a past-end pointer)
+	ADDQ R9, 32(SP)
+
+	// outBase += outPosition
+	ADDQ R11, R9
+
+	// Main loop: decode and execute one sequence per iteration.
+sequenceDecs_decodeSync_bmi2_main_loop:
+	MOVQ (SP), R12
+
+	// Fill bitreader to have enough for the offset and match length.
+	CMPQ BX, $0x08
+	JL   sequenceDecs_decodeSync_bmi2_fill_byte_by_byte
+	MOVQ DX, CX
+	SHRQ $0x03, CX
+	SUBQ CX, R12
+	MOVQ (R12), AX
+	SUBQ CX, BX
+	ANDQ $0x07, DX
+	JMP  sequenceDecs_decodeSync_bmi2_fill_end
+
+	// Slow refill path: fewer than 8 bytes left, pull single bytes.
+sequenceDecs_decodeSync_bmi2_fill_byte_by_byte:
+	CMPQ    BX, $0x00
+	JLE     sequenceDecs_decodeSync_bmi2_fill_check_overread
+	CMPQ    DX, $0x07
+	JLE     sequenceDecs_decodeSync_bmi2_fill_end
+	SHLQ    $0x08, AX
+	SUBQ    $0x01, R12
+	SUBQ    $0x01, BX
+	SUBQ    $0x08, DX
+	MOVBQZX (R12), CX
+	ORQ     CX, AX
+	JMP     sequenceDecs_decodeSync_bmi2_fill_byte_by_byte
+
+sequenceDecs_decodeSync_bmi2_fill_check_overread:
+	CMPQ DX, $0x40
+	JA   error_overread
+
+sequenceDecs_decodeSync_bmi2_fill_end:
+	// Update offset
+	MOVQ   $0x00000808, CX
+	BEXTRQ CX, R8, R13
+	MOVQ   AX, R14
+	LEAQ   (DX)(R13*1), CX
+	ROLQ   CL, R14
+	BZHIQ  R13, R14, R14
+	MOVQ   CX, DX
+	MOVQ   R8, CX
+	SHRQ   $0x20, CX
+	ADDQ   R14, CX
+	MOVQ   CX, 8(SP)
+
+	// Update match length
+	MOVQ   $0x00000808, CX
+	BEXTRQ CX, DI, R13
+	MOVQ   AX, R14
+	LEAQ   (DX)(R13*1), CX
+	ROLQ   CL, R14
+	BZHIQ  R13, R14, R14
+	MOVQ   CX, DX
+	MOVQ   DI, CX
+	SHRQ   $0x20, CX
+	ADDQ   R14, CX
+	MOVQ   CX, 16(SP)
+
+	// Fill bitreader to have enough for the remaining
+	CMPQ BX, $0x08
+	JL   sequenceDecs_decodeSync_bmi2_fill_2_byte_by_byte
+	MOVQ DX, CX
+	SHRQ $0x03, CX
+	SUBQ CX, R12
+	MOVQ (R12), AX
+	SUBQ CX, BX
+	ANDQ $0x07, DX
+	JMP  sequenceDecs_decodeSync_bmi2_fill_2_end
+
+sequenceDecs_decodeSync_bmi2_fill_2_byte_by_byte:
+	CMPQ    BX, $0x00
+	JLE     sequenceDecs_decodeSync_bmi2_fill_2_check_overread
+	CMPQ    DX, $0x07
+	JLE     sequenceDecs_decodeSync_bmi2_fill_2_end
+	SHLQ    $0x08, AX
+	SUBQ    $0x01, R12
+	SUBQ    $0x01, BX
+	SUBQ    $0x08, DX
+	MOVBQZX (R12), CX
+	ORQ     CX, AX
+	JMP     sequenceDecs_decodeSync_bmi2_fill_2_byte_by_byte
+
+sequenceDecs_decodeSync_bmi2_fill_2_check_overread:
+	CMPQ DX, $0x40
+	JA   error_overread
+
+sequenceDecs_decodeSync_bmi2_fill_2_end:
+	// Update literal length
+	MOVQ   $0x00000808, CX
+	BEXTRQ CX, SI, R13
+	MOVQ   AX, R14
+	LEAQ   (DX)(R13*1), CX
+	ROLQ   CL, R14
+	BZHIQ  R13, R14, R14
+	MOVQ   CX, DX
+	MOVQ   SI, CX
+	SHRQ   $0x20, CX
+	ADDQ   R14, CX
+	MOVQ   CX, 24(SP)
+
+	// Fill bitreader for state updates
+	MOVQ    R12, (SP)
+	MOVQ    $0x00000808, CX
+	BEXTRQ  CX, R8, R12
+	MOVQ    ctx+16(FP), CX
+	CMPQ    96(CX), $0x00
+	JZ      sequenceDecs_decodeSync_bmi2_skip_update
+	LEAQ    (SI)(DI*1), R13
+	ADDQ    R8, R13
+	MOVBQZX R13, R13
+	LEAQ    (DX)(R13*1), CX
+	MOVQ    AX, R14
+	MOVQ    CX, DX
+	ROLQ    CL, R14
+	BZHIQ   R13, R14, R14
+
+	// Update Offset State
+	BZHIQ  R8, R14, CX
+	SHRXQ  R8, R14, R14
+	MOVQ   $0x00001010, R13
+	BEXTRQ R13, R8, R8
+	ADDQ   CX, R8
+
+	// Load ctx.ofTable
+	MOVQ ctx+16(FP), CX
+	MOVQ 48(CX), CX
+	MOVQ (CX)(R8*8), R8
+
+	// Update Match Length State
+	BZHIQ  DI, R14, CX
+	SHRXQ  DI, R14, R14
+	MOVQ   $0x00001010, R13
+	BEXTRQ R13, DI, DI
+	ADDQ   CX, DI
+
+	// Load ctx.mlTable
+	MOVQ ctx+16(FP), CX
+	MOVQ 24(CX), CX
+	MOVQ (CX)(DI*8), DI
+
+	// Update Literal Length State
+	BZHIQ  SI, R14, CX
+	MOVQ   $0x00001010, R13
+	BEXTRQ R13, SI, SI
+	ADDQ   CX, SI
+
+	// Load ctx.llTable
+	MOVQ ctx+16(FP), CX
+	MOVQ (CX), CX
+	MOVQ (CX)(SI*8), SI
+
+	// Resolve repeat offsets (zstd "repcode" handling) against the
+	// three previous-offset slots at 144/152/160(CX).
+sequenceDecs_decodeSync_bmi2_skip_update:
+	// Adjust offset
+	MOVQ   s+0(FP), CX
+	MOVQ   8(SP), R13
+	CMPQ   R12, $0x01
+	JBE    sequenceDecs_decodeSync_bmi2_adjust_offsetB_1_or_0
+	MOVUPS 144(CX), X0
+	MOVQ   R13, 144(CX)
+	MOVUPS X0, 152(CX)
+	JMP    sequenceDecs_decodeSync_bmi2_after_adjust
+
+sequenceDecs_decodeSync_bmi2_adjust_offsetB_1_or_0:
+	CMPQ 24(SP), $0x00000000
+	JNE  sequenceDecs_decodeSync_bmi2_adjust_offset_maybezero
+	INCQ R13
+	JMP  sequenceDecs_decodeSync_bmi2_adjust_offset_nonzero
+
+sequenceDecs_decodeSync_bmi2_adjust_offset_maybezero:
+	TESTQ R13, R13
+	JNZ   sequenceDecs_decodeSync_bmi2_adjust_offset_nonzero
+	MOVQ  144(CX), R13
+	JMP   sequenceDecs_decodeSync_bmi2_after_adjust
+
+sequenceDecs_decodeSync_bmi2_adjust_offset_nonzero:
+	MOVQ    R13, R12
+	XORQ    R14, R14
+	MOVQ    $-1, R15
+	CMPQ    R13, $0x03
+	CMOVQEQ R14, R12
+	CMOVQEQ R15, R14
+	// NOTE: JNZ consumes the flags of this ADDQ (temp != 0 check).
+	ADDQ    144(CX)(R12*8), R14
+	JNZ     sequenceDecs_decodeSync_bmi2_adjust_temp_valid
+	MOVQ    $0x00000001, R14
+
+sequenceDecs_decodeSync_bmi2_adjust_temp_valid:
+	CMPQ R13, $0x01
+	JZ   sequenceDecs_decodeSync_bmi2_adjust_skip
+	MOVQ 152(CX), R12
+	MOVQ R12, 160(CX)
+
+sequenceDecs_decodeSync_bmi2_adjust_skip:
+	MOVQ 144(CX), R12
+	MOVQ R12, 152(CX)
+	MOVQ R14, 144(CX)
+	MOVQ R14, R13
+
+sequenceDecs_decodeSync_bmi2_after_adjust:
+	MOVQ R13, 8(SP)
+
+	// Check values
+	MOVQ  16(SP), CX
+	MOVQ  24(SP), R12
+	LEAQ  (CX)(R12*1), R14
+	MOVQ  s+0(FP), R15
+	ADDQ  R14, 256(R15)
+	MOVQ  ctx+16(FP), R14
+	SUBQ  R12, 104(R14)
+	JS    error_not_enough_literals
+	CMPQ  CX, $0x00020002
+	JA    sequenceDecs_decodeSync_bmi2_error_match_len_too_big
+	TESTQ R13, R13
+	JNZ   sequenceDecs_decodeSync_bmi2_match_len_ofs_ok
+	TESTQ CX, CX
+	JNZ   sequenceDecs_decodeSync_bmi2_error_match_len_ofs_mismatch
+
+sequenceDecs_decodeSync_bmi2_match_len_ofs_ok:
+	MOVQ 24(SP), CX
+	MOVQ 8(SP), R12
+	MOVQ 16(SP), R13
+
+	// Check if we have enough space in s.out
+	LEAQ (CX)(R13*1), R14
+	ADDQ R9, R14
+	CMPQ R14, 32(SP)
+	JA   error_not_enough_space
+
+	// Copy literals
+	// NOTE: copies in 16-byte chunks and may write up to 15 bytes past
+	// the literal length; the space check above presumably guarantees
+	// slack - this is why a separate "safe" variant exists.
+	TESTQ CX, CX
+	JZ    check_offset
+	XORQ  R14, R14
+
+copy_1:
+	MOVUPS (R10)(R14*1), X0
+	MOVUPS X0, (R9)(R14*1)
+	ADDQ   $0x10, R14
+	CMPQ   R14, CX
+	JB     copy_1
+	ADDQ   CX, R10
+	ADDQ   CX, R9
+	ADDQ   CX, R11
+
+	// Malformed input if seq.mo > t+len(hist) || seq.mo > s.windowSize)
+check_offset:
+	MOVQ R11, CX
+	ADDQ 40(SP), CX
+	CMPQ R12, CX
+	JG   error_match_off_too_big
+	CMPQ R12, 56(SP)
+	JG   error_match_off_too_big
+
+	// Copy match from history
+	MOVQ R12, CX
+	SUBQ R11, CX
+	JLS  copy_match
+	MOVQ 48(SP), R14
+	SUBQ CX, R14
+	CMPQ R13, CX
+	JG   copy_all_from_history
+	MOVQ R13, CX
+	SUBQ $0x10, CX
+	JB   copy_4_small
+
+copy_4_loop:
+	MOVUPS (R14), X0
+	MOVUPS X0, (R9)
+	ADDQ   $0x10, R14
+	ADDQ   $0x10, R9
+	SUBQ   $0x10, CX
+	JAE    copy_4_loop
+	LEAQ   16(R14)(CX*1), R14
+	LEAQ   16(R9)(CX*1), R9
+	MOVUPS -16(R14), X0
+	MOVUPS X0, -16(R9)
+	JMP    copy_4_end
+
+copy_4_small:
+	CMPQ R13, $0x03
+	JE   copy_4_move_3
+	CMPQ R13, $0x08
+	JB   copy_4_move_4through7
+	JMP  copy_4_move_8through16
+
+copy_4_move_3:
+	MOVW (R14), CX
+	MOVB 2(R14), R12
+	MOVW CX, (R9)
+	MOVB R12, 2(R9)
+	ADDQ R13, R14
+	ADDQ R13, R9
+	JMP  copy_4_end
+
+copy_4_move_4through7:
+	MOVL (R14), CX
+	MOVL -4(R14)(R13*1), R12
+	MOVL CX, (R9)
+	MOVL R12, -4(R9)(R13*1)
+	ADDQ R13, R14
+	ADDQ R13, R9
+	JMP  copy_4_end
+
+copy_4_move_8through16:
+	MOVQ (R14), CX
+	MOVQ -8(R14)(R13*1), R12
+	MOVQ CX, (R9)
+	MOVQ R12, -8(R9)(R13*1)
+	ADDQ R13, R14
+	ADDQ R13, R9
+
+copy_4_end:
+	ADDQ R13, R11
+	JMP  handle_loop
+	JMP loop_finished
+
+copy_all_from_history:
+	MOVQ CX, R15
+	SUBQ $0x10, R15
+	JB   copy_5_small
+
+copy_5_loop:
+	MOVUPS (R14), X0
+	MOVUPS X0, (R9)
+	ADDQ   $0x10, R14
+	ADDQ   $0x10, R9
+	SUBQ   $0x10, R15
+	JAE    copy_5_loop
+	LEAQ   16(R14)(R15*1), R14
+	LEAQ   16(R9)(R15*1), R9
+	MOVUPS -16(R14), X0
+	MOVUPS X0, -16(R9)
+	JMP    copy_5_end
+
+copy_5_small:
+	CMPQ CX, $0x03
+	JE   copy_5_move_3
+	JB   copy_5_move_1or2
+	CMPQ CX, $0x08
+	JB   copy_5_move_4through7
+	JMP  copy_5_move_8through16
+
+copy_5_move_1or2:
+	MOVB (R14), R15
+	MOVB -1(R14)(CX*1), BP
+	MOVB R15, (R9)
+	MOVB BP, -1(R9)(CX*1)
+	ADDQ CX, R14
+	ADDQ CX, R9
+	JMP  copy_5_end
+
+copy_5_move_3:
+	MOVW (R14), R15
+	MOVB 2(R14), BP
+	MOVW R15, (R9)
+	MOVB BP, 2(R9)
+	ADDQ CX, R14
+	ADDQ CX, R9
+	JMP  copy_5_end
+
+copy_5_move_4through7:
+	MOVL (R14), R15
+	MOVL -4(R14)(CX*1), BP
+	MOVL R15, (R9)
+	MOVL BP, -4(R9)(CX*1)
+	ADDQ CX, R14
+	ADDQ CX, R9
+	JMP  copy_5_end
+
+copy_5_move_8through16:
+	MOVQ (R14), R15
+	MOVQ -8(R14)(CX*1), BP
+	MOVQ R15, (R9)
+	MOVQ BP, -8(R9)(CX*1)
+	ADDQ CX, R14
+	ADDQ CX, R9
+
+copy_5_end:
+	ADDQ CX, R11
+	SUBQ CX, R13
+
+	// Copy match from the current buffer
+copy_match:
+	MOVQ R9, CX
+	SUBQ R12, CX
+
+	// ml <= mo
+	CMPQ R13, R12
+	JA   copy_overlapping_match
+
+	// Copy non-overlapping match
+	ADDQ R13, R11
+	MOVQ R9, R12
+	ADDQ R13, R9
+
+copy_2:
+	MOVUPS (CX), X0
+	MOVUPS X0, (R12)
+	ADDQ   $0x10, CX
+	ADDQ   $0x10, R12
+	SUBQ   $0x10, R13
+	JHI    copy_2
+	JMP    handle_loop
+
+	// Copy overlapping match
+	// Byte-at-a-time: source and destination overlap, so wide copies
+	// would read bytes this same loop has not written yet.
+copy_overlapping_match:
+	ADDQ R13, R11
+
+copy_slow_3:
+	MOVB (CX), R12
+	MOVB R12, (R9)
+	INCQ CX
+	INCQ R9
+	DECQ R13
+	JNZ  copy_slow_3
+
+handle_loop:
+	MOVQ ctx+16(FP), CX
+	DECQ 96(CX)
+	JNS  sequenceDecs_decodeSync_bmi2_main_loop
+
+loop_finished:
+	MOVQ br+8(FP), CX
+	MOVQ AX, 32(CX)
+	MOVB DL, 40(CX)
+	MOVQ BX, 24(CX)
+
+	// Update the context
+	MOVQ ctx+16(FP), AX
+	MOVQ R11, 136(AX)
+	MOVQ 144(AX), CX
+	SUBQ CX, R10
+	MOVQ R10, 168(AX)
+
+	// Return success
+	MOVQ $0x00000000, ret+24(FP)
+	RET
+
+	// Return with match length error
+sequenceDecs_decodeSync_bmi2_error_match_len_ofs_mismatch:
+	MOVQ 16(SP), AX
+	MOVQ ctx+16(FP), CX
+	MOVQ AX, 216(CX)
+	MOVQ $0x00000001, ret+24(FP)
+	RET
+
+	// Return with match too long error
+sequenceDecs_decodeSync_bmi2_error_match_len_too_big:
+	MOVQ ctx+16(FP), AX
+	MOVQ 16(SP), CX
+	MOVQ CX, 216(AX)
+	MOVQ $0x00000002, ret+24(FP)
+	RET
+
+	// Return with match offset too long error
+error_match_off_too_big:
+	MOVQ ctx+16(FP), AX
+	MOVQ 8(SP), CX
+	MOVQ CX, 224(AX)
+	MOVQ R11, 136(AX)
+	MOVQ $0x00000003, ret+24(FP)
+	RET
+
+	// Return with not enough literals error
+error_not_enough_literals:
+	MOVQ ctx+16(FP), AX
+	MOVQ 24(SP), CX
+	MOVQ CX, 208(AX)
+	MOVQ $0x00000004, ret+24(FP)
+	RET
+
+	// Return with overread error
+error_overread:
+	MOVQ $0x00000006, ret+24(FP)
+	RET
+
+	// Return with not enough output space error
+error_not_enough_space:
+	MOVQ ctx+16(FP), AX
+	MOVQ 24(SP), CX
+	MOVQ CX, 208(AX)
+	MOVQ 16(SP), CX
+	MOVQ CX, 216(AX)
+	MOVQ R11, 136(AX)
+	MOVQ $0x00000005, ret+24(FP)
+	RET
+
+// func sequenceDecs_decodeSync_safe_amd64(s *sequenceDecs, br *bitReader, ctx *decodeSyncAsmContext) int
+// Requires: CMOV, SSE
+//
+// "Safe" variant of sequenceDecs_decodeSync_amd64: literal and match
+// copies dispatch on the exact length (1-2 / 3 / 4-7 / 8-16 byte moves,
+// with a 16-byte loop plus an exact-length tail for longer copies), so
+// no store ever lands past the requested length. Presumably selected
+// when the output buffer has no over-write slack - confirm against the
+// dispatch logic in seqdec_amd64.go. Returns 0 on success or a nonzero
+// error code (1..6) matching the error labels at the bottom.
+// NOTE(review): avo-generated vendored code - do not hand-edit.
+TEXT ·sequenceDecs_decodeSync_safe_amd64(SB), $64-32
+	MOVQ    br+8(FP), AX
+	MOVQ    32(AX), DX
+	MOVBQZX 40(AX), BX
+	MOVQ    24(AX), SI
+	MOVQ    (AX), AX
+	ADDQ    SI, AX
+	MOVQ    AX, (SP)
+	MOVQ    ctx+16(FP), AX
+	MOVQ    72(AX), DI
+	MOVQ    80(AX), R8
+	MOVQ    88(AX), R9
+	XORQ    CX, CX
+	MOVQ    CX, 8(SP)
+	MOVQ    CX, 16(SP)
+	MOVQ    CX, 24(SP)
+	MOVQ    112(AX), R10
+	MOVQ    128(AX), CX
+	MOVQ    CX, 32(SP)
+	MOVQ    144(AX), R11
+	MOVQ    136(AX), R12
+	MOVQ    200(AX), CX
+	MOVQ    CX, 56(SP)
+	MOVQ    176(AX), CX
+	MOVQ    CX, 48(SP)
+	MOVQ    184(AX), AX
+	MOVQ    AX, 40(SP)
+	MOVQ    40(SP), AX
+	ADDQ    AX, 48(SP)
+
+	// Calculate poiter to s.out[cap(s.out)] (a past-end pointer)
+	ADDQ R10, 32(SP)
+
+	// outBase += outPosition
+	ADDQ R12, R10
+
+	// Main loop: decode and execute one sequence per iteration.
+sequenceDecs_decodeSync_safe_amd64_main_loop:
+	MOVQ (SP), R13
+
+	// Fill bitreader to have enough for the offset and match length.
+	CMPQ SI, $0x08
+	JL   sequenceDecs_decodeSync_safe_amd64_fill_byte_by_byte
+	MOVQ BX, AX
+	SHRQ $0x03, AX
+	SUBQ AX, R13
+	MOVQ (R13), DX
+	SUBQ AX, SI
+	ANDQ $0x07, BX
+	JMP  sequenceDecs_decodeSync_safe_amd64_fill_end
+
+	// Slow refill path: fewer than 8 bytes left, pull single bytes.
+sequenceDecs_decodeSync_safe_amd64_fill_byte_by_byte:
+	CMPQ    SI, $0x00
+	JLE     sequenceDecs_decodeSync_safe_amd64_fill_check_overread
+	CMPQ    BX, $0x07
+	JLE     sequenceDecs_decodeSync_safe_amd64_fill_end
+	SHLQ    $0x08, DX
+	SUBQ    $0x01, R13
+	SUBQ    $0x01, SI
+	SUBQ    $0x08, BX
+	MOVBQZX (R13), AX
+	ORQ     AX, DX
+	JMP     sequenceDecs_decodeSync_safe_amd64_fill_byte_by_byte
+
+sequenceDecs_decodeSync_safe_amd64_fill_check_overread:
+	CMPQ BX, $0x40
+	JA   error_overread
+
+sequenceDecs_decodeSync_safe_amd64_fill_end:
+	// Update offset
+	MOVQ  R9, AX
+	MOVQ  BX, CX
+	MOVQ  DX, R14
+	SHLQ  CL, R14
+	MOVB  AH, CL
+	SHRQ  $0x20, AX
+	TESTQ CX, CX
+	JZ    sequenceDecs_decodeSync_safe_amd64_of_update_zero
+	ADDQ  CX, BX
+	CMPQ  BX, $0x40
+	JA    sequenceDecs_decodeSync_safe_amd64_of_update_zero
+	CMPQ  CX, $0x40
+	JAE   sequenceDecs_decodeSync_safe_amd64_of_update_zero
+	NEGQ  CX
+	SHRQ  CL, R14
+	ADDQ  R14, AX
+
+sequenceDecs_decodeSync_safe_amd64_of_update_zero:
+	MOVQ AX, 8(SP)
+
+	// Update match length
+	MOVQ  R8, AX
+	MOVQ  BX, CX
+	MOVQ  DX, R14
+	SHLQ  CL, R14
+	MOVB  AH, CL
+	SHRQ  $0x20, AX
+	TESTQ CX, CX
+	JZ    sequenceDecs_decodeSync_safe_amd64_ml_update_zero
+	ADDQ  CX, BX
+	CMPQ  BX, $0x40
+	JA    sequenceDecs_decodeSync_safe_amd64_ml_update_zero
+	CMPQ  CX, $0x40
+	JAE   sequenceDecs_decodeSync_safe_amd64_ml_update_zero
+	NEGQ  CX
+	SHRQ  CL, R14
+	ADDQ  R14, AX
+
+sequenceDecs_decodeSync_safe_amd64_ml_update_zero:
+	MOVQ AX, 16(SP)
+
+	// Fill bitreader to have enough for the remaining
+	CMPQ SI, $0x08
+	JL   sequenceDecs_decodeSync_safe_amd64_fill_2_byte_by_byte
+	MOVQ BX, AX
+	SHRQ $0x03, AX
+	SUBQ AX, R13
+	MOVQ (R13), DX
+	SUBQ AX, SI
+	ANDQ $0x07, BX
+	JMP  sequenceDecs_decodeSync_safe_amd64_fill_2_end
+
+sequenceDecs_decodeSync_safe_amd64_fill_2_byte_by_byte:
+	CMPQ    SI, $0x00
+	JLE     sequenceDecs_decodeSync_safe_amd64_fill_2_check_overread
+	CMPQ    BX, $0x07
+	JLE     sequenceDecs_decodeSync_safe_amd64_fill_2_end
+	SHLQ    $0x08, DX
+	SUBQ    $0x01, R13
+	SUBQ    $0x01, SI
+	SUBQ    $0x08, BX
+	MOVBQZX (R13), AX
+	ORQ     AX, DX
+	JMP     sequenceDecs_decodeSync_safe_amd64_fill_2_byte_by_byte
+
+sequenceDecs_decodeSync_safe_amd64_fill_2_check_overread:
+	CMPQ BX, $0x40
+	JA   error_overread
+
+sequenceDecs_decodeSync_safe_amd64_fill_2_end:
+	// Update literal length
+	MOVQ  DI, AX
+	MOVQ  BX, CX
+	MOVQ  DX, R14
+	SHLQ  CL, R14
+	MOVB  AH, CL
+	SHRQ  $0x20, AX
+	TESTQ CX, CX
+	JZ    sequenceDecs_decodeSync_safe_amd64_ll_update_zero
+	ADDQ  CX, BX
+	CMPQ  BX, $0x40
+	JA    sequenceDecs_decodeSync_safe_amd64_ll_update_zero
+	CMPQ  CX, $0x40
+	JAE   sequenceDecs_decodeSync_safe_amd64_ll_update_zero
+	NEGQ  CX
+	SHRQ  CL, R14
+	ADDQ  R14, AX
+
+sequenceDecs_decodeSync_safe_amd64_ll_update_zero:
+	MOVQ AX, 24(SP)
+
+	// Fill bitreader for state updates
+	MOVQ    R13, (SP)
+	MOVQ    R9, AX
+	SHRQ    $0x08, AX
+	MOVBQZX AL, AX
+	MOVQ    ctx+16(FP), CX
+	CMPQ    96(CX), $0x00
+	JZ      sequenceDecs_decodeSync_safe_amd64_skip_update
+
+	// Update Literal Length State
+	MOVBQZX DI, R13
+	SHRQ    $0x10, DI
+	MOVWQZX DI, DI
+	LEAQ    (BX)(R13*1), CX
+	MOVQ    DX, R14
+	MOVQ    CX, BX
+	ROLQ    CL, R14
+	MOVL    $0x00000001, R15
+	MOVB    R13, CL
+	SHLL    CL, R15
+	DECL    R15
+	ANDQ    R15, R14
+	ADDQ    R14, DI
+
+	// Load ctx.llTable
+	MOVQ ctx+16(FP), CX
+	MOVQ (CX), CX
+	MOVQ (CX)(DI*8), DI
+
+	// Update Match Length State
+	MOVBQZX R8, R13
+	SHRQ    $0x10, R8
+	MOVWQZX R8, R8
+	LEAQ    (BX)(R13*1), CX
+	MOVQ    DX, R14
+	MOVQ    CX, BX
+	ROLQ    CL, R14
+	MOVL    $0x00000001, R15
+	MOVB    R13, CL
+	SHLL    CL, R15
+	DECL    R15
+	ANDQ    R15, R14
+	ADDQ    R14, R8
+
+	// Load ctx.mlTable
+	MOVQ ctx+16(FP), CX
+	MOVQ 24(CX), CX
+	MOVQ (CX)(R8*8), R8
+
+	// Update Offset State
+	MOVBQZX R9, R13
+	SHRQ    $0x10, R9
+	MOVWQZX R9, R9
+	LEAQ    (BX)(R13*1), CX
+	MOVQ    DX, R14
+	MOVQ    CX, BX
+	ROLQ    CL, R14
+	MOVL    $0x00000001, R15
+	MOVB    R13, CL
+	SHLL    CL, R15
+	DECL    R15
+	ANDQ    R15, R14
+	ADDQ    R14, R9
+
+	// Load ctx.ofTable
+	MOVQ ctx+16(FP), CX
+	MOVQ 48(CX), CX
+	MOVQ (CX)(R9*8), R9
+
+	// Resolve repeat offsets (zstd "repcode" handling) against the
+	// three previous-offset slots at 144/152/160(CX).
+sequenceDecs_decodeSync_safe_amd64_skip_update:
+	// Adjust offset
+	MOVQ   s+0(FP), CX
+	MOVQ   8(SP), R13
+	CMPQ   AX, $0x01
+	JBE    sequenceDecs_decodeSync_safe_amd64_adjust_offsetB_1_or_0
+	MOVUPS 144(CX), X0
+	MOVQ   R13, 144(CX)
+	MOVUPS X0, 152(CX)
+	JMP    sequenceDecs_decodeSync_safe_amd64_after_adjust
+
+sequenceDecs_decodeSync_safe_amd64_adjust_offsetB_1_or_0:
+	CMPQ 24(SP), $0x00000000
+	JNE  sequenceDecs_decodeSync_safe_amd64_adjust_offset_maybezero
+	INCQ R13
+	JMP  sequenceDecs_decodeSync_safe_amd64_adjust_offset_nonzero
+
+sequenceDecs_decodeSync_safe_amd64_adjust_offset_maybezero:
+	TESTQ R13, R13
+	JNZ   sequenceDecs_decodeSync_safe_amd64_adjust_offset_nonzero
+	MOVQ  144(CX), R13
+	JMP   sequenceDecs_decodeSync_safe_amd64_after_adjust
+
+sequenceDecs_decodeSync_safe_amd64_adjust_offset_nonzero:
+	MOVQ    R13, AX
+	XORQ    R14, R14
+	MOVQ    $-1, R15
+	CMPQ    R13, $0x03
+	CMOVQEQ R14, AX
+	CMOVQEQ R15, R14
+	// NOTE: JNZ consumes the flags of this ADDQ (temp != 0 check).
+	ADDQ    144(CX)(AX*8), R14
+	JNZ     sequenceDecs_decodeSync_safe_amd64_adjust_temp_valid
+	MOVQ    $0x00000001, R14
+
+sequenceDecs_decodeSync_safe_amd64_adjust_temp_valid:
+	CMPQ R13, $0x01
+	JZ   sequenceDecs_decodeSync_safe_amd64_adjust_skip
+	MOVQ 152(CX), AX
+	MOVQ AX, 160(CX)
+
+sequenceDecs_decodeSync_safe_amd64_adjust_skip:
+	MOVQ 144(CX), AX
+	MOVQ AX, 152(CX)
+	MOVQ R14, 144(CX)
+	MOVQ R14, R13
+
+sequenceDecs_decodeSync_safe_amd64_after_adjust:
+	MOVQ R13, 8(SP)
+
+	// Check values
+	MOVQ  16(SP), AX
+	MOVQ  24(SP), CX
+	LEAQ  (AX)(CX*1), R14
+	MOVQ  s+0(FP), R15
+	ADDQ  R14, 256(R15)
+	MOVQ  ctx+16(FP), R14
+	SUBQ  CX, 104(R14)
+	JS    error_not_enough_literals
+	CMPQ  AX, $0x00020002
+	JA    sequenceDecs_decodeSync_safe_amd64_error_match_len_too_big
+	TESTQ R13, R13
+	JNZ   sequenceDecs_decodeSync_safe_amd64_match_len_ofs_ok
+	TESTQ AX, AX
+	JNZ   sequenceDecs_decodeSync_safe_amd64_error_match_len_ofs_mismatch
+
+sequenceDecs_decodeSync_safe_amd64_match_len_ofs_ok:
+	MOVQ 24(SP), AX
+	MOVQ 8(SP), CX
+	MOVQ 16(SP), R13
+
+	// Check if we have enough space in s.out
+	LEAQ (AX)(R13*1), R14
+	ADDQ R10, R14
+	CMPQ R14, 32(SP)
+	JA   error_not_enough_space
+
+	// Copy literals
+	// Exact-length copy: dispatches on size so no byte past the literal
+	// length is written (unlike the non-safe variant's 16-byte chunks).
+	TESTQ AX, AX
+	JZ    check_offset
+	MOVQ  AX, R14
+	SUBQ  $0x10, R14
+	JB    copy_1_small
+
+copy_1_loop:
+	MOVUPS (R11), X0
+	MOVUPS X0, (R10)
+	ADDQ   $0x10, R11
+	ADDQ   $0x10, R10
+	SUBQ   $0x10, R14
+	JAE    copy_1_loop
+	LEAQ   16(R11)(R14*1), R11
+	LEAQ   16(R10)(R14*1), R10
+	MOVUPS -16(R11), X0
+	MOVUPS X0, -16(R10)
+	JMP    copy_1_end
+
+copy_1_small:
+	CMPQ AX, $0x03
+	JE   copy_1_move_3
+	JB   copy_1_move_1or2
+	CMPQ AX, $0x08
+	JB   copy_1_move_4through7
+	JMP  copy_1_move_8through16
+
+copy_1_move_1or2:
+	MOVB (R11), R14
+	MOVB -1(R11)(AX*1), R15
+	MOVB R14, (R10)
+	MOVB R15, -1(R10)(AX*1)
+	ADDQ AX, R11
+	ADDQ AX, R10
+	JMP  copy_1_end
+
+copy_1_move_3:
+	MOVW (R11), R14
+	MOVB 2(R11), R15
+	MOVW R14, (R10)
+	MOVB R15, 2(R10)
+	ADDQ AX, R11
+	ADDQ AX, R10
+	JMP  copy_1_end
+
+copy_1_move_4through7:
+	MOVL (R11), R14
+	MOVL -4(R11)(AX*1), R15
+	MOVL R14, (R10)
+	MOVL R15, -4(R10)(AX*1)
+	ADDQ AX, R11
+	ADDQ AX, R10
+	JMP  copy_1_end
+
+copy_1_move_8through16:
+	MOVQ (R11), R14
+	MOVQ -8(R11)(AX*1), R15
+	MOVQ R14, (R10)
+	MOVQ R15, -8(R10)(AX*1)
+	ADDQ AX, R11
+	ADDQ AX, R10
+
+copy_1_end:
+	ADDQ AX, R12
+
+	// Malformed input if seq.mo > t+len(hist) || seq.mo > s.windowSize)
+check_offset:
+	MOVQ R12, AX
+	ADDQ 40(SP), AX
+	CMPQ CX, AX
+	JG   error_match_off_too_big
+	CMPQ CX, 56(SP)
+	JG   error_match_off_too_big
+
+	// Copy match from history
+	MOVQ CX, AX
+	SUBQ R12, AX
+	JLS  copy_match
+	MOVQ 48(SP), R14
+	SUBQ AX, R14
+	CMPQ R13, AX
+	JG   copy_all_from_history
+	MOVQ R13, AX
+	SUBQ $0x10, AX
+	JB   copy_4_small
+
+copy_4_loop:
+	MOVUPS (R14), X0
+	MOVUPS X0, (R10)
+	ADDQ   $0x10, R14
+	ADDQ   $0x10, R10
+	SUBQ   $0x10, AX
+	JAE    copy_4_loop
+	LEAQ   16(R14)(AX*1), R14
+	LEAQ   16(R10)(AX*1), R10
+	MOVUPS -16(R14), X0
+	MOVUPS X0, -16(R10)
+	JMP    copy_4_end
+
+copy_4_small:
+	CMPQ R13, $0x03
+	JE   copy_4_move_3
+	CMPQ R13, $0x08
+	JB   copy_4_move_4through7
+	JMP  copy_4_move_8through16
+
+copy_4_move_3:
+	MOVW (R14), AX
+	MOVB 2(R14), CL
+	MOVW AX, (R10)
+	MOVB CL, 2(R10)
+	ADDQ R13, R14
+	ADDQ R13, R10
+	JMP  copy_4_end
+
+copy_4_move_4through7:
+	MOVL (R14), AX
+	MOVL -4(R14)(R13*1), CX
+	MOVL AX, (R10)
+	MOVL CX, -4(R10)(R13*1)
+	ADDQ R13, R14
+	ADDQ R13, R10
+	JMP  copy_4_end
+
+copy_4_move_8through16:
+	MOVQ (R14), AX
+	MOVQ -8(R14)(R13*1), CX
+	MOVQ AX, (R10)
+	MOVQ CX, -8(R10)(R13*1)
+	ADDQ R13, R14
+	ADDQ R13, R10
+
+copy_4_end:
+	ADDQ R13, R12
+	JMP  handle_loop
+	JMP loop_finished
+
+copy_all_from_history:
+	MOVQ AX, R15
+	SUBQ $0x10, R15
+	JB   copy_5_small
+
+copy_5_loop:
+	MOVUPS (R14), X0
+	MOVUPS X0, (R10)
+	ADDQ   $0x10, R14
+	ADDQ   $0x10, R10
+	SUBQ   $0x10, R15
+	JAE    copy_5_loop
+	LEAQ   16(R14)(R15*1), R14
+	LEAQ   16(R10)(R15*1), R10
+	MOVUPS -16(R14), X0
+	MOVUPS X0, -16(R10)
+	JMP    copy_5_end
+
+copy_5_small:
+	CMPQ AX, $0x03
+	JE   copy_5_move_3
+	JB   copy_5_move_1or2
+	CMPQ AX, $0x08
+	JB   copy_5_move_4through7
+	JMP  copy_5_move_8through16
+
+copy_5_move_1or2:
+	MOVB (R14), R15
+	MOVB -1(R14)(AX*1), BP
+	MOVB R15, (R10)
+	MOVB BP, -1(R10)(AX*1)
+	ADDQ AX, R14
+	ADDQ AX, R10
+	JMP  copy_5_end
+
+copy_5_move_3:
+	MOVW (R14), R15
+	MOVB 2(R14), BP
+	MOVW R15, (R10)
+	MOVB BP, 2(R10)
+	ADDQ AX, R14
+	ADDQ AX, R10
+	JMP  copy_5_end
+
+copy_5_move_4through7:
+	MOVL (R14), R15
+	MOVL -4(R14)(AX*1), BP
+	MOVL R15, (R10)
+	MOVL BP, -4(R10)(AX*1)
+	ADDQ AX, R14
+	ADDQ AX, R10
+	JMP  copy_5_end
+
+copy_5_move_8through16:
+	MOVQ (R14), R15
+	MOVQ -8(R14)(AX*1), BP
+	MOVQ R15, (R10)
+	MOVQ BP, -8(R10)(AX*1)
+	ADDQ AX, R14
+	ADDQ AX, R10
+
+copy_5_end:
+	ADDQ AX, R12
+	SUBQ AX, R13
+
+	// Copy match from the current buffer
+copy_match:
+	MOVQ R10, AX
+	SUBQ CX, AX
+
+	// ml <= mo
+	CMPQ R13, CX
+	JA   copy_overlapping_match
+
+	// Copy non-overlapping match
+	ADDQ R13, R12
+	MOVQ R13, CX
+	SUBQ $0x10, CX
+	JB   copy_2_small
+
+copy_2_loop:
+	MOVUPS (AX), X0
+	MOVUPS X0, (R10)
+	ADDQ   $0x10, AX
+	ADDQ   $0x10, R10
+	SUBQ   $0x10, CX
+	JAE    copy_2_loop
+	LEAQ   16(AX)(CX*1), AX
+	LEAQ   16(R10)(CX*1), R10
+	MOVUPS -16(AX), X0
+	MOVUPS X0, -16(R10)
+	JMP    copy_2_end
+
+copy_2_small:
+	CMPQ R13, $0x03
+	JE   copy_2_move_3
+	JB   copy_2_move_1or2
+	CMPQ R13, $0x08
+	JB   copy_2_move_4through7
+	JMP  copy_2_move_8through16
+
+copy_2_move_1or2:
+	MOVB (AX), CL
+	MOVB -1(AX)(R13*1), R14
+	MOVB CL, (R10)
+	MOVB R14, -1(R10)(R13*1)
+	ADDQ R13, AX
+	ADDQ R13, R10
+	JMP  copy_2_end
+
+copy_2_move_3:
+	MOVW (AX), CX
+	MOVB 2(AX), R14
+	MOVW CX, (R10)
+	MOVB R14, 2(R10)
+	ADDQ R13, AX
+	ADDQ R13, R10
+	JMP  copy_2_end
+
+copy_2_move_4through7:
+	MOVL (AX), CX
+	MOVL -4(AX)(R13*1), R14
+	MOVL CX, (R10)
+	MOVL R14, -4(R10)(R13*1)
+	ADDQ R13, AX
+	ADDQ R13, R10
+	JMP  copy_2_end
+
+copy_2_move_8through16:
+	MOVQ (AX), CX
+	MOVQ -8(AX)(R13*1), R14
+	MOVQ CX, (R10)
+	MOVQ R14, -8(R10)(R13*1)
+	ADDQ R13, AX
+	ADDQ R13, R10
+
+copy_2_end:
+	JMP handle_loop
+
+	// Copy overlapping match
+	// Byte-at-a-time: source and destination overlap, so wide copies
+	// would read bytes this same loop has not written yet.
+copy_overlapping_match:
+	ADDQ R13, R12
+
+copy_slow_3:
+	MOVB (AX), CL
+	MOVB CL, (R10)
+	INCQ AX
+	INCQ R10
+	DECQ R13
+	JNZ  copy_slow_3
+
+handle_loop:
+	MOVQ ctx+16(FP), AX
+	DECQ 96(AX)
+	JNS  sequenceDecs_decodeSync_safe_amd64_main_loop
+
+loop_finished:
+	MOVQ br+8(FP), AX
+	MOVQ DX, 32(AX)
+	MOVB BL, 40(AX)
+	MOVQ SI, 24(AX)
+
+	// Update the context
+	MOVQ ctx+16(FP), AX
+	MOVQ R12, 136(AX)
+	MOVQ 144(AX), CX
+	SUBQ CX, R11
+	MOVQ R11, 168(AX)
+
+	// Return success
+	MOVQ $0x00000000, ret+24(FP)
+	RET
+
+	// Return with match length error
+sequenceDecs_decodeSync_safe_amd64_error_match_len_ofs_mismatch:
+	MOVQ 16(SP), AX
+	MOVQ ctx+16(FP), CX
+	MOVQ AX, 216(CX)
+	MOVQ $0x00000001, ret+24(FP)
+	RET
+
+	// Return with match too long error
+sequenceDecs_decodeSync_safe_amd64_error_match_len_too_big:
+	MOVQ ctx+16(FP), AX
+	MOVQ 16(SP), CX
+	MOVQ CX, 216(AX)
+	MOVQ $0x00000002, ret+24(FP)
+	RET
+
+	// Return with match offset too long error
+error_match_off_too_big:
+	MOVQ ctx+16(FP), AX
+	MOVQ 8(SP), CX
+	MOVQ CX, 224(AX)
+	MOVQ R12, 136(AX)
+	MOVQ $0x00000003, ret+24(FP)
+	RET
+
+	// Return with not enough literals error
+error_not_enough_literals:
+	MOVQ ctx+16(FP), AX
+	MOVQ 24(SP), CX
+	MOVQ CX, 208(AX)
+	MOVQ $0x00000004, ret+24(FP)
+	RET
+
+	// Return with overread error
+error_overread:
+	MOVQ $0x00000006, ret+24(FP)
+	RET
+
+	// Return with not enough output space error
+error_not_enough_space:
+	MOVQ ctx+16(FP), AX
+	MOVQ 24(SP), CX
+	MOVQ CX, 208(AX)
+	MOVQ 16(SP), CX
+	MOVQ CX, 216(AX)
+	MOVQ R12, 136(AX)
+	MOVQ $0x00000005, ret+24(FP)
+	RET
+
+// func sequenceDecs_decodeSync_safe_bmi2(s *sequenceDecs, br *bitReader, ctx *decodeSyncAsmContext) int
+// Requires: BMI, BMI2, CMOV, SSE
+TEXT ·sequenceDecs_decodeSync_safe_bmi2(SB), $64-32
+ MOVQ br+8(FP), CX
+ MOVQ 32(CX), AX
+ MOVBQZX 40(CX), DX
+ MOVQ 24(CX), BX
+ MOVQ (CX), CX
+ ADDQ BX, CX
+ MOVQ CX, (SP)
+ MOVQ ctx+16(FP), CX
+ MOVQ 72(CX), SI
+ MOVQ 80(CX), DI
+ MOVQ 88(CX), R8
+ XORQ R9, R9
+ MOVQ R9, 8(SP)
+ MOVQ R9, 16(SP)
+ MOVQ R9, 24(SP)
+ MOVQ 112(CX), R9
+ MOVQ 128(CX), R10
+ MOVQ R10, 32(SP)
+ MOVQ 144(CX), R10
+ MOVQ 136(CX), R11
+ MOVQ 200(CX), R12
+ MOVQ R12, 56(SP)
+ MOVQ 176(CX), R12
+ MOVQ R12, 48(SP)
+ MOVQ 184(CX), CX
+ MOVQ CX, 40(SP)
+ MOVQ 40(SP), CX
+ ADDQ CX, 48(SP)
+
+ // Calculate poiter to s.out[cap(s.out)] (a past-end pointer)
+ ADDQ R9, 32(SP)
+
+ // outBase += outPosition
+ ADDQ R11, R9
+
+sequenceDecs_decodeSync_safe_bmi2_main_loop:
+ MOVQ (SP), R12
+
+ // Fill bitreader to have enough for the offset and match length.
+ CMPQ BX, $0x08
+ JL sequenceDecs_decodeSync_safe_bmi2_fill_byte_by_byte
+ MOVQ DX, CX
+ SHRQ $0x03, CX
+ SUBQ CX, R12
+ MOVQ (R12), AX
+ SUBQ CX, BX
+ ANDQ $0x07, DX
+ JMP sequenceDecs_decodeSync_safe_bmi2_fill_end
+
+sequenceDecs_decodeSync_safe_bmi2_fill_byte_by_byte:
+ CMPQ BX, $0x00
+ JLE sequenceDecs_decodeSync_safe_bmi2_fill_check_overread
+ CMPQ DX, $0x07
+ JLE sequenceDecs_decodeSync_safe_bmi2_fill_end
+ SHLQ $0x08, AX
+ SUBQ $0x01, R12
+ SUBQ $0x01, BX
+ SUBQ $0x08, DX
+ MOVBQZX (R12), CX
+ ORQ CX, AX
+ JMP sequenceDecs_decodeSync_safe_bmi2_fill_byte_by_byte
+
+sequenceDecs_decodeSync_safe_bmi2_fill_check_overread:
+ CMPQ DX, $0x40
+ JA error_overread
+
+sequenceDecs_decodeSync_safe_bmi2_fill_end:
+ // Update offset
+ MOVQ $0x00000808, CX
+ BEXTRQ CX, R8, R13
+ MOVQ AX, R14
+ LEAQ (DX)(R13*1), CX
+ ROLQ CL, R14
+ BZHIQ R13, R14, R14
+ MOVQ CX, DX
+ MOVQ R8, CX
+ SHRQ $0x20, CX
+ ADDQ R14, CX
+ MOVQ CX, 8(SP)
+
+ // Update match length
+ MOVQ $0x00000808, CX
+ BEXTRQ CX, DI, R13
+ MOVQ AX, R14
+ LEAQ (DX)(R13*1), CX
+ ROLQ CL, R14
+ BZHIQ R13, R14, R14
+ MOVQ CX, DX
+ MOVQ DI, CX
+ SHRQ $0x20, CX
+ ADDQ R14, CX
+ MOVQ CX, 16(SP)
+
+ // Fill bitreader to have enough for the remaining
+ CMPQ BX, $0x08
+ JL sequenceDecs_decodeSync_safe_bmi2_fill_2_byte_by_byte
+ MOVQ DX, CX
+ SHRQ $0x03, CX
+ SUBQ CX, R12
+ MOVQ (R12), AX
+ SUBQ CX, BX
+ ANDQ $0x07, DX
+ JMP sequenceDecs_decodeSync_safe_bmi2_fill_2_end
+
+sequenceDecs_decodeSync_safe_bmi2_fill_2_byte_by_byte:
+ CMPQ BX, $0x00
+ JLE sequenceDecs_decodeSync_safe_bmi2_fill_2_check_overread
+ CMPQ DX, $0x07
+ JLE sequenceDecs_decodeSync_safe_bmi2_fill_2_end
+ SHLQ $0x08, AX
+ SUBQ $0x01, R12
+ SUBQ $0x01, BX
+ SUBQ $0x08, DX
+ MOVBQZX (R12), CX
+ ORQ CX, AX
+ JMP sequenceDecs_decodeSync_safe_bmi2_fill_2_byte_by_byte
+
+sequenceDecs_decodeSync_safe_bmi2_fill_2_check_overread:
+ CMPQ DX, $0x40
+ JA error_overread
+
+sequenceDecs_decodeSync_safe_bmi2_fill_2_end:
+ // Update literal length
+ MOVQ $0x00000808, CX
+ BEXTRQ CX, SI, R13
+ MOVQ AX, R14
+ LEAQ (DX)(R13*1), CX
+ ROLQ CL, R14
+ BZHIQ R13, R14, R14
+ MOVQ CX, DX
+ MOVQ SI, CX
+ SHRQ $0x20, CX
+ ADDQ R14, CX
+ MOVQ CX, 24(SP)
+
+ // Fill bitreader for state updates
+ MOVQ R12, (SP)
+ MOVQ $0x00000808, CX
+ BEXTRQ CX, R8, R12
+ MOVQ ctx+16(FP), CX
+ CMPQ 96(CX), $0x00
+ JZ sequenceDecs_decodeSync_safe_bmi2_skip_update
+ LEAQ (SI)(DI*1), R13
+ ADDQ R8, R13
+ MOVBQZX R13, R13
+ LEAQ (DX)(R13*1), CX
+ MOVQ AX, R14
+ MOVQ CX, DX
+ ROLQ CL, R14
+ BZHIQ R13, R14, R14
+
+ // Update Offset State
+ BZHIQ R8, R14, CX
+ SHRXQ R8, R14, R14
+ MOVQ $0x00001010, R13
+ BEXTRQ R13, R8, R8
+ ADDQ CX, R8
+
+ // Load ctx.ofTable
+ MOVQ ctx+16(FP), CX
+ MOVQ 48(CX), CX
+ MOVQ (CX)(R8*8), R8
+
+ // Update Match Length State
+ BZHIQ DI, R14, CX
+ SHRXQ DI, R14, R14
+ MOVQ $0x00001010, R13
+ BEXTRQ R13, DI, DI
+ ADDQ CX, DI
+
+ // Load ctx.mlTable
+ MOVQ ctx+16(FP), CX
+ MOVQ 24(CX), CX
+ MOVQ (CX)(DI*8), DI
+
+ // Update Literal Length State
+ BZHIQ SI, R14, CX
+ MOVQ $0x00001010, R13
+ BEXTRQ R13, SI, SI
+ ADDQ CX, SI
+
+ // Load ctx.llTable
+ MOVQ ctx+16(FP), CX
+ MOVQ (CX), CX
+ MOVQ (CX)(SI*8), SI
+
+sequenceDecs_decodeSync_safe_bmi2_skip_update:
+ // Adjust offset
+ MOVQ s+0(FP), CX
+ MOVQ 8(SP), R13
+ CMPQ R12, $0x01
+ JBE sequenceDecs_decodeSync_safe_bmi2_adjust_offsetB_1_or_0
+ MOVUPS 144(CX), X0
+ MOVQ R13, 144(CX)
+ MOVUPS X0, 152(CX)
+ JMP sequenceDecs_decodeSync_safe_bmi2_after_adjust
+
+sequenceDecs_decodeSync_safe_bmi2_adjust_offsetB_1_or_0:
+ CMPQ 24(SP), $0x00000000
+ JNE sequenceDecs_decodeSync_safe_bmi2_adjust_offset_maybezero
+ INCQ R13
+ JMP sequenceDecs_decodeSync_safe_bmi2_adjust_offset_nonzero
+
+sequenceDecs_decodeSync_safe_bmi2_adjust_offset_maybezero:
+ TESTQ R13, R13
+ JNZ sequenceDecs_decodeSync_safe_bmi2_adjust_offset_nonzero
+ MOVQ 144(CX), R13
+ JMP sequenceDecs_decodeSync_safe_bmi2_after_adjust
+
+sequenceDecs_decodeSync_safe_bmi2_adjust_offset_nonzero:
+ MOVQ R13, R12
+ XORQ R14, R14
+ MOVQ $-1, R15
+ CMPQ R13, $0x03
+ CMOVQEQ R14, R12
+ CMOVQEQ R15, R14
+ ADDQ 144(CX)(R12*8), R14
+ JNZ sequenceDecs_decodeSync_safe_bmi2_adjust_temp_valid
+ MOVQ $0x00000001, R14
+
+sequenceDecs_decodeSync_safe_bmi2_adjust_temp_valid:
+ CMPQ R13, $0x01
+ JZ sequenceDecs_decodeSync_safe_bmi2_adjust_skip
+ MOVQ 152(CX), R12
+ MOVQ R12, 160(CX)
+
+sequenceDecs_decodeSync_safe_bmi2_adjust_skip:
+ MOVQ 144(CX), R12
+ MOVQ R12, 152(CX)
+ MOVQ R14, 144(CX)
+ MOVQ R14, R13
+
+sequenceDecs_decodeSync_safe_bmi2_after_adjust:
+ MOVQ R13, 8(SP)
+
+ // Check values
+ MOVQ 16(SP), CX
+ MOVQ 24(SP), R12
+ LEAQ (CX)(R12*1), R14
+ MOVQ s+0(FP), R15
+ ADDQ R14, 256(R15)
+ MOVQ ctx+16(FP), R14
+ SUBQ R12, 104(R14)
+ JS error_not_enough_literals
+ CMPQ CX, $0x00020002
+ JA sequenceDecs_decodeSync_safe_bmi2_error_match_len_too_big
+ TESTQ R13, R13
+ JNZ sequenceDecs_decodeSync_safe_bmi2_match_len_ofs_ok
+ TESTQ CX, CX
+ JNZ sequenceDecs_decodeSync_safe_bmi2_error_match_len_ofs_mismatch
+
+sequenceDecs_decodeSync_safe_bmi2_match_len_ofs_ok:
+ MOVQ 24(SP), CX
+ MOVQ 8(SP), R12
+ MOVQ 16(SP), R13
+
+ // Check if we have enough space in s.out
+ LEAQ (CX)(R13*1), R14
+ ADDQ R9, R14
+ CMPQ R14, 32(SP)
+ JA error_not_enough_space
+
+ // Copy literals
+ TESTQ CX, CX
+ JZ check_offset
+ MOVQ CX, R14
+ SUBQ $0x10, R14
+ JB copy_1_small
+
+copy_1_loop:
+ MOVUPS (R10), X0
+ MOVUPS X0, (R9)
+ ADDQ $0x10, R10
+ ADDQ $0x10, R9
+ SUBQ $0x10, R14
+ JAE copy_1_loop
+ LEAQ 16(R10)(R14*1), R10
+ LEAQ 16(R9)(R14*1), R9
+ MOVUPS -16(R10), X0
+ MOVUPS X0, -16(R9)
+ JMP copy_1_end
+
+copy_1_small:
+ CMPQ CX, $0x03
+ JE copy_1_move_3
+ JB copy_1_move_1or2
+ CMPQ CX, $0x08
+ JB copy_1_move_4through7
+ JMP copy_1_move_8through16
+
+copy_1_move_1or2:
+ MOVB (R10), R14
+ MOVB -1(R10)(CX*1), R15
+ MOVB R14, (R9)
+ MOVB R15, -1(R9)(CX*1)
+ ADDQ CX, R10
+ ADDQ CX, R9
+ JMP copy_1_end
+
+copy_1_move_3:
+ MOVW (R10), R14
+ MOVB 2(R10), R15
+ MOVW R14, (R9)
+ MOVB R15, 2(R9)
+ ADDQ CX, R10
+ ADDQ CX, R9
+ JMP copy_1_end
+
+copy_1_move_4through7:
+ MOVL (R10), R14
+ MOVL -4(R10)(CX*1), R15
+ MOVL R14, (R9)
+ MOVL R15, -4(R9)(CX*1)
+ ADDQ CX, R10
+ ADDQ CX, R9
+ JMP copy_1_end
+
+copy_1_move_8through16:
+ MOVQ (R10), R14
+ MOVQ -8(R10)(CX*1), R15
+ MOVQ R14, (R9)
+ MOVQ R15, -8(R9)(CX*1)
+ ADDQ CX, R10
+ ADDQ CX, R9
+
+copy_1_end:
+ ADDQ CX, R11
+
+ // Malformed input if seq.mo > t+len(hist) || seq.mo > s.windowSize)
+check_offset:
+ MOVQ R11, CX
+ ADDQ 40(SP), CX
+ CMPQ R12, CX
+ JG error_match_off_too_big
+ CMPQ R12, 56(SP)
+ JG error_match_off_too_big
+
+ // Copy match from history
+ MOVQ R12, CX
+ SUBQ R11, CX
+ JLS copy_match
+ MOVQ 48(SP), R14
+ SUBQ CX, R14
+ CMPQ R13, CX
+ JG copy_all_from_history
+ MOVQ R13, CX
+ SUBQ $0x10, CX
+ JB copy_4_small
+
+copy_4_loop:
+ MOVUPS (R14), X0
+ MOVUPS X0, (R9)
+ ADDQ $0x10, R14
+ ADDQ $0x10, R9
+ SUBQ $0x10, CX
+ JAE copy_4_loop
+ LEAQ 16(R14)(CX*1), R14
+ LEAQ 16(R9)(CX*1), R9
+ MOVUPS -16(R14), X0
+ MOVUPS X0, -16(R9)
+ JMP copy_4_end
+
+copy_4_small:
+ CMPQ R13, $0x03
+ JE copy_4_move_3
+ CMPQ R13, $0x08
+ JB copy_4_move_4through7
+ JMP copy_4_move_8through16
+
+copy_4_move_3:
+ MOVW (R14), CX
+ MOVB 2(R14), R12
+ MOVW CX, (R9)
+ MOVB R12, 2(R9)
+ ADDQ R13, R14
+ ADDQ R13, R9
+ JMP copy_4_end
+
+copy_4_move_4through7:
+ MOVL (R14), CX
+ MOVL -4(R14)(R13*1), R12
+ MOVL CX, (R9)
+ MOVL R12, -4(R9)(R13*1)
+ ADDQ R13, R14
+ ADDQ R13, R9
+ JMP copy_4_end
+
+copy_4_move_8through16:
+ MOVQ (R14), CX
+ MOVQ -8(R14)(R13*1), R12
+ MOVQ CX, (R9)
+ MOVQ R12, -8(R9)(R13*1)
+ ADDQ R13, R14
+ ADDQ R13, R9
+
+copy_4_end:
+ ADDQ R13, R11
+ JMP handle_loop
+ JMP loop_finished
+
+copy_all_from_history:
+ MOVQ CX, R15
+ SUBQ $0x10, R15
+ JB copy_5_small
+
+copy_5_loop:
+ MOVUPS (R14), X0
+ MOVUPS X0, (R9)
+ ADDQ $0x10, R14
+ ADDQ $0x10, R9
+ SUBQ $0x10, R15
+ JAE copy_5_loop
+ LEAQ 16(R14)(R15*1), R14
+ LEAQ 16(R9)(R15*1), R9
+ MOVUPS -16(R14), X0
+ MOVUPS X0, -16(R9)
+ JMP copy_5_end
+
+copy_5_small:
+ CMPQ CX, $0x03
+ JE copy_5_move_3
+ JB copy_5_move_1or2
+ CMPQ CX, $0x08
+ JB copy_5_move_4through7
+ JMP copy_5_move_8through16
+
+copy_5_move_1or2:
+ MOVB (R14), R15
+ MOVB -1(R14)(CX*1), BP
+ MOVB R15, (R9)
+ MOVB BP, -1(R9)(CX*1)
+ ADDQ CX, R14
+ ADDQ CX, R9
+ JMP copy_5_end
+
+copy_5_move_3:
+ MOVW (R14), R15
+ MOVB 2(R14), BP
+ MOVW R15, (R9)
+ MOVB BP, 2(R9)
+ ADDQ CX, R14
+ ADDQ CX, R9
+ JMP copy_5_end
+
+copy_5_move_4through7:
+ MOVL (R14), R15
+ MOVL -4(R14)(CX*1), BP
+ MOVL R15, (R9)
+ MOVL BP, -4(R9)(CX*1)
+ ADDQ CX, R14
+ ADDQ CX, R9
+ JMP copy_5_end
+
+copy_5_move_8through16:
+ MOVQ (R14), R15
+ MOVQ -8(R14)(CX*1), BP
+ MOVQ R15, (R9)
+ MOVQ BP, -8(R9)(CX*1)
+ ADDQ CX, R14
+ ADDQ CX, R9
+
+copy_5_end:
+ ADDQ CX, R11
+ SUBQ CX, R13
+
+ // Copy match from the current buffer
+copy_match:
+ MOVQ R9, CX
+ SUBQ R12, CX
+
+ // ml <= mo
+ CMPQ R13, R12
+ JA copy_overlapping_match
+
+ // Copy non-overlapping match
+ ADDQ R13, R11
+ MOVQ R13, R12
+ SUBQ $0x10, R12
+ JB copy_2_small
+
+copy_2_loop:
+ MOVUPS (CX), X0
+ MOVUPS X0, (R9)
+ ADDQ $0x10, CX
+ ADDQ $0x10, R9
+ SUBQ $0x10, R12
+ JAE copy_2_loop
+ LEAQ 16(CX)(R12*1), CX
+ LEAQ 16(R9)(R12*1), R9
+ MOVUPS -16(CX), X0
+ MOVUPS X0, -16(R9)
+ JMP copy_2_end
+
+copy_2_small:
+ CMPQ R13, $0x03
+ JE copy_2_move_3
+ JB copy_2_move_1or2
+ CMPQ R13, $0x08
+ JB copy_2_move_4through7
+ JMP copy_2_move_8through16
+
+copy_2_move_1or2:
+ MOVB (CX), R12
+ MOVB -1(CX)(R13*1), R14
+ MOVB R12, (R9)
+ MOVB R14, -1(R9)(R13*1)
+ ADDQ R13, CX
+ ADDQ R13, R9
+ JMP copy_2_end
+
+copy_2_move_3:
+ MOVW (CX), R12
+ MOVB 2(CX), R14
+ MOVW R12, (R9)
+ MOVB R14, 2(R9)
+ ADDQ R13, CX
+ ADDQ R13, R9
+ JMP copy_2_end
+
+copy_2_move_4through7:
+ MOVL (CX), R12
+ MOVL -4(CX)(R13*1), R14
+ MOVL R12, (R9)
+ MOVL R14, -4(R9)(R13*1)
+ ADDQ R13, CX
+ ADDQ R13, R9
+ JMP copy_2_end
+
+copy_2_move_8through16:
+ MOVQ (CX), R12
+ MOVQ -8(CX)(R13*1), R14
+ MOVQ R12, (R9)
+ MOVQ R14, -8(R9)(R13*1)
+ ADDQ R13, CX
+ ADDQ R13, R9
+
+copy_2_end:
+ JMP handle_loop
+
+ // Copy overlapping match
+copy_overlapping_match:
+ ADDQ R13, R11
+
+copy_slow_3:
+ MOVB (CX), R12
+ MOVB R12, (R9)
+ INCQ CX
+ INCQ R9
+ DECQ R13
+ JNZ copy_slow_3
+
+handle_loop:
+ MOVQ ctx+16(FP), CX
+ DECQ 96(CX)
+ JNS sequenceDecs_decodeSync_safe_bmi2_main_loop
+
+loop_finished:
+ MOVQ br+8(FP), CX
+ MOVQ AX, 32(CX)
+ MOVB DL, 40(CX)
+ MOVQ BX, 24(CX)
+
+ // Update the context
+ MOVQ ctx+16(FP), AX
+ MOVQ R11, 136(AX)
+ MOVQ 144(AX), CX
+ SUBQ CX, R10
+ MOVQ R10, 168(AX)
+
+ // Return success
+ MOVQ $0x00000000, ret+24(FP)
+ RET
+
+ // Return with match length error
+sequenceDecs_decodeSync_safe_bmi2_error_match_len_ofs_mismatch:
+ MOVQ 16(SP), AX
+ MOVQ ctx+16(FP), CX
+ MOVQ AX, 216(CX)
+ MOVQ $0x00000001, ret+24(FP)
+ RET
+
+ // Return with match too long error
+sequenceDecs_decodeSync_safe_bmi2_error_match_len_too_big:
+ MOVQ ctx+16(FP), AX
+ MOVQ 16(SP), CX
+ MOVQ CX, 216(AX)
+ MOVQ $0x00000002, ret+24(FP)
+ RET
+
+ // Return with match offset too long error
+error_match_off_too_big:
+ MOVQ ctx+16(FP), AX
+ MOVQ 8(SP), CX
+ MOVQ CX, 224(AX)
+ MOVQ R11, 136(AX)
+ MOVQ $0x00000003, ret+24(FP)
+ RET
+
+ // Return with not enough literals error
+error_not_enough_literals:
+ MOVQ ctx+16(FP), AX
+ MOVQ 24(SP), CX
+ MOVQ CX, 208(AX)
+ MOVQ $0x00000004, ret+24(FP)
+ RET
+
+ // Return with overread error
+error_overread:
+ MOVQ $0x00000006, ret+24(FP)
+ RET
+
+ // Return with not enough output space error
+error_not_enough_space:
+ MOVQ ctx+16(FP), AX
+ MOVQ 24(SP), CX
+ MOVQ CX, 208(AX)
+ MOVQ 16(SP), CX
+ MOVQ CX, 216(AX)
+ MOVQ R11, 136(AX)
+ MOVQ $0x00000005, ret+24(FP)
+ RET
diff --git a/vendor/github.com/klauspost/compress/zstd/seqdec_generic.go b/vendor/github.com/klauspost/compress/zstd/seqdec_generic.go
new file mode 100644
index 000000000..ac2a80d29
--- /dev/null
+++ b/vendor/github.com/klauspost/compress/zstd/seqdec_generic.go
@@ -0,0 +1,237 @@
+//go:build !amd64 || appengine || !gc || noasm
+// +build !amd64 appengine !gc noasm
+
+package zstd
+
+import (
+ "fmt"
+ "io"
+)
+
+// decode sequences from the stream with the provided history but without dictionary.
+func (s *sequenceDecs) decodeSyncSimple(hist []byte) (bool, error) {
+ return false, nil
+}
+
+// decode sequences from the stream without the provided history.
+func (s *sequenceDecs) decode(seqs []seqVals) error {
+ br := s.br
+
+ // Grab full sizes tables, to avoid bounds checks.
+ llTable, mlTable, ofTable := s.litLengths.fse.dt[:maxTablesize], s.matchLengths.fse.dt[:maxTablesize], s.offsets.fse.dt[:maxTablesize]
+ llState, mlState, ofState := s.litLengths.state.state, s.matchLengths.state.state, s.offsets.state.state
+ s.seqSize = 0
+ litRemain := len(s.literals)
+
+ maxBlockSize := maxCompressedBlockSize
+ if s.windowSize < maxBlockSize {
+ maxBlockSize = s.windowSize
+ }
+ for i := range seqs {
+ var ll, mo, ml int
+ if br.off > 4+((maxOffsetBits+16+16)>>3) {
+ // inlined function:
+ // ll, mo, ml = s.nextFast(br, llState, mlState, ofState)
+
+ // Final will not read from stream.
+ var llB, mlB, moB uint8
+ ll, llB = llState.final()
+ ml, mlB = mlState.final()
+ mo, moB = ofState.final()
+
+ // extra bits are stored in reverse order.
+ br.fillFast()
+ mo += br.getBits(moB)
+ if s.maxBits > 32 {
+ br.fillFast()
+ }
+ ml += br.getBits(mlB)
+ ll += br.getBits(llB)
+
+ if moB > 1 {
+ s.prevOffset[2] = s.prevOffset[1]
+ s.prevOffset[1] = s.prevOffset[0]
+ s.prevOffset[0] = mo
+ } else {
+ // mo = s.adjustOffset(mo, ll, moB)
+ // Inlined for rather big speedup
+ if ll == 0 {
+ // There is an exception though, when current sequence's literals_length = 0.
+ // In this case, repeated offsets are shifted by one, so an offset_value of 1 means Repeated_Offset2,
+ // an offset_value of 2 means Repeated_Offset3, and an offset_value of 3 means Repeated_Offset1 - 1_byte.
+ mo++
+ }
+
+ if mo == 0 {
+ mo = s.prevOffset[0]
+ } else {
+ var temp int
+ if mo == 3 {
+ temp = s.prevOffset[0] - 1
+ } else {
+ temp = s.prevOffset[mo]
+ }
+
+ if temp == 0 {
+ // 0 is not valid; input is corrupted; force offset to 1
+ println("WARNING: temp was 0")
+ temp = 1
+ }
+
+ if mo != 1 {
+ s.prevOffset[2] = s.prevOffset[1]
+ }
+ s.prevOffset[1] = s.prevOffset[0]
+ s.prevOffset[0] = temp
+ mo = temp
+ }
+ }
+ br.fillFast()
+ } else {
+ if br.overread() {
+ if debugDecoder {
+ printf("reading sequence %d, exceeded available data\n", i)
+ }
+ return io.ErrUnexpectedEOF
+ }
+ ll, mo, ml = s.next(br, llState, mlState, ofState)
+ br.fill()
+ }
+
+ if debugSequences {
+ println("Seq", i, "Litlen:", ll, "mo:", mo, "(abs) ml:", ml)
+ }
+ // Evaluate.
+ // We might be doing this async, so do it early.
+ if mo == 0 && ml > 0 {
+ return fmt.Errorf("zero matchoff and matchlen (%d) > 0", ml)
+ }
+ if ml > maxMatchLen {
+ return fmt.Errorf("match len (%d) bigger than max allowed length", ml)
+ }
+ s.seqSize += ll + ml
+ if s.seqSize > maxBlockSize {
+ return fmt.Errorf("output bigger than max block size (%d)", maxBlockSize)
+ }
+ litRemain -= ll
+ if litRemain < 0 {
+ return fmt.Errorf("unexpected literal count, want %d bytes, but only %d is available", ll, litRemain+ll)
+ }
+ seqs[i] = seqVals{
+ ll: ll,
+ ml: ml,
+ mo: mo,
+ }
+ if i == len(seqs)-1 {
+ // This is the last sequence, so we shouldn't update state.
+ break
+ }
+
+ // Manually inlined, ~ 5-20% faster
+ // Update all 3 states at once. Approx 20% faster.
+ nBits := llState.nbBits() + mlState.nbBits() + ofState.nbBits()
+ if nBits == 0 {
+ llState = llTable[llState.newState()&maxTableMask]
+ mlState = mlTable[mlState.newState()&maxTableMask]
+ ofState = ofTable[ofState.newState()&maxTableMask]
+ } else {
+ bits := br.get32BitsFast(nBits)
+ lowBits := uint16(bits >> ((ofState.nbBits() + mlState.nbBits()) & 31))
+ llState = llTable[(llState.newState()+lowBits)&maxTableMask]
+
+ lowBits = uint16(bits >> (ofState.nbBits() & 31))
+ lowBits &= bitMask[mlState.nbBits()&15]
+ mlState = mlTable[(mlState.newState()+lowBits)&maxTableMask]
+
+ lowBits = uint16(bits) & bitMask[ofState.nbBits()&15]
+ ofState = ofTable[(ofState.newState()+lowBits)&maxTableMask]
+ }
+ }
+ s.seqSize += litRemain
+ if s.seqSize > maxBlockSize {
+ return fmt.Errorf("output bigger than max block size (%d)", maxBlockSize)
+ }
+ err := br.close()
+ if err != nil {
+ printf("Closing sequences: %v, %+v\n", err, *br)
+ }
+ return err
+}
+
+// executeSimple handles cases when a dictionary is not used.
+func (s *sequenceDecs) executeSimple(seqs []seqVals, hist []byte) error {
+ // Ensure we have enough output size...
+ if len(s.out)+s.seqSize > cap(s.out) {
+ addBytes := s.seqSize + len(s.out)
+ s.out = append(s.out, make([]byte, addBytes)...)
+ s.out = s.out[:len(s.out)-addBytes]
+ }
+
+ if debugDecoder {
+ printf("Execute %d seqs with literals: %d into %d bytes\n", len(seqs), len(s.literals), s.seqSize)
+ }
+
+ var t = len(s.out)
+ out := s.out[:t+s.seqSize]
+
+ for _, seq := range seqs {
+ // Add literals
+ copy(out[t:], s.literals[:seq.ll])
+ t += seq.ll
+ s.literals = s.literals[seq.ll:]
+
+ // Malformed input
+ if seq.mo > t+len(hist) || seq.mo > s.windowSize {
+ return fmt.Errorf("match offset (%d) bigger than current history (%d)", seq.mo, t+len(hist))
+ }
+
+ // Copy from history.
+ if v := seq.mo - t; v > 0 {
+ // v is the start position in history from end.
+ start := len(hist) - v
+ if seq.ml > v {
+ // Some goes into the current block.
+ // Copy remainder of history
+ copy(out[t:], hist[start:])
+ t += v
+ seq.ml -= v
+ } else {
+ copy(out[t:], hist[start:start+seq.ml])
+ t += seq.ml
+ continue
+ }
+ }
+
+ // We must be in the current buffer now
+ if seq.ml > 0 {
+ start := t - seq.mo
+ if seq.ml <= t-start {
+ // No overlap
+ copy(out[t:], out[start:start+seq.ml])
+ t += seq.ml
+ } else {
+ // Overlapping copy
+ // Extend destination slice and copy one byte at the time.
+ src := out[start : start+seq.ml]
+ dst := out[t:]
+ dst = dst[:len(src)]
+ t += len(src)
+ // Destination is the space we just added.
+ for i := range src {
+ dst[i] = src[i]
+ }
+ }
+ }
+ }
+ // Add final literals
+ copy(out[t:], s.literals)
+ if debugDecoder {
+ t += len(s.literals)
+ if t != len(out) {
+ panic(fmt.Errorf("length mismatch, want %d, got %d, ss: %d", len(out), t, s.seqSize))
+ }
+ }
+ s.out = out
+
+ return nil
+}
diff --git a/vendor/github.com/klauspost/compress/zstd/zip.go b/vendor/github.com/klauspost/compress/zstd/zip.go
index ffffcbc25..29c15c8c4 100644
--- a/vendor/github.com/klauspost/compress/zstd/zip.go
+++ b/vendor/github.com/klauspost/compress/zstd/zip.go
@@ -18,26 +18,44 @@ const ZipMethodWinZip = 93
// See https://pkware.cachefly.net/webdocs/APPNOTE/APPNOTE-6.3.9.TXT
const ZipMethodPKWare = 20
-var zipReaderPool sync.Pool
+// zipReaderPool is the default reader pool.
+var zipReaderPool = sync.Pool{New: func() interface{} {
+ z, err := NewReader(nil, WithDecoderLowmem(true), WithDecoderMaxWindow(128<<20), WithDecoderConcurrency(1))
+ if err != nil {
+ panic(err)
+ }
+ return z
+}}
// newZipReader creates a pooled zip decompressor.
-func newZipReader(r io.Reader) io.ReadCloser {
- dec, ok := zipReaderPool.Get().(*Decoder)
- if ok {
- dec.Reset(r)
- } else {
- d, err := NewReader(r, WithDecoderConcurrency(1), WithDecoderLowmem(true))
- if err != nil {
- panic(err)
+func newZipReader(opts ...DOption) func(r io.Reader) io.ReadCloser {
+ pool := &zipReaderPool
+ if len(opts) > 0 {
+ opts = append([]DOption{WithDecoderLowmem(true), WithDecoderMaxWindow(128 << 20)}, opts...)
+ // Force concurrency 1
+ opts = append(opts, WithDecoderConcurrency(1))
+ // Create our own pool
+ pool = &sync.Pool{}
+ }
+ return func(r io.Reader) io.ReadCloser {
+ dec, ok := pool.Get().(*Decoder)
+ if ok {
+ dec.Reset(r)
+ } else {
+ d, err := NewReader(r, opts...)
+ if err != nil {
+ panic(err)
+ }
+ dec = d
}
- dec = d
+ return &pooledZipReader{dec: dec, pool: pool}
}
- return &pooledZipReader{dec: dec}
}
type pooledZipReader struct {
- mu sync.Mutex // guards Close and Read
- dec *Decoder
+ mu sync.Mutex // guards Close and Read
+ pool *sync.Pool
+ dec *Decoder
}
func (r *pooledZipReader) Read(p []byte) (n int, err error) {
@@ -48,8 +66,8 @@ func (r *pooledZipReader) Read(p []byte) (n int, err error) {
}
dec, err := r.dec.Read(p)
if err == io.EOF {
- err = r.dec.Reset(nil)
- zipReaderPool.Put(r.dec)
+ r.dec.Reset(nil)
+ r.pool.Put(r.dec)
r.dec = nil
}
return dec, err
@@ -61,7 +79,7 @@ func (r *pooledZipReader) Close() error {
var err error
if r.dec != nil {
err = r.dec.Reset(nil)
- zipReaderPool.Put(r.dec)
+ r.pool.Put(r.dec)
r.dec = nil
}
return err
@@ -115,6 +133,9 @@ func ZipCompressor(opts ...EOption) func(w io.Writer) (io.WriteCloser, error) {
// ZipDecompressor returns a decompressor that can be registered with zip libraries.
// See ZipCompressor for example.
-func ZipDecompressor() func(r io.Reader) io.ReadCloser {
- return newZipReader
+// Options can be specified. WithDecoderConcurrency(1) is forced,
+// and by default a 128MB maximum decompression window is specified.
+// The window size can be overridden if required.
+func ZipDecompressor(opts ...DOption) func(r io.Reader) io.ReadCloser {
+ return newZipReader(opts...)
}
diff --git a/vendor/github.com/klauspost/compress/zstd/zstd.go b/vendor/github.com/klauspost/compress/zstd/zstd.go
index c1c90b4a0..89396673d 100644
--- a/vendor/github.com/klauspost/compress/zstd/zstd.go
+++ b/vendor/github.com/klauspost/compress/zstd/zstd.go
@@ -36,9 +36,6 @@ const forcePreDef = false
// zstdMinMatch is the minimum zstd match length.
const zstdMinMatch = 3
-// Reset the buffer offset when reaching this.
-const bufferReset = math.MaxInt32 - MaxWindowSize
-
// fcsUnknown is used for unknown frame content size.
const fcsUnknown = math.MaxUint64
@@ -75,7 +72,6 @@ var (
ErrDecoderSizeExceeded = errors.New("decompressed size exceeds configured limit")
// ErrUnknownDictionary is returned if the dictionary ID is unknown.
- // For the time being dictionaries are not supported.
ErrUnknownDictionary = errors.New("unknown dictionary")
// ErrFrameSizeExceeded is returned if the stated frame size is exceeded.
@@ -110,49 +106,33 @@ func printf(format string, a ...interface{}) {
}
}
-// matchLenFast does matching, but will not match the last up to 7 bytes.
-func matchLenFast(a, b []byte) int {
- endI := len(a) & (math.MaxInt32 - 7)
- for i := 0; i < endI; i += 8 {
- if diff := load64(a, i) ^ load64(b, i); diff != 0 {
- return i + bits.TrailingZeros64(diff)>>3
- }
- }
- return endI
-}
-
-// matchLen returns the maximum length.
+// matchLen returns the maximum common prefix length of a and b.
// a must be the shortest of the two.
-// The function also returns whether all bytes matched.
-func matchLen(a, b []byte) int {
- b = b[:len(a)]
- for i := 0; i < len(a)-7; i += 8 {
- if diff := load64(a, i) ^ load64(b, i); diff != 0 {
- return i + (bits.TrailingZeros64(diff) >> 3)
+func matchLen(a, b []byte) (n int) {
+ for ; len(a) >= 8 && len(b) >= 8; a, b = a[8:], b[8:] {
+ diff := binary.LittleEndian.Uint64(a) ^ binary.LittleEndian.Uint64(b)
+ if diff != 0 {
+ return n + bits.TrailingZeros64(diff)>>3
}
+ n += 8
}
- checked := (len(a) >> 3) << 3
- a = a[checked:]
- b = b[checked:]
for i := range a {
if a[i] != b[i] {
- return i + checked
+ break
}
+ n++
}
- return len(a) + checked
+ return n
+
}
func load3232(b []byte, i int32) uint32 {
- return binary.LittleEndian.Uint32(b[i:])
+ return binary.LittleEndian.Uint32(b[:len(b):len(b)][i:])
}
func load6432(b []byte, i int32) uint64 {
- return binary.LittleEndian.Uint64(b[i:])
-}
-
-func load64(b []byte, i int) uint64 {
- return binary.LittleEndian.Uint64(b[i:])
+ return binary.LittleEndian.Uint64(b[:len(b):len(b)][i:])
}
type byter interface {
diff --git a/vendor/github.com/klauspost/cpuid/v2/README.md b/vendor/github.com/klauspost/cpuid/v2/README.md
index ea7df3dd8..accd7abaf 100644
--- a/vendor/github.com/klauspost/cpuid/v2/README.md
+++ b/vendor/github.com/klauspost/cpuid/v2/README.md
@@ -16,10 +16,23 @@ Package home: https://github.com/klauspost/cpuid
## installing
-`go get -u github.com/klauspost/cpuid/v2` using modules.
-
+`go get -u github.com/klauspost/cpuid/v2` using modules.
Drop `v2` for others.
+Installing binary:
+
+`go install github.com/klauspost/cpuid/v2/cmd/cpuid@latest`
+
+Or download binaries from release page: https://github.com/klauspost/cpuid/releases
+
+### Homebrew
+
+For macOS/Linux users, you can install via [brew](https://brew.sh/)
+
+```sh
+$ brew install cpuid
+```
+
## example
```Go
@@ -77,10 +90,14 @@ We have Streaming SIMD 2 Extensions
The `cpuid.CPU` provides access to CPU features. Use `cpuid.CPU.Supports()` to check for CPU features.
A faster `cpuid.CPU.Has()` is provided which will usually be inlined by the gc compiler.
+To test a larger number of features, they can be combined using `f := CombineFeatures(CMOV, CMPXCHG8, X87, FXSR, MMX, SYSCALL, SSE, SSE2)`, etc.
+This can be using with `cpuid.CPU.HasAll(f)` to quickly test if all features are supported.
+
Note that for some cpu/os combinations some features will not be detected.
`amd64` has rather good support and should work reliably on all platforms.
-Note that hypervisors may not pass through all CPU features.
+Note that hypervisors may not pass through all CPU features through to the guest OS,
+so even if your host supports a feature it may not be visible on guests.
## arm64 feature detection
@@ -253,6 +270,224 @@ Exit Code 0
Exit Code 1
```
+
+## Available flags
+
+### x86 & amd64
+
+| Feature Flag | Description |
+|--------------------|------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------|
+| ADX | Intel ADX (Multi-Precision Add-Carry Instruction Extensions) |
+| AESNI | Advanced Encryption Standard New Instructions |
+| AMD3DNOW | AMD 3DNOW |
+| AMD3DNOWEXT | AMD 3DNowExt |
+| AMXBF16 | Tile computational operations on BFLOAT16 numbers |
+| AMXINT8 | Tile computational operations on 8-bit integers |
+| AMXFP16 | Tile computational operations on FP16 numbers |
+| AMXTILE | Tile architecture |
+| AVX | AVX functions |
+| AVX2 | AVX2 functions |
+| AVX512BF16 | AVX-512 BFLOAT16 Instructions |
+| AVX512BITALG | AVX-512 Bit Algorithms |
+| AVX512BW | AVX-512 Byte and Word Instructions |
+| AVX512CD | AVX-512 Conflict Detection Instructions |
+| AVX512DQ | AVX-512 Doubleword and Quadword Instructions |
+| AVX512ER | AVX-512 Exponential and Reciprocal Instructions |
+| AVX512F | AVX-512 Foundation |
+| AVX512FP16 | AVX-512 FP16 Instructions |
+| AVX512IFMA | AVX-512 Integer Fused Multiply-Add Instructions |
+| AVX512PF | AVX-512 Prefetch Instructions |
+| AVX512VBMI | AVX-512 Vector Bit Manipulation Instructions |
+| AVX512VBMI2 | AVX-512 Vector Bit Manipulation Instructions, Version 2 |
+| AVX512VL | AVX-512 Vector Length Extensions |
+| AVX512VNNI | AVX-512 Vector Neural Network Instructions |
+| AVX512VP2INTERSECT | AVX-512 Intersect for D/Q |
+| AVX512VPOPCNTDQ | AVX-512 Vector Population Count Doubleword and Quadword |
+| AVXIFMA | AVX-IFMA instructions |
+| AVXNECONVERT | AVX-NE-CONVERT instructions |
+| AVXSLOW | Indicates the CPU performs 2 128 bit operations instead of one |
+| AVXVNNI | AVX (VEX encoded) VNNI neural network instructions |
+| AVXVNNIINT8 | AVX-VNNI-INT8 instructions |
+| BHI_CTRL | Branch History Injection and Intra-mode Branch Target Injection / CVE-2022-0001, CVE-2022-0002 / INTEL-SA-00598 |
+| BMI1 | Bit Manipulation Instruction Set 1 |
+| BMI2 | Bit Manipulation Instruction Set 2 |
+| CETIBT | Intel CET Indirect Branch Tracking |
+| CETSS | Intel CET Shadow Stack |
+| CLDEMOTE | Cache Line Demote |
+| CLMUL | Carry-less Multiplication |
+| CLZERO | CLZERO instruction supported |
+| CMOV | i686 CMOV |
+| CMPCCXADD | CMPCCXADD instructions |
+| CMPSB_SCADBS_SHORT | Fast short CMPSB and SCASB |
+| CMPXCHG8 | CMPXCHG8 instruction |
+| CPBOOST | Core Performance Boost |
+| CPPC | AMD: Collaborative Processor Performance Control |
+| CX16 | CMPXCHG16B Instruction |
+| EFER_LMSLE_UNS | AMD: =Core::X86::Msr::EFER[LMSLE] is not supported, and MBZ |
+| ENQCMD | Enqueue Command |
+| ERMS | Enhanced REP MOVSB/STOSB |
+| F16C | Half-precision floating-point conversion |
+| FLUSH_L1D | Flush L1D cache |
+| FMA3 | Intel FMA 3. Does not imply AVX. |
+| FMA4 | Bulldozer FMA4 functions |
+| FP128 | AMD: When set, the internal FP/SIMD execution datapath is 128-bits wide |
+| FP256 | AMD: When set, the internal FP/SIMD execution datapath is 256-bits wide |
+| FSRM | Fast Short Rep Mov |
+| FXSR | FXSAVE, FXRESTOR instructions, CR4 bit 9 |
+| FXSROPT | FXSAVE/FXRSTOR optimizations |
+| GFNI | Galois Field New Instructions. May require other features (AVX, AVX512VL,AVX512F) based on usage. |
+| HLE | Hardware Lock Elision |
+| HRESET | If set CPU supports history reset and the IA32_HRESET_ENABLE MSR |
+| HTT | Hyperthreading (enabled) |
+| HWA | Hardware assert supported. Indicates support for MSRC001_10 |
+| HYBRID_CPU | This part has CPUs of more than one type. |
+| HYPERVISOR | This bit has been reserved by Intel & AMD for use by hypervisors |
+| IA32_ARCH_CAP | IA32_ARCH_CAPABILITIES MSR (Intel) |
+| IA32_CORE_CAP | IA32_CORE_CAPABILITIES MSR |
+| IBPB | Indirect Branch Restricted Speculation (IBRS) and Indirect Branch Predictor Barrier (IBPB) |
+| IBRS | AMD: Indirect Branch Restricted Speculation |
+| IBRS_PREFERRED | AMD: IBRS is preferred over software solution |
+| IBRS_PROVIDES_SMP | AMD: IBRS provides Same Mode Protection |
+| IBS | Instruction Based Sampling (AMD) |
+| IBSBRNTRGT | Instruction Based Sampling Feature (AMD) |
+| IBSFETCHSAM | Instruction Based Sampling Feature (AMD) |
+| IBSFFV | Instruction Based Sampling Feature (AMD) |
+| IBSOPCNT | Instruction Based Sampling Feature (AMD) |
+| IBSOPCNTEXT | Instruction Based Sampling Feature (AMD) |
+| IBSOPSAM | Instruction Based Sampling Feature (AMD) |
+| IBSRDWROPCNT | Instruction Based Sampling Feature (AMD) |
+| IBSRIPINVALIDCHK | Instruction Based Sampling Feature (AMD) |
+| IBS_FETCH_CTLX | AMD: IBS fetch control extended MSR supported |
+| IBS_OPDATA4 | AMD: IBS op data 4 MSR supported |
+| IBS_OPFUSE | AMD: Indicates support for IbsOpFuse |
+| IBS_PREVENTHOST | Disallowing IBS use by the host supported |
+| IBS_ZEN4 | Fetch and Op IBS support IBS extensions added with Zen4 |
+| IDPRED_CTRL | IPRED_DIS |
+| INT_WBINVD | WBINVD/WBNOINVD are interruptible. |
+| INVLPGB | NVLPGB and TLBSYNC instruction supported |
+| LAHF | LAHF/SAHF in long mode |
+| LAM | If set, CPU supports Linear Address Masking |
+| LBRVIRT | LBR virtualization |
+| LZCNT | LZCNT instruction |
+| MCAOVERFLOW | MCA overflow recovery support. |
+| MCDT_NO | Processor do not exhibit MXCSR Configuration Dependent Timing behavior and do not need to mitigate it. |
+| MCOMMIT | MCOMMIT instruction supported |
+| MD_CLEAR | VERW clears CPU buffers |
+| MMX | standard MMX |
+| MMXEXT | SSE integer functions or AMD MMX ext |
+| MOVBE | MOVBE instruction (big-endian) |
+| MOVDIR64B | Move 64 Bytes as Direct Store |
+| MOVDIRI | Move Doubleword as Direct Store |
+| MOVSB_ZL | Fast Zero-Length MOVSB |
+| MPX | Intel MPX (Memory Protection Extensions) |
+| MOVU | MOVU SSE instructions are more efficient and should be preferred to SSE MOVL/MOVH. MOVUPS is more efficient than MOVLPS/MOVHPS. MOVUPD is more efficient than MOVLPD/MOVHPD |
+| MSRIRC | Instruction Retired Counter MSR available |
+| MSRLIST | Read/Write List of Model Specific Registers |
+| MSR_PAGEFLUSH | Page Flush MSR available |
+| NRIPS | Indicates support for NRIP save on VMEXIT |
+| NX | NX (No-Execute) bit |
+| OSXSAVE | XSAVE enabled by OS |
+| PCONFIG | PCONFIG for Intel Multi-Key Total Memory Encryption |
+| POPCNT | POPCNT instruction |
+| PPIN | AMD: Protected Processor Inventory Number support. Indicates that Protected Processor Inventory Number (PPIN) capability can be enabled |
+| PREFETCHI | PREFETCHIT0/1 instructions |
+| PSFD | Predictive Store Forward Disable |
+| RDPRU | RDPRU instruction supported |
+| RDRAND | RDRAND instruction is available |
+| RDSEED | RDSEED instruction is available |
+| RDTSCP | RDTSCP Instruction |
+| RRSBA_CTRL | Restricted RSB Alternate |
+| RTM | Restricted Transactional Memory |
+| RTM_ALWAYS_ABORT | Indicates that the loaded microcode is forcing RTM abort. |
+| SERIALIZE | Serialize Instruction Execution |
+| SEV | AMD Secure Encrypted Virtualization supported |
+| SEV_64BIT | AMD SEV guest execution only allowed from a 64-bit host |
+| SEV_ALTERNATIVE | AMD SEV Alternate Injection supported |
+| SEV_DEBUGSWAP | Full debug state swap supported for SEV-ES guests |
+| SEV_ES | AMD SEV Encrypted State supported |
+| SEV_RESTRICTED | AMD SEV Restricted Injection supported |
+| SEV_SNP | AMD SEV Secure Nested Paging supported |
+| SGX | Software Guard Extensions |
+| SGXLC | Software Guard Extensions Launch Control |
+| SHA | Intel SHA Extensions |
+| SME | AMD Secure Memory Encryption supported |
+| SME_COHERENT | AMD Hardware cache coherency across encryption domains enforced |
+| SPEC_CTRL_SSBD | Speculative Store Bypass Disable |
+| SRBDS_CTRL | SRBDS mitigation MSR available |
+| SSE | SSE functions |
+| SSE2 | P4 SSE functions |
+| SSE3 | Prescott SSE3 functions |
+| SSE4 | Penryn SSE4.1 functions |
+| SSE42 | Nehalem SSE4.2 functions |
+| SSE4A | AMD Barcelona microarchitecture SSE4a instructions |
+| SSSE3 | Conroe SSSE3 functions |
+| STIBP | Single Thread Indirect Branch Predictors |
+| STIBP_ALWAYSON | AMD: Single Thread Indirect Branch Prediction Mode has Enhanced Performance and may be left Always On |
+| STOSB_SHORT | Fast short STOSB |
+| SUCCOR | Software uncorrectable error containment and recovery capability. |
+| SVM | AMD Secure Virtual Machine |
+| SVMDA | Indicates support for the SVM decode assists. |
+| SVMFBASID | SVM, Indicates that TLB flush events, including CR3 writes and CR4.PGE toggles, flush only the current ASID's TLB entries. Also indicates support for the extended VMCBTLB_Control |
+| SVML | AMD SVM lock. Indicates support for SVM-Lock. |
+| SVMNP | AMD SVM nested paging |
+| SVMPF | SVM pause intercept filter. Indicates support for the pause intercept filter |
+| SVMPFT | SVM PAUSE filter threshold. Indicates support for the PAUSE filter cycle count threshold |
+| SYSCALL | System-Call Extension (SCE): SYSCALL and SYSRET instructions. |
+| SYSEE | SYSENTER and SYSEXIT instructions |
+| TBM | AMD Trailing Bit Manipulation |
+| TDX_GUEST | Intel Trust Domain Extensions Guest |
+| TLB_FLUSH_NESTED | AMD: Flushing includes all the nested translations for guest translations |
+| TME | Intel Total Memory Encryption. The following MSRs are supported: IA32_TME_CAPABILITY, IA32_TME_ACTIVATE, IA32_TME_EXCLUDE_MASK, and IA32_TME_EXCLUDE_BASE. |
+| TOPEXT | TopologyExtensions: topology extensions support. Indicates support for CPUID Fn8000_001D_EAX_x[N:0]-CPUID Fn8000_001E_EDX. |
+| TSCRATEMSR | MSR based TSC rate control. Indicates support for MSR TSC ratio MSRC000_0104 |
+| TSXLDTRK | Intel TSX Suspend Load Address Tracking |
+| VAES | Vector AES. AVX(512) versions requires additional checks. |
+| VMCBCLEAN | VMCB clean bits. Indicates support for VMCB clean bits. |
+| VMPL | AMD VM Permission Levels supported |
+| VMSA_REGPROT | AMD VMSA Register Protection supported |
+| VMX | Virtual Machine Extensions |
+| VPCLMULQDQ | Carry-Less Multiplication Quadword. Requires AVX for 3 register versions. |
+| VTE | AMD Virtual Transparent Encryption supported |
+| WAITPKG | TPAUSE, UMONITOR, UMWAIT |
+| WBNOINVD | Write Back and Do Not Invalidate Cache |
+| WRMSRNS | Non-Serializing Write to Model Specific Register |
+| X87 | FPU |
+| XGETBV1 | Supports XGETBV with ECX = 1 |
+| XOP | Bulldozer XOP functions |
+| XSAVE | XSAVE, XRESTOR, XSETBV, XGETBV |
+| XSAVEC | Supports XSAVEC and the compacted form of XRSTOR. |
+| XSAVEOPT | XSAVEOPT available |
+| XSAVES | Supports XSAVES/XRSTORS and IA32_XSS |
+
+# ARM features:
+
+| Feature Flag | Description |
+|--------------|------------------------------------------------------------------|
+| AESARM | AES instructions |
+| ARMCPUID | Some CPU ID registers readable at user-level |
+| ASIMD | Advanced SIMD |
+| ASIMDDP | SIMD Dot Product |
+| ASIMDHP | Advanced SIMD half-precision floating point |
+| ASIMDRDM | Rounding Double Multiply Accumulate/Subtract (SQRDMLAH/SQRDMLSH) |
+| ATOMICS | Large System Extensions (LSE) |
+| CRC32 | CRC32/CRC32C instructions |
+| DCPOP | Data cache clean to Point of Persistence (DC CVAP) |
+| EVTSTRM | Generic timer |
+| FCMA | Floatin point complex number addition and multiplication |
+| FP | Single-precision and double-precision floating point |
+| FPHP | Half-precision floating point |
+| GPA | Generic Pointer Authentication |
+| JSCVT | Javascript-style double->int convert (FJCVTZS) |
+| LRCPC | Weaker release consistency (LDAPR, etc) |
+| PMULL | Polynomial Multiply instructions (PMULL/PMULL2) |
+| SHA1 | SHA-1 instructions (SHA1C, etc) |
+| SHA2 | SHA-2 instructions (SHA256H, etc) |
+| SHA3 | SHA-3 instructions (EOR3, RAXI, XAR, BCAX) |
+| SHA512 | SHA512 instructions |
+| SM3 | SM3 instructions |
+| SM4 | SM4 instructions |
+| SVE | Scalable Vector Extension |
+
# license
This code is published under an MIT license. See LICENSE file for more information.
diff --git a/vendor/github.com/klauspost/cpuid/v2/cpuid.go b/vendor/github.com/klauspost/cpuid/v2/cpuid.go
index 701f2385b..d015c744e 100644
--- a/vendor/github.com/klauspost/cpuid/v2/cpuid.go
+++ b/vendor/github.com/klauspost/cpuid/v2/cpuid.go
@@ -73,6 +73,7 @@ const (
AMD3DNOW // AMD 3DNOW
AMD3DNOWEXT // AMD 3DNowExt
AMXBF16 // Tile computational operations on BFLOAT16 numbers
+ AMXFP16 // Tile computational operations on FP16 numbers
AMXINT8 // Tile computational operations on 8-bit integers
AMXTILE // Tile architecture
AVX // AVX functions
@@ -93,8 +94,12 @@ const (
AVX512VNNI // AVX-512 Vector Neural Network Instructions
AVX512VP2INTERSECT // AVX-512 Intersect for D/Q
AVX512VPOPCNTDQ // AVX-512 Vector Population Count Doubleword and Quadword
+ AVXIFMA // AVX-IFMA instructions
+ AVXNECONVERT // AVX-NE-CONVERT instructions
AVXSLOW // Indicates the CPU performs 2 128 bit operations instead of one
AVXVNNI // AVX (VEX encoded) VNNI neural network instructions
+ AVXVNNIINT8 // AVX-VNNI-INT8 instructions
+ BHI_CTRL // Branch History Injection and Intra-mode Branch Target Injection / CVE-2022-0001, CVE-2022-0002 / INTEL-SA-00598
BMI1 // Bit Manipulation Instruction Set 1
BMI2 // Bit Manipulation Instruction Set 2
CETIBT // Intel CET Indirect Branch Tracking
@@ -103,15 +108,22 @@ const (
CLMUL // Carry-less Multiplication
CLZERO // CLZERO instruction supported
CMOV // i686 CMOV
+ CMPCCXADD // CMPCCXADD instructions
CMPSB_SCADBS_SHORT // Fast short CMPSB and SCASB
CMPXCHG8 // CMPXCHG8 instruction
CPBOOST // Core Performance Boost
+ CPPC // AMD: Collaborative Processor Performance Control
CX16 // CMPXCHG16B Instruction
+ EFER_LMSLE_UNS // AMD: =Core::X86::Msr::EFER[LMSLE] is not supported, and MBZ
ENQCMD // Enqueue Command
ERMS // Enhanced REP MOVSB/STOSB
F16C // Half-precision floating-point conversion
+ FLUSH_L1D // Flush L1D cache
FMA3 // Intel FMA 3. Does not imply AVX.
FMA4 // Bulldozer FMA4 functions
+ FP128 // AMD: When set, the internal FP/SIMD execution datapath is no more than 128-bits wide
+ FP256 // AMD: When set, the internal FP/SIMD execution datapath is no more than 256-bits wide
+ FSRM // Fast Short Rep Mov
FXSR // FXSAVE, FXRESTOR instructions, CR4 bit 9
FXSROPT // FXSAVE/FXRSTOR optimizations
GFNI // Galois Field New Instructions. May require other features (AVX, AVX512VL,AVX512F) based on usage.
@@ -119,8 +131,14 @@ const (
HRESET // If set CPU supports history reset and the IA32_HRESET_ENABLE MSR
HTT // Hyperthreading (enabled)
HWA // Hardware assert supported. Indicates support for MSRC001_10
+ HYBRID_CPU // This part has CPUs of more than one type.
HYPERVISOR // This bit has been reserved by Intel & AMD for use by hypervisors
+ IA32_ARCH_CAP // IA32_ARCH_CAPABILITIES MSR (Intel)
+ IA32_CORE_CAP // IA32_CORE_CAPABILITIES MSR
IBPB // Indirect Branch Restricted Speculation (IBRS) and Indirect Branch Predictor Barrier (IBPB)
+ IBRS // AMD: Indirect Branch Restricted Speculation
+ IBRS_PREFERRED // AMD: IBRS is preferred over software solution
+ IBRS_PROVIDES_SMP // AMD: IBRS provides Same Mode Protection
IBS // Instruction Based Sampling (AMD)
IBSBRNTRGT // Instruction Based Sampling Feature (AMD)
IBSFETCHSAM // Instruction Based Sampling Feature (AMD)
@@ -130,7 +148,12 @@ const (
IBSOPSAM // Instruction Based Sampling Feature (AMD)
IBSRDWROPCNT // Instruction Based Sampling Feature (AMD)
IBSRIPINVALIDCHK // Instruction Based Sampling Feature (AMD)
+ IBS_FETCH_CTLX // AMD: IBS fetch control extended MSR supported
+ IBS_OPDATA4 // AMD: IBS op data 4 MSR supported
+ IBS_OPFUSE // AMD: Indicates support for IbsOpFuse
IBS_PREVENTHOST // Disallowing IBS use by the host supported
+ IBS_ZEN4 // AMD: Fetch and Op IBS support IBS extensions added with Zen4
+ IDPRED_CTRL // IPRED_DIS
INT_WBINVD // WBINVD/WBNOINVD are interruptible.
INVLPGB // NVLPGB and TLBSYNC instruction supported
LAHF // LAHF/SAHF in long mode
@@ -138,28 +161,35 @@ const (
LBRVIRT // LBR virtualization
LZCNT // LZCNT instruction
MCAOVERFLOW // MCA overflow recovery support.
+ MCDT_NO // Processor do not exhibit MXCSR Configuration Dependent Timing behavior and do not need to mitigate it.
MCOMMIT // MCOMMIT instruction supported
+ MD_CLEAR // VERW clears CPU buffers
MMX // standard MMX
MMXEXT // SSE integer functions or AMD MMX ext
MOVBE // MOVBE instruction (big-endian)
MOVDIR64B // Move 64 Bytes as Direct Store
MOVDIRI // Move Doubleword as Direct Store
MOVSB_ZL // Fast Zero-Length MOVSB
+ MOVU // AMD: MOVU SSE instructions are more efficient and should be preferred to SSE MOVL/MOVH. MOVUPS is more efficient than MOVLPS/MOVHPS. MOVUPD is more efficient than MOVLPD/MOVHPD
MPX // Intel MPX (Memory Protection Extensions)
MSRIRC // Instruction Retired Counter MSR available
+ MSRLIST // Read/Write List of Model Specific Registers
MSR_PAGEFLUSH // Page Flush MSR available
NRIPS // Indicates support for NRIP save on VMEXIT
NX // NX (No-Execute) bit
OSXSAVE // XSAVE enabled by OS
PCONFIG // PCONFIG for Intel Multi-Key Total Memory Encryption
POPCNT // POPCNT instruction
+ PPIN // AMD: Protected Processor Inventory Number support. Indicates that Protected Processor Inventory Number (PPIN) capability can be enabled
+ PREFETCHI // PREFETCHIT0/1 instructions
+ PSFD // Predictive Store Forward Disable
RDPRU // RDPRU instruction supported
RDRAND // RDRAND instruction is available
RDSEED // RDSEED instruction is available
RDTSCP // RDTSCP Instruction
+ RRSBA_CTRL // Restricted RSB Alternate
RTM // Restricted Transactional Memory
RTM_ALWAYS_ABORT // Indicates that the loaded microcode is forcing RTM abort.
- SCE // SYSENTER and SYSEXIT instructions
SERIALIZE // Serialize Instruction Execution
SEV // AMD Secure Encrypted Virtualization supported
SEV_64BIT // AMD SEV guest execution only allowed from a 64-bit host
@@ -173,6 +203,8 @@ const (
SHA // Intel SHA Extensions
SME // AMD Secure Memory Encryption supported
SME_COHERENT // AMD Hardware cache coherency across encryption domains enforced
+ SPEC_CTRL_SSBD // Speculative Store Bypass Disable
+ SRBDS_CTRL // SRBDS mitigation MSR available
SSE // SSE functions
SSE2 // P4 SSE functions
SSE3 // Prescott SSE3 functions
@@ -181,6 +213,7 @@ const (
SSE4A // AMD Barcelona microarchitecture SSE4a instructions
SSSE3 // Conroe SSSE3 functions
STIBP // Single Thread Indirect Branch Predictors
+ STIBP_ALWAYSON // AMD: Single Thread Indirect Branch Prediction Mode has Enhanced Performance and may be left Always On
STOSB_SHORT // Fast short STOSB
SUCCOR // Software uncorrectable error containment and recovery capability.
SVM // AMD Secure Virtual Machine
@@ -190,8 +223,13 @@ const (
SVMNP // AMD SVM nested paging
SVMPF // SVM pause intercept filter. Indicates support for the pause intercept filter
SVMPFT // SVM PAUSE filter threshold. Indicates support for the PAUSE filter cycle count threshold
+ SYSCALL // System-Call Extension (SCE): SYSCALL and SYSRET instructions.
+ SYSEE // SYSENTER and SYSEXIT instructions
TBM // AMD Trailing Bit Manipulation
+ TDX_GUEST // Intel Trust Domain Extensions Guest
+ TLB_FLUSH_NESTED // AMD: Flushing includes all the nested translations for guest translations
TME // Intel Total Memory Encryption. The following MSRs are supported: IA32_TME_CAPABILITY, IA32_TME_ACTIVATE, IA32_TME_EXCLUDE_MASK, and IA32_TME_EXCLUDE_BASE.
+ TOPEXT // TopologyExtensions: topology extensions support. Indicates support for CPUID Fn8000_001D_EAX_x[N:0]-CPUID Fn8000_001E_EDX.
TSCRATEMSR // MSR based TSC rate control. Indicates support for MSR TSC ratio MSRC000_0104
TSXLDTRK // Intel TSX Suspend Load Address Tracking
VAES // Vector AES. AVX(512) versions requires additional checks.
@@ -203,6 +241,7 @@ const (
VTE // AMD Virtual Transparent Encryption supported
WAITPKG // TPAUSE, UMONITOR, UMWAIT
WBNOINVD // Write Back and Do Not Invalidate Cache
+ WRMSRNS // Non-Serializing Write to Model Specific Register
X87 // FPU
XGETBV1 // Supports XGETBV with ECX = 1
XOP // Bulldozer XOP functions
@@ -253,6 +292,7 @@ type CPUInfo struct {
LogicalCores int // Number of physical cores times threads that can run on each core through the use of hyperthreading. Will be 0 if undetectable.
Family int // CPU family number
Model int // CPU model number
+ Stepping int // CPU stepping info
CacheLine int // Cache line size in bytes. Will be 0 if undetectable.
Hz int64 // Clock speed, if known, 0 otherwise. Will attempt to contain base clock speed.
BoostFreq int64 // Max clock speed, if known, 0 otherwise
@@ -355,30 +395,61 @@ func (c CPUInfo) Supports(ids ...FeatureID) bool {
// Has allows for checking a single feature.
// Should be inlined by the compiler.
-func (c CPUInfo) Has(id FeatureID) bool {
+func (c *CPUInfo) Has(id FeatureID) bool {
return c.featureSet.inSet(id)
}
+// AnyOf returns whether the CPU supports one or more of the requested features.
+func (c CPUInfo) AnyOf(ids ...FeatureID) bool {
+ for _, id := range ids {
+ if c.featureSet.inSet(id) {
+ return true
+ }
+ }
+ return false
+}
+
+// Features contains several features combined for a fast check using
+// CpuInfo.HasAll
+type Features *flagSet
+
+// CombineFeatures allows to combine several features for a close to constant time lookup.
+func CombineFeatures(ids ...FeatureID) Features {
+ var v flagSet
+ for _, id := range ids {
+ v.set(id)
+ }
+ return &v
+}
+
+func (c *CPUInfo) HasAll(f Features) bool {
+ return c.featureSet.hasSetP(f)
+}
+
// https://en.wikipedia.org/wiki/X86-64#Microarchitecture_levels
-var level1Features = flagSetWith(CMOV, CMPXCHG8, X87, FXSR, MMX, SCE, SSE, SSE2)
-var level2Features = flagSetWith(CMOV, CMPXCHG8, X87, FXSR, MMX, SCE, SSE, SSE2, CX16, LAHF, POPCNT, SSE3, SSE4, SSE42, SSSE3)
-var level3Features = flagSetWith(CMOV, CMPXCHG8, X87, FXSR, MMX, SCE, SSE, SSE2, CX16, LAHF, POPCNT, SSE3, SSE4, SSE42, SSSE3, AVX, AVX2, BMI1, BMI2, F16C, FMA3, LZCNT, MOVBE, OSXSAVE)
-var level4Features = flagSetWith(CMOV, CMPXCHG8, X87, FXSR, MMX, SCE, SSE, SSE2, CX16, LAHF, POPCNT, SSE3, SSE4, SSE42, SSSE3, AVX, AVX2, BMI1, BMI2, F16C, FMA3, LZCNT, MOVBE, OSXSAVE, AVX512F, AVX512BW, AVX512CD, AVX512DQ, AVX512VL)
+var oneOfLevel = CombineFeatures(SYSEE, SYSCALL)
+var level1Features = CombineFeatures(CMOV, CMPXCHG8, X87, FXSR, MMX, SSE, SSE2)
+var level2Features = CombineFeatures(CMOV, CMPXCHG8, X87, FXSR, MMX, SSE, SSE2, CX16, LAHF, POPCNT, SSE3, SSE4, SSE42, SSSE3)
+var level3Features = CombineFeatures(CMOV, CMPXCHG8, X87, FXSR, MMX, SSE, SSE2, CX16, LAHF, POPCNT, SSE3, SSE4, SSE42, SSSE3, AVX, AVX2, BMI1, BMI2, F16C, FMA3, LZCNT, MOVBE, OSXSAVE)
+var level4Features = CombineFeatures(CMOV, CMPXCHG8, X87, FXSR, MMX, SSE, SSE2, CX16, LAHF, POPCNT, SSE3, SSE4, SSE42, SSSE3, AVX, AVX2, BMI1, BMI2, F16C, FMA3, LZCNT, MOVBE, OSXSAVE, AVX512F, AVX512BW, AVX512CD, AVX512DQ, AVX512VL)
// X64Level returns the microarchitecture level detected on the CPU.
// If features are lacking or non x64 mode, 0 is returned.
// See https://en.wikipedia.org/wiki/X86-64#Microarchitecture_levels
func (c CPUInfo) X64Level() int {
- if c.featureSet.hasSet(level4Features) {
+ if !c.featureSet.hasOneOf(oneOfLevel) {
+ return 0
+ }
+ if c.featureSet.hasSetP(level4Features) {
return 4
}
- if c.featureSet.hasSet(level3Features) {
+ if c.featureSet.hasSetP(level3Features) {
return 3
}
- if c.featureSet.hasSet(level2Features) {
+ if c.featureSet.hasSetP(level2Features) {
return 2
}
- if c.featureSet.hasSet(level1Features) {
+ if c.featureSet.hasSetP(level1Features) {
return 1
}
return 0
@@ -542,7 +613,7 @@ const flagMask = flagBits - 1
// flagSet contains detected cpu features and characteristics in an array of flags
type flagSet [(lastID + flagMask) / flagBits]flags
-func (s flagSet) inSet(feat FeatureID) bool {
+func (s *flagSet) inSet(feat FeatureID) bool {
return s[feat>>flagBitsLog2]&(1<<(feat&flagMask)) != 0
}
@@ -572,7 +643,17 @@ func (s *flagSet) or(other flagSet) {
}
// hasSet returns whether all features are present.
-func (s flagSet) hasSet(other flagSet) bool {
+func (s *flagSet) hasSet(other flagSet) bool {
+ for i, v := range other[:] {
+ if s[i]&v != v {
+ return false
+ }
+ }
+ return true
+}
+
+// hasSet returns whether all features are present.
+func (s *flagSet) hasSetP(other *flagSet) bool {
for i, v := range other[:] {
if s[i]&v != v {
return false
@@ -581,8 +662,18 @@ func (s flagSet) hasSet(other flagSet) bool {
return true
}
+// hasOneOf returns whether one or more features are present.
+func (s *flagSet) hasOneOf(other *flagSet) bool {
+ for i, v := range other[:] {
+ if s[i]&v != 0 {
+ return true
+ }
+ }
+ return false
+}
+
// nEnabled will return the number of enabled flags.
-func (s flagSet) nEnabled() (n int) {
+func (s *flagSet) nEnabled() (n int) {
for _, v := range s[:] {
n += bits.OnesCount64(uint64(v))
}
@@ -677,7 +768,7 @@ func threadsPerCore() int {
if vend == AMD {
// Workaround for AMD returning 0, assume 2 if >= Zen 2
// It will be more correct than not.
- fam, _ := familyModel()
+ fam, _, _ := familyModel()
_, _, _, d := cpuid(1)
if (d&(1<<28)) != 0 && fam >= 23 {
return 2
@@ -715,14 +806,27 @@ func logicalCores() int {
}
}
-func familyModel() (int, int) {
+func familyModel() (family, model, stepping int) {
if maxFunctionID() < 0x1 {
- return 0, 0
+ return 0, 0, 0
}
eax, _, _, _ := cpuid(1)
- family := ((eax >> 8) & 0xf) + ((eax >> 20) & 0xff)
- model := ((eax >> 4) & 0xf) + ((eax >> 12) & 0xf0)
- return int(family), int(model)
+ // If BaseFamily[3:0] is less than Fh then ExtendedFamily[7:0] is reserved and Family is equal to BaseFamily[3:0].
+ family = int((eax >> 8) & 0xf)
+ extFam := family == 0x6 // Intel is 0x6, needs extended model.
+ if family == 0xf {
+ // Add ExtFamily
+ family += int((eax >> 20) & 0xff)
+ extFam = true
+ }
+ // If BaseFamily[3:0] is less than 0Fh then ExtendedModel[3:0] is reserved and Model is equal to BaseModel[3:0].
+ model = int((eax >> 4) & 0xf)
+ if extFam {
+ // Add ExtModel
+ model += int((eax >> 12) & 0xf0)
+ }
+ stepping = int(eax & 0xf)
+ return family, model, stepping
}
func physicalCores() int {
@@ -857,7 +961,7 @@ func (c *CPUInfo) cacheSize() {
c.Cache.L2 = int(((ecx >> 16) & 0xFFFF) * 1024)
// CPUID Fn8000_001D_EAX_x[N:0] Cache Properties
- if maxExtendedFunction() < 0x8000001D {
+ if maxExtendedFunction() < 0x8000001D || !c.Has(TOPEXT) {
return
}
@@ -974,14 +1078,13 @@ func support() flagSet {
if mfi < 0x1 {
return fs
}
- family, model := familyModel()
+ family, model, _ := familyModel()
_, _, c, d := cpuid(1)
fs.setIf((d&(1<<0)) != 0, X87)
fs.setIf((d&(1<<8)) != 0, CMPXCHG8)
- fs.setIf((d&(1<<11)) != 0, SCE)
+ fs.setIf((d&(1<<11)) != 0, SYSEE)
fs.setIf((d&(1<<15)) != 0, CMOV)
- fs.setIf((d&(1<<22)) != 0, MMXEXT)
fs.setIf((d&(1<<23)) != 0, MMX)
fs.setIf((d&(1<<24)) != 0, FXSR)
fs.setIf((d&(1<<25)) != 0, FXSROPT)
@@ -989,9 +1092,9 @@ func support() flagSet {
fs.setIf((d&(1<<26)) != 0, SSE2)
fs.setIf((c&1) != 0, SSE3)
fs.setIf((c&(1<<5)) != 0, VMX)
- fs.setIf((c&0x00000200) != 0, SSSE3)
- fs.setIf((c&0x00080000) != 0, SSE4)
- fs.setIf((c&0x00100000) != 0, SSE42)
+ fs.setIf((c&(1<<9)) != 0, SSSE3)
+ fs.setIf((c&(1<<19)) != 0, SSE4)
+ fs.setIf((c&(1<<20)) != 0, SSE42)
fs.setIf((c&(1<<25)) != 0, AESNI)
fs.setIf((c&(1<<1)) != 0, CLMUL)
fs.setIf(c&(1<<22) != 0, MOVBE)
@@ -1068,23 +1171,38 @@ func support() flagSet {
fs.setIf(ecx&(1<<30) != 0, SGXLC)
// CPUID.(EAX=7, ECX=0).EDX
+ fs.setIf(edx&(1<<4) != 0, FSRM)
+ fs.setIf(edx&(1<<9) != 0, SRBDS_CTRL)
+ fs.setIf(edx&(1<<10) != 0, MD_CLEAR)
fs.setIf(edx&(1<<11) != 0, RTM_ALWAYS_ABORT)
fs.setIf(edx&(1<<14) != 0, SERIALIZE)
+ fs.setIf(edx&(1<<15) != 0, HYBRID_CPU)
fs.setIf(edx&(1<<16) != 0, TSXLDTRK)
fs.setIf(edx&(1<<18) != 0, PCONFIG)
fs.setIf(edx&(1<<20) != 0, CETIBT)
fs.setIf(edx&(1<<26) != 0, IBPB)
fs.setIf(edx&(1<<27) != 0, STIBP)
+ fs.setIf(edx&(1<<28) != 0, FLUSH_L1D)
+ fs.setIf(edx&(1<<29) != 0, IA32_ARCH_CAP)
+ fs.setIf(edx&(1<<30) != 0, IA32_CORE_CAP)
+ fs.setIf(edx&(1<<31) != 0, SPEC_CTRL_SSBD)
- // CPUID.(EAX=7, ECX=1)
- eax1, _, _, _ := cpuidex(7, 1)
+ // CPUID.(EAX=7, ECX=1).EAX
+ eax1, _, _, edx1 := cpuidex(7, 1)
fs.setIf(fs.inSet(AVX) && eax1&(1<<4) != 0, AVXVNNI)
+ fs.setIf(eax1&(1<<7) != 0, CMPCCXADD)
fs.setIf(eax1&(1<<10) != 0, MOVSB_ZL)
fs.setIf(eax1&(1<<11) != 0, STOSB_SHORT)
fs.setIf(eax1&(1<<12) != 0, CMPSB_SCADBS_SHORT)
fs.setIf(eax1&(1<<22) != 0, HRESET)
+ fs.setIf(eax1&(1<<23) != 0, AVXIFMA)
fs.setIf(eax1&(1<<26) != 0, LAM)
+ // CPUID.(EAX=7, ECX=1).EDX
+ fs.setIf(edx1&(1<<4) != 0, AVXVNNIINT8)
+ fs.setIf(edx1&(1<<5) != 0, AVXNECONVERT)
+ fs.setIf(edx1&(1<<14) != 0, PREFETCHI)
+
// Only detect AVX-512 features if XGETBV is supported
if c&((1<<26)|(1<<27)) == (1<<26)|(1<<27) {
// Check for OS support
@@ -1120,9 +1238,22 @@ func support() flagSet {
fs.setIf(edx&(1<<25) != 0, AMXINT8)
// eax1 = CPUID.(EAX=7, ECX=1).EAX
fs.setIf(eax1&(1<<5) != 0, AVX512BF16)
+ fs.setIf(eax1&(1<<19) != 0, WRMSRNS)
+ fs.setIf(eax1&(1<<21) != 0, AMXFP16)
+ fs.setIf(eax1&(1<<27) != 0, MSRLIST)
}
}
+
+ // CPUID.(EAX=7, ECX=2)
+ _, _, _, edx = cpuidex(7, 2)
+ fs.setIf(edx&(1<<0) != 0, PSFD)
+ fs.setIf(edx&(1<<1) != 0, IDPRED_CTRL)
+ fs.setIf(edx&(1<<2) != 0, RRSBA_CTRL)
+ fs.setIf(edx&(1<<4) != 0, BHI_CTRL)
+ fs.setIf(edx&(1<<5) != 0, MCDT_NO)
+
}
+
// Processor Extended State Enumeration Sub-leaf (EAX = 0DH, ECX = 1)
// EAX
// Bit 00: XSAVEOPT is available.
@@ -1156,20 +1287,24 @@ func support() flagSet {
fs.setIf((c&(1<<2)) != 0, SVM)
fs.setIf((c&(1<<6)) != 0, SSE4A)
fs.setIf((c&(1<<10)) != 0, IBS)
+ fs.setIf((c&(1<<22)) != 0, TOPEXT)
// EDX
- fs.setIf((d&(1<<31)) != 0, AMD3DNOW)
- fs.setIf((d&(1<<30)) != 0, AMD3DNOWEXT)
- fs.setIf((d&(1<<23)) != 0, MMX)
- fs.setIf((d&(1<<22)) != 0, MMXEXT)
+ fs.setIf(d&(1<<11) != 0, SYSCALL)
fs.setIf(d&(1<<20) != 0, NX)
+ fs.setIf(d&(1<<22) != 0, MMXEXT)
+ fs.setIf(d&(1<<23) != 0, MMX)
+ fs.setIf(d&(1<<24) != 0, FXSR)
+ fs.setIf(d&(1<<25) != 0, FXSROPT)
fs.setIf(d&(1<<27) != 0, RDTSCP)
+ fs.setIf(d&(1<<30) != 0, AMD3DNOWEXT)
+ fs.setIf(d&(1<<31) != 0, AMD3DNOW)
/* XOP and FMA4 use the AVX instruction coding scheme, so they can't be
* used unless the OS has AVX support. */
if fs.inSet(AVX) {
- fs.setIf((c&0x00000800) != 0, XOP)
- fs.setIf((c&0x00010000) != 0, FMA4)
+ fs.setIf((c&(1<<11)) != 0, XOP)
+ fs.setIf((c&(1<<16)) != 0, FMA4)
}
}
@@ -1183,9 +1318,21 @@ func support() flagSet {
if maxExtendedFunction() >= 0x80000008 {
_, b, _, _ := cpuid(0x80000008)
+ fs.setIf(b&(1<<28) != 0, PSFD)
+ fs.setIf(b&(1<<27) != 0, CPPC)
+ fs.setIf(b&(1<<24) != 0, SPEC_CTRL_SSBD)
+ fs.setIf(b&(1<<23) != 0, PPIN)
+ fs.setIf(b&(1<<21) != 0, TLB_FLUSH_NESTED)
+ fs.setIf(b&(1<<20) != 0, EFER_LMSLE_UNS)
+ fs.setIf(b&(1<<19) != 0, IBRS_PROVIDES_SMP)
+ fs.setIf(b&(1<<18) != 0, IBRS_PREFERRED)
+ fs.setIf(b&(1<<17) != 0, STIBP_ALWAYSON)
+ fs.setIf(b&(1<<15) != 0, STIBP)
+ fs.setIf(b&(1<<14) != 0, IBRS)
+ fs.setIf((b&(1<<13)) != 0, INT_WBINVD)
+ fs.setIf(b&(1<<12) != 0, IBPB)
fs.setIf((b&(1<<9)) != 0, WBNOINVD)
fs.setIf((b&(1<<8)) != 0, MCOMMIT)
- fs.setIf((b&(1<<13)) != 0, INT_WBINVD)
fs.setIf((b&(1<<4)) != 0, RDPRU)
fs.setIf((b&(1<<3)) != 0, INVLPGB)
fs.setIf((b&(1<<1)) != 0, MSRIRC)
@@ -1206,6 +1353,13 @@ func support() flagSet {
fs.setIf((edx>>12)&1 == 1, SVMPFT)
}
+ if maxExtendedFunction() >= 0x8000001a {
+ eax, _, _, _ := cpuid(0x8000001a)
+ fs.setIf((eax>>0)&1 == 1, FP128)
+ fs.setIf((eax>>1)&1 == 1, MOVU)
+ fs.setIf((eax>>2)&1 == 1, FP256)
+ }
+
if maxExtendedFunction() >= 0x8000001b && fs.inSet(IBS) {
eax, _, _, _ := cpuid(0x8000001b)
fs.setIf((eax>>0)&1 == 1, IBSFFV)
@@ -1216,6 +1370,10 @@ func support() flagSet {
fs.setIf((eax>>5)&1 == 1, IBSBRNTRGT)
fs.setIf((eax>>6)&1 == 1, IBSOPCNTEXT)
fs.setIf((eax>>7)&1 == 1, IBSRIPINVALIDCHK)
+ fs.setIf((eax>>8)&1 == 1, IBS_OPFUSE)
+ fs.setIf((eax>>9)&1 == 1, IBS_FETCH_CTLX)
+ fs.setIf((eax>>10)&1 == 1, IBS_OPDATA4) // Doc says "Fixed,0. IBS op data 4 MSR supported", but assuming they mean 1.
+ fs.setIf((eax>>11)&1 == 1, IBS_ZEN4)
}
if maxExtendedFunction() >= 0x8000001f && vend == AMD {
@@ -1236,6 +1394,13 @@ func support() flagSet {
fs.setIf((a>>24)&1 == 1, VMSA_REGPROT)
}
+ if mfi >= 0x21 {
+ // Intel Trusted Domain Extensions Guests have their own cpuid leaf (0x21).
+ _, ebx, ecx, edx := cpuid(0x21)
+ identity := string(valAsString(ebx, edx, ecx))
+ fs.setIf(identity == "IntelTDX ", TDX_GUEST)
+ }
+
return fs
}
diff --git a/vendor/github.com/klauspost/cpuid/v2/detect_x86.go b/vendor/github.com/klauspost/cpuid/v2/detect_x86.go
index 35678d8a3..c946824ec 100644
--- a/vendor/github.com/klauspost/cpuid/v2/detect_x86.go
+++ b/vendor/github.com/klauspost/cpuid/v2/detect_x86.go
@@ -24,7 +24,7 @@ func addInfo(c *CPUInfo, safe bool) {
c.maxExFunc = maxExtendedFunction()
c.BrandName = brandName()
c.CacheLine = cacheLine()
- c.Family, c.Model = familyModel()
+ c.Family, c.Model, c.Stepping = familyModel()
c.featureSet = support()
c.SGX = hasSGX(c.featureSet.inSet(SGX), c.featureSet.inSet(SGXLC))
c.ThreadsPerCore = threadsPerCore()
diff --git a/vendor/github.com/klauspost/cpuid/v2/featureid_string.go b/vendor/github.com/klauspost/cpuid/v2/featureid_string.go
index a9b3e36c7..024c706af 100644
--- a/vendor/github.com/klauspost/cpuid/v2/featureid_string.go
+++ b/vendor/github.com/klauspost/cpuid/v2/featureid_string.go
@@ -13,174 +13,213 @@ func _() {
_ = x[AMD3DNOW-3]
_ = x[AMD3DNOWEXT-4]
_ = x[AMXBF16-5]
- _ = x[AMXINT8-6]
- _ = x[AMXTILE-7]
- _ = x[AVX-8]
- _ = x[AVX2-9]
- _ = x[AVX512BF16-10]
- _ = x[AVX512BITALG-11]
- _ = x[AVX512BW-12]
- _ = x[AVX512CD-13]
- _ = x[AVX512DQ-14]
- _ = x[AVX512ER-15]
- _ = x[AVX512F-16]
- _ = x[AVX512FP16-17]
- _ = x[AVX512IFMA-18]
- _ = x[AVX512PF-19]
- _ = x[AVX512VBMI-20]
- _ = x[AVX512VBMI2-21]
- _ = x[AVX512VL-22]
- _ = x[AVX512VNNI-23]
- _ = x[AVX512VP2INTERSECT-24]
- _ = x[AVX512VPOPCNTDQ-25]
- _ = x[AVXSLOW-26]
- _ = x[AVXVNNI-27]
- _ = x[BMI1-28]
- _ = x[BMI2-29]
- _ = x[CETIBT-30]
- _ = x[CETSS-31]
- _ = x[CLDEMOTE-32]
- _ = x[CLMUL-33]
- _ = x[CLZERO-34]
- _ = x[CMOV-35]
- _ = x[CMPSB_SCADBS_SHORT-36]
- _ = x[CMPXCHG8-37]
- _ = x[CPBOOST-38]
- _ = x[CX16-39]
- _ = x[ENQCMD-40]
- _ = x[ERMS-41]
- _ = x[F16C-42]
- _ = x[FMA3-43]
- _ = x[FMA4-44]
- _ = x[FXSR-45]
- _ = x[FXSROPT-46]
- _ = x[GFNI-47]
- _ = x[HLE-48]
- _ = x[HRESET-49]
- _ = x[HTT-50]
- _ = x[HWA-51]
- _ = x[HYPERVISOR-52]
- _ = x[IBPB-53]
- _ = x[IBS-54]
- _ = x[IBSBRNTRGT-55]
- _ = x[IBSFETCHSAM-56]
- _ = x[IBSFFV-57]
- _ = x[IBSOPCNT-58]
- _ = x[IBSOPCNTEXT-59]
- _ = x[IBSOPSAM-60]
- _ = x[IBSRDWROPCNT-61]
- _ = x[IBSRIPINVALIDCHK-62]
- _ = x[IBS_PREVENTHOST-63]
- _ = x[INT_WBINVD-64]
- _ = x[INVLPGB-65]
- _ = x[LAHF-66]
- _ = x[LAM-67]
- _ = x[LBRVIRT-68]
- _ = x[LZCNT-69]
- _ = x[MCAOVERFLOW-70]
- _ = x[MCOMMIT-71]
- _ = x[MMX-72]
- _ = x[MMXEXT-73]
- _ = x[MOVBE-74]
- _ = x[MOVDIR64B-75]
- _ = x[MOVDIRI-76]
- _ = x[MOVSB_ZL-77]
- _ = x[MPX-78]
- _ = x[MSRIRC-79]
- _ = x[MSR_PAGEFLUSH-80]
- _ = x[NRIPS-81]
- _ = x[NX-82]
- _ = x[OSXSAVE-83]
- _ = x[PCONFIG-84]
- _ = x[POPCNT-85]
- _ = x[RDPRU-86]
- _ = x[RDRAND-87]
- _ = x[RDSEED-88]
- _ = x[RDTSCP-89]
- _ = x[RTM-90]
- _ = x[RTM_ALWAYS_ABORT-91]
- _ = x[SCE-92]
- _ = x[SERIALIZE-93]
- _ = x[SEV-94]
- _ = x[SEV_64BIT-95]
- _ = x[SEV_ALTERNATIVE-96]
- _ = x[SEV_DEBUGSWAP-97]
- _ = x[SEV_ES-98]
- _ = x[SEV_RESTRICTED-99]
- _ = x[SEV_SNP-100]
- _ = x[SGX-101]
- _ = x[SGXLC-102]
- _ = x[SHA-103]
- _ = x[SME-104]
- _ = x[SME_COHERENT-105]
- _ = x[SSE-106]
- _ = x[SSE2-107]
- _ = x[SSE3-108]
- _ = x[SSE4-109]
- _ = x[SSE42-110]
- _ = x[SSE4A-111]
- _ = x[SSSE3-112]
- _ = x[STIBP-113]
- _ = x[STOSB_SHORT-114]
- _ = x[SUCCOR-115]
- _ = x[SVM-116]
- _ = x[SVMDA-117]
- _ = x[SVMFBASID-118]
- _ = x[SVML-119]
- _ = x[SVMNP-120]
- _ = x[SVMPF-121]
- _ = x[SVMPFT-122]
- _ = x[TBM-123]
- _ = x[TME-124]
- _ = x[TSCRATEMSR-125]
- _ = x[TSXLDTRK-126]
- _ = x[VAES-127]
- _ = x[VMCBCLEAN-128]
- _ = x[VMPL-129]
- _ = x[VMSA_REGPROT-130]
- _ = x[VMX-131]
- _ = x[VPCLMULQDQ-132]
- _ = x[VTE-133]
- _ = x[WAITPKG-134]
- _ = x[WBNOINVD-135]
- _ = x[X87-136]
- _ = x[XGETBV1-137]
- _ = x[XOP-138]
- _ = x[XSAVE-139]
- _ = x[XSAVEC-140]
- _ = x[XSAVEOPT-141]
- _ = x[XSAVES-142]
- _ = x[AESARM-143]
- _ = x[ARMCPUID-144]
- _ = x[ASIMD-145]
- _ = x[ASIMDDP-146]
- _ = x[ASIMDHP-147]
- _ = x[ASIMDRDM-148]
- _ = x[ATOMICS-149]
- _ = x[CRC32-150]
- _ = x[DCPOP-151]
- _ = x[EVTSTRM-152]
- _ = x[FCMA-153]
- _ = x[FP-154]
- _ = x[FPHP-155]
- _ = x[GPA-156]
- _ = x[JSCVT-157]
- _ = x[LRCPC-158]
- _ = x[PMULL-159]
- _ = x[SHA1-160]
- _ = x[SHA2-161]
- _ = x[SHA3-162]
- _ = x[SHA512-163]
- _ = x[SM3-164]
- _ = x[SM4-165]
- _ = x[SVE-166]
- _ = x[lastID-167]
+ _ = x[AMXFP16-6]
+ _ = x[AMXINT8-7]
+ _ = x[AMXTILE-8]
+ _ = x[AVX-9]
+ _ = x[AVX2-10]
+ _ = x[AVX512BF16-11]
+ _ = x[AVX512BITALG-12]
+ _ = x[AVX512BW-13]
+ _ = x[AVX512CD-14]
+ _ = x[AVX512DQ-15]
+ _ = x[AVX512ER-16]
+ _ = x[AVX512F-17]
+ _ = x[AVX512FP16-18]
+ _ = x[AVX512IFMA-19]
+ _ = x[AVX512PF-20]
+ _ = x[AVX512VBMI-21]
+ _ = x[AVX512VBMI2-22]
+ _ = x[AVX512VL-23]
+ _ = x[AVX512VNNI-24]
+ _ = x[AVX512VP2INTERSECT-25]
+ _ = x[AVX512VPOPCNTDQ-26]
+ _ = x[AVXIFMA-27]
+ _ = x[AVXNECONVERT-28]
+ _ = x[AVXSLOW-29]
+ _ = x[AVXVNNI-30]
+ _ = x[AVXVNNIINT8-31]
+ _ = x[BHI_CTRL-32]
+ _ = x[BMI1-33]
+ _ = x[BMI2-34]
+ _ = x[CETIBT-35]
+ _ = x[CETSS-36]
+ _ = x[CLDEMOTE-37]
+ _ = x[CLMUL-38]
+ _ = x[CLZERO-39]
+ _ = x[CMOV-40]
+ _ = x[CMPCCXADD-41]
+ _ = x[CMPSB_SCADBS_SHORT-42]
+ _ = x[CMPXCHG8-43]
+ _ = x[CPBOOST-44]
+ _ = x[CPPC-45]
+ _ = x[CX16-46]
+ _ = x[EFER_LMSLE_UNS-47]
+ _ = x[ENQCMD-48]
+ _ = x[ERMS-49]
+ _ = x[F16C-50]
+ _ = x[FLUSH_L1D-51]
+ _ = x[FMA3-52]
+ _ = x[FMA4-53]
+ _ = x[FP128-54]
+ _ = x[FP256-55]
+ _ = x[FSRM-56]
+ _ = x[FXSR-57]
+ _ = x[FXSROPT-58]
+ _ = x[GFNI-59]
+ _ = x[HLE-60]
+ _ = x[HRESET-61]
+ _ = x[HTT-62]
+ _ = x[HWA-63]
+ _ = x[HYBRID_CPU-64]
+ _ = x[HYPERVISOR-65]
+ _ = x[IA32_ARCH_CAP-66]
+ _ = x[IA32_CORE_CAP-67]
+ _ = x[IBPB-68]
+ _ = x[IBRS-69]
+ _ = x[IBRS_PREFERRED-70]
+ _ = x[IBRS_PROVIDES_SMP-71]
+ _ = x[IBS-72]
+ _ = x[IBSBRNTRGT-73]
+ _ = x[IBSFETCHSAM-74]
+ _ = x[IBSFFV-75]
+ _ = x[IBSOPCNT-76]
+ _ = x[IBSOPCNTEXT-77]
+ _ = x[IBSOPSAM-78]
+ _ = x[IBSRDWROPCNT-79]
+ _ = x[IBSRIPINVALIDCHK-80]
+ _ = x[IBS_FETCH_CTLX-81]
+ _ = x[IBS_OPDATA4-82]
+ _ = x[IBS_OPFUSE-83]
+ _ = x[IBS_PREVENTHOST-84]
+ _ = x[IBS_ZEN4-85]
+ _ = x[IDPRED_CTRL-86]
+ _ = x[INT_WBINVD-87]
+ _ = x[INVLPGB-88]
+ _ = x[LAHF-89]
+ _ = x[LAM-90]
+ _ = x[LBRVIRT-91]
+ _ = x[LZCNT-92]
+ _ = x[MCAOVERFLOW-93]
+ _ = x[MCDT_NO-94]
+ _ = x[MCOMMIT-95]
+ _ = x[MD_CLEAR-96]
+ _ = x[MMX-97]
+ _ = x[MMXEXT-98]
+ _ = x[MOVBE-99]
+ _ = x[MOVDIR64B-100]
+ _ = x[MOVDIRI-101]
+ _ = x[MOVSB_ZL-102]
+ _ = x[MOVU-103]
+ _ = x[MPX-104]
+ _ = x[MSRIRC-105]
+ _ = x[MSRLIST-106]
+ _ = x[MSR_PAGEFLUSH-107]
+ _ = x[NRIPS-108]
+ _ = x[NX-109]
+ _ = x[OSXSAVE-110]
+ _ = x[PCONFIG-111]
+ _ = x[POPCNT-112]
+ _ = x[PPIN-113]
+ _ = x[PREFETCHI-114]
+ _ = x[PSFD-115]
+ _ = x[RDPRU-116]
+ _ = x[RDRAND-117]
+ _ = x[RDSEED-118]
+ _ = x[RDTSCP-119]
+ _ = x[RRSBA_CTRL-120]
+ _ = x[RTM-121]
+ _ = x[RTM_ALWAYS_ABORT-122]
+ _ = x[SERIALIZE-123]
+ _ = x[SEV-124]
+ _ = x[SEV_64BIT-125]
+ _ = x[SEV_ALTERNATIVE-126]
+ _ = x[SEV_DEBUGSWAP-127]
+ _ = x[SEV_ES-128]
+ _ = x[SEV_RESTRICTED-129]
+ _ = x[SEV_SNP-130]
+ _ = x[SGX-131]
+ _ = x[SGXLC-132]
+ _ = x[SHA-133]
+ _ = x[SME-134]
+ _ = x[SME_COHERENT-135]
+ _ = x[SPEC_CTRL_SSBD-136]
+ _ = x[SRBDS_CTRL-137]
+ _ = x[SSE-138]
+ _ = x[SSE2-139]
+ _ = x[SSE3-140]
+ _ = x[SSE4-141]
+ _ = x[SSE42-142]
+ _ = x[SSE4A-143]
+ _ = x[SSSE3-144]
+ _ = x[STIBP-145]
+ _ = x[STIBP_ALWAYSON-146]
+ _ = x[STOSB_SHORT-147]
+ _ = x[SUCCOR-148]
+ _ = x[SVM-149]
+ _ = x[SVMDA-150]
+ _ = x[SVMFBASID-151]
+ _ = x[SVML-152]
+ _ = x[SVMNP-153]
+ _ = x[SVMPF-154]
+ _ = x[SVMPFT-155]
+ _ = x[SYSCALL-156]
+ _ = x[SYSEE-157]
+ _ = x[TBM-158]
+ _ = x[TDX_GUEST-159]
+ _ = x[TLB_FLUSH_NESTED-160]
+ _ = x[TME-161]
+ _ = x[TOPEXT-162]
+ _ = x[TSCRATEMSR-163]
+ _ = x[TSXLDTRK-164]
+ _ = x[VAES-165]
+ _ = x[VMCBCLEAN-166]
+ _ = x[VMPL-167]
+ _ = x[VMSA_REGPROT-168]
+ _ = x[VMX-169]
+ _ = x[VPCLMULQDQ-170]
+ _ = x[VTE-171]
+ _ = x[WAITPKG-172]
+ _ = x[WBNOINVD-173]
+ _ = x[WRMSRNS-174]
+ _ = x[X87-175]
+ _ = x[XGETBV1-176]
+ _ = x[XOP-177]
+ _ = x[XSAVE-178]
+ _ = x[XSAVEC-179]
+ _ = x[XSAVEOPT-180]
+ _ = x[XSAVES-181]
+ _ = x[AESARM-182]
+ _ = x[ARMCPUID-183]
+ _ = x[ASIMD-184]
+ _ = x[ASIMDDP-185]
+ _ = x[ASIMDHP-186]
+ _ = x[ASIMDRDM-187]
+ _ = x[ATOMICS-188]
+ _ = x[CRC32-189]
+ _ = x[DCPOP-190]
+ _ = x[EVTSTRM-191]
+ _ = x[FCMA-192]
+ _ = x[FP-193]
+ _ = x[FPHP-194]
+ _ = x[GPA-195]
+ _ = x[JSCVT-196]
+ _ = x[LRCPC-197]
+ _ = x[PMULL-198]
+ _ = x[SHA1-199]
+ _ = x[SHA2-200]
+ _ = x[SHA3-201]
+ _ = x[SHA512-202]
+ _ = x[SM3-203]
+ _ = x[SM4-204]
+ _ = x[SVE-205]
+ _ = x[lastID-206]
_ = x[firstID-0]
}
-const _FeatureID_name = "firstIDADXAESNIAMD3DNOWAMD3DNOWEXTAMXBF16AMXINT8AMXTILEAVXAVX2AVX512BF16AVX512BITALGAVX512BWAVX512CDAVX512DQAVX512ERAVX512FAVX512FP16AVX512IFMAAVX512PFAVX512VBMIAVX512VBMI2AVX512VLAVX512VNNIAVX512VP2INTERSECTAVX512VPOPCNTDQAVXSLOWAVXVNNIBMI1BMI2CETIBTCETSSCLDEMOTECLMULCLZEROCMOVCMPSB_SCADBS_SHORTCMPXCHG8CPBOOSTCX16ENQCMDERMSF16CFMA3FMA4FXSRFXSROPTGFNIHLEHRESETHTTHWAHYPERVISORIBPBIBSIBSBRNTRGTIBSFETCHSAMIBSFFVIBSOPCNTIBSOPCNTEXTIBSOPSAMIBSRDWROPCNTIBSRIPINVALIDCHKIBS_PREVENTHOSTINT_WBINVDINVLPGBLAHFLAMLBRVIRTLZCNTMCAOVERFLOWMCOMMITMMXMMXEXTMOVBEMOVDIR64BMOVDIRIMOVSB_ZLMPXMSRIRCMSR_PAGEFLUSHNRIPSNXOSXSAVEPCONFIGPOPCNTRDPRURDRANDRDSEEDRDTSCPRTMRTM_ALWAYS_ABORTSCESERIALIZESEVSEV_64BITSEV_ALTERNATIVESEV_DEBUGSWAPSEV_ESSEV_RESTRICTEDSEV_SNPSGXSGXLCSHASMESME_COHERENTSSESSE2SSE3SSE4SSE42SSE4ASSSE3STIBPSTOSB_SHORTSUCCORSVMSVMDASVMFBASIDSVMLSVMNPSVMPFSVMPFTTBMTMETSCRATEMSRTSXLDTRKVAESVMCBCLEANVMPLVMSA_REGPROTVMXVPCLMULQDQVTEWAITPKGWBNOINVDX87XGETBV1XOPXSAVEXSAVECXSAVEOPTXSAVESAESARMARMCPUIDASIMDASIMDDPASIMDHPASIMDRDMATOMICSCRC32DCPOPEVTSTRMFCMAFPFPHPGPAJSCVTLRCPCPMULLSHA1SHA2SHA3SHA512SM3SM4SVElastID"
+const _FeatureID_name = "firstIDADXAESNIAMD3DNOWAMD3DNOWEXTAMXBF16AMXFP16AMXINT8AMXTILEAVXAVX2AVX512BF16AVX512BITALGAVX512BWAVX512CDAVX512DQAVX512ERAVX512FAVX512FP16AVX512IFMAAVX512PFAVX512VBMIAVX512VBMI2AVX512VLAVX512VNNIAVX512VP2INTERSECTAVX512VPOPCNTDQAVXIFMAAVXNECONVERTAVXSLOWAVXVNNIAVXVNNIINT8BHI_CTRLBMI1BMI2CETIBTCETSSCLDEMOTECLMULCLZEROCMOVCMPCCXADDCMPSB_SCADBS_SHORTCMPXCHG8CPBOOSTCPPCCX16EFER_LMSLE_UNSENQCMDERMSF16CFLUSH_L1DFMA3FMA4FP128FP256FSRMFXSRFXSROPTGFNIHLEHRESETHTTHWAHYBRID_CPUHYPERVISORIA32_ARCH_CAPIA32_CORE_CAPIBPBIBRSIBRS_PREFERREDIBRS_PROVIDES_SMPIBSIBSBRNTRGTIBSFETCHSAMIBSFFVIBSOPCNTIBSOPCNTEXTIBSOPSAMIBSRDWROPCNTIBSRIPINVALIDCHKIBS_FETCH_CTLXIBS_OPDATA4IBS_OPFUSEIBS_PREVENTHOSTIBS_ZEN4IDPRED_CTRLINT_WBINVDINVLPGBLAHFLAMLBRVIRTLZCNTMCAOVERFLOWMCDT_NOMCOMMITMD_CLEARMMXMMXEXTMOVBEMOVDIR64BMOVDIRIMOVSB_ZLMOVUMPXMSRIRCMSRLISTMSR_PAGEFLUSHNRIPSNXOSXSAVEPCONFIGPOPCNTPPINPREFETCHIPSFDRDPRURDRANDRDSEEDRDTSCPRRSBA_CTRLRTMRTM_ALWAYS_ABORTSERIALIZESEVSEV_64BITSEV_ALTERNATIVESEV_DEBUGSWAPSEV_ESSEV_RESTRICTEDSEV_SNPSGXSGXLCSHASMESME_COHERENTSPEC_CTRL_SSBDSRBDS_CTRLSSESSE2SSE3SSE4SSE42SSE4ASSSE3STIBPSTIBP_ALWAYSONSTOSB_SHORTSUCCORSVMSVMDASVMFBASIDSVMLSVMNPSVMPFSVMPFTSYSCALLSYSEETBMTDX_GUESTTLB_FLUSH_NESTEDTMETOPEXTTSCRATEMSRTSXLDTRKVAESVMCBCLEANVMPLVMSA_REGPROTVMXVPCLMULQDQVTEWAITPKGWBNOINVDWRMSRNSX87XGETBV1XOPXSAVEXSAVECXSAVEOPTXSAVESAESARMARMCPUIDASIMDASIMDDPASIMDHPASIMDRDMATOMICSCRC32DCPOPEVTSTRMFCMAFPFPHPGPAJSCVTLRCPCPMULLSHA1SHA2SHA3SHA512SM3SM4SVElastID"
-var _FeatureID_index = [...]uint16{0, 7, 10, 15, 23, 34, 41, 48, 55, 58, 62, 72, 84, 92, 100, 108, 116, 123, 133, 143, 151, 161, 172, 180, 190, 208, 223, 230, 237, 241, 245, 251, 256, 264, 269, 275, 279, 297, 305, 312, 316, 322, 326, 330, 334, 338, 342, 349, 353, 356, 362, 365, 368, 378, 382, 385, 395, 406, 412, 420, 431, 439, 451, 467, 482, 492, 499, 503, 506, 513, 518, 529, 536, 539, 545, 550, 559, 566, 574, 577, 583, 596, 601, 603, 610, 617, 623, 628, 634, 640, 646, 649, 665, 668, 677, 680, 689, 704, 717, 723, 737, 744, 747, 752, 755, 758, 770, 773, 777, 781, 785, 790, 795, 800, 805, 816, 822, 825, 830, 839, 843, 848, 853, 859, 862, 865, 875, 883, 887, 896, 900, 912, 915, 925, 928, 935, 943, 946, 953, 956, 961, 967, 975, 981, 987, 995, 1000, 1007, 1014, 1022, 1029, 1034, 1039, 1046, 1050, 1052, 1056, 1059, 1064, 1069, 1074, 1078, 1082, 1086, 1092, 1095, 1098, 1101, 1107}
+var _FeatureID_index = [...]uint16{0, 7, 10, 15, 23, 34, 41, 48, 55, 62, 65, 69, 79, 91, 99, 107, 115, 123, 130, 140, 150, 158, 168, 179, 187, 197, 215, 230, 237, 249, 256, 263, 274, 282, 286, 290, 296, 301, 309, 314, 320, 324, 333, 351, 359, 366, 370, 374, 388, 394, 398, 402, 411, 415, 419, 424, 429, 433, 437, 444, 448, 451, 457, 460, 463, 473, 483, 496, 509, 513, 517, 531, 548, 551, 561, 572, 578, 586, 597, 605, 617, 633, 647, 658, 668, 683, 691, 702, 712, 719, 723, 726, 733, 738, 749, 756, 763, 771, 774, 780, 785, 794, 801, 809, 813, 816, 822, 829, 842, 847, 849, 856, 863, 869, 873, 882, 886, 891, 897, 903, 909, 919, 922, 938, 947, 950, 959, 974, 987, 993, 1007, 1014, 1017, 1022, 1025, 1028, 1040, 1054, 1064, 1067, 1071, 1075, 1079, 1084, 1089, 1094, 1099, 1113, 1124, 1130, 1133, 1138, 1147, 1151, 1156, 1161, 1167, 1174, 1179, 1182, 1191, 1207, 1210, 1216, 1226, 1234, 1238, 1247, 1251, 1263, 1266, 1276, 1279, 1286, 1294, 1301, 1304, 1311, 1314, 1319, 1325, 1333, 1339, 1345, 1353, 1358, 1365, 1372, 1380, 1387, 1392, 1397, 1404, 1408, 1410, 1414, 1417, 1422, 1427, 1432, 1436, 1440, 1444, 1450, 1453, 1456, 1459, 1465}
func (i FeatureID) String() string {
if i < 0 || i >= FeatureID(len(_FeatureID_index)-1) {
diff --git a/vendor/github.com/klauspost/cpuid/v2/os_darwin_arm64.go b/vendor/github.com/klauspost/cpuid/v2/os_darwin_arm64.go
index d91d02109..84b1acd21 100644
--- a/vendor/github.com/klauspost/cpuid/v2/os_darwin_arm64.go
+++ b/vendor/github.com/klauspost/cpuid/v2/os_darwin_arm64.go
@@ -83,7 +83,7 @@ func tryToFillCPUInfoFomSysctl(c *CPUInfo) {
c.Model = sysctlGetInt(0, "machdep.cpu.model")
c.CacheLine = sysctlGetInt64(0, "hw.cachelinesize")
c.Cache.L1I = sysctlGetInt64(-1, "hw.l1icachesize")
- c.Cache.L1D = sysctlGetInt64(-1, "hw.l1icachesize")
+ c.Cache.L1D = sysctlGetInt64(-1, "hw.l1dcachesize")
c.Cache.L2 = sysctlGetInt64(-1, "hw.l2cachesize")
c.Cache.L3 = sysctlGetInt64(-1, "hw.l3cachesize")
diff --git a/vendor/github.com/koron/go-ssdp/Makefile b/vendor/github.com/koron/go-ssdp/Makefile
index 7303aa1be..a077c5a2d 100644
--- a/vendor/github.com/koron/go-ssdp/Makefile
+++ b/vendor/github.com/koron/go-ssdp/Makefile
@@ -6,11 +6,11 @@ build:
.PHONY: test
test:
- go test ./...
+ go test -gcflags '-e' ./...
-.PHONY: test-race
-test-race:
- go test -race .
+.PHONY: bench
+bench:
+ go test -bench ./...
.PHONY: tags
tags:
@@ -19,20 +19,16 @@ tags:
.PHONY: cover
cover:
mkdir -p tmp
- go test -coverprofile tmp/_cover.out .
+ go test -coverprofile tmp/_cover.out . ./internal/...
go tool cover -html tmp/_cover.out -o tmp/cover.html
.PHONY: checkall
-checkall: vet lint staticcheck
+checkall: vet staticcheck
.PHONY: vet
vet:
go vet ./...
-.PHONY: lint
-lint:
- golint ./...
-
.PHONY: staticcheck
staticcheck:
staticcheck ./...
@@ -45,6 +41,10 @@ clean: examples-clean
# based on: github.com/koron-go/_skeleton/Makefile
+.PHONY: test-race
+test-race:
+ go test -race .
+
.PHONY: examples
examples: examples-build
diff --git a/vendor/github.com/koron/go-ssdp/advertise.go b/vendor/github.com/koron/go-ssdp/advertise.go
index 81d94b73a..e64fcde59 100644
--- a/vendor/github.com/koron/go-ssdp/advertise.go
+++ b/vendor/github.com/koron/go-ssdp/advertise.go
@@ -8,42 +8,50 @@ import (
"net"
"net/http"
"sync"
+
+ "github.com/koron/go-ssdp/internal/multicast"
+ "github.com/koron/go-ssdp/internal/ssdplog"
)
type message struct {
to net.Addr
- data []byte
+ data multicast.DataProvider
}
// Advertiser is a server to advertise a service.
type Advertiser struct {
- st string
- usn string
- location string
- server string
- maxAge int
+ st string
+ usn string
+ locProv LocationProvider
+ server string
+ maxAge int
- conn *multicastConn
+ conn *multicast.Conn
ch chan *message
wg sync.WaitGroup
wgS sync.WaitGroup
}
// Advertise starts advertisement of service.
-func Advertise(st, usn, location, server string, maxAge int) (*Advertiser, error) {
- conn, err := multicastListen(recvAddrResolver)
+// location should be a string or a ssdp.LocationProvider.
+func Advertise(st, usn string, location interface{}, server string, maxAge int) (*Advertiser, error) {
+ locProv, err := toLocationProvider(location)
+ if err != nil {
+ return nil, err
+ }
+ conn, err := multicast.Listen(multicast.RecvAddrResolver)
if err != nil {
return nil, err
}
- logf("SSDP advertise on: %s", conn.LocalAddr().String())
+ ssdplog.Printf("SSDP advertise on: %s", conn.LocalAddr().String())
a := &Advertiser{
- st: st,
- usn: usn,
- location: location,
- server: server,
- maxAge: maxAge,
- conn: conn,
- ch: make(chan *message),
+ st: st,
+ usn: usn,
+ locProv: locProv,
+ server: server,
+ maxAge: maxAge,
+ conn: conn,
+ ch: make(chan *message),
}
a.wg.Add(2)
a.wgS.Add(1)
@@ -60,9 +68,10 @@ func Advertise(st, usn, location, server string, maxAge int) (*Advertiser, error
}
func (a *Advertiser) recvMain() error {
- err := a.conn.readPackets(0, func(addr net.Addr, data []byte) error {
+ // TODO: update listening interfaces of a.conn
+ err := a.conn.ReadPackets(0, func(addr net.Addr, data []byte) error {
if err := a.handleRaw(addr, data); err != nil {
- logf("failed to handle message: %s", err)
+ ssdplog.Printf("failed to handle message: %s", err)
}
return nil
})
@@ -72,16 +81,13 @@ func (a *Advertiser) recvMain() error {
return nil
}
-func (a *Advertiser) sendMain() error {
+func (a *Advertiser) sendMain() {
for msg := range a.ch {
_, err := a.conn.WriteTo(msg.data, msg.to)
if err != nil {
- if nerr, ok := err.(net.Error); !ok || !nerr.Temporary() {
- logf("failed to send: %s", err)
- }
+ ssdplog.Printf("failed to send: %s", err)
}
}
- return nil
}
func (a *Advertiser) handleRaw(from net.Addr, raw []byte) error {
@@ -104,19 +110,16 @@ func (a *Advertiser) handleRaw(from net.Addr, raw []byte) error {
// skip when ST is not matched/expected.
return nil
}
- logf("received M-SEARCH MAN=%s ST=%s from %s", man, st, from.String())
+ ssdplog.Printf("received M-SEARCH MAN=%s ST=%s from %s", man, st, from.String())
// build and send a response.
- msg, err := buildOK(a.st, a.usn, a.location, a.server, a.maxAge)
- if err != nil {
- return err
- }
- a.ch <- &message{to: from, data: msg}
+ msg := buildOK(a.st, a.usn, a.locProv.Location(from, nil), a.server, a.maxAge)
+ a.ch <- &message{to: from, data: multicast.BytesDataProvider(msg)}
return nil
}
-func buildOK(st, usn, location, server string, maxAge int) ([]byte, error) {
+func buildOK(st, usn, location, server string, maxAge int) []byte {
+ // bytes.Buffer#Write() is never fail, so we can omit error checks.
b := new(bytes.Buffer)
- // FIXME: error should be checked.
b.WriteString("HTTP/1.1 200 OK\r\n")
fmt.Fprintf(b, "EXT: \r\n")
fmt.Fprintf(b, "ST: %s\r\n", st)
@@ -129,7 +132,7 @@ func buildOK(st, usn, location, server string, maxAge int) ([]byte, error) {
}
fmt.Fprintf(b, "CACHE-CONTROL: max-age=%d\r\n", maxAge)
b.WriteString("\r\n")
- return b.Bytes(), nil
+ return b.Bytes()
}
// Close stops advertisement.
@@ -149,23 +152,26 @@ func (a *Advertiser) Close() error {
// Alive announces ssdp:alive message.
func (a *Advertiser) Alive() error {
- addr, err := multicastSendAddr()
+ addr, err := multicast.SendAddr()
if err != nil {
return err
}
- msg, err := buildAlive(addr, a.st, a.usn, a.location, a.server,
- a.maxAge)
- if err != nil {
- return err
+ msg := &aliveDataProvider{
+ host: addr,
+ nt: a.st,
+ usn: a.usn,
+ location: a.locProv,
+ server: a.server,
+ maxAge: a.maxAge,
}
a.ch <- &message{to: addr, data: msg}
- logf("sent alive")
+ ssdplog.Printf("sent alive")
return nil
}
// Bye announces ssdp:byebye message.
func (a *Advertiser) Bye() error {
- addr, err := multicastSendAddr()
+ addr, err := multicast.SendAddr()
if err != nil {
return err
}
@@ -173,7 +179,7 @@ func (a *Advertiser) Bye() error {
if err != nil {
return err
}
- a.ch <- &message{to: addr, data: msg}
- logf("sent bye")
+ a.ch <- &message{to: addr, data: multicast.BytesDataProvider(msg)}
+ ssdplog.Printf("sent bye")
return nil
}
diff --git a/vendor/github.com/koron/go-ssdp/announce.go b/vendor/github.com/koron/go-ssdp/announce.go
index b1c2008c4..9874d01fb 100644
--- a/vendor/github.com/koron/go-ssdp/announce.go
+++ b/vendor/github.com/koron/go-ssdp/announce.go
@@ -4,24 +4,35 @@ import (
"bytes"
"fmt"
"net"
+
+ "github.com/koron/go-ssdp/internal/multicast"
)
// AnnounceAlive sends ssdp:alive message.
-func AnnounceAlive(nt, usn, location, server string, maxAge int, localAddr string) error {
+// location should be a string or a ssdp.LocationProvider.
+func AnnounceAlive(nt, usn string, location interface{}, server string, maxAge int, localAddr string) error {
+ locProv, err := toLocationProvider(location)
+ if err != nil {
+ return err
+ }
// dial multicast UDP packet.
- conn, err := multicastListen(&udpAddrResolver{addr: localAddr})
+ conn, err := multicast.Listen(&multicast.AddrResolver{Addr: localAddr})
if err != nil {
return err
}
defer conn.Close()
// build and send message.
- addr, err := multicastSendAddr()
+ addr, err := multicast.SendAddr()
if err != nil {
return err
}
- msg, err := buildAlive(addr, nt, usn, location, server, maxAge)
- if err != nil {
- return err
+ msg := &aliveDataProvider{
+ host: addr,
+ nt: nt,
+ usn: usn,
+ location: locProv,
+ server: server,
+ maxAge: maxAge,
}
if _, err := conn.WriteTo(msg, addr); err != nil {
return err
@@ -29,9 +40,24 @@ func AnnounceAlive(nt, usn, location, server string, maxAge int, localAddr strin
return nil
}
-func buildAlive(raddr net.Addr, nt, usn, location, server string, maxAge int) ([]byte, error) {
+type aliveDataProvider struct {
+ host net.Addr
+ nt string
+ usn string
+ location LocationProvider
+ server string
+ maxAge int
+}
+
+func (p *aliveDataProvider) Bytes(ifi *net.Interface) []byte {
+ return buildAlive(p.host, p.nt, p.usn, p.location.Location(nil, ifi), p.server, p.maxAge)
+}
+
+var _ multicast.DataProvider = (*aliveDataProvider)(nil)
+
+func buildAlive(raddr net.Addr, nt, usn, location, server string, maxAge int) []byte {
+ // bytes.Buffer#Write() is never fail, so we can omit error checks.
b := new(bytes.Buffer)
- // FIXME: error should be checked.
b.WriteString("NOTIFY * HTTP/1.1\r\n")
fmt.Fprintf(b, "HOST: %s\r\n", raddr.String())
fmt.Fprintf(b, "NT: %s\r\n", nt)
@@ -45,19 +71,19 @@ func buildAlive(raddr net.Addr, nt, usn, location, server string, maxAge int) ([
}
fmt.Fprintf(b, "CACHE-CONTROL: max-age=%d\r\n", maxAge)
b.WriteString("\r\n")
- return b.Bytes(), nil
+ return b.Bytes()
}
// AnnounceBye sends ssdp:byebye message.
func AnnounceBye(nt, usn, localAddr string) error {
// dial multicast UDP packet.
- conn, err := multicastListen(&udpAddrResolver{addr: localAddr})
+ conn, err := multicast.Listen(&multicast.AddrResolver{Addr: localAddr})
if err != nil {
return err
}
defer conn.Close()
// build and send message.
- addr, err := multicastSendAddr()
+ addr, err := multicast.SendAddr()
if err != nil {
return err
}
@@ -65,7 +91,7 @@ func AnnounceBye(nt, usn, localAddr string) error {
if err != nil {
return err
}
- if _, err := conn.WriteTo(msg, addr); err != nil {
+ if _, err := conn.WriteTo(multicast.BytesDataProvider(msg), addr); err != nil {
return err
}
return nil
diff --git a/vendor/github.com/koron/go-ssdp/doc.go b/vendor/github.com/koron/go-ssdp/doc.go
index a1534868b..d4dfd4b5d 100644
--- a/vendor/github.com/koron/go-ssdp/doc.go
+++ b/vendor/github.com/koron/go-ssdp/doc.go
@@ -1,4 +1,4 @@
/*
-Package ssdp provides ...
+Package ssdp provides SSDP advertiser or so.
*/
package ssdp
diff --git a/vendor/github.com/koron/go-ssdp/internal/multicast/doc.go b/vendor/github.com/koron/go-ssdp/internal/multicast/doc.go
new file mode 100644
index 000000000..96d419f3c
--- /dev/null
+++ b/vendor/github.com/koron/go-ssdp/internal/multicast/doc.go
@@ -0,0 +1,4 @@
+/*
+Package multicast provides utilities for network multicast.
+*/
+package multicast
diff --git a/vendor/github.com/koron/go-ssdp/interface.go b/vendor/github.com/koron/go-ssdp/internal/multicast/interface.go
similarity index 60%
rename from vendor/github.com/koron/go-ssdp/interface.go
rename to vendor/github.com/koron/go-ssdp/internal/multicast/interface.go
index 6907e3788..88fd5760f 100644
--- a/vendor/github.com/koron/go-ssdp/interface.go
+++ b/vendor/github.com/koron/go-ssdp/internal/multicast/interface.go
@@ -1,33 +1,23 @@
-package ssdp
+package multicast
import (
"net"
- "sync"
)
-// Interfaces specify target interfaces to multicast. If no interfaces are
-// specified, all interfaces will be used.
-var Interfaces []net.Interface
+type InterfacesProviderFunc func() []net.Interface
-var ifLock sync.Mutex
-var ifList []net.Interface
+// InterfacesProvider specify a function to list all interfaces to multicast.
+// If no provider are given, all possible interfaces will be used.
+var InterfacesProvider InterfacesProviderFunc
// interfaces gets list of net.Interface to multicast UDP packet.
func interfaces() ([]net.Interface, error) {
- ifLock.Lock()
- defer ifLock.Unlock()
- if len(Interfaces) > 0 {
- return Interfaces, nil
- }
- if len(ifList) > 0 {
- return ifList, nil
- }
- l, err := interfacesIPv4()
- if err != nil {
- return nil, err
+ if p := InterfacesProvider; p != nil {
+ if list := p(); len(list) > 0 {
+ return list, nil
+ }
}
- ifList = l
- return ifList, nil
+ return interfacesIPv4()
}
// interfacesIPv4 lists net.Interface on IPv4.
@@ -38,7 +28,7 @@ func interfacesIPv4() ([]net.Interface, error) {
}
list := make([]net.Interface, 0, len(iflist))
for _, ifi := range iflist {
- if !hasLinkUp(&ifi) || !hasIPv4Address(&ifi) {
+ if !hasLinkUp(&ifi) || !hasMulticast(&ifi) || !hasIPv4Address(&ifi) {
continue
}
list = append(list, ifi)
@@ -51,6 +41,11 @@ func hasLinkUp(ifi *net.Interface) bool {
return ifi.Flags&net.FlagUp != 0
}
+// hasMulticast checks an I/F supports multicast or not.
+func hasMulticast(ifi *net.Interface) bool {
+ return ifi.Flags&net.FlagMulticast != 0
+}
+
// hasIPv4Address checks an I/F have IPv4 address.
func hasIPv4Address(ifi *net.Interface) bool {
addrs, err := ifi.Addrs()
diff --git a/vendor/github.com/koron/go-ssdp/multicast.go b/vendor/github.com/koron/go-ssdp/internal/multicast/multicast.go
similarity index 61%
rename from vendor/github.com/koron/go-ssdp/multicast.go
rename to vendor/github.com/koron/go-ssdp/internal/multicast/multicast.go
index 442024983..9a97353ca 100644
--- a/vendor/github.com/koron/go-ssdp/multicast.go
+++ b/vendor/github.com/koron/go-ssdp/internal/multicast/multicast.go
@@ -1,4 +1,4 @@
-package ssdp
+package multicast
import (
"errors"
@@ -7,17 +7,20 @@ import (
"strings"
"time"
+ "github.com/koron/go-ssdp/internal/ssdplog"
"golang.org/x/net/ipv4"
)
-type multicastConn struct {
+// Conn is multicast connection.
+type Conn struct {
laddr *net.UDPAddr
conn *net.UDPConn
pconn *ipv4.PacketConn
iflist []net.Interface
}
-func multicastListen(r *udpAddrResolver) (*multicastConn, error) {
+// Listen starts to receiving multicast messages.
+func Listen(r *AddrResolver) (*Conn, error) {
// prepare parameters.
laddr, err := r.resolve()
if err != nil {
@@ -34,7 +37,7 @@ func multicastListen(r *udpAddrResolver) (*multicastConn, error) {
conn.Close()
return nil, err
}
- return &multicastConn{
+ return &Conn{
laddr: laddr,
conn: conn,
pconn: pconn,
@@ -47,7 +50,7 @@ func newIPv4MulticastConn(conn *net.UDPConn) (*ipv4.PacketConn, []net.Interface,
if err != nil {
return nil, nil, err
}
- addr, err := multicastSendAddr()
+ addr, err := SendAddr()
if err != nil {
return nil, nil, err
}
@@ -66,11 +69,11 @@ func joinGroupIPv4(conn *net.UDPConn, iflist []net.Interface, gaddr net.Addr) (*
joined := 0
for _, ifi := range iflist {
if err := wrap.JoinGroup(&ifi, gaddr); err != nil {
- logf("failed to join group %s on %s: %s", gaddr.String(), ifi.Name, err)
+ ssdplog.Printf("failed to join group %s on %s: %s", gaddr.String(), ifi.Name, err)
continue
}
joined++
- logf("joined group %s on %s", gaddr.String(), ifi.Name)
+ ssdplog.Printf("joined group %s on %s (#%d)", gaddr.String(), ifi.Name, ifi.Index)
}
if joined == 0 {
return nil, errors.New("no interfaces had joined to group")
@@ -78,7 +81,8 @@ func joinGroupIPv4(conn *net.UDPConn, iflist []net.Interface, gaddr net.Addr) (*
return wrap, nil
}
-func (mc *multicastConn) Close() error {
+// Close closes a multicast connection.
+func (mc *Conn) Close() error {
if err := mc.pconn.Close(); err != nil {
return err
}
@@ -86,26 +90,49 @@ func (mc *multicastConn) Close() error {
return nil
}
-func (mc *multicastConn) WriteTo(data []byte, to net.Addr) (int, error) {
+// DataProvider provides a body of multicast message to send.
+type DataProvider interface {
+ Bytes(*net.Interface) []byte
+}
+
+//type multicastDataProviderFunc func(*net.Interface) []byte
+//
+//func (f multicastDataProviderFunc) Bytes(ifi *net.Interface) []byte {
+// return f(ifi)
+//}
+
+type BytesDataProvider []byte
+
+func (b BytesDataProvider) Bytes(ifi *net.Interface) []byte {
+ return []byte(b)
+}
+
+// WriteTo sends a multicast message to interfaces.
+func (mc *Conn) WriteTo(dataProv DataProvider, to net.Addr) (int, error) {
if uaddr, ok := to.(*net.UDPAddr); ok && !uaddr.IP.IsMulticast() {
- return mc.conn.WriteTo(data, to)
+ return mc.conn.WriteTo(dataProv.Bytes(nil), to)
}
+ sum := 0
for _, ifi := range mc.iflist {
if err := mc.pconn.SetMulticastInterface(&ifi); err != nil {
return 0, err
}
- if _, err := mc.pconn.WriteTo(data, nil, to); err != nil {
+ n, err := mc.pconn.WriteTo(dataProv.Bytes(&ifi), nil, to)
+ if err != nil {
return 0, err
}
+ sum += n
}
- return len(data), nil
+ return sum, nil
}
-func (mc *multicastConn) LocalAddr() net.Addr {
+// LocalAddr returns local address to listen multicast packets.
+func (mc *Conn) LocalAddr() net.Addr {
return mc.laddr
}
-func (mc *multicastConn) readPackets(timeout time.Duration, h packetHandler) error {
+// ReadPackets reads multicast packets.
+func (mc *Conn) ReadPackets(timeout time.Duration, h PacketHandler) error {
buf := make([]byte, 65535)
if timeout > 0 {
mc.pconn.SetReadDeadline(time.Now().Add(timeout))
diff --git a/vendor/github.com/koron/go-ssdp/internal/multicast/udp.go b/vendor/github.com/koron/go-ssdp/internal/multicast/udp.go
new file mode 100644
index 000000000..2d9b7d79c
--- /dev/null
+++ b/vendor/github.com/koron/go-ssdp/internal/multicast/udp.go
@@ -0,0 +1,65 @@
+package multicast
+
+import (
+ "net"
+ "sync"
+)
+
+type PacketHandler func(net.Addr, []byte) error
+
+type AddrResolver struct {
+ Addr string
+
+ mu sync.RWMutex
+ udp *net.UDPAddr
+ err error
+}
+
+func (r *AddrResolver) setAddress(addr string) {
+ r.mu.Lock()
+ r.Addr = addr
+ r.udp = nil
+ r.err = nil
+ r.mu.Unlock()
+}
+
+func (r *AddrResolver) resolve() (*net.UDPAddr, error) {
+ r.mu.RLock()
+ if err := r.err; err != nil {
+ r.mu.RUnlock()
+ return nil, err
+ }
+ if udp := r.udp; udp != nil {
+ r.mu.RUnlock()
+ return udp, nil
+ }
+ r.mu.RUnlock()
+
+ r.mu.Lock()
+ defer r.mu.Unlock()
+ r.udp, r.err = net.ResolveUDPAddr("udp4", r.Addr)
+ return r.udp, r.err
+}
+
+var RecvAddrResolver = &AddrResolver{Addr: "224.0.0.1:1900"}
+
+// SetRecvAddrIPv4 updates multicast address where to receive packets.
+// This never fail now.
+func SetRecvAddrIPv4(addr string) error {
+ RecvAddrResolver.setAddress(addr)
+ return nil
+}
+
+var sendAddrResolver = &AddrResolver{Addr: "239.255.255.250:1900"}
+
+// SendAddr returns an address to send multicast UDP package.
+func SendAddr() (*net.UDPAddr, error) {
+ return sendAddrResolver.resolve()
+}
+
+// SetSendAddrIPv4 updates a UDP address to send multicast packets.
+// This never fail now.
+func SetSendAddrIPv4(addr string) error {
+ sendAddrResolver.setAddress(addr)
+ return nil
+}
diff --git a/vendor/github.com/koron/go-ssdp/internal/ssdplog/ssdplog.go b/vendor/github.com/koron/go-ssdp/internal/ssdplog/ssdplog.go
new file mode 100644
index 000000000..2439a0fa5
--- /dev/null
+++ b/vendor/github.com/koron/go-ssdp/internal/ssdplog/ssdplog.go
@@ -0,0 +1,16 @@
+/*
+Package ssdplog provides log mechanism for ssdp.
+*/
+package ssdplog
+
+import "log"
+
+var LoggerProvider = func() *log.Logger { return nil }
+
+func Printf(s string, a ...interface{}) {
+ if p := LoggerProvider; p != nil {
+ if l := p(); l != nil {
+ l.Printf(s, a...)
+ }
+ }
+}
diff --git a/vendor/github.com/koron/go-ssdp/location.go b/vendor/github.com/koron/go-ssdp/location.go
new file mode 100644
index 000000000..a7970ce26
--- /dev/null
+++ b/vendor/github.com/koron/go-ssdp/location.go
@@ -0,0 +1,40 @@
+package ssdp
+
+import (
+ "fmt"
+ "net"
+)
+
+// LocationProvider provides address for Location header which can be reached from
+// "from" address network.
+type LocationProvider interface {
+ // Location provides an address be reachable from the network located
+ // by "from" address or "ifi" interface.
+ // One of "from" or "ifi" must not be nil.
+ Location(from net.Addr, ifi *net.Interface) string
+}
+
+// LocationProviderFunc type is an adapter to allow the use of ordinary
+// functions are location providers.
+type LocationProviderFunc func(net.Addr, *net.Interface) string
+
+func (f LocationProviderFunc) Location(from net.Addr, ifi *net.Interface) string {
+ return f(from, ifi)
+}
+
+type fixedLocation string
+
+func (s fixedLocation) Location(net.Addr, *net.Interface) string {
+ return string(s)
+}
+
+func toLocationProvider(v interface{}) (LocationProvider, error) {
+ switch w := v.(type) {
+ case string:
+ return fixedLocation(w), nil
+ case LocationProvider:
+ return w, nil
+ default:
+ return nil, fmt.Errorf("location should be a string or a ssdp.LocationProvider but got %T", w)
+ }
+}
diff --git a/vendor/github.com/koron/go-ssdp/log.go b/vendor/github.com/koron/go-ssdp/log.go
deleted file mode 100644
index 56cd5bc8a..000000000
--- a/vendor/github.com/koron/go-ssdp/log.go
+++ /dev/null
@@ -1,12 +0,0 @@
-package ssdp
-
-import "log"
-
-// Logger is default logger for SSDP module.
-var Logger *log.Logger
-
-func logf(s string, a ...interface{}) {
- if l := Logger; l != nil {
- l.Printf(s, a...)
- }
-}
diff --git a/vendor/github.com/koron/go-ssdp/monitor.go b/vendor/github.com/koron/go-ssdp/monitor.go
index 9d79038cf..e564d5cba 100644
--- a/vendor/github.com/koron/go-ssdp/monitor.go
+++ b/vendor/github.com/koron/go-ssdp/monitor.go
@@ -9,6 +9,9 @@ import (
"net"
"net/http"
"sync"
+
+ "github.com/koron/go-ssdp/internal/multicast"
+ "github.com/koron/go-ssdp/internal/ssdplog"
)
// Monitor monitors SSDP's alive and byebye messages.
@@ -17,17 +20,17 @@ type Monitor struct {
Bye ByeHandler
Search SearchHandler
- conn *multicastConn
+ conn *multicast.Conn
wg sync.WaitGroup
}
// Start starts to monitor SSDP messages.
func (m *Monitor) Start() error {
- conn, err := multicastListen(recvAddrResolver)
+ conn, err := multicast.Listen(multicast.RecvAddrResolver)
if err != nil {
return err
}
- logf("monitoring on %s", conn.LocalAddr().String())
+ ssdplog.Printf("monitoring on %s", conn.LocalAddr().String())
m.conn = conn
m.wg.Add(1)
go func() {
@@ -38,7 +41,8 @@ func (m *Monitor) Start() error {
}
func (m *Monitor) serve() error {
- err := m.conn.readPackets(0, func(addr net.Addr, data []byte) error {
+ // TODO: update listening interfaces of m.conn
+ err := m.conn.ReadPackets(0, func(addr net.Addr, data []byte) error {
msg := make([]byte, len(data))
copy(msg, data)
go m.handleRaw(addr, msg)
@@ -62,7 +66,7 @@ func (m *Monitor) handleRaw(addr net.Addr, raw []byte) error {
return m.handleNotify(addr, raw)
}
n := bytes.Index(raw, []byte("\r\n"))
- logf("unexpected method: %q", string(raw[:n]))
+ ssdplog.Printf("unexpected method: %q", string(raw[:n]))
return nil
}
diff --git a/vendor/github.com/koron/go-ssdp/search.go b/vendor/github.com/koron/go-ssdp/search.go
index 7ae646b7e..e4e5ddcca 100644
--- a/vendor/github.com/koron/go-ssdp/search.go
+++ b/vendor/github.com/koron/go-ssdp/search.go
@@ -10,6 +10,9 @@ import (
"regexp"
"strconv"
"time"
+
+ "github.com/koron/go-ssdp/internal/multicast"
+ "github.com/koron/go-ssdp/internal/ssdplog"
)
// Service is discovered service.
@@ -68,15 +71,15 @@ const (
// Search searches services by SSDP.
func Search(searchType string, waitSec int, localAddr string) ([]Service, error) {
// dial multicast UDP packet.
- conn, err := multicastListen(&udpAddrResolver{addr: localAddr})
+ conn, err := multicast.Listen(&multicast.AddrResolver{Addr: localAddr})
if err != nil {
return nil, err
}
defer conn.Close()
- logf("search on %s", conn.LocalAddr().String())
+ ssdplog.Printf("search on %s", conn.LocalAddr().String())
// send request.
- addr, err := multicastSendAddr()
+ addr, err := multicast.SendAddr()
if err != nil {
return nil, err
}
@@ -84,7 +87,7 @@ func Search(searchType string, waitSec int, localAddr string) ([]Service, error)
if err != nil {
return nil, err
}
- if _, err := conn.WriteTo(msg, addr); err != nil {
+ if _, err := conn.WriteTo(multicast.BytesDataProvider(msg), addr); err != nil {
return nil, err
}
@@ -93,15 +96,15 @@ func Search(searchType string, waitSec int, localAddr string) ([]Service, error)
h := func(a net.Addr, d []byte) error {
srv, err := parseService(a, d)
if err != nil {
- logf("invalid search response from %s: %s", a.String(), err)
+ ssdplog.Printf("invalid search response from %s: %s", a.String(), err)
return nil
}
list = append(list, *srv)
- logf("search response from %s: %s", a.String(), srv.USN)
+ ssdplog.Printf("search response from %s: %s", a.String(), srv.USN)
return nil
}
d := time.Second * time.Duration(waitSec)
- if err := conn.readPackets(d, h); err != nil {
+ if err := conn.ReadPackets(d, h); err != nil {
return nil, err
}
diff --git a/vendor/github.com/koron/go-ssdp/ssdp.go b/vendor/github.com/koron/go-ssdp/ssdp.go
new file mode 100644
index 000000000..5b875c02d
--- /dev/null
+++ b/vendor/github.com/koron/go-ssdp/ssdp.go
@@ -0,0 +1,37 @@
+package ssdp
+
+import (
+ "log"
+ "net"
+
+ "github.com/koron/go-ssdp/internal/multicast"
+ "github.com/koron/go-ssdp/internal/ssdplog"
+)
+
+func init() {
+ multicast.InterfacesProvider = func() []net.Interface {
+ return Interfaces
+ }
+ ssdplog.LoggerProvider = func() *log.Logger {
+ return Logger
+ }
+}
+
+// Interfaces specify target interfaces to multicast. If no interfaces are
+// specified, all interfaces will be used.
+var Interfaces []net.Interface
+
+// Logger is default logger for SSDP module.
+var Logger *log.Logger
+
+// SetMulticastRecvAddrIPv4 updates multicast address where to receive packets.
+// This never fail now.
+func SetMulticastRecvAddrIPv4(addr string) error {
+ return multicast.SetRecvAddrIPv4(addr)
+}
+
+// SetMulticastSendAddrIPv4 updates a UDP address to send multicast packets.
+// This never fail now.
+func SetMulticastSendAddrIPv4(addr string) error {
+ return multicast.SetSendAddrIPv4(addr)
+}
diff --git a/vendor/github.com/koron/go-ssdp/udp.go b/vendor/github.com/koron/go-ssdp/udp.go
deleted file mode 100644
index 3a2d2583f..000000000
--- a/vendor/github.com/koron/go-ssdp/udp.go
+++ /dev/null
@@ -1,65 +0,0 @@
-package ssdp
-
-import (
- "net"
- "sync"
-)
-
-type packetHandler func(net.Addr, []byte) error
-
-type udpAddrResolver struct {
- addr string
-
- mu sync.RWMutex
- udp *net.UDPAddr
- err error
-}
-
-func (r *udpAddrResolver) setAddress(addr string) {
- r.mu.Lock()
- r.addr = addr
- r.udp = nil
- r.err = nil
- r.mu.Unlock()
-}
-
-func (r *udpAddrResolver) resolve() (*net.UDPAddr, error) {
- r.mu.RLock()
- if err := r.err; err != nil {
- r.mu.RUnlock()
- return nil, err
- }
- if udp := r.udp; udp != nil {
- r.mu.RUnlock()
- return udp, nil
- }
- r.mu.RUnlock()
-
- r.mu.Lock()
- defer r.mu.Unlock()
- r.udp, r.err = net.ResolveUDPAddr("udp4", r.addr)
- return r.udp, r.err
-}
-
-var recvAddrResolver = &udpAddrResolver{addr: "224.0.0.0:1900"}
-
-// SetMulticastRecvAddrIPv4 updates multicast address where to receive packets.
-// This never fail now.
-func SetMulticastRecvAddrIPv4(addr string) error {
- recvAddrResolver.setAddress(addr)
- return nil
-}
-
-var sendAddrResolver = &udpAddrResolver{addr: "239.255.255.250:1900"}
-
-// multicastSendAddr returns an address to send multicast UDP package.
-func multicastSendAddr() (*net.UDPAddr, error) {
- return sendAddrResolver.resolve()
-}
-
-// SetMulticastSendAddrIPv4 updates a UDP address to send multicast packets.
-// This never fail now.
-func SetMulticastSendAddrIPv4(addr string) error {
- sendAddrResolver.setAddress(addr)
- return nil
-}
diff --git a/vendor/github.com/kubeedge/beehive/pkg/common/type.go b/vendor/github.com/kubeedge/beehive/pkg/common/type.go
new file mode 100644
index 000000000..ce448afe8
--- /dev/null
+++ b/vendor/github.com/kubeedge/beehive/pkg/common/type.go
@@ -0,0 +1,27 @@
+package common
+
+// define channel type
+const (
+ // MsgCtxTypeChannel message type channel
+ MsgCtxTypeChannel = "channel"
+ // MsgCtxTypeUS message type us
+ MsgCtxTypeUS = "unixpacket"
+
+ // ResourceTypeModule resource type module
+ ResourceTypeModule = "module"
+ // OperationTypeModule operation type module
+ OperationTypeModule = "add"
+)
+
+// ModuleInfo is module info
+type ModuleInfo struct {
+ ModuleName string
+ ModuleType string
+ // the below field ModuleSocket is only required for using socket.
+ ModuleSocket
+}
+
+type ModuleSocket struct {
+ IsRemote bool
+ Connection interface{} // only for socket remote mode
+}
diff --git a/vendor/github.com/kubeedge/beehive/pkg/core/context/context_channel.go b/vendor/github.com/kubeedge/beehive/pkg/core/channel/context_channel.go
similarity index 66%
rename from vendor/github.com/kubeedge/beehive/pkg/core/context/context_channel.go
rename to vendor/github.com/kubeedge/beehive/pkg/core/channel/context_channel.go
index dd6a660c8..b5faac11a 100644
--- a/vendor/github.com/kubeedge/beehive/pkg/core/context/context_channel.go
+++ b/vendor/github.com/kubeedge/beehive/pkg/core/channel/context_channel.go
@@ -1,7 +1,6 @@
-package context
+package channel
import (
- "errors"
"fmt"
"sync"
"sync/atomic"
@@ -9,6 +8,7 @@ import (
"k8s.io/klog/v2"
+ "github.com/kubeedge/beehive/pkg/common"
"github.com/kubeedge/beehive/pkg/core/model"
)
@@ -21,8 +21,8 @@ const (
TickerTimeoutDefault = 20 * time.Millisecond
)
-// ChannelContext is object for Context channel
-type ChannelContext struct {
+// Context is object for Context channel
+type Context struct {
//ConfigFactory goarchaius.ConfigurationFactory
channels map[string]chan model.Message
chsLock sync.RWMutex
@@ -32,21 +32,26 @@ type ChannelContext struct {
anonChsLock sync.RWMutex
}
+var channelContext *Context
+var once sync.Once
+
// NewChannelContext creates and returns object of new channel context
-// TODO: Singleton
-func NewChannelContext() *ChannelContext {
- channelMap := make(map[string]chan model.Message)
- moduleChannels := make(map[string]map[string]chan model.Message)
- anonChannels := make(map[string]chan model.Message)
- return &ChannelContext{
- channels: channelMap,
- typeChannels: moduleChannels,
- anonChannels: anonChannels,
- }
+func NewChannelContext() *Context {
+ once.Do(func() {
+ channelMap := make(map[string]chan model.Message)
+ moduleChannels := make(map[string]map[string]chan model.Message)
+ anonChannels := make(map[string]chan model.Message)
+ channelContext = &Context{
+ channels: channelMap,
+ typeChannels: moduleChannels,
+ anonChannels: anonChannels,
+ }
+ })
+ return channelContext
}
// Cleanup close modules
-func (ctx *ChannelContext) Cleanup(module string) {
+func (ctx *Context) Cleanup(module string) {
if channel := ctx.getChannel(module); channel != nil {
ctx.delChannel(module)
// decrease probable exception of channel closing
@@ -56,7 +61,7 @@ func (ctx *ChannelContext) Cleanup(module string) {
}
// Send send msg to a module. Todo: do not stuck
-func (ctx *ChannelContext) Send(module string, message model.Message) {
+func (ctx *Context) Send(module string, message model.Message) {
// avoid exception because of channel closing
// TODO: need reconstruction
defer func() {
@@ -73,7 +78,7 @@ func (ctx *ChannelContext) Send(module string, message model.Message) {
}
// Receive msg from channel of module
-func (ctx *ChannelContext) Receive(module string) (model.Message, error) {
+func (ctx *Context) Receive(module string) (model.Message, error) {
if channel := ctx.getChannel(module); channel != nil {
content := <-channel
return content, nil
@@ -88,7 +93,7 @@ func getAnonChannelName(msgID string) string {
}
// SendSync sends message in a sync way
-func (ctx *ChannelContext) SendSync(module string, message model.Message, timeout time.Duration) (model.Message, error) {
+func (ctx *Context) SendSync(module string, message model.Message, timeout time.Duration) (model.Message, error) {
// avoid exception because of channel closing
// TODO: need reconstruction
defer func() {
@@ -111,14 +116,6 @@ func (ctx *ChannelContext) SendSync(module string, message model.Message, timeou
return model.Message{}, fmt.Errorf("bad request module name(%s)", module)
}
- sendTimer := time.NewTimer(timeout)
- select {
- case reqChannel <- message:
- case <-sendTimer.C:
- return model.Message{}, errors.New("timeout to send message")
- }
- sendTimer.Stop()
-
// new anonymous channel for response
anonChan := make(chan model.Message)
anonName := getAnonChannelName(message.GetID())
@@ -132,55 +129,61 @@ func (ctx *ChannelContext) SendSync(module string, message model.Message, timeou
ctx.anonChsLock.Unlock()
}()
+ select {
+ case reqChannel <- message:
+ case <-time.After(timeout):
+ return model.Message{}, fmt.Errorf("timeout to send message %s", message.GetID())
+ }
+
var resp model.Message
- respTimer := time.NewTimer(time.Until(deadline))
select {
case resp = <-anonChan:
- case <-respTimer.C:
- return model.Message{}, errors.New("timeout to get response")
+ case <-time.After(time.Until(deadline)):
+ return model.Message{}, fmt.Errorf("timeout to get response for message %s", message.GetID())
}
- respTimer.Stop()
return resp, nil
}
// SendResp send resp for this message when using sync mode
-func (ctx *ChannelContext) SendResp(message model.Message) {
+func (ctx *Context) SendResp(message model.Message) {
anonName := getAnonChannelName(message.GetParentID())
ctx.anonChsLock.RLock()
defer ctx.anonChsLock.RUnlock()
if channel, exist := ctx.anonChannels[anonName]; exist {
- channel <- message
+ select {
+ case channel <- message:
+ default:
+ klog.Warningf("no goroutine is ready for receive the message from "+
+ "unbuffered response channel, discard this resp message for %s", message.GetParentID())
+ }
return
}
- klog.V(4).Infof("Get bad anonName:%s when sendresp message, do nothing", anonName)
+ klog.Warningf("Get bad anonName:%s when sendresp message, do nothing", anonName)
}
// SendToGroup send msg to modules. Todo: do not stuck
-func (ctx *ChannelContext) SendToGroup(moduleType string, message model.Message) {
- // avoid exception because of channel closing
- // TODO: need reconstruction
- defer func() {
- if exception := recover(); exception != nil {
- klog.Warningf("Recover when sendToGroup message, exception: %+v", exception)
- }
- }()
-
- send := func(ch chan model.Message) {
+func (ctx *Context) SendToGroup(moduleType string, message model.Message) {
+ send := func(module string, ch chan model.Message) {
+ // avoid exception because of channel closing
+ // TODO: need reconstruction
+ defer func() {
+ if exception := recover(); exception != nil {
+ klog.Warningf("Recover when sendToGroup message, exception: %+v", exception)
+ }
+ }()
select {
case ch <- message:
default:
- klog.Warningf("the message channel is full, message: %+v", message)
- select {
- case ch <- message:
- }
+ klog.Warningf("The module %s message channel is full, message: %+v", module, message)
+ ch <- message
}
}
if channelList := ctx.getTypeChannel(moduleType); channelList != nil {
- for _, channel := range channelList {
- go send(channel)
+ for module, channel := range channelList {
+ go send(module, channel)
}
return
}
@@ -189,15 +192,7 @@ func (ctx *ChannelContext) SendToGroup(moduleType string, message model.Message)
// SendToGroupSync : broadcast the message to echo module channel, the module send response back anon channel
// check timeout and the size of anon channel
-func (ctx *ChannelContext) SendToGroupSync(moduleType string, message model.Message, timeout time.Duration) error {
- // avoid exception because of channel closing
- // TODO: need reconstruction
- defer func() {
- if exception := recover(); exception != nil {
- klog.Warningf("Recover when sendToGroupsync message, exception: %+v", exception)
- }
- }()
-
+func (ctx *Context) SendToGroupSync(moduleType string, message model.Message, timeout time.Duration) error {
if timeout <= 0 {
timeout = MessageTimeoutDefault
}
@@ -208,7 +203,7 @@ func (ctx *ChannelContext) SendToGroupSync(moduleType string, message model.Mess
return fmt.Errorf("failed to get module type(%s) channel list", moduleType)
}
- // echo module must sync a response,
+ // each module must sync a response,
// let anonchan size be module number
channelNumber := len(channelList)
anonChan := make(chan model.Message, channelNumber)
@@ -242,6 +237,13 @@ func (ctx *ChannelContext) SendToGroupSync(moduleType string, message model.Mess
var timeoutCounter int32
send := func(ch chan model.Message) {
+ // avoid exception because of channel closing
+ // TODO: need reconstruction
+ defer func() {
+ if exception := recover(); exception != nil {
+ klog.Warningf("Recover when sendToGroupsync message, exception: %+v", exception)
+ }
+ }()
sendTimer := time.NewTimer(time.Until(deadline))
select {
case ch <- message:
@@ -264,13 +266,15 @@ func (ctx *ChannelContext) SendToGroupSync(moduleType string, message model.Mess
select {
case <-ticker.C:
case <-sendTimer.C:
- cleanup()
+ err := cleanup()
+ if err != nil {
+ klog.Errorf("Failed to cleanup, error: %v", err)
+ }
if timeoutCounter != 0 {
- errInfo := fmt.Sprintf("timeout to send message, several %d timeout when send", timeoutCounter)
- return fmt.Errorf(errInfo)
+ return fmt.Errorf("timeout to send message, several %d timeout when send", timeoutCounter)
}
klog.Error("Timeout to sendToGroupsync message")
- return fmt.Errorf("Timeout to send message")
+ return fmt.Errorf("timeout to send message")
}
}
@@ -278,13 +282,13 @@ func (ctx *ChannelContext) SendToGroupSync(moduleType string, message model.Mess
}
// New Channel
-func (ctx *ChannelContext) newChannel() chan model.Message {
+func (ctx *Context) newChannel() chan model.Message {
channel := make(chan model.Message, ChannelSizeDefault)
return channel
}
// getChannel return chan
-func (ctx *ChannelContext) getChannel(module string) chan model.Message {
+func (ctx *Context) getChannel(module string) chan model.Message {
ctx.chsLock.RLock()
defer ctx.chsLock.RUnlock()
@@ -292,12 +296,12 @@ func (ctx *ChannelContext) getChannel(module string) chan model.Message {
return ctx.channels[module]
}
- klog.Warningf("Failed to get channel, type:%s", module)
+ klog.Warningf("Failed to get channel for module:%s", module)
return nil
}
// addChannel return chan
-func (ctx *ChannelContext) addChannel(module string, moduleCh chan model.Message) {
+func (ctx *Context) addChannel(module string, moduleCh chan model.Message) {
ctx.chsLock.Lock()
defer ctx.chsLock.Unlock()
@@ -305,16 +309,15 @@ func (ctx *ChannelContext) addChannel(module string, moduleCh chan model.Message
}
// deleteChannel by module name
-func (ctx *ChannelContext) delChannel(module string) {
+func (ctx *Context) delChannel(module string) {
// delete module channel from channels map
ctx.chsLock.Lock()
- _, exist := ctx.channels[module]
- if !exist {
+ if _, exist := ctx.channels[module]; !exist {
+ ctx.chsLock.Unlock()
klog.Warningf("Failed to get channel, module:%s", module)
return
}
delete(ctx.channels, module)
-
ctx.chsLock.Unlock()
// delete module channel from typechannels map
@@ -329,19 +332,19 @@ func (ctx *ChannelContext) delChannel(module string) {
}
// getTypeChannel return chan
-func (ctx *ChannelContext) getTypeChannel(moduleType string) map[string]chan model.Message {
+func (ctx *Context) getTypeChannel(moduleType string) map[string]chan model.Message {
ctx.typeChsLock.RLock()
defer ctx.typeChsLock.RUnlock()
- if _, exist := ctx.typeChannels[moduleType]; exist {
- return ctx.typeChannels[moduleType]
+ if v, exist := ctx.typeChannels[moduleType]; exist {
+ return v
}
klog.Warningf("Failed to get type channel, type:%s", moduleType)
return nil
}
-func (ctx *ChannelContext) getModuleByChannel(ch chan model.Message) string {
+func (ctx *Context) getModuleByChannel(ch chan model.Message) string {
ctx.chsLock.RLock()
defer ctx.chsLock.RUnlock()
@@ -356,7 +359,7 @@ func (ctx *ChannelContext) getModuleByChannel(ch chan model.Message) string {
}
// addTypeChannel put modules into moduleType map
-func (ctx *ChannelContext) addTypeChannel(module, group string, moduleCh chan model.Message) {
+func (ctx *Context) addTypeChannel(module, group string, moduleCh chan model.Message) {
ctx.typeChsLock.Lock()
defer ctx.typeChsLock.Unlock()
@@ -367,13 +370,13 @@ func (ctx *ChannelContext) addTypeChannel(module, group string, moduleCh chan mo
}
// AddModule adds module into module context
-func (ctx *ChannelContext) AddModule(module string) {
+func (ctx *Context) AddModule(info *common.ModuleInfo) {
channel := ctx.newChannel()
- ctx.addChannel(module, channel)
+ ctx.addChannel(info.ModuleName, channel)
}
// AddModuleGroup adds modules into module context group
-func (ctx *ChannelContext) AddModuleGroup(module, group string) {
+func (ctx *Context) AddModuleGroup(module, group string) {
if channel := ctx.getChannel(module); channel != nil {
ctx.addTypeChannel(module, group, channel)
return
diff --git a/vendor/github.com/kubeedge/beehive/pkg/core/context/context.go b/vendor/github.com/kubeedge/beehive/pkg/core/context/context.go
index 6bbe22563..e3ab901ef 100644
--- a/vendor/github.com/kubeedge/beehive/pkg/core/context/context.go
+++ b/vendor/github.com/kubeedge/beehive/pkg/core/context/context.go
@@ -1,15 +1,15 @@
package context
import (
- gocontext "context"
"time"
+ "github.com/kubeedge/beehive/pkg/common"
"github.com/kubeedge/beehive/pkg/core/model"
)
// ModuleContext is interface for context module management
type ModuleContext interface {
- AddModule(module string)
+ AddModule(info *common.ModuleInfo)
AddModuleGroup(module, group string)
Cleanup(module string)
}
@@ -23,14 +23,6 @@ type MessageContext interface {
SendSync(module string, message model.Message, timeout time.Duration) (model.Message, error)
SendResp(message model.Message)
// group broadcast
- SendToGroup(moduleType string, message model.Message)
- SendToGroupSync(moduleType string, message model.Message, timeout time.Duration) error
-}
-
-// Context is global context object
-type beehiveContext struct {
- moduleContext ModuleContext
- messageContext MessageContext
- ctx gocontext.Context
- cancel gocontext.CancelFunc
+ SendToGroup(group string, message model.Message)
+ SendToGroupSync(group string, message model.Message, timeout time.Duration) error
}
diff --git a/vendor/github.com/kubeedge/beehive/pkg/core/context/context_factory.go b/vendor/github.com/kubeedge/beehive/pkg/core/context/context_factory.go
index 2cf48b9f3..52c91fffc 100644
--- a/vendor/github.com/kubeedge/beehive/pkg/core/context/context_factory.go
+++ b/vendor/github.com/kubeedge/beehive/pkg/core/context/context_factory.go
@@ -2,85 +2,141 @@ package context
import (
gocontext "context"
+ "fmt"
"sync"
"time"
"k8s.io/klog/v2"
+ "github.com/kubeedge/beehive/pkg/common"
+ "github.com/kubeedge/beehive/pkg/core/channel"
"github.com/kubeedge/beehive/pkg/core/model"
+ "github.com/kubeedge/beehive/pkg/core/socket"
)
-// define channel type
-const (
- MsgCtxTypeChannel = "channel"
-)
+// GlobalContext is global context: only use for local cache to dispatch message
+type GlobalContext struct {
+ // context type(socket/channel) -> context
+ moduleContext map[string]ModuleContext
+ messageContext map[string]MessageContext
+
+ // module name to context type
+ moduleContextType map[string]string
+ // group name to context type
+ groupContextType map[string]string
+
+ ctx gocontext.Context
+ cancel gocontext.CancelFunc
+ ctxLock sync.RWMutex
+}
+
+func init() {
+ ctx, cancel := gocontext.WithCancel(gocontext.Background())
+ globalContext = &GlobalContext{
+ moduleContext: make(map[string]ModuleContext),
+ messageContext: make(map[string]MessageContext),
+
+ moduleContextType: make(map[string]string),
+ groupContextType: make(map[string]string),
+
+ ctx: ctx,
+ cancel: cancel,
+ }
+}
var (
// singleton
- context *beehiveContext
- once sync.Once
+ globalContext *GlobalContext
+ once sync.Once
)
// InitContext gets global context instance
-func InitContext(contextType string) {
- once.Do(func() {
- ctx, cancel := gocontext.WithCancel(gocontext.Background())
- context = &beehiveContext{
- ctx: ctx,
- cancel: cancel,
- }
+func InitContext(contextTypes []string) {
+ for _, contextType := range contextTypes {
switch contextType {
- case MsgCtxTypeChannel:
- channelContext := NewChannelContext()
- context.messageContext = channelContext
- context.moduleContext = channelContext
+ case common.MsgCtxTypeChannel:
+ channelContext := channel.NewChannelContext()
+ globalContext.moduleContext[contextType] = channelContext
+ globalContext.messageContext[contextType] = channelContext
+ case common.MsgCtxTypeUS:
+ socketContext := socket.InitSocketContext()
+ globalContext.moduleContext[contextType] = socketContext
+ globalContext.messageContext[contextType] = socketContext
default:
- klog.Fatalf("Do not support context type:%s", contextType)
+ klog.Exitf("unsupported context type: %s", contextType)
}
- })
+ }
}
func GetContext() gocontext.Context {
- return context.ctx
+ return globalContext.ctx
}
+
func Done() <-chan struct{} {
- return context.ctx.Done()
+ return globalContext.ctx.Done()
}
// AddModule adds module into module context
-func AddModule(module string) {
- context.moduleContext.AddModule(module)
+func AddModule(module *common.ModuleInfo) {
+ setModuleContextType(module.ModuleName, module.ModuleType)
+
+ moduleContext, err := getModuleContext(module.ModuleName)
+ if err != nil {
+ klog.Errorf("failed to get module context, module name: %s, err: %v", module.ModuleName, err)
+ return
+ }
+
+ moduleContext.AddModule(module)
}
// AddModuleGroup adds module into module context group
func AddModuleGroup(module, group string) {
- context.moduleContext.AddModuleGroup(module, group)
+ setGroupContextType(module, group)
+
+ moduleContext, err := getModuleContext(module)
+ if err != nil {
+ klog.Errorf("failed to get module context, module name: %s, err: %v", module, err)
+ return
+ }
+
+ moduleContext.AddModuleGroup(module, group)
}
// Cancel function
func Cancel() {
- context.cancel()
+ globalContext.cancel()
}
// Cleanup cleans up module
func Cleanup(module string) {
- context.moduleContext.Cleanup(module)
+ moduleContext, err := getModuleContext(module)
+ if err != nil {
+ klog.Errorf("failed to get module context, module name: %s, err: %v", module, err)
+ return
+ }
+
+ moduleContext.Cleanup(module)
}
// Send the message
func Send(module string, message model.Message) {
- context.messageContext.Send(module, message)
+ messageContext, err := getMessageContext(module)
+ if err != nil {
+ return
+ }
+
+ messageContext.Send(module, message)
}
// Receive the message
// module : local module name
func Receive(module string) (model.Message, error) {
- message, err := context.messageContext.Receive(module)
- if err == nil {
- return message, nil
+ messageContext, err := getMessageContext(module)
+ if err != nil {
+ return model.Message{}, err
}
- klog.Warningf("Receive: failed to receive message, error:%v", err)
- return message, err
+
+ return messageContext.Receive(module)
}
// SendSync sends message in sync mode
@@ -88,25 +144,116 @@ func Receive(module string) (model.Message, error) {
// timeout: if <= 0 using default value(30s)
func SendSync(module string,
message model.Message, timeout time.Duration) (model.Message, error) {
- resp, err := context.messageContext.SendSync(module, message, timeout)
- if err == nil {
- return resp, nil
+ messageContext, err := getMessageContext(module)
+ if err != nil {
+ return model.Message{}, err
}
- return model.Message{}, err
+
+ return messageContext.SendSync(module, message, timeout)
}
// SendResp sends response
// please get resp message using model.NewRespByMessage
func SendResp(resp model.Message) {
- context.messageContext.SendResp(resp)
+ messageContext, err := getMessageContextByMessageType(resp.GetType())
+ if err != nil {
+ klog.Errorf("message context for module doesn't exist, module name: %s", resp.GetSource())
+ return
+ }
+
+ messageContext.SendResp(resp)
}
// SendToGroup broadcasts the message to all of group members
-func SendToGroup(moduleType string, message model.Message) {
- context.messageContext.SendToGroup(moduleType, message)
+func SendToGroup(group string, message model.Message) {
+ messageContext, err := getMessageContextByGroup(group)
+ if err != nil {
+ klog.Errorf("message context for group doesn't exist, group name: %s", group)
+ return
+ }
+
+ messageContext.SendToGroup(group, message)
}
-// sendToGroupSync broadcasts the message to all of group members in sync mode
-func sendToGroupSync(moduleType string, message model.Message, timeout time.Duration) error {
- return context.messageContext.SendToGroupSync(moduleType, message, timeout)
+// SendToGroupSync broadcasts the message to all of group members in sync mode
+func SendToGroupSync(group string, message model.Message, timeout time.Duration) error {
+ messageContext, err := getMessageContextByGroup(group)
+ if err != nil {
+ return fmt.Errorf("message context for group doesn't exist, group name: %s", group)
+ }
+
+ return messageContext.SendToGroupSync(group, message, timeout)
+}
+
+func getModuleContext(moduleName string) (ModuleContext, error) {
+ globalContext.ctxLock.RLock()
+ defer globalContext.ctxLock.RUnlock()
+
+ moduleContextType := getModuleContextType(moduleName)
+ moduleContext, ok := globalContext.moduleContext[moduleContextType]
+ if !ok {
+ return nil, fmt.Errorf("module context %v doesn't exist", moduleContextType)
+ }
+
+ return moduleContext, nil
+}
+
+func getMessageContext(moduleName string) (MessageContext, error) {
+ globalContext.ctxLock.RLock()
+ defer globalContext.ctxLock.RUnlock()
+
+ moduleContextType := getModuleContextType(moduleName)
+ messageContext, ok := globalContext.messageContext[moduleContextType]
+ if !ok {
+ return nil, fmt.Errorf("message context %v doesn't exist", moduleContextType)
+ }
+
+ return messageContext, nil
+}
+
+func getMessageContextByMessageType(messageType string) (MessageContext, error) {
+ if messageType == "" {
+ messageType = common.MsgCtxTypeChannel
+ }
+
+ globalContext.ctxLock.RLock()
+ defer globalContext.ctxLock.RUnlock()
+
+ messageContext, ok := globalContext.messageContext[messageType]
+ if !ok {
+ return nil, fmt.Errorf("message context for message type doesn't exist, message type: %s", messageType)
+ }
+
+ return messageContext, nil
+}
+
+func getMessageContextByGroup(group string) (MessageContext, error) {
+ globalContext.ctxLock.RLock()
+ defer globalContext.ctxLock.RUnlock()
+
+ contextType := globalContext.groupContextType[group]
+ messageContext, ok := globalContext.messageContext[contextType]
+ if !ok {
+ return nil, fmt.Errorf("message context doesn't exist, group: %s, contextType: %s", group, contextType)
+ }
+
+ return messageContext, nil
+}
+
+// caller must lock the globalContext.ctxLock
+func getModuleContextType(moduleName string) string {
+ return globalContext.moduleContextType[moduleName]
+}
+
+func setModuleContextType(moduleName string, contextType string) {
+ globalContext.ctxLock.Lock()
+ defer globalContext.ctxLock.Unlock()
+ globalContext.moduleContextType[moduleName] = contextType
+}
+
+func setGroupContextType(module string, group string) {
+ globalContext.ctxLock.Lock()
+ defer globalContext.ctxLock.Unlock()
+
+ globalContext.groupContextType[group] = globalContext.moduleContextType[module]
}
diff --git a/vendor/github.com/kubeedge/beehive/pkg/core/context/context_unixsocket.go b/vendor/github.com/kubeedge/beehive/pkg/core/context/context_unixsocket.go
deleted file mode 100644
index 9967ce08b..000000000
--- a/vendor/github.com/kubeedge/beehive/pkg/core/context/context_unixsocket.go
+++ /dev/null
@@ -1,71 +0,0 @@
-package context
-
-import (
- "sync"
- "time"
-
- "github.com/kubeedge/beehive/pkg/core/model"
-)
-
-// UnixSocketContext unixsocket struct
-type UnixSocketContext struct {
- filename string
- bufsize int
- handler func(string) string
-}
-
-var (
- // singleton
- usContext *UnixSocketContext
- usOnce sync.Once
-)
-
-// GetUnixSocketContext defines and returns unix socket context object
-func GetUnixSocketContext() *UnixSocketContext {
- usOnce.Do(func() {
- usContext = &UnixSocketContext{}
- })
- return usContext
-}
-
-// AddModule adds module to context
-func (ctx *UnixSocketContext) AddModule(module string) {
-
-}
-
-// AddModuleGroup adds module to module context group
-func (ctx *UnixSocketContext) AddModuleGroup(module, group string) {
-
-}
-
-// Cleanup cleans up module
-func (ctx *UnixSocketContext) Cleanup(module string) {
-
-}
-
-// Send async mode
-func (ctx *UnixSocketContext) Send(module string, content interface{}) {
-
-}
-
-//Receive the message
-//local module name
-func (ctx *UnixSocketContext) Receive(module string) interface{} {
- return nil
-}
-
-// SendSync sends message in sync mode
-// module: the destination of the message
-func (ctx *UnixSocketContext) SendSync(module string, message model.Message, timeout time.Duration) (interface{}, error) {
- return nil, nil
-}
-
-// SendResp sends response
-func (ctx *UnixSocketContext) SendResp(messageID string, content interface{}) {
-
-}
-
-// SendToGroup broadcasts the message to all of group members
-func (ctx *UnixSocketContext) SendToGroup(moduleType string, content interface{}) {
-
-}
diff --git a/vendor/github.com/kubeedge/beehive/pkg/core/core.go b/vendor/github.com/kubeedge/beehive/pkg/core/core.go
index 47b6abd22..0ff8b53ff 100644
--- a/vendor/github.com/kubeedge/beehive/pkg/core/core.go
+++ b/vendor/github.com/kubeedge/beehive/pkg/core/core.go
@@ -7,39 +7,60 @@ import (
"k8s.io/klog/v2"
+ "github.com/kubeedge/beehive/pkg/common"
beehiveContext "github.com/kubeedge/beehive/pkg/core/context"
)
// StartModules starts modules that are registered
func StartModules() {
- beehiveContext.InitContext(beehiveContext.MsgCtxTypeChannel)
+ // only register channel mode, if want to use socket mode, we should also pass in common.MsgCtxTypeUS parameter
+ beehiveContext.InitContext([]string{common.MsgCtxTypeChannel})
modules := GetModules()
+
for name, module := range modules {
- //Init the module
- beehiveContext.AddModule(name)
- //Assemble typeChannels for sendToGroup
- beehiveContext.AddModuleGroup(name, module.Group())
- go module.Start()
- klog.Infof("Starting module %v", name)
+ var m common.ModuleInfo
+ switch module.contextType {
+ case common.MsgCtxTypeChannel:
+ m = common.ModuleInfo{
+ ModuleName: name,
+ ModuleType: module.contextType,
+ }
+ case common.MsgCtxTypeUS:
+ m = common.ModuleInfo{
+ ModuleName: name,
+ ModuleType: module.contextType,
+ // the below field ModuleSocket is only required for using socket.
+ ModuleSocket: common.ModuleSocket{
+ IsRemote: module.remote,
+ },
+ }
+ default:
+ klog.Exitf("unsupported context type: %s", module.contextType)
+ }
+
+ beehiveContext.AddModule(&m)
+ beehiveContext.AddModuleGroup(name, module.module.Group())
+
+ go moduleKeeper(name, module, m)
+ klog.Infof("starting module %s", name)
}
}
// GracefulShutdown is if it gets the special signals it does modules cleanup
func GracefulShutdown() {
- c := make(chan os.Signal)
+ c := make(chan os.Signal, 1)
signal.Notify(c, syscall.SIGINT, syscall.SIGHUP, syscall.SIGTERM,
syscall.SIGQUIT, syscall.SIGILL, syscall.SIGTRAP, syscall.SIGABRT)
- select {
- case s := <-c:
- klog.Infof("Get os signal %v", s.String())
- //Cleanup each modules
- beehiveContext.Cancel()
- modules := GetModules()
- for name, _ := range modules {
- klog.Infof("Cleanup module %v", name)
- beehiveContext.Cleanup(name)
- }
+ s := <-c
+ klog.Infof("Get os signal %v", s.String())
+
+ // Cleanup each modules
+ beehiveContext.Cancel()
+ modules := GetModules()
+ for name := range modules {
+ klog.Infof("Cleanup module %v", name)
+ beehiveContext.Cleanup(name)
}
}
@@ -50,3 +71,16 @@ func Run() {
// monitor system signal and shutdown gracefully
GracefulShutdown()
}
+
+func moduleKeeper(name string, moduleInfo *ModuleInfo, m common.ModuleInfo) {
+ for {
+ moduleInfo.module.Start()
+ // local modules are always online
+ if !moduleInfo.remote {
+ return
+ }
+ // try to add module for remote modules
+ beehiveContext.AddModule(&m)
+ beehiveContext.AddModuleGroup(name, moduleInfo.module.Group())
+ }
+}
diff --git a/vendor/github.com/kubeedge/beehive/pkg/core/model/message.go b/vendor/github.com/kubeedge/beehive/pkg/core/model/message.go
index db53c2247..172c819d9 100644
--- a/vendor/github.com/kubeedge/beehive/pkg/core/model/message.go
+++ b/vendor/github.com/kubeedge/beehive/pkg/core/model/message.go
@@ -1,9 +1,13 @@
package model
import (
+ "bytes"
+ "encoding/json"
+ "fmt"
+ "reflect"
"time"
- uuid "github.com/satori/go.uuid"
+ "github.com/google/uuid"
)
// Constants for database operations and resource type settings
@@ -12,16 +16,25 @@ const (
DeleteOperation = "delete"
QueryOperation = "query"
UpdateOperation = "update"
+ PatchOperation = "patch"
+ UploadOperation = "upload"
ResponseOperation = "response"
ResponseErrorOperation = "error"
- ResourceTypePod = "pod"
- ResourceTypeConfigmap = "configmap"
- ResourceTypeSecret = "secret"
- ResourceTypeNode = "node"
- ResourceTypePodlist = "podlist"
- ResourceTypePodStatus = "podstatus"
- ResourceTypeNodeStatus = "nodestatus"
+ ResourceTypePod = "pod"
+ ResourceTypeConfigmap = "configmap"
+ ResourceTypeServiceAccountToken = "serviceaccounttoken"
+ ResourceTypeSecret = "secret"
+ ResourceTypeNode = "node"
+ ResourceTypePodlist = "podlist"
+ ResourceTypePodStatus = "podstatus"
+ ResourceTypePodPatch = "podpatch"
+ ResourceTypeNodeStatus = "nodestatus"
+ ResourceTypeNodePatch = "nodepatch"
+ ResourceTypeRule = "rule"
+ ResourceTypeRuleEndpoint = "ruleendpoint"
+ ResourceTypeRuleStatus = "rulestatus"
+ ResourceTypeLease = "lease"
)
// Message struct
@@ -35,6 +48,8 @@ type Message struct {
type MessageRoute struct {
// where the message come from
Source string `json:"source,omitempty"`
+ // where the message will send to
+ Destination string `json:"destination,omitempty"`
// where the message will broadcast to
Group string `json:"group,omitempty"`
@@ -59,6 +74,9 @@ type MessageHeader struct {
ResourceVersion string `json:"resourceversion,omitempty"`
// the flag will be set in sendsync
Sync bool `json:"sync,omitempty"`
+ // message type indicates the context type that delivers the message, such as channel, unixsocket, etc.
+ // if the value is empty, the channel context type will be used.
+ MessageType string `json:"type,omitempty"`
}
// BuildRouter sets route and resource operation in message
@@ -68,6 +86,28 @@ func (msg *Message) BuildRouter(source, group, res, opr string) *Message {
return msg
}
+// SetType set message context type
+func (msg *Message) SetType(msgType string) *Message {
+ msg.Header.MessageType = msgType
+ return msg
+}
+
+// SetDestination set destination
+func (msg *Message) SetDestination(dest string) *Message {
+ msg.Router.Destination = dest
+ return msg
+}
+
+// GetType get message context type
+func (msg *Message) GetType() string {
+ return msg.Header.MessageType
+}
+
+// IsEmpty is empty
+func (msg *Message) IsEmpty() bool {
+ return reflect.DeepEqual(msg, &Message{})
+}
+
// SetResourceOperation sets router resource and operation in message
func (msg *Message) SetResourceOperation(res, opr string) *Message {
msg.Router.Resource = res
@@ -118,29 +158,46 @@ func (msg *Message) GetID() string {
return msg.Header.ID
}
-//GetParentID returns message parent id
+// GetParentID returns message parent id
func (msg *Message) GetParentID() string {
return msg.Header.ParentID
}
-//GetTimestamp returns message timestamp
+// GetTimestamp returns message timestamp
func (msg *Message) GetTimestamp() int64 {
return msg.Header.Timestamp
}
-//GetContent returns message content
+// GetContent returns message content
func (msg *Message) GetContent() interface{} {
return msg.Content
}
-//GetResourceVersion returns message resource version
+// GetContentData returns message content data
+func (msg *Message) GetContentData() ([]byte, error) {
+ if data, ok := msg.Content.([]byte); ok {
+ return data, nil
+ }
+
+ if data, ok := msg.Content.(string); ok {
+ return []byte(data), nil
+ }
+
+ data, err := json.Marshal(msg.Content)
+ if err != nil {
+ return nil, fmt.Errorf("marshal message content failed: %s", err)
+ }
+ return data, nil
+}
+
+// GetResourceVersion returns message resource version
func (msg *Message) GetResourceVersion() string {
return msg.Header.ResourceVersion
}
-//UpdateID returns message object updating its ID
+// UpdateID returns message object updating its ID
func (msg *Message) UpdateID() *Message {
- msg.Header.ID = uuid.NewV4().String()
+ msg.Header.ID = uuid.New().String()
return msg
}
@@ -168,7 +225,7 @@ func NewRawMessage() *Message {
// model.NewMessage().BuildRouter().FillBody()
func NewMessage(parentID string) *Message {
msg := &Message{}
- msg.Header.ID = uuid.NewV4().String()
+ msg.Header.ID = uuid.New().String()
msg.Header.ParentID = parentID
msg.Header.Timestamp = time.Now().UnixNano() / 1e6
return msg
@@ -177,7 +234,7 @@ func NewMessage(parentID string) *Message {
// Clone a message
// only update message id
func (msg *Message) Clone(message *Message) *Message {
- msgID := uuid.NewV4().String()
+ msgID := uuid.New().String()
return NewRawMessage().BuildHeader(msgID, message.GetParentID(), message.GetTimestamp()).
BuildRouter(message.GetSource(), message.GetGroup(), message.GetResource(), message.GetOperation()).
FillBody(message.GetContent())
@@ -187,6 +244,7 @@ func (msg *Message) Clone(message *Message) *Message {
func (msg *Message) NewRespByMessage(message *Message, content interface{}) *Message {
return NewMessage(message.GetID()).SetRoute(message.GetSource(), message.GetGroup()).
SetResourceOperation(message.GetResource(), ResponseOperation).
+ SetType(message.GetType()).
FillBody(content)
}
@@ -196,3 +254,21 @@ func NewErrorMessage(message *Message, errContent string) *Message {
SetResourceOperation(message.Router.Resource, ResponseErrorOperation).
FillBody(errContent)
}
+
+// GetDestination get destination
+func (msg *Message) GetDestination() string {
+ return msg.Router.Destination
+}
+
+// String the content that you want to send
+func (msg *Message) String() string {
+ var buffer bytes.Buffer
+ buffer.WriteString("MessageID: " + msg.GetID())
+ buffer.WriteString(" ParentID: " + msg.GetParentID())
+ buffer.WriteString(" Group: " + msg.GetGroup())
+ buffer.WriteString(" Source: " + msg.GetSource())
+ buffer.WriteString(" Destination: " + msg.GetDestination())
+ buffer.WriteString(" Resource: " + msg.GetResource())
+ buffer.WriteString(" Operation: " + msg.GetOperation())
+ return buffer.String()
+}
diff --git a/vendor/github.com/kubeedge/beehive/pkg/core/module.go b/vendor/github.com/kubeedge/beehive/pkg/core/module.go
index 53e4cf8a1..e44dfc255 100644
--- a/vendor/github.com/kubeedge/beehive/pkg/core/module.go
+++ b/vendor/github.com/kubeedge/beehive/pkg/core/module.go
@@ -2,10 +2,9 @@ package core
import (
"k8s.io/klog/v2"
-)
-const (
- tryReadKeyTimes = 5
+ "github.com/kubeedge/beehive/pkg/common"
+ "github.com/kubeedge/beehive/pkg/core/socket"
)
// Module interface
@@ -18,27 +17,64 @@ type Module interface {
var (
// Modules map
- modules map[string]Module
- disabledModules map[string]Module
+ modules map[string]*ModuleInfo
+ disabledModules map[string]*ModuleInfo
)
func init() {
- modules = make(map[string]Module)
- disabledModules = make(map[string]Module)
+ modules = make(map[string]*ModuleInfo)
+ disabledModules = make(map[string]*ModuleInfo)
+}
+
+// ModuleInfo represent a module info
+type ModuleInfo struct {
+ contextType string
+ remote bool
+ module Module
}
// Register register module
-func Register(m Module) {
+// if not passed in parameter opts, default contextType is "channel"
+func Register(m Module, opts ...string) {
+ info := &ModuleInfo{
+ module: m,
+ contextType: common.MsgCtxTypeChannel,
+ remote: false,
+ }
+
+ if len(opts) > 0 {
+ info.contextType = opts[0]
+ info.remote = true
+ }
+
if m.Enable() {
- modules[m.Name()] = m
- klog.Infof("Module %v registered successfully", m.Name())
+ modules[m.Name()] = info
+ klog.Infof("Module %s registered successfully", m.Name())
} else {
- disabledModules[m.Name()] = m
+ disabledModules[m.Name()] = info
klog.Warningf("Module %v is disabled, do not register", m.Name())
}
}
// GetModules gets modules map
-func GetModules() map[string]Module {
+func GetModules() map[string]*ModuleInfo {
return modules
}
+
+// GetModule gets module
+func (m *ModuleInfo) GetModule() Module {
+ return m.module
+}
+
+// GetModuleExchange return module exchange
+func GetModuleExchange() *socket.ModuleExchange {
+ exchange := socket.ModuleExchange{
+ Groups: make(map[string][]string),
+ }
+ for name, moduleInfo := range modules {
+ exchange.Modules = append(exchange.Modules, name)
+ group := moduleInfo.module.Group()
+ exchange.Groups[group] = append(exchange.Groups[group], name)
+ }
+ return &exchange
+}
diff --git a/vendor/github.com/kubeedge/beehive/pkg/core/socket/broker/broker.go b/vendor/github.com/kubeedge/beehive/pkg/core/socket/broker/broker.go
new file mode 100644
index 000000000..a22411352
--- /dev/null
+++ b/vendor/github.com/kubeedge/beehive/pkg/core/socket/broker/broker.go
@@ -0,0 +1,143 @@
+package broker
+
+import (
+ "crypto/tls"
+ "fmt"
+ "net/http"
+ "time"
+
+ "k8s.io/klog/v2"
+
+ "github.com/kubeedge/beehive/pkg/core/model"
+ "github.com/kubeedge/beehive/pkg/core/socket/synckeeper"
+ "github.com/kubeedge/beehive/pkg/core/socket/wrapper"
+)
+
+const (
+ syncMessageTimeoutDefault = 10 * time.Second
+)
+
+// RemoteBroker remote broker
+type RemoteBroker struct {
+ keeper *synckeeper.Keeper
+}
+
+// ConnectOptions connect options
+type ConnectOptions struct {
+ Address string
+ MessageType string
+ BufferSize int
+ Cert tls.Certificate
+
+ // for websocket/http
+ RequestHeader http.Header
+}
+
+// ConnectFunc connect func
+type ConnectFunc func(ConnectOptions) (interface{}, error)
+
+// NewRemoteBroker new remote broker
+func NewRemoteBroker() *RemoteBroker {
+ return &RemoteBroker{
+ keeper: synckeeper.NewKeeper(),
+ }
+}
+
+// Connect connect
+func (broker *RemoteBroker) Connect(opts ConnectOptions, connect ConnectFunc) wrapper.Conn {
+ conn, err := connect(opts)
+ if err != nil {
+ klog.Errorf("failed to connect, address: %s; error:%+v", opts.Address, err)
+ return nil
+ }
+ return wrapper.NewWrapper(opts.MessageType, conn, opts.BufferSize)
+}
+
+// Send send
+func (broker *RemoteBroker) Send(conn wrapper.Conn, message model.Message) error {
+ //log.LOGGER.Infof("connection: %+v message: %+v", conn, message)
+ err := conn.WriteJSON(&message)
+ if err != nil {
+ klog.Errorf("failed to write with error %+v", err)
+ return fmt.Errorf("failed to write, error: %+v", err)
+ }
+ return nil
+}
+
+// Receive receive
+func (broker *RemoteBroker) Receive(conn wrapper.Conn) (model.Message, error) {
+ var message model.Message
+ for {
+ err := conn.SetReadDeadline(time.Time{})
+ err = conn.ReadJSON(&message)
+ if err != nil {
+ klog.Errorf("failed to read, error:%+v", err)
+ return model.Message{}, fmt.Errorf("failed to read, error: %+v", err)
+ }
+
+ isResponse := broker.keeper.IsSyncResponse(message.GetParentID())
+ if !isResponse {
+ return message, nil
+ }
+
+ err = broker.keeper.SendToKeepChannel(message)
+ }
+}
+
+// SendSyncInternal sync mode
+func (broker *RemoteBroker) SendSyncInternal(conn wrapper.Conn, message model.Message, timeout time.Duration) (model.Message, error) {
+ if timeout <= 0 {
+ timeout = syncMessageTimeoutDefault
+ }
+
+ // make sure to set sync flag
+ message.Header.Sync = true
+
+ err := conn.WriteJSON(&message)
+ if err != nil {
+ klog.Errorf("failed to write with error %+v", err)
+ return model.Message{}, fmt.Errorf("failed to write, error: %+v", err)
+ }
+
+ deadline := time.Now().Add(timeout)
+ err = conn.SetReadDeadline(deadline)
+ var response model.Message
+ err = conn.ReadJSON(&response)
+ if err != nil {
+ klog.Errorf("failed to read with error %+v", err)
+ return model.Message{}, fmt.Errorf("failed to read, error: %+v", err)
+ }
+
+ return response, nil
+}
+
+// SendSync sync mode
+func (broker *RemoteBroker) SendSync(conn wrapper.Conn, message model.Message, timeout time.Duration) (model.Message, error) {
+ if timeout <= 0 {
+ timeout = syncMessageTimeoutDefault
+ }
+
+ deadline := time.Now().Add(timeout)
+
+ // make sure to set sync flag
+ message.Header.Sync = true
+
+ err := conn.WriteJSON(&message)
+ if err != nil {
+ klog.Errorf("failed to write with error %+v", err)
+ return model.Message{}, fmt.Errorf("failed to write, error: %+v", err)
+ }
+
+ tempChannel := broker.keeper.AddKeepChannel(message.GetID())
+ sendTimer := time.NewTimer(time.Until(deadline))
+ select {
+ case response := <-tempChannel:
+ sendTimer.Stop()
+ broker.keeper.DeleteKeepChannel(response.GetParentID())
+ return response, nil
+ case <-sendTimer.C:
+ klog.Warningf("timeout to receive response for message: %s", message.String())
+ broker.keeper.DeleteKeepChannel(message.GetID())
+ return model.Message{}, fmt.Errorf("timeout to receive response for message: %s", message.String())
+ }
+}
diff --git a/vendor/github.com/kubeedge/beehive/pkg/core/socket/config/config.go b/vendor/github.com/kubeedge/beehive/pkg/core/socket/config/config.go
new file mode 100644
index 000000000..c6dace012
--- /dev/null
+++ b/vendor/github.com/kubeedge/beehive/pkg/core/socket/config/config.go
@@ -0,0 +1,88 @@
+package config
+
+import (
+ "fmt"
+ "os"
+
+ "k8s.io/klog/v2"
+ "sigs.k8s.io/yaml"
+)
+
+// SocketConfig socket config
+type SocketConfig struct {
+ ModuleName string `json:"module"`
+ Server bool `json:"server"`
+ Address string `json:"address"`
+ SocketType string `json:"sockettype,omitempty"`
+ ConnNumberMax int `json:"connmax"`
+ BufferSize int `json:"buffersize,omitempty"`
+ CaRoot string `json:"ca,omitempty"`
+ Cert string `json:"cert,omitempty"`
+ Key string `json:"key,omitempty"`
+}
+
+// BuildinModuleConfig buildin module config
+type BuildinModuleConfig struct {
+ // socket
+ socketList []SocketConfig
+}
+
+func init() {
+ var filepath string
+ filepath = os.Getenv("SOCKET_MODULE_CONFIG")
+ if filepath == "" {
+ filepath = "/etc/kubeedge/config/socket_module.yaml"
+ if _, err := os.Stat(filepath); err != nil {
+ return
+ }
+ }
+
+ buildinModuleConfig = InitBuildinModuleConfig(filepath)
+}
+
+var (
+ buildinModuleConfig *BuildinModuleConfig
+)
+
+// InitBuildinModuleConfig init buildin module config
+func InitBuildinModuleConfig(filepath string) *BuildinModuleConfig {
+ moduleConfig := BuildinModuleConfig{}
+ data, err := os.ReadFile(filepath)
+ if err != nil {
+ klog.Errorf("failed to read file %v: %v", filepath, err)
+ return nil
+ }
+ err = yaml.Unmarshal(data, &moduleConfig.socketList)
+ if err != nil {
+ klog.Errorf("failed to yaml unmarshal config: %v", err)
+ return nil
+ }
+
+ return &moduleConfig
+}
+
+// GetClientSocketConfig get client socket config
+func GetClientSocketConfig(module string) (SocketConfig, error) {
+ for _, socketConfig := range buildinModuleConfig.socketList {
+ if socketConfig.ModuleName == module && !socketConfig.Server {
+ return socketConfig, nil
+ }
+ }
+
+ return SocketConfig{}, fmt.Errorf("failed to get socket config by name(%s)", module)
+}
+
+// GetServerSocketConfig get server socket config
+func GetServerSocketConfig() ([]SocketConfig, error) {
+ var serversSocket []SocketConfig
+ for _, socketConfig := range buildinModuleConfig.socketList {
+ if socketConfig.Server {
+ serversSocket = append(serversSocket, socketConfig)
+ }
+ }
+
+ if len(serversSocket) != 0 {
+ return serversSocket, nil
+ }
+ return []SocketConfig{}, fmt.Errorf("failed to get socket config")
+}
diff --git a/vendor/github.com/kubeedge/beehive/pkg/core/socket/context_socket.go b/vendor/github.com/kubeedge/beehive/pkg/core/socket/context_socket.go
new file mode 100644
index 000000000..931ff0142
--- /dev/null
+++ b/vendor/github.com/kubeedge/beehive/pkg/core/socket/context_socket.go
@@ -0,0 +1,376 @@
+package socket
+
+import (
+ "crypto/tls"
+ "encoding/json"
+ "fmt"
+ "sync"
+ "time"
+
+ "k8s.io/klog/v2"
+
+ "github.com/kubeedge/beehive/pkg/common"
+ "github.com/kubeedge/beehive/pkg/core/model"
+ "github.com/kubeedge/beehive/pkg/core/socket/broker"
+ "github.com/kubeedge/beehive/pkg/core/socket/config"
+ "github.com/kubeedge/beehive/pkg/core/socket/store"
+ "github.com/kubeedge/beehive/pkg/core/socket/wrapper"
+)
+
+// Context is object for Context Socket
+type Context struct {
+ // module -> context
+ contexts map[string]*context
+ // group -> context
+ groups map[string]*context
+ sync.RWMutex
+}
+
+var globalSocketContext Context
+var once = sync.Once{}
+
+func InitSocketContext() *Context {
+ once.Do(func() {
+ globalSocketContext.contexts = make(map[string]*context)
+ globalSocketContext.groups = make(map[string]*context)
+ })
+ return &globalSocketContext
+}
+
+func (s *Context) AddModule(info *common.ModuleInfo) {
+ name := info.ModuleName
+ s.setContext(name)
+ context := s.getContext(name)
+ if !info.IsRemote {
+ context.AddModule(name, info.Connection)
+ } else {
+ context.AddModuleRemote(name)
+ }
+}
+
+// AddModuleGroup add module group
+func (s *Context) AddModuleGroup(module, group string) {
+ s.Lock()
+ s.groups[module] = s.contexts[module]
+ s.Unlock()
+
+ s.getContext(module).AddModuleGroup(module, group)
+}
+
+// Cleanup cleanup
+func (s *Context) Cleanup(module string) {
+ s.getContext(module).Cleanup(module)
+}
+
+// Send send
+func (s *Context) Send(module string, message model.Message) {
+ s.getContext(module).Send(module, message)
+}
+
+// Receive receive
+func (s *Context) Receive(module string) (model.Message, error) {
+ return s.getContext(module).Receive(module)
+}
+
+// SendSync send sync
+func (s *Context) SendSync(module string, message model.Message, timeout time.Duration) (model.Message, error) {
+ return s.getContext(module).SendSync(module, message, timeout)
+}
+
+// SendResp send the response that got by NewRespByMessage
+func (s *Context) SendResp(message model.Message) {
+ module := message.GetSource()
+ s.getContext(module).SendResp(message)
+}
+
+// SendToGroup send to group
+func (s *Context) SendToGroup(group string, message model.Message) {
+ s.getGroupContext(group).SendToGroup(group, message)
+}
+
+// SendToGroupSync send to group sync
+func (s *Context) SendToGroupSync(module string, message model.Message, timeout time.Duration) error {
+ return s.getContext(module).SendToGroupSync(module, message, timeout)
+}
+
+func (s *Context) getGroupContext(group string) *context {
+ s.RLock()
+ defer s.RUnlock()
+ return s.groups[group]
+}
+
+func (s *Context) getContext(module string) *context {
+ s.RLock()
+ defer s.RUnlock()
+ return s.contexts[module]
+}
+
+func (s *Context) setContext(module string) {
+ s.Lock()
+ defer s.Unlock()
+ s.contexts[module] = newContext(module)
+}
+
+// context module socket
+type context struct {
+ name string
+ address string
+ moduleType string
+ bufferSize int
+
+ certificate tls.Certificate
+ store *store.PipeStore
+ broker *broker.RemoteBroker
+}
+
+// newContext new module socket
+func newContext(module string) *context {
+ sConfig, err := config.GetClientSocketConfig(module)
+ if err != nil {
+ klog.Errorf("failed to get config with error %+v", err)
+ return nil
+ }
+
+ certificate, err := getCert(&sConfig)
+ if err != nil {
+ klog.Errorf("failed to get cert with error %+v", err)
+ }
+
+ remoteBroker := broker.NewRemoteBroker()
+
+ return &context{
+ name: sConfig.ModuleName,
+ moduleType: sConfig.SocketType,
+ address: sConfig.Address,
+ bufferSize: sConfig.BufferSize,
+ certificate: certificate,
+ broker: remoteBroker,
+ store: store.NewPipeStore(),
+ }
+}
+
+// AddModuleRemote add module remote
+func (m *context) AddModuleRemote(module string) {
+ klog.Infof("add remote module: %s", module)
+ conn := m.Connect(module, GetConnectFunc(m.moduleType))
+ if conn == nil {
+ // unreachable in practice: Connect retries until it succeeds
+ klog.Errorf("failed to connect")
+ }
+}
+
+// AddModule add module
+func (m *context) AddModule(module string, usConn interface{}) {
+ klog.Infof("add module: %v", module)
+ conn, ok := usConn.(wrapper.Conn)
+ if !ok {
+ klog.Errorf("failed to add module, bad us conn")
+ return
+ }
+ m.store.Add(module, conn)
+}
+
+// AddModuleGroup add module group
+func (m *context) AddModuleGroup(module, group string) {
+ klog.Infof("add module(%v) to group(%v)", module, group)
+ pipeInfo, err := m.store.Get(module)
+ if err != nil {
+ klog.Warningf("bad module name %s", module)
+ return
+ }
+
+ conn := pipeInfo.Wrapper()
+ if conn != nil {
+ m.store.AddGroup(module, group, conn)
+ }
+}
+
+// Cleanup cleanup
+func (m *context) Cleanup(module string) {
+ klog.Infof("clean up module: %s", module)
+ pipeInfo, err := m.store.Get(module)
+ if err != nil {
+ return
+ }
+
+ conn := pipeInfo.Wrapper()
+ if conn != nil {
+ err = conn.Close()
+ }
+ m.store.Delete(module)
+}
+
+// Send send
+func (m *context) Send(module string, message model.Message) {
+ pipeInfo, err := m.store.Get(module)
+ if err != nil {
+ klog.Warningf("failed to get module %s", module)
+ return
+ }
+ message.SetType(m.moduleType)
+ message.SetDestination(module)
+ conn := pipeInfo.Wrapper()
+ if conn != nil {
+ err = m.broker.Send(conn, message)
+ return
+ }
+ klog.Warningf("bad module name %s", module)
+}
+
+// Receive receive
+func (m *context) Receive(module string) (model.Message, error) {
+ pipeInfo, err := m.store.Get(module)
+ if err != nil {
+ klog.Warningf("failed to get module pipe: %s", module)
+ return model.Message{}, fmt.Errorf("failed to get module pipe: %v", err)
+ }
+
+ conn := pipeInfo.Wrapper()
+ if conn != nil {
+ return m.broker.Receive(conn)
+ }
+
+ klog.Warningf("bad module name: %s", module)
+ return model.Message{}, fmt.Errorf("bad module name(%s)", module)
+}
+
+// SendSync send sync
+func (m *context) SendSync(module string, message model.Message, timeout time.Duration) (model.Message, error) {
+ pipeInfo, err := m.store.Get(module)
+ if err != nil {
+ klog.Warningf("failed to get module pipe: %s", module)
+ return model.Message{}, fmt.Errorf("failed to get module pipe: %v", err)
+ }
+
+ conn := pipeInfo.Wrapper()
+ if conn == nil {
+ klog.Warningf("bad module name: %s", module)
+ return model.Message{}, fmt.Errorf("bad module name(%s)", module)
+ }
+ message.SetType(m.moduleType)
+ message.SetDestination(module)
+ return m.broker.SendSync(conn, message, timeout)
+}
+
+// SendResp send the response that got by NewRespByMessage
+func (m *context) SendResp(message model.Message) {
+ pipeInfo, err := m.store.Get(message.GetSource())
+ if err != nil {
+ klog.Warningf("failed to get module:%s", message.GetSource())
+ return
+ }
+
+ conn := pipeInfo.Wrapper()
+ if conn == nil {
+ klog.Warningf("bad module name:%s", message.GetSource())
+ return
+ }
+ message.SetDestination(message.GetSource())
+ err = m.broker.Send(conn, message)
+}
+
+// SendToGroup send to group
+func (m *context) SendToGroup(group string, message model.Message) {
+ var err error
+ walkFunc := func(module string, pipe store.PipeInfo) error {
+ conn := pipe.Wrapper()
+ if conn == nil {
+ klog.Warningf("bad pipe")
+ return nil
+ }
+ message.SetDestination(module)
+ err = m.broker.Send(conn, message)
+ if err != nil {
+ return err
+ }
+ return nil
+ }
+
+ err = m.store.WalkGroup(group, walkFunc)
+}
+
+// SendToGroupSync send to group sync
+func (*context) SendToGroupSync(moduleType string, message model.Message, timeout time.Duration) error {
+ return fmt.Errorf("not supported now")
+}
+
+// ModuleExchange module exchange
+type ModuleExchange struct {
+ Modules []string `json:"modules"`
+ Groups map[string][]string `json:"groups"`
+}
+
+func (m *context) exchangeModuleInfo(conn wrapper.Conn, module string) error {
+ moduleMsg := model.NewMessage("").
+ BuildRouter(module, "", common.ResourceTypeModule, common.OperationTypeModule).
+ SetType(m.moduleType).
+ FillBody("")
+ resp, err := m.broker.SendSyncInternal(conn, *moduleMsg, 0)
+ if err != nil {
+ klog.Errorf("failed to send module message with error %+v", err)
+ return fmt.Errorf("failed to send module message, response:%+v, error: %+v", resp, err)
+ }
+
+ var exchange ModuleExchange
+ bytes, err := json.Marshal(resp.GetContent())
+ if err != nil {
+ klog.Errorf("failed to marshal response with error %+v", err)
+ return fmt.Errorf("failed to marshal response, error: %+v", err)
+ }
+
+ err = json.Unmarshal(bytes, &exchange)
+ if err != nil {
+ klog.Errorf("bad modules info from remote with error %+v", err)
+ return fmt.Errorf("bad modules info from remote %+v with error %s", resp, err.Error())
+ }
+
+ // add modules into store
+ // all the remote module use the conn from this side
+ for _, name := range exchange.Modules {
+ if name == module {
+ continue
+ }
+ klog.Infof("socket module: %s", name)
+ m.store.Add(name, conn)
+ }
+
+ // add group into store
+ for group, modules := range exchange.Groups {
+ for _, module := range modules {
+ m.store.AddGroup(module, group, conn)
+ }
+ }
+
+ klog.Infof("success to send module message")
+ return nil
+}
+
+func (m *context) Connect(module string, connect broker.ConnectFunc) wrapper.Conn {
+ opts := broker.ConnectOptions{
+ Address: m.address,
+ MessageType: m.moduleType,
+ BufferSize: m.bufferSize,
+ Cert: m.certificate,
+ }
+
+ for {
+ conn := m.broker.Connect(opts, connect)
+ if conn == nil {
+ time.Sleep(connectPeriod)
+ continue
+ }
+
+ m.AddModule(module, conn)
+
+ // send module message
+ err := m.exchangeModuleInfo(conn, module)
+ if err == nil {
+ return conn
+ }
+ klog.Errorf("error to connect with %+v", err)
+
+ // try to redial
+ err = conn.Close()
+ time.Sleep(connectPeriod)
+ }
+}
diff --git a/vendor/github.com/kubeedge/beehive/pkg/core/socket/helper.go b/vendor/github.com/kubeedge/beehive/pkg/core/socket/helper.go
new file mode 100644
index 000000000..166e54861
--- /dev/null
+++ b/vendor/github.com/kubeedge/beehive/pkg/core/socket/helper.go
@@ -0,0 +1,57 @@
+package socket
+
+import (
+ "crypto/tls"
+ "fmt"
+ "net"
+ "time"
+
+ "k8s.io/klog/v2"
+
+ "github.com/kubeedge/beehive/pkg/common"
+ "github.com/kubeedge/beehive/pkg/core/socket/broker"
+ "github.com/kubeedge/beehive/pkg/core/socket/config"
+)
+
+const (
+ connectPeriod = 5 * time.Second
+
+ // TODO: configurable
+ HandshakeTimeout = 60 * time.Second
+)
+
+func getCert(config *config.SocketConfig) (tls.Certificate, error) {
+ if config.Key == "" &&
+ config.Cert == "" {
+ return tls.Certificate{}, nil
+ }
+
+ var err error
+ var certificate tls.Certificate
+ if config.Cert != "" && config.Key != "" {
+ certificate, err = tls.LoadX509KeyPair(config.Cert, config.Key)
+ } else {
+ err = fmt.Errorf("failed to get x509 key pair")
+ }
+ return certificate, err
+}
+
+// GetConnectFunc get connect func
+func GetConnectFunc(moduleType string) broker.ConnectFunc {
+ switch moduleType {
+ case common.MsgCtxTypeUS:
+ return Connect
+ }
+ klog.Warningf("not supported module type: %v", moduleType)
+ return nil
+}
+
+// Connect socket connect
+func Connect(opts broker.ConnectOptions) (interface{}, error) {
+ conn, err := net.Dial(opts.MessageType, opts.Address)
+ if err != nil {
+ klog.Errorf("failed to dail addrs: %s", opts.Address)
+ return nil, err
+ }
+ return conn, nil
+}
diff --git a/vendor/github.com/kubeedge/beehive/pkg/core/socket/store/pipe.go b/vendor/github.com/kubeedge/beehive/pkg/core/socket/store/pipe.go
new file mode 100644
index 000000000..72c269831
--- /dev/null
+++ b/vendor/github.com/kubeedge/beehive/pkg/core/socket/store/pipe.go
@@ -0,0 +1,108 @@
+package store
+
+import (
+ "fmt"
+ "sync"
+
+ "k8s.io/klog/v2"
+)
+
+// PipeStore pipe store
+type PipeStore struct {
+ pipeMap map[string]PipeInfo
+ pipeMapLock sync.RWMutex
+ groupPipeMap map[string]map[string]PipeInfo
+ groupPipeMapLock sync.RWMutex
+}
+
+// NewPipeStore new pipe store
+func NewPipeStore() *PipeStore {
+ return &PipeStore{
+ pipeMap: make(map[string]PipeInfo),
+ groupPipeMap: make(map[string]map[string]PipeInfo),
+ }
+}
+
+// Add add
+func (s *PipeStore) Add(module string, pipe interface{}) {
+ s.pipeMapLock.Lock()
+ defer s.pipeMapLock.Unlock()
+ s.pipeMap[module] = PipeInfo{pipe: pipe}
+}
+
+// Delete delete
+func (s *PipeStore) Delete(module string) {
+ // delete module conn from conn map
+ s.pipeMapLock.Lock()
+ _, exist := s.pipeMap[module]
+ if !exist {
+ klog.Warningf("failed to get pipe, module: %s", module)
+ return
+ }
+ delete(s.pipeMap, module)
+ s.pipeMapLock.Unlock()
+
+ // delete module conn from group conn map
+ s.groupPipeMapLock.Lock()
+ for _, moduleMap := range s.groupPipeMap {
+ if _, exist := moduleMap[module]; exist {
+ delete(moduleMap, module)
+ break
+ }
+ }
+ s.groupPipeMapLock.Unlock()
+}
+
+// Get get
+func (s *PipeStore) Get(module string) (PipeInfo, error) {
+ s.pipeMapLock.RLock()
+ defer s.pipeMapLock.RUnlock()
+
+ if info, exist := s.pipeMap[module]; exist {
+ return info, nil
+ }
+ return PipeInfo{}, fmt.Errorf("failed to get module(%s)", module)
+}
+
+// AddGroup add group
+func (s *PipeStore) AddGroup(module, group string, pipe interface{}) {
+ s.groupPipeMapLock.Lock()
+ defer s.groupPipeMapLock.Unlock()
+
+ if _, exist := s.groupPipeMap[group]; !exist {
+ s.groupPipeMap[group] = make(map[string]PipeInfo)
+ }
+ s.groupPipeMap[group][module] = PipeInfo{pipe: pipe}
+}
+
+// GetGroup get group
+func (s *PipeStore) GetGroup(group string) map[string]PipeInfo {
+ s.groupPipeMapLock.RLock()
+ defer s.groupPipeMapLock.RUnlock()
+
+ if _, exist := s.groupPipeMap[group]; exist {
+ return s.groupPipeMap[group]
+ }
+ klog.Warningf("failed to get group, type: %s", group)
+ return nil
+}
+
+// WalkGroup walk group
+func (s *PipeStore) WalkGroup(group string, walkFunc func(string, PipeInfo) error) error {
+ s.groupPipeMapLock.RLock()
+ defer s.groupPipeMapLock.RUnlock()
+
+ if _, exist := s.groupPipeMap[group]; !exist {
+ klog.Warningf("failed to get group, type: %s", group)
+ return fmt.Errorf("failed to get group, type(%s)", group)
+ }
+
+ for module, pipe := range s.groupPipeMap[group] {
+ err := walkFunc(module, pipe)
+ if err != nil {
+ return err
+ }
+ }
+
+ return nil
+}
diff --git a/vendor/github.com/kubeedge/beehive/pkg/core/socket/store/pipeinfo.go b/vendor/github.com/kubeedge/beehive/pkg/core/socket/store/pipeinfo.go
new file mode 100644
index 000000000..5018ccb06
--- /dev/null
+++ b/vendor/github.com/kubeedge/beehive/pkg/core/socket/store/pipeinfo.go
@@ -0,0 +1,42 @@
+package store
+
+import (
+ "net"
+
+ "k8s.io/klog/v2"
+
+ "github.com/kubeedge/beehive/pkg/core/model"
+ "github.com/kubeedge/beehive/pkg/core/socket/wrapper"
+)
+
+// PipeInfo pipe info
+type PipeInfo struct {
+ pipe interface{}
+}
+
+// Channel channel
+func (info *PipeInfo) Channel() chan model.Message {
+ if ch, ok := info.pipe.(chan model.Message); ok {
+ return ch
+ }
+ klog.Warning("failed to get channel")
+ return nil
+}
+
+// Socket socket
+func (info *PipeInfo) Socket() net.Conn {
+ if socket, ok := info.pipe.(net.Conn); ok {
+ return socket
+ }
+ klog.Warning("failed to get socket")
+ return nil
+}
+
+// Wrapper wrapper
+func (info *PipeInfo) Wrapper() wrapper.Conn {
+ if socket, ok := info.pipe.(wrapper.Conn); ok {
+ return socket
+ }
+ klog.Warning("failed to get conn wrapper")
+ return nil
+}
diff --git a/vendor/github.com/kubeedge/beehive/pkg/core/socket/synckeeper/keeper.go b/vendor/github.com/kubeedge/beehive/pkg/core/socket/synckeeper/keeper.go
new file mode 100644
index 000000000..5d38fd1a4
--- /dev/null
+++ b/vendor/github.com/kubeedge/beehive/pkg/core/socket/synckeeper/keeper.go
@@ -0,0 +1,68 @@
+package synckeeper
+
+import (
+ "fmt"
+ "sync"
+
+ "k8s.io/klog/v2"
+
+ "github.com/kubeedge/beehive/pkg/core/model"
+)
+
+// Keeper keeper
+type Keeper struct {
+ syncKeeper map[string]chan model.Message
+ keeperLock sync.RWMutex
+}
+
+// NewKeeper new keeper
+func NewKeeper() *Keeper {
+ return &Keeper{
+ syncKeeper: make(map[string]chan model.Message),
+ }
+}
+
+// SendToKeepChannel send to keep channel
+func (k *Keeper) SendToKeepChannel(message model.Message) error {
+ k.keeperLock.RLock()
+ defer k.keeperLock.RUnlock()
+
+ channel, exist := k.syncKeeper[message.GetParentID()]
+ if !exist {
+ klog.Errorf("failed to get sync keeper channel, message: %s", message.String())
+ return fmt.Errorf("failed to get sync keeper channel, message:%s", message.String())
+ }
+
+ // send response into synckeep channel
+ select {
+ case channel <- message:
+ default:
+ klog.Errorf("failed to send message to sync keep channel")
+ return fmt.Errorf("failed to send message to sync keep channel")
+ }
+ return nil
+}
+
+// AddKeepChannel add keep channel
+func (k *Keeper) AddKeepChannel(msgID string) chan model.Message {
+ k.keeperLock.Lock()
+ defer k.keeperLock.Unlock()
+ tempChannel := make(chan model.Message)
+ k.syncKeeper[msgID] = tempChannel
+ return tempChannel
+}
+
+// DeleteKeepChannel delete keep channel
+func (k *Keeper) DeleteKeepChannel(msgID string) {
+ k.keeperLock.Lock()
+ defer k.keeperLock.Unlock()
+ delete(k.syncKeeper, msgID)
+}
+
+// IsSyncResponse is sync response
+func (k *Keeper) IsSyncResponse(msgID string) bool {
+ k.keeperLock.RLock()
+ defer k.keeperLock.RUnlock()
+ _, exist := k.syncKeeper[msgID]
+ return exist
+}
diff --git a/vendor/github.com/kubeedge/beehive/pkg/core/socket/wrapper/packer/packer.go b/vendor/github.com/kubeedge/beehive/pkg/core/socket/wrapper/packer/packer.go
new file mode 100644
index 000000000..790061119
--- /dev/null
+++ b/vendor/github.com/kubeedge/beehive/pkg/core/socket/wrapper/packer/packer.go
@@ -0,0 +1,114 @@
+package packer
+
+import (
+ "bytes"
+ "encoding/binary"
+ "io"
+
+ "k8s.io/klog/v2"
+)
+
+const (
+ magicSize = 4
+ versionSize = 2
+ reservedSize = 2
+
+ // MessageLenOffest message len offset
+ MessageLenOffest = magicSize + versionSize + reservedSize
+ // MessageOffset message offset
+ MessageOffset = MessageLenOffest + 4
+ // HeaderLen header len
+ HeaderLen = MessageOffset
+)
+
+var (
+ headerTags = [HeaderLen]byte{'b', 'e', 'e', 'h', 'v', '1', 'r', 'v', 0, 0, 0, 0}
+)
+
+// Packer packer
+type Packer struct {
+ Magic [magicSize]byte
+ Version [versionSize]byte
+ Reserved [reservedSize]byte
+ Length int32
+ Message []byte
+}
+
+// NewPacker new packer
+func NewPacker() *Packer {
+ return &Packer{
+ Magic: [magicSize]byte{'b', 'e', 'e', 'h'},
+ Version: [versionSize]byte{'v', '1'},
+ Reserved: [reservedSize]byte{'r', 'v'},
+ }
+}
+
+// Validate validate
+func (p *Packer) Validate(data []byte) bool {
+ if len(data) <= HeaderLen {
+ return false
+ }
+ if !bytes.Equal(data[:magicSize], p.Magic[:magicSize]) {
+ return false
+ }
+ if !bytes.Equal(data[magicSize:magicSize+versionSize], p.Version[:versionSize]) {
+ return false
+ }
+ return true
+}
+
+// Write write
+func (p *Packer) Write(writer io.Writer) error {
+ // fill message len
+ headerTags[MessageLenOffest] = byte(uint32(p.Length) >> 24)
+ headerTags[MessageLenOffest+1] = byte(uint32(p.Length) >> 16)
+ headerTags[MessageLenOffest+2] = byte(uint32(p.Length) >> 8)
+ headerTags[MessageLenOffest+3] = byte(uint32(p.Length))
+ err := binary.Write(writer, binary.BigEndian, &headerTags)
+ if err != nil {
+ return err
+ }
+ err = binary.Write(writer, binary.BigEndian, &p.Message)
+ if err != nil {
+ return err
+ }
+ return nil
+}
+
+// Read read
+func (p *Packer) Read(reader io.Reader) error {
+ err := binary.Read(reader, binary.BigEndian, &p.Magic)
+ if err != nil {
+ return err
+ }
+ err = binary.Read(reader, binary.BigEndian, &p.Version)
+ if err != nil {
+ return err
+ }
+ err = binary.Read(reader, binary.BigEndian, &p.Reserved)
+ if err != nil {
+ return err
+ }
+ err = binary.Read(reader, binary.BigEndian, &p.Length)
+ if err != nil {
+ return err
+ }
+ err = binary.Read(reader, binary.BigEndian, &p.Message)
+ if err != nil {
+ return err
+ }
+ return err
+}
+
+// GetMessageLen get message len
+func (p *Packer) GetMessageLen(data []byte) int32 {
+ length := int32(0)
+ if len(data) < MessageOffset {
+ return length
+ }
+ err := binary.Read(bytes.NewReader(data[MessageLenOffest:MessageOffset]), binary.BigEndian, &length)
+ if err != nil {
+ klog.Errorf("binary Read err %+v", err)
+ }
+ return length
+}
diff --git a/vendor/github.com/kubeedge/beehive/pkg/core/socket/wrapper/reader/package.go b/vendor/github.com/kubeedge/beehive/pkg/core/socket/wrapper/reader/package.go
new file mode 100644
index 000000000..31c7587d1
--- /dev/null
+++ b/vendor/github.com/kubeedge/beehive/pkg/core/socket/wrapper/reader/package.go
@@ -0,0 +1,79 @@
+package reader
+
+import (
+ "bufio"
+ "bytes"
+ "encoding/json"
+ "fmt"
+ "net"
+ "sync"
+
+ "k8s.io/klog/v2"
+
+ "github.com/kubeedge/beehive/pkg/core/socket/wrapper/packer"
+)
+
+// PackageReader package reader
+type PackageReader struct {
+ scanner *bufio.Scanner
+ lock sync.Mutex
+ packer *packer.Packer
+}
+
+// NewPackageReader new package reader
+func NewPackageReader(obj interface{}, buffSize int) *PackageReader {
+ if buffSize <= 0 {
+ klog.Errorf("bad buffer size %d", buffSize)
+ return nil
+ }
+
+ conn, ok := obj.(net.Conn)
+ if !ok {
+ klog.Errorf("bad conn obj")
+ return nil
+ }
+
+ p := packer.NewPacker()
+ splitFunc := func(data []byte, eof bool) (advance int, token []byte, err error) {
+ if !eof && p.Validate(data) {
+ length := p.GetMessageLen(data)
+ packageLen := int(length) + packer.MessageOffset
+ if packageLen <= len(data) {
+ return packageLen, data[packer.MessageOffset:packageLen], nil
+ }
+ }
+ return
+ }
+ scanner := bufio.NewScanner(conn)
+ scanner.Split(splitFunc)
+ scanner.Buffer(make([]byte, buffSize), buffSize)
+
+ return &PackageReader{
+ scanner: scanner,
+ packer: p,
+ }
+}
+
+// Read read
+func (reader *PackageReader) Read() ([]byte, error) {
+ reader.lock.Lock()
+ defer reader.lock.Unlock()
+
+ gotPackage := reader.scanner.Scan()
+ if !gotPackage || reader.scanner.Err() != nil {
+ return nil, fmt.Errorf("failed to scann package, error: %+v", reader.scanner.Err())
+ }
+ return reader.scanner.Bytes(), nil
+}
+
+// ReadJSON read json
+func (reader *PackageReader) ReadJSON(obj interface{}) error {
+ reader.lock.Lock()
+ defer reader.lock.Unlock()
+
+ gotPackage := reader.scanner.Scan()
+ if !gotPackage || reader.scanner.Err() != nil {
+ return fmt.Errorf("failed to scann package, error: %+v", reader.scanner.Err())
+ }
+ return json.NewDecoder(bytes.NewReader(reader.scanner.Bytes())).Decode(obj)
+}
diff --git a/vendor/github.com/kubeedge/beehive/pkg/core/socket/wrapper/reader/raw.go b/vendor/github.com/kubeedge/beehive/pkg/core/socket/wrapper/reader/raw.go
new file mode 100644
index 000000000..8374e9d17
--- /dev/null
+++ b/vendor/github.com/kubeedge/beehive/pkg/core/socket/wrapper/reader/raw.go
@@ -0,0 +1,64 @@
+package reader
+
+import (
+ "encoding/json"
+ "fmt"
+ "net"
+ "sync"
+
+ "k8s.io/klog/v2"
+)
+
+// RawReader raw reader
+type RawReader struct {
+ conn net.Conn
+ lock sync.Mutex
+ buffer []byte
+ buffSize int
+}
+
+// NewRawReader new raw reader
+func NewRawReader(conn interface{}, buffSize int) *RawReader {
+ if conn, ok := conn.(net.Conn); ok {
+ return &RawReader{
+ conn: conn,
+ buffSize: buffSize,
+ }
+ }
+ klog.Warning("bad conn interface")
+ return nil
+}
+
+// Read read
+func (r *RawReader) Read() ([]byte, error) {
+ r.lock.Lock()
+ defer r.lock.Unlock()
+
+ if r.buffer == nil {
+ r.buffer = make([]byte, r.buffSize)
+ }
+
+ nr, err := r.conn.Read(r.buffer)
+ if err != nil {
+ klog.Errorf("failed to read, error %+v", err)
+ return nil, fmt.Errorf("failed to read, error: %+v", err)
+ }
+ return r.buffer[:nr], nil
+}
+
+// ReadJSON read json
+func (r *RawReader) ReadJSON(obj interface{}) error {
+ //r.lock.Lock()
+ //defer r.lock.Unlock()
+ //return json.NewDecoder(r.conn).Decode(obj)
+ buf, err := r.Read()
+ if err != nil {
+ return err
+ }
+ err = json.Unmarshal(buf, obj)
+ if err != nil {
+ klog.Errorf("failed to unmarshal message, context: %s, errpr: %+v", string(buf), err)
+ return fmt.Errorf("failed to unmarshal message, error:%+v, context: %+v", err, string(buf))
+ }
+ return nil
+}
diff --git a/vendor/github.com/kubeedge/beehive/pkg/core/socket/wrapper/reader/reader.go b/vendor/github.com/kubeedge/beehive/pkg/core/socket/wrapper/reader/reader.go
new file mode 100644
index 000000000..332dc153e
--- /dev/null
+++ b/vendor/github.com/kubeedge/beehive/pkg/core/socket/wrapper/reader/reader.go
@@ -0,0 +1,30 @@
+package reader
+
+import (
+ "k8s.io/klog/v2"
+)
+
+const (
+ // ReaderTypeRaw reader type raw
+ ReaderTypeRaw = "raw"
+ // ReaderTypePackage reader type package
+ ReaderTypePackage = "package"
+)
+
+// Reader reader
+type Reader interface {
+ Read() ([]byte, error)
+ ReadJSON(obj interface{}) error
+}
+
+// NewReader new reader
+func NewReader(readerType string, conn interface{}, buffSize int) Reader {
+ switch readerType {
+ case ReaderTypeRaw:
+ return NewRawReader(conn, buffSize)
+ case ReaderTypePackage:
+ return NewPackageReader(conn, buffSize)
+ }
+ klog.Errorf("bad reader type: %s", readerType)
+ return nil
+}
diff --git a/vendor/github.com/kubeedge/beehive/pkg/core/socket/wrapper/wrapper.go b/vendor/github.com/kubeedge/beehive/pkg/core/socket/wrapper/wrapper.go
new file mode 100644
index 000000000..712be9c3c
--- /dev/null
+++ b/vendor/github.com/kubeedge/beehive/pkg/core/socket/wrapper/wrapper.go
@@ -0,0 +1,91 @@
+package wrapper
+
+import (
+ "net"
+ "time"
+
+ "k8s.io/klog/v2"
+
+ "github.com/kubeedge/beehive/pkg/core/socket/wrapper/reader"
+ "github.com/kubeedge/beehive/pkg/core/socket/wrapper/writer"
+)
+
+// Conn conn
+type Conn interface {
+ Read() ([]byte, error)
+ Write(message []byte) error
+
+ ReadJSON(obj interface{}) error
+ WriteJSON(obj interface{}) error
+
+ Close() error
+
+ SetReadDeadline(t time.Time) error
+}
+
+// ConnWrapper conn wrapper
+type ConnWrapper struct {
+ conn interface{}
+ reader reader.Reader
+ writer writer.Writer
+}
+
+// NewWrapper new wrapper
+func NewWrapper(connType string, conn interface{}, buffSize int) Conn {
+ readerType := reader.ReaderTypeRaw
+ writerType := writer.WriterTypeRaw
+
+ return &ConnWrapper{
+ conn: conn,
+ reader: reader.NewReader(readerType, conn, buffSize),
+ writer: writer.NewWriter(writerType, conn),
+ }
+}
+
+// Read read
+func (w *ConnWrapper) Read() ([]byte, error) {
+ return w.reader.Read()
+}
+
+// Write write
+func (w *ConnWrapper) Write(message []byte) error {
+ return w.writer.Write(message)
+}
+
+// ReadJSON read json
+func (w *ConnWrapper) ReadJSON(obj interface{}) error {
+ return w.reader.ReadJSON(obj)
+}
+
+// WriteJSON write json
+func (w *ConnWrapper) WriteJSON(obj interface{}) error {
+ return w.writer.WriteJSON(obj)
+}
+
+// SetReadDeadline set read deadline
+func (w *ConnWrapper) SetReadDeadline(t time.Time) error {
+	// TODO: put into Deadline
+ var err error
+ switch w.conn.(type) {
+ case net.Conn:
+ conn := w.conn.(net.Conn)
+ err = conn.SetReadDeadline(t)
+ default:
+ klog.Warningf("unsupported conn type: %T", w.conn)
+ }
+ return err
+}
+
+// Close close
+func (w *ConnWrapper) Close() error {
+ // TODO: put into Closer
+ var err error
+ switch w.conn.(type) {
+ case net.Conn:
+ conn := w.conn.(net.Conn)
+ err = conn.Close()
+ default:
+ klog.Warningf("unsupported conn type: %T", w.conn)
+ }
+ return err
+}
diff --git a/vendor/github.com/kubeedge/beehive/pkg/core/socket/wrapper/writer/package.go b/vendor/github.com/kubeedge/beehive/pkg/core/socket/wrapper/writer/package.go
new file mode 100644
index 000000000..cef23b6a6
--- /dev/null
+++ b/vendor/github.com/kubeedge/beehive/pkg/core/socket/wrapper/writer/package.go
@@ -0,0 +1,67 @@
+package writer
+
+import (
+ "encoding/json"
+ "fmt"
+ "net"
+ "sync"
+
+ "k8s.io/klog/v2"
+
+ "github.com/kubeedge/beehive/pkg/core/socket/wrapper/packer"
+)
+
+// PackageWriter package writer
+type PackageWriter struct {
+ packer *packer.Packer
+ conn net.Conn
+ lock sync.Mutex
+}
+
+// NewPackageWriter new package writer
+func NewPackageWriter(obj interface{}) *PackageWriter {
+ if conn, ok := obj.(net.Conn); ok {
+ packer := packer.NewPacker()
+ return &PackageWriter{
+ conn: conn,
+ packer: packer,
+ }
+ }
+ klog.Errorf("bad conn obj")
+ return nil
+}
+
+// Write write
+func (w *PackageWriter) Write(message []byte) error {
+ w.lock.Lock()
+ defer w.lock.Unlock()
+
+ w.packer.Message = message
+ w.packer.Length = int32(len(message))
+ err := w.packer.Write(w.conn)
+ if err != nil {
+ klog.Errorf("failed to packer with error %+v", err)
+ return fmt.Errorf("failed to packer, error:%+v", err)
+ }
+ return nil
+}
+
+// WriteJSON write json
+func (w *PackageWriter) WriteJSON(obj interface{}) error {
+ w.lock.Lock()
+ defer w.lock.Unlock()
+
+ objBytes, err := json.Marshal(obj)
+ if err != nil {
+ klog.Errorf("failed to marshal obj, error:%+v", err)
+ return err
+ }
+ w.packer.Message = objBytes
+ w.packer.Length = int32(len(objBytes))
+ err = w.packer.Write(w.conn)
+ if err != nil {
+ klog.Errorf("failed to packer, error:%+v", err)
+ return fmt.Errorf("failed to packer, error:%+v", err)
+ }
+ return nil
+}
diff --git a/vendor/github.com/kubeedge/beehive/pkg/core/socket/wrapper/writer/raw.go b/vendor/github.com/kubeedge/beehive/pkg/core/socket/wrapper/writer/raw.go
new file mode 100644
index 000000000..e69fd5772
--- /dev/null
+++ b/vendor/github.com/kubeedge/beehive/pkg/core/socket/wrapper/writer/raw.go
@@ -0,0 +1,45 @@
+package writer
+
+import (
+ "encoding/json"
+ "fmt"
+ "net"
+ "sync"
+
+ "k8s.io/klog/v2"
+)
+
+// RawWriter raw writer
+type RawWriter struct {
+ conn net.Conn
+ lock sync.Mutex
+}
+
+// NewRawWriter new raw writer
+func NewRawWriter(obj interface{}) *RawWriter {
+ if conn, ok := obj.(net.Conn); ok {
+ return &RawWriter{conn: conn}
+ }
+ klog.Errorf("bad conn ")
+ return nil
+}
+
+// Write write
+func (w *RawWriter) Write(message []byte) error {
+ w.lock.Lock()
+ defer w.lock.Unlock()
+
+ number, err := w.conn.Write(message)
+ if err != nil || number != len(message) {
+ klog.Errorf("failed to write, error:%+v", err)
+ return fmt.Errorf("failed to write, error: %+v", err)
+ }
+ return nil
+}
+
+// WriteJSON write json
+func (w *RawWriter) WriteJSON(obj interface{}) error {
+ w.lock.Lock()
+ defer w.lock.Unlock()
+ return json.NewEncoder(w.conn).Encode(obj)
+}
diff --git a/vendor/github.com/kubeedge/beehive/pkg/core/socket/wrapper/writer/writer.go b/vendor/github.com/kubeedge/beehive/pkg/core/socket/wrapper/writer/writer.go
new file mode 100644
index 000000000..a1751de6f
--- /dev/null
+++ b/vendor/github.com/kubeedge/beehive/pkg/core/socket/wrapper/writer/writer.go
@@ -0,0 +1,30 @@
+package writer
+
+import (
+ "k8s.io/klog/v2"
+)
+
+const (
+ // WriterTypeRaw writer type raw
+ WriterTypeRaw = "raw"
+ // WriterTypePackage writer type package
+ WriterTypePackage = "package"
+)
+
+// Writer writer
+type Writer interface {
+ Write(message []byte) error
+ WriteJSON(obj interface{}) error
+}
+
+// NewWriter new writer
+func NewWriter(writerType string, conn interface{}) Writer {
+ switch writerType {
+ case WriterTypeRaw:
+ return NewRawWriter(conn)
+ case WriterTypePackage:
+ return NewPackageWriter(conn)
+ }
+ klog.Errorf("bad writer type:%s", writerType)
+ return nil
+}
diff --git a/vendor/github.com/kubeedge/kubeedge/common/constants/default.go b/vendor/github.com/kubeedge/kubeedge/common/constants/default.go
index 75d686849..70f9897f0 100644
--- a/vendor/github.com/kubeedge/kubeedge/common/constants/default.go
+++ b/vendor/github.com/kubeedge/kubeedge/common/constants/default.go
@@ -29,8 +29,9 @@ const (
DefaultCertFile = "/etc/kubeedge/certs/server.crt"
DefaultKeyFile = "/etc/kubeedge/certs/server.key"
- DefaultCAURL = "/ca.crt"
- DefaultCertURL = "/edge.crt"
+ DefaultCAURL = "/ca.crt"
+ DefaultCertURL = "/edge.crt"
+ DefaultNodeUpgradeURL = "/nodeupgrade"
DefaultStreamCAFile = "/etc/kubeedge/ca/streamCA.crt"
DefaultStreamCertFile = "/etc/kubeedge/certs/stream.crt"
@@ -41,15 +42,16 @@ const (
DefaultMqttKeyFile = "/etc/kubeedge/certs/server.key"
// Edged
+ DefaultKubeletConfig = "/etc/kubeedge/config/kubeconfig"
+ DefaultRootDir = "/var/lib/edged"
DefaultDockerAddress = "unix:///var/run/docker.sock"
DefaultRuntimeType = "docker"
+ DefaultDockershimRootDir = "/var/lib/dockershim"
DefaultEdgedMemoryCapacity = 7852396000
DefaultRemoteRuntimeEndpoint = "unix:///var/run/dockershim.sock"
DefaultRemoteImageEndpoint = "unix:///var/run/dockershim.sock"
DefaultPodSandboxImage = "kubeedge/pause:3.1"
- DefaultNodeStatusUpdateFrequency = 10
- DefaultImagePullProgressDeadline = 60
- DefaultRuntimeRequestTimeout = 2
+ DefaultImagePullProgressDeadline = time.Minute
DefaultImageGCHighThreshold = 80
DefaultImageGCLowThreshold = 40
DefaultMaximumDeadContainersPerPod = 1
@@ -64,7 +66,7 @@ const (
DefaultVolumeStatsAggPeriod = time.Minute
DefaultTunnelPort = 10004
- CurrentSupportK8sVersion = "v1.22.6"
+ CurrentSupportK8sVersion = "v1.22.17"
// MetaManager
DefaultRemoteQueryTimeout = 60
@@ -87,10 +89,15 @@ const (
DefaultQueryPersistentVolumeWorkers = 4
DefaultQueryPersistentVolumeClaimWorkers = 4
DefaultQueryVolumeAttachmentWorkers = 4
+ DefaultCreateNodeWorkers = 4
+ DefaultPatchNodeWorkers = 4
DefaultQueryNodeWorkers = 4
DefaultUpdateNodeWorkers = 4
+ DefaultPatchPodWorkers = 4
DefaultDeletePodWorkers = 4
DefaultUpdateRuleStatusWorkers = 4
+ DefaultCreateLeaseWorkers = 4
+ DefaultQueryLeaseWorkers = 4
DefaultServiceAccountTokenWorkers = 4
DefaultUpdatePodStatusBuffer = 1024
@@ -102,9 +109,14 @@ const (
DefaultQueryPersistentVolumeBuffer = 1024
DefaultQueryPersistentVolumeClaimBuffer = 1024
DefaultQueryVolumeAttachmentBuffer = 1024
+ DefaultCreateNodeBuffer = 1024
+ DefaultPatchNodeBuffer = 1024
DefaultQueryNodeBuffer = 1024
DefaultUpdateNodeBuffer = 1024
+ DefaultPatchPodBuffer = 1024
DefaultDeletePodBuffer = 1024
+ DefaultCreateLeaseBuffer = 1024
+ DefaultQueryLeaseBuffer = 1024
DefaultServiceAccountTokenBuffer = 1024
DefaultPodEventBuffer = 1
@@ -121,6 +133,11 @@ const (
DefaultDeviceModelEventBuffer = 1
DefaultUpdateDeviceStatusWorkers = 1
+ // NodeUpgradeJobController
+ DefaultNodeUpgradeJobStatusBuffer = 1024
+ DefaultNodeUpgradeJobEventBuffer = 1
+ DefaultNodeUpgradeJobWorkers = 1
+
// Resource sep
ResourceSep = "/"
@@ -140,7 +157,8 @@ const (
// ServerPort is the default port for the edgecore server on each host machine.
// May be overridden by a flag at startup in the future.
- ServerPort = 10350
+ ServerAddress = "127.0.0.1"
+ ServerPort = 10350
// MessageSuccessfulContent is the successful content value of Message struct
MessageSuccessfulContent string = "OK"
@@ -148,4 +166,12 @@ const (
DefaultBurst = 60
// MaxRespBodyLength is the max length of http response body
MaxRespBodyLength = 1 << 20 // 1 MiB
+
+ EdgeNodeRoleKey = "node-role.kubernetes.io/edge"
+ EdgeNodeRoleValue = ""
+
+ DeafultMosquittoContainerName = "mqtt-kubeedge"
+
+ DeployMqttContainerEnv = "DEPLOY_MQTT_CONTAINER"
+ DeployMqttContainerImageEnv = "DEPLOY_MQTT_CONTAINER_IMAGE"
)
diff --git a/vendor/github.com/kubeedge/kubeedge/pkg/apis/devices/v1alpha2/device_model_types.go b/vendor/github.com/kubeedge/kubeedge/pkg/apis/devices/v1alpha2/device_model_types.go
index 6d7260251..2a809dcdf 100644
--- a/vendor/github.com/kubeedge/kubeedge/pkg/apis/devices/v1alpha2/device_model_types.go
+++ b/vendor/github.com/kubeedge/kubeedge/pkg/apis/devices/v1alpha2/device_model_types.go
@@ -23,6 +23,8 @@ import (
// DeviceModelSpec defines the model / template for a device.It is a blueprint which describes the device
// capabilities and access mechanism via property visitors.
type DeviceModelSpec struct {
+ // Required for DMI: Protocol name used by the device.
+ Protocol string `json:"protocol,omitempty"`
// Required: List of device properties.
Properties []DeviceProperty `json:"properties,omitempty"`
}
diff --git a/vendor/github.com/kubeedge/kubeedge/pkg/apis/devices/v1alpha2/zz_generated.deepcopy.go b/vendor/github.com/kubeedge/kubeedge/pkg/apis/devices/v1alpha2/zz_generated.deepcopy.go
index 3571b3db4..a271226b7 100644
--- a/vendor/github.com/kubeedge/kubeedge/pkg/apis/devices/v1alpha2/zz_generated.deepcopy.go
+++ b/vendor/github.com/kubeedge/kubeedge/pkg/apis/devices/v1alpha2/zz_generated.deepcopy.go
@@ -1,3 +1,4 @@
+//go:build !ignore_autogenerated
// +build !ignore_autogenerated
/*
diff --git a/vendor/github.com/kubeedge/kubeedge/pkg/apis/rules/v1/zz_generated.deepcopy.go b/vendor/github.com/kubeedge/kubeedge/pkg/apis/rules/v1/zz_generated.deepcopy.go
index c0c8c1222..b7316db9b 100644
--- a/vendor/github.com/kubeedge/kubeedge/pkg/apis/rules/v1/zz_generated.deepcopy.go
+++ b/vendor/github.com/kubeedge/kubeedge/pkg/apis/rules/v1/zz_generated.deepcopy.go
@@ -1,3 +1,4 @@
+//go:build !ignore_autogenerated
// +build !ignore_autogenerated
/*
diff --git a/vendor/github.com/kubeedge/kubeedge/tests/e2e/constants/constants.go b/vendor/github.com/kubeedge/kubeedge/tests/e2e/constants/constants.go
index db1f9a92a..0f5952490 100644
--- a/vendor/github.com/kubeedge/kubeedge/tests/e2e/constants/constants.go
+++ b/vendor/github.com/kubeedge/kubeedge/tests/e2e/constants/constants.go
@@ -1,6 +1,11 @@
package constants
+import "time"
+
const (
AppHandler = "/api/v1/namespaces/default/pods"
DeploymentHandler = "/apis/apps/v1/namespaces/default/deployments"
+
+ Interval = 5 * time.Second
+ Timeout = 10 * time.Minute
)
diff --git a/vendor/github.com/kubeedge/kubeedge/tests/e2e/utils/common.go b/vendor/github.com/kubeedge/kubeedge/tests/e2e/utils/common.go
index 9f98c6e94..4370785b6 100644
--- a/vendor/github.com/kubeedge/kubeedge/tests/e2e/utils/common.go
+++ b/vendor/github.com/kubeedge/kubeedge/tests/e2e/utils/common.go
@@ -30,13 +30,17 @@ import (
"time"
MQTT "github.com/eclipse/paho.mqtt.golang"
- "github.com/onsi/ginkgo"
+ "github.com/onsi/ginkgo/v2"
"github.com/onsi/gomega"
apps "k8s.io/api/apps/v1"
v1 "k8s.io/api/core/v1"
+ apierrors "k8s.io/apimachinery/pkg/api/errors"
"k8s.io/apimachinery/pkg/api/resource"
metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
"k8s.io/apimachinery/pkg/util/intstr"
+ "k8s.io/apimachinery/pkg/util/wait"
+ clientset "k8s.io/client-go/kubernetes"
+ "k8s.io/klog/v2"
"github.com/kubeedge/kubeedge/common/constants"
"github.com/kubeedge/kubeedge/pkg/apis/devices/v1alpha2"
@@ -179,12 +183,12 @@ func edgecoreDeploymentSpec(imgURL, configmap string, replicas int) *apps.Deploy
ImagePullPolicy: v1.PullPolicy("IfNotPresent"),
Resources: v1.ResourceRequirements{
Requests: v1.ResourceList{
- v1.ResourceName(v1.ResourceCPU): resource.MustParse("200m"),
- v1.ResourceName(v1.ResourceMemory): resource.MustParse("100Mi"),
+ v1.ResourceCPU: resource.MustParse("200m"),
+ v1.ResourceMemory: resource.MustParse("100Mi"),
},
Limits: v1.ResourceList{
- v1.ResourceName(v1.ResourceCPU): resource.MustParse("200m"),
- v1.ResourceName(v1.ResourceMemory): resource.MustParse("100Mi"),
+ v1.ResourceCPU: resource.MustParse("200m"),
+ v1.ResourceMemory: resource.MustParse("100Mi"),
},
},
Env: []v1.EnvVar{{Name: "DOCKER_HOST", Value: "tcp://localhost:2375"}},
@@ -195,8 +199,8 @@ func edgecoreDeploymentSpec(imgURL, configmap string, replicas int) *apps.Deploy
Image: "docker:dind",
Resources: v1.ResourceRequirements{
Requests: v1.ResourceList{
- v1.ResourceName(v1.ResourceCPU): resource.MustParse("20m"),
- v1.ResourceName(v1.ResourceMemory): resource.MustParse("256Mi"),
+ v1.ResourceCPU: resource.MustParse("20m"),
+ v1.ResourceMemory: resource.MustParse("256Mi"),
},
},
VolumeMounts: []v1.VolumeMount{{Name: "docker-graph-storage", MountPath: "/var/lib/docker"}},
@@ -235,8 +239,8 @@ func cloudcoreDeploymentSpec(imgURL, configmap string, replicas int) *apps.Deplo
ImagePullPolicy: v1.PullPolicy("IfNotPresent"),
Resources: v1.ResourceRequirements{
Requests: v1.ResourceList{
- v1.ResourceName(v1.ResourceCPU): resource.MustParse("100m"),
- v1.ResourceName(v1.ResourceMemory): resource.MustParse("512Mi"),
+ v1.ResourceCPU: resource.MustParse("100m"),
+ v1.ResourceMemory: resource.MustParse("512Mi"),
},
},
Ports: portInfo,
@@ -1030,3 +1034,74 @@ func CallServicebus() (response string, err error) {
response = servicebusResponse.Body
return
}
+
+func GetStatefulSet(c clientset.Interface, ns, name string) (*apps.StatefulSet, error) {
+ return c.AppsV1().StatefulSets(ns).Get(context.TODO(), name, metav1.GetOptions{})
+}
+
+func CreateStatefulSet(c clientset.Interface, statefulSet *apps.StatefulSet) (*apps.StatefulSet, error) {
+ return c.AppsV1().StatefulSets(statefulSet.Namespace).Create(context.TODO(), statefulSet, metav1.CreateOptions{})
+}
+
+// DeleteStatefulSet to delete statefulSet
+func DeleteStatefulSet(c clientset.Interface, ns, name string) error {
+ err := c.AppsV1().StatefulSets(ns).Delete(context.TODO(), name, metav1.DeleteOptions{})
+ if err != nil && apierrors.IsNotFound(err) {
+ return nil
+ }
+
+ return err
+}
+
+// NewTestStatefulSet create statefulSet for test
+func NewTestStatefulSet(name, imgURL string, replicas int32) *apps.StatefulSet {
+ return &apps.StatefulSet{
+ ObjectMeta: metav1.ObjectMeta{
+ Name: name,
+ Namespace: Namespace,
+ Labels: map[string]string{"app": name},
+ },
+ Spec: apps.StatefulSetSpec{
+ Replicas: &replicas,
+ Selector: &metav1.LabelSelector{
+ MatchLabels: map[string]string{"app": name},
+ },
+ Template: v1.PodTemplateSpec{
+ ObjectMeta: metav1.ObjectMeta{
+ Labels: map[string]string{"app": name},
+ },
+ Spec: v1.PodSpec{
+ Containers: []v1.Container{
+ {
+ Name: "nginx",
+ Image: imgURL,
+ },
+ },
+ },
+ },
+ },
+ }
+}
+
+// WaitForStatusReplicas waits for the ss.Status.Replicas to be equal to expectedReplicas
+func WaitForStatusReplicas(c clientset.Interface, ss *apps.StatefulSet, expectedReplicas int32) {
+ ns, name := ss.Namespace, ss.Name
+ pollErr := wait.PollImmediate(5*time.Second, 240*time.Second,
+ func() (bool, error) {
+ ssGet, err := c.AppsV1().StatefulSets(ns).Get(context.TODO(), name, metav1.GetOptions{})
+ if err != nil {
+ return false, err
+ }
+ if ssGet.Status.ObservedGeneration < ss.Generation {
+ return false, nil
+ }
+ if ssGet.Status.Replicas != expectedReplicas {
+ klog.Infof("Waiting for stateful set status.replicas to become %d, currently %d", expectedReplicas, ssGet.Status.Replicas)
+ return false, nil
+ }
+ return true, nil
+ })
+ if pollErr != nil {
+ Fatalf("Failed waiting for stateful set status.replicas updated to %d: %v", expectedReplicas, pollErr)
+ }
+}
diff --git a/vendor/github.com/kubeedge/kubeedge/tests/e2e/utils/log.go b/vendor/github.com/kubeedge/kubeedge/tests/e2e/utils/log.go
index d79508c2b..7268fd2d9 100644
--- a/vendor/github.com/kubeedge/kubeedge/tests/e2e/utils/log.go
+++ b/vendor/github.com/kubeedge/kubeedge/tests/e2e/utils/log.go
@@ -19,7 +19,7 @@ import (
"fmt"
"time"
- "github.com/onsi/ginkgo"
+ "github.com/onsi/ginkgo/v2"
)
//Function to get time in millisec
@@ -53,13 +53,12 @@ func Infof(format string, args ...interface{}) {
//Function to print the test case name and status of execution
func PrintTestcaseNameandStatus() {
- var testdesc ginkgo.GinkgoTestDescription
var Status string
- testdesc = ginkgo.CurrentGinkgoTestDescription()
- if testdesc.Failed {
+ testSpecReport := ginkgo.CurrentSpecReport()
+ if testSpecReport.Failed() {
Status = "FAILED"
} else {
Status = "PASSED"
}
- Infof("TestCase:%40s Status=%s", testdesc.TestText, Status)
+ Infof("TestCase:%40s Status=%s", testSpecReport.LeafNodeText, Status)
}
diff --git a/vendor/github.com/kubeedge/kubeedge/tests/e2e/utils/node.go b/vendor/github.com/kubeedge/kubeedge/tests/e2e/utils/node.go
index ab9795a29..a81b86c94 100644
--- a/vendor/github.com/kubeedge/kubeedge/tests/e2e/utils/node.go
+++ b/vendor/github.com/kubeedge/kubeedge/tests/e2e/utils/node.go
@@ -32,6 +32,8 @@ import (
"github.com/onsi/gomega"
v1 "k8s.io/api/core/v1"
"sigs.k8s.io/yaml"
+
+ "github.com/kubeedge/kubeedge/common/constants"
)
func getpwd() string {
@@ -61,7 +63,7 @@ func DeRegisterNodeFromMaster(nodehandler, nodename string) error {
func GenerateNodeReqBody(nodeid, nodeselector string) (map[string]interface{}, error) {
var temp map[string]interface{}
- body := fmt.Sprintf(`{"kind": "Node","apiVersion": "v1","metadata": {"name": "%s","labels": {"name": "edgenode", "disktype":"%s", "node-role.kubernetes.io/edge": ""}}}`, nodeid, nodeselector)
+ body := fmt.Sprintf(`{"kind": "Node","apiVersion": "v1","metadata": {"name": "%s","labels": {"name": "edgenode", "disktype":"%s", "%s": "%s"}}}`, nodeid, nodeselector, constants.EdgeNodeRoleKey, constants.EdgeNodeRoleValue)
err := json.Unmarshal([]byte(body), &temp)
if err != nil {
Fatalf("Unmarshal body failed: %v", err)
diff --git a/vendor/github.com/kubeedge/kubeedge/tests/e2e/utils/pod.go b/vendor/github.com/kubeedge/kubeedge/tests/e2e/utils/pod.go
index 1c7803b67..ac791199f 100644
--- a/vendor/github.com/kubeedge/kubeedge/tests/e2e/utils/pod.go
+++ b/vendor/github.com/kubeedge/kubeedge/tests/e2e/utils/pod.go
@@ -17,6 +17,7 @@ limitations under the License.
package utils
import (
+ "context"
"encoding/json"
"io"
"net/http"
@@ -25,8 +26,11 @@ import (
"github.com/onsi/gomega"
v1 "k8s.io/api/core/v1"
+ metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
"k8s.io/apimachinery/pkg/fields"
- "k8s.io/client-go/kubernetes"
+ "k8s.io/apimachinery/pkg/labels"
+ "k8s.io/apimachinery/pkg/util/wait"
+ clientset "k8s.io/client-go/kubernetes"
"k8s.io/client-go/tools/cache"
"k8s.io/client-go/tools/clientcmd"
)
@@ -35,6 +39,42 @@ const (
podLabelSelector = "?fieldSelector=spec.nodeName="
)
+func ListPods(c clientset.Interface, ns string, labelSelector labels.Selector, fieldSelector fields.Selector) (*v1.PodList, error) {
+ options := metav1.ListOptions{}
+
+ if fieldSelector != nil {
+ options.FieldSelector = fieldSelector.String()
+ }
+
+ if labelSelector != nil {
+ options.LabelSelector = labelSelector.String()
+ }
+
+ return c.CoreV1().Pods(ns).List(context.TODO(), options)
+}
+
+func WaitForPodsToDisappear(c clientset.Interface, ns string, label labels.Selector, interval, timeout time.Duration) error {
+ return wait.PollImmediate(interval, timeout, func() (bool, error) {
+ Infof("Waiting for pod with label %s to disappear", label.String())
+ options := metav1.ListOptions{LabelSelector: label.String()}
+ pods, err := c.CoreV1().Pods(ns).List(context.TODO(), options)
+ if err != nil {
+ return false, err
+ }
+
+ if pods != nil && len(pods.Items) == 0 {
+ Infof("Pod with label %s no longer exists", label.String())
+ return true, nil
+ }
+
+ return false, nil
+ })
+}
+
+func DeletePod(c clientset.Interface, name, ns string) error {
+ return c.CoreV1().Pods(ns).Delete(context.TODO(), name, metav1.DeleteOptions{})
+}
+
//GetPods function to get the pods from Edged
func GetPods(apiserver, label string) (v1.PodList, error) {
var pods v1.PodList
@@ -175,7 +215,7 @@ func CheckDeploymentPodDeleteState(apiserver string, podlist v1.PodList) {
}
// NewKubeClient creates kube client from config
-func NewKubeClient(kubeConfigPath string) *kubernetes.Clientset {
+func NewKubeClient(kubeConfigPath string) *clientset.Clientset {
kubeConfig, err := clientcmd.BuildConfigFromFlags("", kubeConfigPath)
if err != nil {
Fatalf("Get kube config failed with error: %v", err)
@@ -184,7 +224,7 @@ func NewKubeClient(kubeConfigPath string) *kubernetes.Clientset {
kubeConfig.QPS = 5
kubeConfig.Burst = 10
kubeConfig.ContentType = "application/vnd.kubernetes.protobuf"
- kubeClient, err := kubernetes.NewForConfig(kubeConfig)
+ kubeClient, err := clientset.NewForConfig(kubeConfig)
if err != nil {
Fatalf("Get kube client failed with error: %v", err)
return nil
diff --git a/vendor/github.com/libp2p/go-libp2p-asn-util/asn.go b/vendor/github.com/libp2p/go-libp2p-asn-util/asn.go
index fe67238c6..490b4a296 100644
--- a/vendor/github.com/libp2p/go-libp2p-asn-util/asn.go
+++ b/vendor/github.com/libp2p/go-libp2p-asn-util/asn.go
@@ -4,14 +4,15 @@ import (
"errors"
"fmt"
"net"
+ "sync"
"github.com/libp2p/go-cidranger"
)
-var Store *indirectAsnStore
+var Store *lazyAsnStore
func init() {
- Store = newIndirectAsnStore()
+ Store = &lazyAsnStore{}
}
type networkWithAsn struct {
@@ -66,32 +67,29 @@ func newAsnStore() (*asnStore, error) {
return &asnStore{cr}, nil
}
-type indirectAsnStore struct {
- store *asnStore
- doneLoading chan struct{}
+// lazyAsnStore builds the underlying trie on first call to AsnForIPv6.
+// Alternatively, Init can be called to manually trigger initialization.
+type lazyAsnStore struct {
+ store *asnStore
+ once sync.Once
}
// AsnForIPv6 returns the AS number for the given IPv6 address.
// If no mapping exists for the given IP, this function will
// return an empty ASN and a nil error.
-func (a *indirectAsnStore) AsnForIPv6(ip net.IP) (string, error) {
- <-a.doneLoading
+func (a *lazyAsnStore) AsnForIPv6(ip net.IP) (string, error) {
+ a.once.Do(a.init)
return a.store.AsnForIPv6(ip)
}
-func newIndirectAsnStore() *indirectAsnStore {
- a := &indirectAsnStore{
- doneLoading: make(chan struct{}),
- }
-
- go func() {
- defer close(a.doneLoading)
- store, err := newAsnStore()
- if err != nil {
- panic(err)
- }
- a.store = store
- }()
+func (a *lazyAsnStore) Init() {
+ a.once.Do(a.init)
+}
- return a
+func (a *lazyAsnStore) init() {
+ store, err := newAsnStore()
+ if err != nil {
+ panic(err)
+ }
+ a.store = store
}
diff --git a/vendor/github.com/libp2p/go-libp2p-asn-util/version.json b/vendor/github.com/libp2p/go-libp2p-asn-util/version.json
index 1437d5b73..a654d65ab 100644
--- a/vendor/github.com/libp2p/go-libp2p-asn-util/version.json
+++ b/vendor/github.com/libp2p/go-libp2p-asn-util/version.json
@@ -1,3 +1,3 @@
{
- "version": "v0.2.0"
+ "version": "v0.3.0"
}
diff --git a/vendor/github.com/libp2p/go-libp2p-core/LICENSE b/vendor/github.com/libp2p/go-libp2p-core/LICENSE
deleted file mode 100644
index 770d1744d..000000000
--- a/vendor/github.com/libp2p/go-libp2p-core/LICENSE
+++ /dev/null
@@ -1,4 +0,0 @@
-Dual-licensed under MIT and ASLv2, by way of the [Permissive License Stack](https://protocol.ai/blog/announcing-the-permissive-license-stack/).
-
-Apache-2.0: https://www.apache.org/licenses/license-2.0
-MIT: https://www.opensource.org/licenses/mit
diff --git a/vendor/github.com/libp2p/go-libp2p-core/LICENSE-APACHE b/vendor/github.com/libp2p/go-libp2p-core/LICENSE-APACHE
deleted file mode 100644
index 546514363..000000000
--- a/vendor/github.com/libp2p/go-libp2p-core/LICENSE-APACHE
+++ /dev/null
@@ -1,13 +0,0 @@
-Copyright 2019. Protocol Labs, Inc.
-
-Licensed under the Apache License, Version 2.0 (the "License");
-you may not use this file except in compliance with the License.
-You may obtain a copy of the License at
-
- http://www.apache.org/licenses/LICENSE-2.0
-
-Unless required by applicable law or agreed to in writing, software
-distributed under the License is distributed on an "AS IS" BASIS,
-WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-See the License for the specific language governing permissions and
-limitations under the License.
diff --git a/vendor/github.com/libp2p/go-libp2p-core/crypto/ecdsa.go b/vendor/github.com/libp2p/go-libp2p-core/crypto/ecdsa.go
deleted file mode 100644
index 4622e083e..000000000
--- a/vendor/github.com/libp2p/go-libp2p-core/crypto/ecdsa.go
+++ /dev/null
@@ -1,87 +0,0 @@
-package crypto
-
-import (
- "crypto/ecdsa"
- "crypto/elliptic"
- "io"
-
- "github.com/libp2p/go-libp2p/core/crypto"
-)
-
-// ECDSAPrivateKey is an implementation of an ECDSA private key
-// Deprecated: use github.com/libp2p/go-libp2p/core/crypto.ECDSAPrivateKey instead
-type ECDSAPrivateKey = crypto.ECDSAPrivateKey
-
-// ECDSAPublicKey is an implementation of an ECDSA public key
-// Deprecated: use github.com/libp2p/go-libp2p/core/crypto.ECDSAPublicKey instead
-type ECDSAPublicKey = crypto.ECDSAPublicKey
-
-// ECDSASig holds the r and s values of an ECDSA signature
-// Deprecated: use github.com/libp2p/go-libp2p/core/crypto.ECDSASig instead
-type ECDSASig = crypto.ECDSASig
-
-var (
- // ErrNotECDSAPubKey is returned when the public key passed is not an ecdsa public key
- // Deprecated: use github.com/libp2p/go-libp2p/core/crypto.ErrNotECDSAPubKey instead
- ErrNotECDSAPubKey = crypto.ErrNotECDSAPubKey
- // ErrNilSig is returned when the signature is nil
- // Deprecated: use github.com/libp2p/go-libp2p/core/crypto.ErrNilSig instead
- ErrNilSig = crypto.ErrNilSig
- // ErrNilPrivateKey is returned when a nil private key is provided
- // Deprecated: use github.com/libp2p/go-libp2p/core/crypto.ErrNilPrivateKey instead
- ErrNilPrivateKey = crypto.ErrNilPrivateKey
- // ErrNilPublicKey is returned when a nil public key is provided
- // Deprecated: use github.com/libp2p/go-libp2p/core/crypto.ErrNilPublicKey instead
- ErrNilPublicKey = crypto.ErrNilPublicKey
- // ECDSACurve is the default ecdsa curve used
- // Deprecated: use github.com/libp2p/go-libp2p/core/crypto.ECDSACurve instead
- ECDSACurve = elliptic.P256()
-)
-
-// GenerateECDSAKeyPair generates a new ecdsa private and public key
-// Deprecated: use github.com/libp2p/go-libp2p/core/crypto.GenerateECDSAKeyPair instead
-func GenerateECDSAKeyPair(src io.Reader) (PrivKey, PubKey, error) {
- return crypto.GenerateECDSAKeyPair(src)
-}
-
-// GenerateECDSAKeyPairWithCurve generates a new ecdsa private and public key with a speicified curve
-// Deprecated: use github.com/libp2p/go-libp2p/core/crypto.GenerateECDSAKeyPairWithCurve instead
-func GenerateECDSAKeyPairWithCurve(curve elliptic.Curve, src io.Reader) (PrivKey, PubKey, error) {
- return crypto.GenerateECDSAKeyPairWithCurve(curve, src)
-}
-
-// ECDSAKeyPairFromKey generates a new ecdsa private and public key from an input private key
-// Deprecated: use github.com/libp2p/go-libp2p/core/crypto.ECDSAKeyPairFromKey instead
-func ECDSAKeyPairFromKey(priv *ecdsa.PrivateKey) (PrivKey, PubKey, error) {
- return crypto.ECDSAKeyPairFromKey(priv)
-}
-
-// ECDSAPublicKeyFromPubKey generates a new ecdsa public key from an input public key
-// Deprecated: use github.com/libp2p/go-libp2p/core/crypto.ECDSAPublicKeyFromPubKey instead
-func ECDSAPublicKeyFromPubKey(pub ecdsa.PublicKey) (PubKey, error) {
- return crypto.ECDSAPublicKeyFromPubKey(pub)
-}
-
-// MarshalECDSAPrivateKey returns x509 bytes from a private key
-// Deprecated: use github.com/libp2p/go-libp2p/core/crypto.MarshalECDSAPrivateKey instead
-func MarshalECDSAPrivateKey(ePriv ECDSAPrivateKey) (res []byte, err error) {
- return crypto.MarshalECDSAPrivateKey(ePriv)
-}
-
-// MarshalECDSAPublicKey returns x509 bytes from a public key
-// Deprecated: use github.com/libp2p/go-libp2p/core/crypto.MarshalECDSAPublicKey instead
-func MarshalECDSAPublicKey(ePub ECDSAPublicKey) (res []byte, err error) {
- return crypto.MarshalECDSAPublicKey(ePub)
-}
-
-// UnmarshalECDSAPrivateKey returns a private key from x509 bytes
-// Deprecated: use github.com/libp2p/go-libp2p/core/crypto.MarshalECDSAPrivateKey instead
-func UnmarshalECDSAPrivateKey(data []byte) (res PrivKey, err error) {
- return crypto.UnmarshalECDSAPrivateKey(data)
-}
-
-// UnmarshalECDSAPublicKey returns the public key from x509 bytes
-// Deprecated: use github.com/libp2p/go-libp2p/core/crypto.UnmarshalECDSAPublicKey instead
-func UnmarshalECDSAPublicKey(data []byte) (key PubKey, err error) {
- return crypto.UnmarshalECDSAPublicKey(data)
-}
diff --git a/vendor/github.com/libp2p/go-libp2p-core/crypto/ed25519.go b/vendor/github.com/libp2p/go-libp2p-core/crypto/ed25519.go
deleted file mode 100644
index 7dcd93bce..000000000
--- a/vendor/github.com/libp2p/go-libp2p-core/crypto/ed25519.go
+++ /dev/null
@@ -1,33 +0,0 @@
-package crypto
-
-import (
- "io"
-
- "github.com/libp2p/go-libp2p/core/crypto"
-)
-
-// Ed25519PrivateKey is an ed25519 private key.
-// Deprecated: use github.com/libp2p/go-libp2p/core/crypto.Ed25519PrivateKey instead
-type Ed25519PrivateKey = crypto.Ed25519PrivateKey
-
-// Ed25519PublicKey is an ed25519 public key.
-// Deprecated: use github.com/libp2p/go-libp2p/core/crypto.Ed25519PublicKey instead
-type Ed25519PublicKey = crypto.Ed25519PublicKey
-
-// GenerateEd25519Key generates a new ed25519 private and public key pair.
-// Deprecated: use github.com/libp2p/go-libp2p/core/crypto.GenerateEd25519Key instead
-func GenerateEd25519Key(src io.Reader) (PrivKey, PubKey, error) {
- return crypto.GenerateEd25519Key(src)
-}
-
-// UnmarshalEd25519PublicKey returns a public key from input bytes.
-// Deprecated: use github.com/libp2p/go-libp2p/core/crypto.UnmarshalEd25519PublicKey instead
-func UnmarshalEd25519PublicKey(data []byte) (PubKey, error) {
- return crypto.UnmarshalEd25519PublicKey(data)
-}
-
-// UnmarshalEd25519PrivateKey returns a private key from input bytes.
-// Deprecated: use github.com/libp2p/go-libp2p/core/crypto.UnmarshalEd25519PrivateKey instead
-func UnmarshalEd25519PrivateKey(data []byte) (PrivKey, error) {
- return crypto.UnmarshalEd25519PrivateKey(data)
-}
diff --git a/vendor/github.com/libp2p/go-libp2p-core/crypto/key.go b/vendor/github.com/libp2p/go-libp2p-core/crypto/key.go
deleted file mode 100644
index ff89989f7..000000000
--- a/vendor/github.com/libp2p/go-libp2p-core/crypto/key.go
+++ /dev/null
@@ -1,149 +0,0 @@
-// Deprecated: This package has moved into go-libp2p as a sub-package: github.com/libp2p/go-libp2p/core/crypto.
-//
-// Package crypto implements various cryptographic utilities used by libp2p.
-// This includes a Public and Private key interface and key implementations
-// for supported key algorithms.
-package crypto
-
-import (
- "errors"
- "io"
-
- "github.com/libp2p/go-libp2p/core/crypto"
- pb "github.com/libp2p/go-libp2p/core/crypto/pb"
-)
-
-const (
- // RSA is an enum for the supported RSA key type
- RSA = iota
- // Ed25519 is an enum for the supported Ed25519 key type
- Ed25519
- // Secp256k1 is an enum for the supported Secp256k1 key type
- Secp256k1
- // ECDSA is an enum for the supported ECDSA key type
- ECDSA
-)
-
-var (
- // ErrBadKeyType is returned when a key is not supported
- ErrBadKeyType = errors.New("invalid or unsupported key type")
- // KeyTypes is a list of supported keys
- KeyTypes = []int{
- RSA,
- Ed25519,
- Secp256k1,
- ECDSA,
- }
-)
-
-// PubKeyUnmarshaller is a func that creates a PubKey from a given slice of bytes
-// Deprecated: use github.com/libp2p/go-libp2p/core/crypto.PubKeyUnmarshaller instead
-type PubKeyUnmarshaller = crypto.PubKeyUnmarshaller
-
-// PrivKeyUnmarshaller is a func that creates a PrivKey from a given slice of bytes
-// Deprecated: use github.com/libp2p/go-libp2p/core/crypto.PrivKeyUnmarshaller instead
-type PrivKeyUnmarshaller = crypto.PrivKeyUnmarshaller
-
-// PubKeyUnmarshallers is a map of unmarshallers by key type
-// Deprecated: use github.com/libp2p/go-libp2p/core/crypto.PubKeyUnmarshallers instead
-var PubKeyUnmarshallers = crypto.PubKeyUnmarshallers
-
-// PrivKeyUnmarshallers is a map of unmarshallers by key type
-// Deprecated: use github.com/libp2p/go-libp2p/core/crypto.PrivKeyUnmarshallers instead
-var PrivKeyUnmarshallers = crypto.PrivKeyUnmarshallers
-
-// Key represents a crypto key that can be compared to another key
-// Deprecated: use github.com/libp2p/go-libp2p/core/crypto.Key instead
-type Key = crypto.Key
-
-// PrivKey represents a private key that can be used to generate a public key and sign data
-// Deprecated: use github.com/libp2p/go-libp2p/core/crypto.PrivKey instead
-type PrivKey = crypto.PrivKey
-
-// PubKey is a public key that can be used to verifiy data signed with the corresponding private key
-// Deprecated: use github.com/libp2p/go-libp2p/core/crypto.PubKey instead
-type PubKey = crypto.PubKey
-
-// GenSharedKey generates the shared key from a given private key
-// Deprecated: use github.com/libp2p/go-libp2p/core/crypto.GenSharedKey instead
-type GenSharedKey = crypto.GenSharedKey
-
-// GenerateKeyPair generates a private and public key
-// Deprecated: use github.com/libp2p/go-libp2p/core/crypto.GenerateKeyPair instead
-func GenerateKeyPair(typ, bits int) (PrivKey, PubKey, error) {
- return crypto.GenerateKeyPair(typ, bits)
-}
-
-// GenerateKeyPairWithReader returns a keypair of the given type and bitsize
-// Deprecated: use github.com/libp2p/go-libp2p/core/crypto.GenerateKeyPairWithReader instead
-func GenerateKeyPairWithReader(typ, bits int, src io.Reader) (PrivKey, PubKey, error) {
- return crypto.GenerateKeyPairWithReader(typ, bits, src)
-}
-
-// GenerateEKeyPair returns an ephemeral public key and returns a function that will compute
-// the shared secret key. Used in the identify module.
-//
-// Focuses only on ECDH now, but can be made more general in the future.
-// Deprecated: use github.com/libp2p/go-libp2p/core/crypto.GenerateEKeyPair instead
-func GenerateEKeyPair(curveName string) ([]byte, GenSharedKey, error) {
- return crypto.GenerateEKeyPair(curveName)
-}
-
-// UnmarshalPublicKey converts a protobuf serialized public key into its
-// representative object
-// Deprecated: use github.com/libp2p/go-libp2p/core/crypto.UnmarshalPublicKey instead
-func UnmarshalPublicKey(data []byte) (PubKey, error) {
- return crypto.UnmarshalPublicKey(data)
-}
-
-// PublicKeyFromProto converts an unserialized protobuf PublicKey message
-// into its representative object.
-// Deprecated: use github.com/libp2p/go-libp2p/core/crypto.PublicKeyFromProto instead
-func PublicKeyFromProto(pmes *pb.PublicKey) (PubKey, error) {
- return crypto.PublicKeyFromProto(pmes)
-}
-
-// MarshalPublicKey converts a public key object into a protobuf serialized
-// public key
-// Deprecated: use github.com/libp2p/go-libp2p/core/crypto.MarshalPublicKey instead
-func MarshalPublicKey(k PubKey) ([]byte, error) {
- return crypto.MarshalPublicKey(k)
-}
-
-// PublicKeyToProto converts a public key object into an unserialized
-// protobuf PublicKey message.
-// Deprecated: use github.com/libp2p/go-libp2p/core/crypto.PublicKeyToProto instead
-func PublicKeyToProto(k PubKey) (*pb.PublicKey, error) {
- return crypto.PublicKeyToProto(k)
-}
-
-// UnmarshalPrivateKey converts a protobuf serialized private key into its
-// representative object
-// Deprecated: use github.com/libp2p/go-libp2p/core/crypto.UnmarshalPrivateKey instead
-func UnmarshalPrivateKey(data []byte) (PrivKey, error) {
- return crypto.UnmarshalPrivateKey(data)
-}
-
-// MarshalPrivateKey converts a key object into its protobuf serialized form.
-// Deprecated: use github.com/libp2p/go-libp2p/core/crypto.MarshalPrivateKey instead
-func MarshalPrivateKey(k PrivKey) ([]byte, error) {
- return crypto.MarshalPrivateKey(k)
-}
-
-// ConfigDecodeKey decodes from b64 (for config file) to a byte array that can be unmarshalled.
-// Deprecated: use github.com/libp2p/go-libp2p/core/crypto.ConfigDecodeKey instead
-func ConfigDecodeKey(b string) ([]byte, error) {
- return crypto.ConfigDecodeKey(b)
-}
-
-// ConfigEncodeKey encodes a marshalled key to b64 (for config file).
-// Deprecated: use github.com/libp2p/go-libp2p/core/crypto.ConfigEncodeKey instead
-func ConfigEncodeKey(b []byte) string {
- return crypto.ConfigEncodeKey(b)
-}
-
-// KeyEqual checks whether two Keys are equivalent (have identical byte representations).
-// Deprecated: use github.com/libp2p/go-libp2p/core/crypto.KeyEqual instead
-func KeyEqual(k1, k2 Key) bool {
- return crypto.KeyEqual(k1, k2)
-}
diff --git a/vendor/github.com/libp2p/go-libp2p-core/crypto/key_not_openssl.go b/vendor/github.com/libp2p/go-libp2p-core/crypto/key_not_openssl.go
deleted file mode 100644
index c98fc54c6..000000000
--- a/vendor/github.com/libp2p/go-libp2p-core/crypto/key_not_openssl.go
+++ /dev/null
@@ -1,28 +0,0 @@
-//go:build !openssl
-// +build !openssl
-
-package crypto
-
-import (
- stdcrypto "crypto"
-
- "github.com/libp2p/go-libp2p/core/crypto"
-)
-
-// KeyPairFromStdKey wraps standard library (and secp256k1) private keys in libp2p/go-libp2p-core/crypto keys
-// Deprecated: use github.com/libp2p/go-libp2p/core/crypto.KeyPairFromStdKey instead
-func KeyPairFromStdKey(priv stdcrypto.PrivateKey) (PrivKey, PubKey, error) {
- return crypto.KeyPairFromStdKey(priv)
-}
-
-// PrivKeyToStdKey converts libp2p/go-libp2p-core/crypto private keys to standard library (and secp256k1) private keys
-// Deprecated: use github.com/libp2p/go-libp2p/core/crypto.PrivKeyToStdKey instead
-func PrivKeyToStdKey(priv PrivKey) (stdcrypto.PrivateKey, error) {
- return crypto.PrivKeyToStdKey(priv)
-}
-
-// PubKeyToStdKey converts libp2p/go-libp2p-core/crypto private keys to standard library (and secp256k1) public keys
-// Deprecated: use github.com/libp2p/go-libp2p/core/crypto.PubKeyToStdKey instead
-func PubKeyToStdKey(pub PubKey) (stdcrypto.PublicKey, error) {
- return crypto.PubKeyToStdKey(pub)
-}
diff --git a/vendor/github.com/libp2p/go-libp2p-core/crypto/key_openssl.go b/vendor/github.com/libp2p/go-libp2p-core/crypto/key_openssl.go
deleted file mode 100644
index a9c044275..000000000
--- a/vendor/github.com/libp2p/go-libp2p-core/crypto/key_openssl.go
+++ /dev/null
@@ -1,28 +0,0 @@
-//go:build openssl
-// +build openssl
-
-package crypto
-
-import (
- stdcrypto "crypto"
-
- "github.com/libp2p/go-libp2p/core/crypto"
-)
-
-// KeyPairFromStdKey wraps standard library (and secp256k1) private keys in libp2p/go-libp2p-core/crypto keys
-// Deprecated: use github.com/libp2p/go-libp2p/core/crypto.KeyPairFromStdKey instead
-func KeyPairFromStdKey(priv stdcrypto.PrivateKey) (_priv PrivKey, _pub PubKey, err error) {
- return crypto.KeyPairFromStdKey(priv)
-}
-
-// PrivKeyToStdKey converts libp2p/go-libp2p-core/crypto private keys to standard library (and secp256k1) private keys
-// Deprecated: use github.com/libp2p/go-libp2p/core/crypto.PrivKeyToStdKey instead
-func PrivKeyToStdKey(priv PrivKey) (_priv stdcrypto.PrivateKey, err error) {
- return crypto.PrivKeyToStdKey(priv)
-}
-
-// PubKeyToStdKey converts libp2p/go-libp2p-core/crypto private keys to standard library (and secp256k1) public keys
-// Deprecated: use github.com/libp2p/go-libp2p/core/crypto.PubKeyToStdKey instead
-func PubKeyToStdKey(pub PubKey) (key stdcrypto.PublicKey, err error) {
- return crypto.PubKeyToStdKey(pub)
-}
diff --git a/vendor/github.com/libp2p/go-libp2p-core/crypto/rsa_common.go b/vendor/github.com/libp2p/go-libp2p-core/crypto/rsa_common.go
deleted file mode 100644
index d3520c95a..000000000
--- a/vendor/github.com/libp2p/go-libp2p-core/crypto/rsa_common.go
+++ /dev/null
@@ -1,19 +0,0 @@
-package crypto
-
-import (
- "github.com/libp2p/go-libp2p/core/crypto"
-)
-
-// WeakRsaKeyEnv is an environment variable which, when set, lowers the
-// minimum required bits of RSA keys to 512. This should be used exclusively in
-// test situations.
-// Deprecated: use github.com/libp2p/go-libp2p/core/crypto.WeakRsaKeyEnv instead
-const WeakRsaKeyEnv = crypto.WeakRsaKeyEnv
-
-// Deprecated: use github.com/libp2p/go-libp2p/core/crypto.MinRsaKeyBits instead
-var MinRsaKeyBits = crypto.MinRsaKeyBits
-
-// ErrRsaKeyTooSmall is returned when trying to generate or parse an RSA key
-// that's smaller than MinRsaKeyBits bits. In test
-// Deprecated: use github.com/libp2p/go-libp2p/core/crypto.ErrRsaKeyTooSmall instead
-var ErrRsaKeyTooSmall error
diff --git a/vendor/github.com/libp2p/go-libp2p-core/crypto/rsa_go.go b/vendor/github.com/libp2p/go-libp2p-core/crypto/rsa_go.go
deleted file mode 100644
index 6f68277b2..000000000
--- a/vendor/github.com/libp2p/go-libp2p-core/crypto/rsa_go.go
+++ /dev/null
@@ -1,36 +0,0 @@
-//go:build !openssl
-// +build !openssl
-
-package crypto
-
-import (
- "io"
-
- "github.com/libp2p/go-libp2p/core/crypto"
-)
-
-// RsaPrivateKey is an rsa private key
-// Deprecated: use github.com/libp2p/go-libp2p/core/crypto.RsaPrivateKey instead
-type RsaPrivateKey = crypto.RsaPrivateKey
-
-// RsaPublicKey is an rsa public key
-// Deprecated: use github.com/libp2p/go-libp2p/core/crypto.RsaPublicKey instead
-type RsaPublicKey = crypto.RsaPublicKey
-
-// GenerateRSAKeyPair generates a new rsa private and public key
-// Deprecated: use github.com/libp2p/go-libp2p/core/crypto.GenerateRSAKeyPair
-func GenerateRSAKeyPair(bits int, src io.Reader) (PrivKey, PubKey, error) {
- return crypto.GenerateRSAKeyPair(bits, src)
-}
-
-// UnmarshalRsaPrivateKey returns a private key from the input x509 bytes
-// Deprecated: use github.com/libp2p/go-libp2p/core/crypto.UnmarshalRsaPrivateKey
-func UnmarshalRsaPrivateKey(b []byte) (key PrivKey, err error) {
- return crypto.UnmarshalRsaPrivateKey(b)
-}
-
-// UnmarshalRsaPublicKey returns a public key from the input x509 bytes
-// Deprecated: use github.com/libp2p/go-libp2p/core/crypto.UnmarshalRsaPublicKey
-func UnmarshalRsaPublicKey(b []byte) (key PubKey, err error) {
- return crypto.UnmarshalRsaPublicKey(b)
-}
diff --git a/vendor/github.com/libp2p/go-libp2p-core/crypto/rsa_openssl.go b/vendor/github.com/libp2p/go-libp2p-core/crypto/rsa_openssl.go
deleted file mode 100644
index bba88d7f8..000000000
--- a/vendor/github.com/libp2p/go-libp2p-core/crypto/rsa_openssl.go
+++ /dev/null
@@ -1,36 +0,0 @@
-//go:build openssl
-// +build openssl
-
-package crypto
-
-import (
- "io"
-
- "github.com/libp2p/go-libp2p/core/crypto"
-)
-
-// RsaPrivateKey is an rsa private key
-// Deprecated: use github.com/libp2p/go-libp2p/core/crypto.RsaPrivateKey instead
-type RsaPrivateKey = crypto.RsaPrivateKey
-
-// RsaPublicKey is an rsa public key
-// Deprecated: use github.com/libp2p/go-libp2p/core/crypto.RsaPublicKey instead
-type RsaPublicKey = crypto.RsaPublicKey
-
-// GenerateRSAKeyPair generates a new rsa private and public key
-// Deprecated: use github.com/libp2p/go-libp2p/core/crypto.GenerateRSAKeyPair instead
-func GenerateRSAKeyPair(bits int, r io.Reader) (PrivKey, PubKey, error) {
- return crypto.GenerateRSAKeyPair(bits, r)
-}
-
-// UnmarshalRsaPrivateKey returns a private key from the input x509 bytes
-// Deprecated: use github.com/libp2p/go-libp2p/core/crypto.UnmarshalRsaPrivateKey instead
-func UnmarshalRsaPrivateKey(b []byte) (PrivKey, error) {
- return crypto.UnmarshalRsaPrivateKey(b)
-}
-
-// UnmarshalRsaPublicKey returns a public key from the input x509 bytes
-// Deprecated: use github.com/libp2p/go-libp2p/core/crypto.UnmarshalRsaPublicKey instead
-func UnmarshalRsaPublicKey(b []byte) (PubKey, error) {
- return crypto.UnmarshalRsaPublicKey(b)
-}
diff --git a/vendor/github.com/libp2p/go-libp2p-core/crypto/secp256k1.go b/vendor/github.com/libp2p/go-libp2p-core/crypto/secp256k1.go
deleted file mode 100644
index e8f3200d2..000000000
--- a/vendor/github.com/libp2p/go-libp2p-core/crypto/secp256k1.go
+++ /dev/null
@@ -1,33 +0,0 @@
-package crypto
-
-import (
- "io"
-
- "github.com/libp2p/go-libp2p/core/crypto"
-)
-
-// Secp256k1PrivateKey is an Secp256k1 private key
-// Deprecated: use github.com/libp2p/go-libp2p/core/crypto.Secp256k1PrivateKey instead
-type Secp256k1PrivateKey = crypto.Secp256k1PrivateKey
-
-// Secp256k1PublicKey is an Secp256k1 public key
-// Deprecated: use github.com/libp2p/go-libp2p/core/crypto.Secp256k1PublicKey instead
-type Secp256k1PublicKey = crypto.Secp256k1PublicKey
-
-// GenerateSecp256k1Key generates a new Secp256k1 private and public key pair
-// Deprecated: use github.com/libp2p/go-libp2p/core/crypto.GenerateSecp256k1Key instead
-func GenerateSecp256k1Key(src io.Reader) (PrivKey, PubKey, error) {
- return crypto.GenerateSecp256k1Key(src)
-}
-
-// UnmarshalSecp256k1PrivateKey returns a private key from bytes
-// Deprecated: use github.com/libp2p/go-libp2p/core/crypto.UnmarshalSecp256k1PrivateKey instead
-func UnmarshalSecp256k1PrivateKey(data []byte) (k PrivKey, err error) {
- return crypto.UnmarshalSecp256k1PrivateKey(data)
-}
-
-// UnmarshalSecp256k1PublicKey returns a public key from bytes
-// Deprecated: use github.com/libp2p/go-libp2p/core/crypto.UnmarshalSecp256k1PublicKey instead
-func UnmarshalSecp256k1PublicKey(data []byte) (_k PubKey, err error) {
- return crypto.UnmarshalSecp256k1PublicKey(data)
-}
diff --git a/vendor/github.com/libp2p/go-libp2p-core/peer/addrinfo.go b/vendor/github.com/libp2p/go-libp2p-core/peer/addrinfo.go
deleted file mode 100644
index d07be820a..000000000
--- a/vendor/github.com/libp2p/go-libp2p-core/peer/addrinfo.go
+++ /dev/null
@@ -1,54 +0,0 @@
-package peer
-
-import (
- "github.com/libp2p/go-libp2p/core/peer"
-
- ma "github.com/multiformats/go-multiaddr"
-)
-
-// AddrInfo is a small struct used to pass around a peer with
-// a set of addresses (and later, keys?).
-// Deprecated: use github.com/libp2p/go-libp2p/core/peer.AddrInfo instead
-type AddrInfo = peer.AddrInfo
-
-// Deprecated: use github.com/libp2p/go-libp2p/core/peer.ErrInvalidAddr instead
-var ErrInvalidAddr = peer.ErrInvalidAddr
-
-// AddrInfosFromP2pAddrs converts a set of Multiaddrs to a set of AddrInfos.
-// Deprecated: use github.com/libp2p/go-libp2p/core/peer.AddrInfosFromP2pAddrs instead
-func AddrInfosFromP2pAddrs(maddrs ...ma.Multiaddr) ([]AddrInfo, error) {
- return peer.AddrInfosFromP2pAddrs(maddrs...)
-}
-
-// SplitAddr splits a p2p Multiaddr into a transport multiaddr and a peer ID.
-//
-// * Returns a nil transport if the address only contains a /p2p part.
-// * Returns a empty peer ID if the address doesn't contain a /p2p part.
-// Deprecated: use github.com/libp2p/go-libp2p/core/peer.SplitAddr instead
-func SplitAddr(m ma.Multiaddr) (transport ma.Multiaddr, id ID) {
- return peer.SplitAddr(m)
-}
-
-// AddrInfoFromString builds an AddrInfo from the string representation of a Multiaddr
-// Deprecated: use github.com/libp2p/go-libp2p/core/peer.AddrInfoFromString instead
-func AddrInfoFromString(s string) (*AddrInfo, error) {
- return peer.AddrInfoFromString(s)
-}
-
-// AddrInfoFromP2pAddr converts a Multiaddr to an AddrInfo.
-// Deprecated: use github.com/libp2p/go-libp2p/core/peer.AddrInfoFromP2pAddr instead
-func AddrInfoFromP2pAddr(m ma.Multiaddr) (*AddrInfo, error) {
- return peer.AddrInfoFromP2pAddr(m)
-}
-
-// AddrInfoToP2pAddrs converts an AddrInfo to a list of Multiaddrs.
-// Deprecated: use github.com/libp2p/go-libp2p/core/peer.AddrInfoToP2pAddrs instead
-func AddrInfoToP2pAddrs(pi *AddrInfo) ([]ma.Multiaddr, error) {
- return peer.AddrInfoToP2pAddrs(pi)
-}
-
-// AddrInfosToIDs extracts the peer IDs from the passed AddrInfos and returns them in-order.
-// Deprecated: use github.com/libp2p/go-libp2p/core/peer.AddrInfosToIDs instead
-func AddrInfosToIDs(pis []AddrInfo) []ID {
- return peer.AddrInfosToIDs(pis)
-}
diff --git a/vendor/github.com/libp2p/go-libp2p-core/peer/peer.go b/vendor/github.com/libp2p/go-libp2p-core/peer/peer.go
deleted file mode 100644
index efd126755..000000000
--- a/vendor/github.com/libp2p/go-libp2p-core/peer/peer.go
+++ /dev/null
@@ -1,100 +0,0 @@
-// Deprecated: This package has moved into go-libp2p as a sub-package: github.com/libp2p/go-libp2p/core/peer.
-//
-// Package peer implements an object used to represent peers in the libp2p network.
-package peer
-
-import (
- "github.com/libp2p/go-libp2p/core/peer"
-
- "github.com/ipfs/go-cid"
- ic "github.com/libp2p/go-libp2p-core/crypto"
-)
-
-var (
- // ErrEmptyPeerID is an error for empty peer ID.
- // Deprecated: use github.com/libp2p/go-libp2p/core/peer.ErrEmptyPeerID instead
- ErrEmptyPeerID = peer.ErrEmptyPeerID
- // ErrNoPublicKey is an error for peer IDs that don't embed public keys
- // Deprecated: use github.com/libp2p/go-libp2p/core/peer.ErrNoPublicKey instead
- ErrNoPublicKey = peer.ErrNoPublicKey
-)
-
-// AdvancedEnableInlining enables automatically inlining keys shorter than
-// 42 bytes into the peer ID (using the "identity" multihash function).
-//
-// WARNING: This flag will likely be set to false in the future and eventually
-// be removed in favor of using a hash function specified by the key itself.
-// See: https://github.com/libp2p/specs/issues/138
-//
-// DO NOT change this flag unless you know what you're doing.
-//
-// This currently defaults to true for backwards compatibility but will likely
-// be set to false by default when an upgrade path is determined.
-// Deprecated: use github.com/libp2p/go-libp2p/core/peer.AdvancedEnableInlining instead
-var AdvancedEnableInlining = peer.AdvancedEnableInlining
-
-const maxInlineKeyLength = 42
-
-// ID is a libp2p peer identity.
-//
-// Peer IDs are derived by hashing a peer's public key and encoding the
-// hash output as a multihash. See IDFromPublicKey for details.
-// Deprecated: use github.com/libp2p/go-libp2p/core/peer.ID instead
-type ID = peer.ID
-
-// IDFromBytes casts a byte slice to the ID type, and validates
-// the value to make sure it is a multihash.
-// Deprecated: use github.com/libp2p/go-libp2p/core/peer.IDFromBytes instead
-func IDFromBytes(b []byte) (ID, error) {
- return peer.IDFromBytes(b)
-}
-
-// Decode accepts an encoded peer ID and returns the decoded ID if the input is
-// valid.
-//
-// The encoded peer ID can either be a CID of a key or a raw multihash (identity
-// or sha256-256).
-// Deprecated: use github.com/libp2p/go-libp2p/core/peer.Decode instead
-func Decode(s string) (ID, error) {
- return peer.Decode(s)
-}
-
-// Encode encodes a peer ID as a string.
-//
-// At the moment, it base58 encodes the peer ID but, in the future, it will
-// switch to encoding it as a CID by default.
-//
-// Deprecated: use github.com/libp2p/go-libp2p/core/peer.Encode instead
-func Encode(id ID) string {
- return peer.Encode(id)
-}
-
-// FromCid converts a CID to a peer ID, if possible.
-// Deprecated: use github.com/libp2p/go-libp2p/core/peer.FromCid instead
-func FromCid(c cid.Cid) (ID, error) {
- return peer.FromCid(c)
-}
-
-// ToCid encodes a peer ID as a CID of the public key.
-//
-// If the peer ID is invalid (e.g., empty), this will return the empty CID.
-// Deprecated: use github.com/libp2p/go-libp2p/core/peer.ToCid instead
-func ToCid(id ID) cid.Cid {
- return peer.ToCid(id)
-}
-
-// IDFromPublicKey returns the Peer ID corresponding to the public key pk.
-// Deprecated: use github.com/libp2p/go-libp2p/core/peer.IDFromPublicKey instead
-func IDFromPublicKey(pk ic.PubKey) (ID, error) {
- return peer.IDFromPublicKey(pk)
-}
-
-// IDFromPrivateKey returns the Peer ID corresponding to the secret key sk.
-// Deprecated: use github.com/libp2p/go-libp2p/core/peer.IDFromPrivateKey instead
-func IDFromPrivateKey(sk ic.PrivKey) (ID, error) {
- return peer.IDFromPrivateKey(sk)
-}
-
-// IDSlice for sorting peers.
-// Deprecated: use github.com/libp2p/go-libp2p/core/peer.IDSlice instead
-type IDSlice = peer.IDSlice
diff --git a/vendor/github.com/libp2p/go-libp2p-core/peer/record.go b/vendor/github.com/libp2p/go-libp2p-core/peer/record.go
deleted file mode 100644
index f2cc8691e..000000000
--- a/vendor/github.com/libp2p/go-libp2p-core/peer/record.go
+++ /dev/null
@@ -1,100 +0,0 @@
-package peer
-
-import (
- "github.com/libp2p/go-libp2p/core/peer"
-
- pb "github.com/libp2p/go-libp2p/core/peer/pb"
-)
-
-// PeerRecordEnvelopeDomain is the domain string used for peer records contained in a Envelope.
-// Deprecated: use github.com/libp2p/go-libp2p/core/peer.PeerRecordEnvelopeDomain instead
-const PeerRecordEnvelopeDomain = peer.PeerRecordEnvelopeDomain
-
-// PeerRecordEnvelopePayloadType is the type hint used to identify peer records in a Envelope.
-// Defined in https://github.com/multiformats/multicodec/blob/master/table.csv
-// with name "libp2p-peer-record".
-// Deprecated: use github.com/libp2p/go-libp2p/core/peer.PeerRecordEnvelopePayloadType instead
-var PeerRecordEnvelopePayloadType = peer.PeerRecordEnvelopePayloadType
-
-// PeerRecord contains information that is broadly useful to share with other peers,
-// either through a direct exchange (as in the libp2p identify protocol), or through
-// a Peer Routing provider, such as a DHT.
-//
-// Currently, a PeerRecord contains the public listen addresses for a peer, but this
-// is expected to expand to include other information in the future.
-//
-// PeerRecords are ordered in time by their Seq field. Newer PeerRecords must have
-// greater Seq values than older records. The NewPeerRecord function will create
-// a PeerRecord with a timestamp-based Seq value. The other PeerRecord fields should
-// be set by the caller:
-//
-// rec := peer.NewPeerRecord()
-// rec.PeerID = aPeerID
-// rec.Addrs = someAddrs
-//
-// Alternatively, you can construct a PeerRecord struct directly and use the TimestampSeq
-// helper to set the Seq field:
-//
-// rec := peer.PeerRecord{PeerID: aPeerID, Addrs: someAddrs, Seq: peer.TimestampSeq()}
-//
-// Failing to set the Seq field will not result in an error, however, a PeerRecord with a
-// Seq value of zero may be ignored or rejected by other peers.
-//
-// PeerRecords are intended to be shared with other peers inside a signed
-// routing.Envelope, and PeerRecord implements the routing.Record interface
-// to facilitate this.
-//
-// To share a PeerRecord, first call Sign to wrap the record in a Envelope
-// and sign it with the local peer's private key:
-//
-// rec := &PeerRecord{PeerID: myPeerId, Addrs: myAddrs}
-// envelope, err := rec.Sign(myPrivateKey)
-//
-// The resulting record.Envelope can be marshalled to a []byte and shared
-// publicly. As a convenience, the MarshalSigned method will produce the
-// Envelope and marshal it to a []byte in one go:
-//
-// rec := &PeerRecord{PeerID: myPeerId, Addrs: myAddrs}
-// recordBytes, err := rec.MarshalSigned(myPrivateKey)
-//
-// To validate and unmarshal a signed PeerRecord from a remote peer,
-// "consume" the containing envelope, which will return both the
-// routing.Envelope and the inner Record. The Record must be cast to
-// a PeerRecord pointer before use:
-//
-// envelope, untypedRecord, err := ConsumeEnvelope(envelopeBytes, PeerRecordEnvelopeDomain)
-// if err != nil {
-// handleError(err)
-// return
-// }
-// peerRec := untypedRecord.(*PeerRecord)
-//
-// Deprecated: use github.com/libp2p/go-libp2p/core/peer.PeerRecord instead
-type PeerRecord = peer.PeerRecord
-
-// NewPeerRecord returns a PeerRecord with a timestamp-based sequence number.
-// The returned record is otherwise empty and should be populated by the caller.
-// Deprecated: use github.com/libp2p/go-libp2p/core/peer.NewPeerRecord instead
-func NewPeerRecord() *PeerRecord {
- return peer.NewPeerRecord()
-}
-
-// PeerRecordFromAddrInfo creates a PeerRecord from an AddrInfo struct.
-// The returned record will have a timestamp-based sequence number.
-// Deprecated: use github.com/libp2p/go-libp2p/core/peer.PeerRecordFromAddrInfo instead
-func PeerRecordFromAddrInfo(info AddrInfo) *PeerRecord {
- return peer.PeerRecordFromAddrInfo(info)
-}
-
-// PeerRecordFromProtobuf creates a PeerRecord from a protobuf PeerRecord
-// struct.
-// Deprecated: use github.com/libp2p/go-libp2p/core/peer.PeerRecordFromProtobuf instead
-func PeerRecordFromProtobuf(msg *pb.PeerRecord) (*PeerRecord, error) {
- return peer.PeerRecordFromProtobuf(msg)
-}
-
-// TimestampSeq is a helper to generate a timestamp-based sequence number for a PeerRecord.
-// Deprecated: use github.com/libp2p/go-libp2p/core/peer.TimestampSeq instead
-func TimestampSeq() uint64 {
- return peer.TimestampSeq()
-}
diff --git a/vendor/github.com/libp2p/go-libp2p-core/peerstore/helpers.go b/vendor/github.com/libp2p/go-libp2p-core/peerstore/helpers.go
deleted file mode 100644
index f678e7eff..000000000
--- a/vendor/github.com/libp2p/go-libp2p-core/peerstore/helpers.go
+++ /dev/null
@@ -1,12 +0,0 @@
-package peerstore
-
-import (
- "github.com/libp2p/go-libp2p/core/peer"
- "github.com/libp2p/go-libp2p/core/peerstore"
-)
-
-// AddrInfos returns an AddrInfo for each specified peer ID, in-order.
-// Deprecated: use github.com/libp2p/go-libp2p/core/peerstore.AddrInfos instead
-func AddrInfos(ps Peerstore, peers []peer.ID) []peer.AddrInfo {
- return peerstore.AddrInfos(ps, peers)
-}
diff --git a/vendor/github.com/libp2p/go-libp2p-core/peerstore/peerstore.go b/vendor/github.com/libp2p/go-libp2p-core/peerstore/peerstore.go
deleted file mode 100644
index 64c38b227..000000000
--- a/vendor/github.com/libp2p/go-libp2p-core/peerstore/peerstore.go
+++ /dev/null
@@ -1,120 +0,0 @@
-// Deprecated: This package has moved into go-libp2p as a sub-package: github.com/libp2p/go-libp2p/core/peerstore.
-//
-// Package peerstore provides types and interfaces for local storage of address information,
-// metadata, and public key material about libp2p peers.
-package peerstore
-
-import (
- "github.com/libp2p/go-libp2p/core/peerstore"
-)
-
-// Deprecated: use github.com/libp2p/go-libp2p/core/peerstore.ErrNotFound instead
-var ErrNotFound = peerstore.ErrNotFound
-
-var (
- // AddressTTL is the expiration time of addresses.
- // Deprecated: use github.com/libp2p/go-libp2p/core/peerstore.AddressTTL instead
- AddressTTL = peerstore.AddressTTL
-
- // TempAddrTTL is the ttl used for a short lived address.
- // Deprecated: use github.com/libp2p/go-libp2p/core/peerstore.TempAddrTTL instead
- TempAddrTTL = peerstore.TempAddrTTL
-
- // ProviderAddrTTL is the TTL of an address we've received from a provider.
- // This is also a temporary address, but lasts longer. After this expires,
- // the records we return will require an extra lookup.
- // Deprecated: use github.com/libp2p/go-libp2p/core/peerstore.ProviderAddrTTL instead
- ProviderAddrTTL = peerstore.ProviderAddrTTL
-
- // RecentlyConnectedAddrTTL is used when we recently connected to a peer.
- // It means that we are reasonably certain of the peer's address.
- // Deprecated: use github.com/libp2p/go-libp2p/core/peerstore.RecentlyConnectedAddrTTL instead
- RecentlyConnectedAddrTTL = peerstore.RecentlyConnectedAddrTTL
-
- // OwnObservedAddrTTL is used for our own external addresses observed by peers.
- // Deprecated: use github.com/libp2p/go-libp2p/core/peerstore.OwnObservedAddrTTL instead
- OwnObservedAddrTTL = peerstore.OwnObservedAddrTTL
-)
-
-// Permanent TTLs (distinct so we can distinguish between them, constant as they
-// are, in fact, permanent)
-const (
- // PermanentAddrTTL is the ttl for a "permanent address" (e.g. bootstrap nodes).
- // Deprecated: use github.com/libp2p/go-libp2p/core/peerstore.PermanentAddrTTL instead
- PermanentAddrTTL = peerstore.PermanentAddrTTL
-
- // ConnectedAddrTTL is the ttl used for the addresses of a peer to whom
- // we're connected directly. This is basically permanent, as we will
- // clear them + re-add under a TempAddrTTL after disconnecting.
- // Deprecated: use github.com/libp2p/go-libp2p/core/peerstore.ConnectedAddrTTL instead
- ConnectedAddrTTL = peerstore.ConnectedAddrTTL
-)
-
-// Peerstore provides a threadsafe store of Peer related
-// information.
-// Deprecated: use github.com/libp2p/go-libp2p/core/peerstore.Peerstore instead
-type Peerstore = peerstore.Peerstore
-
-// PeerMetadata can handle values of any type. Serializing values is
-// up to the implementation. Dynamic type introspection may not be
-// supported, in which case explicitly enlisting types in the
-// serializer may be required.
-//
-// Refer to the docs of the underlying implementation for more
-// information.
-// Deprecated: use github.com/libp2p/go-libp2p/core/peerstore.PeerMetadata instead
-type PeerMetadata = peerstore.PeerMetadata
-
-// AddrBook holds the multiaddrs of peers.
-// Deprecated: use github.com/libp2p/go-libp2p/core/peerstore.AddrBook instead
-type AddrBook = peerstore.AddrBook
-
-// CertifiedAddrBook manages "self-certified" addresses for remote peers.
-// Self-certified addresses are contained in peer.PeerRecords
-// which are wrapped in a record.Envelope and signed by the peer
-// to whom they belong.
-//
-// Certified addresses (CA) are generally more secure than uncertified
-// addresses (UA). Consequently, CAs beat and displace UAs. When the
-// peerstore learns CAs for a peer, it will reject UAs for the same peer
-// (as long as the former haven't expired).
-// Furthermore, peer records act like sequenced snapshots of CAs. Therefore,
-// processing a peer record that's newer than the last one seen overwrites
-// all addresses with the incoming ones.
-//
-// This interface is most useful when combined with AddrBook.
-// To test whether a given AddrBook / Peerstore implementation supports
-// certified addresses, callers should use the GetCertifiedAddrBook helper or
-// type-assert on the CertifiedAddrBook interface:
-//
-// if cab, ok := aPeerstore.(CertifiedAddrBook); ok {
-// cab.ConsumePeerRecord(signedPeerRecord, aTTL)
-// }
-//
-// Deprecated: use github.com/libp2p/go-libp2p/core/peerstore.CertifiedAddrBook instead
-type CertifiedAddrBook = peerstore.CertifiedAddrBook
-
-// GetCertifiedAddrBook is a helper to "upcast" an AddrBook to a
-// CertifiedAddrBook by using type assertion. If the given AddrBook
-// is also a CertifiedAddrBook, it will be returned, and the ok return
-// value will be true. Returns (nil, false) if the AddrBook is not a
-// CertifiedAddrBook.
-//
-// Note that since Peerstore embeds the AddrBook interface, you can also
-// call GetCertifiedAddrBook(myPeerstore).
-// Deprecated: use github.com/libp2p/go-libp2p/core/peerstore.GetCertifiedAddrBook instead
-func GetCertifiedAddrBook(ab AddrBook) (cab CertifiedAddrBook, ok bool) {
- return peerstore.GetCertifiedAddrBook(ab)
-}
-
-// KeyBook tracks the keys of Peers.
-// Deprecated: use github.com/libp2p/go-libp2p/core/peerstore.KeyBook instead
-type KeyBook = peerstore.KeyBook
-
-// Metrics tracks metrics across a set of peers.
-// Deprecated: use github.com/libp2p/go-libp2p/core/peerstore.Metrics instead
-type Metrics = peerstore.Metrics
-
-// ProtoBook tracks the protocols supported by peers.
-// Deprecated: use github.com/libp2p/go-libp2p/core/peerstore.ProtoBook instead
-type ProtoBook = peerstore.ProtoBook
diff --git a/vendor/github.com/libp2p/go-libp2p-core/routing/options.go b/vendor/github.com/libp2p/go-libp2p-core/routing/options.go
deleted file mode 100644
index 10d93b436..000000000
--- a/vendor/github.com/libp2p/go-libp2p-core/routing/options.go
+++ /dev/null
@@ -1,20 +0,0 @@
-package routing
-
-import "github.com/libp2p/go-libp2p/core/routing"
-
-// Option is a single routing option.
-// Deprecated: use github.com/libp2p/go-libp2p/core/routing.Option instead
-type Option = routing.Option
-
-// Options is a set of routing options
-// Deprecated: use github.com/libp2p/go-libp2p/core/routing.Options instead
-type Options = routing.Options
-
-// Expired is an option that tells the routing system to return expired records
-// when no newer records are known.
-// Deprecated: use github.com/libp2p/go-libp2p/core/routing.Expired instead
-var Expired = routing.Expired
-
-// Offline is an option that tells the routing system to operate offline (i.e., rely on cached/local data only).
-// Deprecated: use github.com/libp2p/go-libp2p/core/routing.Offline instead
-var Offline = routing.Offline
diff --git a/vendor/github.com/libp2p/go-libp2p-core/routing/query.go b/vendor/github.com/libp2p/go-libp2p-core/routing/query.go
deleted file mode 100644
index bdfe18b52..000000000
--- a/vendor/github.com/libp2p/go-libp2p-core/routing/query.go
+++ /dev/null
@@ -1,72 +0,0 @@
-package routing
-
-import (
- "context"
-
- "github.com/libp2p/go-libp2p/core/routing"
-)
-
-// QueryEventType indicates the query event's type.
-// Deprecated: use github.com/libp2p/go-libp2p/core/routing.QueryEventType instead
-type QueryEventType = routing.QueryEventType
-
-// Number of events to buffer.
-// Deprecated: use github.com/libp2p/go-libp2p/core/routing.QueryEventBufferSize instead
-var QueryEventBufferSize = routing.QueryEventBufferSize
-
-const (
- // Sending a query to a peer.
- // Deprecated: use github.com/libp2p/go-libp2p/core/routing.SendingQuery instead
- SendingQuery = routing.SendingQuery
- // Got a response from a peer.
- // Deprecated: use github.com/libp2p/go-libp2p/core/routing.PeerResponse instead
- PeerResponse = routing.PeerResponse
- // Found a "closest" peer (not currently used).
- // Deprecated: use github.com/libp2p/go-libp2p/core/routing.FinalPeer instead
- FinalPeer = routing.FinalPeer
- // Got an error when querying.
- // Deprecated: use github.com/libp2p/go-libp2p/core/routing.QueryError instead
- QueryError = routing.QueryError
- // Found a provider.
- // Deprecated: use github.com/libp2p/go-libp2p/core/routing.Provider instead
- Provider = routing.Provider
- // Found a value.
- // Deprecated: use github.com/libp2p/go-libp2p/core/routing.Value instead
- Value = routing.Value
- // Adding a peer to the query.
- // Deprecated: use github.com/libp2p/go-libp2p/core/routing.AddingPeer instead
- AddingPeer = routing.AddingPeer
- // Dialing a peer.
- // Deprecated: use github.com/libp2p/go-libp2p/core/routing.DialingPeer instead
- DialingPeer = routing.DialingPeer
-)
-
-// QueryEvent is emitted for every notable event that happens during a DHT query.
-// Deprecated: use github.com/libp2p/go-libp2p/core/routing.QueryEvent instead
-type QueryEvent = routing.QueryEvent
-
-// RegisterForQueryEvents registers a query event channel with the given
-// context. The returned context can be passed to DHT queries to receive query
-// events on the returned channels.
-//
-// The passed context MUST be canceled when the caller is no longer interested
-// in query events.
-// Deprecated: use github.com/libp2p/go-libp2p/core/routing.RegisterForQueryEvents instead
-func RegisterForQueryEvents(ctx context.Context) (context.Context, <-chan *QueryEvent) {
- return routing.RegisterForQueryEvents(ctx)
-}
-
-// PublishQueryEvent publishes a query event to the query event channel
-// associated with the given context, if any.
-// Deprecated: use github.com/libp2p/go-libp2p/core/routing.PublishQueryEvent instead
-func PublishQueryEvent(ctx context.Context, ev *QueryEvent) {
- routing.PublishQueryEvent(ctx, ev)
-}
-
-// SubscribesToQueryEvents returns true if the context subscribes to query
-// events. If this function returns falls, calling `PublishQueryEvent` on the
-// context will be a no-op.
-// Deprecated: use github.com/libp2p/go-libp2p/core/routing.SubscribesToQueryEvents instead
-func SubscribesToQueryEvents(ctx context.Context) bool {
- return routing.SubscribesToQueryEvents(ctx)
-}
diff --git a/vendor/github.com/libp2p/go-libp2p-core/routing/routing.go b/vendor/github.com/libp2p/go-libp2p-core/routing/routing.go
deleted file mode 100644
index 42f9dbc29..000000000
--- a/vendor/github.com/libp2p/go-libp2p-core/routing/routing.go
+++ /dev/null
@@ -1,69 +0,0 @@
-// Deprecated: This package has moved into go-libp2p as a sub-package: github.com/libp2p/go-libp2p/core/routing.
-//
-// Package routing provides interfaces for peer routing and content routing in libp2p.
-package routing
-
-import (
- "context"
-
- ci "github.com/libp2p/go-libp2p/core/crypto"
- "github.com/libp2p/go-libp2p/core/peer"
- "github.com/libp2p/go-libp2p/core/routing"
-)
-
-// ErrNotFound is returned when the router fails to find the requested record.
-// Deprecated: use github.com/libp2p/go-libp2p/core/routing.ErrNotFound instead
-var ErrNotFound = routing.ErrNotFound
-
-// ErrNotSupported is returned when the router doesn't support the given record
-// type/operation.
-// Deprecated: use github.com/libp2p/go-libp2p/core/routing.ErrNotSupported instead
-var ErrNotSupported = routing.ErrNotSupported
-
-// ContentRouting is a value provider layer of indirection. It is used to find
-// information about who has what content.
-//
-// Content is identified by CID (content identifier), which encodes a hash
-// of the identified content in a future-proof manner.
-// Deprecated: use github.com/libp2p/go-libp2p/core/routing.ContentRouting instead
-type ContentRouting = routing.ContentRouting
-
-// PeerRouting is a way to find address information about certain peers.
-// This can be implemented by a simple lookup table, a tracking server,
-// or even a DHT.
-// Deprecated: use github.com/libp2p/go-libp2p/core/routing.PeerRouting instead
-type PeerRouting = routing.PeerRouting
-
-// ValueStore is a basic Put/Get interface.
-// Deprecated: use github.com/libp2p/go-libp2p/core/routing.ValueStore instead
-type ValueStore = routing.ValueStore
-
-// Routing is the combination of different routing types supported by libp2p.
-// It can be satisfied by a single item (such as a DHT) or multiple different
-// pieces that are more optimized to each task.
-// Deprecated: use github.com/libp2p/go-libp2p/core/routing.Routing instead
-type Routing = routing.Routing
-
-// PubKeyFetcher is an interfaces that should be implemented by value stores
-// that can optimize retrieval of public keys.
-//
-// TODO(steb): Consider removing, see https://github.com/libp2p/go-libp2p-routing/issues/22.
-// Deprecated: use github.com/libp2p/go-libp2p/core/routing.PubkeyFetcher instead
-type PubKeyFetcher = routing.PubKeyFetcher
-
-// KeyForPublicKey returns the key used to retrieve public keys
-// from a value store.
-// Deprecated: use github.com/libp2p/go-libp2p/core/routing.KeyForPublicKey instead
-func KeyForPublicKey(id peer.ID) string {
- return routing.KeyForPublicKey(id)
-}
-
-// GetPublicKey retrieves the public key associated with the given peer ID from
-// the value store.
-//
-// If the ValueStore is also a PubKeyFetcher, this method will call GetPublicKey
-// (which may be better optimized) instead of GetValue.
-// Deprecated: use github.com/libp2p/go-libp2p/core/routing.GetPublicKey instead
-func GetPublicKey(r ValueStore, ctx context.Context, p peer.ID) (ci.PubKey, error) {
- return routing.GetPublicKey(r, ctx, p)
-}
diff --git a/vendor/github.com/libp2p/go-libp2p-kad-dht/README.md b/vendor/github.com/libp2p/go-libp2p-kad-dht/README.md
index c774998a3..8a9eb57af 100644
--- a/vendor/github.com/libp2p/go-libp2p-kad-dht/README.md
+++ b/vendor/github.com/libp2p/go-libp2p-kad-dht/README.md
@@ -2,12 +2,10 @@
[![](https://img.shields.io/badge/made%20by-Protocol%20Labs-blue.svg?style=flat-square)](https://protocol.ai)
[![](https://img.shields.io/badge/project-libp2p-yellow.svg?style=flat-square)](https://libp2p.io)
-[![](https://img.shields.io/badge/freenode-%23libp2p-yellow.svg?style=flat-square)](http://webchat.freenode.net/?channels=%23yellow)
[![GoDoc](https://godoc.org/github.com/libp2p/go-libp2p-kad-dht?status.svg)](https://godoc.org/github.com/libp2p/go-libp2p-kad-dht)
-[![Build Status](https://travis-ci.org/libp2p/go-libp2p-kad-dht.svg?branch=master)](https://travis-ci.org/libp2p/go-libp2p-kad-dht)
[![Discourse posts](https://img.shields.io/discourse/https/discuss.libp2p.io/posts.svg)](https://discuss.libp2p.io)
-> A Kademlia DHT implementation on go-libp2p
+> A Go implementation of [libp2p Kademlia DHT specification](https://github.com/libp2p/specs/tree/master/kad-dht)
## Table of Contents
@@ -37,7 +35,3 @@ Small note: If editing the README, please conform to the [standard-readme](https
## License
[MIT](LICENSE) © Protocol Labs Inc.
-
----
-
-The last gx published version of this module was: 4.4.34: QmXuNFLZc6Nb5akB4sZsxK3doShsFKT1sZFvxLXJvZQwAW
diff --git a/vendor/github.com/libp2p/go-libp2p-kad-dht/dht.go b/vendor/github.com/libp2p/go-libp2p-kad-dht/dht.go
index 924c9bb6a..64b5d4911 100644
--- a/vendor/github.com/libp2p/go-libp2p-kad-dht/dht.go
+++ b/vendor/github.com/libp2p/go-libp2p-kad-dht/dht.go
@@ -106,8 +106,7 @@ type IpfsDHT struct {
// DHT protocols we query with. We'll only add peers to our routing
// table if they speak these protocols.
- protocols []protocol.ID
- protocolsStrs []string
+ protocols []protocol.ID
// DHT protocols we can respond to.
serverProtocols []protocol.ID
@@ -285,7 +284,6 @@ func makeDHT(ctx context.Context, h host.Host, cfg dhtcfg.Config) (*IpfsDHT, err
host: h,
birth: time.Now(),
protocols: protocols,
- protocolsStrs: protocol.ConvertToStrings(protocols),
serverProtocols: serverProtocols,
bucketSize: cfg.BucketSize,
alpha: cfg.Concurrency,
diff --git a/vendor/github.com/libp2p/go-libp2p-kad-dht/internal/config/config.go b/vendor/github.com/libp2p/go-libp2p-kad-dht/internal/config/config.go
index 9d9ae0ba1..7a1d14fca 100644
--- a/vendor/github.com/libp2p/go-libp2p-kad-dht/internal/config/config.go
+++ b/vendor/github.com/libp2p/go-libp2p-kad-dht/internal/config/config.go
@@ -114,7 +114,7 @@ var Defaults = func(o *Config) error {
o.RoutingTable.RefreshInterval = 10 * time.Minute
o.RoutingTable.AutoRefresh = true
o.RoutingTable.PeerFilter = EmptyRTFilter
- o.MaxRecordAge = time.Hour * 36
+ o.MaxRecordAge = providers.ProvideValidity
o.BucketSize = defaultBucketSize
o.Concurrency = 10
diff --git a/vendor/github.com/libp2p/go-libp2p-kad-dht/internal/net/message_manager.go b/vendor/github.com/libp2p/go-libp2p-kad-dht/internal/net/message_manager.go
index ad7f0c84d..f04dd0889 100644
--- a/vendor/github.com/libp2p/go-libp2p-kad-dht/internal/net/message_manager.go
+++ b/vendor/github.com/libp2p/go-libp2p-kad-dht/internal/net/message_manager.go
@@ -15,6 +15,8 @@ import (
logging "github.com/ipfs/go-log"
"github.com/libp2p/go-msgio"
+
+ //lint:ignore SA1019 TODO migrate away from gogo pb
"github.com/libp2p/go-msgio/protoio"
"go.opencensus.io/stats"
diff --git a/vendor/github.com/libp2p/go-libp2p-kad-dht/providers/providers_manager.go b/vendor/github.com/libp2p/go-libp2p-kad-dht/providers/providers_manager.go
index 589fe418d..a8099a424 100644
--- a/vendor/github.com/libp2p/go-libp2p-kad-dht/providers/providers_manager.go
+++ b/vendor/github.com/libp2p/go-libp2p-kad-dht/providers/providers_manager.go
@@ -25,8 +25,14 @@ import (
// keys stored in the data store.
const ProvidersKeyPrefix = "/providers/"
-// ProvideValidity is the default time that a provider record should last
-var ProvideValidity = time.Hour * 24
+// ProviderAddrTTL is the TTL of an address we've received from a provider.
+// This is also a temporary address, but lasts longer. After this expires,
+// the records we return will require an extra lookup.
+const ProviderAddrTTL = time.Minute * 30
+
+// ProvideValidity is the default time that a Provider Record should last on DHT
+// This value is also known as Provider Record Expiration Interval.
+var ProvideValidity = time.Hour * 48
var defaultCleanupInterval = time.Hour
var lruCacheSize = 256
var batchBufferSize = 256
@@ -232,7 +238,7 @@ func (pm *ProviderManager) run(ctx context.Context, proc goprocess.Process) {
// AddProvider adds a provider
func (pm *ProviderManager) AddProvider(ctx context.Context, k []byte, provInfo peer.AddrInfo) error {
if provInfo.ID != pm.self { // don't add own addrs.
- pm.pstore.AddAddrs(provInfo.ID, provInfo.Addrs, peerstore.ProviderAddrTTL)
+ pm.pstore.AddAddrs(provInfo.ID, provInfo.Addrs, ProviderAddrTTL)
}
prov := &addProv{
ctx: ctx,
diff --git a/vendor/github.com/libp2p/go-libp2p-kad-dht/subscriber_notifee.go b/vendor/github.com/libp2p/go-libp2p-kad-dht/subscriber_notifee.go
index a4ff2c3bf..ec9eca146 100644
--- a/vendor/github.com/libp2p/go-libp2p-kad-dht/subscriber_notifee.go
+++ b/vendor/github.com/libp2p/go-libp2p-kad-dht/subscriber_notifee.go
@@ -146,7 +146,7 @@ func handleLocalReachabilityChangedEvent(dht *IpfsDHT, e event.EvtLocalReachabil
// supporting the primary protocols, we do not want to add peers that are speaking obsolete secondary protocols to our
// routing table
func (dht *IpfsDHT) validRTPeer(p peer.ID) (bool, error) {
- b, err := dht.peerstore.FirstSupportedProtocol(p, dht.protocolsStrs...)
+ b, err := dht.peerstore.FirstSupportedProtocol(p, dht.protocols...)
if len(b) == 0 || err != nil {
return false, err
}
diff --git a/vendor/github.com/libp2p/go-libp2p-kad-dht/version.json b/vendor/github.com/libp2p/go-libp2p-kad-dht/version.json
index 5775de3b2..c6a304fe7 100644
--- a/vendor/github.com/libp2p/go-libp2p-kad-dht/version.json
+++ b/vendor/github.com/libp2p/go-libp2p-kad-dht/version.json
@@ -1,3 +1,3 @@
{
- "version": "v0.18.0"
+ "version": "v0.21.0"
}
diff --git a/vendor/github.com/libp2p/go-libp2p-kbucket/.travis.yml b/vendor/github.com/libp2p/go-libp2p-kbucket/.travis.yml
deleted file mode 100644
index a156d3eb5..000000000
--- a/vendor/github.com/libp2p/go-libp2p-kbucket/.travis.yml
+++ /dev/null
@@ -1,30 +0,0 @@
-os:
- - linux
-
-language: go
-
-go:
- - 1.13.x
-
-env:
- global:
- - GOTFLAGS="-race"
- matrix:
- - BUILD_DEPTYPE=gomod
-
-
-# disable travis install
-install:
- - true
-
-script:
- - bash <(curl -s https://raw.githubusercontent.com/ipfs/ci-helpers/master/travis-ci/run-standard-tests.sh)
-
-
-cache:
- directories:
- - $GOPATH/pkg/mod
- - $HOME/.cache/go-build
-
-notifications:
- email: false
diff --git a/vendor/github.com/libp2p/go-libp2p-kbucket/bucket.go b/vendor/github.com/libp2p/go-libp2p-kbucket/bucket.go
index a9a781d55..22975582b 100644
--- a/vendor/github.com/libp2p/go-libp2p-kbucket/bucket.go
+++ b/vendor/github.com/libp2p/go-libp2p-kbucket/bucket.go
@@ -6,7 +6,7 @@ import (
"container/list"
"time"
- "github.com/libp2p/go-libp2p-core/peer"
+ "github.com/libp2p/go-libp2p/core/peer"
)
// PeerInfo holds all related information for a peer in the K-Bucket.
@@ -120,15 +120,6 @@ func (b *bucket) remove(id peer.ID) bool {
return false
}
-func (b *bucket) moveToFront(id peer.ID) {
-
- for e := b.list.Front(); e != nil; e = e.Next() {
- if e.Value.(*PeerInfo).Id == id {
- b.list.MoveToFront(e)
- }
- }
-}
-
func (b *bucket) pushFront(p *PeerInfo) {
b.list.PushFront(p)
}
diff --git a/vendor/github.com/libp2p/go-libp2p-kbucket/peerdiversity/filter.go b/vendor/github.com/libp2p/go-libp2p-kbucket/peerdiversity/filter.go
index 2d2b7813a..36a0593b0 100644
--- a/vendor/github.com/libp2p/go-libp2p-kbucket/peerdiversity/filter.go
+++ b/vendor/github.com/libp2p/go-libp2p-kbucket/peerdiversity/filter.go
@@ -7,7 +7,7 @@ import (
"sort"
"sync"
- "github.com/libp2p/go-libp2p-core/peer"
+ "github.com/libp2p/go-libp2p/core/peer"
"github.com/libp2p/go-cidranger"
asnutil "github.com/libp2p/go-libp2p-asn-util"
@@ -27,9 +27,9 @@ type asnStore interface {
// A peer has one PeerIPGroupKey per address. Thus, a peer can belong to MULTIPLE Groups if it has
// multiple addresses.
// For now, given a peer address, our grouping mechanism is as follows:
-// 1. For IPv6 addresses, we group by the ASN of the IP address.
-// 2. For IPv4 addresses, all addresses that belong to same legacy (Class A)/8 allocations
-// OR share the same /16 prefix are in the same group.
+// 1. For IPv6 addresses, we group by the ASN of the IP address.
+// 2. For IPv4 addresses, all addresses that belong to same legacy (Class A)/8 allocations
+// OR share the same /16 prefix are in the same group.
type PeerIPGroupKey string
// https://en.wikipedia.org/wiki/List_of_assigned_/8_IPv4_address_blocks
@@ -153,7 +153,7 @@ func (f *Filter) TryAdd(p peer.ID) bool {
// don't allow peers for which we can't determine addresses.
addrs := f.pgm.PeerAddresses(p)
if len(addrs) == 0 {
- dfLog.Debugw("no addresses found for peer", "appKey", f.logKey, "peer", p.Pretty())
+ dfLog.Debugw("no addresses found for peer", "appKey", f.logKey, "peer", p)
return false
}
diff --git a/vendor/github.com/libp2p/go-libp2p-kbucket/sorting.go b/vendor/github.com/libp2p/go-libp2p-kbucket/sorting.go
index 1999099be..d25bbe047 100644
--- a/vendor/github.com/libp2p/go-libp2p-kbucket/sorting.go
+++ b/vendor/github.com/libp2p/go-libp2p-kbucket/sorting.go
@@ -4,7 +4,7 @@ import (
"container/list"
"sort"
- "github.com/libp2p/go-libp2p-core/peer"
+ "github.com/libp2p/go-libp2p/core/peer"
)
// A helper struct to sort peers by their distance to the local node
@@ -46,7 +46,7 @@ func (pds *peerDistanceSorter) sort() {
sort.Sort(pds)
}
-// Sort the given peers by their ascending distance from the target. A new slice is returned.
+// SortClosestPeers Sort the given peers by their ascending distance from the target. A new slice is returned.
func SortClosestPeers(peers []peer.ID, target ID) []peer.ID {
sorter := peerDistanceSorter{
peers: make([]peerDistance, 0, len(peers)),
diff --git a/vendor/github.com/libp2p/go-libp2p-kbucket/table.go b/vendor/github.com/libp2p/go-libp2p-kbucket/table.go
index f5a7741a2..956fe8b51 100644
--- a/vendor/github.com/libp2p/go-libp2p-kbucket/table.go
+++ b/vendor/github.com/libp2p/go-libp2p-kbucket/table.go
@@ -1,4 +1,4 @@
-// package kbucket implements a kademlia 'k-bucket' routing table.
+// Package kbucket implements a kademlia 'k-bucket' routing table.
package kbucket
import (
@@ -8,8 +8,8 @@ import (
"sync"
"time"
- "github.com/libp2p/go-libp2p-core/peer"
- "github.com/libp2p/go-libp2p-core/peerstore"
+ "github.com/libp2p/go-libp2p/core/peer"
+ "github.com/libp2p/go-libp2p/core/peerstore"
"github.com/libp2p/go-libp2p-kbucket/peerdiversity"
@@ -92,7 +92,7 @@ func (rt *RoutingTable) Close() error {
return nil
}
-// NPeersForCPL returns the number of peers we have for a given Cpl
+// NPeersForCpl returns the number of peers we have for a given Cpl
func (rt *RoutingTable) NPeersForCpl(cpl uint) int {
rt.tabLock.RLock()
defer rt.tabLock.RUnlock()
@@ -127,7 +127,7 @@ func (rt *RoutingTable) NPeersForCpl(cpl uint) int {
// whose LastSuccessfulOutboundQuery is above the maximum allowed threshold in that bucket with the new peer.
// If no such peer exists in that bucket, we do NOT add the peer to the Routing Table and return error "ErrPeerRejectedNoCapacity".
-// It returns a boolean value set to true if the peer was newly added to the Routing Table, false otherwise.
+// TryAddPeer returns a boolean value set to true if the peer was newly added to the Routing Table, false otherwise.
// It also returns any error that occurred while adding the peer to the Routing Table. If the error is not nil,
// the boolean value will ALWAYS be false i.e. the peer wont be added to the Routing Table it it's not already there.
//
@@ -151,11 +151,11 @@ func (rt *RoutingTable) addPeer(p peer.ID, queryPeer bool, isReplaceable bool) (
}
// peer already exists in the Routing Table.
- if peer := bucket.getPeer(p); peer != nil {
+ if peerInfo := bucket.getPeer(p); peerInfo != nil {
// if we're querying the peer first time after adding it, let's give it a
// usefulness bump. This will ONLY happen once.
- if peer.LastUsefulAt.IsZero() && queryPeer {
- peer.LastUsefulAt = lastUsefulAt
+ if peerInfo.LastUsefulAt.IsZero() && queryPeer {
+ peerInfo.LastUsefulAt = lastUsefulAt
}
return false, nil
}
@@ -264,14 +264,12 @@ func (rt *RoutingTable) GetPeerInfos() []PeerInfo {
var pis []PeerInfo
for _, b := range rt.buckets {
- for _, p := range b.peers() {
- pis = append(pis, p)
- }
+ pis = append(pis, b.peers()...)
}
return pis
}
-// UpdateLastSuccessfulOutboundQuery updates the LastSuccessfulOutboundQueryAt time of the peer.
+// UpdateLastSuccessfulOutboundQueryAt updates the LastSuccessfulOutboundQueryAt time of the peer.
// Returns true if the update was successful, false otherwise.
func (rt *RoutingTable) UpdateLastSuccessfulOutboundQueryAt(p peer.ID, t time.Time) bool {
rt.tabLock.Lock()
@@ -477,7 +475,7 @@ func (rt *RoutingTable) Print() {
for e := b.list.Front(); e != nil; e = e.Next() {
p := e.Value.(*PeerInfo).Id
- fmt.Printf("\t\t- %s %s\n", p.Pretty(), rt.metrics.LatencyEWMA(p).String())
+ fmt.Printf("\t\t- %s %s\n", p.String(), rt.metrics.LatencyEWMA(p).String())
}
}
rt.tabLock.RUnlock()
diff --git a/vendor/github.com/libp2p/go-libp2p-kbucket/table_refresh.go b/vendor/github.com/libp2p/go-libp2p-kbucket/table_refresh.go
index 2224a41ce..515604085 100644
--- a/vendor/github.com/libp2p/go-libp2p-kbucket/table_refresh.go
+++ b/vendor/github.com/libp2p/go-libp2p-kbucket/table_refresh.go
@@ -6,7 +6,7 @@ import (
"fmt"
"time"
- "github.com/libp2p/go-libp2p-core/peer"
+ "github.com/libp2p/go-libp2p/core/peer"
mh "github.com/multiformats/go-multihash"
)
diff --git a/vendor/github.com/libp2p/go-libp2p-kbucket/util.go b/vendor/github.com/libp2p/go-libp2p-kbucket/util.go
index aa7ce274a..7b16ab176 100644
--- a/vendor/github.com/libp2p/go-libp2p-kbucket/util.go
+++ b/vendor/github.com/libp2p/go-libp2p-kbucket/util.go
@@ -1,17 +1,16 @@
package kbucket
import (
- "bytes"
"errors"
+ "github.com/minio/sha256-simd"
- "github.com/libp2p/go-libp2p-core/peer"
ks "github.com/libp2p/go-libp2p-kbucket/keyspace"
+ "github.com/libp2p/go-libp2p/core/peer"
u "github.com/ipfs/go-ipfs-util"
- "github.com/minio/sha256-simd"
)
-// Returned if a routing table query returns no results. This is NOT expected
+// ErrLookupFailure is returned if a routing table query returns no results. This is NOT expected
// behaviour
var ErrLookupFailure = errors.New("failed to find any peer in table")
@@ -21,10 +20,6 @@ var ErrLookupFailure = errors.New("failed to find any peer in table")
// peer.ID or a util.Key. This unifies the keyspace
type ID []byte
-func (id ID) equal(other ID) bool {
- return bytes.Equal(id, other)
-}
-
func (id ID) less(other ID) bool {
a := ks.Key{Space: ks.XORKeySpace, Bytes: id}
b := ks.Key{Space: ks.XORKeySpace, Bytes: other}
diff --git a/vendor/github.com/libp2p/go-libp2p-kbucket/version.json b/vendor/github.com/libp2p/go-libp2p-kbucket/version.json
new file mode 100644
index 000000000..fc15ae013
--- /dev/null
+++ b/vendor/github.com/libp2p/go-libp2p-kbucket/version.json
@@ -0,0 +1,3 @@
+{
+ "version": "v0.5.0"
+}
diff --git a/vendor/github.com/libp2p/go-libp2p-routing-helpers/.travis.yml b/vendor/github.com/libp2p/go-libp2p-routing-helpers/.travis.yml
deleted file mode 100644
index a156d3eb5..000000000
--- a/vendor/github.com/libp2p/go-libp2p-routing-helpers/.travis.yml
+++ /dev/null
@@ -1,30 +0,0 @@
-os:
- - linux
-
-language: go
-
-go:
- - 1.13.x
-
-env:
- global:
- - GOTFLAGS="-race"
- matrix:
- - BUILD_DEPTYPE=gomod
-
-
-# disable travis install
-install:
- - true
-
-script:
- - bash <(curl -s https://raw.githubusercontent.com/ipfs/ci-helpers/master/travis-ci/run-standard-tests.sh)
-
-
-cache:
- directories:
- - $GOPATH/pkg/mod
- - $HOME/.cache/go-build
-
-notifications:
- email: false
diff --git a/vendor/github.com/libp2p/go-libp2p-routing-helpers/compconfig.go b/vendor/github.com/libp2p/go-libp2p-routing-helpers/compconfig.go
new file mode 100644
index 000000000..b3bd85fe6
--- /dev/null
+++ b/vendor/github.com/libp2p/go-libp2p-routing-helpers/compconfig.go
@@ -0,0 +1,27 @@
+package routinghelpers
+
+import (
+ "context"
+ "time"
+
+ "github.com/libp2p/go-libp2p/core/routing"
+ "github.com/multiformats/go-multihash"
+)
+
+type ParallelRouter struct {
+ Timeout time.Duration
+ IgnoreError bool
+ Router routing.Routing
+ ExecuteAfter time.Duration
+}
+
+type SequentialRouter struct {
+ Timeout time.Duration
+ IgnoreError bool
+ Router routing.Routing
+}
+
+type ProvideManyRouter interface {
+ ProvideMany(ctx context.Context, keys []multihash.Multihash) error
+ Ready() bool
+}
diff --git a/vendor/github.com/libp2p/go-libp2p-routing-helpers/composed.go b/vendor/github.com/libp2p/go-libp2p-routing-helpers/composed.go
index 69b2ea0a6..d16096f87 100644
--- a/vendor/github.com/libp2p/go-libp2p-routing-helpers/composed.go
+++ b/vendor/github.com/libp2p/go-libp2p-routing-helpers/composed.go
@@ -3,12 +3,11 @@ package routinghelpers
import (
"context"
- ci "github.com/libp2p/go-libp2p-core/crypto"
- "github.com/libp2p/go-libp2p-core/peer"
- "github.com/libp2p/go-libp2p-core/routing"
-
- multierror "github.com/hashicorp/go-multierror"
- cid "github.com/ipfs/go-cid"
+ "github.com/hashicorp/go-multierror"
+ "github.com/ipfs/go-cid"
+ ci "github.com/libp2p/go-libp2p/core/crypto"
+ "github.com/libp2p/go-libp2p/core/peer"
+ "github.com/libp2p/go-libp2p/core/routing"
)
// Compose composes the components into a single router. Not specifying a
diff --git a/vendor/github.com/libp2p/go-libp2p-routing-helpers/compparallel.go b/vendor/github.com/libp2p/go-libp2p-routing-helpers/compparallel.go
new file mode 100644
index 000000000..3c382397b
--- /dev/null
+++ b/vendor/github.com/libp2p/go-libp2p-routing-helpers/compparallel.go
@@ -0,0 +1,323 @@
+package routinghelpers
+
+import (
+ "context"
+ "errors"
+ "sync"
+ "sync/atomic"
+ "time"
+
+ "github.com/hashicorp/go-multierror"
+ "github.com/ipfs/go-cid"
+ "github.com/libp2p/go-libp2p/core/peer"
+ "github.com/libp2p/go-libp2p/core/routing"
+ "github.com/multiformats/go-multihash"
+)
+
+var _ routing.Routing = &composableParallel{}
+var _ ProvideManyRouter = &composableParallel{}
+
+type composableParallel struct {
+ routers []*ParallelRouter
+}
+
+// NewComposableParallel creates a Router that will execute methods from provided Routers in parallel.
+// On all methods, If IgnoreError flag is set, that Router will not stop the entire execution.
+// On all methods, If ExecuteAfter is set, that Router will be executed after the timer.
+// Router specific timeout will start counting AFTER the ExecuteAfter timer.
+func NewComposableParallel(routers []*ParallelRouter) *composableParallel {
+ return &composableParallel{
+ routers: routers,
+ }
+}
+
+// Provide will call all Routers in parallel.
+func (r *composableParallel) Provide(ctx context.Context, cid cid.Cid, provide bool) error {
+ return executeParallel(ctx, r.routers,
+ func(ctx context.Context, r routing.Routing) error {
+ return r.Provide(ctx, cid, provide)
+ },
+ )
+}
+
+// ProvideMany will call all supported Routers in parallel.
+func (r *composableParallel) ProvideMany(ctx context.Context, keys []multihash.Multihash) error {
+ return executeParallel(ctx, r.routers,
+ func(ctx context.Context, r routing.Routing) error {
+ pm, ok := r.(ProvideManyRouter)
+ if !ok {
+ return nil
+ }
+ return pm.ProvideMany(ctx, keys)
+ },
+ )
+}
+
+// Ready will call all supported ProvideMany Routers SEQUENTIALLY.
+// If some of them are not ready, this method will return false.
+func (r *composableParallel) Ready() bool {
+ for _, ro := range r.routers {
+ pm, ok := ro.Router.(ProvideManyRouter)
+ if !ok {
+ continue
+ }
+
+ if !pm.Ready() {
+ return false
+ }
+ }
+
+ return true
+}
+
+// FindProvidersAsync will execute all Routers in parallel, iterating results from them in unspecified order.
+// If count is set, only that amount of elements will be returned without any specification about from what router is obtained.
+// To gather providers from a set of Routers first, you can use the ExecuteAfter timer to delay some Router execution.
+func (r *composableParallel) FindProvidersAsync(ctx context.Context, cid cid.Cid, count int) <-chan peer.AddrInfo {
+ var totalCount int64
+ ch, _ := getChannelOrErrorParallel(
+ ctx,
+ r.routers,
+ func(ctx context.Context, r routing.Routing) (<-chan peer.AddrInfo, error) {
+ return r.FindProvidersAsync(ctx, cid, count), nil
+ },
+ func() bool {
+ return atomic.AddInt64(&totalCount, 1) > int64(count) && count != 0
+ },
+ )
+
+ return ch
+}
+
+// FindPeer will execute all Routers in parallel, getting the first AddrInfo found and cancelling all other Router calls.
+func (r *composableParallel) FindPeer(ctx context.Context, id peer.ID) (peer.AddrInfo, error) {
+ return getValueOrErrorParallel(ctx, r.routers,
+ func(ctx context.Context, r routing.Routing) (peer.AddrInfo, bool, error) {
+ addr, err := r.FindPeer(ctx, id)
+ return addr, addr.ID == "", err
+ },
+ )
+}
+
+// PutValue will execute all Routers in parallel. If a Router fails and IgnoreError flag is not set, the whole execution will fail.
+// Some Puts before the failure might be successful, even if we return an error.
+func (r *composableParallel) PutValue(ctx context.Context, key string, val []byte, opts ...routing.Option) error {
+ return executeParallel(ctx, r.routers,
+ func(ctx context.Context, r routing.Routing) error {
+ return r.PutValue(ctx, key, val, opts...)
+ },
+ )
+}
+
+// GetValue will execute all Routers in parallel. The first value found will be returned, cancelling all other executions.
+func (r *composableParallel) GetValue(ctx context.Context, key string, opts ...routing.Option) ([]byte, error) {
+ return getValueOrErrorParallel(ctx, r.routers,
+ func(ctx context.Context, r routing.Routing) ([]byte, bool, error) {
+ val, err := r.GetValue(ctx, key, opts...)
+ return val, len(val) == 0, err
+ })
+}
+
+func (r *composableParallel) SearchValue(ctx context.Context, key string, opts ...routing.Option) (<-chan []byte, error) {
+ return getChannelOrErrorParallel(
+ ctx,
+ r.routers,
+ func(ctx context.Context, r routing.Routing) (<-chan []byte, error) {
+ return r.SearchValue(ctx, key, opts...)
+ },
+ func() bool { return false },
+ )
+}
+
+func (r *composableParallel) Bootstrap(ctx context.Context) error {
+ return executeParallel(ctx, r.routers,
+ func(ctx context.Context, r routing.Routing) error {
+ return r.Bootstrap(ctx)
+ })
+}
+
+func getValueOrErrorParallel[T any](
+ ctx context.Context,
+ routers []*ParallelRouter,
+ f func(context.Context, routing.Routing) (T, bool, error),
+) (value T, err error) {
+ outCh := make(chan T)
+ errCh := make(chan error)
+
+ // global cancel context to stop early other router's execution.
+ ctx, cancelAll := context.WithCancel(ctx)
+ defer cancelAll()
+ var wg sync.WaitGroup
+ for _, r := range routers {
+ wg.Add(1)
+ go func(r *ParallelRouter) {
+ defer wg.Done()
+ tim := time.NewTimer(r.ExecuteAfter)
+ defer tim.Stop()
+ select {
+ case <-ctx.Done():
+ case <-tim.C:
+ ctx, cancel := context.WithTimeout(ctx, r.Timeout)
+ defer cancel()
+ value, empty, err := f(ctx, r.Router)
+ if err != nil &&
+ !errors.Is(err, routing.ErrNotFound) &&
+ !r.IgnoreError {
+ select {
+ case <-ctx.Done():
+ case errCh <- err:
+ }
+ return
+ }
+ if empty {
+ return
+ }
+ select {
+ case <-ctx.Done():
+ return
+ case outCh <- value:
+ }
+ }
+ }(r)
+ }
+
+ // goroutine closing everything when finishing execution
+ go func() {
+ wg.Wait()
+ close(outCh)
+ close(errCh)
+ }()
+
+ select {
+ case out, ok := <-outCh:
+ if !ok {
+ return value, routing.ErrNotFound
+ }
+ return out, nil
+ case err, ok := <-errCh:
+ if !ok {
+ return value, routing.ErrNotFound
+ }
+ return value, err
+ case <-ctx.Done():
+ return value, ctx.Err()
+ }
+}
+
+func executeParallel(
+ ctx context.Context,
+ routers []*ParallelRouter,
+ f func(context.Context, routing.Routing,
+ ) error) error {
+ var wg sync.WaitGroup
+ errCh := make(chan error)
+ for _, r := range routers {
+ wg.Add(1)
+ go func(r *ParallelRouter) {
+ defer wg.Done()
+ tim := time.NewTimer(r.ExecuteAfter)
+ defer tim.Stop()
+ select {
+ case <-ctx.Done():
+ if !r.IgnoreError {
+ errCh <- ctx.Err()
+ }
+ case <-tim.C:
+ ctx, cancel := context.WithTimeout(ctx, r.Timeout)
+ defer cancel()
+ err := f(ctx, r.Router)
+ if err != nil &&
+ !r.IgnoreError {
+ errCh <- err
+ }
+ }
+ }(r)
+ }
+
+ go func() {
+ wg.Wait()
+ close(errCh)
+ }()
+
+ var errOut error
+ for err := range errCh {
+ errOut = multierror.Append(errOut, err)
+ }
+
+ return errOut
+}
+
+func getChannelOrErrorParallel[T any](
+ ctx context.Context,
+ routers []*ParallelRouter,
+ f func(context.Context, routing.Routing) (<-chan T, error),
+ shouldStop func() bool,
+) (chan T, error) {
+ outCh := make(chan T)
+ errCh := make(chan error)
+ var wg sync.WaitGroup
+ ctx, cancelAll := context.WithCancel(ctx)
+ for _, r := range routers {
+ wg.Add(1)
+ go func(r *ParallelRouter) {
+ defer wg.Done()
+ tim := time.NewTimer(r.ExecuteAfter)
+ defer tim.Stop()
+ select {
+ case <-ctx.Done():
+ return
+ case <-tim.C:
+ ctx, cancel := context.WithTimeout(ctx, r.Timeout)
+ defer cancel()
+ valueChan, err := f(ctx, r.Router)
+ if err != nil && !r.IgnoreError {
+ select {
+ case <-ctx.Done():
+ case errCh <- err:
+ }
+ return
+ }
+ for {
+ select {
+ case <-ctx.Done():
+ return
+ case val, ok := <-valueChan:
+ if !ok {
+ return
+ }
+
+ if shouldStop() {
+ return
+ }
+
+ select {
+ case <-ctx.Done():
+ return
+ case outCh <- val:
+ }
+ }
+ }
+ }
+ }(r)
+ }
+
+ // goroutine closing everything when finishing execution
+ go func() {
+ wg.Wait()
+ close(outCh)
+ close(errCh)
+ cancelAll()
+ }()
+
+ select {
+ case err, ok := <-errCh:
+ if !ok {
+ return nil, routing.ErrNotFound
+ }
+ return nil, err
+ case <-ctx.Done():
+ return nil, ctx.Err()
+ default:
+ return outCh, nil
+ }
+}
diff --git a/vendor/github.com/libp2p/go-libp2p-routing-helpers/compsequential.go b/vendor/github.com/libp2p/go-libp2p-routing-helpers/compsequential.go
new file mode 100644
index 000000000..111185bdb
--- /dev/null
+++ b/vendor/github.com/libp2p/go-libp2p-routing-helpers/compsequential.go
@@ -0,0 +1,238 @@
+package routinghelpers
+
+import (
+ "context"
+ "errors"
+ "sync/atomic"
+
+ "github.com/ipfs/go-cid"
+ "github.com/libp2p/go-libp2p/core/peer"
+ "github.com/libp2p/go-libp2p/core/routing"
+ "github.com/multiformats/go-multihash"
+)
+
+var _ routing.Routing = &composableSequential{}
+
+type composableSequential struct {
+ routers []*SequentialRouter
+}
+
+func NewComposableSequential(routers []*SequentialRouter) *composableSequential {
+ return &composableSequential{
+ routers: routers,
+ }
+}
+
+// Provide calls Provide method per each router sequentially.
+// If some router fails and the IgnoreError flag is true, we continue to the next router.
+// Context timeout error will be also ignored if the flag is set.
+func (r *composableSequential) Provide(ctx context.Context, cid cid.Cid, provide bool) error {
+ return executeSequential(ctx, r.routers,
+ func(ctx context.Context, r routing.Routing) error {
+ return r.Provide(ctx, cid, provide)
+ })
+}
+
+// ProvideMany will call all supported Routers sequentially.
+func (r *composableSequential) ProvideMany(ctx context.Context, keys []multihash.Multihash) error {
+ return executeSequential(ctx, r.routers,
+ func(ctx context.Context, r routing.Routing) error {
+ pm, ok := r.(ProvideManyRouter)
+ if !ok {
+ return nil
+ }
+ return pm.ProvideMany(ctx, keys)
+ },
+ )
+}
+
+// Ready will call all supported ProvideMany Routers sequentially.
+// If some of them are not ready, this method will return false.
+func (r *composableSequential) Ready() bool {
+ for _, ro := range r.routers {
+ pm, ok := ro.Router.(ProvideManyRouter)
+ if !ok {
+ continue
+ }
+
+ if !pm.Ready() {
+ return false
+ }
+ }
+
+ return true
+}
+
+// FindProvidersAsync calls FindProvidersAsync per each router sequentially.
+// If some router fails and the IgnoreError flag is true, we continue to the next router.
+// Context timeout error will be also ignored if the flag is set.
+// If count is set, the channel will return up to count results, stopping routers iteration.
+func (r *composableSequential) FindProvidersAsync(ctx context.Context, cid cid.Cid, count int) <-chan peer.AddrInfo {
+ var totalCount int64
+ return getChannelOrErrorSequential(ctx, r.routers,
+ func(ctx context.Context, r routing.Routing) (<-chan peer.AddrInfo, error) {
+ return r.FindProvidersAsync(ctx, cid, count), nil
+ },
+ func() bool {
+ return atomic.AddInt64(&totalCount, 1) > int64(count) && count != 0
+ },
+ )
+}
+
+// FindPeer calls FindPeer per each router sequentially.
+// If some router fails and the IgnoreError flag is true, we continue to the next router.
+// Context timeout error will be also ignored if the flag is set.
+func (r *composableSequential) FindPeer(ctx context.Context, pid peer.ID) (peer.AddrInfo, error) {
+ return getValueOrErrorSequential(ctx, r.routers,
+ func(ctx context.Context, r routing.Routing) (peer.AddrInfo, bool, error) {
+ addr, err := r.FindPeer(ctx, pid)
+ return addr, addr.ID == "", err
+ },
+ )
+}
+
+// If some router fails and the IgnoreError flag is true, we continue to the next router.
+// Context timeout error will be also ignored if the flag is set.
+func (r *composableSequential) PutValue(ctx context.Context, key string, val []byte, opts ...routing.Option) error {
+ return executeSequential(ctx, r.routers,
+ func(ctx context.Context, r routing.Routing) error {
+ return r.PutValue(ctx, key, val, opts...)
+ })
+}
+
+// If some router fails and the IgnoreError flag is true, we continue to the next router.
+// Context timeout error will be also ignored if the flag is set.
+func (r *composableSequential) GetValue(ctx context.Context, key string, opts ...routing.Option) ([]byte, error) {
+ return getValueOrErrorSequential(ctx, r.routers,
+ func(ctx context.Context, r routing.Routing) ([]byte, bool, error) {
+ val, err := r.GetValue(ctx, key, opts...)
+ return val, len(val) == 0, err
+ },
+ )
+}
+
+// If some router fails and the IgnoreError flag is true, we continue to the next router.
+// Context timeout error will be also ignored if the flag is set.
+func (r *composableSequential) SearchValue(ctx context.Context, key string, opts ...routing.Option) (<-chan []byte, error) {
+ ch := getChannelOrErrorSequential(ctx, r.routers,
+ func(ctx context.Context, r routing.Routing) (<-chan []byte, error) {
+ return r.SearchValue(ctx, key, opts...)
+ },
+ func() bool { return false },
+ )
+
+ return ch, nil
+
+}
+
+// If some router fails and the IgnoreError flag is true, we continue to the next router.
+// Context timeout error will be also ignored if the flag is set.
+func (r *composableSequential) Bootstrap(ctx context.Context) error {
+ return executeSequential(ctx, r.routers,
+ func(ctx context.Context, r routing.Routing) error {
+ return r.Bootstrap(ctx)
+ },
+ )
+}
+
+func getValueOrErrorSequential[T any](
+ ctx context.Context,
+ routers []*SequentialRouter,
+ f func(context.Context, routing.Routing) (T, bool, error),
+) (value T, err error) {
+ for _, router := range routers {
+ if ctxErr := ctx.Err(); ctxErr != nil {
+ return value, ctxErr
+ }
+
+ ctx, cancel := context.WithTimeout(ctx, router.Timeout)
+ defer cancel()
+ value, empty, err := f(ctx, router.Router)
+ if err != nil &&
+ !errors.Is(err, routing.ErrNotFound) &&
+ !router.IgnoreError {
+ return value, err
+ }
+
+ if empty {
+ continue
+ }
+
+ return value, nil
+ }
+
+ return value, routing.ErrNotFound
+}
+
+func executeSequential(
+ ctx context.Context,
+ routers []*SequentialRouter,
+ f func(context.Context, routing.Routing,
+ ) error) error {
+ for _, router := range routers {
+ if ctxErr := ctx.Err(); ctxErr != nil {
+ return ctxErr
+ }
+ ctx, cancel := context.WithTimeout(ctx, router.Timeout)
+ if err := f(ctx, router.Router); err != nil &&
+ !errors.Is(err, routing.ErrNotFound) &&
+ !router.IgnoreError {
+ cancel()
+ return err
+ }
+ cancel()
+ }
+
+ return nil
+}
+
+func getChannelOrErrorSequential[T any](
+ ctx context.Context,
+ routers []*SequentialRouter,
+ f func(context.Context, routing.Routing) (<-chan T, error),
+ shouldStop func() bool,
+) chan T {
+ chanOut := make(chan T)
+
+ go func() {
+ for _, router := range routers {
+ if ctxErr := ctx.Err(); ctxErr != nil {
+ close(chanOut)
+ return
+ }
+
+ ctx, cancel := context.WithTimeout(ctx, router.Timeout)
+ rch, err := f(ctx, router.Router)
+ if err != nil &&
+ !errors.Is(err, routing.ErrNotFound) &&
+ !router.IgnoreError {
+ cancel()
+ break
+ }
+
+ f:
+ for {
+ select {
+ case <-ctx.Done():
+ break f
+ case v, ok := <-rch:
+ if !ok {
+ break f
+ }
+ select {
+ case <-ctx.Done():
+ break f
+ case chanOut <- v:
+ }
+
+ }
+ }
+
+ cancel()
+ }
+
+ close(chanOut)
+ }()
+
+ return chanOut
+}
diff --git a/vendor/github.com/libp2p/go-libp2p-routing-helpers/limited.go b/vendor/github.com/libp2p/go-libp2p-routing-helpers/limited.go
index 04ad901f8..143e9f05c 100644
--- a/vendor/github.com/libp2p/go-libp2p-routing-helpers/limited.go
+++ b/vendor/github.com/libp2p/go-libp2p-routing-helpers/limited.go
@@ -5,9 +5,9 @@ import (
"io"
"strings"
- ci "github.com/libp2p/go-libp2p-core/crypto"
- "github.com/libp2p/go-libp2p-core/peer"
- "github.com/libp2p/go-libp2p-core/routing"
+ ci "github.com/libp2p/go-libp2p/core/crypto"
+ "github.com/libp2p/go-libp2p/core/peer"
+ "github.com/libp2p/go-libp2p/core/routing"
)
// LimitedValueStore limits the internal value store to the given namespaces.
diff --git a/vendor/github.com/libp2p/go-libp2p-routing-helpers/null.go b/vendor/github.com/libp2p/go-libp2p-routing-helpers/null.go
index d4f424a44..b2ed2ab7f 100644
--- a/vendor/github.com/libp2p/go-libp2p-routing-helpers/null.go
+++ b/vendor/github.com/libp2p/go-libp2p-routing-helpers/null.go
@@ -3,10 +3,9 @@ package routinghelpers
import (
"context"
- "github.com/libp2p/go-libp2p-core/peer"
- "github.com/libp2p/go-libp2p-core/routing"
-
"github.com/ipfs/go-cid"
+ "github.com/libp2p/go-libp2p/core/peer"
+ "github.com/libp2p/go-libp2p/core/routing"
)
// Null is a router that doesn't do anything.
diff --git a/vendor/github.com/libp2p/go-libp2p-routing-helpers/parallel.go b/vendor/github.com/libp2p/go-libp2p-routing-helpers/parallel.go
index 389adc0ba..fece5bc4a 100644
--- a/vendor/github.com/libp2p/go-libp2p-routing-helpers/parallel.go
+++ b/vendor/github.com/libp2p/go-libp2p-routing-helpers/parallel.go
@@ -7,13 +7,12 @@ import (
"reflect"
"sync"
- ci "github.com/libp2p/go-libp2p-core/crypto"
- "github.com/libp2p/go-libp2p-core/peer"
- "github.com/libp2p/go-libp2p-core/routing"
-
- multierror "github.com/hashicorp/go-multierror"
- cid "github.com/ipfs/go-cid"
+ "github.com/hashicorp/go-multierror"
+ "github.com/ipfs/go-cid"
record "github.com/libp2p/go-libp2p-record"
+ ci "github.com/libp2p/go-libp2p/core/crypto"
+ "github.com/libp2p/go-libp2p/core/peer"
+ "github.com/libp2p/go-libp2p/core/routing"
)
// Parallel operates on the slice of routers in parallel.
@@ -171,17 +170,12 @@ func (r Parallel) search(ctx context.Context, do func(routing.Routing) (<-chan [
ctx, cancel := context.WithCancel(ctx)
out := make(chan []byte)
- var errs []error
var wg sync.WaitGroup
for _, ri := range r.Routers {
vchan, err := do(ri)
- switch err {
- case nil:
- case routing.ErrNotFound, routing.ErrNotSupported:
+ if err != nil {
continue
- default:
- errs = append(errs, err)
}
wg.Add(1)
diff --git a/vendor/github.com/libp2p/go-libp2p-routing-helpers/tiered.go b/vendor/github.com/libp2p/go-libp2p-routing-helpers/tiered.go
index 829be2183..85313ca51 100644
--- a/vendor/github.com/libp2p/go-libp2p-routing-helpers/tiered.go
+++ b/vendor/github.com/libp2p/go-libp2p-routing-helpers/tiered.go
@@ -4,13 +4,12 @@ import (
"context"
"io"
- ci "github.com/libp2p/go-libp2p-core/crypto"
- "github.com/libp2p/go-libp2p-core/peer"
- "github.com/libp2p/go-libp2p-core/routing"
-
- multierror "github.com/hashicorp/go-multierror"
- cid "github.com/ipfs/go-cid"
+ "github.com/hashicorp/go-multierror"
+ "github.com/ipfs/go-cid"
record "github.com/libp2p/go-libp2p-record"
+ ci "github.com/libp2p/go-libp2p/core/crypto"
+ "github.com/libp2p/go-libp2p/core/peer"
+ "github.com/libp2p/go-libp2p/core/routing"
)
// Tiered is like the Parallel except that GetValue and FindPeer
@@ -66,7 +65,7 @@ func (r Tiered) GetValue(ctx context.Context, key string, opts ...routing.Option
// returning results in monotonically increasing "freshness" from all
// sub-routers.
func (r Tiered) SearchValue(ctx context.Context, key string, opts ...routing.Option) (<-chan []byte, error) {
- return Parallel{Routers: r.Routers, Validator: r.Validator}.SearchValue(ctx, key, opts...)
+ return Parallel(r).SearchValue(ctx, key, opts...)
}
// GetPublicKey sequentially searches each sub-router for the the public key,
diff --git a/vendor/github.com/libp2p/go-libp2p-routing-helpers/version.json b/vendor/github.com/libp2p/go-libp2p-routing-helpers/version.json
new file mode 100644
index 000000000..372b6eab3
--- /dev/null
+++ b/vendor/github.com/libp2p/go-libp2p-routing-helpers/version.json
@@ -0,0 +1,3 @@
+{
+ "version": "v0.4.0"
+}
diff --git a/vendor/github.com/libp2p/go-libp2p/.gitignore b/vendor/github.com/libp2p/go-libp2p/.gitignore
index a505ae07f..64c6d853d 100644
--- a/vendor/github.com/libp2p/go-libp2p/.gitignore
+++ b/vendor/github.com/libp2p/go-libp2p/.gitignore
@@ -1,2 +1,6 @@
*.swp
.idea
+*.qlog
+*.sqlog
+*.qlog.zst
+*.sqlog.zst
diff --git a/vendor/github.com/libp2p/go-libp2p/CHANGELOG.md b/vendor/github.com/libp2p/go-libp2p/CHANGELOG.md
new file mode 100644
index 000000000..2306f0ff8
--- /dev/null
+++ b/vendor/github.com/libp2p/go-libp2p/CHANGELOG.md
@@ -0,0 +1,293 @@
+# Table Of Contents
+- [v0.28.0](#v0280)
+- [v0.27.0](#v0270)
+- [v0.26.4](#v0264)
+- [v0.26.3](#v0263)
+- [v0.26.2](#v0262)
+- [v0.26.1](#v0261)
+- [v0.26.0](#v0260)
+- [v0.25.1](#v0251)
+- [v0.25.0](#v0250)
+
+# [v0.28.0](https://github.com/libp2p/go-libp2p/releases/tag/v0.28.0)
+
+## 🔦 Highlights
+
+### Smart Dialing
+
+This release introduces smart dialing logic. Currently, libp2p dials all addresses of a remote peer in parallel, and
+aborts all outstanding dials as soon as the first one succeeds.
+Dialing many addresses in parallel creates a lot of churn on the client side, and unnecessary load on the network and
+on the server side, and is heavily discouraged by the networking community (see [RFC 8305](https://www.rfc-editor.org/rfc/rfc8305) for example).
+
+When connecting to a peer we first determine the order to dial its addresses. This ranking logic considers a number of corner cases
+described in detail in the documentation of the swarm package (`swarm.DefaultDialRanker`).
+At a high level, this is what happens:
+* If a peer offers a WebTransport and a QUIC address (on the same IP:port), the QUIC address is preferred.
+* If a peer has a QUIC and a TCP address, the QUIC address is dialed first. Only if the connection attempt doesn't succeed within 250ms, a TCP connection is started.
+
+Our measurements on the IPFS network show that for >90% of established libp2p connections, the first connection attempt succeeds,
+leading to a dramatic decrease in the number of aborted connection attempts.
+
+We also added new metrics to the swarm Grafana dashboard, showing:
+* The number of connection attempts it took to establish a connection
+* The delay introduced by the ranking logic
+
+This feature should be safe to enable for nodes running in data centers and for most nodes in home networks.
+However, there are some (mostly home and corporate networks) that block all UDP traffic. If enabled, the current implementation
+of the smart dialing logic will lead to a regression, since it prefers QUIC addresses over TCP addresses. Nodes would still be
+able to connect, but connection establishment of the TCP connection would be delayed by 250ms.
+
+In a future release (see #1605 for details), we will introduce a feature called blackhole detection. By observing the outcome of
+QUIC connection attempts, we can determine if UDP traffic is blocked (namely, if all QUIC connection attempts fail), and stop
+dialing QUIC in this case altogether. Once this detection logic is in place, smart dialing will be enabled by default.
+
+### More Metrics!
+Since the last release, we've added metrics for:
+* [Holepunching](https://github.com/libp2p/go-libp2p/pull/2246)
+* Smart Dialing (see above)
+
+### WebTransport
+* [#2251](https://github.com/libp2p/go-libp2p/pull/2251): Infer public WebTransport address from `quic-v1` addresses if both transports are using the same port for both quic-v1 and WebTransport addresses.
+* [#2271](https://github.com/libp2p/go-libp2p/pull/2271): Only add certificate hashes to WebTransport multiaddress if listening on WebTransport
+
+## Housekeeping updates
+* Identify
+ * [#2303](https://github.com/libp2p/go-libp2p/pull/2303): Don't send default protocol version
+ * Prevent polluting PeerStore with local addrs
+ * [#2325](https://github.com/libp2p/go-libp2p/pull/2325): Don't save signed peer records
+ * [#2300](https://github.com/libp2p/go-libp2p/pull/2300): Filter received addresses based on the node's remote address
+* WebSocket
+ * [#2280](https://github.com/libp2p/go-libp2p/pull/2280): Reverted back to the Gorilla library for WebSocket
+* NAT
+ * [#2248](https://github.com/libp2p/go-libp2p/pull/2248): Move NAT mapping logic out of the host
+
+## 🐞 Bugfixes
+* Identify
+ * [Reject signed peer records on peer ID mismatch](https://github.com/libp2p/go-libp2p/commit/8d771355b41297623e05b04a865d029a2522a074)
+ * [#2299](https://github.com/libp2p/go-libp2p/pull/2299): Avoid spuriously pushing updates
+* Swarm
+ * [#2322](https://github.com/libp2p/go-libp2p/pull/2322): Dedup addresses to dial
+ * [#2284](https://github.com/libp2p/go-libp2p/pull/2284): Change maps with multiaddress keys to use strings
+* QUIC
+ * [#2262](https://github.com/libp2p/go-libp2p/pull/2262): Prioritize listen connections for reuse
+ * [#2276](https://github.com/libp2p/go-libp2p/pull/2276): Don't panic when quic-go's accept call errors
+ * [#2263](https://github.com/libp2p/go-libp2p/pull/2263): Fix race condition when generating random holepunch packet
+
+**Full Changelog**: https://github.com/libp2p/go-libp2p/compare/v0.27.0...v0.28.0
+
+# [v0.27.0](https://github.com/libp2p/go-libp2p/releases/tag/v0.27.0)
+
+### Breaking Changes
+
+* The `LocalPrivateKey` method was removed from the `network.Conn` interface. [#2144](https://github.com/libp2p/go-libp2p/pull/2144)
+
+## 🔦 Highlights
+
+### Additional metrics
+Since the last release, we've added metrics for:
+* [Relay Service](https://github.com/libp2p/go-libp2p/pull/2154): RequestStatus, RequestCounts, RejectionReasons for Reservation and Connection Requests,
+ConnectionDuration, BytesTransferred, Relay Service Status.
+* [Autorelay](https://github.com/libp2p/go-libp2p/pull/2185): relay finder status, reservation request outcomes, current reservations, candidate circuit v2 support, current candidates, relay addresses updated, num relay address, and scheduled work times
+
+## 🐞 Bugfixes
+
+* autonat: don't change status on dial request refused [2225](https://github.com/libp2p/go-libp2p/pull/2225)
+* relaysvc: fix flaky TestReachabilityChangeEvent [2215](https://github.com/libp2p/go-libp2p/pull/2215)
+* basichost: prevent duplicate dials [2196](https://github.com/libp2p/go-libp2p/pull/2196)
+* websocket: don't set a WSS multiaddr for accepted unencrypted conns [2199](https://github.com/libp2p/go-libp2p/pull/2199)
+* identify: Fix IdentifyWait when Connected events happen out of order [2173](https://github.com/libp2p/go-libp2p/pull/2173)
+* circuitv2: cleanup relay service properly [2164](https://github.com/libp2p/go-libp2p/pull/2164)
+
+**Full Changelog**: https://github.com/libp2p/go-libp2p/compare/v0.26.4...v0.27.0
+
+# [v0.26.4](https://github.com/libp2p/go-libp2p/releases/tag/v0.26.4)
+
+This patch release fixes a busy-looping happening inside AutoRelay on private nodes, see [2208](https://github.com/libp2p/go-libp2p/pull/2208).
+
+**Full Changelog**: https://github.com/libp2p/go-libp2p/compare/v0.26.0...v0.26.4
+
+# [v0.26.3](https://github.com/libp2p/go-libp2p/releases/tag/v0.26.3)
+
+* rcmgr: fix JSON marshalling of ResourceManagerStat peer map [2156](https://github.com/libp2p/go-libp2p/pull/2156)
+* websocket: Don't limit message sizes in the websocket reader [2193](https://github.com/libp2p/go-libp2p/pull/2193)
+
+**Full Changelog**: https://github.com/libp2p/go-libp2p/compare/v0.26.0...v0.26.3
+
+# [v0.26.2](https://github.com/libp2p/go-libp2p/releases/tag/v0.26.2)
+
+This patch release fixes two bugs:
+* A panic in WebTransport: https://github.com/quic-go/webtransport-go/releases/tag/v0.5.2
+* Incorrect accounting of accepted connections in the swarm metrics: [#2147](https://github.com/libp2p/go-libp2p/pull/2147)
+
+**Full Changelog**: https://github.com/libp2p/go-libp2p/compare/v0.26.0...v0.26.2
+
+# v0.26.1
+
+This version was retracted due to errors when publishing the release.
+
+# [v0.26.0](https://github.com/libp2p/go-libp2p/releases/tag/v0.26.0)
+
+## 🔦 Highlights
+
+### Circuit Relay Changes
+
+#### [Removed Circuit Relay v1](https://github.com/libp2p/go-libp2p/pull/2107)
+
+We've decided to remove support for Circuit Relay v1 in this release. v1 Relays have been retired a few months ago. Notably, running the Relay v1 protocol was expensive and resulted in only a small number of nodes in the network. Users had to either manually configure these nodes as static relays, or discover them from the DHT.
+Furthermore, rust-libp2p [has dropped support](https://github.com/libp2p/rust-libp2p/pull/2549) and js-libp2p [is dropping support](https://github.com/libp2p/js-libp2p/pull/1533) for Relay v1.
+
+Support for Relay v2 was first added in [late 2021 in v0.16.0](https://github.com/libp2p/go-libp2p/releases/tag/v0.16.0). With Circuit Relay v2 it became cheap to run (limited) relays. Public nodes also started the relay service by default. There's now a massive number of Relay v2 nodes on the IPFS network, and they don't advertise their service to the DHT any more. Because there's now so many of these nodes, connecting to just a small number of nodes (e.g. by joining the DHT), a node is statistically guaranteed to connect to some relays.
+
+#### [Unlimited Relay v2](https://github.com/libp2p/go-libp2p/pull/2125)
+
+In conjunction with removing relay v1, we also added an option to Circuit Relay v2 to disable limits.
+This is done by enabling `WithInfiniteLimits`. When enabled this allows for users to have a drop in replacement for Relay v1 with Relay v2.
+
+### Additional metrics
+
+Since the last release, we've added additional metrics to different components.
+Metrics were added to:
+* [AutoNat](https://github.com/libp2p/go-libp2p/pull/2086): Current Reachability Status and Confidence, Client and Server DialResponses, Server DialRejections. The dashboard is [available here](https://github.com/libp2p/go-libp2p/blob/master/dashboards/autonat/autonat.json).
+* Swarm:
+ - [Early Muxer Selection](https://github.com/libp2p/go-libp2p/pull/2119): Added early_muxer label indicating whether a connection was established using early muxer selection.
+ - [IP Version](https://github.com/libp2p/go-libp2p/pull/2114): Added ip_version label to connection metrics
+* Identify:
+ - Metrics for Identify, IdentifyPush, PushesTriggered (https://github.com/libp2p/go-libp2p/pull/2069)
+ - Address Count, Protocol Count, Connection IDPush Support (https://github.com/libp2p/go-libp2p/pull/2126)
+
+
+We also migrated the metric dashboards to a top-level [dashboards](https://github.com/libp2p/go-libp2p/tree/master/dashboards) directory.
+
+## 🐞 Bugfixes
+
+### AutoNat
+* [Fixed a bug](https://github.com/libp2p/go-libp2p/issues/2091) where AutoNat would emit events when the observed address has changed even though the node reachability hadn't changed.
+
+### Relay Manager
+* [Fixed a bug](https://github.com/libp2p/go-libp2p/pull/2093) where the Relay Manager started a new relay even though the previous reachability was `Public` or if a relay already existed.
+
+### [Stop sending detailed error messages on closing QUIC connections](https://github.com/libp2p/go-libp2p/pull/2112)
+
+Users reported seeing confusing error messages and could not determine the root cause or if the error was from a local or remote peer:
+
+```{12D... Application error 0x0: conn-27571160: system: cannot reserve inbound connection: resource limit exceeded}```
+
+This error occurred when a connection had been made with a remote peer but the remote peer dropped the connection (due to it exceeding limits).
+This was actually an `Application error` emitted by `quic-go` and it was a bug in go-libp2p that we sent the whole message.
+For now, we decided to stop sending this confusing error message. In the future, we will report such errors via [error codes](https://github.com/libp2p/specs/issues/479).
+
+**Full Changelog**: https://github.com/libp2p/go-libp2p/compare/v0.25.1...v0.26.0
+
+# [v0.25.1](https://github.com/libp2p/go-libp2p/releases/tag/v0.25.1)
+
+Fix some test-utils used by https://github.com/libp2p/go-libp2p-kad-dht
+
+* mocknet: Start host in mocknet by @MarcoPolo in https://github.com/libp2p/go-libp2p/pull/2078
+* chore: update go-multistream by @MarcoPolo in https://github.com/libp2p/go-libp2p/pull/2081
+
+**Full Changelog**: https://github.com/libp2p/go-libp2p/compare/v0.25.0...v0.25.1
+
+# [v0.25.0](https://github.com/libp2p/go-libp2p/releases/tag/v0.25.0)
+
+## 🔦 Highlights
+
+### Metrics
+
+We've started instrumenting the entire stack. In this release, we're adding metrics for:
+* the swarm: tracking incoming and outgoing connections, transports, security protocols and stream multiplexers in use: (https://github.com/libp2p/go-libp2p/blob/master/p2p/net/swarm/grafana-dashboards/swarm.json)
+* the event bus: tracking how different events are propagated through the stack and to external consumers (https://github.com/libp2p/go-libp2p/blob/master/p2p/host/eventbus/grafana-dashboards/eventbus.json)
+
+Our metrics effort is still ongoing, see https://github.com/libp2p/go-libp2p/issues/1356 for progress. We'll add metrics and dashboards for more libp2p components in a future release.
+
+### Switching to Google's official Protobuf compiler
+
+So far, we were using GoGo Protobuf to compile our Protobuf definitions to Go code. However, this library was deprecated in October last year: https://twitter.com/awalterschulze/status/1584553056100057088. We [benchmarked](https://github.com/libp2p/go-libp2p/issues/1976#issuecomment-1371527732) serialization and deserialization, and found that it's (only) 20% slower than GoGo. Since the vast majority of go-libp2p's CPU time is spent in code paths other than Protobuf handling, switching to the official compiler seemed like a worthwhile tradeoff.
+
+### Removal of OpenSSL
+
+Before this release, go-libp2p had an option to use OpenSSL bindings for certain cryptographic primitives, mostly to speed up the generation of signatures and their verification. When building go-libp2p using `go build`, we'd use the standard library crypto packages. OpenSSL was only used when passing in a build tag: `go build -tags openssl`.
+Maintaining our own fork of the long unmaintained [go-openssl package](https://github.com/libp2p/go-openssl) has proven to place a larger than expected maintenance burden on the libp2p stewards, and when we recently discovered a range of new bugs ([this](https://github.com/libp2p/go-openssl/issues/38) and [this](https://github.com/libp2p/go-libp2p/issues/1892) and [this](https://github.com/libp2p/go-libp2p/issues/1951)), we decided to re-evaluate if this code path is really worth it. The results surprised us, it turns out that:
+* The Go standard library is faster than OpenSSL for all key types that are not RSA.
+* Verifying RSA signatures is as fast as Ed25519 signatures using the Go standard library, and even faster in OpenSSL.
+* Generating RSA signatures is painfully slow, both using Go standard library crypto and using OpenSSL (but even slower using Go standard library).
+
+Now the good news is, that if your node is not using an RSA key, it will never create any RSA signatures (it might need to verify them though, when it connects to a node that uses RSA keys). If you're concerned about CPU performance, it's a good idea to avoid RSA keys (the same applies to bandwidth, RSA keys are huge!). Even for nodes using RSA keys, it turns out that generating the signatures is not a significant part of their CPU load, as verified by profiling one of Kubo's bootstrap nodes.
+
+We therefore concluded that it's safe to drop this code path altogether, and thereby reduce our maintenance burden.
+
+### New Resource Manager types
+
+* Introduces a new type `LimitVal` which can explicitly specify "use default", "unlimited", "block all", as well as any positive number. The zero value of `LimitVal` (the value when you create the object in Go) is "Use default".
+ * The JSON marshalling of this is straightforward.
+* Introduces a new `ResourceLimits` type which uses `LimitVal` instead of ints so it can encode the above for the resources.
+* Changes `LimitConfig` to `PartialLimitConfig` and uses `ResourceLimits`. This along with the marshalling changes means you can now marshal the fact that some resource limit is set to block all.
+ * Because the default is to use the defaults, this avoids the footgun of initializing the resource manager with 0 limits (that would block everything).
+
+In general, you can go from a resource config with defaults to a concrete one with `.Build()`. e.g. `ResourceLimits.Build() => BaseLimit`, `PartialLimitConfig.Build() => ConcreteLimitConfig`, `LimitVal.Build() => int`. See PR #2000 for more details.
+
+If you're using the defaults for the resource manager, there should be no changes needed.
+
+### Other Breaking Changes
+
+We've cleaned up our API to consistently use `protocol.ID` for libp2p and application protocols. Specifically, this means that the peer store now uses `protocol.ID`s, and the host's `SetStreamHandler` as well.
+
+## What's Changed
+* chore: use generic LRU cache by @muXxer in https://github.com/libp2p/go-libp2p/pull/1980
+* core/crypto: drop all OpenSSL code paths by @marten-seemann in https://github.com/libp2p/go-libp2p/pull/1953
+* add WebTransport to the list of default transports by @marten-seemann in https://github.com/libp2p/go-libp2p/pull/1915
+* identify: remove old code targeting Go 1.17 by @marten-seemann in https://github.com/libp2p/go-libp2p/pull/1964
+* core: remove introspection package by @marten-seemann in https://github.com/libp2p/go-libp2p/pull/1978
+* identify: remove support for Identify Delta by @marten-seemann in https://github.com/libp2p/go-libp2p/pull/1975
+* roadmap: remove optimizations of the TCP-based handshake by @marten-seemann in https://github.com/libp2p/go-libp2p/pull/1959
+* circuitv2: correctly set the transport in the ConnectionState by @marten-seemann in https://github.com/libp2p/go-libp2p/pull/1972
+* switch to Google's Protobuf library, make protobufs compile with go generate by @marten-seemann in https://github.com/libp2p/go-libp2p/pull/1979
+* ci: run go generate as part of the go-check workflow by @marten-seemann in https://github.com/libp2p/go-libp2p/pull/1986
+* ci: use GitHub token to install protoc by @marten-seemann in https://github.com/libp2p/go-libp2p/pull/1996
+* feat: add some users to the readme by @p-shahi in https://github.com/libp2p/go-libp2p/pull/1981
+* CI: Fast multidimensional Interop tests by @MarcoPolo in https://github.com/libp2p/go-libp2p/pull/1991
+* Fix: Ignore zero values when marshalling Limits. by @ajnavarro in https://github.com/libp2p/go-libp2p/pull/1998
+* feat: add ci flakiness score to readme by @MarcoPolo in https://github.com/libp2p/go-libp2p/pull/2002
+* peerstore: make it possible to use an empty peer ID by @marten-seemann in https://github.com/libp2p/go-libp2p/pull/2006
+* feat: rcmgr: Export resource manager errors by @MarcoPolo in https://github.com/libp2p/go-libp2p/pull/2008
+* feat: ci test-plans: Parse test timeout parameter for interop test by @MarcoPolo in https://github.com/libp2p/go-libp2p/pull/2014
+* Clean addresses with peer id before adding to addrbook by @sukunrt in https://github.com/libp2p/go-libp2p/pull/2007
+* Expose muxer ids by @aschmahmann in https://github.com/libp2p/go-libp2p/pull/2012
+* swarm: add a basic metrics tracer by @marten-seemann in https://github.com/libp2p/go-libp2p/pull/1973
+* consistently use protocol.ID instead of strings by @sukunrt in https://github.com/libp2p/go-libp2p/pull/2004
+* swarm metrics: fix datasource for dashboard by @MarcoPolo in https://github.com/libp2p/go-libp2p/pull/2024
+* chore: remove textual roadmap in favor for Starmap by @p-shahi in https://github.com/libp2p/go-libp2p/pull/2036
+* rcmgr: *: Always close connscope by @MarcoPolo in https://github.com/libp2p/go-libp2p/pull/2037
+* chore: remove license files from the eventbus package by @marten-seemann in https://github.com/libp2p/go-libp2p/pull/2042
+* Migrate to test-plan composite action by @thomaseizinger in https://github.com/libp2p/go-libp2p/pull/2039
+* use quic-go and webtransport-go from quic-go organization by @marten-seemann in https://github.com/libp2p/go-libp2p/pull/2040
+* holepunch: fix flaky test by not removing holepunch protocol handler by @marten-seemann in https://github.com/libp2p/go-libp2p/pull/1948
+* quic / webtransport: extend test to test dialing a draft-29 and a v1 by @marten-seemann in https://github.com/libp2p/go-libp2p/pull/1957
+* p2p/test: add test for EvtLocalAddressesUpdated event by @marten-seemann in https://github.com/libp2p/go-libp2p/pull/2016
+* quic, tcp: only register Prometheus counters when metrics are enabled by @marten-seemann in https://github.com/libp2p/go-libp2p/pull/1971
+* p2p/test: fix flaky notification test by @marten-seemann in https://github.com/libp2p/go-libp2p/pull/2051
+* quic: disable sending of Version Negotiation packets by @marten-seemann in https://github.com/libp2p/go-libp2p/pull/2015
+* eventbus: add metrics by @sukunrt in https://github.com/libp2p/go-libp2p/pull/2038
+* metrics: use a single slice pool for all metrics tracer by @marten-seemann in https://github.com/libp2p/go-libp2p/pull/2054
+* webtransport: tidy up some test output by @MarcoPolo in https://github.com/libp2p/go-libp2p/pull/2053
+* set names for eventbus event subscriptions by @marten-seemann in https://github.com/libp2p/go-libp2p/pull/2057
+* autorelay: Split libp2p.EnableAutoRelay into 2 functions by @sukunrt in https://github.com/libp2p/go-libp2p/pull/2022
+* rcmgr: Use prometheus SDK for rcmgr metrics by @MarcoPolo in https://github.com/libp2p/go-libp2p/pull/2044
+* websocket: Replace gorilla websocket transport with nhooyr websocket transport by @MarcoPolo in https://github.com/libp2p/go-libp2p/pull/1982
+* rcmgr: add libp2p prefix to all metrics by @marten-seemann in https://github.com/libp2p/go-libp2p/pull/2063
+* chore: git-ignore various flavors of qlog files by @marten-seemann in https://github.com/libp2p/go-libp2p/pull/2064
+* interop: Update interop test to match spec by @MarcoPolo in https://github.com/libp2p/go-libp2p/pull/2049
+* chore: update webtransport-go to v0.5.1 by @marten-seemann in https://github.com/libp2p/go-libp2p/pull/2072
+* identify: refactor sending of Identify pushes by @marten-seemann in https://github.com/libp2p/go-libp2p/pull/1984
+* feat!: rcmgr: Change LimitConfig to use LimitVal type by @MarcoPolo in https://github.com/libp2p/go-libp2p/pull/2000
+* p2p/test/quic: use contexts with a timeout for Connect calls by @marten-seemann in https://github.com/libp2p/go-libp2p/pull/2070
+* identify: add some basic metrics by @marten-seemann in https://github.com/libp2p/go-libp2p/pull/2069
+* chore: Release v0.25.0 by @MarcoPolo in https://github.com/libp2p/go-libp2p/pull/2077
+
+## New Contributors
+* @muXxer made their first contribution in https://github.com/libp2p/go-libp2p/pull/1980
+* @ajnavarro made their first contribution in https://github.com/libp2p/go-libp2p/pull/1998
+* @sukunrt made their first contribution in https://github.com/libp2p/go-libp2p/pull/2007
+* @thomaseizinger made their first contribution in https://github.com/libp2p/go-libp2p/pull/2039
+
+**Full Changelog**: https://github.com/libp2p/go-libp2p/compare/v0.24.2...v0.25.0
diff --git a/vendor/github.com/libp2p/go-libp2p/README.md b/vendor/github.com/libp2p/go-libp2p/README.md
index d17071f26..533a36f75 100644
--- a/vendor/github.com/libp2p/go-libp2p/README.md
+++ b/vendor/github.com/libp2p/go-libp2p/README.md
@@ -10,17 +10,17 @@
+
# Table of Contents
- [Background](#background)
+- [Roadmap](#roadmap)
- [Usage](#usage)
- [Examples](#examples)
- [Development](#development)
- - [Using the go-libp2p Workspace](#using-the-go-libp2p-workspace)
- [Tests](#tests)
- - [Packages](#packages)
- [Contribute](#contribute)
- [Supported Go Versions](#supported-go-versions)
@@ -37,6 +37,11 @@ To learn more, check out the following resources:
- [**js-libp2p implementation**](https://github.com/libp2p/js-libp2p)
- [**rust-libp2p implementation**](https://github.com/libp2p/rust-libp2p)
+## Roadmap
+
+Our roadmap for go-libp2p can be found here: https://github.com/libp2p/go-libp2p/blob/master/ROADMAP.md
+This document represents current projects the go-libp2p team is focused on and provides an estimation of completion targets. It is a complementary roadmap to the overarching libp2p project roadmap: https://github.com/libp2p/specs/blob/master/ROADMAP.md
+
## Usage
This repository (`go-libp2p`) serves as the entrypoint to the universe of packages that compose the Go implementation of the libp2p stack.
@@ -51,11 +56,6 @@ import "github.com/libp2p/go-libp2p"
Examples can be found in the [examples folder](examples).
-## Development
-
-### Tests
-
-`go test ./...` will run all tests in the repo.
# Contribute
@@ -78,3 +78,25 @@ There's a few things you can do right now to help out:
We test against and support the two most recent major releases of Go. This is
informed by Go's own [security policy](https://go.dev/security).
+
+# Notable Users
+Some notable users of go-libp2p are:
+- [Kubo](https://github.com/ipfs/kubo) - The original Go implementation of IPFS
+- [Lotus](https://github.com/filecoin-project/lotus) - An implementation of the Filecoin protocol
+- [Drand](https://github.com/drand/drand) - A distributed random beacon daemon
+- [Prysm](https://github.com/prysmaticlabs/prysm) - An Ethereum Beacon Chain consensus client built by [Prysmatic Labs](https://prysmaticlabs.com/)
+- [Berty](https://github.com/berty/berty) - An open, secure, offline-first, peer-to-peer and zero trust messaging app.
+- [Wasp](https://github.com/iotaledger/wasp) - A node that runs IOTA Smart Contracts built by the [IOTA Foundation](https://www.iota.org/)
+- [Mina](https://github.com/minaprotocol/mina) - A lightweight, constant-sized blockchain that runs zero-knowledge smart contracts
+- [Polygon Edge](https://github.com/0xPolygon/polygon-edge) - A modular, extensible framework for building Ethereum compatible networks
+- [Celestia Node](https://github.com/celestiaorg/celestia-node) - The Go implementation of Celestia's data availability nodes
+- [Status go](https://github.com/status-im/status-go) - Status bindings for go-ethereum, built by [Status.im](https://status.im/)
+- [Flow](https://github.com/onflow/flow-go) - A blockchain built to support games, apps, and digital assets built by [Dapper Labs](https://www.dapperlabs.com/)
+- [Swarm Bee](https://github.com/ethersphere/bee) - A client for connecting to the [Swarm network](https://www.ethswarm.org/)
+- [Elrond Go](https://github.com/multiversx/mx-chain-go) - The Go implementation of the Elrond network protocol
+- [Sonr](https://github.com/sonr-io/sonr) - A platform to integrate DID Documents, WebAuthn, and IPFS and manage digital identity and assets.
+- [EdgeVPN](https://github.com/mudler/edgevpn) - A decentralized, immutable, portable VPN and reverse proxy over p2p.
+- [Kairos](https://github.com/kairos-io/kairos) - A Kubernetes-focused, Cloud Native Linux meta-distribution.
+- [Oasis Core](https://github.com/oasisprotocol/oasis-core) - The consensus and runtime layers of the [Oasis protocol](https://oasisprotocol.org/).
+
+Please open a pull request if you want your project to be added here.
diff --git a/vendor/github.com/libp2p/go-libp2p/ROADMAP.md b/vendor/github.com/libp2p/go-libp2p/ROADMAP.md
new file mode 100644
index 000000000..5c9eb6030
--- /dev/null
+++ b/vendor/github.com/libp2p/go-libp2p/ROADMAP.md
@@ -0,0 +1,5 @@
+# go-libp2p roadmap Q4’22/Q1’23
+
+Please see our roadmap in [Starmap](https://starmap.site/roadmap/github.com/libp2p/go-libp2p/issues/1806#simple)
+
+Please add any feedback or questions in: https://github.com/libp2p/go-libp2p/issues/1806
\ No newline at end of file
diff --git a/vendor/github.com/libp2p/go-libp2p/SECURITY.md b/vendor/github.com/libp2p/go-libp2p/SECURITY.md
new file mode 100644
index 000000000..0ecad4301
--- /dev/null
+++ b/vendor/github.com/libp2p/go-libp2p/SECURITY.md
@@ -0,0 +1,20 @@
+# Security Policy
+
+go-libp2p is still in development. This means that there may be problems in our protocols,
+or there may be mistakes in our implementations.
+We take security vulnerabilities very seriously. If you discover a security issue,
+please bring it to our attention right away!
+
+## Reporting a Vulnerability
+
+If you find a vulnerability that may affect live deployments -- for example, by exposing
+a remote execution exploit -- please [**report privately**](https://github.com/libp2p/go-libp2p/security/advisories/new).
+Please **DO NOT file a public issue**.
+
+If the issue is an implementation weakness that cannot be immediately exploited or
+something not yet deployed, just discuss it openly.
+If you need assistance, please reach out to [security@libp2p.io](mailto:security@libp2p.io).
+
+## Reporting a non security bug
+
+For non-security bugs, please simply file a GitHub [issue](https://github.com/libp2p/go-libp2p/issues/new).
diff --git a/vendor/github.com/libp2p/go-libp2p/config/config.go b/vendor/github.com/libp2p/go-libp2p/config/config.go
index 6561d8522..0eb2b0d97 100644
--- a/vendor/github.com/libp2p/go-libp2p/config/config.go
+++ b/vendor/github.com/libp2p/go-libp2p/config/config.go
@@ -2,24 +2,29 @@ package config
import (
"crypto/rand"
+ "errors"
"fmt"
"time"
"github.com/libp2p/go-libp2p/core/connmgr"
"github.com/libp2p/go-libp2p/core/crypto"
+ "github.com/libp2p/go-libp2p/core/event"
"github.com/libp2p/go-libp2p/core/host"
"github.com/libp2p/go-libp2p/core/metrics"
"github.com/libp2p/go-libp2p/core/network"
"github.com/libp2p/go-libp2p/core/peer"
"github.com/libp2p/go-libp2p/core/peerstore"
"github.com/libp2p/go-libp2p/core/pnet"
+ "github.com/libp2p/go-libp2p/core/protocol"
"github.com/libp2p/go-libp2p/core/routing"
"github.com/libp2p/go-libp2p/core/sec"
+ "github.com/libp2p/go-libp2p/core/sec/insecure"
"github.com/libp2p/go-libp2p/core/transport"
"github.com/libp2p/go-libp2p/p2p/host/autonat"
"github.com/libp2p/go-libp2p/p2p/host/autorelay"
bhost "github.com/libp2p/go-libp2p/p2p/host/basic"
blankhost "github.com/libp2p/go-libp2p/p2p/host/blank"
+ "github.com/libp2p/go-libp2p/p2p/host/eventbus"
"github.com/libp2p/go-libp2p/p2p/host/peerstore/pstoremem"
routed "github.com/libp2p/go-libp2p/p2p/host/routed"
"github.com/libp2p/go-libp2p/p2p/net/swarm"
@@ -27,14 +32,15 @@ import (
circuitv2 "github.com/libp2p/go-libp2p/p2p/protocol/circuitv2/client"
relayv2 "github.com/libp2p/go-libp2p/p2p/protocol/circuitv2/relay"
"github.com/libp2p/go-libp2p/p2p/protocol/holepunch"
+ "github.com/libp2p/go-libp2p/p2p/transport/quicreuse"
+ "github.com/prometheus/client_golang/prometheus"
- logging "github.com/ipfs/go-log/v2"
ma "github.com/multiformats/go-multiaddr"
madns "github.com/multiformats/go-multiaddr-dns"
+ "go.uber.org/fx"
+ "go.uber.org/fx/fxevent"
)
-var log = logging.Logger("p2p-config")
-
// AddrsFactory is a function that takes a set of multiaddrs we're listening on and
// returns the set of multiaddrs we should advertise to the network.
type AddrsFactory = bhost.AddrsFactory
@@ -53,6 +59,11 @@ type AutoNATConfig struct {
ThrottleInterval time.Duration
}
+type Security struct {
+ ID protocol.ID
+ Constructor interface{}
+}
+
// Config describes a set of settings for a libp2p node
//
// This is *not* a stable interface. Use the options defined in the root
@@ -64,11 +75,17 @@ type Config struct {
// Set it via the UserAgent option function.
UserAgent string
+ // ProtocolVersion is the protocol version that identifies the family
+ // of protocols used by the peer in the Identify protocol. It is set
+ // using the [ProtocolVersion] option.
+ ProtocolVersion string
+
PeerKey crypto.PrivKey
- Transports []TptC
- Muxers []MsMuxC
- SecurityTransports []MsSecC
+ QUICReuse []fx.Option
+ Transports []fx.Option
+ Muxers []tptu.StreamMuxer
+ SecurityTransports []Security
Insecure bool
PSK pnet.PSK
@@ -103,9 +120,14 @@ type Config struct {
EnableHolePunching bool
HolePunchingOptions []holepunch.Option
+
+ DisableMetrics bool
+ PrometheusRegisterer prometheus.Registerer
+
+ DialRanker network.DialRanker
}
-func (cfg *Config) makeSwarm() (*swarm.Swarm, error) {
+func (cfg *Config) makeSwarm(eventBus event.Bus, enableMetrics bool) (*swarm.Swarm, error) {
if cfg.Peerstore == nil {
return nil, fmt.Errorf("no peerstore specified")
}
@@ -137,7 +159,7 @@ func (cfg *Config) makeSwarm() (*swarm.Swarm, error) {
return nil, err
}
- opts := make([]swarm.Option, 0, 3)
+ opts := make([]swarm.Option, 0, 6)
if cfg.Reporter != nil {
opts = append(opts, swarm.WithMetrics(cfg.Reporter))
}
@@ -150,8 +172,20 @@ func (cfg *Config) makeSwarm() (*swarm.Swarm, error) {
if cfg.ResourceManager != nil {
opts = append(opts, swarm.WithResourceManager(cfg.ResourceManager))
}
+ if cfg.MultiaddrResolver != nil {
+ opts = append(opts, swarm.WithMultiaddrResolver(cfg.MultiaddrResolver))
+ }
+ dialRanker := cfg.DialRanker
+ if dialRanker == nil {
+ dialRanker = swarm.NoDelayDialRanker
+ }
+ opts = append(opts, swarm.WithDialRanker(dialRanker))
+ if enableMetrics {
+ opts = append(opts,
+ swarm.WithMetricsTracer(swarm.NewMetricsTracer(swarm.WithRegisterer(cfg.PrometheusRegisterer))))
+ }
// TODO: Make the swarm implementation configurable.
- return swarm.NewSwarm(pid, cfg.Peerstore, opts...)
+ return swarm.NewSwarm(pid, cfg.Peerstore, eventBus, opts...)
}
func (cfg *Config) addTransports(h host.Host) error {
@@ -160,51 +194,98 @@ func (cfg *Config) addTransports(h host.Host) error {
// Should probably skip this if no transports.
return fmt.Errorf("swarm does not support transports")
}
- var secure sec.SecureMuxer
+
+ fxopts := []fx.Option{
+ fx.WithLogger(func() fxevent.Logger { return getFXLogger() }),
+ fx.Provide(fx.Annotate(tptu.New, fx.ParamTags(`name:"security"`))),
+ fx.Supply(cfg.Muxers),
+ fx.Supply(h.ID()),
+ fx.Provide(func() host.Host { return h }),
+ fx.Provide(func() crypto.PrivKey { return h.Peerstore().PrivKey(h.ID()) }),
+ fx.Provide(func() connmgr.ConnectionGater { return cfg.ConnectionGater }),
+ fx.Provide(func() pnet.PSK { return cfg.PSK }),
+ fx.Provide(func() network.ResourceManager { return cfg.ResourceManager }),
+ fx.Provide(func() *madns.Resolver { return cfg.MultiaddrResolver }),
+ }
+ fxopts = append(fxopts, cfg.Transports...)
if cfg.Insecure {
- secure = makeInsecureTransport(h.ID(), cfg.PeerKey)
+ fxopts = append(fxopts,
+ fx.Provide(
+ fx.Annotate(
+ func(id peer.ID, priv crypto.PrivKey) []sec.SecureTransport {
+ return []sec.SecureTransport{insecure.NewWithIdentity(insecure.ID, id, priv)}
+ },
+ fx.ResultTags(`name:"security"`),
+ ),
+ ),
+ )
} else {
- var err error
- secure, err = makeSecurityMuxer(h, cfg.SecurityTransports)
- if err != nil {
- return err
+ // fx groups are unordered, but we need to preserve the order of the security transports
+ // First of all, we construct the security transports that are needed,
+ // and save them to a group call security_unordered.
+ for _, s := range cfg.SecurityTransports {
+ fxName := fmt.Sprintf(`name:"security_%s"`, s.ID)
+ fxopts = append(fxopts, fx.Supply(fx.Annotate(s.ID, fx.ResultTags(fxName))))
+ fxopts = append(fxopts,
+ fx.Provide(fx.Annotate(
+ s.Constructor,
+ fx.ParamTags(fxName),
+ fx.As(new(sec.SecureTransport)),
+ fx.ResultTags(`group:"security_unordered"`),
+ )),
+ )
}
+ // Then we consume the group security_unordered, and order them by the user's preference.
+ fxopts = append(fxopts, fx.Provide(
+ fx.Annotate(
+ func(secs []sec.SecureTransport) ([]sec.SecureTransport, error) {
+ if len(secs) != len(cfg.SecurityTransports) {
+ return nil, errors.New("inconsistent length for security transports")
+ }
+ t := make([]sec.SecureTransport, 0, len(secs))
+ for _, s := range cfg.SecurityTransports {
+ for _, st := range secs {
+ if s.ID != st.ID() {
+ continue
+ }
+ t = append(t, st)
+ }
+ }
+ return t, nil
+ },
+ fx.ParamTags(`group:"security_unordered"`),
+ fx.ResultTags(`name:"security"`),
+ )))
+ }
+
+ fxopts = append(fxopts, fx.Provide(PrivKeyToStatelessResetKey))
+ if cfg.QUICReuse != nil {
+ fxopts = append(fxopts, cfg.QUICReuse...)
+ } else {
+ fxopts = append(fxopts, fx.Provide(quicreuse.NewConnManager)) // TODO: close the ConnManager when shutting down the node
+ }
+
+ fxopts = append(fxopts, fx.Invoke(
+ fx.Annotate(
+ func(tpts []transport.Transport) error {
+ for _, t := range tpts {
+ if err := swrm.AddTransport(t); err != nil {
+ return err
+ }
+ }
+ return nil
+ },
+ fx.ParamTags(`group:"transport"`),
+ )),
+ )
+ if cfg.Relay {
+ fxopts = append(fxopts, fx.Invoke(circuitv2.AddTransport))
}
- muxer, err := makeMuxer(h, cfg.Muxers)
- if err != nil {
- return err
- }
- var opts []tptu.Option
- if len(cfg.PSK) > 0 {
- opts = append(opts, tptu.WithPSK(cfg.PSK))
- }
- if cfg.ConnectionGater != nil {
- opts = append(opts, tptu.WithConnectionGater(cfg.ConnectionGater))
- }
- if cfg.ResourceManager != nil {
- opts = append(opts, tptu.WithResourceManager(cfg.ResourceManager))
- }
- upgrader, err := tptu.New(secure, muxer, opts...)
- if err != nil {
- return err
- }
- tpts, err := makeTransports(h, upgrader, cfg.ConnectionGater, cfg.PSK, cfg.ResourceManager, cfg.Transports)
- if err != nil {
+ app := fx.New(fxopts...)
+ if err := app.Err(); err != nil {
+ h.Close()
return err
}
- for _, t := range tpts {
- if err := swrm.AddTransport(t); err != nil {
- return err
- }
- }
-
- if cfg.Relay {
- if err := circuitv2.AddTransport(h, upgrader); err != nil {
- h.Close()
- return err
- }
- }
-
return nil
}
@@ -212,22 +293,26 @@ func (cfg *Config) addTransports(h host.Host) error {
//
// This function consumes the config. Do not reuse it (really!).
func (cfg *Config) NewNode() (host.Host, error) {
- swrm, err := cfg.makeSwarm()
+ eventBus := eventbus.NewBus(eventbus.WithMetricsTracer(eventbus.NewMetricsTracer(eventbus.WithRegisterer(cfg.PrometheusRegisterer))))
+ swrm, err := cfg.makeSwarm(eventBus, !cfg.DisableMetrics)
if err != nil {
return nil, err
}
h, err := bhost.NewHost(swrm, &bhost.HostOpts{
- ConnManager: cfg.ConnManager,
- AddrsFactory: cfg.AddrsFactory,
- NATManager: cfg.NATManager,
- EnablePing: !cfg.DisablePing,
- UserAgent: cfg.UserAgent,
- MultiaddrResolver: cfg.MultiaddrResolver,
- EnableHolePunching: cfg.EnableHolePunching,
- HolePunchingOptions: cfg.HolePunchingOptions,
- EnableRelayService: cfg.EnableRelayService,
- RelayServiceOpts: cfg.RelayServiceOpts,
+ EventBus: eventBus,
+ ConnManager: cfg.ConnManager,
+ AddrsFactory: cfg.AddrsFactory,
+ NATManager: cfg.NATManager,
+ EnablePing: !cfg.DisablePing,
+ UserAgent: cfg.UserAgent,
+ ProtocolVersion: cfg.ProtocolVersion,
+ EnableHolePunching: cfg.EnableHolePunching,
+ HolePunchingOptions: cfg.HolePunchingOptions,
+ EnableRelayService: cfg.EnableRelayService,
+ RelayServiceOpts: cfg.RelayServiceOpts,
+ EnableMetrics: !cfg.DisableMetrics,
+ PrometheusRegisterer: cfg.PrometheusRegisterer,
})
if err != nil {
swrm.Close()
@@ -276,6 +361,12 @@ func (cfg *Config) NewNode() (host.Host, error) {
h.Close()
return nil, fmt.Errorf("cannot enable autorelay; relay is not enabled")
}
+ if !cfg.DisableMetrics {
+ mt := autorelay.WithMetricsTracer(
+ autorelay.NewMetricsTracer(autorelay.WithRegisterer(cfg.PrometheusRegisterer)))
+ mtOpts := []autorelay.Option{mt}
+ cfg.AutoRelayOpts = append(mtOpts, cfg.AutoRelayOpts...)
+ }
ar, err = autorelay.NewAutoRelay(h, cfg.AutoRelayOpts...)
if err != nil {
@@ -288,6 +379,11 @@ func (cfg *Config) NewNode() (host.Host, error) {
return addrF(h.AllAddrs())
}),
}
+ if !cfg.DisableMetrics {
+ autonatOpts = append(autonatOpts,
+ autonat.WithMetricsTracer(
+ autonat.NewMetricsTracer(autonat.WithRegisterer(cfg.PrometheusRegisterer))))
+ }
if cfg.AutoNATConfig.ThrottleInterval != 0 {
autonatOpts = append(autonatOpts,
autonat.WithThrottling(cfg.AutoNATConfig.ThrottleGlobalLimit, cfg.AutoNATConfig.ThrottleInterval),
@@ -316,9 +412,10 @@ func (cfg *Config) NewNode() (host.Host, error) {
Reporter: cfg.Reporter,
PeerKey: autonatPrivKey,
Peerstore: ps,
+ DialRanker: swarm.NoDelayDialRanker,
}
- dialer, err := autoNatCfg.makeSwarm()
+ dialer, err := autoNatCfg.makeSwarm(eventbus.NewBus(), false)
if err != nil {
h.Close()
return nil, err
@@ -354,7 +451,9 @@ func (cfg *Config) NewNode() (host.Host, error) {
ho = routed.Wrap(h, router)
}
if ar != nil {
- return autorelay.NewAutoRelayHost(ho, ar), nil
+ arh := autorelay.NewAutoRelayHost(ho, ar)
+ arh.Start()
+ ho = arh
}
return ho, nil
}
diff --git a/vendor/github.com/libp2p/go-libp2p/config/constructor_types.go b/vendor/github.com/libp2p/go-libp2p/config/constructor_types.go
deleted file mode 100644
index 7cb24464b..000000000
--- a/vendor/github.com/libp2p/go-libp2p/config/constructor_types.go
+++ /dev/null
@@ -1,80 +0,0 @@
-package config
-
-import (
- "fmt"
- "reflect"
-
- "github.com/libp2p/go-libp2p/core/connmgr"
- "github.com/libp2p/go-libp2p/core/crypto"
- "github.com/libp2p/go-libp2p/core/host"
- "github.com/libp2p/go-libp2p/core/network"
- "github.com/libp2p/go-libp2p/core/peer"
- "github.com/libp2p/go-libp2p/core/peerstore"
- "github.com/libp2p/go-libp2p/core/pnet"
- "github.com/libp2p/go-libp2p/core/sec"
- "github.com/libp2p/go-libp2p/core/transport"
-)
-
-var (
- // interfaces
- hostType = reflect.TypeOf((*host.Host)(nil)).Elem()
- networkType = reflect.TypeOf((*network.Network)(nil)).Elem()
- transportType = reflect.TypeOf((*transport.Transport)(nil)).Elem()
- muxType = reflect.TypeOf((*network.Multiplexer)(nil)).Elem()
- securityType = reflect.TypeOf((*sec.SecureTransport)(nil)).Elem()
- privKeyType = reflect.TypeOf((*crypto.PrivKey)(nil)).Elem()
- pubKeyType = reflect.TypeOf((*crypto.PubKey)(nil)).Elem()
- pstoreType = reflect.TypeOf((*peerstore.Peerstore)(nil)).Elem()
- connGaterType = reflect.TypeOf((*connmgr.ConnectionGater)(nil)).Elem()
- upgraderType = reflect.TypeOf((*transport.Upgrader)(nil)).Elem()
- rcmgrType = reflect.TypeOf((*network.ResourceManager)(nil)).Elem()
-
- // concrete types
- peerIDType = reflect.TypeOf((peer.ID)(""))
- pskType = reflect.TypeOf((pnet.PSK)(nil))
-)
-
-var argTypes = map[reflect.Type]constructor{
- upgraderType: func(_ host.Host, u transport.Upgrader, _ pnet.PSK, _ connmgr.ConnectionGater, _ network.ResourceManager) interface{} {
- return u
- },
- hostType: func(h host.Host, _ transport.Upgrader, _ pnet.PSK, _ connmgr.ConnectionGater, _ network.ResourceManager) interface{} {
- return h
- },
- networkType: func(h host.Host, _ transport.Upgrader, _ pnet.PSK, _ connmgr.ConnectionGater, _ network.ResourceManager) interface{} {
- return h.Network()
- },
- pskType: func(_ host.Host, _ transport.Upgrader, psk pnet.PSK, _ connmgr.ConnectionGater, _ network.ResourceManager) interface{} {
- return psk
- },
- connGaterType: func(_ host.Host, _ transport.Upgrader, _ pnet.PSK, cg connmgr.ConnectionGater, _ network.ResourceManager) interface{} {
- return cg
- },
- peerIDType: func(h host.Host, _ transport.Upgrader, _ pnet.PSK, _ connmgr.ConnectionGater, _ network.ResourceManager) interface{} {
- return h.ID()
- },
- privKeyType: func(h host.Host, _ transport.Upgrader, _ pnet.PSK, _ connmgr.ConnectionGater, _ network.ResourceManager) interface{} {
- return h.Peerstore().PrivKey(h.ID())
- },
- pubKeyType: func(h host.Host, _ transport.Upgrader, _ pnet.PSK, _ connmgr.ConnectionGater, _ network.ResourceManager) interface{} {
- return h.Peerstore().PubKey(h.ID())
- },
- pstoreType: func(h host.Host, _ transport.Upgrader, _ pnet.PSK, _ connmgr.ConnectionGater, _ network.ResourceManager) interface{} {
- return h.Peerstore()
- },
- rcmgrType: func(_ host.Host, _ transport.Upgrader, _ pnet.PSK, _ connmgr.ConnectionGater, rcmgr network.ResourceManager) interface{} {
- return rcmgr
- },
-}
-
-func newArgTypeSet(types ...reflect.Type) map[reflect.Type]constructor {
- result := make(map[reflect.Type]constructor, len(types))
- for _, ty := range types {
- c, ok := argTypes[ty]
- if !ok {
- panic(fmt.Sprintf("missing constructor for type %s", ty))
- }
- result[ty] = c
- }
- return result
-}
diff --git a/vendor/github.com/libp2p/go-libp2p/config/log.go b/vendor/github.com/libp2p/go-libp2p/config/log.go
new file mode 100644
index 000000000..3b74c38c7
--- /dev/null
+++ b/vendor/github.com/libp2p/go-libp2p/config/log.go
@@ -0,0 +1,28 @@
+package config
+
+import (
+ "strings"
+ "sync"
+
+ logging "github.com/ipfs/go-log/v2"
+ "go.uber.org/fx/fxevent"
+)
+
+var log = logging.Logger("p2p-config")
+
+var (
+ fxLogger fxevent.Logger
+ logInitOnce sync.Once
+)
+
+type fxLogWriter struct{}
+
+func (l *fxLogWriter) Write(b []byte) (int, error) {
+ log.Debug(strings.TrimSuffix(string(b), "\n"))
+ return len(b), nil
+}
+
+func getFXLogger() fxevent.Logger {
+ logInitOnce.Do(func() { fxLogger = &fxevent.ConsoleLogger{W: &fxLogWriter{}} })
+ return fxLogger
+}
diff --git a/vendor/github.com/libp2p/go-libp2p/config/muxer.go b/vendor/github.com/libp2p/go-libp2p/config/muxer.go
deleted file mode 100644
index 651dea76b..000000000
--- a/vendor/github.com/libp2p/go-libp2p/config/muxer.go
+++ /dev/null
@@ -1,63 +0,0 @@
-package config
-
-import (
- "fmt"
-
- "github.com/libp2p/go-libp2p/core/host"
- "github.com/libp2p/go-libp2p/core/network"
- msmux "github.com/libp2p/go-libp2p/p2p/muxer/muxer-multistream"
-)
-
-// MuxC is a stream multiplex transport constructor.
-type MuxC func(h host.Host) (network.Multiplexer, error)
-
-// MsMuxC is a tuple containing a multiplex transport constructor and a protocol
-// ID.
-type MsMuxC struct {
- MuxC
- ID string
-}
-
-var muxArgTypes = newArgTypeSet(hostType, networkType, peerIDType, pstoreType)
-
-// MuxerConstructor creates a multiplex constructor from the passed parameter
-// using reflection.
-func MuxerConstructor(m interface{}) (MuxC, error) {
- // Already constructed?
- if t, ok := m.(network.Multiplexer); ok {
- return func(_ host.Host) (network.Multiplexer, error) {
- return t, nil
- }, nil
- }
-
- ctor, err := makeConstructor(m, muxType, muxArgTypes)
- if err != nil {
- return nil, err
- }
- return func(h host.Host) (network.Multiplexer, error) {
- t, err := ctor(h, nil, nil, nil, nil)
- if err != nil {
- return nil, err
- }
- return t.(network.Multiplexer), nil
- }, nil
-}
-
-func makeMuxer(h host.Host, tpts []MsMuxC) (network.Multiplexer, error) {
- muxMuxer := msmux.NewBlankTransport()
- transportSet := make(map[string]struct{}, len(tpts))
- for _, tptC := range tpts {
- if _, ok := transportSet[tptC.ID]; ok {
- return nil, fmt.Errorf("duplicate muxer transport: %s", tptC.ID)
- }
- transportSet[tptC.ID] = struct{}{}
- }
- for _, tptC := range tpts {
- tpt, err := tptC.MuxC(h)
- if err != nil {
- return nil, err
- }
- muxMuxer.AddTransport(tptC.ID, tpt)
- }
- return muxMuxer, nil
-}
diff --git a/vendor/github.com/libp2p/go-libp2p/config/quic_stateless_reset.go b/vendor/github.com/libp2p/go-libp2p/config/quic_stateless_reset.go
new file mode 100644
index 000000000..a12be56f5
--- /dev/null
+++ b/vendor/github.com/libp2p/go-libp2p/config/quic_stateless_reset.go
@@ -0,0 +1,27 @@
+package config
+
+import (
+ "crypto/sha256"
+ "io"
+
+ "golang.org/x/crypto/hkdf"
+
+ "github.com/libp2p/go-libp2p/core/crypto"
+
+ "github.com/quic-go/quic-go"
+)
+
+const statelessResetKeyInfo = "libp2p quic stateless reset key"
+
+func PrivKeyToStatelessResetKey(key crypto.PrivKey) (quic.StatelessResetKey, error) {
+ var statelessResetKey quic.StatelessResetKey
+ keyBytes, err := key.Raw()
+ if err != nil {
+ return statelessResetKey, err
+ }
+ keyReader := hkdf.New(sha256.New, keyBytes, nil, []byte(statelessResetKeyInfo))
+ if _, err := io.ReadFull(keyReader, statelessResetKey[:]); err != nil {
+ return statelessResetKey, err
+ }
+ return statelessResetKey, nil
+}
diff --git a/vendor/github.com/libp2p/go-libp2p/config/reflection_magic.go b/vendor/github.com/libp2p/go-libp2p/config/reflection_magic.go
deleted file mode 100644
index 407e58505..000000000
--- a/vendor/github.com/libp2p/go-libp2p/config/reflection_magic.go
+++ /dev/null
@@ -1,173 +0,0 @@
-package config
-
-import (
- "errors"
- "fmt"
- "reflect"
- "runtime"
-
- "github.com/libp2p/go-libp2p/core/connmgr"
- "github.com/libp2p/go-libp2p/core/host"
- "github.com/libp2p/go-libp2p/core/network"
- "github.com/libp2p/go-libp2p/core/pnet"
- "github.com/libp2p/go-libp2p/core/transport"
-)
-
-var errorType = reflect.TypeOf((*error)(nil)).Elem()
-
-// checks if a function returns either the specified type or the specified type
-// and an error.
-func checkReturnType(fnType, tptType reflect.Type) error {
- switch fnType.NumOut() {
- case 2:
- if fnType.Out(1) != errorType {
- return fmt.Errorf("expected (optional) second return value from transport constructor to be an error")
- }
-
- fallthrough
- case 1:
- if !fnType.Out(0).Implements(tptType) {
- return fmt.Errorf("transport constructor returns %s which doesn't implement %s", fnType.Out(0), tptType)
- }
- default:
- return fmt.Errorf("expected transport constructor to return a transport and, optionally, an error")
- }
- return nil
-}
-
-// Handles return values with optional errors. That is, return values of the
-// form `(something, error)` or just `something`.
-//
-// Panics if the return value isn't of the correct form.
-func handleReturnValue(out []reflect.Value) (interface{}, error) {
- switch len(out) {
- case 2:
- err := out[1]
- if err != (reflect.Value{}) && !err.IsNil() {
- return nil, err.Interface().(error)
- }
- fallthrough
- case 1:
- tpt := out[0]
-
- // Check for nil value and nil error.
- if tpt == (reflect.Value{}) {
- return nil, fmt.Errorf("unspecified error")
- }
- switch tpt.Kind() {
- case reflect.Ptr, reflect.Interface, reflect.Func:
- if tpt.IsNil() {
- return nil, fmt.Errorf("unspecified error")
- }
- }
-
- return tpt.Interface(), nil
- default:
- panic("expected 1 or 2 return values from transport constructor")
- }
-}
-
-// calls the transport constructor and annotates the error with the name of the constructor.
-func callConstructor(c reflect.Value, args []reflect.Value) (interface{}, error) {
- val, err := handleReturnValue(c.Call(args))
- if err != nil {
- name := runtime.FuncForPC(c.Pointer()).Name()
- if name != "" {
- // makes debugging easier
- return nil, fmt.Errorf("transport constructor %s failed: %s", name, err)
- }
- }
- return val, err
-}
-
-type constructor func(host.Host, transport.Upgrader, pnet.PSK, connmgr.ConnectionGater, network.ResourceManager) interface{}
-
-func makeArgumentConstructors(fnType reflect.Type, argTypes map[reflect.Type]constructor) ([]constructor, error) {
- params := fnType.NumIn()
- if fnType.IsVariadic() {
- params--
- }
- out := make([]constructor, params)
- for i := range out {
- argType := fnType.In(i)
- c, ok := argTypes[argType]
- if !ok {
- return nil, fmt.Errorf("argument %d has an unexpected type %s", i, argType.Name())
- }
- out[i] = c
- }
- return out, nil
-}
-
-func getConstructorOpts(t reflect.Type, opts ...interface{}) ([]reflect.Value, error) {
- if !t.IsVariadic() {
- if len(opts) > 0 {
- return nil, errors.New("constructor doesn't accept any options")
- }
- return nil, nil
- }
- if len(opts) == 0 {
- return nil, nil
- }
- // variadic parameters always go last
- wantType := t.In(t.NumIn() - 1).Elem()
- values := make([]reflect.Value, 0, len(opts))
- for _, opt := range opts {
- val := reflect.ValueOf(opt)
- if opt == nil {
- return nil, errors.New("expected a transport option, got nil")
- }
- if val.Type() != wantType {
- return nil, fmt.Errorf("expected option of type %s, got %s", wantType, reflect.TypeOf(opt))
- }
- values = append(values, val.Convert(wantType))
- }
- return values, nil
-}
-
-// makes a transport constructor.
-func makeConstructor(
- tpt interface{},
- tptType reflect.Type,
- argTypes map[reflect.Type]constructor,
- opts ...interface{},
-) (func(host.Host, transport.Upgrader, pnet.PSK, connmgr.ConnectionGater, network.ResourceManager) (interface{}, error), error) {
- v := reflect.ValueOf(tpt)
- // avoid panicing on nil/zero value.
- if v == (reflect.Value{}) {
- return nil, fmt.Errorf("expected a transport or transport constructor, got a %T", tpt)
- }
- t := v.Type()
- if t.Kind() != reflect.Func {
- return nil, fmt.Errorf("expected a transport or transport constructor, got a %T", tpt)
- }
-
- if err := checkReturnType(t, tptType); err != nil {
- return nil, err
- }
-
- argConstructors, err := makeArgumentConstructors(t, argTypes)
- if err != nil {
- return nil, err
- }
- optValues, err := getConstructorOpts(t, opts...)
- if err != nil {
- return nil, err
- }
-
- return func(h host.Host, u transport.Upgrader, psk pnet.PSK, cg connmgr.ConnectionGater, rcmgr network.ResourceManager) (interface{}, error) {
- arguments := make([]reflect.Value, 0, len(argConstructors)+len(opts))
- for i, makeArg := range argConstructors {
- if arg := makeArg(h, u, psk, cg, rcmgr); arg != nil {
- arguments = append(arguments, reflect.ValueOf(arg))
- } else {
- // ValueOf an un-typed nil yields a zero reflect
- // value. However, we _want_ the zero value of
- // the _type_.
- arguments = append(arguments, reflect.Zero(t.In(i)))
- }
- }
- arguments = append(arguments, optValues...)
- return callConstructor(v, arguments)
- }, nil
-}
diff --git a/vendor/github.com/libp2p/go-libp2p/config/security.go b/vendor/github.com/libp2p/go-libp2p/config/security.go
deleted file mode 100644
index a98d761a8..000000000
--- a/vendor/github.com/libp2p/go-libp2p/config/security.go
+++ /dev/null
@@ -1,78 +0,0 @@
-package config
-
-import (
- "fmt"
-
- "github.com/libp2p/go-libp2p/core/crypto"
- "github.com/libp2p/go-libp2p/core/host"
- "github.com/libp2p/go-libp2p/core/peer"
- "github.com/libp2p/go-libp2p/core/sec"
- "github.com/libp2p/go-libp2p/core/sec/insecure"
- csms "github.com/libp2p/go-libp2p/p2p/net/conn-security-multistream"
-)
-
-// SecC is a security transport constructor.
-type SecC func(h host.Host) (sec.SecureTransport, error)
-
-// MsSecC is a tuple containing a security transport constructor and a protocol
-// ID.
-type MsSecC struct {
- SecC
- ID string
-}
-
-var securityArgTypes = newArgTypeSet(
- hostType, networkType, peerIDType,
- privKeyType, pubKeyType, pstoreType,
-)
-
-// SecurityConstructor creates a security constructor from the passed parameter
-// using reflection.
-func SecurityConstructor(security interface{}) (SecC, error) {
- // Already constructed?
- if t, ok := security.(sec.SecureTransport); ok {
- return func(_ host.Host) (sec.SecureTransport, error) {
- return t, nil
- }, nil
- }
-
- ctor, err := makeConstructor(security, securityType, securityArgTypes)
- if err != nil {
- return nil, err
- }
- return func(h host.Host) (sec.SecureTransport, error) {
- t, err := ctor(h, nil, nil, nil, nil)
- if err != nil {
- return nil, err
- }
- return t.(sec.SecureTransport), nil
- }, nil
-}
-
-func makeInsecureTransport(id peer.ID, privKey crypto.PrivKey) sec.SecureMuxer {
- secMuxer := new(csms.SSMuxer)
- secMuxer.AddTransport(insecure.ID, insecure.NewWithIdentity(id, privKey))
- return secMuxer
-}
-
-func makeSecurityMuxer(h host.Host, tpts []MsSecC) (sec.SecureMuxer, error) {
- secMuxer := new(csms.SSMuxer)
- transportSet := make(map[string]struct{}, len(tpts))
- for _, tptC := range tpts {
- if _, ok := transportSet[tptC.ID]; ok {
- return nil, fmt.Errorf("duplicate security transport: %s", tptC.ID)
- }
- transportSet[tptC.ID] = struct{}{}
- }
- for _, tptC := range tpts {
- tpt, err := tptC.SecC(h)
- if err != nil {
- return nil, err
- }
- if _, ok := tpt.(*insecure.Transport); ok {
- return nil, fmt.Errorf("cannot construct libp2p with an insecure transport, set the Insecure config option instead")
- }
- secMuxer.AddTransport(tptC.ID, tpt)
- }
- return secMuxer, nil
-}
diff --git a/vendor/github.com/libp2p/go-libp2p/config/transport.go b/vendor/github.com/libp2p/go-libp2p/config/transport.go
deleted file mode 100644
index 9006683a0..000000000
--- a/vendor/github.com/libp2p/go-libp2p/config/transport.go
+++ /dev/null
@@ -1,69 +0,0 @@
-package config
-
-import (
- "github.com/libp2p/go-libp2p/core/connmgr"
- "github.com/libp2p/go-libp2p/core/host"
- "github.com/libp2p/go-libp2p/core/network"
- "github.com/libp2p/go-libp2p/core/pnet"
- "github.com/libp2p/go-libp2p/core/transport"
-)
-
-// TptC is the type for libp2p transport constructors. You probably won't ever
-// implement this function interface directly. Instead, pass your transport
-// constructor to TransportConstructor.
-type TptC func(host.Host, transport.Upgrader, pnet.PSK, connmgr.ConnectionGater, network.ResourceManager) (transport.Transport, error)
-
-var transportArgTypes = argTypes
-
-// TransportConstructor uses reflection to turn a function that constructs a
-// transport into a TptC.
-//
-// You can pass either a constructed transport (something that implements
-// `transport.Transport`) or a function that takes any of:
-//
-// * The local peer ID.
-// * A transport connection upgrader.
-// * A private key.
-// * A public key.
-// * A Host.
-// * A Network.
-// * A Peerstore.
-// * An address filter.
-// * A security transport.
-// * A stream multiplexer transport.
-// * A private network protection key.
-// * A connection gater.
-//
-// And returns a type implementing transport.Transport and, optionally, an error
-// (as the second argument).
-func TransportConstructor(tpt interface{}, opts ...interface{}) (TptC, error) {
- // Already constructed?
- if t, ok := tpt.(transport.Transport); ok {
- return func(_ host.Host, _ transport.Upgrader, _ pnet.PSK, _ connmgr.ConnectionGater, _ network.ResourceManager) (transport.Transport, error) {
- return t, nil
- }, nil
- }
- ctor, err := makeConstructor(tpt, transportType, transportArgTypes, opts...)
- if err != nil {
- return nil, err
- }
- return func(h host.Host, u transport.Upgrader, psk pnet.PSK, cg connmgr.ConnectionGater, rcmgr network.ResourceManager) (transport.Transport, error) {
- t, err := ctor(h, u, psk, cg, rcmgr)
- if err != nil {
- return nil, err
- }
- return t.(transport.Transport), nil
- }, nil
-}
-
-func makeTransports(h host.Host, u transport.Upgrader, cg connmgr.ConnectionGater, psk pnet.PSK, rcmgr network.ResourceManager, tpts []TptC) ([]transport.Transport, error) {
- transports := make([]transport.Transport, len(tpts))
- for i, tC := range tpts {
- t, err := tC(h, u, psk, cg, rcmgr)
- if err != nil {
- return nil, err
- }
- transports[i] = t
- }
- return transports, nil
-}
diff --git a/vendor/github.com/libp2p/go-libp2p/core/connmgr/gater.go b/vendor/github.com/libp2p/go-libp2p/core/connmgr/gater.go
index 672aef952..82fa56a87 100644
--- a/vendor/github.com/libp2p/go-libp2p/core/connmgr/gater.go
+++ b/vendor/github.com/libp2p/go-libp2p/core/connmgr/gater.go
@@ -52,7 +52,6 @@ import (
// DisconnectReasons is that we require stream multiplexing capability to open a
// control protocol stream to transmit the message.
type ConnectionGater interface {
-
// InterceptPeerDial tests whether we're permitted to Dial the specified peer.
//
// This is called by the network.Network implementation when dialling a peer.
diff --git a/vendor/github.com/libp2p/go-libp2p/core/crypto/key.go b/vendor/github.com/libp2p/go-libp2p/core/crypto/key.go
index 3d7b39a22..9133141c8 100644
--- a/vendor/github.com/libp2p/go-libp2p/core/crypto/key.go
+++ b/vendor/github.com/libp2p/go-libp2p/core/crypto/key.go
@@ -12,11 +12,13 @@ import (
"fmt"
"io"
- pb "github.com/libp2p/go-libp2p/core/crypto/pb"
+ "github.com/libp2p/go-libp2p/core/crypto/pb"
- "github.com/gogo/protobuf/proto"
+ "google.golang.org/protobuf/proto"
)
+//go:generate protoc --go_out=. --go_opt=Mpb/crypto.proto=./pb pb/crypto.proto
+
const (
// RSA is an enum for the supported RSA key type
RSA = iota
@@ -194,7 +196,7 @@ func PublicKeyFromProto(pmes *pb.PublicKey) (PubKey, error) {
switch tpk := pk.(type) {
case *RsaPublicKey:
- tpk.cached, _ = pmes.Marshal()
+ tpk.cached, _ = proto.Marshal(pmes)
}
return pk, nil
@@ -214,14 +216,14 @@ func MarshalPublicKey(k PubKey) ([]byte, error) {
// PublicKeyToProto converts a public key object into an unserialized
// protobuf PublicKey message.
func PublicKeyToProto(k PubKey) (*pb.PublicKey, error) {
- pbmes := new(pb.PublicKey)
- pbmes.Type = k.Type()
data, err := k.Raw()
if err != nil {
return nil, err
}
- pbmes.Data = data
- return pbmes, nil
+ return &pb.PublicKey{
+ Type: k.Type().Enum(),
+ Data: data,
+ }, nil
}
// UnmarshalPrivateKey converts a protobuf serialized private key into its
@@ -243,15 +245,14 @@ func UnmarshalPrivateKey(data []byte) (PrivKey, error) {
// MarshalPrivateKey converts a key object into its protobuf serialized form.
func MarshalPrivateKey(k PrivKey) ([]byte, error) {
- pbmes := new(pb.PrivateKey)
- pbmes.Type = k.Type()
data, err := k.Raw()
if err != nil {
return nil, err
}
-
- pbmes.Data = data
- return proto.Marshal(pbmes)
+ return proto.Marshal(&pb.PrivateKey{
+ Type: k.Type().Enum(),
+ Data: data,
+ })
}
// ConfigDecodeKey decodes from b64 (for config file) to a byte array that can be unmarshalled.
diff --git a/vendor/github.com/libp2p/go-libp2p/core/crypto/key_openssl.go b/vendor/github.com/libp2p/go-libp2p/core/crypto/key_openssl.go
deleted file mode 100644
index 7a13ff69a..000000000
--- a/vendor/github.com/libp2p/go-libp2p/core/crypto/key_openssl.go
+++ /dev/null
@@ -1,101 +0,0 @@
-//go:build openssl
-// +build openssl
-
-package crypto
-
-import (
- "crypto"
- "crypto/ecdsa"
- "crypto/ed25519"
- "crypto/rsa"
- "crypto/x509"
-
- "github.com/libp2p/go-libp2p/core/internal/catch"
-
- "github.com/decred/dcrd/dcrec/secp256k1/v4"
- "github.com/libp2p/go-openssl"
-)
-
-// KeyPairFromStdKey wraps standard library (and secp256k1) private keys in libp2p/go-libp2p/core/crypto keys
-func KeyPairFromStdKey(priv crypto.PrivateKey) (_priv PrivKey, _pub PubKey, err error) {
- if priv == nil {
- return nil, nil, ErrNilPrivateKey
- }
-
- switch p := priv.(type) {
- case *rsa.PrivateKey:
- defer func() { catch.HandlePanic(recover(), &err, "x509 private key marshaling") }()
- pk, err := openssl.LoadPrivateKeyFromDER(x509.MarshalPKCS1PrivateKey(p))
- if err != nil {
- return nil, nil, err
- }
-
- return &opensslPrivateKey{pk}, &opensslPublicKey{key: pk}, nil
-
- case *ecdsa.PrivateKey:
- return &ECDSAPrivateKey{p}, &ECDSAPublicKey{&p.PublicKey}, nil
-
- case *ed25519.PrivateKey:
- pubIfc := p.Public()
- pub, _ := pubIfc.(ed25519.PublicKey)
- return &Ed25519PrivateKey{*p}, &Ed25519PublicKey{pub}, nil
-
- case *secp256k1.PrivateKey:
- sPriv := Secp256k1PrivateKey(*p)
- sPub := Secp256k1PublicKey(*p.PubKey())
- return &sPriv, &sPub, nil
-
- default:
- return nil, nil, ErrBadKeyType
- }
-}
-
-// PrivKeyToStdKey converts libp2p/go-libp2p/core/crypto private keys to standard library (and secp256k1) private keys
-func PrivKeyToStdKey(priv PrivKey) (_priv crypto.PrivateKey, err error) {
- if priv == nil {
- return nil, ErrNilPrivateKey
- }
- switch p := priv.(type) {
- case *opensslPrivateKey:
- defer func() { catch.HandlePanic(recover(), &err, "x509 private key parsing") }()
- raw, err := p.Raw()
- if err != nil {
- return nil, err
- }
- return x509.ParsePKCS1PrivateKey(raw)
- case *ECDSAPrivateKey:
- return p.priv, nil
- case *Ed25519PrivateKey:
- return &p.k, nil
- case *Secp256k1PrivateKey:
- return p, nil
- default:
- return nil, ErrBadKeyType
- }
-}
-
-// PubKeyToStdKey converts libp2p/go-libp2p/core/crypto private keys to standard library (and secp256k1) public keys
-func PubKeyToStdKey(pub PubKey) (key crypto.PublicKey, err error) {
- if pub == nil {
- return nil, ErrNilPublicKey
- }
-
- switch p := pub.(type) {
- case *opensslPublicKey:
- defer func() { catch.HandlePanic(recover(), &err, "x509 public key parsing") }()
-
- raw, err := p.Raw()
- if err != nil {
- return nil, err
- }
- return x509.ParsePKIXPublicKey(raw)
- case *ECDSAPublicKey:
- return p.pub, nil
- case *Ed25519PublicKey:
- return p.k, nil
- case *Secp256k1PublicKey:
- return p, nil
- default:
- return nil, ErrBadKeyType
- }
-}
diff --git a/vendor/github.com/libp2p/go-libp2p/core/crypto/key_not_openssl.go b/vendor/github.com/libp2p/go-libp2p/core/crypto/key_to_stdlib.go
similarity index 97%
rename from vendor/github.com/libp2p/go-libp2p/core/crypto/key_not_openssl.go
rename to vendor/github.com/libp2p/go-libp2p/core/crypto/key_to_stdlib.go
index 003246752..aead1d251 100644
--- a/vendor/github.com/libp2p/go-libp2p/core/crypto/key_not_openssl.go
+++ b/vendor/github.com/libp2p/go-libp2p/core/crypto/key_to_stdlib.go
@@ -1,6 +1,3 @@
-//go:build !openssl
-// +build !openssl
-
package crypto
import (
diff --git a/vendor/github.com/libp2p/go-libp2p/core/crypto/openssl_common.go b/vendor/github.com/libp2p/go-libp2p/core/crypto/openssl_common.go
deleted file mode 100644
index d97eb08b8..000000000
--- a/vendor/github.com/libp2p/go-libp2p/core/crypto/openssl_common.go
+++ /dev/null
@@ -1,104 +0,0 @@
-//go:build openssl
-// +build openssl
-
-package crypto
-
-import (
- "sync"
-
- pb "github.com/libp2p/go-libp2p/core/crypto/pb"
-
- "github.com/libp2p/go-openssl"
-)
-
-// define these as separate types so we can add more key types later and reuse
-// code.
-
-type opensslPublicKey struct {
- key openssl.PublicKey
-
- cacheLk sync.Mutex
- cached []byte
-}
-
-type opensslPrivateKey struct {
- key openssl.PrivateKey
-}
-
-func unmarshalOpensslPrivateKey(b []byte) (opensslPrivateKey, error) {
- sk, err := openssl.LoadPrivateKeyFromDER(b)
- if err != nil {
- return opensslPrivateKey{}, err
- }
- return opensslPrivateKey{sk}, nil
-}
-
-func unmarshalOpensslPublicKey(b []byte) (opensslPublicKey, error) {
- sk, err := openssl.LoadPublicKeyFromDER(b)
- if err != nil {
- return opensslPublicKey{}, err
- }
- return opensslPublicKey{key: sk, cached: b}, nil
-}
-
-// Verify compares a signature against input data
-func (pk *opensslPublicKey) Verify(data, sig []byte) (bool, error) {
- err := pk.key.VerifyPKCS1v15(openssl.SHA256_Method, data, sig)
- return err == nil, err
-}
-
-func (pk *opensslPublicKey) Type() pb.KeyType {
- switch pk.key.KeyType() {
- case openssl.KeyTypeRSA:
- return pb.KeyType_RSA
- default:
- return -1
- }
-}
-
-func (pk *opensslPublicKey) Raw() ([]byte, error) {
- return pk.key.MarshalPKIXPublicKeyDER()
-}
-
-// Equals checks whether this key is equal to another
-func (pk *opensslPublicKey) Equals(k Key) bool {
- k0, ok := k.(*RsaPublicKey)
- if !ok {
- return basicEquals(pk, k)
- }
-
- return pk.key.Equal(k0.opensslPublicKey.key)
-}
-
-// Sign returns a signature of the input data
-func (sk *opensslPrivateKey) Sign(message []byte) ([]byte, error) {
- return sk.key.SignPKCS1v15(openssl.SHA256_Method, message)
-}
-
-// GetPublic returns a public key
-func (sk *opensslPrivateKey) GetPublic() PubKey {
- return &opensslPublicKey{key: sk.key}
-}
-
-func (sk *opensslPrivateKey) Type() pb.KeyType {
- switch sk.key.KeyType() {
- case openssl.KeyTypeRSA:
- return pb.KeyType_RSA
- default:
- return -1
- }
-}
-
-func (sk *opensslPrivateKey) Raw() ([]byte, error) {
- return sk.key.MarshalPKCS1PrivateKeyDER()
-}
-
-// Equals checks whether this key is equal to another
-func (sk *opensslPrivateKey) Equals(k Key) bool {
- k0, ok := k.(*RsaPrivateKey)
- if !ok {
- return basicEquals(sk, k)
- }
-
- return sk.key.Equal(k0.opensslPrivateKey.key)
-}
diff --git a/vendor/github.com/libp2p/go-libp2p/core/crypto/pb/Makefile b/vendor/github.com/libp2p/go-libp2p/core/crypto/pb/Makefile
deleted file mode 100644
index 8af2dd817..000000000
--- a/vendor/github.com/libp2p/go-libp2p/core/crypto/pb/Makefile
+++ /dev/null
@@ -1,11 +0,0 @@
-PB = $(wildcard *.proto)
-GO = $(PB:.proto=.pb.go)
-
-all: $(GO)
-
-%.pb.go: %.proto
- protoc --proto_path=$(PWD)/../..:. --gogofaster_out=. $<
-
-clean:
- rm -f *.pb.go
- rm -f *.go
diff --git a/vendor/github.com/libp2p/go-libp2p/core/crypto/pb/crypto.pb.go b/vendor/github.com/libp2p/go-libp2p/core/crypto/pb/crypto.pb.go
index 072fad9c9..0b4067941 100644
--- a/vendor/github.com/libp2p/go-libp2p/core/crypto/pb/crypto.pb.go
+++ b/vendor/github.com/libp2p/go-libp2p/core/crypto/pb/crypto.pb.go
@@ -1,27 +1,24 @@
-// Code generated by protoc-gen-gogo. DO NOT EDIT.
-// source: crypto.proto
+// Code generated by protoc-gen-go. DO NOT EDIT.
+// versions:
+// protoc-gen-go v1.30.0
+// protoc v3.21.12
+// source: pb/crypto.proto
-package crypto_pb
+package pb
import (
- fmt "fmt"
- github_com_gogo_protobuf_proto "github.com/gogo/protobuf/proto"
- proto "github.com/gogo/protobuf/proto"
- io "io"
- math "math"
- math_bits "math/bits"
+ protoreflect "google.golang.org/protobuf/reflect/protoreflect"
+ protoimpl "google.golang.org/protobuf/runtime/protoimpl"
+ reflect "reflect"
+ sync "sync"
)
-// Reference imports to suppress errors if they are not otherwise used.
-var _ = proto.Marshal
-var _ = fmt.Errorf
-var _ = math.Inf
-
-// This is a compile-time assertion to ensure that this generated file
-// is compatible with the proto package it is being compiled against.
-// A compilation error at this line likely means your copy of the
-// proto package needs to be updated.
-const _ = proto.GoGoProtoPackageIsVersion3 // please upgrade the proto package
+const (
+ // Verify that this generated code is sufficiently up-to-date.
+ _ = protoimpl.EnforceVersion(20 - protoimpl.MinVersion)
+ // Verify that runtime/protoimpl is sufficiently up-to-date.
+ _ = protoimpl.EnforceVersion(protoimpl.MaxVersion - 20)
+)
type KeyType int32
@@ -32,19 +29,21 @@ const (
KeyType_ECDSA KeyType = 3
)
-var KeyType_name = map[int32]string{
- 0: "RSA",
- 1: "Ed25519",
- 2: "Secp256k1",
- 3: "ECDSA",
-}
-
-var KeyType_value = map[string]int32{
- "RSA": 0,
- "Ed25519": 1,
- "Secp256k1": 2,
- "ECDSA": 3,
-}
+// Enum value maps for KeyType.
+var (
+ KeyType_name = map[int32]string{
+ 0: "RSA",
+ 1: "Ed25519",
+ 2: "Secp256k1",
+ 3: "ECDSA",
+ }
+ KeyType_value = map[string]int32{
+ "RSA": 0,
+ "Ed25519": 1,
+ "Secp256k1": 2,
+ "ECDSA": 3,
+ }
+)
func (x KeyType) Enum() *KeyType {
p := new(KeyType)
@@ -53,573 +52,246 @@ func (x KeyType) Enum() *KeyType {
}
func (x KeyType) String() string {
- return proto.EnumName(KeyType_name, int32(x))
+ return protoimpl.X.EnumStringOf(x.Descriptor(), protoreflect.EnumNumber(x))
+}
+
+func (KeyType) Descriptor() protoreflect.EnumDescriptor {
+ return file_pb_crypto_proto_enumTypes[0].Descriptor()
+}
+
+func (KeyType) Type() protoreflect.EnumType {
+ return &file_pb_crypto_proto_enumTypes[0]
+}
+
+func (x KeyType) Number() protoreflect.EnumNumber {
+ return protoreflect.EnumNumber(x)
}
-func (x *KeyType) UnmarshalJSON(data []byte) error {
- value, err := proto.UnmarshalJSONEnum(KeyType_value, data, "KeyType")
+// Deprecated: Do not use.
+func (x *KeyType) UnmarshalJSON(b []byte) error {
+ num, err := protoimpl.X.UnmarshalJSONEnum(x.Descriptor(), b)
if err != nil {
return err
}
- *x = KeyType(value)
+ *x = KeyType(num)
return nil
}
+// Deprecated: Use KeyType.Descriptor instead.
func (KeyType) EnumDescriptor() ([]byte, []int) {
- return fileDescriptor_527278fb02d03321, []int{0}
+ return file_pb_crypto_proto_rawDescGZIP(), []int{0}
}
type PublicKey struct {
- Type KeyType `protobuf:"varint,1,req,name=Type,enum=crypto.pb.KeyType" json:"Type"`
- Data []byte `protobuf:"bytes,2,req,name=Data" json:"Data"`
-}
+ state protoimpl.MessageState
+ sizeCache protoimpl.SizeCache
+ unknownFields protoimpl.UnknownFields
-func (m *PublicKey) Reset() { *m = PublicKey{} }
-func (m *PublicKey) String() string { return proto.CompactTextString(m) }
-func (*PublicKey) ProtoMessage() {}
-func (*PublicKey) Descriptor() ([]byte, []int) {
- return fileDescriptor_527278fb02d03321, []int{0}
-}
-func (m *PublicKey) XXX_Unmarshal(b []byte) error {
- return m.Unmarshal(b)
-}
-func (m *PublicKey) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) {
- if deterministic {
- return xxx_messageInfo_PublicKey.Marshal(b, m, deterministic)
- } else {
- b = b[:cap(b)]
- n, err := m.MarshalToSizedBuffer(b)
- if err != nil {
- return nil, err
- }
- return b[:n], nil
- }
-}
-func (m *PublicKey) XXX_Merge(src proto.Message) {
- xxx_messageInfo_PublicKey.Merge(m, src)
-}
-func (m *PublicKey) XXX_Size() int {
- return m.Size()
-}
-func (m *PublicKey) XXX_DiscardUnknown() {
- xxx_messageInfo_PublicKey.DiscardUnknown(m)
+ Type *KeyType `protobuf:"varint,1,req,name=Type,enum=crypto.pb.KeyType" json:"Type,omitempty"`
+ Data []byte `protobuf:"bytes,2,req,name=Data" json:"Data,omitempty"`
}
-var xxx_messageInfo_PublicKey proto.InternalMessageInfo
-
-func (m *PublicKey) GetType() KeyType {
- if m != nil {
- return m.Type
+func (x *PublicKey) Reset() {
+ *x = PublicKey{}
+ if protoimpl.UnsafeEnabled {
+ mi := &file_pb_crypto_proto_msgTypes[0]
+ ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
+ ms.StoreMessageInfo(mi)
}
- return KeyType_RSA
}
-func (m *PublicKey) GetData() []byte {
- if m != nil {
- return m.Data
- }
- return nil
+func (x *PublicKey) String() string {
+ return protoimpl.X.MessageStringOf(x)
}
-type PrivateKey struct {
- Type KeyType `protobuf:"varint,1,req,name=Type,enum=crypto.pb.KeyType" json:"Type"`
- Data []byte `protobuf:"bytes,2,req,name=Data" json:"Data"`
-}
+func (*PublicKey) ProtoMessage() {}
-func (m *PrivateKey) Reset() { *m = PrivateKey{} }
-func (m *PrivateKey) String() string { return proto.CompactTextString(m) }
-func (*PrivateKey) ProtoMessage() {}
-func (*PrivateKey) Descriptor() ([]byte, []int) {
- return fileDescriptor_527278fb02d03321, []int{1}
-}
-func (m *PrivateKey) XXX_Unmarshal(b []byte) error {
- return m.Unmarshal(b)
-}
-func (m *PrivateKey) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) {
- if deterministic {
- return xxx_messageInfo_PrivateKey.Marshal(b, m, deterministic)
- } else {
- b = b[:cap(b)]
- n, err := m.MarshalToSizedBuffer(b)
- if err != nil {
- return nil, err
+func (x *PublicKey) ProtoReflect() protoreflect.Message {
+ mi := &file_pb_crypto_proto_msgTypes[0]
+ if protoimpl.UnsafeEnabled && x != nil {
+ ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
+ if ms.LoadMessageInfo() == nil {
+ ms.StoreMessageInfo(mi)
}
- return b[:n], nil
+ return ms
}
-}
-func (m *PrivateKey) XXX_Merge(src proto.Message) {
- xxx_messageInfo_PrivateKey.Merge(m, src)
-}
-func (m *PrivateKey) XXX_Size() int {
- return m.Size()
-}
-func (m *PrivateKey) XXX_DiscardUnknown() {
- xxx_messageInfo_PrivateKey.DiscardUnknown(m)
+ return mi.MessageOf(x)
}
-var xxx_messageInfo_PrivateKey proto.InternalMessageInfo
+// Deprecated: Use PublicKey.ProtoReflect.Descriptor instead.
+func (*PublicKey) Descriptor() ([]byte, []int) {
+ return file_pb_crypto_proto_rawDescGZIP(), []int{0}
+}
-func (m *PrivateKey) GetType() KeyType {
- if m != nil {
- return m.Type
+func (x *PublicKey) GetType() KeyType {
+ if x != nil && x.Type != nil {
+ return *x.Type
}
return KeyType_RSA
}
-func (m *PrivateKey) GetData() []byte {
- if m != nil {
- return m.Data
+func (x *PublicKey) GetData() []byte {
+ if x != nil {
+ return x.Data
}
return nil
}
-func init() {
- proto.RegisterEnum("crypto.pb.KeyType", KeyType_name, KeyType_value)
- proto.RegisterType((*PublicKey)(nil), "crypto.pb.PublicKey")
- proto.RegisterType((*PrivateKey)(nil), "crypto.pb.PrivateKey")
-}
+type PrivateKey struct {
+ state protoimpl.MessageState
+ sizeCache protoimpl.SizeCache
+ unknownFields protoimpl.UnknownFields
-func init() { proto.RegisterFile("crypto.proto", fileDescriptor_527278fb02d03321) }
-
-var fileDescriptor_527278fb02d03321 = []byte{
- // 203 bytes of a gzipped FileDescriptorProto
- 0x1f, 0x8b, 0x08, 0x00, 0x00, 0x00, 0x00, 0x00, 0x02, 0xff, 0xe2, 0xe2, 0x49, 0x2e, 0xaa, 0x2c,
- 0x28, 0xc9, 0xd7, 0x2b, 0x28, 0xca, 0x2f, 0xc9, 0x17, 0xe2, 0x84, 0xf1, 0x92, 0x94, 0x82, 0xb9,
- 0x38, 0x03, 0x4a, 0x93, 0x72, 0x32, 0x93, 0xbd, 0x53, 0x2b, 0x85, 0x74, 0xb8, 0x58, 0x42, 0x2a,
- 0x0b, 0x52, 0x25, 0x18, 0x15, 0x98, 0x34, 0xf8, 0x8c, 0x84, 0xf4, 0xe0, 0xca, 0xf4, 0xbc, 0x53,
- 0x2b, 0x41, 0x32, 0x4e, 0x2c, 0x27, 0xee, 0xc9, 0x33, 0x04, 0x81, 0x55, 0x09, 0x49, 0x70, 0xb1,
- 0xb8, 0x24, 0x96, 0x24, 0x4a, 0x30, 0x29, 0x30, 0x69, 0xf0, 0xc0, 0x64, 0x40, 0x22, 0x4a, 0x21,
- 0x5c, 0x5c, 0x01, 0x45, 0x99, 0x65, 0x89, 0x25, 0xa9, 0x54, 0x34, 0x55, 0xcb, 0x92, 0x8b, 0x1d,
- 0xaa, 0x41, 0x88, 0x9d, 0x8b, 0x39, 0x28, 0xd8, 0x51, 0x80, 0x41, 0x88, 0x9b, 0x8b, 0xdd, 0x35,
- 0xc5, 0xc8, 0xd4, 0xd4, 0xd0, 0x52, 0x80, 0x51, 0x88, 0x97, 0x8b, 0x33, 0x38, 0x35, 0xb9, 0xc0,
- 0xc8, 0xd4, 0x2c, 0xdb, 0x50, 0x80, 0x49, 0x88, 0x93, 0x8b, 0xd5, 0xd5, 0xd9, 0x25, 0xd8, 0x51,
- 0x80, 0xd9, 0x49, 0xe2, 0xc4, 0x23, 0x39, 0xc6, 0x0b, 0x8f, 0xe4, 0x18, 0x1f, 0x3c, 0x92, 0x63,
- 0x9c, 0xf0, 0x58, 0x8e, 0xe1, 0xc2, 0x63, 0x39, 0x86, 0x1b, 0x8f, 0xe5, 0x18, 0x00, 0x01, 0x00,
- 0x00, 0xff, 0xff, 0x13, 0xbe, 0xd4, 0xff, 0x19, 0x01, 0x00, 0x00,
+ Type *KeyType `protobuf:"varint,1,req,name=Type,enum=crypto.pb.KeyType" json:"Type,omitempty"`
+ Data []byte `protobuf:"bytes,2,req,name=Data" json:"Data,omitempty"`
}
-func (m *PublicKey) Marshal() (dAtA []byte, err error) {
- size := m.Size()
- dAtA = make([]byte, size)
- n, err := m.MarshalToSizedBuffer(dAtA[:size])
- if err != nil {
- return nil, err
+func (x *PrivateKey) Reset() {
+ *x = PrivateKey{}
+ if protoimpl.UnsafeEnabled {
+ mi := &file_pb_crypto_proto_msgTypes[1]
+ ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
+ ms.StoreMessageInfo(mi)
}
- return dAtA[:n], nil
}
-func (m *PublicKey) MarshalTo(dAtA []byte) (int, error) {
- size := m.Size()
- return m.MarshalToSizedBuffer(dAtA[:size])
+func (x *PrivateKey) String() string {
+ return protoimpl.X.MessageStringOf(x)
}
-func (m *PublicKey) MarshalToSizedBuffer(dAtA []byte) (int, error) {
- i := len(dAtA)
- _ = i
- var l int
- _ = l
- if m.Data != nil {
- i -= len(m.Data)
- copy(dAtA[i:], m.Data)
- i = encodeVarintCrypto(dAtA, i, uint64(len(m.Data)))
- i--
- dAtA[i] = 0x12
- }
- i = encodeVarintCrypto(dAtA, i, uint64(m.Type))
- i--
- dAtA[i] = 0x8
- return len(dAtA) - i, nil
-}
+func (*PrivateKey) ProtoMessage() {}
-func (m *PrivateKey) Marshal() (dAtA []byte, err error) {
- size := m.Size()
- dAtA = make([]byte, size)
- n, err := m.MarshalToSizedBuffer(dAtA[:size])
- if err != nil {
- return nil, err
+func (x *PrivateKey) ProtoReflect() protoreflect.Message {
+ mi := &file_pb_crypto_proto_msgTypes[1]
+ if protoimpl.UnsafeEnabled && x != nil {
+ ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
+ if ms.LoadMessageInfo() == nil {
+ ms.StoreMessageInfo(mi)
+ }
+ return ms
}
- return dAtA[:n], nil
-}
-
-func (m *PrivateKey) MarshalTo(dAtA []byte) (int, error) {
- size := m.Size()
- return m.MarshalToSizedBuffer(dAtA[:size])
+ return mi.MessageOf(x)
}
-func (m *PrivateKey) MarshalToSizedBuffer(dAtA []byte) (int, error) {
- i := len(dAtA)
- _ = i
- var l int
- _ = l
- if m.Data != nil {
- i -= len(m.Data)
- copy(dAtA[i:], m.Data)
- i = encodeVarintCrypto(dAtA, i, uint64(len(m.Data)))
- i--
- dAtA[i] = 0x12
- }
- i = encodeVarintCrypto(dAtA, i, uint64(m.Type))
- i--
- dAtA[i] = 0x8
- return len(dAtA) - i, nil
+// Deprecated: Use PrivateKey.ProtoReflect.Descriptor instead.
+func (*PrivateKey) Descriptor() ([]byte, []int) {
+ return file_pb_crypto_proto_rawDescGZIP(), []int{1}
}
-func encodeVarintCrypto(dAtA []byte, offset int, v uint64) int {
- offset -= sovCrypto(v)
- base := offset
- for v >= 1<<7 {
- dAtA[offset] = uint8(v&0x7f | 0x80)
- v >>= 7
- offset++
- }
- dAtA[offset] = uint8(v)
- return base
-}
-func (m *PublicKey) Size() (n int) {
- if m == nil {
- return 0
+func (x *PrivateKey) GetType() KeyType {
+ if x != nil && x.Type != nil {
+ return *x.Type
}
- var l int
- _ = l
- n += 1 + sovCrypto(uint64(m.Type))
- if m.Data != nil {
- l = len(m.Data)
- n += 1 + l + sovCrypto(uint64(l))
- }
- return n
+ return KeyType_RSA
}
-func (m *PrivateKey) Size() (n int) {
- if m == nil {
- return 0
+func (x *PrivateKey) GetData() []byte {
+ if x != nil {
+ return x.Data
}
- var l int
- _ = l
- n += 1 + sovCrypto(uint64(m.Type))
- if m.Data != nil {
- l = len(m.Data)
- n += 1 + l + sovCrypto(uint64(l))
- }
- return n
+ return nil
}
-func sovCrypto(x uint64) (n int) {
- return (math_bits.Len64(x|1) + 6) / 7
+var File_pb_crypto_proto protoreflect.FileDescriptor
+
+var file_pb_crypto_proto_rawDesc = []byte{
+ 0x0a, 0x0f, 0x70, 0x62, 0x2f, 0x63, 0x72, 0x79, 0x70, 0x74, 0x6f, 0x2e, 0x70, 0x72, 0x6f, 0x74,
+ 0x6f, 0x12, 0x09, 0x63, 0x72, 0x79, 0x70, 0x74, 0x6f, 0x2e, 0x70, 0x62, 0x22, 0x47, 0x0a, 0x09,
+ 0x50, 0x75, 0x62, 0x6c, 0x69, 0x63, 0x4b, 0x65, 0x79, 0x12, 0x26, 0x0a, 0x04, 0x54, 0x79, 0x70,
+ 0x65, 0x18, 0x01, 0x20, 0x02, 0x28, 0x0e, 0x32, 0x12, 0x2e, 0x63, 0x72, 0x79, 0x70, 0x74, 0x6f,
+ 0x2e, 0x70, 0x62, 0x2e, 0x4b, 0x65, 0x79, 0x54, 0x79, 0x70, 0x65, 0x52, 0x04, 0x54, 0x79, 0x70,
+ 0x65, 0x12, 0x12, 0x0a, 0x04, 0x44, 0x61, 0x74, 0x61, 0x18, 0x02, 0x20, 0x02, 0x28, 0x0c, 0x52,
+ 0x04, 0x44, 0x61, 0x74, 0x61, 0x22, 0x48, 0x0a, 0x0a, 0x50, 0x72, 0x69, 0x76, 0x61, 0x74, 0x65,
+ 0x4b, 0x65, 0x79, 0x12, 0x26, 0x0a, 0x04, 0x54, 0x79, 0x70, 0x65, 0x18, 0x01, 0x20, 0x02, 0x28,
+ 0x0e, 0x32, 0x12, 0x2e, 0x63, 0x72, 0x79, 0x70, 0x74, 0x6f, 0x2e, 0x70, 0x62, 0x2e, 0x4b, 0x65,
+ 0x79, 0x54, 0x79, 0x70, 0x65, 0x52, 0x04, 0x54, 0x79, 0x70, 0x65, 0x12, 0x12, 0x0a, 0x04, 0x44,
+ 0x61, 0x74, 0x61, 0x18, 0x02, 0x20, 0x02, 0x28, 0x0c, 0x52, 0x04, 0x44, 0x61, 0x74, 0x61, 0x2a,
+ 0x39, 0x0a, 0x07, 0x4b, 0x65, 0x79, 0x54, 0x79, 0x70, 0x65, 0x12, 0x07, 0x0a, 0x03, 0x52, 0x53,
+ 0x41, 0x10, 0x00, 0x12, 0x0b, 0x0a, 0x07, 0x45, 0x64, 0x32, 0x35, 0x35, 0x31, 0x39, 0x10, 0x01,
+ 0x12, 0x0d, 0x0a, 0x09, 0x53, 0x65, 0x63, 0x70, 0x32, 0x35, 0x36, 0x6b, 0x31, 0x10, 0x02, 0x12,
+ 0x09, 0x0a, 0x05, 0x45, 0x43, 0x44, 0x53, 0x41, 0x10, 0x03, 0x42, 0x2c, 0x5a, 0x2a, 0x67, 0x69,
+ 0x74, 0x68, 0x75, 0x62, 0x2e, 0x63, 0x6f, 0x6d, 0x2f, 0x6c, 0x69, 0x62, 0x70, 0x32, 0x70, 0x2f,
+ 0x67, 0x6f, 0x2d, 0x6c, 0x69, 0x62, 0x70, 0x32, 0x70, 0x2f, 0x63, 0x6f, 0x72, 0x65, 0x2f, 0x63,
+ 0x72, 0x79, 0x70, 0x74, 0x6f, 0x2f, 0x70, 0x62,
}
-func sozCrypto(x uint64) (n int) {
- return sovCrypto(uint64((x << 1) ^ uint64((int64(x) >> 63))))
-}
-func (m *PublicKey) Unmarshal(dAtA []byte) error {
- var hasFields [1]uint64
- l := len(dAtA)
- iNdEx := 0
- for iNdEx < l {
- preIndex := iNdEx
- var wire uint64
- for shift := uint(0); ; shift += 7 {
- if shift >= 64 {
- return ErrIntOverflowCrypto
- }
- if iNdEx >= l {
- return io.ErrUnexpectedEOF
- }
- b := dAtA[iNdEx]
- iNdEx++
- wire |= uint64(b&0x7F) << shift
- if b < 0x80 {
- break
- }
- }
- fieldNum := int32(wire >> 3)
- wireType := int(wire & 0x7)
- if wireType == 4 {
- return fmt.Errorf("proto: PublicKey: wiretype end group for non-group")
- }
- if fieldNum <= 0 {
- return fmt.Errorf("proto: PublicKey: illegal tag %d (wire type %d)", fieldNum, wire)
- }
- switch fieldNum {
- case 1:
- if wireType != 0 {
- return fmt.Errorf("proto: wrong wireType = %d for field Type", wireType)
- }
- m.Type = 0
- for shift := uint(0); ; shift += 7 {
- if shift >= 64 {
- return ErrIntOverflowCrypto
- }
- if iNdEx >= l {
- return io.ErrUnexpectedEOF
- }
- b := dAtA[iNdEx]
- iNdEx++
- m.Type |= KeyType(b&0x7F) << shift
- if b < 0x80 {
- break
- }
- }
- hasFields[0] |= uint64(0x00000001)
- case 2:
- if wireType != 2 {
- return fmt.Errorf("proto: wrong wireType = %d for field Data", wireType)
- }
- var byteLen int
- for shift := uint(0); ; shift += 7 {
- if shift >= 64 {
- return ErrIntOverflowCrypto
- }
- if iNdEx >= l {
- return io.ErrUnexpectedEOF
- }
- b := dAtA[iNdEx]
- iNdEx++
- byteLen |= int(b&0x7F) << shift
- if b < 0x80 {
- break
- }
- }
- if byteLen < 0 {
- return ErrInvalidLengthCrypto
- }
- postIndex := iNdEx + byteLen
- if postIndex < 0 {
- return ErrInvalidLengthCrypto
- }
- if postIndex > l {
- return io.ErrUnexpectedEOF
- }
- m.Data = append(m.Data[:0], dAtA[iNdEx:postIndex]...)
- if m.Data == nil {
- m.Data = []byte{}
- }
- iNdEx = postIndex
- hasFields[0] |= uint64(0x00000002)
- default:
- iNdEx = preIndex
- skippy, err := skipCrypto(dAtA[iNdEx:])
- if err != nil {
- return err
- }
- if skippy < 0 {
- return ErrInvalidLengthCrypto
- }
- if (iNdEx + skippy) < 0 {
- return ErrInvalidLengthCrypto
- }
- if (iNdEx + skippy) > l {
- return io.ErrUnexpectedEOF
- }
- iNdEx += skippy
- }
- }
- if hasFields[0]&uint64(0x00000001) == 0 {
- return github_com_gogo_protobuf_proto.NewRequiredNotSetError("Type")
- }
- if hasFields[0]&uint64(0x00000002) == 0 {
- return github_com_gogo_protobuf_proto.NewRequiredNotSetError("Data")
- }
- if iNdEx > l {
- return io.ErrUnexpectedEOF
- }
- return nil
-}
-func (m *PrivateKey) Unmarshal(dAtA []byte) error {
- var hasFields [1]uint64
- l := len(dAtA)
- iNdEx := 0
- for iNdEx < l {
- preIndex := iNdEx
- var wire uint64
- for shift := uint(0); ; shift += 7 {
- if shift >= 64 {
- return ErrIntOverflowCrypto
- }
- if iNdEx >= l {
- return io.ErrUnexpectedEOF
- }
- b := dAtA[iNdEx]
- iNdEx++
- wire |= uint64(b&0x7F) << shift
- if b < 0x80 {
- break
- }
- }
- fieldNum := int32(wire >> 3)
- wireType := int(wire & 0x7)
- if wireType == 4 {
- return fmt.Errorf("proto: PrivateKey: wiretype end group for non-group")
- }
- if fieldNum <= 0 {
- return fmt.Errorf("proto: PrivateKey: illegal tag %d (wire type %d)", fieldNum, wire)
- }
- switch fieldNum {
- case 1:
- if wireType != 0 {
- return fmt.Errorf("proto: wrong wireType = %d for field Type", wireType)
- }
- m.Type = 0
- for shift := uint(0); ; shift += 7 {
- if shift >= 64 {
- return ErrIntOverflowCrypto
- }
- if iNdEx >= l {
- return io.ErrUnexpectedEOF
- }
- b := dAtA[iNdEx]
- iNdEx++
- m.Type |= KeyType(b&0x7F) << shift
- if b < 0x80 {
- break
- }
- }
- hasFields[0] |= uint64(0x00000001)
- case 2:
- if wireType != 2 {
- return fmt.Errorf("proto: wrong wireType = %d for field Data", wireType)
- }
- var byteLen int
- for shift := uint(0); ; shift += 7 {
- if shift >= 64 {
- return ErrIntOverflowCrypto
- }
- if iNdEx >= l {
- return io.ErrUnexpectedEOF
- }
- b := dAtA[iNdEx]
- iNdEx++
- byteLen |= int(b&0x7F) << shift
- if b < 0x80 {
- break
- }
- }
- if byteLen < 0 {
- return ErrInvalidLengthCrypto
- }
- postIndex := iNdEx + byteLen
- if postIndex < 0 {
- return ErrInvalidLengthCrypto
- }
- if postIndex > l {
- return io.ErrUnexpectedEOF
- }
- m.Data = append(m.Data[:0], dAtA[iNdEx:postIndex]...)
- if m.Data == nil {
- m.Data = []byte{}
- }
- iNdEx = postIndex
- hasFields[0] |= uint64(0x00000002)
- default:
- iNdEx = preIndex
- skippy, err := skipCrypto(dAtA[iNdEx:])
- if err != nil {
- return err
- }
- if skippy < 0 {
- return ErrInvalidLengthCrypto
- }
- if (iNdEx + skippy) < 0 {
- return ErrInvalidLengthCrypto
- }
- if (iNdEx + skippy) > l {
- return io.ErrUnexpectedEOF
- }
- iNdEx += skippy
- }
- }
- if hasFields[0]&uint64(0x00000001) == 0 {
- return github_com_gogo_protobuf_proto.NewRequiredNotSetError("Type")
- }
- if hasFields[0]&uint64(0x00000002) == 0 {
- return github_com_gogo_protobuf_proto.NewRequiredNotSetError("Data")
- }
+var (
+ file_pb_crypto_proto_rawDescOnce sync.Once
+ file_pb_crypto_proto_rawDescData = file_pb_crypto_proto_rawDesc
+)
- if iNdEx > l {
- return io.ErrUnexpectedEOF
+func file_pb_crypto_proto_rawDescGZIP() []byte {
+ file_pb_crypto_proto_rawDescOnce.Do(func() {
+ file_pb_crypto_proto_rawDescData = protoimpl.X.CompressGZIP(file_pb_crypto_proto_rawDescData)
+ })
+ return file_pb_crypto_proto_rawDescData
+}
+
+var file_pb_crypto_proto_enumTypes = make([]protoimpl.EnumInfo, 1)
+var file_pb_crypto_proto_msgTypes = make([]protoimpl.MessageInfo, 2)
+var file_pb_crypto_proto_goTypes = []interface{}{
+ (KeyType)(0), // 0: crypto.pb.KeyType
+ (*PublicKey)(nil), // 1: crypto.pb.PublicKey
+ (*PrivateKey)(nil), // 2: crypto.pb.PrivateKey
+}
+var file_pb_crypto_proto_depIdxs = []int32{
+ 0, // 0: crypto.pb.PublicKey.Type:type_name -> crypto.pb.KeyType
+ 0, // 1: crypto.pb.PrivateKey.Type:type_name -> crypto.pb.KeyType
+ 2, // [2:2] is the sub-list for method output_type
+ 2, // [2:2] is the sub-list for method input_type
+ 2, // [2:2] is the sub-list for extension type_name
+ 2, // [2:2] is the sub-list for extension extendee
+ 0, // [0:2] is the sub-list for field type_name
+}
+
+func init() { file_pb_crypto_proto_init() }
+func file_pb_crypto_proto_init() {
+ if File_pb_crypto_proto != nil {
+ return
}
- return nil
-}
-func skipCrypto(dAtA []byte) (n int, err error) {
- l := len(dAtA)
- iNdEx := 0
- depth := 0
- for iNdEx < l {
- var wire uint64
- for shift := uint(0); ; shift += 7 {
- if shift >= 64 {
- return 0, ErrIntOverflowCrypto
- }
- if iNdEx >= l {
- return 0, io.ErrUnexpectedEOF
- }
- b := dAtA[iNdEx]
- iNdEx++
- wire |= (uint64(b) & 0x7F) << shift
- if b < 0x80 {
- break
+ if !protoimpl.UnsafeEnabled {
+ file_pb_crypto_proto_msgTypes[0].Exporter = func(v interface{}, i int) interface{} {
+ switch v := v.(*PublicKey); i {
+ case 0:
+ return &v.state
+ case 1:
+ return &v.sizeCache
+ case 2:
+ return &v.unknownFields
+ default:
+ return nil
}
}
- wireType := int(wire & 0x7)
- switch wireType {
- case 0:
- for shift := uint(0); ; shift += 7 {
- if shift >= 64 {
- return 0, ErrIntOverflowCrypto
- }
- if iNdEx >= l {
- return 0, io.ErrUnexpectedEOF
- }
- iNdEx++
- if dAtA[iNdEx-1] < 0x80 {
- break
- }
- }
- case 1:
- iNdEx += 8
- case 2:
- var length int
- for shift := uint(0); ; shift += 7 {
- if shift >= 64 {
- return 0, ErrIntOverflowCrypto
- }
- if iNdEx >= l {
- return 0, io.ErrUnexpectedEOF
- }
- b := dAtA[iNdEx]
- iNdEx++
- length |= (int(b) & 0x7F) << shift
- if b < 0x80 {
- break
- }
+ file_pb_crypto_proto_msgTypes[1].Exporter = func(v interface{}, i int) interface{} {
+ switch v := v.(*PrivateKey); i {
+ case 0:
+ return &v.state
+ case 1:
+ return &v.sizeCache
+ case 2:
+ return &v.unknownFields
+ default:
+ return nil
}
- if length < 0 {
- return 0, ErrInvalidLengthCrypto
- }
- iNdEx += length
- case 3:
- depth++
- case 4:
- if depth == 0 {
- return 0, ErrUnexpectedEndOfGroupCrypto
- }
- depth--
- case 5:
- iNdEx += 4
- default:
- return 0, fmt.Errorf("proto: illegal wireType %d", wireType)
- }
- if iNdEx < 0 {
- return 0, ErrInvalidLengthCrypto
- }
- if depth == 0 {
- return iNdEx, nil
}
}
- return 0, io.ErrUnexpectedEOF
+ type x struct{}
+ out := protoimpl.TypeBuilder{
+ File: protoimpl.DescBuilder{
+ GoPackagePath: reflect.TypeOf(x{}).PkgPath(),
+ RawDescriptor: file_pb_crypto_proto_rawDesc,
+ NumEnums: 1,
+ NumMessages: 2,
+ NumExtensions: 0,
+ NumServices: 0,
+ },
+ GoTypes: file_pb_crypto_proto_goTypes,
+ DependencyIndexes: file_pb_crypto_proto_depIdxs,
+ EnumInfos: file_pb_crypto_proto_enumTypes,
+ MessageInfos: file_pb_crypto_proto_msgTypes,
+ }.Build()
+ File_pb_crypto_proto = out.File
+ file_pb_crypto_proto_rawDesc = nil
+ file_pb_crypto_proto_goTypes = nil
+ file_pb_crypto_proto_depIdxs = nil
}
-
-var (
- ErrInvalidLengthCrypto = fmt.Errorf("proto: negative length found during unmarshaling")
- ErrIntOverflowCrypto = fmt.Errorf("proto: integer overflow")
- ErrUnexpectedEndOfGroupCrypto = fmt.Errorf("proto: unexpected end of group")
-)
diff --git a/vendor/github.com/libp2p/go-libp2p/core/crypto/rsa_common.go b/vendor/github.com/libp2p/go-libp2p/core/crypto/rsa_common.go
index c7e305439..2b05eb6a3 100644
--- a/vendor/github.com/libp2p/go-libp2p/core/crypto/rsa_common.go
+++ b/vendor/github.com/libp2p/go-libp2p/core/crypto/rsa_common.go
@@ -12,9 +12,12 @@ const WeakRsaKeyEnv = "LIBP2P_ALLOW_WEAK_RSA_KEYS"
var MinRsaKeyBits = 2048
+var maxRsaKeyBits = 8192
+
// ErrRsaKeyTooSmall is returned when trying to generate or parse an RSA key
// that's smaller than MinRsaKeyBits bits. In test
var ErrRsaKeyTooSmall error
+var ErrRsaKeyTooBig error = fmt.Errorf("rsa keys must be <= %d bits", maxRsaKeyBits)
func init() {
if _, ok := os.LookupEnv(WeakRsaKeyEnv); ok {
diff --git a/vendor/github.com/libp2p/go-libp2p/core/crypto/rsa_go.go b/vendor/github.com/libp2p/go-libp2p/core/crypto/rsa_go.go
index 1324447d2..f15393094 100644
--- a/vendor/github.com/libp2p/go-libp2p/core/crypto/rsa_go.go
+++ b/vendor/github.com/libp2p/go-libp2p/core/crypto/rsa_go.go
@@ -1,6 +1,3 @@
-//go:build !openssl
-// +build !openssl
-
package crypto
import (
@@ -34,6 +31,9 @@ func GenerateRSAKeyPair(bits int, src io.Reader) (PrivKey, PubKey, error) {
if bits < MinRsaKeyBits {
return nil, nil, ErrRsaKeyTooSmall
}
+ if bits > maxRsaKeyBits {
+ return nil, nil, ErrRsaKeyTooBig
+ }
priv, err := rsa.GenerateKey(src, bits)
if err != nil {
return nil, nil, err
@@ -127,6 +127,9 @@ func UnmarshalRsaPrivateKey(b []byte) (key PrivKey, err error) {
if sk.N.BitLen() < MinRsaKeyBits {
return nil, ErrRsaKeyTooSmall
}
+ if sk.N.BitLen() > maxRsaKeyBits {
+ return nil, ErrRsaKeyTooBig
+ }
return &RsaPrivateKey{sk: *sk}, nil
}
@@ -144,6 +147,9 @@ func UnmarshalRsaPublicKey(b []byte) (key PubKey, err error) {
if pk.N.BitLen() < MinRsaKeyBits {
return nil, ErrRsaKeyTooSmall
}
+ if pk.N.BitLen() > maxRsaKeyBits {
+ return nil, ErrRsaKeyTooBig
+ }
return &RsaPublicKey{k: *pk}, nil
}
diff --git a/vendor/github.com/libp2p/go-libp2p/core/crypto/rsa_openssl.go b/vendor/github.com/libp2p/go-libp2p/core/crypto/rsa_openssl.go
deleted file mode 100644
index 4e8269ff4..000000000
--- a/vendor/github.com/libp2p/go-libp2p/core/crypto/rsa_openssl.go
+++ /dev/null
@@ -1,69 +0,0 @@
-//go:build openssl
-// +build openssl
-
-package crypto
-
-import (
- "errors"
- "io"
-
- openssl "github.com/libp2p/go-openssl"
-)
-
-// RsaPrivateKey is an rsa private key
-type RsaPrivateKey struct {
- opensslPrivateKey
-}
-
-// RsaPublicKey is an rsa public key
-type RsaPublicKey struct {
- opensslPublicKey
-}
-
-// GenerateRSAKeyPair generates a new rsa private and public key
-func GenerateRSAKeyPair(bits int, _ io.Reader) (PrivKey, PubKey, error) {
- if bits < MinRsaKeyBits {
- return nil, nil, ErrRsaKeyTooSmall
- }
-
- key, err := openssl.GenerateRSAKey(bits)
- if err != nil {
- return nil, nil, err
- }
- return &RsaPrivateKey{opensslPrivateKey{key}}, &RsaPublicKey{opensslPublicKey{key: key}}, nil
-}
-
-// GetPublic returns a public key
-func (sk *RsaPrivateKey) GetPublic() PubKey {
- return &RsaPublicKey{opensslPublicKey{key: sk.opensslPrivateKey.key}}
-}
-
-// UnmarshalRsaPrivateKey returns a private key from the input x509 bytes
-func UnmarshalRsaPrivateKey(b []byte) (PrivKey, error) {
- key, err := unmarshalOpensslPrivateKey(b)
- if err != nil {
- return nil, err
- }
- if 8*key.key.Size() < MinRsaKeyBits {
- return nil, ErrRsaKeyTooSmall
- }
- if key.Type() != RSA {
- return nil, errors.New("not actually an rsa public key")
- }
- return &RsaPrivateKey{key}, nil
-}
-
-// UnmarshalRsaPublicKey returns a public key from the input x509 bytes
-func UnmarshalRsaPublicKey(b []byte) (PubKey, error) {
- key, err := unmarshalOpensslPublicKey(b)
- if err != nil {
- return nil, err
- }
- if 8*key.key.Size() < MinRsaKeyBits {
- return nil, ErrRsaKeyTooSmall
- }
- if key.Type() != RSA {
- return nil, errors.New("not actually an rsa public key")
- }
- return &RsaPublicKey{key}, nil
-}
diff --git a/vendor/github.com/libp2p/go-libp2p/core/event/bus.go b/vendor/github.com/libp2p/go-libp2p/core/event/bus.go
index 0cd8d2ff7..13e18e535 100644
--- a/vendor/github.com/libp2p/go-libp2p/core/event/bus.go
+++ b/vendor/github.com/libp2p/go-libp2p/core/event/bus.go
@@ -39,6 +39,9 @@ type Subscription interface {
// Out returns the channel from which to consume events.
Out() <-chan interface{}
+
+ // Name returns the name for the subscription
+ Name() string
}
// Bus is an interface for a type-based event delivery system.
diff --git a/vendor/github.com/libp2p/go-libp2p/core/host/host.go b/vendor/github.com/libp2p/go-libp2p/core/host/host.go
index cfea91e5b..e62be281f 100644
--- a/vendor/github.com/libp2p/go-libp2p/core/host/host.go
+++ b/vendor/github.com/libp2p/go-libp2p/core/host/host.go
@@ -8,7 +8,6 @@ import (
"github.com/libp2p/go-libp2p/core/connmgr"
"github.com/libp2p/go-libp2p/core/event"
- "github.com/libp2p/go-libp2p/core/introspection"
"github.com/libp2p/go-libp2p/core/network"
"github.com/libp2p/go-libp2p/core/peer"
"github.com/libp2p/go-libp2p/core/peerstore"
@@ -53,7 +52,7 @@ type Host interface {
// SetStreamHandlerMatch sets the protocol handler on the Host's Mux
// using a matching function for protocol selection.
- SetStreamHandlerMatch(protocol.ID, func(string) bool, network.StreamHandler)
+ SetStreamHandlerMatch(protocol.ID, func(protocol.ID) bool, network.StreamHandler)
// RemoveStreamHandler removes a handler on the mux that was set by
// SetStreamHandler
@@ -74,16 +73,3 @@ type Host interface {
// EventBus returns the hosts eventbus
EventBus() event.Bus
}
-
-// IntrospectableHost is implemented by Host implementations that are
-// introspectable, that is, that may have introspection capability.
-type IntrospectableHost interface {
- // Introspector returns the introspector, or nil if one hasn't been
- // registered. With it, the call can register data providers, and can fetch
- // introspection data.
- Introspector() introspection.Introspector
-
- // IntrospectionEndpoint returns the introspection endpoint, or nil if one
- // hasn't been registered.
- IntrospectionEndpoint() introspection.Endpoint
-}
diff --git a/vendor/github.com/libp2p/go-libp2p/core/introspection/doc.go b/vendor/github.com/libp2p/go-libp2p/core/introspection/doc.go
deleted file mode 100644
index 302c23f4c..000000000
--- a/vendor/github.com/libp2p/go-libp2p/core/introspection/doc.go
+++ /dev/null
@@ -1,7 +0,0 @@
-// Package introspection is EXPERIMENTAL. It is subject to heavy change, and it
-// WILL change. For now, it is the simplest implementation to power the
-// proof-of-concept of the libp2p introspection framework.
-//
-// Package introspect contains the abstract skeleton of the introspection system
-// of go-libp2p, and holds the introspection data schema.
-package introspection
diff --git a/vendor/github.com/libp2p/go-libp2p/core/introspection/endpoint.go b/vendor/github.com/libp2p/go-libp2p/core/introspection/endpoint.go
deleted file mode 100644
index 51596a464..000000000
--- a/vendor/github.com/libp2p/go-libp2p/core/introspection/endpoint.go
+++ /dev/null
@@ -1,30 +0,0 @@
-package introspection
-
-// Endpoint is the interface to be implemented by introspection endpoints.
-//
-// An introspection endpoint makes introspection data accessible to external
-// consumers, over, for example, WebSockets, or TCP, or libp2p itself.
-//
-// Experimental.
-type Endpoint interface {
- // Start starts the introspection endpoint. It must only be called once, and
- // once the server is started, subsequent calls made without first calling
- // Close will error.
- Start() error
-
- // Close stops the introspection endpoint. Calls to Close on an already
- // closed endpoint (or an unstarted endpoint) must noop.
- Close() error
-
- // ListenAddrs returns the listen addresses of this endpoint.
- ListenAddrs() []string
-
- // Sessions returns the ongoing sessions of this endpoint.
- Sessions() []*Session
-}
-
-// Session represents an introspection session.
-type Session struct {
- // RemoteAddr is the remote address of the session.
- RemoteAddr string
-}
diff --git a/vendor/github.com/libp2p/go-libp2p/core/introspection/introspector.go b/vendor/github.com/libp2p/go-libp2p/core/introspection/introspector.go
deleted file mode 100644
index e39f9673b..000000000
--- a/vendor/github.com/libp2p/go-libp2p/core/introspection/introspector.go
+++ /dev/null
@@ -1,39 +0,0 @@
-package introspection
-
-import (
- "io"
-
- "github.com/libp2p/go-libp2p/core/introspection/pb"
-)
-
-// Introspector is the interface to be satisfied by components that are capable
-// of spelunking the state of the system, and representing in accordance with
-// the introspection schema.
-//
-// It's very rare to build a custom implementation of this interface;
-// it exists mostly for mocking. In most cases, you'll end up using the
-// default introspector.
-//
-// Introspector implementations are usually injected in introspection endpoints
-// to serve the data to clients, but they can also be used separately for
-// embedding or testing.
-//
-// Experimental.
-type Introspector interface {
- io.Closer
-
- // FetchRuntime returns the runtime information of the system.
- FetchRuntime() (*pb.Runtime, error)
-
- // FetchFullState returns the full state cross-cut of the running system.
- FetchFullState() (*pb.State, error)
-
- // EventChan returns the channel where all eventbus events are dumped,
- // decorated with their corresponding event metadata, ready to send over
- // the wire.
- EventChan() <-chan *pb.Event
-
- // EventMetadata returns the metadata of all events known to the
- // Introspector.
- EventMetadata() []*pb.EventType
-}
diff --git a/vendor/github.com/libp2p/go-libp2p/core/introspection/pb/Makefile b/vendor/github.com/libp2p/go-libp2p/core/introspection/pb/Makefile
deleted file mode 100644
index 731317654..000000000
--- a/vendor/github.com/libp2p/go-libp2p/core/introspection/pb/Makefile
+++ /dev/null
@@ -1,11 +0,0 @@
-PB = $(wildcard *.proto)
-GO = $(PB:.proto=.pb.go)
-
-all: $(GO)
-
-%.pb.go: %.proto
- protoc --proto_path=$(PWD):$(PWD)/../..:$(GOPATH)/src --gogofaster_out=Mgoogle/protobuf/timestamp.proto=github.com/gogo/protobuf/types:. $<
-
-clean:
- rm -f *.pb.go
- rm -f *.go
\ No newline at end of file
diff --git a/vendor/github.com/libp2p/go-libp2p/core/introspection/pb/doc.go b/vendor/github.com/libp2p/go-libp2p/core/introspection/pb/doc.go
deleted file mode 100644
index 58f6c50db..000000000
--- a/vendor/github.com/libp2p/go-libp2p/core/introspection/pb/doc.go
+++ /dev/null
@@ -1,3 +0,0 @@
-// Package introspection/pb contains the protobuf definitions and objects for
-// that form the libp2p introspection protocol.
-package pb
diff --git a/vendor/github.com/libp2p/go-libp2p/core/introspection/pb/introspection.pb.go b/vendor/github.com/libp2p/go-libp2p/core/introspection/pb/introspection.pb.go
deleted file mode 100644
index b8c609d82..000000000
--- a/vendor/github.com/libp2p/go-libp2p/core/introspection/pb/introspection.pb.go
+++ /dev/null
@@ -1,9718 +0,0 @@
-// Code generated by protoc-gen-gogo. DO NOT EDIT.
-// source: introspection.proto
-
-package pb
-
-import (
- fmt "fmt"
- proto "github.com/gogo/protobuf/proto"
- io "io"
- math "math"
- math_bits "math/bits"
-)
-
-// Reference imports to suppress errors if they are not otherwise used.
-var _ = proto.Marshal
-var _ = fmt.Errorf
-var _ = math.Inf
-
-// This is a compile-time assertion to ensure that this generated file
-// is compatible with the proto package it is being compiled against.
-// A compilation error at this line likely means your copy of the
-// proto package needs to be updated.
-const _ = proto.GoGoProtoPackageIsVersion3 // please upgrade the proto package
-
-// The status of a connection or stream.
-type Status int32
-
-const (
- Status_ACTIVE Status = 0
- Status_CLOSED Status = 1
- Status_OPENING Status = 2
- Status_CLOSING Status = 3
- Status_ERROR Status = 4
-)
-
-var Status_name = map[int32]string{
- 0: "ACTIVE",
- 1: "CLOSED",
- 2: "OPENING",
- 3: "CLOSING",
- 4: "ERROR",
-}
-
-var Status_value = map[string]int32{
- "ACTIVE": 0,
- "CLOSED": 1,
- "OPENING": 2,
- "CLOSING": 3,
- "ERROR": 4,
-}
-
-func (x Status) String() string {
- return proto.EnumName(Status_name, int32(x))
-}
-
-func (Status) EnumDescriptor() ([]byte, []int) {
- return fileDescriptor_53a8bedf9a75e10a, []int{0}
-}
-
-// Our role in a connection or stream.
-type Role int32
-
-const (
- Role_INITIATOR Role = 0
- Role_RESPONDER Role = 1
-)
-
-var Role_name = map[int32]string{
- 0: "INITIATOR",
- 1: "RESPONDER",
-}
-
-var Role_value = map[string]int32{
- "INITIATOR": 0,
- "RESPONDER": 1,
-}
-
-func (x Role) String() string {
- return proto.EnumName(Role_name, int32(x))
-}
-
-func (Role) EnumDescriptor() ([]byte, []int) {
- return fileDescriptor_53a8bedf9a75e10a, []int{1}
-}
-
-// tells client how to sort, filter or display known content properties
-type EventType_EventProperty_PropertyType int32
-
-const (
- // for properties to treat as a simple primitive
- EventType_EventProperty_STRING EventType_EventProperty_PropertyType = 0
- EventType_EventProperty_NUMBER EventType_EventProperty_PropertyType = 1
- // for properties with special human-readable formatting
- EventType_EventProperty_TIME EventType_EventProperty_PropertyType = 10
- EventType_EventProperty_PEERID EventType_EventProperty_PropertyType = 11
- EventType_EventProperty_MULTIADDR EventType_EventProperty_PropertyType = 12
- // for complex structures like nested arrays, object trees etc
- EventType_EventProperty_JSON EventType_EventProperty_PropertyType = 90
-)
-
-var EventType_EventProperty_PropertyType_name = map[int32]string{
- 0: "STRING",
- 1: "NUMBER",
- 10: "TIME",
- 11: "PEERID",
- 12: "MULTIADDR",
- 90: "JSON",
-}
-
-var EventType_EventProperty_PropertyType_value = map[string]int32{
- "STRING": 0,
- "NUMBER": 1,
- "TIME": 10,
- "PEERID": 11,
- "MULTIADDR": 12,
- "JSON": 90,
-}
-
-func (x EventType_EventProperty_PropertyType) String() string {
- return proto.EnumName(EventType_EventProperty_PropertyType_name, int32(x))
-}
-
-func (EventType_EventProperty_PropertyType) EnumDescriptor() ([]byte, []int) {
- return fileDescriptor_53a8bedf9a75e10a, []int{4, 0, 0}
-}
-
-// The DHT's relationship with this peer
-type DHT_PeerInDHT_Status int32
-
-const (
- // Connected, in a bucket, ready to send/receive queries
- DHT_PeerInDHT_ACTIVE DHT_PeerInDHT_Status = 0
- // Not currently connected, still "in" a bucket (e.g. temporarily disconnected)
- DHT_PeerInDHT_MISSING DHT_PeerInDHT_Status = 1
- // Removed from a bucket or candidate list (e.g. connection lost or too slow)
- DHT_PeerInDHT_REJECTED DHT_PeerInDHT_Status = 2
- // Was reachable when last checked, waiting to join a currently-full bucket
- DHT_PeerInDHT_CANDIDATE DHT_PeerInDHT_Status = 3
-)
-
-var DHT_PeerInDHT_Status_name = map[int32]string{
- 0: "ACTIVE",
- 1: "MISSING",
- 2: "REJECTED",
- 3: "CANDIDATE",
-}
-
-var DHT_PeerInDHT_Status_value = map[string]int32{
- "ACTIVE": 0,
- "MISSING": 1,
- "REJECTED": 2,
- "CANDIDATE": 3,
-}
-
-func (x DHT_PeerInDHT_Status) String() string {
- return proto.EnumName(DHT_PeerInDHT_Status_name, int32(x))
-}
-
-func (DHT_PeerInDHT_Status) EnumDescriptor() ([]byte, []int) {
- return fileDescriptor_53a8bedf9a75e10a, []int{11, 1, 0}
-}
-
-type ClientCommand_Source int32
-
-const (
- ClientCommand_STATE ClientCommand_Source = 0
- ClientCommand_RUNTIME ClientCommand_Source = 1
- ClientCommand_EVENTS ClientCommand_Source = 2
-)
-
-var ClientCommand_Source_name = map[int32]string{
- 0: "STATE",
- 1: "RUNTIME",
- 2: "EVENTS",
-}
-
-var ClientCommand_Source_value = map[string]int32{
- "STATE": 0,
- "RUNTIME": 1,
- "EVENTS": 2,
-}
-
-func (x ClientCommand_Source) String() string {
- return proto.EnumName(ClientCommand_Source_name, int32(x))
-}
-
-func (ClientCommand_Source) EnumDescriptor() ([]byte, []int) {
- return fileDescriptor_53a8bedf9a75e10a, []int{17, 0}
-}
-
-type ClientCommand_Command int32
-
-const (
- // HELLO is the first command that a client must send to greet the server.
- // Connections that do not respect this invariant will be terminated.
- ClientCommand_HELLO ClientCommand_Command = 0
- // REQUEST is applicable to STATE and RUNTIME sources.
- ClientCommand_REQUEST ClientCommand_Command = 1
- // PUSH streams can only be started for STATE and EVENTS sources.
- ClientCommand_PUSH_ENABLE ClientCommand_Command = 2
- ClientCommand_PUSH_DISABLE ClientCommand_Command = 3
- ClientCommand_PUSH_PAUSE ClientCommand_Command = 4
- ClientCommand_PUSH_RESUME ClientCommand_Command = 5
- // UPDATE_CONFIG requests a configuration update. The config field is
- // compulsory.
- //
- // The server reserves the right to override the requested values, and
- // will return the effective configuration in the response.
- ClientCommand_UPDATE_CONFIG ClientCommand_Command = 7
-)
-
-var ClientCommand_Command_name = map[int32]string{
- 0: "HELLO",
- 1: "REQUEST",
- 2: "PUSH_ENABLE",
- 3: "PUSH_DISABLE",
- 4: "PUSH_PAUSE",
- 5: "PUSH_RESUME",
- 7: "UPDATE_CONFIG",
-}
-
-var ClientCommand_Command_value = map[string]int32{
- "HELLO": 0,
- "REQUEST": 1,
- "PUSH_ENABLE": 2,
- "PUSH_DISABLE": 3,
- "PUSH_PAUSE": 4,
- "PUSH_RESUME": 5,
- "UPDATE_CONFIG": 7,
-}
-
-func (x ClientCommand_Command) String() string {
- return proto.EnumName(ClientCommand_Command_name, int32(x))
-}
-
-func (ClientCommand_Command) EnumDescriptor() ([]byte, []int) {
- return fileDescriptor_53a8bedf9a75e10a, []int{17, 1}
-}
-
-type CommandResponse_Result int32
-
-const (
- CommandResponse_OK CommandResponse_Result = 0
- CommandResponse_ERR CommandResponse_Result = 1
-)
-
-var CommandResponse_Result_name = map[int32]string{
- 0: "OK",
- 1: "ERR",
-}
-
-var CommandResponse_Result_value = map[string]int32{
- "OK": 0,
- "ERR": 1,
-}
-
-func (x CommandResponse_Result) String() string {
- return proto.EnumName(CommandResponse_Result_name, int32(x))
-}
-
-func (CommandResponse_Result) EnumDescriptor() ([]byte, []int) {
- return fileDescriptor_53a8bedf9a75e10a, []int{18, 0}
-}
-
-type ServerNotice_Kind int32
-
-const (
- ServerNotice_DISCARDING_EVENTS ServerNotice_Kind = 0
-)
-
-var ServerNotice_Kind_name = map[int32]string{
- 0: "DISCARDING_EVENTS",
-}
-
-var ServerNotice_Kind_value = map[string]int32{
- "DISCARDING_EVENTS": 0,
-}
-
-func (x ServerNotice_Kind) String() string {
- return proto.EnumName(ServerNotice_Kind_name, int32(x))
-}
-
-func (ServerNotice_Kind) EnumDescriptor() ([]byte, []int) {
- return fileDescriptor_53a8bedf9a75e10a, []int{19, 0}
-}
-
-// Version of schema
-type Version struct {
- Version uint32 `protobuf:"varint,1,opt,name=version,proto3" json:"version,omitempty"`
-}
-
-func (m *Version) Reset() { *m = Version{} }
-func (m *Version) String() string { return proto.CompactTextString(m) }
-func (*Version) ProtoMessage() {}
-func (*Version) Descriptor() ([]byte, []int) {
- return fileDescriptor_53a8bedf9a75e10a, []int{0}
-}
-func (m *Version) XXX_Unmarshal(b []byte) error {
- return m.Unmarshal(b)
-}
-func (m *Version) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) {
- if deterministic {
- return xxx_messageInfo_Version.Marshal(b, m, deterministic)
- } else {
- b = b[:cap(b)]
- n, err := m.MarshalToSizedBuffer(b)
- if err != nil {
- return nil, err
- }
- return b[:n], nil
- }
-}
-func (m *Version) XXX_Merge(src proto.Message) {
- xxx_messageInfo_Version.Merge(m, src)
-}
-func (m *Version) XXX_Size() int {
- return m.Size()
-}
-func (m *Version) XXX_DiscardUnknown() {
- xxx_messageInfo_Version.DiscardUnknown(m)
-}
-
-var xxx_messageInfo_Version proto.InternalMessageInfo
-
-func (m *Version) GetVersion() uint32 {
- if m != nil {
- return m.Version
- }
- return 0
-}
-
-// ResultCounter is a monotonically increasing counter that reports an ok/err breakdown of the total.
-type ResultCounter struct {
- Total uint32 `protobuf:"varint,1,opt,name=total,proto3" json:"total,omitempty"`
- Ok uint32 `protobuf:"varint,2,opt,name=ok,proto3" json:"ok,omitempty"`
- Err uint32 `protobuf:"varint,3,opt,name=err,proto3" json:"err,omitempty"`
-}
-
-func (m *ResultCounter) Reset() { *m = ResultCounter{} }
-func (m *ResultCounter) String() string { return proto.CompactTextString(m) }
-func (*ResultCounter) ProtoMessage() {}
-func (*ResultCounter) Descriptor() ([]byte, []int) {
- return fileDescriptor_53a8bedf9a75e10a, []int{1}
-}
-func (m *ResultCounter) XXX_Unmarshal(b []byte) error {
- return m.Unmarshal(b)
-}
-func (m *ResultCounter) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) {
- if deterministic {
- return xxx_messageInfo_ResultCounter.Marshal(b, m, deterministic)
- } else {
- b = b[:cap(b)]
- n, err := m.MarshalToSizedBuffer(b)
- if err != nil {
- return nil, err
- }
- return b[:n], nil
- }
-}
-func (m *ResultCounter) XXX_Merge(src proto.Message) {
- xxx_messageInfo_ResultCounter.Merge(m, src)
-}
-func (m *ResultCounter) XXX_Size() int {
- return m.Size()
-}
-func (m *ResultCounter) XXX_DiscardUnknown() {
- xxx_messageInfo_ResultCounter.DiscardUnknown(m)
-}
-
-var xxx_messageInfo_ResultCounter proto.InternalMessageInfo
-
-func (m *ResultCounter) GetTotal() uint32 {
- if m != nil {
- return m.Total
- }
- return 0
-}
-
-func (m *ResultCounter) GetOk() uint32 {
- if m != nil {
- return m.Ok
- }
- return 0
-}
-
-func (m *ResultCounter) GetErr() uint32 {
- if m != nil {
- return m.Err
- }
- return 0
-}
-
-// Moving totals over sliding time windows. Models sensible time windows,
-// we don't have to populate them all at once.
-//
-// Graphical example:
-//
-// time past -> present an event 16 min ago
-// ======================================================X================>>
-//
-// | | 1m
-// | |---| 5m
-// | |-------------| 15m
-// |------------X---------------| 30m
-// |------------------------------------------X---------------| 60m
-type SlidingCounter struct {
- Over_1M uint32 `protobuf:"varint,1,opt,name=over_1m,json=over1m,proto3" json:"over_1m,omitempty"`
- Over_5M uint32 `protobuf:"varint,2,opt,name=over_5m,json=over5m,proto3" json:"over_5m,omitempty"`
- Over_15M uint32 `protobuf:"varint,3,opt,name=over_15m,json=over15m,proto3" json:"over_15m,omitempty"`
- Over_30M uint32 `protobuf:"varint,4,opt,name=over_30m,json=over30m,proto3" json:"over_30m,omitempty"`
- Over_1Hr uint32 `protobuf:"varint,5,opt,name=over_1hr,json=over1hr,proto3" json:"over_1hr,omitempty"`
- Over_2Hr uint32 `protobuf:"varint,6,opt,name=over_2hr,json=over2hr,proto3" json:"over_2hr,omitempty"`
- Over_4Hr uint32 `protobuf:"varint,7,opt,name=over_4hr,json=over4hr,proto3" json:"over_4hr,omitempty"`
- Over_8Hr uint32 `protobuf:"varint,8,opt,name=over_8hr,json=over8hr,proto3" json:"over_8hr,omitempty"`
- Over_12Hr uint32 `protobuf:"varint,9,opt,name=over_12hr,json=over12hr,proto3" json:"over_12hr,omitempty"`
- Over_24Hr uint32 `protobuf:"varint,10,opt,name=over_24hr,json=over24hr,proto3" json:"over_24hr,omitempty"`
-}
-
-func (m *SlidingCounter) Reset() { *m = SlidingCounter{} }
-func (m *SlidingCounter) String() string { return proto.CompactTextString(m) }
-func (*SlidingCounter) ProtoMessage() {}
-func (*SlidingCounter) Descriptor() ([]byte, []int) {
- return fileDescriptor_53a8bedf9a75e10a, []int{2}
-}
-func (m *SlidingCounter) XXX_Unmarshal(b []byte) error {
- return m.Unmarshal(b)
-}
-func (m *SlidingCounter) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) {
- if deterministic {
- return xxx_messageInfo_SlidingCounter.Marshal(b, m, deterministic)
- } else {
- b = b[:cap(b)]
- n, err := m.MarshalToSizedBuffer(b)
- if err != nil {
- return nil, err
- }
- return b[:n], nil
- }
-}
-func (m *SlidingCounter) XXX_Merge(src proto.Message) {
- xxx_messageInfo_SlidingCounter.Merge(m, src)
-}
-func (m *SlidingCounter) XXX_Size() int {
- return m.Size()
-}
-func (m *SlidingCounter) XXX_DiscardUnknown() {
- xxx_messageInfo_SlidingCounter.DiscardUnknown(m)
-}
-
-var xxx_messageInfo_SlidingCounter proto.InternalMessageInfo
-
-func (m *SlidingCounter) GetOver_1M() uint32 {
- if m != nil {
- return m.Over_1M
- }
- return 0
-}
-
-func (m *SlidingCounter) GetOver_5M() uint32 {
- if m != nil {
- return m.Over_5M
- }
- return 0
-}
-
-func (m *SlidingCounter) GetOver_15M() uint32 {
- if m != nil {
- return m.Over_15M
- }
- return 0
-}
-
-func (m *SlidingCounter) GetOver_30M() uint32 {
- if m != nil {
- return m.Over_30M
- }
- return 0
-}
-
-func (m *SlidingCounter) GetOver_1Hr() uint32 {
- if m != nil {
- return m.Over_1Hr
- }
- return 0
-}
-
-func (m *SlidingCounter) GetOver_2Hr() uint32 {
- if m != nil {
- return m.Over_2Hr
- }
- return 0
-}
-
-func (m *SlidingCounter) GetOver_4Hr() uint32 {
- if m != nil {
- return m.Over_4Hr
- }
- return 0
-}
-
-func (m *SlidingCounter) GetOver_8Hr() uint32 {
- if m != nil {
- return m.Over_8Hr
- }
- return 0
-}
-
-func (m *SlidingCounter) GetOver_12Hr() uint32 {
- if m != nil {
- return m.Over_12Hr
- }
- return 0
-}
-
-func (m *SlidingCounter) GetOver_24Hr() uint32 {
- if m != nil {
- return m.Over_24Hr
- }
- return 0
-}
-
-// DataGauge reports stats for data traffic in a given direction.
-type DataGauge struct {
- // Cumulative bytes.
- CumBytes uint64 `protobuf:"varint,1,opt,name=cum_bytes,json=cumBytes,proto3" json:"cum_bytes,omitempty"`
- // Cumulative packets.
- CumPackets uint64 `protobuf:"varint,2,opt,name=cum_packets,json=cumPackets,proto3" json:"cum_packets,omitempty"`
- // Instantaneous bandwidth measurement (bytes/second).
- InstBw uint64 `protobuf:"varint,3,opt,name=inst_bw,json=instBw,proto3" json:"inst_bw,omitempty"`
-}
-
-func (m *DataGauge) Reset() { *m = DataGauge{} }
-func (m *DataGauge) String() string { return proto.CompactTextString(m) }
-func (*DataGauge) ProtoMessage() {}
-func (*DataGauge) Descriptor() ([]byte, []int) {
- return fileDescriptor_53a8bedf9a75e10a, []int{3}
-}
-func (m *DataGauge) XXX_Unmarshal(b []byte) error {
- return m.Unmarshal(b)
-}
-func (m *DataGauge) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) {
- if deterministic {
- return xxx_messageInfo_DataGauge.Marshal(b, m, deterministic)
- } else {
- b = b[:cap(b)]
- n, err := m.MarshalToSizedBuffer(b)
- if err != nil {
- return nil, err
- }
- return b[:n], nil
- }
-}
-func (m *DataGauge) XXX_Merge(src proto.Message) {
- xxx_messageInfo_DataGauge.Merge(m, src)
-}
-func (m *DataGauge) XXX_Size() int {
- return m.Size()
-}
-func (m *DataGauge) XXX_DiscardUnknown() {
- xxx_messageInfo_DataGauge.DiscardUnknown(m)
-}
-
-var xxx_messageInfo_DataGauge proto.InternalMessageInfo
-
-func (m *DataGauge) GetCumBytes() uint64 {
- if m != nil {
- return m.CumBytes
- }
- return 0
-}
-
-func (m *DataGauge) GetCumPackets() uint64 {
- if m != nil {
- return m.CumPackets
- }
- return 0
-}
-
-func (m *DataGauge) GetInstBw() uint64 {
- if m != nil {
- return m.InstBw
- }
- return 0
-}
-
-// describes a type of event
-type EventType struct {
- // name of event type, e.g. PeerConnecting
- Name string `protobuf:"bytes,1,opt,name=name,proto3" json:"name,omitempty"`
- // for runtime, send property_types for all events already seen in events list
- // for events, only send property_types in the first event of a type not in runtime
- PropertyTypes []*EventType_EventProperty `protobuf:"bytes,2,rep,name=property_types,json=propertyTypes,proto3" json:"property_types,omitempty"`
-}
-
-func (m *EventType) Reset() { *m = EventType{} }
-func (m *EventType) String() string { return proto.CompactTextString(m) }
-func (*EventType) ProtoMessage() {}
-func (*EventType) Descriptor() ([]byte, []int) {
- return fileDescriptor_53a8bedf9a75e10a, []int{4}
-}
-func (m *EventType) XXX_Unmarshal(b []byte) error {
- return m.Unmarshal(b)
-}
-func (m *EventType) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) {
- if deterministic {
- return xxx_messageInfo_EventType.Marshal(b, m, deterministic)
- } else {
- b = b[:cap(b)]
- n, err := m.MarshalToSizedBuffer(b)
- if err != nil {
- return nil, err
- }
- return b[:n], nil
- }
-}
-func (m *EventType) XXX_Merge(src proto.Message) {
- xxx_messageInfo_EventType.Merge(m, src)
-}
-func (m *EventType) XXX_Size() int {
- return m.Size()
-}
-func (m *EventType) XXX_DiscardUnknown() {
- xxx_messageInfo_EventType.DiscardUnknown(m)
-}
-
-var xxx_messageInfo_EventType proto.InternalMessageInfo
-
-func (m *EventType) GetName() string {
- if m != nil {
- return m.Name
- }
- return ""
-}
-
-func (m *EventType) GetPropertyTypes() []*EventType_EventProperty {
- if m != nil {
- return m.PropertyTypes
- }
- return nil
-}
-
-// metadata about content types in event's top-level content JSON
-type EventType_EventProperty struct {
- // property name of content e.g. openTs
- Name string `protobuf:"bytes,1,opt,name=name,proto3" json:"name,omitempty"`
- // type to interpret content value as
- Type EventType_EventProperty_PropertyType `protobuf:"varint,2,opt,name=type,proto3,enum=pb.EventType_EventProperty_PropertyType" json:"type,omitempty"`
- // if true, expect an array of values of `type`; else, singular
- HasMultiple bool `protobuf:"varint,3,opt,name=has_multiple,json=hasMultiple,proto3" json:"has_multiple,omitempty"`
-}
-
-func (m *EventType_EventProperty) Reset() { *m = EventType_EventProperty{} }
-func (m *EventType_EventProperty) String() string { return proto.CompactTextString(m) }
-func (*EventType_EventProperty) ProtoMessage() {}
-func (*EventType_EventProperty) Descriptor() ([]byte, []int) {
- return fileDescriptor_53a8bedf9a75e10a, []int{4, 0}
-}
-func (m *EventType_EventProperty) XXX_Unmarshal(b []byte) error {
- return m.Unmarshal(b)
-}
-func (m *EventType_EventProperty) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) {
- if deterministic {
- return xxx_messageInfo_EventType_EventProperty.Marshal(b, m, deterministic)
- } else {
- b = b[:cap(b)]
- n, err := m.MarshalToSizedBuffer(b)
- if err != nil {
- return nil, err
- }
- return b[:n], nil
- }
-}
-func (m *EventType_EventProperty) XXX_Merge(src proto.Message) {
- xxx_messageInfo_EventType_EventProperty.Merge(m, src)
-}
-func (m *EventType_EventProperty) XXX_Size() int {
- return m.Size()
-}
-func (m *EventType_EventProperty) XXX_DiscardUnknown() {
- xxx_messageInfo_EventType_EventProperty.DiscardUnknown(m)
-}
-
-var xxx_messageInfo_EventType_EventProperty proto.InternalMessageInfo
-
-func (m *EventType_EventProperty) GetName() string {
- if m != nil {
- return m.Name
- }
- return ""
-}
-
-func (m *EventType_EventProperty) GetType() EventType_EventProperty_PropertyType {
- if m != nil {
- return m.Type
- }
- return EventType_EventProperty_STRING
-}
-
-func (m *EventType_EventProperty) GetHasMultiple() bool {
- if m != nil {
- return m.HasMultiple
- }
- return false
-}
-
-// Runtime encapsulates runtime info about a node.
-type Runtime struct {
- // e.g. go-libp2p, js-libp2p, rust-libp2p, etc.
- Implementation string `protobuf:"bytes,1,opt,name=implementation,proto3" json:"implementation,omitempty"`
- // e.g. 1.2.3.
- Version string `protobuf:"bytes,2,opt,name=version,proto3" json:"version,omitempty"`
- // e.g. Windows, Unix, macOS, Chrome, Mozilla, etc.
- Platform string `protobuf:"bytes,3,opt,name=platform,proto3" json:"platform,omitempty"`
- // our peer id - the peer id of the host system
- PeerId string `protobuf:"bytes,4,opt,name=peer_id,json=peerId,proto3" json:"peer_id,omitempty"`
- // metadata describing configured event types
- EventTypes []*EventType `protobuf:"bytes,7,rep,name=event_types,json=eventTypes,proto3" json:"event_types,omitempty"`
-}
-
-func (m *Runtime) Reset() { *m = Runtime{} }
-func (m *Runtime) String() string { return proto.CompactTextString(m) }
-func (*Runtime) ProtoMessage() {}
-func (*Runtime) Descriptor() ([]byte, []int) {
- return fileDescriptor_53a8bedf9a75e10a, []int{5}
-}
-func (m *Runtime) XXX_Unmarshal(b []byte) error {
- return m.Unmarshal(b)
-}
-func (m *Runtime) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) {
- if deterministic {
- return xxx_messageInfo_Runtime.Marshal(b, m, deterministic)
- } else {
- b = b[:cap(b)]
- n, err := m.MarshalToSizedBuffer(b)
- if err != nil {
- return nil, err
- }
- return b[:n], nil
- }
-}
-func (m *Runtime) XXX_Merge(src proto.Message) {
- xxx_messageInfo_Runtime.Merge(m, src)
-}
-func (m *Runtime) XXX_Size() int {
- return m.Size()
-}
-func (m *Runtime) XXX_DiscardUnknown() {
- xxx_messageInfo_Runtime.DiscardUnknown(m)
-}
-
-var xxx_messageInfo_Runtime proto.InternalMessageInfo
-
-func (m *Runtime) GetImplementation() string {
- if m != nil {
- return m.Implementation
- }
- return ""
-}
-
-func (m *Runtime) GetVersion() string {
- if m != nil {
- return m.Version
- }
- return ""
-}
-
-func (m *Runtime) GetPlatform() string {
- if m != nil {
- return m.Platform
- }
- return ""
-}
-
-func (m *Runtime) GetPeerId() string {
- if m != nil {
- return m.PeerId
- }
- return ""
-}
-
-func (m *Runtime) GetEventTypes() []*EventType {
- if m != nil {
- return m.EventTypes
- }
- return nil
-}
-
-// EndpointPair is a pair of multiaddrs.
-type EndpointPair struct {
- // the source multiaddr.
- SrcMultiaddr string `protobuf:"bytes,1,opt,name=src_multiaddr,json=srcMultiaddr,proto3" json:"src_multiaddr,omitempty"`
- // the destination multiaddr.
- DstMultiaddr string `protobuf:"bytes,2,opt,name=dst_multiaddr,json=dstMultiaddr,proto3" json:"dst_multiaddr,omitempty"`
-}
-
-func (m *EndpointPair) Reset() { *m = EndpointPair{} }
-func (m *EndpointPair) String() string { return proto.CompactTextString(m) }
-func (*EndpointPair) ProtoMessage() {}
-func (*EndpointPair) Descriptor() ([]byte, []int) {
- return fileDescriptor_53a8bedf9a75e10a, []int{6}
-}
-func (m *EndpointPair) XXX_Unmarshal(b []byte) error {
- return m.Unmarshal(b)
-}
-func (m *EndpointPair) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) {
- if deterministic {
- return xxx_messageInfo_EndpointPair.Marshal(b, m, deterministic)
- } else {
- b = b[:cap(b)]
- n, err := m.MarshalToSizedBuffer(b)
- if err != nil {
- return nil, err
- }
- return b[:n], nil
- }
-}
-func (m *EndpointPair) XXX_Merge(src proto.Message) {
- xxx_messageInfo_EndpointPair.Merge(m, src)
-}
-func (m *EndpointPair) XXX_Size() int {
- return m.Size()
-}
-func (m *EndpointPair) XXX_DiscardUnknown() {
- xxx_messageInfo_EndpointPair.DiscardUnknown(m)
-}
-
-var xxx_messageInfo_EndpointPair proto.InternalMessageInfo
-
-func (m *EndpointPair) GetSrcMultiaddr() string {
- if m != nil {
- return m.SrcMultiaddr
- }
- return ""
-}
-
-func (m *EndpointPair) GetDstMultiaddr() string {
- if m != nil {
- return m.DstMultiaddr
- }
- return ""
-}
-
-// Traffic encloses data transfer statistics.
-type Traffic struct {
- // snapshot of the data in metrics.
- TrafficIn *DataGauge `protobuf:"bytes,1,opt,name=traffic_in,json=trafficIn,proto3" json:"traffic_in,omitempty"`
- // snapshot of the data out metrics.
- TrafficOut *DataGauge `protobuf:"bytes,2,opt,name=traffic_out,json=trafficOut,proto3" json:"traffic_out,omitempty"`
-}
-
-func (m *Traffic) Reset() { *m = Traffic{} }
-func (m *Traffic) String() string { return proto.CompactTextString(m) }
-func (*Traffic) ProtoMessage() {}
-func (*Traffic) Descriptor() ([]byte, []int) {
- return fileDescriptor_53a8bedf9a75e10a, []int{7}
-}
-func (m *Traffic) XXX_Unmarshal(b []byte) error {
- return m.Unmarshal(b)
-}
-func (m *Traffic) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) {
- if deterministic {
- return xxx_messageInfo_Traffic.Marshal(b, m, deterministic)
- } else {
- b = b[:cap(b)]
- n, err := m.MarshalToSizedBuffer(b)
- if err != nil {
- return nil, err
- }
- return b[:n], nil
- }
-}
-func (m *Traffic) XXX_Merge(src proto.Message) {
- xxx_messageInfo_Traffic.Merge(m, src)
-}
-func (m *Traffic) XXX_Size() int {
- return m.Size()
-}
-func (m *Traffic) XXX_DiscardUnknown() {
- xxx_messageInfo_Traffic.DiscardUnknown(m)
-}
-
-var xxx_messageInfo_Traffic proto.InternalMessageInfo
-
-func (m *Traffic) GetTrafficIn() *DataGauge {
- if m != nil {
- return m.TrafficIn
- }
- return nil
-}
-
-func (m *Traffic) GetTrafficOut() *DataGauge {
- if m != nil {
- return m.TrafficOut
- }
- return nil
-}
-
-// a list of streams, by reference or inlined.
-type StreamList struct {
- // NOTE: only one of the next 2 fields can appear, but proto3
- // doesn't support combining oneof and repeated.
- //
- // streams within this connection by reference.
- StreamIds [][]byte `protobuf:"bytes,1,rep,name=stream_ids,json=streamIds,proto3" json:"stream_ids,omitempty"`
- // streams within this connection by inlining.
- Streams []*Stream `protobuf:"bytes,2,rep,name=streams,proto3" json:"streams,omitempty"`
-}
-
-func (m *StreamList) Reset() { *m = StreamList{} }
-func (m *StreamList) String() string { return proto.CompactTextString(m) }
-func (*StreamList) ProtoMessage() {}
-func (*StreamList) Descriptor() ([]byte, []int) {
- return fileDescriptor_53a8bedf9a75e10a, []int{8}
-}
-func (m *StreamList) XXX_Unmarshal(b []byte) error {
- return m.Unmarshal(b)
-}
-func (m *StreamList) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) {
- if deterministic {
- return xxx_messageInfo_StreamList.Marshal(b, m, deterministic)
- } else {
- b = b[:cap(b)]
- n, err := m.MarshalToSizedBuffer(b)
- if err != nil {
- return nil, err
- }
- return b[:n], nil
- }
-}
-func (m *StreamList) XXX_Merge(src proto.Message) {
- xxx_messageInfo_StreamList.Merge(m, src)
-}
-func (m *StreamList) XXX_Size() int {
- return m.Size()
-}
-func (m *StreamList) XXX_DiscardUnknown() {
- xxx_messageInfo_StreamList.DiscardUnknown(m)
-}
-
-var xxx_messageInfo_StreamList proto.InternalMessageInfo
-
-func (m *StreamList) GetStreamIds() [][]byte {
- if m != nil {
- return m.StreamIds
- }
- return nil
-}
-
-func (m *StreamList) GetStreams() []*Stream {
- if m != nil {
- return m.Streams
- }
- return nil
-}
-
-// Connection reports metrics and state of a libp2p connection.
-type Connection struct {
- // the id of this connection, not to be shown in user tooling,
- // used for (cross)referencing connections (e.g. relay).
- Id []byte `protobuf:"bytes,1,opt,name=id,proto3" json:"id,omitempty"`
- // the peer id of the other party.
- PeerId string `protobuf:"bytes,2,opt,name=peer_id,json=peerId,proto3" json:"peer_id,omitempty"`
- // the status of this connection.
- Status Status `protobuf:"varint,3,opt,name=status,proto3,enum=pb.Status" json:"status,omitempty"`
- // a reference to the transport managing this connection.
- TransportId []byte `protobuf:"bytes,4,opt,name=transport_id,json=transportId,proto3" json:"transport_id,omitempty"`
- // the endpoints participating in this connection.
- Endpoints *EndpointPair `protobuf:"bytes,5,opt,name=endpoints,proto3" json:"endpoints,omitempty"`
- // the timeline of the connection, see Connection.Timeline.
- Timeline *Connection_Timeline `protobuf:"bytes,6,opt,name=timeline,proto3" json:"timeline,omitempty"`
- // our role in this connection.
- Role Role `protobuf:"varint,7,opt,name=role,proto3,enum=pb.Role" json:"role,omitempty"`
- // traffic statistics.
- Traffic *Traffic `protobuf:"bytes,8,opt,name=traffic,proto3" json:"traffic,omitempty"`
- // properties of this connection.
- Attribs *Connection_Attributes `protobuf:"bytes,9,opt,name=attribs,proto3" json:"attribs,omitempty"`
- // the instantaneous latency of this connection in nanoseconds.
- LatencyNs uint64 `protobuf:"varint,10,opt,name=latency_ns,json=latencyNs,proto3" json:"latency_ns,omitempty"`
- // streams within this connection.
- Streams *StreamList `protobuf:"bytes,11,opt,name=streams,proto3" json:"streams,omitempty"`
- // if this is a relayed connection, this points to the relaying connection.
- // a default value here (empty bytes) indicates this is not a relayed connection.
- //
- // Types that are valid to be assigned to RelayedOver:
- // *Connection_ConnId
- // *Connection_Conn
- RelayedOver isConnection_RelayedOver `protobuf_oneof:"relayed_over"`
- // user provided tags.
- UserProvidedTags []string `protobuf:"bytes,99,rep,name=user_provided_tags,json=userProvidedTags,proto3" json:"user_provided_tags,omitempty"`
-}
-
-func (m *Connection) Reset() { *m = Connection{} }
-func (m *Connection) String() string { return proto.CompactTextString(m) }
-func (*Connection) ProtoMessage() {}
-func (*Connection) Descriptor() ([]byte, []int) {
- return fileDescriptor_53a8bedf9a75e10a, []int{9}
-}
-func (m *Connection) XXX_Unmarshal(b []byte) error {
- return m.Unmarshal(b)
-}
-func (m *Connection) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) {
- if deterministic {
- return xxx_messageInfo_Connection.Marshal(b, m, deterministic)
- } else {
- b = b[:cap(b)]
- n, err := m.MarshalToSizedBuffer(b)
- if err != nil {
- return nil, err
- }
- return b[:n], nil
- }
-}
-func (m *Connection) XXX_Merge(src proto.Message) {
- xxx_messageInfo_Connection.Merge(m, src)
-}
-func (m *Connection) XXX_Size() int {
- return m.Size()
-}
-func (m *Connection) XXX_DiscardUnknown() {
- xxx_messageInfo_Connection.DiscardUnknown(m)
-}
-
-var xxx_messageInfo_Connection proto.InternalMessageInfo
-
-type isConnection_RelayedOver interface {
- isConnection_RelayedOver()
- MarshalTo([]byte) (int, error)
- Size() int
-}
-
-type Connection_ConnId struct {
- ConnId []byte `protobuf:"bytes,16,opt,name=conn_id,json=connId,proto3,oneof" json:"conn_id,omitempty"`
-}
-type Connection_Conn struct {
- Conn *Connection `protobuf:"bytes,17,opt,name=conn,proto3,oneof" json:"conn,omitempty"`
-}
-
-func (*Connection_ConnId) isConnection_RelayedOver() {}
-func (*Connection_Conn) isConnection_RelayedOver() {}
-
-func (m *Connection) GetRelayedOver() isConnection_RelayedOver {
- if m != nil {
- return m.RelayedOver
- }
- return nil
-}
-
-func (m *Connection) GetId() []byte {
- if m != nil {
- return m.Id
- }
- return nil
-}
-
-func (m *Connection) GetPeerId() string {
- if m != nil {
- return m.PeerId
- }
- return ""
-}
-
-func (m *Connection) GetStatus() Status {
- if m != nil {
- return m.Status
- }
- return Status_ACTIVE
-}
-
-func (m *Connection) GetTransportId() []byte {
- if m != nil {
- return m.TransportId
- }
- return nil
-}
-
-func (m *Connection) GetEndpoints() *EndpointPair {
- if m != nil {
- return m.Endpoints
- }
- return nil
-}
-
-func (m *Connection) GetTimeline() *Connection_Timeline {
- if m != nil {
- return m.Timeline
- }
- return nil
-}
-
-func (m *Connection) GetRole() Role {
- if m != nil {
- return m.Role
- }
- return Role_INITIATOR
-}
-
-func (m *Connection) GetTraffic() *Traffic {
- if m != nil {
- return m.Traffic
- }
- return nil
-}
-
-func (m *Connection) GetAttribs() *Connection_Attributes {
- if m != nil {
- return m.Attribs
- }
- return nil
-}
-
-func (m *Connection) GetLatencyNs() uint64 {
- if m != nil {
- return m.LatencyNs
- }
- return 0
-}
-
-func (m *Connection) GetStreams() *StreamList {
- if m != nil {
- return m.Streams
- }
- return nil
-}
-
-func (m *Connection) GetConnId() []byte {
- if x, ok := m.GetRelayedOver().(*Connection_ConnId); ok {
- return x.ConnId
- }
- return nil
-}
-
-func (m *Connection) GetConn() *Connection {
- if x, ok := m.GetRelayedOver().(*Connection_Conn); ok {
- return x.Conn
- }
- return nil
-}
-
-func (m *Connection) GetUserProvidedTags() []string {
- if m != nil {
- return m.UserProvidedTags
- }
- return nil
-}
-
-// XXX_OneofWrappers is for the internal use of the proto package.
-func (*Connection) XXX_OneofWrappers() []interface{} {
- return []interface{}{
- (*Connection_ConnId)(nil),
- (*Connection_Conn)(nil),
- }
-}
-
-// Timeline contains the timestamps (ms since epoch) of the well-known milestones of a connection.
-type Connection_Timeline struct {
- // the instant when a connection was opened on the wire.
- OpenTs uint64 `protobuf:"varint,1,opt,name=open_ts,json=openTs,proto3" json:"open_ts,omitempty"`
- // the instant when the upgrade process (handshake, security, multiplexing) finished.
- UpgradedTs uint64 `protobuf:"varint,2,opt,name=upgraded_ts,json=upgradedTs,proto3" json:"upgraded_ts,omitempty"`
- // the instant when this connection was terminated.
- CloseTs uint64 `protobuf:"varint,3,opt,name=close_ts,json=closeTs,proto3" json:"close_ts,omitempty"`
-}
-
-func (m *Connection_Timeline) Reset() { *m = Connection_Timeline{} }
-func (m *Connection_Timeline) String() string { return proto.CompactTextString(m) }
-func (*Connection_Timeline) ProtoMessage() {}
-func (*Connection_Timeline) Descriptor() ([]byte, []int) {
- return fileDescriptor_53a8bedf9a75e10a, []int{9, 0}
-}
-func (m *Connection_Timeline) XXX_Unmarshal(b []byte) error {
- return m.Unmarshal(b)
-}
-func (m *Connection_Timeline) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) {
- if deterministic {
- return xxx_messageInfo_Connection_Timeline.Marshal(b, m, deterministic)
- } else {
- b = b[:cap(b)]
- n, err := m.MarshalToSizedBuffer(b)
- if err != nil {
- return nil, err
- }
- return b[:n], nil
- }
-}
-func (m *Connection_Timeline) XXX_Merge(src proto.Message) {
- xxx_messageInfo_Connection_Timeline.Merge(m, src)
-}
-func (m *Connection_Timeline) XXX_Size() int {
- return m.Size()
-}
-func (m *Connection_Timeline) XXX_DiscardUnknown() {
- xxx_messageInfo_Connection_Timeline.DiscardUnknown(m)
-}
-
-var xxx_messageInfo_Connection_Timeline proto.InternalMessageInfo
-
-func (m *Connection_Timeline) GetOpenTs() uint64 {
- if m != nil {
- return m.OpenTs
- }
- return 0
-}
-
-func (m *Connection_Timeline) GetUpgradedTs() uint64 {
- if m != nil {
- return m.UpgradedTs
- }
- return 0
-}
-
-func (m *Connection_Timeline) GetCloseTs() uint64 {
- if m != nil {
- return m.CloseTs
- }
- return 0
-}
-
-// Attributes encapsulates the attributes of this connection.
-type Connection_Attributes struct {
- // the multiplexer being used.
- Multiplexer string `protobuf:"bytes,1,opt,name=multiplexer,proto3" json:"multiplexer,omitempty"`
- // the encryption method being used.
- Encryption string `protobuf:"bytes,2,opt,name=encryption,proto3" json:"encryption,omitempty"`
-}
-
-func (m *Connection_Attributes) Reset() { *m = Connection_Attributes{} }
-func (m *Connection_Attributes) String() string { return proto.CompactTextString(m) }
-func (*Connection_Attributes) ProtoMessage() {}
-func (*Connection_Attributes) Descriptor() ([]byte, []int) {
- return fileDescriptor_53a8bedf9a75e10a, []int{9, 1}
-}
-func (m *Connection_Attributes) XXX_Unmarshal(b []byte) error {
- return m.Unmarshal(b)
-}
-func (m *Connection_Attributes) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) {
- if deterministic {
- return xxx_messageInfo_Connection_Attributes.Marshal(b, m, deterministic)
- } else {
- b = b[:cap(b)]
- n, err := m.MarshalToSizedBuffer(b)
- if err != nil {
- return nil, err
- }
- return b[:n], nil
- }
-}
-func (m *Connection_Attributes) XXX_Merge(src proto.Message) {
- xxx_messageInfo_Connection_Attributes.Merge(m, src)
-}
-func (m *Connection_Attributes) XXX_Size() int {
- return m.Size()
-}
-func (m *Connection_Attributes) XXX_DiscardUnknown() {
- xxx_messageInfo_Connection_Attributes.DiscardUnknown(m)
-}
-
-var xxx_messageInfo_Connection_Attributes proto.InternalMessageInfo
-
-func (m *Connection_Attributes) GetMultiplexer() string {
- if m != nil {
- return m.Multiplexer
- }
- return ""
-}
-
-func (m *Connection_Attributes) GetEncryption() string {
- if m != nil {
- return m.Encryption
- }
- return ""
-}
-
-// Stream reports metrics and state of a libp2p stream.
-type Stream struct {
- // the id of this stream, not to be shown in user tooling,
- // used for (cross)referencing streams.
- Id []byte `protobuf:"bytes,1,opt,name=id,proto3" json:"id,omitempty"`
- // the protocol pinned to this stream.
- Protocol string `protobuf:"bytes,2,opt,name=protocol,proto3" json:"protocol,omitempty"`
- // our role in this stream.
- Role Role `protobuf:"varint,3,opt,name=role,proto3,enum=pb.Role" json:"role,omitempty"`
- // traffic statistics.
- Traffic *Traffic `protobuf:"bytes,4,opt,name=traffic,proto3" json:"traffic,omitempty"`
- // the connection this stream is hosted under.
- Conn *Stream_ConnectionRef `protobuf:"bytes,5,opt,name=conn,proto3" json:"conn,omitempty"`
- // the timeline of the stream, see Stream.Timeline.
- Timeline *Stream_Timeline `protobuf:"bytes,6,opt,name=timeline,proto3" json:"timeline,omitempty"`
- // the status of this stream.
- Status Status `protobuf:"varint,7,opt,name=status,proto3,enum=pb.Status" json:"status,omitempty"`
- // the instantaneous latency of this stream in nanoseconds.
- // TODO: this is hard to calculate.
- LatencyNs uint64 `protobuf:"varint,16,opt,name=latency_ns,json=latencyNs,proto3" json:"latency_ns,omitempty"`
- // user provided tags.
- UserProvidedTags []string `protobuf:"bytes,99,rep,name=user_provided_tags,json=userProvidedTags,proto3" json:"user_provided_tags,omitempty"`
-}
-
-func (m *Stream) Reset() { *m = Stream{} }
-func (m *Stream) String() string { return proto.CompactTextString(m) }
-func (*Stream) ProtoMessage() {}
-func (*Stream) Descriptor() ([]byte, []int) {
- return fileDescriptor_53a8bedf9a75e10a, []int{10}
-}
-func (m *Stream) XXX_Unmarshal(b []byte) error {
- return m.Unmarshal(b)
-}
-func (m *Stream) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) {
- if deterministic {
- return xxx_messageInfo_Stream.Marshal(b, m, deterministic)
- } else {
- b = b[:cap(b)]
- n, err := m.MarshalToSizedBuffer(b)
- if err != nil {
- return nil, err
- }
- return b[:n], nil
- }
-}
-func (m *Stream) XXX_Merge(src proto.Message) {
- xxx_messageInfo_Stream.Merge(m, src)
-}
-func (m *Stream) XXX_Size() int {
- return m.Size()
-}
-func (m *Stream) XXX_DiscardUnknown() {
- xxx_messageInfo_Stream.DiscardUnknown(m)
-}
-
-var xxx_messageInfo_Stream proto.InternalMessageInfo
-
-func (m *Stream) GetId() []byte {
- if m != nil {
- return m.Id
- }
- return nil
-}
-
-func (m *Stream) GetProtocol() string {
- if m != nil {
- return m.Protocol
- }
- return ""
-}
-
-func (m *Stream) GetRole() Role {
- if m != nil {
- return m.Role
- }
- return Role_INITIATOR
-}
-
-func (m *Stream) GetTraffic() *Traffic {
- if m != nil {
- return m.Traffic
- }
- return nil
-}
-
-func (m *Stream) GetConn() *Stream_ConnectionRef {
- if m != nil {
- return m.Conn
- }
- return nil
-}
-
-func (m *Stream) GetTimeline() *Stream_Timeline {
- if m != nil {
- return m.Timeline
- }
- return nil
-}
-
-func (m *Stream) GetStatus() Status {
- if m != nil {
- return m.Status
- }
- return Status_ACTIVE
-}
-
-func (m *Stream) GetLatencyNs() uint64 {
- if m != nil {
- return m.LatencyNs
- }
- return 0
-}
-
-func (m *Stream) GetUserProvidedTags() []string {
- if m != nil {
- return m.UserProvidedTags
- }
- return nil
-}
-
-type Stream_ConnectionRef struct {
- // Types that are valid to be assigned to Connection:
- // *Stream_ConnectionRef_Conn
- // *Stream_ConnectionRef_ConnId
- Connection isStream_ConnectionRef_Connection `protobuf_oneof:"connection"`
-}
-
-func (m *Stream_ConnectionRef) Reset() { *m = Stream_ConnectionRef{} }
-func (m *Stream_ConnectionRef) String() string { return proto.CompactTextString(m) }
-func (*Stream_ConnectionRef) ProtoMessage() {}
-func (*Stream_ConnectionRef) Descriptor() ([]byte, []int) {
- return fileDescriptor_53a8bedf9a75e10a, []int{10, 0}
-}
-func (m *Stream_ConnectionRef) XXX_Unmarshal(b []byte) error {
- return m.Unmarshal(b)
-}
-func (m *Stream_ConnectionRef) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) {
- if deterministic {
- return xxx_messageInfo_Stream_ConnectionRef.Marshal(b, m, deterministic)
- } else {
- b = b[:cap(b)]
- n, err := m.MarshalToSizedBuffer(b)
- if err != nil {
- return nil, err
- }
- return b[:n], nil
- }
-}
-func (m *Stream_ConnectionRef) XXX_Merge(src proto.Message) {
- xxx_messageInfo_Stream_ConnectionRef.Merge(m, src)
-}
-func (m *Stream_ConnectionRef) XXX_Size() int {
- return m.Size()
-}
-func (m *Stream_ConnectionRef) XXX_DiscardUnknown() {
- xxx_messageInfo_Stream_ConnectionRef.DiscardUnknown(m)
-}
-
-var xxx_messageInfo_Stream_ConnectionRef proto.InternalMessageInfo
-
-type isStream_ConnectionRef_Connection interface {
- isStream_ConnectionRef_Connection()
- MarshalTo([]byte) (int, error)
- Size() int
-}
-
-type Stream_ConnectionRef_Conn struct {
- Conn *Connection `protobuf:"bytes,1,opt,name=conn,proto3,oneof" json:"conn,omitempty"`
-}
-type Stream_ConnectionRef_ConnId struct {
- ConnId []byte `protobuf:"bytes,2,opt,name=conn_id,json=connId,proto3,oneof" json:"conn_id,omitempty"`
-}
-
-func (*Stream_ConnectionRef_Conn) isStream_ConnectionRef_Connection() {}
-func (*Stream_ConnectionRef_ConnId) isStream_ConnectionRef_Connection() {}
-
-func (m *Stream_ConnectionRef) GetConnection() isStream_ConnectionRef_Connection {
- if m != nil {
- return m.Connection
- }
- return nil
-}
-
-func (m *Stream_ConnectionRef) GetConn() *Connection {
- if x, ok := m.GetConnection().(*Stream_ConnectionRef_Conn); ok {
- return x.Conn
- }
- return nil
-}
-
-func (m *Stream_ConnectionRef) GetConnId() []byte {
- if x, ok := m.GetConnection().(*Stream_ConnectionRef_ConnId); ok {
- return x.ConnId
- }
- return nil
-}
-
-// XXX_OneofWrappers is for the internal use of the proto package.
-func (*Stream_ConnectionRef) XXX_OneofWrappers() []interface{} {
- return []interface{}{
- (*Stream_ConnectionRef_Conn)(nil),
- (*Stream_ConnectionRef_ConnId)(nil),
- }
-}
-
-// Timeline contains the timestamps (ms since epoch) of the well-known milestones of a stream.
-type Stream_Timeline struct {
- // the instant when the stream was opened.
- OpenTs uint64 `protobuf:"varint,1,opt,name=open_ts,json=openTs,proto3" json:"open_ts,omitempty"`
- // the instant when the stream was terminated.
- CloseTs uint64 `protobuf:"varint,2,opt,name=close_ts,json=closeTs,proto3" json:"close_ts,omitempty"`
-}
-
-func (m *Stream_Timeline) Reset() { *m = Stream_Timeline{} }
-func (m *Stream_Timeline) String() string { return proto.CompactTextString(m) }
-func (*Stream_Timeline) ProtoMessage() {}
-func (*Stream_Timeline) Descriptor() ([]byte, []int) {
- return fileDescriptor_53a8bedf9a75e10a, []int{10, 1}
-}
-func (m *Stream_Timeline) XXX_Unmarshal(b []byte) error {
- return m.Unmarshal(b)
-}
-func (m *Stream_Timeline) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) {
- if deterministic {
- return xxx_messageInfo_Stream_Timeline.Marshal(b, m, deterministic)
- } else {
- b = b[:cap(b)]
- n, err := m.MarshalToSizedBuffer(b)
- if err != nil {
- return nil, err
- }
- return b[:n], nil
- }
-}
-func (m *Stream_Timeline) XXX_Merge(src proto.Message) {
- xxx_messageInfo_Stream_Timeline.Merge(m, src)
-}
-func (m *Stream_Timeline) XXX_Size() int {
- return m.Size()
-}
-func (m *Stream_Timeline) XXX_DiscardUnknown() {
- xxx_messageInfo_Stream_Timeline.DiscardUnknown(m)
-}
-
-var xxx_messageInfo_Stream_Timeline proto.InternalMessageInfo
-
-func (m *Stream_Timeline) GetOpenTs() uint64 {
- if m != nil {
- return m.OpenTs
- }
- return 0
-}
-
-func (m *Stream_Timeline) GetCloseTs() uint64 {
- if m != nil {
- return m.CloseTs
- }
- return 0
-}
-
-// DHT metrics and state.
-type DHT struct {
- // DHT protocol name
- Protocol string `protobuf:"bytes,1,opt,name=protocol,proto3" json:"protocol,omitempty"`
- // protocol enabled.
- Enabled bool `protobuf:"varint,2,opt,name=enabled,proto3" json:"enabled,omitempty"`
- // timestamp (ms since epoch) of start up.
- StartTs uint64 `protobuf:"varint,3,opt,name=start_ts,json=startTs,proto3" json:"start_ts,omitempty"`
- // params of the dht.
- Params *DHT_Params `protobuf:"bytes,4,opt,name=params,proto3" json:"params,omitempty"`
- // existing, intantiated buckets and their contents
- Buckets []*DHT_Bucket `protobuf:"bytes,5,rep,name=buckets,proto3" json:"buckets,omitempty"`
- // counts inbound queries received from other peers
- IncomingQueries *DHT_QueryGauge `protobuf:"bytes,6,opt,name=incoming_queries,json=incomingQueries,proto3" json:"incoming_queries,omitempty"`
- // counts outbound queries dispatched by this peer
- OutgoingQueries *DHT_QueryGauge `protobuf:"bytes,7,opt,name=outgoing_queries,json=outgoingQueries,proto3" json:"outgoing_queries,omitempty"`
-}
-
-func (m *DHT) Reset() { *m = DHT{} }
-func (m *DHT) String() string { return proto.CompactTextString(m) }
-func (*DHT) ProtoMessage() {}
-func (*DHT) Descriptor() ([]byte, []int) {
- return fileDescriptor_53a8bedf9a75e10a, []int{11}
-}
-func (m *DHT) XXX_Unmarshal(b []byte) error {
- return m.Unmarshal(b)
-}
-func (m *DHT) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) {
- if deterministic {
- return xxx_messageInfo_DHT.Marshal(b, m, deterministic)
- } else {
- b = b[:cap(b)]
- n, err := m.MarshalToSizedBuffer(b)
- if err != nil {
- return nil, err
- }
- return b[:n], nil
- }
-}
-func (m *DHT) XXX_Merge(src proto.Message) {
- xxx_messageInfo_DHT.Merge(m, src)
-}
-func (m *DHT) XXX_Size() int {
- return m.Size()
-}
-func (m *DHT) XXX_DiscardUnknown() {
- xxx_messageInfo_DHT.DiscardUnknown(m)
-}
-
-var xxx_messageInfo_DHT proto.InternalMessageInfo
-
-func (m *DHT) GetProtocol() string {
- if m != nil {
- return m.Protocol
- }
- return ""
-}
-
-func (m *DHT) GetEnabled() bool {
- if m != nil {
- return m.Enabled
- }
- return false
-}
-
-func (m *DHT) GetStartTs() uint64 {
- if m != nil {
- return m.StartTs
- }
- return 0
-}
-
-func (m *DHT) GetParams() *DHT_Params {
- if m != nil {
- return m.Params
- }
- return nil
-}
-
-func (m *DHT) GetBuckets() []*DHT_Bucket {
- if m != nil {
- return m.Buckets
- }
- return nil
-}
-
-func (m *DHT) GetIncomingQueries() *DHT_QueryGauge {
- if m != nil {
- return m.IncomingQueries
- }
- return nil
-}
-
-func (m *DHT) GetOutgoingQueries() *DHT_QueryGauge {
- if m != nil {
- return m.OutgoingQueries
- }
- return nil
-}
-
-type DHT_Params struct {
- // routing table bucket size.
- K uint64 `protobuf:"varint,1,opt,name=k,proto3" json:"k,omitempty"`
- // concurrency of asynchronous requests.
- Alpha uint64 `protobuf:"varint,2,opt,name=alpha,proto3" json:"alpha,omitempty"`
- // number of disjoint paths to use.
- DisjointPaths uint64 `protobuf:"varint,3,opt,name=disjoint_paths,json=disjointPaths,proto3" json:"disjoint_paths,omitempty"`
- // number of peers closest to a target that must have responded
- // in order for a given query path to complete
- Beta uint64 `protobuf:"varint,4,opt,name=beta,proto3" json:"beta,omitempty"`
-}
-
-func (m *DHT_Params) Reset() { *m = DHT_Params{} }
-func (m *DHT_Params) String() string { return proto.CompactTextString(m) }
-func (*DHT_Params) ProtoMessage() {}
-func (*DHT_Params) Descriptor() ([]byte, []int) {
- return fileDescriptor_53a8bedf9a75e10a, []int{11, 0}
-}
-func (m *DHT_Params) XXX_Unmarshal(b []byte) error {
- return m.Unmarshal(b)
-}
-func (m *DHT_Params) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) {
- if deterministic {
- return xxx_messageInfo_DHT_Params.Marshal(b, m, deterministic)
- } else {
- b = b[:cap(b)]
- n, err := m.MarshalToSizedBuffer(b)
- if err != nil {
- return nil, err
- }
- return b[:n], nil
- }
-}
-func (m *DHT_Params) XXX_Merge(src proto.Message) {
- xxx_messageInfo_DHT_Params.Merge(m, src)
-}
-func (m *DHT_Params) XXX_Size() int {
- return m.Size()
-}
-func (m *DHT_Params) XXX_DiscardUnknown() {
- xxx_messageInfo_DHT_Params.DiscardUnknown(m)
-}
-
-var xxx_messageInfo_DHT_Params proto.InternalMessageInfo
-
-func (m *DHT_Params) GetK() uint64 {
- if m != nil {
- return m.K
- }
- return 0
-}
-
-func (m *DHT_Params) GetAlpha() uint64 {
- if m != nil {
- return m.Alpha
- }
- return 0
-}
-
-func (m *DHT_Params) GetDisjointPaths() uint64 {
- if m != nil {
- return m.DisjointPaths
- }
- return 0
-}
-
-func (m *DHT_Params) GetBeta() uint64 {
- if m != nil {
- return m.Beta
- }
- return 0
-}
-
-// Peer in DHT
-type DHT_PeerInDHT struct {
- // the peer id of the host system
- PeerId string `protobuf:"bytes,1,opt,name=peer_id,json=peerId,proto3" json:"peer_id,omitempty"`
- // the peer's status when data snapshot is taken
- Status DHT_PeerInDHT_Status `protobuf:"varint,2,opt,name=status,proto3,enum=pb.DHT_PeerInDHT_Status" json:"status,omitempty"`
- // age in bucket (ms)
- AgeInBucket uint32 `protobuf:"varint,3,opt,name=age_in_bucket,json=ageInBucket,proto3" json:"age_in_bucket,omitempty"`
-}
-
-func (m *DHT_PeerInDHT) Reset() { *m = DHT_PeerInDHT{} }
-func (m *DHT_PeerInDHT) String() string { return proto.CompactTextString(m) }
-func (*DHT_PeerInDHT) ProtoMessage() {}
-func (*DHT_PeerInDHT) Descriptor() ([]byte, []int) {
- return fileDescriptor_53a8bedf9a75e10a, []int{11, 1}
-}
-func (m *DHT_PeerInDHT) XXX_Unmarshal(b []byte) error {
- return m.Unmarshal(b)
-}
-func (m *DHT_PeerInDHT) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) {
- if deterministic {
- return xxx_messageInfo_DHT_PeerInDHT.Marshal(b, m, deterministic)
- } else {
- b = b[:cap(b)]
- n, err := m.MarshalToSizedBuffer(b)
- if err != nil {
- return nil, err
- }
- return b[:n], nil
- }
-}
-func (m *DHT_PeerInDHT) XXX_Merge(src proto.Message) {
- xxx_messageInfo_DHT_PeerInDHT.Merge(m, src)
-}
-func (m *DHT_PeerInDHT) XXX_Size() int {
- return m.Size()
-}
-func (m *DHT_PeerInDHT) XXX_DiscardUnknown() {
- xxx_messageInfo_DHT_PeerInDHT.DiscardUnknown(m)
-}
-
-var xxx_messageInfo_DHT_PeerInDHT proto.InternalMessageInfo
-
-func (m *DHT_PeerInDHT) GetPeerId() string {
- if m != nil {
- return m.PeerId
- }
- return ""
-}
-
-func (m *DHT_PeerInDHT) GetStatus() DHT_PeerInDHT_Status {
- if m != nil {
- return m.Status
- }
- return DHT_PeerInDHT_ACTIVE
-}
-
-func (m *DHT_PeerInDHT) GetAgeInBucket() uint32 {
- if m != nil {
- return m.AgeInBucket
- }
- return 0
-}
-
-// A "k-bucket" containing peers of a certain kadamelia distance
-type DHT_Bucket struct {
- // CPL (Common Prefix Length) is the length of the common prefix
- // between the ids of every peer in this bucket and the DHT peer id
- Cpl uint32 `protobuf:"varint,1,opt,name=cpl,proto3" json:"cpl,omitempty"`
- // Peers associated with this bucket
- Peers []*DHT_PeerInDHT `protobuf:"bytes,2,rep,name=peers,proto3" json:"peers,omitempty"`
-}
-
-func (m *DHT_Bucket) Reset() { *m = DHT_Bucket{} }
-func (m *DHT_Bucket) String() string { return proto.CompactTextString(m) }
-func (*DHT_Bucket) ProtoMessage() {}
-func (*DHT_Bucket) Descriptor() ([]byte, []int) {
- return fileDescriptor_53a8bedf9a75e10a, []int{11, 2}
-}
-func (m *DHT_Bucket) XXX_Unmarshal(b []byte) error {
- return m.Unmarshal(b)
-}
-func (m *DHT_Bucket) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) {
- if deterministic {
- return xxx_messageInfo_DHT_Bucket.Marshal(b, m, deterministic)
- } else {
- b = b[:cap(b)]
- n, err := m.MarshalToSizedBuffer(b)
- if err != nil {
- return nil, err
- }
- return b[:n], nil
- }
-}
-func (m *DHT_Bucket) XXX_Merge(src proto.Message) {
- xxx_messageInfo_DHT_Bucket.Merge(m, src)
-}
-func (m *DHT_Bucket) XXX_Size() int {
- return m.Size()
-}
-func (m *DHT_Bucket) XXX_DiscardUnknown() {
- xxx_messageInfo_DHT_Bucket.DiscardUnknown(m)
-}
-
-var xxx_messageInfo_DHT_Bucket proto.InternalMessageInfo
-
-func (m *DHT_Bucket) GetCpl() uint32 {
- if m != nil {
- return m.Cpl
- }
- return 0
-}
-
-func (m *DHT_Bucket) GetPeers() []*DHT_PeerInDHT {
- if m != nil {
- return m.Peers
- }
- return nil
-}
-
-// Counters of query events, by status
-type DHT_QueryGauge struct {
- // Cumulative counter of queries with "SUCCESS" status
- Success uint64 `protobuf:"varint,1,opt,name=success,proto3" json:"success,omitempty"`
- // Cumulative counter of queries with "ERROR" status
- Error uint64 `protobuf:"varint,2,opt,name=error,proto3" json:"error,omitempty"`
- // Cumulative counter of queries with "TIMEOUT" status
- Timeout uint64 `protobuf:"varint,3,opt,name=timeout,proto3" json:"timeout,omitempty"`
-}
-
-func (m *DHT_QueryGauge) Reset() { *m = DHT_QueryGauge{} }
-func (m *DHT_QueryGauge) String() string { return proto.CompactTextString(m) }
-func (*DHT_QueryGauge) ProtoMessage() {}
-func (*DHT_QueryGauge) Descriptor() ([]byte, []int) {
- return fileDescriptor_53a8bedf9a75e10a, []int{11, 3}
-}
-func (m *DHT_QueryGauge) XXX_Unmarshal(b []byte) error {
- return m.Unmarshal(b)
-}
-func (m *DHT_QueryGauge) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) {
- if deterministic {
- return xxx_messageInfo_DHT_QueryGauge.Marshal(b, m, deterministic)
- } else {
- b = b[:cap(b)]
- n, err := m.MarshalToSizedBuffer(b)
- if err != nil {
- return nil, err
- }
- return b[:n], nil
- }
-}
-func (m *DHT_QueryGauge) XXX_Merge(src proto.Message) {
- xxx_messageInfo_DHT_QueryGauge.Merge(m, src)
-}
-func (m *DHT_QueryGauge) XXX_Size() int {
- return m.Size()
-}
-func (m *DHT_QueryGauge) XXX_DiscardUnknown() {
- xxx_messageInfo_DHT_QueryGauge.DiscardUnknown(m)
-}
-
-var xxx_messageInfo_DHT_QueryGauge proto.InternalMessageInfo
-
-func (m *DHT_QueryGauge) GetSuccess() uint64 {
- if m != nil {
- return m.Success
- }
- return 0
-}
-
-func (m *DHT_QueryGauge) GetError() uint64 {
- if m != nil {
- return m.Error
- }
- return 0
-}
-
-func (m *DHT_QueryGauge) GetTimeout() uint64 {
- if m != nil {
- return m.Timeout
- }
- return 0
-}
-
-// Subsystems encapsulates all instrumented subsystems for a libp2p host.
-type Subsystems struct {
- // connections data, source agnostic but currently only supports the Swarm subsystem
- Connections []*Connection `protobuf:"bytes,1,rep,name=connections,proto3" json:"connections,omitempty"`
- // the DHT subsystem.
- Dht *DHT `protobuf:"bytes,2,opt,name=dht,proto3" json:"dht,omitempty"`
-}
-
-func (m *Subsystems) Reset() { *m = Subsystems{} }
-func (m *Subsystems) String() string { return proto.CompactTextString(m) }
-func (*Subsystems) ProtoMessage() {}
-func (*Subsystems) Descriptor() ([]byte, []int) {
- return fileDescriptor_53a8bedf9a75e10a, []int{12}
-}
-func (m *Subsystems) XXX_Unmarshal(b []byte) error {
- return m.Unmarshal(b)
-}
-func (m *Subsystems) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) {
- if deterministic {
- return xxx_messageInfo_Subsystems.Marshal(b, m, deterministic)
- } else {
- b = b[:cap(b)]
- n, err := m.MarshalToSizedBuffer(b)
- if err != nil {
- return nil, err
- }
- return b[:n], nil
- }
-}
-func (m *Subsystems) XXX_Merge(src proto.Message) {
- xxx_messageInfo_Subsystems.Merge(m, src)
-}
-func (m *Subsystems) XXX_Size() int {
- return m.Size()
-}
-func (m *Subsystems) XXX_DiscardUnknown() {
- xxx_messageInfo_Subsystems.DiscardUnknown(m)
-}
-
-var xxx_messageInfo_Subsystems proto.InternalMessageInfo
-
-func (m *Subsystems) GetConnections() []*Connection {
- if m != nil {
- return m.Connections
- }
- return nil
-}
-
-func (m *Subsystems) GetDht() *DHT {
- if m != nil {
- return m.Dht
- }
- return nil
-}
-
-// Connections and streams output for a time interval is one of these.
-type State struct {
- // list of connections
- Subsystems *Subsystems `protobuf:"bytes,1,opt,name=subsystems,proto3" json:"subsystems,omitempty"`
- // overall traffic for this peer
- Traffic *Traffic `protobuf:"bytes,2,opt,name=traffic,proto3" json:"traffic,omitempty"`
- // moment this data snapshot and instantaneous values were taken
- InstantTs uint64 `protobuf:"varint,3,opt,name=instant_ts,json=instantTs,proto3" json:"instant_ts,omitempty"`
- // start of included data collection (cumulative values counted from here)
- StartTs uint64 `protobuf:"varint,4,opt,name=start_ts,json=startTs,proto3" json:"start_ts,omitempty"`
- // length of time up to instant_ts covered by this data snapshot
- SnapshotDurationMs uint32 `protobuf:"varint,5,opt,name=snapshot_duration_ms,json=snapshotDurationMs,proto3" json:"snapshot_duration_ms,omitempty"`
-}
-
-func (m *State) Reset() { *m = State{} }
-func (m *State) String() string { return proto.CompactTextString(m) }
-func (*State) ProtoMessage() {}
-func (*State) Descriptor() ([]byte, []int) {
- return fileDescriptor_53a8bedf9a75e10a, []int{13}
-}
-func (m *State) XXX_Unmarshal(b []byte) error {
- return m.Unmarshal(b)
-}
-func (m *State) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) {
- if deterministic {
- return xxx_messageInfo_State.Marshal(b, m, deterministic)
- } else {
- b = b[:cap(b)]
- n, err := m.MarshalToSizedBuffer(b)
- if err != nil {
- return nil, err
- }
- return b[:n], nil
- }
-}
-func (m *State) XXX_Merge(src proto.Message) {
- xxx_messageInfo_State.Merge(m, src)
-}
-func (m *State) XXX_Size() int {
- return m.Size()
-}
-func (m *State) XXX_DiscardUnknown() {
- xxx_messageInfo_State.DiscardUnknown(m)
-}
-
-var xxx_messageInfo_State proto.InternalMessageInfo
-
-func (m *State) GetSubsystems() *Subsystems {
- if m != nil {
- return m.Subsystems
- }
- return nil
-}
-
-func (m *State) GetTraffic() *Traffic {
- if m != nil {
- return m.Traffic
- }
- return nil
-}
-
-func (m *State) GetInstantTs() uint64 {
- if m != nil {
- return m.InstantTs
- }
- return 0
-}
-
-func (m *State) GetStartTs() uint64 {
- if m != nil {
- return m.StartTs
- }
- return 0
-}
-
-func (m *State) GetSnapshotDurationMs() uint32 {
- if m != nil {
- return m.SnapshotDurationMs
- }
- return 0
-}
-
-// Event
-type Event struct {
- // definition of event type, containing only `name` unless this is first encounter of novel event
- Type *EventType `protobuf:"bytes,1,opt,name=type,proto3" json:"type,omitempty"`
- // time this event occurred (ms since epoch)
- Ts uint64 `protobuf:"varint,2,opt,name=ts,proto3" json:"ts,omitempty"`
- // stringified json; top-level keys and value types match EventProperty definitions
- Content string `protobuf:"bytes,3,opt,name=content,proto3" json:"content,omitempty"`
-}
-
-func (m *Event) Reset() { *m = Event{} }
-func (m *Event) String() string { return proto.CompactTextString(m) }
-func (*Event) ProtoMessage() {}
-func (*Event) Descriptor() ([]byte, []int) {
- return fileDescriptor_53a8bedf9a75e10a, []int{14}
-}
-func (m *Event) XXX_Unmarshal(b []byte) error {
- return m.Unmarshal(b)
-}
-func (m *Event) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) {
- if deterministic {
- return xxx_messageInfo_Event.Marshal(b, m, deterministic)
- } else {
- b = b[:cap(b)]
- n, err := m.MarshalToSizedBuffer(b)
- if err != nil {
- return nil, err
- }
- return b[:n], nil
- }
-}
-func (m *Event) XXX_Merge(src proto.Message) {
- xxx_messageInfo_Event.Merge(m, src)
-}
-func (m *Event) XXX_Size() int {
- return m.Size()
-}
-func (m *Event) XXX_DiscardUnknown() {
- xxx_messageInfo_Event.DiscardUnknown(m)
-}
-
-var xxx_messageInfo_Event proto.InternalMessageInfo
-
-func (m *Event) GetType() *EventType {
- if m != nil {
- return m.Type
- }
- return nil
-}
-
-func (m *Event) GetTs() uint64 {
- if m != nil {
- return m.Ts
- }
- return 0
-}
-
-func (m *Event) GetContent() string {
- if m != nil {
- return m.Content
- }
- return ""
-}
-
-// ServerMessage wraps messages to be sent to clients to allow extension
-// based on new types of data sources
-type ServerMessage struct {
- // Version of this protobuf.
- Version *Version `protobuf:"bytes,1,opt,name=version,proto3" json:"version,omitempty"`
- // The payload this message contains.
- //
- // Types that are valid to be assigned to Payload:
- // *ServerMessage_State
- // *ServerMessage_Runtime
- // *ServerMessage_Event
- // *ServerMessage_Response
- // *ServerMessage_Notice
- Payload isServerMessage_Payload `protobuf_oneof:"payload"`
-}
-
-func (m *ServerMessage) Reset() { *m = ServerMessage{} }
-func (m *ServerMessage) String() string { return proto.CompactTextString(m) }
-func (*ServerMessage) ProtoMessage() {}
-func (*ServerMessage) Descriptor() ([]byte, []int) {
- return fileDescriptor_53a8bedf9a75e10a, []int{15}
-}
-func (m *ServerMessage) XXX_Unmarshal(b []byte) error {
- return m.Unmarshal(b)
-}
-func (m *ServerMessage) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) {
- if deterministic {
- return xxx_messageInfo_ServerMessage.Marshal(b, m, deterministic)
- } else {
- b = b[:cap(b)]
- n, err := m.MarshalToSizedBuffer(b)
- if err != nil {
- return nil, err
- }
- return b[:n], nil
- }
-}
-func (m *ServerMessage) XXX_Merge(src proto.Message) {
- xxx_messageInfo_ServerMessage.Merge(m, src)
-}
-func (m *ServerMessage) XXX_Size() int {
- return m.Size()
-}
-func (m *ServerMessage) XXX_DiscardUnknown() {
- xxx_messageInfo_ServerMessage.DiscardUnknown(m)
-}
-
-var xxx_messageInfo_ServerMessage proto.InternalMessageInfo
-
-type isServerMessage_Payload interface {
- isServerMessage_Payload()
- MarshalTo([]byte) (int, error)
- Size() int
-}
-
-type ServerMessage_State struct {
- State *State `protobuf:"bytes,2,opt,name=state,proto3,oneof" json:"state,omitempty"`
-}
-type ServerMessage_Runtime struct {
- Runtime *Runtime `protobuf:"bytes,3,opt,name=runtime,proto3,oneof" json:"runtime,omitempty"`
-}
-type ServerMessage_Event struct {
- Event *Event `protobuf:"bytes,4,opt,name=event,proto3,oneof" json:"event,omitempty"`
-}
-type ServerMessage_Response struct {
- Response *CommandResponse `protobuf:"bytes,5,opt,name=response,proto3,oneof" json:"response,omitempty"`
-}
-type ServerMessage_Notice struct {
- Notice *ServerNotice `protobuf:"bytes,6,opt,name=notice,proto3,oneof" json:"notice,omitempty"`
-}
-
-func (*ServerMessage_State) isServerMessage_Payload() {}
-func (*ServerMessage_Runtime) isServerMessage_Payload() {}
-func (*ServerMessage_Event) isServerMessage_Payload() {}
-func (*ServerMessage_Response) isServerMessage_Payload() {}
-func (*ServerMessage_Notice) isServerMessage_Payload() {}
-
-func (m *ServerMessage) GetPayload() isServerMessage_Payload {
- if m != nil {
- return m.Payload
- }
- return nil
-}
-
-func (m *ServerMessage) GetVersion() *Version {
- if m != nil {
- return m.Version
- }
- return nil
-}
-
-func (m *ServerMessage) GetState() *State {
- if x, ok := m.GetPayload().(*ServerMessage_State); ok {
- return x.State
- }
- return nil
-}
-
-func (m *ServerMessage) GetRuntime() *Runtime {
- if x, ok := m.GetPayload().(*ServerMessage_Runtime); ok {
- return x.Runtime
- }
- return nil
-}
-
-func (m *ServerMessage) GetEvent() *Event {
- if x, ok := m.GetPayload().(*ServerMessage_Event); ok {
- return x.Event
- }
- return nil
-}
-
-func (m *ServerMessage) GetResponse() *CommandResponse {
- if x, ok := m.GetPayload().(*ServerMessage_Response); ok {
- return x.Response
- }
- return nil
-}
-
-func (m *ServerMessage) GetNotice() *ServerNotice {
- if x, ok := m.GetPayload().(*ServerMessage_Notice); ok {
- return x.Notice
- }
- return nil
-}
-
-// XXX_OneofWrappers is for the internal use of the proto package.
-func (*ServerMessage) XXX_OneofWrappers() []interface{} {
- return []interface{}{
- (*ServerMessage_State)(nil),
- (*ServerMessage_Runtime)(nil),
- (*ServerMessage_Event)(nil),
- (*ServerMessage_Response)(nil),
- (*ServerMessage_Notice)(nil),
- }
-}
-
-// Configuration encapsulates configuration fields for the protocol and commands.
-type Configuration struct {
- RetentionPeriodMs uint64 `protobuf:"varint,1,opt,name=retention_period_ms,json=retentionPeriodMs,proto3" json:"retention_period_ms,omitempty"`
- StateSnapshotIntervalMs uint64 `protobuf:"varint,2,opt,name=state_snapshot_interval_ms,json=stateSnapshotIntervalMs,proto3" json:"state_snapshot_interval_ms,omitempty"`
-}
-
-func (m *Configuration) Reset() { *m = Configuration{} }
-func (m *Configuration) String() string { return proto.CompactTextString(m) }
-func (*Configuration) ProtoMessage() {}
-func (*Configuration) Descriptor() ([]byte, []int) {
- return fileDescriptor_53a8bedf9a75e10a, []int{16}
-}
-func (m *Configuration) XXX_Unmarshal(b []byte) error {
- return m.Unmarshal(b)
-}
-func (m *Configuration) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) {
- if deterministic {
- return xxx_messageInfo_Configuration.Marshal(b, m, deterministic)
- } else {
- b = b[:cap(b)]
- n, err := m.MarshalToSizedBuffer(b)
- if err != nil {
- return nil, err
- }
- return b[:n], nil
- }
-}
-func (m *Configuration) XXX_Merge(src proto.Message) {
- xxx_messageInfo_Configuration.Merge(m, src)
-}
-func (m *Configuration) XXX_Size() int {
- return m.Size()
-}
-func (m *Configuration) XXX_DiscardUnknown() {
- xxx_messageInfo_Configuration.DiscardUnknown(m)
-}
-
-var xxx_messageInfo_Configuration proto.InternalMessageInfo
-
-func (m *Configuration) GetRetentionPeriodMs() uint64 {
- if m != nil {
- return m.RetentionPeriodMs
- }
- return 0
-}
-
-func (m *Configuration) GetStateSnapshotIntervalMs() uint64 {
- if m != nil {
- return m.StateSnapshotIntervalMs
- }
- return 0
-}
-
-// ClientCommand is a command sent from the client to the server.
-type ClientCommand struct {
- Version *Version `protobuf:"bytes,1,opt,name=version,proto3" json:"version,omitempty"`
- Id uint64 `protobuf:"varint,2,opt,name=id,proto3" json:"id,omitempty"`
- Command ClientCommand_Command `protobuf:"varint,3,opt,name=command,proto3,enum=pb.ClientCommand_Command" json:"command,omitempty"`
- Source ClientCommand_Source `protobuf:"varint,4,opt,name=source,proto3,enum=pb.ClientCommand_Source" json:"source,omitempty"`
- Config *Configuration `protobuf:"bytes,5,opt,name=config,proto3" json:"config,omitempty"`
-}
-
-func (m *ClientCommand) Reset() { *m = ClientCommand{} }
-func (m *ClientCommand) String() string { return proto.CompactTextString(m) }
-func (*ClientCommand) ProtoMessage() {}
-func (*ClientCommand) Descriptor() ([]byte, []int) {
- return fileDescriptor_53a8bedf9a75e10a, []int{17}
-}
-func (m *ClientCommand) XXX_Unmarshal(b []byte) error {
- return m.Unmarshal(b)
-}
-func (m *ClientCommand) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) {
- if deterministic {
- return xxx_messageInfo_ClientCommand.Marshal(b, m, deterministic)
- } else {
- b = b[:cap(b)]
- n, err := m.MarshalToSizedBuffer(b)
- if err != nil {
- return nil, err
- }
- return b[:n], nil
- }
-}
-func (m *ClientCommand) XXX_Merge(src proto.Message) {
- xxx_messageInfo_ClientCommand.Merge(m, src)
-}
-func (m *ClientCommand) XXX_Size() int {
- return m.Size()
-}
-func (m *ClientCommand) XXX_DiscardUnknown() {
- xxx_messageInfo_ClientCommand.DiscardUnknown(m)
-}
-
-var xxx_messageInfo_ClientCommand proto.InternalMessageInfo
-
-func (m *ClientCommand) GetVersion() *Version {
- if m != nil {
- return m.Version
- }
- return nil
-}
-
-func (m *ClientCommand) GetId() uint64 {
- if m != nil {
- return m.Id
- }
- return 0
-}
-
-func (m *ClientCommand) GetCommand() ClientCommand_Command {
- if m != nil {
- return m.Command
- }
- return ClientCommand_HELLO
-}
-
-func (m *ClientCommand) GetSource() ClientCommand_Source {
- if m != nil {
- return m.Source
- }
- return ClientCommand_STATE
-}
-
-func (m *ClientCommand) GetConfig() *Configuration {
- if m != nil {
- return m.Config
- }
- return nil
-}
-
-// CommandResponse is a response to a command sent by the client.
-type CommandResponse struct {
- Id uint64 `protobuf:"varint,1,opt,name=id,proto3" json:"id,omitempty"`
- Result CommandResponse_Result `protobuf:"varint,2,opt,name=result,proto3,enum=pb.CommandResponse_Result" json:"result,omitempty"`
- Error string `protobuf:"bytes,3,opt,name=error,proto3" json:"error,omitempty"`
- // effective_config is the effective configuration the server holds for
- // this connection. It is returned in response to HELLO and UPDATE_CONFIG
- // commands.
- EffectiveConfig *Configuration `protobuf:"bytes,4,opt,name=effective_config,json=effectiveConfig,proto3" json:"effective_config,omitempty"`
-}
-
-func (m *CommandResponse) Reset() { *m = CommandResponse{} }
-func (m *CommandResponse) String() string { return proto.CompactTextString(m) }
-func (*CommandResponse) ProtoMessage() {}
-func (*CommandResponse) Descriptor() ([]byte, []int) {
- return fileDescriptor_53a8bedf9a75e10a, []int{18}
-}
-func (m *CommandResponse) XXX_Unmarshal(b []byte) error {
- return m.Unmarshal(b)
-}
-func (m *CommandResponse) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) {
- if deterministic {
- return xxx_messageInfo_CommandResponse.Marshal(b, m, deterministic)
- } else {
- b = b[:cap(b)]
- n, err := m.MarshalToSizedBuffer(b)
- if err != nil {
- return nil, err
- }
- return b[:n], nil
- }
-}
-func (m *CommandResponse) XXX_Merge(src proto.Message) {
- xxx_messageInfo_CommandResponse.Merge(m, src)
-}
-func (m *CommandResponse) XXX_Size() int {
- return m.Size()
-}
-func (m *CommandResponse) XXX_DiscardUnknown() {
- xxx_messageInfo_CommandResponse.DiscardUnknown(m)
-}
-
-var xxx_messageInfo_CommandResponse proto.InternalMessageInfo
-
-func (m *CommandResponse) GetId() uint64 {
- if m != nil {
- return m.Id
- }
- return 0
-}
-
-func (m *CommandResponse) GetResult() CommandResponse_Result {
- if m != nil {
- return m.Result
- }
- return CommandResponse_OK
-}
-
-func (m *CommandResponse) GetError() string {
- if m != nil {
- return m.Error
- }
- return ""
-}
-
-func (m *CommandResponse) GetEffectiveConfig() *Configuration {
- if m != nil {
- return m.EffectiveConfig
- }
- return nil
-}
-
-// ServerNotice represents a NOTICE sent from the server to the client.
-type ServerNotice struct {
- Kind ServerNotice_Kind `protobuf:"varint,1,opt,name=kind,proto3,enum=pb.ServerNotice_Kind" json:"kind,omitempty"`
-}
-
-func (m *ServerNotice) Reset() { *m = ServerNotice{} }
-func (m *ServerNotice) String() string { return proto.CompactTextString(m) }
-func (*ServerNotice) ProtoMessage() {}
-func (*ServerNotice) Descriptor() ([]byte, []int) {
- return fileDescriptor_53a8bedf9a75e10a, []int{19}
-}
-func (m *ServerNotice) XXX_Unmarshal(b []byte) error {
- return m.Unmarshal(b)
-}
-func (m *ServerNotice) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) {
- if deterministic {
- return xxx_messageInfo_ServerNotice.Marshal(b, m, deterministic)
- } else {
- b = b[:cap(b)]
- n, err := m.MarshalToSizedBuffer(b)
- if err != nil {
- return nil, err
- }
- return b[:n], nil
- }
-}
-func (m *ServerNotice) XXX_Merge(src proto.Message) {
- xxx_messageInfo_ServerNotice.Merge(m, src)
-}
-func (m *ServerNotice) XXX_Size() int {
- return m.Size()
-}
-func (m *ServerNotice) XXX_DiscardUnknown() {
- xxx_messageInfo_ServerNotice.DiscardUnknown(m)
-}
-
-var xxx_messageInfo_ServerNotice proto.InternalMessageInfo
-
-func (m *ServerNotice) GetKind() ServerNotice_Kind {
- if m != nil {
- return m.Kind
- }
- return ServerNotice_DISCARDING_EVENTS
-}
-
-func init() {
- proto.RegisterEnum("pb.Status", Status_name, Status_value)
- proto.RegisterEnum("pb.Role", Role_name, Role_value)
- proto.RegisterEnum("pb.EventType_EventProperty_PropertyType", EventType_EventProperty_PropertyType_name, EventType_EventProperty_PropertyType_value)
- proto.RegisterEnum("pb.DHT_PeerInDHT_Status", DHT_PeerInDHT_Status_name, DHT_PeerInDHT_Status_value)
- proto.RegisterEnum("pb.ClientCommand_Source", ClientCommand_Source_name, ClientCommand_Source_value)
- proto.RegisterEnum("pb.ClientCommand_Command", ClientCommand_Command_name, ClientCommand_Command_value)
- proto.RegisterEnum("pb.CommandResponse_Result", CommandResponse_Result_name, CommandResponse_Result_value)
- proto.RegisterEnum("pb.ServerNotice_Kind", ServerNotice_Kind_name, ServerNotice_Kind_value)
- proto.RegisterType((*Version)(nil), "pb.Version")
- proto.RegisterType((*ResultCounter)(nil), "pb.ResultCounter")
- proto.RegisterType((*SlidingCounter)(nil), "pb.SlidingCounter")
- proto.RegisterType((*DataGauge)(nil), "pb.DataGauge")
- proto.RegisterType((*EventType)(nil), "pb.EventType")
- proto.RegisterType((*EventType_EventProperty)(nil), "pb.EventType.EventProperty")
- proto.RegisterType((*Runtime)(nil), "pb.Runtime")
- proto.RegisterType((*EndpointPair)(nil), "pb.EndpointPair")
- proto.RegisterType((*Traffic)(nil), "pb.Traffic")
- proto.RegisterType((*StreamList)(nil), "pb.StreamList")
- proto.RegisterType((*Connection)(nil), "pb.Connection")
- proto.RegisterType((*Connection_Timeline)(nil), "pb.Connection.Timeline")
- proto.RegisterType((*Connection_Attributes)(nil), "pb.Connection.Attributes")
- proto.RegisterType((*Stream)(nil), "pb.Stream")
- proto.RegisterType((*Stream_ConnectionRef)(nil), "pb.Stream.ConnectionRef")
- proto.RegisterType((*Stream_Timeline)(nil), "pb.Stream.Timeline")
- proto.RegisterType((*DHT)(nil), "pb.DHT")
- proto.RegisterType((*DHT_Params)(nil), "pb.DHT.Params")
- proto.RegisterType((*DHT_PeerInDHT)(nil), "pb.DHT.PeerInDHT")
- proto.RegisterType((*DHT_Bucket)(nil), "pb.DHT.Bucket")
- proto.RegisterType((*DHT_QueryGauge)(nil), "pb.DHT.QueryGauge")
- proto.RegisterType((*Subsystems)(nil), "pb.Subsystems")
- proto.RegisterType((*State)(nil), "pb.State")
- proto.RegisterType((*Event)(nil), "pb.Event")
- proto.RegisterType((*ServerMessage)(nil), "pb.ServerMessage")
- proto.RegisterType((*Configuration)(nil), "pb.Configuration")
- proto.RegisterType((*ClientCommand)(nil), "pb.ClientCommand")
- proto.RegisterType((*CommandResponse)(nil), "pb.CommandResponse")
- proto.RegisterType((*ServerNotice)(nil), "pb.ServerNotice")
-}
-
-func init() { proto.RegisterFile("introspection.proto", fileDescriptor_53a8bedf9a75e10a) }
-
-var fileDescriptor_53a8bedf9a75e10a = []byte{
- // 2207 bytes of a gzipped FileDescriptorProto
- 0x1f, 0x8b, 0x08, 0x00, 0x00, 0x00, 0x00, 0x00, 0x02, 0xff, 0x94, 0x58, 0x5b, 0x6f, 0x1b, 0xc7,
- 0x15, 0xe6, 0xfd, 0x72, 0x78, 0xf1, 0x6a, 0x9c, 0x20, 0xb4, 0x52, 0xab, 0xf6, 0xc6, 0x49, 0x14,
- 0xc3, 0x50, 0x6d, 0x3a, 0x06, 0x02, 0x34, 0x0d, 0x20, 0x89, 0x5b, 0x8b, 0x8e, 0x44, 0xd1, 0x43,
- 0xca, 0x68, 0xfb, 0xd0, 0xc5, 0x8a, 0x3b, 0x22, 0xb7, 0x22, 0x77, 0xb7, 0x33, 0x43, 0x25, 0x02,
- 0xfa, 0xd0, 0x7f, 0xd0, 0xfe, 0x82, 0xfe, 0x86, 0x3e, 0xf7, 0xad, 0x40, 0x1f, 0x8a, 0x3e, 0xe5,
- 0xb1, 0x68, 0x5f, 0x0a, 0xfb, 0x29, 0xff, 0xa2, 0x38, 0x33, 0xb3, 0x17, 0xc9, 0x97, 0xa6, 0x6f,
- 0x7b, 0xce, 0xf7, 0x9d, 0xb3, 0xb3, 0xe7, 0x36, 0x87, 0x84, 0x9b, 0x41, 0x28, 0x79, 0x24, 0x62,
- 0x36, 0x93, 0x41, 0x14, 0xee, 0xc4, 0x3c, 0x92, 0x11, 0x29, 0xc5, 0xa7, 0xf6, 0x47, 0x50, 0x7f,
- 0xc1, 0xb8, 0x08, 0xa2, 0x90, 0xf4, 0xa0, 0x7e, 0xa1, 0x1f, 0x7b, 0xc5, 0x3b, 0xc5, 0xed, 0x0e,
- 0x4d, 0x44, 0xfb, 0x29, 0x74, 0x28, 0x13, 0xeb, 0xa5, 0xdc, 0x8f, 0xd6, 0xa1, 0x64, 0x9c, 0xbc,
- 0x07, 0x55, 0x19, 0x49, 0x6f, 0x69, 0x88, 0x5a, 0x20, 0x5d, 0x28, 0x45, 0xe7, 0xbd, 0x92, 0x52,
- 0x95, 0xa2, 0x73, 0x62, 0x41, 0x99, 0x71, 0xde, 0x2b, 0x2b, 0x05, 0x3e, 0xda, 0x7f, 0x2a, 0x41,
- 0x77, 0xb2, 0x0c, 0xfc, 0x20, 0x9c, 0x27, 0xae, 0x3e, 0x80, 0x7a, 0x74, 0xc1, 0xb8, 0xfb, 0x68,
- 0x65, 0x9c, 0xd5, 0x50, 0x7c, 0xb4, 0x4a, 0x81, 0x27, 0x2b, 0xe3, 0x52, 0x01, 0x4f, 0x56, 0xe4,
- 0x16, 0x34, 0xb4, 0xc5, 0x93, 0x95, 0xf1, 0xad, 0x88, 0x8f, 0x72, 0xd0, 0xe3, 0x87, 0xab, 0x5e,
- 0x25, 0x83, 0x1e, 0x3f, 0xcc, 0x59, 0x2d, 0x78, 0xaf, 0x9a, 0xb3, 0x5a, 0xf0, 0x14, 0xea, 0x2f,
- 0x78, 0xaf, 0x96, 0x41, 0xfd, 0x1c, 0xf4, 0xf9, 0x82, 0xf7, 0xea, 0x19, 0xf4, 0x79, 0x0e, 0xfa,
- 0x62, 0xc1, 0x7b, 0x8d, 0x0c, 0xfa, 0x62, 0xc1, 0xc9, 0x87, 0xd0, 0xd4, 0xef, 0x42, 0x8f, 0x4d,
- 0x85, 0x29, 0x2e, 0xca, 0x29, 0xd8, 0x47, 0x9f, 0x90, 0x81, 0x28, 0xdb, 0xa7, 0xd0, 0x1c, 0x78,
- 0xd2, 0x7b, 0xea, 0xad, 0xe7, 0x0c, 0x99, 0xb3, 0xf5, 0xca, 0x3d, 0xbd, 0x94, 0x4c, 0xa8, 0xe0,
- 0x54, 0x68, 0x63, 0xb6, 0x5e, 0xed, 0xa1, 0x4c, 0x7e, 0x0c, 0x2d, 0x04, 0x63, 0x6f, 0x76, 0xce,
- 0xa4, 0x50, 0x21, 0xaa, 0x50, 0x98, 0xad, 0x57, 0x63, 0xad, 0xc1, 0xf8, 0x05, 0xa1, 0x90, 0xee,
- 0xe9, 0x37, 0x2a, 0x4a, 0x15, 0x5a, 0x43, 0x71, 0xef, 0x1b, 0xfb, 0xaf, 0x25, 0x68, 0x3a, 0x17,
- 0x2c, 0x94, 0xd3, 0xcb, 0x98, 0x11, 0x02, 0x95, 0xd0, 0x5b, 0x31, 0xe5, 0xbf, 0x49, 0xd5, 0x33,
- 0xd9, 0x83, 0x6e, 0xcc, 0xa3, 0x98, 0x71, 0x79, 0xe9, 0xca, 0xcb, 0x98, 0xa1, 0xfb, 0xf2, 0x76,
- 0xab, 0xff, 0xe1, 0x4e, 0x7c, 0xba, 0x93, 0x9a, 0xea, 0xa7, 0xb1, 0x21, 0xd2, 0x4e, 0x62, 0x82,
- 0x98, 0xd8, 0xfc, 0x77, 0x11, 0x3a, 0x57, 0x08, 0x6f, 0x7c, 0xd3, 0x97, 0x50, 0xc1, 0x17, 0xa8,
- 0xe3, 0x77, 0xfb, 0xdb, 0xef, 0xf0, 0xbf, 0x33, 0xce, 0xb9, 0xa7, 0xca, 0x8a, 0xdc, 0x85, 0xf6,
- 0xc2, 0x13, 0xee, 0x6a, 0xbd, 0x94, 0x41, 0xbc, 0x64, 0xea, 0x3b, 0x1b, 0xb4, 0xb5, 0xf0, 0xc4,
- 0x91, 0x51, 0xd9, 0x27, 0xd0, 0xce, 0x1b, 0x12, 0x80, 0xda, 0x64, 0x4a, 0x87, 0xa3, 0xa7, 0x56,
- 0x01, 0x9f, 0x47, 0x27, 0x47, 0x7b, 0x0e, 0xb5, 0x8a, 0xa4, 0x01, 0x95, 0xe9, 0xf0, 0xc8, 0xb1,
- 0x00, 0xb5, 0x63, 0xc7, 0xa1, 0xc3, 0x81, 0xd5, 0x22, 0x1d, 0x68, 0x1e, 0x9d, 0x1c, 0x4e, 0x87,
- 0xbb, 0x83, 0x01, 0xb5, 0xda, 0x48, 0x7a, 0x36, 0x39, 0x1e, 0x59, 0xbf, 0xb2, 0xff, 0x5c, 0x84,
- 0x3a, 0x5d, 0x87, 0x32, 0x58, 0x31, 0xf2, 0x09, 0x74, 0x83, 0x55, 0xbc, 0x64, 0x2b, 0x16, 0x4a,
- 0x4f, 0x26, 0xed, 0xd3, 0xa4, 0xd7, 0xb4, 0xf9, 0xfe, 0x2a, 0x29, 0x42, 0x22, 0x92, 0x4d, 0x68,
- 0xc4, 0x4b, 0x4f, 0x9e, 0x45, 0x5c, 0x57, 0x74, 0x93, 0xa6, 0x32, 0xa6, 0x31, 0x66, 0x8c, 0xbb,
- 0x81, 0xaf, 0x2a, 0xba, 0x49, 0x6b, 0x28, 0x0e, 0x7d, 0xb2, 0x03, 0x2d, 0x86, 0x01, 0x32, 0x19,
- 0xaa, 0xab, 0x0c, 0x75, 0xae, 0x44, 0x90, 0x02, 0x4b, 0x1e, 0x85, 0xfd, 0x0b, 0x68, 0x3b, 0xa1,
- 0x1f, 0x47, 0x41, 0x28, 0xc7, 0x5e, 0xc0, 0xc9, 0x47, 0xd0, 0x11, 0x7c, 0xa6, 0x83, 0xe7, 0xf9,
- 0x3e, 0x37, 0xa7, 0x6e, 0x0b, 0x3e, 0x3b, 0x4a, 0x74, 0x48, 0xf2, 0x85, 0xcc, 0x91, 0xf4, 0xc9,
- 0xdb, 0xbe, 0x90, 0x29, 0xc9, 0x9e, 0x43, 0x7d, 0xca, 0xbd, 0xb3, 0xb3, 0x60, 0x46, 0x1e, 0x00,
- 0x48, 0xfd, 0xe8, 0x06, 0x3a, 0x0e, 0xe6, 0x4c, 0x69, 0x55, 0xd3, 0xa6, 0x21, 0x0c, 0x43, 0xfc,
- 0x84, 0x84, 0x1d, 0xad, 0xa5, 0xf2, 0xfd, 0x1a, 0x3d, 0xf1, 0x77, 0xbc, 0x96, 0xf6, 0x73, 0x80,
- 0x89, 0xe4, 0xcc, 0x5b, 0x1d, 0x06, 0x42, 0x92, 0xdb, 0x00, 0x42, 0x49, 0x6e, 0xe0, 0x63, 0x7f,
- 0x94, 0xb7, 0xdb, 0xb4, 0xa9, 0x35, 0x43, 0x5f, 0x90, 0x7b, 0x50, 0xd7, 0x42, 0x52, 0xbd, 0x80,
- 0x8e, 0xb5, 0x3d, 0x4d, 0x20, 0xfb, 0x5f, 0x55, 0x80, 0xfd, 0x28, 0x0c, 0xf5, 0x60, 0xc4, 0x11,
- 0x16, 0xf8, 0xea, 0xdc, 0x6d, 0x5a, 0x0a, 0xfc, 0x7c, 0xf4, 0x4b, 0x57, 0xa2, 0x6f, 0x43, 0x4d,
- 0x48, 0x4f, 0xae, 0x85, 0x4a, 0x58, 0x37, 0x71, 0x8e, 0x1a, 0x6a, 0x10, 0x2c, 0x4f, 0xc9, 0xbd,
- 0x50, 0xc4, 0x11, 0x97, 0x49, 0xfe, 0xda, 0xb4, 0x95, 0xea, 0x54, 0x12, 0x9b, 0xcc, 0x24, 0x45,
- 0xa8, 0xb1, 0xd4, 0xea, 0x5b, 0x2a, 0x85, 0xb9, 0x4c, 0xd1, 0x8c, 0x42, 0x1e, 0x43, 0x03, 0x6b,
- 0x6e, 0x19, 0x84, 0x4c, 0x8d, 0xaa, 0x56, 0xff, 0x03, 0xa4, 0x67, 0x5f, 0xb0, 0x33, 0x35, 0x30,
- 0x4d, 0x89, 0xe4, 0x47, 0x50, 0xe1, 0xd1, 0x92, 0xa9, 0x01, 0xd6, 0xed, 0x37, 0xd0, 0x80, 0x46,
- 0x4b, 0x46, 0x95, 0x96, 0x7c, 0x0c, 0x75, 0x13, 0x62, 0x35, 0xc6, 0x5a, 0xfd, 0x16, 0x12, 0x4c,
- 0x42, 0x69, 0x82, 0x91, 0xc7, 0x50, 0xf7, 0xa4, 0xe4, 0xc1, 0xa9, 0x50, 0x13, 0xad, 0xd5, 0xbf,
- 0x75, 0xed, 0xc5, 0xbb, 0x0a, 0x5d, 0x4b, 0x26, 0x68, 0xc2, 0xc4, 0x14, 0x2d, 0x3d, 0xc9, 0xc2,
- 0xd9, 0xa5, 0x1b, 0x0a, 0x35, 0xec, 0x2a, 0xb4, 0x69, 0x34, 0x23, 0x41, 0xb6, 0xb3, 0x14, 0xb5,
- 0x94, 0xcf, 0x6e, 0x96, 0x22, 0x4c, 0x71, 0x9a, 0x26, 0x72, 0x0b, 0xea, 0xb3, 0x28, 0x0c, 0x31,
- 0x8a, 0x16, 0x46, 0xf1, 0xa0, 0x40, 0x6b, 0xa8, 0x18, 0xfa, 0xe4, 0x1e, 0x54, 0xf0, 0xa9, 0xb7,
- 0x91, 0x79, 0xc8, 0x4e, 0x75, 0x50, 0xa0, 0x0a, 0x25, 0x0f, 0x80, 0xac, 0x05, 0xe3, 0x6e, 0xcc,
- 0xa3, 0x8b, 0xc0, 0x67, 0xbe, 0x2b, 0xbd, 0xb9, 0xe8, 0xcd, 0xee, 0x94, 0xb7, 0x9b, 0xd4, 0x42,
- 0x64, 0x6c, 0x80, 0xa9, 0x37, 0x17, 0x9b, 0x2e, 0x34, 0x92, 0x38, 0xaa, 0x7b, 0x28, 0x66, 0xa1,
- 0x2b, 0x93, 0x19, 0x5c, 0x43, 0x71, 0xaa, 0x26, 0xf0, 0x3a, 0x9e, 0x73, 0x4f, 0x79, 0x4b, 0x27,
- 0x70, 0xa2, 0x9a, 0xe2, 0xa1, 0x1b, 0xb3, 0x65, 0x24, 0x18, 0xa2, 0x7a, 0x04, 0xd7, 0x95, 0x3c,
- 0x15, 0x9b, 0x23, 0x80, 0x2c, 0x5e, 0xe4, 0x0e, 0xb4, 0x92, 0x19, 0xf6, 0x2d, 0x4b, 0x1a, 0x31,
- 0xaf, 0x22, 0x5b, 0x00, 0x2c, 0x9c, 0xf1, 0xcb, 0x58, 0x66, 0xe3, 0x23, 0xa7, 0xd9, 0xeb, 0x42,
- 0x9b, 0xb3, 0xa5, 0x77, 0xc9, 0x7c, 0x17, 0xef, 0x92, 0x67, 0x95, 0x46, 0xdb, 0xb2, 0xec, 0xef,
- 0xcb, 0x50, 0xd3, 0xd1, 0x7c, 0xad, 0xb0, 0x71, 0xe4, 0xe0, 0x12, 0x30, 0x8b, 0x96, 0xc6, 0x5d,
- 0x2a, 0xa7, 0xf5, 0x52, 0xfe, 0x5f, 0xf5, 0x52, 0x79, 0x47, 0xbd, 0x3c, 0x30, 0x69, 0xd1, 0x45,
- 0xdd, 0xcb, 0x12, 0x9b, 0xcb, 0x0e, 0x65, 0x67, 0x26, 0x3d, 0x3f, 0x79, 0xad, 0xae, 0x6f, 0xe6,
- 0x2c, 0xde, 0x50, 0xd3, 0x59, 0xff, 0xd5, 0xdf, 0xda, 0x7f, 0x57, 0xab, 0xcf, 0xba, 0x5e, 0x7d,
- 0xff, 0x5f, 0x49, 0xfc, 0x1a, 0x3a, 0x57, 0x0e, 0x9e, 0xd6, 0x5d, 0xf1, 0x9d, 0x75, 0x97, 0x2b,
- 0xdc, 0xd2, 0xd5, 0xc2, 0xdd, 0x6b, 0x03, 0xcc, 0x52, 0x83, 0xcd, 0xaf, 0x7e, 0x48, 0xc9, 0xe5,
- 0x2b, 0xaa, 0x74, 0xa5, 0xa2, 0xec, 0xef, 0xab, 0x50, 0x1e, 0x1c, 0x4c, 0xaf, 0x24, 0xb6, 0x78,
- 0x2d, 0xb1, 0x3d, 0xa8, 0xb3, 0xd0, 0x3b, 0x5d, 0x32, 0x7d, 0x98, 0x06, 0x4d, 0x44, 0x74, 0x2c,
- 0xa4, 0xc7, 0x65, 0xae, 0x54, 0x95, 0x3c, 0x15, 0xe4, 0x13, 0xa8, 0xc5, 0x1e, 0xc7, 0x1e, 0xad,
- 0x64, 0x5f, 0x3a, 0x38, 0x98, 0xee, 0x8c, 0x95, 0x96, 0x1a, 0x14, 0x9b, 0xf9, 0x74, 0xad, 0x97,
- 0x91, 0xaa, 0x9a, 0xb7, 0x29, 0x71, 0x4f, 0xa9, 0x69, 0x02, 0x93, 0x9f, 0x81, 0x15, 0x84, 0xb3,
- 0x68, 0x15, 0x84, 0x73, 0xf7, 0xb7, 0x6b, 0xc6, 0x03, 0x26, 0x4c, 0xd2, 0x49, 0x62, 0xf2, 0x7c,
- 0xcd, 0xf8, 0xa5, 0xbe, 0x00, 0x6e, 0x24, 0xdc, 0xe7, 0x9a, 0x8a, 0xe6, 0xd1, 0x5a, 0xce, 0xa3,
- 0xbc, 0x79, 0xfd, 0xed, 0xe6, 0x09, 0xd7, 0x98, 0x6f, 0xce, 0xa1, 0xa6, 0x4f, 0x4e, 0xda, 0x50,
- 0x3c, 0x37, 0x01, 0x2e, 0x9e, 0xe3, 0x4e, 0xeb, 0x2d, 0xe3, 0x85, 0x67, 0x02, 0xab, 0x05, 0xf2,
- 0x31, 0x74, 0xfd, 0x40, 0xfc, 0x06, 0xa7, 0xaf, 0x1b, 0x7b, 0x72, 0x91, 0x84, 0xa7, 0x93, 0x68,
- 0xc7, 0xa8, 0xc4, 0xdd, 0xe6, 0x94, 0x49, 0x4f, 0x85, 0xa8, 0x42, 0xd5, 0xf3, 0xe6, 0x5f, 0x8a,
- 0xd0, 0x1c, 0xe3, 0x6d, 0x11, 0x62, 0x5e, 0x72, 0x37, 0x49, 0xf1, 0xca, 0x4d, 0xf2, 0x30, 0xad,
- 0x64, 0xbd, 0x04, 0xf5, 0xd2, 0xf8, 0x26, 0xb6, 0xd7, 0xeb, 0xda, 0x86, 0x8e, 0x37, 0x67, 0x6e,
- 0x10, 0xba, 0x3a, 0xa2, 0x66, 0x0b, 0x6e, 0x79, 0x73, 0x36, 0x0c, 0x75, 0xb0, 0xed, 0xaf, 0xb0,
- 0xf3, 0x15, 0x1b, 0xa0, 0xb6, 0xbb, 0x3f, 0x1d, 0xbe, 0x70, 0xac, 0x02, 0x69, 0x41, 0xfd, 0x68,
- 0x38, 0x99, 0xe0, 0xfa, 0x53, 0x24, 0x6d, 0x68, 0x50, 0xe7, 0x99, 0xb3, 0x3f, 0x75, 0x06, 0x56,
- 0x09, 0x57, 0x9d, 0xfd, 0xdd, 0xd1, 0x60, 0x38, 0xd8, 0x9d, 0x3a, 0x56, 0x79, 0x73, 0x1f, 0x6a,
- 0xda, 0x13, 0x6e, 0xf1, 0xb3, 0x38, 0xd9, 0xf4, 0xf1, 0x91, 0x7c, 0x0a, 0x55, 0x3c, 0x7b, 0x72,
- 0xaf, 0x6e, 0xbc, 0x76, 0x60, 0xaa, 0xf1, 0xcd, 0x17, 0x00, 0x59, 0x26, 0xb0, 0xfa, 0xc4, 0x7a,
- 0x36, 0x63, 0x22, 0xa9, 0xea, 0x44, 0xc4, 0xd0, 0x33, 0xce, 0x23, 0x9e, 0x84, 0x5e, 0x09, 0xc8,
- 0xc7, 0x76, 0xc7, 0xcd, 0xc0, 0x94, 0xa4, 0x11, 0xed, 0x5f, 0x02, 0x4c, 0xd6, 0xa7, 0xe2, 0x52,
- 0x48, 0xb6, 0x12, 0xe4, 0x21, 0xb4, 0xb2, 0x3e, 0xd2, 0x8b, 0xc0, 0x6b, 0xfd, 0x48, 0xf3, 0x14,
- 0x72, 0x0b, 0xca, 0xfe, 0x22, 0xd9, 0x37, 0xea, 0xe6, 0xf8, 0x14, 0x75, 0xf6, 0x3f, 0x8a, 0x50,
- 0xc5, 0xc0, 0x31, 0xb2, 0x03, 0x20, 0xd2, 0x97, 0xe4, 0xbb, 0x3c, 0x7b, 0x35, 0xcd, 0x31, 0xf2,
- 0x73, 0xb1, 0xf4, 0x8e, 0xb9, 0x78, 0x1b, 0x00, 0xf7, 0x70, 0x2f, 0xcc, 0xf5, 0x5a, 0xd3, 0x68,
- 0x74, 0x87, 0xa7, 0x8d, 0x58, 0xb9, 0xda, 0x88, 0x0f, 0xe1, 0x3d, 0x11, 0x7a, 0xb1, 0x58, 0x44,
- 0xd2, 0xf5, 0xd7, 0x5c, 0x2d, 0x95, 0xee, 0x4a, 0x98, 0x5f, 0x33, 0x24, 0xc1, 0x06, 0x06, 0x3a,
- 0x12, 0xf6, 0x14, 0xaa, 0x6a, 0x17, 0x24, 0x77, 0xcd, 0x9a, 0x9d, 0x5b, 0xc8, 0xb2, 0x25, 0x51,
- 0xef, 0xd2, 0x5d, 0x28, 0xa5, 0x43, 0xa5, 0x24, 0x05, 0x46, 0x7f, 0x16, 0x85, 0x92, 0x85, 0xd2,
- 0xac, 0xa4, 0x89, 0x68, 0xff, 0xa1, 0x04, 0x9d, 0x09, 0xe3, 0x17, 0x8c, 0x1f, 0x31, 0x21, 0xbc,
- 0xb9, 0xba, 0x12, 0xf2, 0xbf, 0x1c, 0xcd, 0xa7, 0x9b, 0xdf, 0x95, 0xd9, 0x9a, 0x7b, 0x17, 0xaa,
- 0x58, 0xc1, 0xcc, 0xc4, 0xa7, 0x99, 0x8c, 0x6c, 0x76, 0x50, 0xa0, 0x1a, 0x21, 0x9f, 0x42, 0x9d,
- 0xeb, 0xb5, 0x5a, 0xbd, 0xd5, 0x78, 0x32, 0x9b, 0xf6, 0x41, 0x81, 0x26, 0x28, 0xfa, 0x52, 0xbb,
- 0xad, 0x19, 0x4a, 0xcd, 0xf4, 0x93, 0xd0, 0x97, 0x42, 0xc8, 0x23, 0x68, 0x70, 0x26, 0xe2, 0x28,
- 0x14, 0xcc, 0xdc, 0x42, 0x37, 0x75, 0x51, 0xac, 0x56, 0x5e, 0xe8, 0x53, 0x03, 0x1d, 0x14, 0x68,
- 0x4a, 0x23, 0xf7, 0xa1, 0x16, 0x46, 0x32, 0x98, 0x25, 0x97, 0x90, 0xda, 0xc5, 0xf4, 0xb7, 0x8e,
- 0x94, 0x1e, 0xc7, 0xb7, 0x66, 0xec, 0x35, 0xa1, 0x1e, 0x7b, 0x97, 0xcb, 0xc8, 0xf3, 0xed, 0xdf,
- 0xa9, 0xbb, 0xe1, 0x2c, 0x98, 0x9b, 0xd0, 0x93, 0x1d, 0xb8, 0xc9, 0x19, 0x06, 0x0b, 0x53, 0x14,
- 0x33, 0x1e, 0x44, 0xbe, 0xbb, 0x4a, 0xca, 0x7e, 0x23, 0x85, 0xc6, 0x0a, 0x39, 0x12, 0xe4, 0xa7,
- 0xb0, 0xa9, 0xbe, 0xdf, 0x4d, 0x13, 0x1c, 0xe0, 0x8f, 0xe3, 0x0b, 0x6f, 0xe9, 0xae, 0x92, 0xa4,
- 0x7c, 0xa0, 0x18, 0x13, 0x43, 0x18, 0x1a, 0xfc, 0x48, 0xd8, 0xbf, 0x2f, 0x43, 0x67, 0x7f, 0x19,
- 0xb0, 0x50, 0x9a, 0x4f, 0xfb, 0xa1, 0xf9, 0xd0, 0x3b, 0x81, 0x49, 0x79, 0xe0, 0xe3, 0x8a, 0x37,
- 0xd3, 0x1e, 0xcc, 0xd5, 0xaf, 0x57, 0xbc, 0xbc, 0xeb, 0x34, 0x7a, 0x09, 0x53, 0x8d, 0xaf, 0x68,
- 0xcd, 0x67, 0x4c, 0x65, 0xc2, 0x8c, 0xaf, 0xab, 0x36, 0x13, 0x85, 0x53, 0xc3, 0x23, 0x9f, 0x01,
- 0xde, 0x80, 0x67, 0xc1, 0xdc, 0x64, 0x65, 0xc3, 0xb4, 0x6a, 0x16, 0x3f, 0x6a, 0x08, 0xf6, 0x03,
- 0xa8, 0x69, 0x63, 0xd2, 0x84, 0xea, 0x64, 0x8a, 0xa3, 0x49, 0x0d, 0x31, 0x7a, 0x32, 0x52, 0xbf,
- 0xd6, 0x8a, 0x38, 0xdd, 0x9c, 0x17, 0xce, 0x68, 0x3a, 0xb1, 0x4a, 0xf6, 0xb7, 0x50, 0x4f, 0x22,
- 0xd0, 0x84, 0xea, 0x81, 0x73, 0x78, 0x78, 0x6c, 0xe8, 0xce, 0xf3, 0x13, 0x67, 0x32, 0xb5, 0x8a,
- 0xe4, 0x06, 0xb4, 0xc6, 0x27, 0x93, 0x03, 0xd7, 0x19, 0xed, 0xee, 0x1d, 0x3a, 0x56, 0x89, 0x58,
- 0xd0, 0x56, 0x8a, 0xc1, 0x70, 0xa2, 0x34, 0x65, 0xd2, 0x05, 0x50, 0x9a, 0xf1, 0xee, 0xc9, 0xc4,
- 0xb1, 0x2a, 0xa9, 0x09, 0x75, 0x26, 0x27, 0x47, 0x8e, 0x55, 0x25, 0x1b, 0xd0, 0x39, 0x19, 0xe3,
- 0x98, 0x74, 0xf7, 0x8f, 0x47, 0x3f, 0x1f, 0x3e, 0xb5, 0xea, 0xf6, 0xdf, 0x8a, 0x70, 0xe3, 0x5a,
- 0x5d, 0xe5, 0x36, 0x2e, 0x1d, 0xdd, 0x3e, 0xd4, 0xb8, 0xfa, 0x13, 0xc5, 0xcc, 0xf9, 0xcd, 0x37,
- 0x14, 0xe3, 0x8e, 0xfe, 0x9b, 0x85, 0x1a, 0x66, 0x36, 0x18, 0x75, 0x0b, 0x9a, 0xc1, 0xf8, 0x25,
- 0x58, 0xec, 0xec, 0x0c, 0x87, 0xd9, 0x05, 0x73, 0x4d, 0x28, 0x2b, 0x6f, 0x0b, 0xe5, 0x8d, 0x94,
- 0xaa, 0xf5, 0xf6, 0x2d, 0xa8, 0xe9, 0xb7, 0x90, 0x1a, 0x94, 0x8e, 0xbf, 0xb6, 0x0a, 0xa4, 0x0e,
- 0x65, 0x87, 0x52, 0xab, 0x88, 0x3f, 0x11, 0xf3, 0xc5, 0x4e, 0x3e, 0x83, 0xca, 0x79, 0x10, 0xea,
- 0x8f, 0xe8, 0xf6, 0xdf, 0xbf, 0xde, 0x0c, 0x3b, 0x5f, 0x07, 0xa1, 0x4f, 0x15, 0xc5, 0xbe, 0x0d,
- 0x15, 0x94, 0xc8, 0xfb, 0xb0, 0x31, 0x18, 0x4e, 0xf6, 0x77, 0xe9, 0x60, 0x38, 0x7a, 0xea, 0x9a,
- 0xd4, 0x14, 0xee, 0x3b, 0x6f, 0xbc, 0x8e, 0x00, 0x6a, 0xfb, 0x87, 0xc7, 0x13, 0x67, 0x60, 0x15,
- 0x31, 0x4d, 0xc7, 0x63, 0x67, 0x84, 0x57, 0x53, 0x09, 0x05, 0x04, 0x50, 0x28, 0x63, 0x2e, 0x1d,
- 0x4a, 0x8f, 0xa9, 0x55, 0xb9, 0x7f, 0x0f, 0x2a, 0xb8, 0x89, 0xe2, 0x65, 0x35, 0x1c, 0x0d, 0xa7,
- 0xc3, 0xdd, 0xe9, 0x31, 0xb5, 0x0a, 0x28, 0x52, 0x67, 0x32, 0x3e, 0x1e, 0x0d, 0xf0, 0xb7, 0xfc,
- 0x5e, 0xef, 0xef, 0x2f, 0xb7, 0x8a, 0xdf, 0xbd, 0xdc, 0x2a, 0xfe, 0xe7, 0xe5, 0x56, 0xf1, 0x8f,
- 0xaf, 0xb6, 0x0a, 0xdf, 0xbd, 0xda, 0x2a, 0xfc, 0xf3, 0xd5, 0x56, 0xe1, 0xb4, 0xa6, 0x56, 0xa1,
- 0xc7, 0xff, 0x0d, 0x00, 0x00, 0xff, 0xff, 0x54, 0x4b, 0x9d, 0x7a, 0x0f, 0x13, 0x00, 0x00,
-}
-
-func (m *Version) Marshal() (dAtA []byte, err error) {
- size := m.Size()
- dAtA = make([]byte, size)
- n, err := m.MarshalToSizedBuffer(dAtA[:size])
- if err != nil {
- return nil, err
- }
- return dAtA[:n], nil
-}
-
-func (m *Version) MarshalTo(dAtA []byte) (int, error) {
- size := m.Size()
- return m.MarshalToSizedBuffer(dAtA[:size])
-}
-
-func (m *Version) MarshalToSizedBuffer(dAtA []byte) (int, error) {
- i := len(dAtA)
- _ = i
- var l int
- _ = l
- if m.Version != 0 {
- i = encodeVarintIntrospection(dAtA, i, uint64(m.Version))
- i--
- dAtA[i] = 0x8
- }
- return len(dAtA) - i, nil
-}
-
-func (m *ResultCounter) Marshal() (dAtA []byte, err error) {
- size := m.Size()
- dAtA = make([]byte, size)
- n, err := m.MarshalToSizedBuffer(dAtA[:size])
- if err != nil {
- return nil, err
- }
- return dAtA[:n], nil
-}
-
-func (m *ResultCounter) MarshalTo(dAtA []byte) (int, error) {
- size := m.Size()
- return m.MarshalToSizedBuffer(dAtA[:size])
-}
-
-func (m *ResultCounter) MarshalToSizedBuffer(dAtA []byte) (int, error) {
- i := len(dAtA)
- _ = i
- var l int
- _ = l
- if m.Err != 0 {
- i = encodeVarintIntrospection(dAtA, i, uint64(m.Err))
- i--
- dAtA[i] = 0x18
- }
- if m.Ok != 0 {
- i = encodeVarintIntrospection(dAtA, i, uint64(m.Ok))
- i--
- dAtA[i] = 0x10
- }
- if m.Total != 0 {
- i = encodeVarintIntrospection(dAtA, i, uint64(m.Total))
- i--
- dAtA[i] = 0x8
- }
- return len(dAtA) - i, nil
-}
-
-func (m *SlidingCounter) Marshal() (dAtA []byte, err error) {
- size := m.Size()
- dAtA = make([]byte, size)
- n, err := m.MarshalToSizedBuffer(dAtA[:size])
- if err != nil {
- return nil, err
- }
- return dAtA[:n], nil
-}
-
-func (m *SlidingCounter) MarshalTo(dAtA []byte) (int, error) {
- size := m.Size()
- return m.MarshalToSizedBuffer(dAtA[:size])
-}
-
-func (m *SlidingCounter) MarshalToSizedBuffer(dAtA []byte) (int, error) {
- i := len(dAtA)
- _ = i
- var l int
- _ = l
- if m.Over_24Hr != 0 {
- i = encodeVarintIntrospection(dAtA, i, uint64(m.Over_24Hr))
- i--
- dAtA[i] = 0x50
- }
- if m.Over_12Hr != 0 {
- i = encodeVarintIntrospection(dAtA, i, uint64(m.Over_12Hr))
- i--
- dAtA[i] = 0x48
- }
- if m.Over_8Hr != 0 {
- i = encodeVarintIntrospection(dAtA, i, uint64(m.Over_8Hr))
- i--
- dAtA[i] = 0x40
- }
- if m.Over_4Hr != 0 {
- i = encodeVarintIntrospection(dAtA, i, uint64(m.Over_4Hr))
- i--
- dAtA[i] = 0x38
- }
- if m.Over_2Hr != 0 {
- i = encodeVarintIntrospection(dAtA, i, uint64(m.Over_2Hr))
- i--
- dAtA[i] = 0x30
- }
- if m.Over_1Hr != 0 {
- i = encodeVarintIntrospection(dAtA, i, uint64(m.Over_1Hr))
- i--
- dAtA[i] = 0x28
- }
- if m.Over_30M != 0 {
- i = encodeVarintIntrospection(dAtA, i, uint64(m.Over_30M))
- i--
- dAtA[i] = 0x20
- }
- if m.Over_15M != 0 {
- i = encodeVarintIntrospection(dAtA, i, uint64(m.Over_15M))
- i--
- dAtA[i] = 0x18
- }
- if m.Over_5M != 0 {
- i = encodeVarintIntrospection(dAtA, i, uint64(m.Over_5M))
- i--
- dAtA[i] = 0x10
- }
- if m.Over_1M != 0 {
- i = encodeVarintIntrospection(dAtA, i, uint64(m.Over_1M))
- i--
- dAtA[i] = 0x8
- }
- return len(dAtA) - i, nil
-}
-
-func (m *DataGauge) Marshal() (dAtA []byte, err error) {
- size := m.Size()
- dAtA = make([]byte, size)
- n, err := m.MarshalToSizedBuffer(dAtA[:size])
- if err != nil {
- return nil, err
- }
- return dAtA[:n], nil
-}
-
-func (m *DataGauge) MarshalTo(dAtA []byte) (int, error) {
- size := m.Size()
- return m.MarshalToSizedBuffer(dAtA[:size])
-}
-
-func (m *DataGauge) MarshalToSizedBuffer(dAtA []byte) (int, error) {
- i := len(dAtA)
- _ = i
- var l int
- _ = l
- if m.InstBw != 0 {
- i = encodeVarintIntrospection(dAtA, i, uint64(m.InstBw))
- i--
- dAtA[i] = 0x18
- }
- if m.CumPackets != 0 {
- i = encodeVarintIntrospection(dAtA, i, uint64(m.CumPackets))
- i--
- dAtA[i] = 0x10
- }
- if m.CumBytes != 0 {
- i = encodeVarintIntrospection(dAtA, i, uint64(m.CumBytes))
- i--
- dAtA[i] = 0x8
- }
- return len(dAtA) - i, nil
-}
-
-func (m *EventType) Marshal() (dAtA []byte, err error) {
- size := m.Size()
- dAtA = make([]byte, size)
- n, err := m.MarshalToSizedBuffer(dAtA[:size])
- if err != nil {
- return nil, err
- }
- return dAtA[:n], nil
-}
-
-func (m *EventType) MarshalTo(dAtA []byte) (int, error) {
- size := m.Size()
- return m.MarshalToSizedBuffer(dAtA[:size])
-}
-
-func (m *EventType) MarshalToSizedBuffer(dAtA []byte) (int, error) {
- i := len(dAtA)
- _ = i
- var l int
- _ = l
- if len(m.PropertyTypes) > 0 {
- for iNdEx := len(m.PropertyTypes) - 1; iNdEx >= 0; iNdEx-- {
- {
- size, err := m.PropertyTypes[iNdEx].MarshalToSizedBuffer(dAtA[:i])
- if err != nil {
- return 0, err
- }
- i -= size
- i = encodeVarintIntrospection(dAtA, i, uint64(size))
- }
- i--
- dAtA[i] = 0x12
- }
- }
- if len(m.Name) > 0 {
- i -= len(m.Name)
- copy(dAtA[i:], m.Name)
- i = encodeVarintIntrospection(dAtA, i, uint64(len(m.Name)))
- i--
- dAtA[i] = 0xa
- }
- return len(dAtA) - i, nil
-}
-
-func (m *EventType_EventProperty) Marshal() (dAtA []byte, err error) {
- size := m.Size()
- dAtA = make([]byte, size)
- n, err := m.MarshalToSizedBuffer(dAtA[:size])
- if err != nil {
- return nil, err
- }
- return dAtA[:n], nil
-}
-
-func (m *EventType_EventProperty) MarshalTo(dAtA []byte) (int, error) {
- size := m.Size()
- return m.MarshalToSizedBuffer(dAtA[:size])
-}
-
-func (m *EventType_EventProperty) MarshalToSizedBuffer(dAtA []byte) (int, error) {
- i := len(dAtA)
- _ = i
- var l int
- _ = l
- if m.HasMultiple {
- i--
- if m.HasMultiple {
- dAtA[i] = 1
- } else {
- dAtA[i] = 0
- }
- i--
- dAtA[i] = 0x18
- }
- if m.Type != 0 {
- i = encodeVarintIntrospection(dAtA, i, uint64(m.Type))
- i--
- dAtA[i] = 0x10
- }
- if len(m.Name) > 0 {
- i -= len(m.Name)
- copy(dAtA[i:], m.Name)
- i = encodeVarintIntrospection(dAtA, i, uint64(len(m.Name)))
- i--
- dAtA[i] = 0xa
- }
- return len(dAtA) - i, nil
-}
-
-func (m *Runtime) Marshal() (dAtA []byte, err error) {
- size := m.Size()
- dAtA = make([]byte, size)
- n, err := m.MarshalToSizedBuffer(dAtA[:size])
- if err != nil {
- return nil, err
- }
- return dAtA[:n], nil
-}
-
-func (m *Runtime) MarshalTo(dAtA []byte) (int, error) {
- size := m.Size()
- return m.MarshalToSizedBuffer(dAtA[:size])
-}
-
-func (m *Runtime) MarshalToSizedBuffer(dAtA []byte) (int, error) {
- i := len(dAtA)
- _ = i
- var l int
- _ = l
- if len(m.EventTypes) > 0 {
- for iNdEx := len(m.EventTypes) - 1; iNdEx >= 0; iNdEx-- {
- {
- size, err := m.EventTypes[iNdEx].MarshalToSizedBuffer(dAtA[:i])
- if err != nil {
- return 0, err
- }
- i -= size
- i = encodeVarintIntrospection(dAtA, i, uint64(size))
- }
- i--
- dAtA[i] = 0x3a
- }
- }
- if len(m.PeerId) > 0 {
- i -= len(m.PeerId)
- copy(dAtA[i:], m.PeerId)
- i = encodeVarintIntrospection(dAtA, i, uint64(len(m.PeerId)))
- i--
- dAtA[i] = 0x22
- }
- if len(m.Platform) > 0 {
- i -= len(m.Platform)
- copy(dAtA[i:], m.Platform)
- i = encodeVarintIntrospection(dAtA, i, uint64(len(m.Platform)))
- i--
- dAtA[i] = 0x1a
- }
- if len(m.Version) > 0 {
- i -= len(m.Version)
- copy(dAtA[i:], m.Version)
- i = encodeVarintIntrospection(dAtA, i, uint64(len(m.Version)))
- i--
- dAtA[i] = 0x12
- }
- if len(m.Implementation) > 0 {
- i -= len(m.Implementation)
- copy(dAtA[i:], m.Implementation)
- i = encodeVarintIntrospection(dAtA, i, uint64(len(m.Implementation)))
- i--
- dAtA[i] = 0xa
- }
- return len(dAtA) - i, nil
-}
-
-func (m *EndpointPair) Marshal() (dAtA []byte, err error) {
- size := m.Size()
- dAtA = make([]byte, size)
- n, err := m.MarshalToSizedBuffer(dAtA[:size])
- if err != nil {
- return nil, err
- }
- return dAtA[:n], nil
-}
-
-func (m *EndpointPair) MarshalTo(dAtA []byte) (int, error) {
- size := m.Size()
- return m.MarshalToSizedBuffer(dAtA[:size])
-}
-
-func (m *EndpointPair) MarshalToSizedBuffer(dAtA []byte) (int, error) {
- i := len(dAtA)
- _ = i
- var l int
- _ = l
- if len(m.DstMultiaddr) > 0 {
- i -= len(m.DstMultiaddr)
- copy(dAtA[i:], m.DstMultiaddr)
- i = encodeVarintIntrospection(dAtA, i, uint64(len(m.DstMultiaddr)))
- i--
- dAtA[i] = 0x12
- }
- if len(m.SrcMultiaddr) > 0 {
- i -= len(m.SrcMultiaddr)
- copy(dAtA[i:], m.SrcMultiaddr)
- i = encodeVarintIntrospection(dAtA, i, uint64(len(m.SrcMultiaddr)))
- i--
- dAtA[i] = 0xa
- }
- return len(dAtA) - i, nil
-}
-
-func (m *Traffic) Marshal() (dAtA []byte, err error) {
- size := m.Size()
- dAtA = make([]byte, size)
- n, err := m.MarshalToSizedBuffer(dAtA[:size])
- if err != nil {
- return nil, err
- }
- return dAtA[:n], nil
-}
-
-func (m *Traffic) MarshalTo(dAtA []byte) (int, error) {
- size := m.Size()
- return m.MarshalToSizedBuffer(dAtA[:size])
-}
-
-func (m *Traffic) MarshalToSizedBuffer(dAtA []byte) (int, error) {
- i := len(dAtA)
- _ = i
- var l int
- _ = l
- if m.TrafficOut != nil {
- {
- size, err := m.TrafficOut.MarshalToSizedBuffer(dAtA[:i])
- if err != nil {
- return 0, err
- }
- i -= size
- i = encodeVarintIntrospection(dAtA, i, uint64(size))
- }
- i--
- dAtA[i] = 0x12
- }
- if m.TrafficIn != nil {
- {
- size, err := m.TrafficIn.MarshalToSizedBuffer(dAtA[:i])
- if err != nil {
- return 0, err
- }
- i -= size
- i = encodeVarintIntrospection(dAtA, i, uint64(size))
- }
- i--
- dAtA[i] = 0xa
- }
- return len(dAtA) - i, nil
-}
-
-func (m *StreamList) Marshal() (dAtA []byte, err error) {
- size := m.Size()
- dAtA = make([]byte, size)
- n, err := m.MarshalToSizedBuffer(dAtA[:size])
- if err != nil {
- return nil, err
- }
- return dAtA[:n], nil
-}
-
-func (m *StreamList) MarshalTo(dAtA []byte) (int, error) {
- size := m.Size()
- return m.MarshalToSizedBuffer(dAtA[:size])
-}
-
-func (m *StreamList) MarshalToSizedBuffer(dAtA []byte) (int, error) {
- i := len(dAtA)
- _ = i
- var l int
- _ = l
- if len(m.Streams) > 0 {
- for iNdEx := len(m.Streams) - 1; iNdEx >= 0; iNdEx-- {
- {
- size, err := m.Streams[iNdEx].MarshalToSizedBuffer(dAtA[:i])
- if err != nil {
- return 0, err
- }
- i -= size
- i = encodeVarintIntrospection(dAtA, i, uint64(size))
- }
- i--
- dAtA[i] = 0x12
- }
- }
- if len(m.StreamIds) > 0 {
- for iNdEx := len(m.StreamIds) - 1; iNdEx >= 0; iNdEx-- {
- i -= len(m.StreamIds[iNdEx])
- copy(dAtA[i:], m.StreamIds[iNdEx])
- i = encodeVarintIntrospection(dAtA, i, uint64(len(m.StreamIds[iNdEx])))
- i--
- dAtA[i] = 0xa
- }
- }
- return len(dAtA) - i, nil
-}
-
-func (m *Connection) Marshal() (dAtA []byte, err error) {
- size := m.Size()
- dAtA = make([]byte, size)
- n, err := m.MarshalToSizedBuffer(dAtA[:size])
- if err != nil {
- return nil, err
- }
- return dAtA[:n], nil
-}
-
-func (m *Connection) MarshalTo(dAtA []byte) (int, error) {
- size := m.Size()
- return m.MarshalToSizedBuffer(dAtA[:size])
-}
-
-func (m *Connection) MarshalToSizedBuffer(dAtA []byte) (int, error) {
- i := len(dAtA)
- _ = i
- var l int
- _ = l
- if len(m.UserProvidedTags) > 0 {
- for iNdEx := len(m.UserProvidedTags) - 1; iNdEx >= 0; iNdEx-- {
- i -= len(m.UserProvidedTags[iNdEx])
- copy(dAtA[i:], m.UserProvidedTags[iNdEx])
- i = encodeVarintIntrospection(dAtA, i, uint64(len(m.UserProvidedTags[iNdEx])))
- i--
- dAtA[i] = 0x6
- i--
- dAtA[i] = 0x9a
- }
- }
- if m.RelayedOver != nil {
- {
- size := m.RelayedOver.Size()
- i -= size
- if _, err := m.RelayedOver.MarshalTo(dAtA[i:]); err != nil {
- return 0, err
- }
- }
- }
- if m.Streams != nil {
- {
- size, err := m.Streams.MarshalToSizedBuffer(dAtA[:i])
- if err != nil {
- return 0, err
- }
- i -= size
- i = encodeVarintIntrospection(dAtA, i, uint64(size))
- }
- i--
- dAtA[i] = 0x5a
- }
- if m.LatencyNs != 0 {
- i = encodeVarintIntrospection(dAtA, i, uint64(m.LatencyNs))
- i--
- dAtA[i] = 0x50
- }
- if m.Attribs != nil {
- {
- size, err := m.Attribs.MarshalToSizedBuffer(dAtA[:i])
- if err != nil {
- return 0, err
- }
- i -= size
- i = encodeVarintIntrospection(dAtA, i, uint64(size))
- }
- i--
- dAtA[i] = 0x4a
- }
- if m.Traffic != nil {
- {
- size, err := m.Traffic.MarshalToSizedBuffer(dAtA[:i])
- if err != nil {
- return 0, err
- }
- i -= size
- i = encodeVarintIntrospection(dAtA, i, uint64(size))
- }
- i--
- dAtA[i] = 0x42
- }
- if m.Role != 0 {
- i = encodeVarintIntrospection(dAtA, i, uint64(m.Role))
- i--
- dAtA[i] = 0x38
- }
- if m.Timeline != nil {
- {
- size, err := m.Timeline.MarshalToSizedBuffer(dAtA[:i])
- if err != nil {
- return 0, err
- }
- i -= size
- i = encodeVarintIntrospection(dAtA, i, uint64(size))
- }
- i--
- dAtA[i] = 0x32
- }
- if m.Endpoints != nil {
- {
- size, err := m.Endpoints.MarshalToSizedBuffer(dAtA[:i])
- if err != nil {
- return 0, err
- }
- i -= size
- i = encodeVarintIntrospection(dAtA, i, uint64(size))
- }
- i--
- dAtA[i] = 0x2a
- }
- if len(m.TransportId) > 0 {
- i -= len(m.TransportId)
- copy(dAtA[i:], m.TransportId)
- i = encodeVarintIntrospection(dAtA, i, uint64(len(m.TransportId)))
- i--
- dAtA[i] = 0x22
- }
- if m.Status != 0 {
- i = encodeVarintIntrospection(dAtA, i, uint64(m.Status))
- i--
- dAtA[i] = 0x18
- }
- if len(m.PeerId) > 0 {
- i -= len(m.PeerId)
- copy(dAtA[i:], m.PeerId)
- i = encodeVarintIntrospection(dAtA, i, uint64(len(m.PeerId)))
- i--
- dAtA[i] = 0x12
- }
- if len(m.Id) > 0 {
- i -= len(m.Id)
- copy(dAtA[i:], m.Id)
- i = encodeVarintIntrospection(dAtA, i, uint64(len(m.Id)))
- i--
- dAtA[i] = 0xa
- }
- return len(dAtA) - i, nil
-}
-
-func (m *Connection_ConnId) MarshalTo(dAtA []byte) (int, error) {
- size := m.Size()
- return m.MarshalToSizedBuffer(dAtA[:size])
-}
-
-func (m *Connection_ConnId) MarshalToSizedBuffer(dAtA []byte) (int, error) {
- i := len(dAtA)
- if m.ConnId != nil {
- i -= len(m.ConnId)
- copy(dAtA[i:], m.ConnId)
- i = encodeVarintIntrospection(dAtA, i, uint64(len(m.ConnId)))
- i--
- dAtA[i] = 0x1
- i--
- dAtA[i] = 0x82
- }
- return len(dAtA) - i, nil
-}
-func (m *Connection_Conn) MarshalTo(dAtA []byte) (int, error) {
- size := m.Size()
- return m.MarshalToSizedBuffer(dAtA[:size])
-}
-
-func (m *Connection_Conn) MarshalToSizedBuffer(dAtA []byte) (int, error) {
- i := len(dAtA)
- if m.Conn != nil {
- {
- size, err := m.Conn.MarshalToSizedBuffer(dAtA[:i])
- if err != nil {
- return 0, err
- }
- i -= size
- i = encodeVarintIntrospection(dAtA, i, uint64(size))
- }
- i--
- dAtA[i] = 0x1
- i--
- dAtA[i] = 0x8a
- }
- return len(dAtA) - i, nil
-}
-func (m *Connection_Timeline) Marshal() (dAtA []byte, err error) {
- size := m.Size()
- dAtA = make([]byte, size)
- n, err := m.MarshalToSizedBuffer(dAtA[:size])
- if err != nil {
- return nil, err
- }
- return dAtA[:n], nil
-}
-
-func (m *Connection_Timeline) MarshalTo(dAtA []byte) (int, error) {
- size := m.Size()
- return m.MarshalToSizedBuffer(dAtA[:size])
-}
-
-func (m *Connection_Timeline) MarshalToSizedBuffer(dAtA []byte) (int, error) {
- i := len(dAtA)
- _ = i
- var l int
- _ = l
- if m.CloseTs != 0 {
- i = encodeVarintIntrospection(dAtA, i, uint64(m.CloseTs))
- i--
- dAtA[i] = 0x18
- }
- if m.UpgradedTs != 0 {
- i = encodeVarintIntrospection(dAtA, i, uint64(m.UpgradedTs))
- i--
- dAtA[i] = 0x10
- }
- if m.OpenTs != 0 {
- i = encodeVarintIntrospection(dAtA, i, uint64(m.OpenTs))
- i--
- dAtA[i] = 0x8
- }
- return len(dAtA) - i, nil
-}
-
-func (m *Connection_Attributes) Marshal() (dAtA []byte, err error) {
- size := m.Size()
- dAtA = make([]byte, size)
- n, err := m.MarshalToSizedBuffer(dAtA[:size])
- if err != nil {
- return nil, err
- }
- return dAtA[:n], nil
-}
-
-func (m *Connection_Attributes) MarshalTo(dAtA []byte) (int, error) {
- size := m.Size()
- return m.MarshalToSizedBuffer(dAtA[:size])
-}
-
-func (m *Connection_Attributes) MarshalToSizedBuffer(dAtA []byte) (int, error) {
- i := len(dAtA)
- _ = i
- var l int
- _ = l
- if len(m.Encryption) > 0 {
- i -= len(m.Encryption)
- copy(dAtA[i:], m.Encryption)
- i = encodeVarintIntrospection(dAtA, i, uint64(len(m.Encryption)))
- i--
- dAtA[i] = 0x12
- }
- if len(m.Multiplexer) > 0 {
- i -= len(m.Multiplexer)
- copy(dAtA[i:], m.Multiplexer)
- i = encodeVarintIntrospection(dAtA, i, uint64(len(m.Multiplexer)))
- i--
- dAtA[i] = 0xa
- }
- return len(dAtA) - i, nil
-}
-
-func (m *Stream) Marshal() (dAtA []byte, err error) {
- size := m.Size()
- dAtA = make([]byte, size)
- n, err := m.MarshalToSizedBuffer(dAtA[:size])
- if err != nil {
- return nil, err
- }
- return dAtA[:n], nil
-}
-
-func (m *Stream) MarshalTo(dAtA []byte) (int, error) {
- size := m.Size()
- return m.MarshalToSizedBuffer(dAtA[:size])
-}
-
-func (m *Stream) MarshalToSizedBuffer(dAtA []byte) (int, error) {
- i := len(dAtA)
- _ = i
- var l int
- _ = l
- if len(m.UserProvidedTags) > 0 {
- for iNdEx := len(m.UserProvidedTags) - 1; iNdEx >= 0; iNdEx-- {
- i -= len(m.UserProvidedTags[iNdEx])
- copy(dAtA[i:], m.UserProvidedTags[iNdEx])
- i = encodeVarintIntrospection(dAtA, i, uint64(len(m.UserProvidedTags[iNdEx])))
- i--
- dAtA[i] = 0x6
- i--
- dAtA[i] = 0x9a
- }
- }
- if m.LatencyNs != 0 {
- i = encodeVarintIntrospection(dAtA, i, uint64(m.LatencyNs))
- i--
- dAtA[i] = 0x1
- i--
- dAtA[i] = 0x80
- }
- if m.Status != 0 {
- i = encodeVarintIntrospection(dAtA, i, uint64(m.Status))
- i--
- dAtA[i] = 0x38
- }
- if m.Timeline != nil {
- {
- size, err := m.Timeline.MarshalToSizedBuffer(dAtA[:i])
- if err != nil {
- return 0, err
- }
- i -= size
- i = encodeVarintIntrospection(dAtA, i, uint64(size))
- }
- i--
- dAtA[i] = 0x32
- }
- if m.Conn != nil {
- {
- size, err := m.Conn.MarshalToSizedBuffer(dAtA[:i])
- if err != nil {
- return 0, err
- }
- i -= size
- i = encodeVarintIntrospection(dAtA, i, uint64(size))
- }
- i--
- dAtA[i] = 0x2a
- }
- if m.Traffic != nil {
- {
- size, err := m.Traffic.MarshalToSizedBuffer(dAtA[:i])
- if err != nil {
- return 0, err
- }
- i -= size
- i = encodeVarintIntrospection(dAtA, i, uint64(size))
- }
- i--
- dAtA[i] = 0x22
- }
- if m.Role != 0 {
- i = encodeVarintIntrospection(dAtA, i, uint64(m.Role))
- i--
- dAtA[i] = 0x18
- }
- if len(m.Protocol) > 0 {
- i -= len(m.Protocol)
- copy(dAtA[i:], m.Protocol)
- i = encodeVarintIntrospection(dAtA, i, uint64(len(m.Protocol)))
- i--
- dAtA[i] = 0x12
- }
- if len(m.Id) > 0 {
- i -= len(m.Id)
- copy(dAtA[i:], m.Id)
- i = encodeVarintIntrospection(dAtA, i, uint64(len(m.Id)))
- i--
- dAtA[i] = 0xa
- }
- return len(dAtA) - i, nil
-}
-
-func (m *Stream_ConnectionRef) Marshal() (dAtA []byte, err error) {
- size := m.Size()
- dAtA = make([]byte, size)
- n, err := m.MarshalToSizedBuffer(dAtA[:size])
- if err != nil {
- return nil, err
- }
- return dAtA[:n], nil
-}
-
-func (m *Stream_ConnectionRef) MarshalTo(dAtA []byte) (int, error) {
- size := m.Size()
- return m.MarshalToSizedBuffer(dAtA[:size])
-}
-
-func (m *Stream_ConnectionRef) MarshalToSizedBuffer(dAtA []byte) (int, error) {
- i := len(dAtA)
- _ = i
- var l int
- _ = l
- if m.Connection != nil {
- {
- size := m.Connection.Size()
- i -= size
- if _, err := m.Connection.MarshalTo(dAtA[i:]); err != nil {
- return 0, err
- }
- }
- }
- return len(dAtA) - i, nil
-}
-
-func (m *Stream_ConnectionRef_Conn) MarshalTo(dAtA []byte) (int, error) {
- size := m.Size()
- return m.MarshalToSizedBuffer(dAtA[:size])
-}
-
-func (m *Stream_ConnectionRef_Conn) MarshalToSizedBuffer(dAtA []byte) (int, error) {
- i := len(dAtA)
- if m.Conn != nil {
- {
- size, err := m.Conn.MarshalToSizedBuffer(dAtA[:i])
- if err != nil {
- return 0, err
- }
- i -= size
- i = encodeVarintIntrospection(dAtA, i, uint64(size))
- }
- i--
- dAtA[i] = 0xa
- }
- return len(dAtA) - i, nil
-}
-func (m *Stream_ConnectionRef_ConnId) MarshalTo(dAtA []byte) (int, error) {
- size := m.Size()
- return m.MarshalToSizedBuffer(dAtA[:size])
-}
-
-func (m *Stream_ConnectionRef_ConnId) MarshalToSizedBuffer(dAtA []byte) (int, error) {
- i := len(dAtA)
- if m.ConnId != nil {
- i -= len(m.ConnId)
- copy(dAtA[i:], m.ConnId)
- i = encodeVarintIntrospection(dAtA, i, uint64(len(m.ConnId)))
- i--
- dAtA[i] = 0x12
- }
- return len(dAtA) - i, nil
-}
-func (m *Stream_Timeline) Marshal() (dAtA []byte, err error) {
- size := m.Size()
- dAtA = make([]byte, size)
- n, err := m.MarshalToSizedBuffer(dAtA[:size])
- if err != nil {
- return nil, err
- }
- return dAtA[:n], nil
-}
-
-func (m *Stream_Timeline) MarshalTo(dAtA []byte) (int, error) {
- size := m.Size()
- return m.MarshalToSizedBuffer(dAtA[:size])
-}
-
-func (m *Stream_Timeline) MarshalToSizedBuffer(dAtA []byte) (int, error) {
- i := len(dAtA)
- _ = i
- var l int
- _ = l
- if m.CloseTs != 0 {
- i = encodeVarintIntrospection(dAtA, i, uint64(m.CloseTs))
- i--
- dAtA[i] = 0x10
- }
- if m.OpenTs != 0 {
- i = encodeVarintIntrospection(dAtA, i, uint64(m.OpenTs))
- i--
- dAtA[i] = 0x8
- }
- return len(dAtA) - i, nil
-}
-
-func (m *DHT) Marshal() (dAtA []byte, err error) {
- size := m.Size()
- dAtA = make([]byte, size)
- n, err := m.MarshalToSizedBuffer(dAtA[:size])
- if err != nil {
- return nil, err
- }
- return dAtA[:n], nil
-}
-
-func (m *DHT) MarshalTo(dAtA []byte) (int, error) {
- size := m.Size()
- return m.MarshalToSizedBuffer(dAtA[:size])
-}
-
-func (m *DHT) MarshalToSizedBuffer(dAtA []byte) (int, error) {
- i := len(dAtA)
- _ = i
- var l int
- _ = l
- if m.OutgoingQueries != nil {
- {
- size, err := m.OutgoingQueries.MarshalToSizedBuffer(dAtA[:i])
- if err != nil {
- return 0, err
- }
- i -= size
- i = encodeVarintIntrospection(dAtA, i, uint64(size))
- }
- i--
- dAtA[i] = 0x3a
- }
- if m.IncomingQueries != nil {
- {
- size, err := m.IncomingQueries.MarshalToSizedBuffer(dAtA[:i])
- if err != nil {
- return 0, err
- }
- i -= size
- i = encodeVarintIntrospection(dAtA, i, uint64(size))
- }
- i--
- dAtA[i] = 0x32
- }
- if len(m.Buckets) > 0 {
- for iNdEx := len(m.Buckets) - 1; iNdEx >= 0; iNdEx-- {
- {
- size, err := m.Buckets[iNdEx].MarshalToSizedBuffer(dAtA[:i])
- if err != nil {
- return 0, err
- }
- i -= size
- i = encodeVarintIntrospection(dAtA, i, uint64(size))
- }
- i--
- dAtA[i] = 0x2a
- }
- }
- if m.Params != nil {
- {
- size, err := m.Params.MarshalToSizedBuffer(dAtA[:i])
- if err != nil {
- return 0, err
- }
- i -= size
- i = encodeVarintIntrospection(dAtA, i, uint64(size))
- }
- i--
- dAtA[i] = 0x22
- }
- if m.StartTs != 0 {
- i = encodeVarintIntrospection(dAtA, i, uint64(m.StartTs))
- i--
- dAtA[i] = 0x18
- }
- if m.Enabled {
- i--
- if m.Enabled {
- dAtA[i] = 1
- } else {
- dAtA[i] = 0
- }
- i--
- dAtA[i] = 0x10
- }
- if len(m.Protocol) > 0 {
- i -= len(m.Protocol)
- copy(dAtA[i:], m.Protocol)
- i = encodeVarintIntrospection(dAtA, i, uint64(len(m.Protocol)))
- i--
- dAtA[i] = 0xa
- }
- return len(dAtA) - i, nil
-}
-
-func (m *DHT_Params) Marshal() (dAtA []byte, err error) {
- size := m.Size()
- dAtA = make([]byte, size)
- n, err := m.MarshalToSizedBuffer(dAtA[:size])
- if err != nil {
- return nil, err
- }
- return dAtA[:n], nil
-}
-
-func (m *DHT_Params) MarshalTo(dAtA []byte) (int, error) {
- size := m.Size()
- return m.MarshalToSizedBuffer(dAtA[:size])
-}
-
-func (m *DHT_Params) MarshalToSizedBuffer(dAtA []byte) (int, error) {
- i := len(dAtA)
- _ = i
- var l int
- _ = l
- if m.Beta != 0 {
- i = encodeVarintIntrospection(dAtA, i, uint64(m.Beta))
- i--
- dAtA[i] = 0x20
- }
- if m.DisjointPaths != 0 {
- i = encodeVarintIntrospection(dAtA, i, uint64(m.DisjointPaths))
- i--
- dAtA[i] = 0x18
- }
- if m.Alpha != 0 {
- i = encodeVarintIntrospection(dAtA, i, uint64(m.Alpha))
- i--
- dAtA[i] = 0x10
- }
- if m.K != 0 {
- i = encodeVarintIntrospection(dAtA, i, uint64(m.K))
- i--
- dAtA[i] = 0x8
- }
- return len(dAtA) - i, nil
-}
-
-func (m *DHT_PeerInDHT) Marshal() (dAtA []byte, err error) {
- size := m.Size()
- dAtA = make([]byte, size)
- n, err := m.MarshalToSizedBuffer(dAtA[:size])
- if err != nil {
- return nil, err
- }
- return dAtA[:n], nil
-}
-
-func (m *DHT_PeerInDHT) MarshalTo(dAtA []byte) (int, error) {
- size := m.Size()
- return m.MarshalToSizedBuffer(dAtA[:size])
-}
-
-func (m *DHT_PeerInDHT) MarshalToSizedBuffer(dAtA []byte) (int, error) {
- i := len(dAtA)
- _ = i
- var l int
- _ = l
- if m.AgeInBucket != 0 {
- i = encodeVarintIntrospection(dAtA, i, uint64(m.AgeInBucket))
- i--
- dAtA[i] = 0x18
- }
- if m.Status != 0 {
- i = encodeVarintIntrospection(dAtA, i, uint64(m.Status))
- i--
- dAtA[i] = 0x10
- }
- if len(m.PeerId) > 0 {
- i -= len(m.PeerId)
- copy(dAtA[i:], m.PeerId)
- i = encodeVarintIntrospection(dAtA, i, uint64(len(m.PeerId)))
- i--
- dAtA[i] = 0xa
- }
- return len(dAtA) - i, nil
-}
-
-func (m *DHT_Bucket) Marshal() (dAtA []byte, err error) {
- size := m.Size()
- dAtA = make([]byte, size)
- n, err := m.MarshalToSizedBuffer(dAtA[:size])
- if err != nil {
- return nil, err
- }
- return dAtA[:n], nil
-}
-
-func (m *DHT_Bucket) MarshalTo(dAtA []byte) (int, error) {
- size := m.Size()
- return m.MarshalToSizedBuffer(dAtA[:size])
-}
-
-func (m *DHT_Bucket) MarshalToSizedBuffer(dAtA []byte) (int, error) {
- i := len(dAtA)
- _ = i
- var l int
- _ = l
- if len(m.Peers) > 0 {
- for iNdEx := len(m.Peers) - 1; iNdEx >= 0; iNdEx-- {
- {
- size, err := m.Peers[iNdEx].MarshalToSizedBuffer(dAtA[:i])
- if err != nil {
- return 0, err
- }
- i -= size
- i = encodeVarintIntrospection(dAtA, i, uint64(size))
- }
- i--
- dAtA[i] = 0x12
- }
- }
- if m.Cpl != 0 {
- i = encodeVarintIntrospection(dAtA, i, uint64(m.Cpl))
- i--
- dAtA[i] = 0x8
- }
- return len(dAtA) - i, nil
-}
-
-func (m *DHT_QueryGauge) Marshal() (dAtA []byte, err error) {
- size := m.Size()
- dAtA = make([]byte, size)
- n, err := m.MarshalToSizedBuffer(dAtA[:size])
- if err != nil {
- return nil, err
- }
- return dAtA[:n], nil
-}
-
-func (m *DHT_QueryGauge) MarshalTo(dAtA []byte) (int, error) {
- size := m.Size()
- return m.MarshalToSizedBuffer(dAtA[:size])
-}
-
-func (m *DHT_QueryGauge) MarshalToSizedBuffer(dAtA []byte) (int, error) {
- i := len(dAtA)
- _ = i
- var l int
- _ = l
- if m.Timeout != 0 {
- i = encodeVarintIntrospection(dAtA, i, uint64(m.Timeout))
- i--
- dAtA[i] = 0x18
- }
- if m.Error != 0 {
- i = encodeVarintIntrospection(dAtA, i, uint64(m.Error))
- i--
- dAtA[i] = 0x10
- }
- if m.Success != 0 {
- i = encodeVarintIntrospection(dAtA, i, uint64(m.Success))
- i--
- dAtA[i] = 0x8
- }
- return len(dAtA) - i, nil
-}
-
-func (m *Subsystems) Marshal() (dAtA []byte, err error) {
- size := m.Size()
- dAtA = make([]byte, size)
- n, err := m.MarshalToSizedBuffer(dAtA[:size])
- if err != nil {
- return nil, err
- }
- return dAtA[:n], nil
-}
-
-func (m *Subsystems) MarshalTo(dAtA []byte) (int, error) {
- size := m.Size()
- return m.MarshalToSizedBuffer(dAtA[:size])
-}
-
-func (m *Subsystems) MarshalToSizedBuffer(dAtA []byte) (int, error) {
- i := len(dAtA)
- _ = i
- var l int
- _ = l
- if m.Dht != nil {
- {
- size, err := m.Dht.MarshalToSizedBuffer(dAtA[:i])
- if err != nil {
- return 0, err
- }
- i -= size
- i = encodeVarintIntrospection(dAtA, i, uint64(size))
- }
- i--
- dAtA[i] = 0x12
- }
- if len(m.Connections) > 0 {
- for iNdEx := len(m.Connections) - 1; iNdEx >= 0; iNdEx-- {
- {
- size, err := m.Connections[iNdEx].MarshalToSizedBuffer(dAtA[:i])
- if err != nil {
- return 0, err
- }
- i -= size
- i = encodeVarintIntrospection(dAtA, i, uint64(size))
- }
- i--
- dAtA[i] = 0xa
- }
- }
- return len(dAtA) - i, nil
-}
-
-func (m *State) Marshal() (dAtA []byte, err error) {
- size := m.Size()
- dAtA = make([]byte, size)
- n, err := m.MarshalToSizedBuffer(dAtA[:size])
- if err != nil {
- return nil, err
- }
- return dAtA[:n], nil
-}
-
-func (m *State) MarshalTo(dAtA []byte) (int, error) {
- size := m.Size()
- return m.MarshalToSizedBuffer(dAtA[:size])
-}
-
-func (m *State) MarshalToSizedBuffer(dAtA []byte) (int, error) {
- i := len(dAtA)
- _ = i
- var l int
- _ = l
- if m.SnapshotDurationMs != 0 {
- i = encodeVarintIntrospection(dAtA, i, uint64(m.SnapshotDurationMs))
- i--
- dAtA[i] = 0x28
- }
- if m.StartTs != 0 {
- i = encodeVarintIntrospection(dAtA, i, uint64(m.StartTs))
- i--
- dAtA[i] = 0x20
- }
- if m.InstantTs != 0 {
- i = encodeVarintIntrospection(dAtA, i, uint64(m.InstantTs))
- i--
- dAtA[i] = 0x18
- }
- if m.Traffic != nil {
- {
- size, err := m.Traffic.MarshalToSizedBuffer(dAtA[:i])
- if err != nil {
- return 0, err
- }
- i -= size
- i = encodeVarintIntrospection(dAtA, i, uint64(size))
- }
- i--
- dAtA[i] = 0x12
- }
- if m.Subsystems != nil {
- {
- size, err := m.Subsystems.MarshalToSizedBuffer(dAtA[:i])
- if err != nil {
- return 0, err
- }
- i -= size
- i = encodeVarintIntrospection(dAtA, i, uint64(size))
- }
- i--
- dAtA[i] = 0xa
- }
- return len(dAtA) - i, nil
-}
-
-func (m *Event) Marshal() (dAtA []byte, err error) {
- size := m.Size()
- dAtA = make([]byte, size)
- n, err := m.MarshalToSizedBuffer(dAtA[:size])
- if err != nil {
- return nil, err
- }
- return dAtA[:n], nil
-}
-
-func (m *Event) MarshalTo(dAtA []byte) (int, error) {
- size := m.Size()
- return m.MarshalToSizedBuffer(dAtA[:size])
-}
-
-func (m *Event) MarshalToSizedBuffer(dAtA []byte) (int, error) {
- i := len(dAtA)
- _ = i
- var l int
- _ = l
- if len(m.Content) > 0 {
- i -= len(m.Content)
- copy(dAtA[i:], m.Content)
- i = encodeVarintIntrospection(dAtA, i, uint64(len(m.Content)))
- i--
- dAtA[i] = 0x1a
- }
- if m.Ts != 0 {
- i = encodeVarintIntrospection(dAtA, i, uint64(m.Ts))
- i--
- dAtA[i] = 0x10
- }
- if m.Type != nil {
- {
- size, err := m.Type.MarshalToSizedBuffer(dAtA[:i])
- if err != nil {
- return 0, err
- }
- i -= size
- i = encodeVarintIntrospection(dAtA, i, uint64(size))
- }
- i--
- dAtA[i] = 0xa
- }
- return len(dAtA) - i, nil
-}
-
-func (m *ServerMessage) Marshal() (dAtA []byte, err error) {
- size := m.Size()
- dAtA = make([]byte, size)
- n, err := m.MarshalToSizedBuffer(dAtA[:size])
- if err != nil {
- return nil, err
- }
- return dAtA[:n], nil
-}
-
-func (m *ServerMessage) MarshalTo(dAtA []byte) (int, error) {
- size := m.Size()
- return m.MarshalToSizedBuffer(dAtA[:size])
-}
-
-func (m *ServerMessage) MarshalToSizedBuffer(dAtA []byte) (int, error) {
- i := len(dAtA)
- _ = i
- var l int
- _ = l
- if m.Payload != nil {
- {
- size := m.Payload.Size()
- i -= size
- if _, err := m.Payload.MarshalTo(dAtA[i:]); err != nil {
- return 0, err
- }
- }
- }
- if m.Version != nil {
- {
- size, err := m.Version.MarshalToSizedBuffer(dAtA[:i])
- if err != nil {
- return 0, err
- }
- i -= size
- i = encodeVarintIntrospection(dAtA, i, uint64(size))
- }
- i--
- dAtA[i] = 0xa
- }
- return len(dAtA) - i, nil
-}
-
-func (m *ServerMessage_State) MarshalTo(dAtA []byte) (int, error) {
- size := m.Size()
- return m.MarshalToSizedBuffer(dAtA[:size])
-}
-
-func (m *ServerMessage_State) MarshalToSizedBuffer(dAtA []byte) (int, error) {
- i := len(dAtA)
- if m.State != nil {
- {
- size, err := m.State.MarshalToSizedBuffer(dAtA[:i])
- if err != nil {
- return 0, err
- }
- i -= size
- i = encodeVarintIntrospection(dAtA, i, uint64(size))
- }
- i--
- dAtA[i] = 0x12
- }
- return len(dAtA) - i, nil
-}
-func (m *ServerMessage_Runtime) MarshalTo(dAtA []byte) (int, error) {
- size := m.Size()
- return m.MarshalToSizedBuffer(dAtA[:size])
-}
-
-func (m *ServerMessage_Runtime) MarshalToSizedBuffer(dAtA []byte) (int, error) {
- i := len(dAtA)
- if m.Runtime != nil {
- {
- size, err := m.Runtime.MarshalToSizedBuffer(dAtA[:i])
- if err != nil {
- return 0, err
- }
- i -= size
- i = encodeVarintIntrospection(dAtA, i, uint64(size))
- }
- i--
- dAtA[i] = 0x1a
- }
- return len(dAtA) - i, nil
-}
-func (m *ServerMessage_Event) MarshalTo(dAtA []byte) (int, error) {
- size := m.Size()
- return m.MarshalToSizedBuffer(dAtA[:size])
-}
-
-func (m *ServerMessage_Event) MarshalToSizedBuffer(dAtA []byte) (int, error) {
- i := len(dAtA)
- if m.Event != nil {
- {
- size, err := m.Event.MarshalToSizedBuffer(dAtA[:i])
- if err != nil {
- return 0, err
- }
- i -= size
- i = encodeVarintIntrospection(dAtA, i, uint64(size))
- }
- i--
- dAtA[i] = 0x22
- }
- return len(dAtA) - i, nil
-}
-func (m *ServerMessage_Response) MarshalTo(dAtA []byte) (int, error) {
- size := m.Size()
- return m.MarshalToSizedBuffer(dAtA[:size])
-}
-
-func (m *ServerMessage_Response) MarshalToSizedBuffer(dAtA []byte) (int, error) {
- i := len(dAtA)
- if m.Response != nil {
- {
- size, err := m.Response.MarshalToSizedBuffer(dAtA[:i])
- if err != nil {
- return 0, err
- }
- i -= size
- i = encodeVarintIntrospection(dAtA, i, uint64(size))
- }
- i--
- dAtA[i] = 0x2a
- }
- return len(dAtA) - i, nil
-}
-func (m *ServerMessage_Notice) MarshalTo(dAtA []byte) (int, error) {
- size := m.Size()
- return m.MarshalToSizedBuffer(dAtA[:size])
-}
-
-func (m *ServerMessage_Notice) MarshalToSizedBuffer(dAtA []byte) (int, error) {
- i := len(dAtA)
- if m.Notice != nil {
- {
- size, err := m.Notice.MarshalToSizedBuffer(dAtA[:i])
- if err != nil {
- return 0, err
- }
- i -= size
- i = encodeVarintIntrospection(dAtA, i, uint64(size))
- }
- i--
- dAtA[i] = 0x32
- }
- return len(dAtA) - i, nil
-}
-func (m *Configuration) Marshal() (dAtA []byte, err error) {
- size := m.Size()
- dAtA = make([]byte, size)
- n, err := m.MarshalToSizedBuffer(dAtA[:size])
- if err != nil {
- return nil, err
- }
- return dAtA[:n], nil
-}
-
-func (m *Configuration) MarshalTo(dAtA []byte) (int, error) {
- size := m.Size()
- return m.MarshalToSizedBuffer(dAtA[:size])
-}
-
-func (m *Configuration) MarshalToSizedBuffer(dAtA []byte) (int, error) {
- i := len(dAtA)
- _ = i
- var l int
- _ = l
- if m.StateSnapshotIntervalMs != 0 {
- i = encodeVarintIntrospection(dAtA, i, uint64(m.StateSnapshotIntervalMs))
- i--
- dAtA[i] = 0x10
- }
- if m.RetentionPeriodMs != 0 {
- i = encodeVarintIntrospection(dAtA, i, uint64(m.RetentionPeriodMs))
- i--
- dAtA[i] = 0x8
- }
- return len(dAtA) - i, nil
-}
-
-func (m *ClientCommand) Marshal() (dAtA []byte, err error) {
- size := m.Size()
- dAtA = make([]byte, size)
- n, err := m.MarshalToSizedBuffer(dAtA[:size])
- if err != nil {
- return nil, err
- }
- return dAtA[:n], nil
-}
-
-func (m *ClientCommand) MarshalTo(dAtA []byte) (int, error) {
- size := m.Size()
- return m.MarshalToSizedBuffer(dAtA[:size])
-}
-
-func (m *ClientCommand) MarshalToSizedBuffer(dAtA []byte) (int, error) {
- i := len(dAtA)
- _ = i
- var l int
- _ = l
- if m.Config != nil {
- {
- size, err := m.Config.MarshalToSizedBuffer(dAtA[:i])
- if err != nil {
- return 0, err
- }
- i -= size
- i = encodeVarintIntrospection(dAtA, i, uint64(size))
- }
- i--
- dAtA[i] = 0x2a
- }
- if m.Source != 0 {
- i = encodeVarintIntrospection(dAtA, i, uint64(m.Source))
- i--
- dAtA[i] = 0x20
- }
- if m.Command != 0 {
- i = encodeVarintIntrospection(dAtA, i, uint64(m.Command))
- i--
- dAtA[i] = 0x18
- }
- if m.Id != 0 {
- i = encodeVarintIntrospection(dAtA, i, uint64(m.Id))
- i--
- dAtA[i] = 0x10
- }
- if m.Version != nil {
- {
- size, err := m.Version.MarshalToSizedBuffer(dAtA[:i])
- if err != nil {
- return 0, err
- }
- i -= size
- i = encodeVarintIntrospection(dAtA, i, uint64(size))
- }
- i--
- dAtA[i] = 0xa
- }
- return len(dAtA) - i, nil
-}
-
-func (m *CommandResponse) Marshal() (dAtA []byte, err error) {
- size := m.Size()
- dAtA = make([]byte, size)
- n, err := m.MarshalToSizedBuffer(dAtA[:size])
- if err != nil {
- return nil, err
- }
- return dAtA[:n], nil
-}
-
-func (m *CommandResponse) MarshalTo(dAtA []byte) (int, error) {
- size := m.Size()
- return m.MarshalToSizedBuffer(dAtA[:size])
-}
-
-func (m *CommandResponse) MarshalToSizedBuffer(dAtA []byte) (int, error) {
- i := len(dAtA)
- _ = i
- var l int
- _ = l
- if m.EffectiveConfig != nil {
- {
- size, err := m.EffectiveConfig.MarshalToSizedBuffer(dAtA[:i])
- if err != nil {
- return 0, err
- }
- i -= size
- i = encodeVarintIntrospection(dAtA, i, uint64(size))
- }
- i--
- dAtA[i] = 0x22
- }
- if len(m.Error) > 0 {
- i -= len(m.Error)
- copy(dAtA[i:], m.Error)
- i = encodeVarintIntrospection(dAtA, i, uint64(len(m.Error)))
- i--
- dAtA[i] = 0x1a
- }
- if m.Result != 0 {
- i = encodeVarintIntrospection(dAtA, i, uint64(m.Result))
- i--
- dAtA[i] = 0x10
- }
- if m.Id != 0 {
- i = encodeVarintIntrospection(dAtA, i, uint64(m.Id))
- i--
- dAtA[i] = 0x8
- }
- return len(dAtA) - i, nil
-}
-
-func (m *ServerNotice) Marshal() (dAtA []byte, err error) {
- size := m.Size()
- dAtA = make([]byte, size)
- n, err := m.MarshalToSizedBuffer(dAtA[:size])
- if err != nil {
- return nil, err
- }
- return dAtA[:n], nil
-}
-
-func (m *ServerNotice) MarshalTo(dAtA []byte) (int, error) {
- size := m.Size()
- return m.MarshalToSizedBuffer(dAtA[:size])
-}
-
-func (m *ServerNotice) MarshalToSizedBuffer(dAtA []byte) (int, error) {
- i := len(dAtA)
- _ = i
- var l int
- _ = l
- if m.Kind != 0 {
- i = encodeVarintIntrospection(dAtA, i, uint64(m.Kind))
- i--
- dAtA[i] = 0x8
- }
- return len(dAtA) - i, nil
-}
-
-func encodeVarintIntrospection(dAtA []byte, offset int, v uint64) int {
- offset -= sovIntrospection(v)
- base := offset
- for v >= 1<<7 {
- dAtA[offset] = uint8(v&0x7f | 0x80)
- v >>= 7
- offset++
- }
- dAtA[offset] = uint8(v)
- return base
-}
-func (m *Version) Size() (n int) {
- if m == nil {
- return 0
- }
- var l int
- _ = l
- if m.Version != 0 {
- n += 1 + sovIntrospection(uint64(m.Version))
- }
- return n
-}
-
-func (m *ResultCounter) Size() (n int) {
- if m == nil {
- return 0
- }
- var l int
- _ = l
- if m.Total != 0 {
- n += 1 + sovIntrospection(uint64(m.Total))
- }
- if m.Ok != 0 {
- n += 1 + sovIntrospection(uint64(m.Ok))
- }
- if m.Err != 0 {
- n += 1 + sovIntrospection(uint64(m.Err))
- }
- return n
-}
-
-func (m *SlidingCounter) Size() (n int) {
- if m == nil {
- return 0
- }
- var l int
- _ = l
- if m.Over_1M != 0 {
- n += 1 + sovIntrospection(uint64(m.Over_1M))
- }
- if m.Over_5M != 0 {
- n += 1 + sovIntrospection(uint64(m.Over_5M))
- }
- if m.Over_15M != 0 {
- n += 1 + sovIntrospection(uint64(m.Over_15M))
- }
- if m.Over_30M != 0 {
- n += 1 + sovIntrospection(uint64(m.Over_30M))
- }
- if m.Over_1Hr != 0 {
- n += 1 + sovIntrospection(uint64(m.Over_1Hr))
- }
- if m.Over_2Hr != 0 {
- n += 1 + sovIntrospection(uint64(m.Over_2Hr))
- }
- if m.Over_4Hr != 0 {
- n += 1 + sovIntrospection(uint64(m.Over_4Hr))
- }
- if m.Over_8Hr != 0 {
- n += 1 + sovIntrospection(uint64(m.Over_8Hr))
- }
- if m.Over_12Hr != 0 {
- n += 1 + sovIntrospection(uint64(m.Over_12Hr))
- }
- if m.Over_24Hr != 0 {
- n += 1 + sovIntrospection(uint64(m.Over_24Hr))
- }
- return n
-}
-
-func (m *DataGauge) Size() (n int) {
- if m == nil {
- return 0
- }
- var l int
- _ = l
- if m.CumBytes != 0 {
- n += 1 + sovIntrospection(uint64(m.CumBytes))
- }
- if m.CumPackets != 0 {
- n += 1 + sovIntrospection(uint64(m.CumPackets))
- }
- if m.InstBw != 0 {
- n += 1 + sovIntrospection(uint64(m.InstBw))
- }
- return n
-}
-
-func (m *EventType) Size() (n int) {
- if m == nil {
- return 0
- }
- var l int
- _ = l
- l = len(m.Name)
- if l > 0 {
- n += 1 + l + sovIntrospection(uint64(l))
- }
- if len(m.PropertyTypes) > 0 {
- for _, e := range m.PropertyTypes {
- l = e.Size()
- n += 1 + l + sovIntrospection(uint64(l))
- }
- }
- return n
-}
-
-func (m *EventType_EventProperty) Size() (n int) {
- if m == nil {
- return 0
- }
- var l int
- _ = l
- l = len(m.Name)
- if l > 0 {
- n += 1 + l + sovIntrospection(uint64(l))
- }
- if m.Type != 0 {
- n += 1 + sovIntrospection(uint64(m.Type))
- }
- if m.HasMultiple {
- n += 2
- }
- return n
-}
-
-func (m *Runtime) Size() (n int) {
- if m == nil {
- return 0
- }
- var l int
- _ = l
- l = len(m.Implementation)
- if l > 0 {
- n += 1 + l + sovIntrospection(uint64(l))
- }
- l = len(m.Version)
- if l > 0 {
- n += 1 + l + sovIntrospection(uint64(l))
- }
- l = len(m.Platform)
- if l > 0 {
- n += 1 + l + sovIntrospection(uint64(l))
- }
- l = len(m.PeerId)
- if l > 0 {
- n += 1 + l + sovIntrospection(uint64(l))
- }
- if len(m.EventTypes) > 0 {
- for _, e := range m.EventTypes {
- l = e.Size()
- n += 1 + l + sovIntrospection(uint64(l))
- }
- }
- return n
-}
-
-func (m *EndpointPair) Size() (n int) {
- if m == nil {
- return 0
- }
- var l int
- _ = l
- l = len(m.SrcMultiaddr)
- if l > 0 {
- n += 1 + l + sovIntrospection(uint64(l))
- }
- l = len(m.DstMultiaddr)
- if l > 0 {
- n += 1 + l + sovIntrospection(uint64(l))
- }
- return n
-}
-
-func (m *Traffic) Size() (n int) {
- if m == nil {
- return 0
- }
- var l int
- _ = l
- if m.TrafficIn != nil {
- l = m.TrafficIn.Size()
- n += 1 + l + sovIntrospection(uint64(l))
- }
- if m.TrafficOut != nil {
- l = m.TrafficOut.Size()
- n += 1 + l + sovIntrospection(uint64(l))
- }
- return n
-}
-
-func (m *StreamList) Size() (n int) {
- if m == nil {
- return 0
- }
- var l int
- _ = l
- if len(m.StreamIds) > 0 {
- for _, b := range m.StreamIds {
- l = len(b)
- n += 1 + l + sovIntrospection(uint64(l))
- }
- }
- if len(m.Streams) > 0 {
- for _, e := range m.Streams {
- l = e.Size()
- n += 1 + l + sovIntrospection(uint64(l))
- }
- }
- return n
-}
-
-func (m *Connection) Size() (n int) {
- if m == nil {
- return 0
- }
- var l int
- _ = l
- l = len(m.Id)
- if l > 0 {
- n += 1 + l + sovIntrospection(uint64(l))
- }
- l = len(m.PeerId)
- if l > 0 {
- n += 1 + l + sovIntrospection(uint64(l))
- }
- if m.Status != 0 {
- n += 1 + sovIntrospection(uint64(m.Status))
- }
- l = len(m.TransportId)
- if l > 0 {
- n += 1 + l + sovIntrospection(uint64(l))
- }
- if m.Endpoints != nil {
- l = m.Endpoints.Size()
- n += 1 + l + sovIntrospection(uint64(l))
- }
- if m.Timeline != nil {
- l = m.Timeline.Size()
- n += 1 + l + sovIntrospection(uint64(l))
- }
- if m.Role != 0 {
- n += 1 + sovIntrospection(uint64(m.Role))
- }
- if m.Traffic != nil {
- l = m.Traffic.Size()
- n += 1 + l + sovIntrospection(uint64(l))
- }
- if m.Attribs != nil {
- l = m.Attribs.Size()
- n += 1 + l + sovIntrospection(uint64(l))
- }
- if m.LatencyNs != 0 {
- n += 1 + sovIntrospection(uint64(m.LatencyNs))
- }
- if m.Streams != nil {
- l = m.Streams.Size()
- n += 1 + l + sovIntrospection(uint64(l))
- }
- if m.RelayedOver != nil {
- n += m.RelayedOver.Size()
- }
- if len(m.UserProvidedTags) > 0 {
- for _, s := range m.UserProvidedTags {
- l = len(s)
- n += 2 + l + sovIntrospection(uint64(l))
- }
- }
- return n
-}
-
-func (m *Connection_ConnId) Size() (n int) {
- if m == nil {
- return 0
- }
- var l int
- _ = l
- if m.ConnId != nil {
- l = len(m.ConnId)
- n += 2 + l + sovIntrospection(uint64(l))
- }
- return n
-}
-func (m *Connection_Conn) Size() (n int) {
- if m == nil {
- return 0
- }
- var l int
- _ = l
- if m.Conn != nil {
- l = m.Conn.Size()
- n += 2 + l + sovIntrospection(uint64(l))
- }
- return n
-}
-func (m *Connection_Timeline) Size() (n int) {
- if m == nil {
- return 0
- }
- var l int
- _ = l
- if m.OpenTs != 0 {
- n += 1 + sovIntrospection(uint64(m.OpenTs))
- }
- if m.UpgradedTs != 0 {
- n += 1 + sovIntrospection(uint64(m.UpgradedTs))
- }
- if m.CloseTs != 0 {
- n += 1 + sovIntrospection(uint64(m.CloseTs))
- }
- return n
-}
-
-func (m *Connection_Attributes) Size() (n int) {
- if m == nil {
- return 0
- }
- var l int
- _ = l
- l = len(m.Multiplexer)
- if l > 0 {
- n += 1 + l + sovIntrospection(uint64(l))
- }
- l = len(m.Encryption)
- if l > 0 {
- n += 1 + l + sovIntrospection(uint64(l))
- }
- return n
-}
-
-func (m *Stream) Size() (n int) {
- if m == nil {
- return 0
- }
- var l int
- _ = l
- l = len(m.Id)
- if l > 0 {
- n += 1 + l + sovIntrospection(uint64(l))
- }
- l = len(m.Protocol)
- if l > 0 {
- n += 1 + l + sovIntrospection(uint64(l))
- }
- if m.Role != 0 {
- n += 1 + sovIntrospection(uint64(m.Role))
- }
- if m.Traffic != nil {
- l = m.Traffic.Size()
- n += 1 + l + sovIntrospection(uint64(l))
- }
- if m.Conn != nil {
- l = m.Conn.Size()
- n += 1 + l + sovIntrospection(uint64(l))
- }
- if m.Timeline != nil {
- l = m.Timeline.Size()
- n += 1 + l + sovIntrospection(uint64(l))
- }
- if m.Status != 0 {
- n += 1 + sovIntrospection(uint64(m.Status))
- }
- if m.LatencyNs != 0 {
- n += 2 + sovIntrospection(uint64(m.LatencyNs))
- }
- if len(m.UserProvidedTags) > 0 {
- for _, s := range m.UserProvidedTags {
- l = len(s)
- n += 2 + l + sovIntrospection(uint64(l))
- }
- }
- return n
-}
-
-func (m *Stream_ConnectionRef) Size() (n int) {
- if m == nil {
- return 0
- }
- var l int
- _ = l
- if m.Connection != nil {
- n += m.Connection.Size()
- }
- return n
-}
-
-func (m *Stream_ConnectionRef_Conn) Size() (n int) {
- if m == nil {
- return 0
- }
- var l int
- _ = l
- if m.Conn != nil {
- l = m.Conn.Size()
- n += 1 + l + sovIntrospection(uint64(l))
- }
- return n
-}
-func (m *Stream_ConnectionRef_ConnId) Size() (n int) {
- if m == nil {
- return 0
- }
- var l int
- _ = l
- if m.ConnId != nil {
- l = len(m.ConnId)
- n += 1 + l + sovIntrospection(uint64(l))
- }
- return n
-}
-func (m *Stream_Timeline) Size() (n int) {
- if m == nil {
- return 0
- }
- var l int
- _ = l
- if m.OpenTs != 0 {
- n += 1 + sovIntrospection(uint64(m.OpenTs))
- }
- if m.CloseTs != 0 {
- n += 1 + sovIntrospection(uint64(m.CloseTs))
- }
- return n
-}
-
-func (m *DHT) Size() (n int) {
- if m == nil {
- return 0
- }
- var l int
- _ = l
- l = len(m.Protocol)
- if l > 0 {
- n += 1 + l + sovIntrospection(uint64(l))
- }
- if m.Enabled {
- n += 2
- }
- if m.StartTs != 0 {
- n += 1 + sovIntrospection(uint64(m.StartTs))
- }
- if m.Params != nil {
- l = m.Params.Size()
- n += 1 + l + sovIntrospection(uint64(l))
- }
- if len(m.Buckets) > 0 {
- for _, e := range m.Buckets {
- l = e.Size()
- n += 1 + l + sovIntrospection(uint64(l))
- }
- }
- if m.IncomingQueries != nil {
- l = m.IncomingQueries.Size()
- n += 1 + l + sovIntrospection(uint64(l))
- }
- if m.OutgoingQueries != nil {
- l = m.OutgoingQueries.Size()
- n += 1 + l + sovIntrospection(uint64(l))
- }
- return n
-}
-
-func (m *DHT_Params) Size() (n int) {
- if m == nil {
- return 0
- }
- var l int
- _ = l
- if m.K != 0 {
- n += 1 + sovIntrospection(uint64(m.K))
- }
- if m.Alpha != 0 {
- n += 1 + sovIntrospection(uint64(m.Alpha))
- }
- if m.DisjointPaths != 0 {
- n += 1 + sovIntrospection(uint64(m.DisjointPaths))
- }
- if m.Beta != 0 {
- n += 1 + sovIntrospection(uint64(m.Beta))
- }
- return n
-}
-
-func (m *DHT_PeerInDHT) Size() (n int) {
- if m == nil {
- return 0
- }
- var l int
- _ = l
- l = len(m.PeerId)
- if l > 0 {
- n += 1 + l + sovIntrospection(uint64(l))
- }
- if m.Status != 0 {
- n += 1 + sovIntrospection(uint64(m.Status))
- }
- if m.AgeInBucket != 0 {
- n += 1 + sovIntrospection(uint64(m.AgeInBucket))
- }
- return n
-}
-
-func (m *DHT_Bucket) Size() (n int) {
- if m == nil {
- return 0
- }
- var l int
- _ = l
- if m.Cpl != 0 {
- n += 1 + sovIntrospection(uint64(m.Cpl))
- }
- if len(m.Peers) > 0 {
- for _, e := range m.Peers {
- l = e.Size()
- n += 1 + l + sovIntrospection(uint64(l))
- }
- }
- return n
-}
-
-func (m *DHT_QueryGauge) Size() (n int) {
- if m == nil {
- return 0
- }
- var l int
- _ = l
- if m.Success != 0 {
- n += 1 + sovIntrospection(uint64(m.Success))
- }
- if m.Error != 0 {
- n += 1 + sovIntrospection(uint64(m.Error))
- }
- if m.Timeout != 0 {
- n += 1 + sovIntrospection(uint64(m.Timeout))
- }
- return n
-}
-
-func (m *Subsystems) Size() (n int) {
- if m == nil {
- return 0
- }
- var l int
- _ = l
- if len(m.Connections) > 0 {
- for _, e := range m.Connections {
- l = e.Size()
- n += 1 + l + sovIntrospection(uint64(l))
- }
- }
- if m.Dht != nil {
- l = m.Dht.Size()
- n += 1 + l + sovIntrospection(uint64(l))
- }
- return n
-}
-
-func (m *State) Size() (n int) {
- if m == nil {
- return 0
- }
- var l int
- _ = l
- if m.Subsystems != nil {
- l = m.Subsystems.Size()
- n += 1 + l + sovIntrospection(uint64(l))
- }
- if m.Traffic != nil {
- l = m.Traffic.Size()
- n += 1 + l + sovIntrospection(uint64(l))
- }
- if m.InstantTs != 0 {
- n += 1 + sovIntrospection(uint64(m.InstantTs))
- }
- if m.StartTs != 0 {
- n += 1 + sovIntrospection(uint64(m.StartTs))
- }
- if m.SnapshotDurationMs != 0 {
- n += 1 + sovIntrospection(uint64(m.SnapshotDurationMs))
- }
- return n
-}
-
-func (m *Event) Size() (n int) {
- if m == nil {
- return 0
- }
- var l int
- _ = l
- if m.Type != nil {
- l = m.Type.Size()
- n += 1 + l + sovIntrospection(uint64(l))
- }
- if m.Ts != 0 {
- n += 1 + sovIntrospection(uint64(m.Ts))
- }
- l = len(m.Content)
- if l > 0 {
- n += 1 + l + sovIntrospection(uint64(l))
- }
- return n
-}
-
-func (m *ServerMessage) Size() (n int) {
- if m == nil {
- return 0
- }
- var l int
- _ = l
- if m.Version != nil {
- l = m.Version.Size()
- n += 1 + l + sovIntrospection(uint64(l))
- }
- if m.Payload != nil {
- n += m.Payload.Size()
- }
- return n
-}
-
-func (m *ServerMessage_State) Size() (n int) {
- if m == nil {
- return 0
- }
- var l int
- _ = l
- if m.State != nil {
- l = m.State.Size()
- n += 1 + l + sovIntrospection(uint64(l))
- }
- return n
-}
-func (m *ServerMessage_Runtime) Size() (n int) {
- if m == nil {
- return 0
- }
- var l int
- _ = l
- if m.Runtime != nil {
- l = m.Runtime.Size()
- n += 1 + l + sovIntrospection(uint64(l))
- }
- return n
-}
-func (m *ServerMessage_Event) Size() (n int) {
- if m == nil {
- return 0
- }
- var l int
- _ = l
- if m.Event != nil {
- l = m.Event.Size()
- n += 1 + l + sovIntrospection(uint64(l))
- }
- return n
-}
-func (m *ServerMessage_Response) Size() (n int) {
- if m == nil {
- return 0
- }
- var l int
- _ = l
- if m.Response != nil {
- l = m.Response.Size()
- n += 1 + l + sovIntrospection(uint64(l))
- }
- return n
-}
-func (m *ServerMessage_Notice) Size() (n int) {
- if m == nil {
- return 0
- }
- var l int
- _ = l
- if m.Notice != nil {
- l = m.Notice.Size()
- n += 1 + l + sovIntrospection(uint64(l))
- }
- return n
-}
-func (m *Configuration) Size() (n int) {
- if m == nil {
- return 0
- }
- var l int
- _ = l
- if m.RetentionPeriodMs != 0 {
- n += 1 + sovIntrospection(uint64(m.RetentionPeriodMs))
- }
- if m.StateSnapshotIntervalMs != 0 {
- n += 1 + sovIntrospection(uint64(m.StateSnapshotIntervalMs))
- }
- return n
-}
-
-func (m *ClientCommand) Size() (n int) {
- if m == nil {
- return 0
- }
- var l int
- _ = l
- if m.Version != nil {
- l = m.Version.Size()
- n += 1 + l + sovIntrospection(uint64(l))
- }
- if m.Id != 0 {
- n += 1 + sovIntrospection(uint64(m.Id))
- }
- if m.Command != 0 {
- n += 1 + sovIntrospection(uint64(m.Command))
- }
- if m.Source != 0 {
- n += 1 + sovIntrospection(uint64(m.Source))
- }
- if m.Config != nil {
- l = m.Config.Size()
- n += 1 + l + sovIntrospection(uint64(l))
- }
- return n
-}
-
-func (m *CommandResponse) Size() (n int) {
- if m == nil {
- return 0
- }
- var l int
- _ = l
- if m.Id != 0 {
- n += 1 + sovIntrospection(uint64(m.Id))
- }
- if m.Result != 0 {
- n += 1 + sovIntrospection(uint64(m.Result))
- }
- l = len(m.Error)
- if l > 0 {
- n += 1 + l + sovIntrospection(uint64(l))
- }
- if m.EffectiveConfig != nil {
- l = m.EffectiveConfig.Size()
- n += 1 + l + sovIntrospection(uint64(l))
- }
- return n
-}
-
-func (m *ServerNotice) Size() (n int) {
- if m == nil {
- return 0
- }
- var l int
- _ = l
- if m.Kind != 0 {
- n += 1 + sovIntrospection(uint64(m.Kind))
- }
- return n
-}
-
-func sovIntrospection(x uint64) (n int) {
- return (math_bits.Len64(x|1) + 6) / 7
-}
-func sozIntrospection(x uint64) (n int) {
- return sovIntrospection(uint64((x << 1) ^ uint64((int64(x) >> 63))))
-}
-func (m *Version) Unmarshal(dAtA []byte) error {
- l := len(dAtA)
- iNdEx := 0
- for iNdEx < l {
- preIndex := iNdEx
- var wire uint64
- for shift := uint(0); ; shift += 7 {
- if shift >= 64 {
- return ErrIntOverflowIntrospection
- }
- if iNdEx >= l {
- return io.ErrUnexpectedEOF
- }
- b := dAtA[iNdEx]
- iNdEx++
- wire |= uint64(b&0x7F) << shift
- if b < 0x80 {
- break
- }
- }
- fieldNum := int32(wire >> 3)
- wireType := int(wire & 0x7)
- if wireType == 4 {
- return fmt.Errorf("proto: Version: wiretype end group for non-group")
- }
- if fieldNum <= 0 {
- return fmt.Errorf("proto: Version: illegal tag %d (wire type %d)", fieldNum, wire)
- }
- switch fieldNum {
- case 1:
- if wireType != 0 {
- return fmt.Errorf("proto: wrong wireType = %d for field Version", wireType)
- }
- m.Version = 0
- for shift := uint(0); ; shift += 7 {
- if shift >= 64 {
- return ErrIntOverflowIntrospection
- }
- if iNdEx >= l {
- return io.ErrUnexpectedEOF
- }
- b := dAtA[iNdEx]
- iNdEx++
- m.Version |= uint32(b&0x7F) << shift
- if b < 0x80 {
- break
- }
- }
- default:
- iNdEx = preIndex
- skippy, err := skipIntrospection(dAtA[iNdEx:])
- if err != nil {
- return err
- }
- if skippy < 0 {
- return ErrInvalidLengthIntrospection
- }
- if (iNdEx + skippy) < 0 {
- return ErrInvalidLengthIntrospection
- }
- if (iNdEx + skippy) > l {
- return io.ErrUnexpectedEOF
- }
- iNdEx += skippy
- }
- }
-
- if iNdEx > l {
- return io.ErrUnexpectedEOF
- }
- return nil
-}
-func (m *ResultCounter) Unmarshal(dAtA []byte) error {
- l := len(dAtA)
- iNdEx := 0
- for iNdEx < l {
- preIndex := iNdEx
- var wire uint64
- for shift := uint(0); ; shift += 7 {
- if shift >= 64 {
- return ErrIntOverflowIntrospection
- }
- if iNdEx >= l {
- return io.ErrUnexpectedEOF
- }
- b := dAtA[iNdEx]
- iNdEx++
- wire |= uint64(b&0x7F) << shift
- if b < 0x80 {
- break
- }
- }
- fieldNum := int32(wire >> 3)
- wireType := int(wire & 0x7)
- if wireType == 4 {
- return fmt.Errorf("proto: ResultCounter: wiretype end group for non-group")
- }
- if fieldNum <= 0 {
- return fmt.Errorf("proto: ResultCounter: illegal tag %d (wire type %d)", fieldNum, wire)
- }
- switch fieldNum {
- case 1:
- if wireType != 0 {
- return fmt.Errorf("proto: wrong wireType = %d for field Total", wireType)
- }
- m.Total = 0
- for shift := uint(0); ; shift += 7 {
- if shift >= 64 {
- return ErrIntOverflowIntrospection
- }
- if iNdEx >= l {
- return io.ErrUnexpectedEOF
- }
- b := dAtA[iNdEx]
- iNdEx++
- m.Total |= uint32(b&0x7F) << shift
- if b < 0x80 {
- break
- }
- }
- case 2:
- if wireType != 0 {
- return fmt.Errorf("proto: wrong wireType = %d for field Ok", wireType)
- }
- m.Ok = 0
- for shift := uint(0); ; shift += 7 {
- if shift >= 64 {
- return ErrIntOverflowIntrospection
- }
- if iNdEx >= l {
- return io.ErrUnexpectedEOF
- }
- b := dAtA[iNdEx]
- iNdEx++
- m.Ok |= uint32(b&0x7F) << shift
- if b < 0x80 {
- break
- }
- }
- case 3:
- if wireType != 0 {
- return fmt.Errorf("proto: wrong wireType = %d for field Err", wireType)
- }
- m.Err = 0
- for shift := uint(0); ; shift += 7 {
- if shift >= 64 {
- return ErrIntOverflowIntrospection
- }
- if iNdEx >= l {
- return io.ErrUnexpectedEOF
- }
- b := dAtA[iNdEx]
- iNdEx++
- m.Err |= uint32(b&0x7F) << shift
- if b < 0x80 {
- break
- }
- }
- default:
- iNdEx = preIndex
- skippy, err := skipIntrospection(dAtA[iNdEx:])
- if err != nil {
- return err
- }
- if skippy < 0 {
- return ErrInvalidLengthIntrospection
- }
- if (iNdEx + skippy) < 0 {
- return ErrInvalidLengthIntrospection
- }
- if (iNdEx + skippy) > l {
- return io.ErrUnexpectedEOF
- }
- iNdEx += skippy
- }
- }
-
- if iNdEx > l {
- return io.ErrUnexpectedEOF
- }
- return nil
-}
-func (m *SlidingCounter) Unmarshal(dAtA []byte) error {
- l := len(dAtA)
- iNdEx := 0
- for iNdEx < l {
- preIndex := iNdEx
- var wire uint64
- for shift := uint(0); ; shift += 7 {
- if shift >= 64 {
- return ErrIntOverflowIntrospection
- }
- if iNdEx >= l {
- return io.ErrUnexpectedEOF
- }
- b := dAtA[iNdEx]
- iNdEx++
- wire |= uint64(b&0x7F) << shift
- if b < 0x80 {
- break
- }
- }
- fieldNum := int32(wire >> 3)
- wireType := int(wire & 0x7)
- if wireType == 4 {
- return fmt.Errorf("proto: SlidingCounter: wiretype end group for non-group")
- }
- if fieldNum <= 0 {
- return fmt.Errorf("proto: SlidingCounter: illegal tag %d (wire type %d)", fieldNum, wire)
- }
- switch fieldNum {
- case 1:
- if wireType != 0 {
- return fmt.Errorf("proto: wrong wireType = %d for field Over_1M", wireType)
- }
- m.Over_1M = 0
- for shift := uint(0); ; shift += 7 {
- if shift >= 64 {
- return ErrIntOverflowIntrospection
- }
- if iNdEx >= l {
- return io.ErrUnexpectedEOF
- }
- b := dAtA[iNdEx]
- iNdEx++
- m.Over_1M |= uint32(b&0x7F) << shift
- if b < 0x80 {
- break
- }
- }
- case 2:
- if wireType != 0 {
- return fmt.Errorf("proto: wrong wireType = %d for field Over_5M", wireType)
- }
- m.Over_5M = 0
- for shift := uint(0); ; shift += 7 {
- if shift >= 64 {
- return ErrIntOverflowIntrospection
- }
- if iNdEx >= l {
- return io.ErrUnexpectedEOF
- }
- b := dAtA[iNdEx]
- iNdEx++
- m.Over_5M |= uint32(b&0x7F) << shift
- if b < 0x80 {
- break
- }
- }
- case 3:
- if wireType != 0 {
- return fmt.Errorf("proto: wrong wireType = %d for field Over_15M", wireType)
- }
- m.Over_15M = 0
- for shift := uint(0); ; shift += 7 {
- if shift >= 64 {
- return ErrIntOverflowIntrospection
- }
- if iNdEx >= l {
- return io.ErrUnexpectedEOF
- }
- b := dAtA[iNdEx]
- iNdEx++
- m.Over_15M |= uint32(b&0x7F) << shift
- if b < 0x80 {
- break
- }
- }
- case 4:
- if wireType != 0 {
- return fmt.Errorf("proto: wrong wireType = %d for field Over_30M", wireType)
- }
- m.Over_30M = 0
- for shift := uint(0); ; shift += 7 {
- if shift >= 64 {
- return ErrIntOverflowIntrospection
- }
- if iNdEx >= l {
- return io.ErrUnexpectedEOF
- }
- b := dAtA[iNdEx]
- iNdEx++
- m.Over_30M |= uint32(b&0x7F) << shift
- if b < 0x80 {
- break
- }
- }
- case 5:
- if wireType != 0 {
- return fmt.Errorf("proto: wrong wireType = %d for field Over_1Hr", wireType)
- }
- m.Over_1Hr = 0
- for shift := uint(0); ; shift += 7 {
- if shift >= 64 {
- return ErrIntOverflowIntrospection
- }
- if iNdEx >= l {
- return io.ErrUnexpectedEOF
- }
- b := dAtA[iNdEx]
- iNdEx++
- m.Over_1Hr |= uint32(b&0x7F) << shift
- if b < 0x80 {
- break
- }
- }
- case 6:
- if wireType != 0 {
- return fmt.Errorf("proto: wrong wireType = %d for field Over_2Hr", wireType)
- }
- m.Over_2Hr = 0
- for shift := uint(0); ; shift += 7 {
- if shift >= 64 {
- return ErrIntOverflowIntrospection
- }
- if iNdEx >= l {
- return io.ErrUnexpectedEOF
- }
- b := dAtA[iNdEx]
- iNdEx++
- m.Over_2Hr |= uint32(b&0x7F) << shift
- if b < 0x80 {
- break
- }
- }
- case 7:
- if wireType != 0 {
- return fmt.Errorf("proto: wrong wireType = %d for field Over_4Hr", wireType)
- }
- m.Over_4Hr = 0
- for shift := uint(0); ; shift += 7 {
- if shift >= 64 {
- return ErrIntOverflowIntrospection
- }
- if iNdEx >= l {
- return io.ErrUnexpectedEOF
- }
- b := dAtA[iNdEx]
- iNdEx++
- m.Over_4Hr |= uint32(b&0x7F) << shift
- if b < 0x80 {
- break
- }
- }
- case 8:
- if wireType != 0 {
- return fmt.Errorf("proto: wrong wireType = %d for field Over_8Hr", wireType)
- }
- m.Over_8Hr = 0
- for shift := uint(0); ; shift += 7 {
- if shift >= 64 {
- return ErrIntOverflowIntrospection
- }
- if iNdEx >= l {
- return io.ErrUnexpectedEOF
- }
- b := dAtA[iNdEx]
- iNdEx++
- m.Over_8Hr |= uint32(b&0x7F) << shift
- if b < 0x80 {
- break
- }
- }
- case 9:
- if wireType != 0 {
- return fmt.Errorf("proto: wrong wireType = %d for field Over_12Hr", wireType)
- }
- m.Over_12Hr = 0
- for shift := uint(0); ; shift += 7 {
- if shift >= 64 {
- return ErrIntOverflowIntrospection
- }
- if iNdEx >= l {
- return io.ErrUnexpectedEOF
- }
- b := dAtA[iNdEx]
- iNdEx++
- m.Over_12Hr |= uint32(b&0x7F) << shift
- if b < 0x80 {
- break
- }
- }
- case 10:
- if wireType != 0 {
- return fmt.Errorf("proto: wrong wireType = %d for field Over_24Hr", wireType)
- }
- m.Over_24Hr = 0
- for shift := uint(0); ; shift += 7 {
- if shift >= 64 {
- return ErrIntOverflowIntrospection
- }
- if iNdEx >= l {
- return io.ErrUnexpectedEOF
- }
- b := dAtA[iNdEx]
- iNdEx++
- m.Over_24Hr |= uint32(b&0x7F) << shift
- if b < 0x80 {
- break
- }
- }
- default:
- iNdEx = preIndex
- skippy, err := skipIntrospection(dAtA[iNdEx:])
- if err != nil {
- return err
- }
- if skippy < 0 {
- return ErrInvalidLengthIntrospection
- }
- if (iNdEx + skippy) < 0 {
- return ErrInvalidLengthIntrospection
- }
- if (iNdEx + skippy) > l {
- return io.ErrUnexpectedEOF
- }
- iNdEx += skippy
- }
- }
-
- if iNdEx > l {
- return io.ErrUnexpectedEOF
- }
- return nil
-}
-func (m *DataGauge) Unmarshal(dAtA []byte) error {
- l := len(dAtA)
- iNdEx := 0
- for iNdEx < l {
- preIndex := iNdEx
- var wire uint64
- for shift := uint(0); ; shift += 7 {
- if shift >= 64 {
- return ErrIntOverflowIntrospection
- }
- if iNdEx >= l {
- return io.ErrUnexpectedEOF
- }
- b := dAtA[iNdEx]
- iNdEx++
- wire |= uint64(b&0x7F) << shift
- if b < 0x80 {
- break
- }
- }
- fieldNum := int32(wire >> 3)
- wireType := int(wire & 0x7)
- if wireType == 4 {
- return fmt.Errorf("proto: DataGauge: wiretype end group for non-group")
- }
- if fieldNum <= 0 {
- return fmt.Errorf("proto: DataGauge: illegal tag %d (wire type %d)", fieldNum, wire)
- }
- switch fieldNum {
- case 1:
- if wireType != 0 {
- return fmt.Errorf("proto: wrong wireType = %d for field CumBytes", wireType)
- }
- m.CumBytes = 0
- for shift := uint(0); ; shift += 7 {
- if shift >= 64 {
- return ErrIntOverflowIntrospection
- }
- if iNdEx >= l {
- return io.ErrUnexpectedEOF
- }
- b := dAtA[iNdEx]
- iNdEx++
- m.CumBytes |= uint64(b&0x7F) << shift
- if b < 0x80 {
- break
- }
- }
- case 2:
- if wireType != 0 {
- return fmt.Errorf("proto: wrong wireType = %d for field CumPackets", wireType)
- }
- m.CumPackets = 0
- for shift := uint(0); ; shift += 7 {
- if shift >= 64 {
- return ErrIntOverflowIntrospection
- }
- if iNdEx >= l {
- return io.ErrUnexpectedEOF
- }
- b := dAtA[iNdEx]
- iNdEx++
- m.CumPackets |= uint64(b&0x7F) << shift
- if b < 0x80 {
- break
- }
- }
- case 3:
- if wireType != 0 {
- return fmt.Errorf("proto: wrong wireType = %d for field InstBw", wireType)
- }
- m.InstBw = 0
- for shift := uint(0); ; shift += 7 {
- if shift >= 64 {
- return ErrIntOverflowIntrospection
- }
- if iNdEx >= l {
- return io.ErrUnexpectedEOF
- }
- b := dAtA[iNdEx]
- iNdEx++
- m.InstBw |= uint64(b&0x7F) << shift
- if b < 0x80 {
- break
- }
- }
- default:
- iNdEx = preIndex
- skippy, err := skipIntrospection(dAtA[iNdEx:])
- if err != nil {
- return err
- }
- if skippy < 0 {
- return ErrInvalidLengthIntrospection
- }
- if (iNdEx + skippy) < 0 {
- return ErrInvalidLengthIntrospection
- }
- if (iNdEx + skippy) > l {
- return io.ErrUnexpectedEOF
- }
- iNdEx += skippy
- }
- }
-
- if iNdEx > l {
- return io.ErrUnexpectedEOF
- }
- return nil
-}
-func (m *EventType) Unmarshal(dAtA []byte) error {
- l := len(dAtA)
- iNdEx := 0
- for iNdEx < l {
- preIndex := iNdEx
- var wire uint64
- for shift := uint(0); ; shift += 7 {
- if shift >= 64 {
- return ErrIntOverflowIntrospection
- }
- if iNdEx >= l {
- return io.ErrUnexpectedEOF
- }
- b := dAtA[iNdEx]
- iNdEx++
- wire |= uint64(b&0x7F) << shift
- if b < 0x80 {
- break
- }
- }
- fieldNum := int32(wire >> 3)
- wireType := int(wire & 0x7)
- if wireType == 4 {
- return fmt.Errorf("proto: EventType: wiretype end group for non-group")
- }
- if fieldNum <= 0 {
- return fmt.Errorf("proto: EventType: illegal tag %d (wire type %d)", fieldNum, wire)
- }
- switch fieldNum {
- case 1:
- if wireType != 2 {
- return fmt.Errorf("proto: wrong wireType = %d for field Name", wireType)
- }
- var stringLen uint64
- for shift := uint(0); ; shift += 7 {
- if shift >= 64 {
- return ErrIntOverflowIntrospection
- }
- if iNdEx >= l {
- return io.ErrUnexpectedEOF
- }
- b := dAtA[iNdEx]
- iNdEx++
- stringLen |= uint64(b&0x7F) << shift
- if b < 0x80 {
- break
- }
- }
- intStringLen := int(stringLen)
- if intStringLen < 0 {
- return ErrInvalidLengthIntrospection
- }
- postIndex := iNdEx + intStringLen
- if postIndex < 0 {
- return ErrInvalidLengthIntrospection
- }
- if postIndex > l {
- return io.ErrUnexpectedEOF
- }
- m.Name = string(dAtA[iNdEx:postIndex])
- iNdEx = postIndex
- case 2:
- if wireType != 2 {
- return fmt.Errorf("proto: wrong wireType = %d for field PropertyTypes", wireType)
- }
- var msglen int
- for shift := uint(0); ; shift += 7 {
- if shift >= 64 {
- return ErrIntOverflowIntrospection
- }
- if iNdEx >= l {
- return io.ErrUnexpectedEOF
- }
- b := dAtA[iNdEx]
- iNdEx++
- msglen |= int(b&0x7F) << shift
- if b < 0x80 {
- break
- }
- }
- if msglen < 0 {
- return ErrInvalidLengthIntrospection
- }
- postIndex := iNdEx + msglen
- if postIndex < 0 {
- return ErrInvalidLengthIntrospection
- }
- if postIndex > l {
- return io.ErrUnexpectedEOF
- }
- m.PropertyTypes = append(m.PropertyTypes, &EventType_EventProperty{})
- if err := m.PropertyTypes[len(m.PropertyTypes)-1].Unmarshal(dAtA[iNdEx:postIndex]); err != nil {
- return err
- }
- iNdEx = postIndex
- default:
- iNdEx = preIndex
- skippy, err := skipIntrospection(dAtA[iNdEx:])
- if err != nil {
- return err
- }
- if skippy < 0 {
- return ErrInvalidLengthIntrospection
- }
- if (iNdEx + skippy) < 0 {
- return ErrInvalidLengthIntrospection
- }
- if (iNdEx + skippy) > l {
- return io.ErrUnexpectedEOF
- }
- iNdEx += skippy
- }
- }
-
- if iNdEx > l {
- return io.ErrUnexpectedEOF
- }
- return nil
-}
-func (m *EventType_EventProperty) Unmarshal(dAtA []byte) error {
- l := len(dAtA)
- iNdEx := 0
- for iNdEx < l {
- preIndex := iNdEx
- var wire uint64
- for shift := uint(0); ; shift += 7 {
- if shift >= 64 {
- return ErrIntOverflowIntrospection
- }
- if iNdEx >= l {
- return io.ErrUnexpectedEOF
- }
- b := dAtA[iNdEx]
- iNdEx++
- wire |= uint64(b&0x7F) << shift
- if b < 0x80 {
- break
- }
- }
- fieldNum := int32(wire >> 3)
- wireType := int(wire & 0x7)
- if wireType == 4 {
- return fmt.Errorf("proto: EventProperty: wiretype end group for non-group")
- }
- if fieldNum <= 0 {
- return fmt.Errorf("proto: EventProperty: illegal tag %d (wire type %d)", fieldNum, wire)
- }
- switch fieldNum {
- case 1:
- if wireType != 2 {
- return fmt.Errorf("proto: wrong wireType = %d for field Name", wireType)
- }
- var stringLen uint64
- for shift := uint(0); ; shift += 7 {
- if shift >= 64 {
- return ErrIntOverflowIntrospection
- }
- if iNdEx >= l {
- return io.ErrUnexpectedEOF
- }
- b := dAtA[iNdEx]
- iNdEx++
- stringLen |= uint64(b&0x7F) << shift
- if b < 0x80 {
- break
- }
- }
- intStringLen := int(stringLen)
- if intStringLen < 0 {
- return ErrInvalidLengthIntrospection
- }
- postIndex := iNdEx + intStringLen
- if postIndex < 0 {
- return ErrInvalidLengthIntrospection
- }
- if postIndex > l {
- return io.ErrUnexpectedEOF
- }
- m.Name = string(dAtA[iNdEx:postIndex])
- iNdEx = postIndex
- case 2:
- if wireType != 0 {
- return fmt.Errorf("proto: wrong wireType = %d for field Type", wireType)
- }
- m.Type = 0
- for shift := uint(0); ; shift += 7 {
- if shift >= 64 {
- return ErrIntOverflowIntrospection
- }
- if iNdEx >= l {
- return io.ErrUnexpectedEOF
- }
- b := dAtA[iNdEx]
- iNdEx++
- m.Type |= EventType_EventProperty_PropertyType(b&0x7F) << shift
- if b < 0x80 {
- break
- }
- }
- case 3:
- if wireType != 0 {
- return fmt.Errorf("proto: wrong wireType = %d for field HasMultiple", wireType)
- }
- var v int
- for shift := uint(0); ; shift += 7 {
- if shift >= 64 {
- return ErrIntOverflowIntrospection
- }
- if iNdEx >= l {
- return io.ErrUnexpectedEOF
- }
- b := dAtA[iNdEx]
- iNdEx++
- v |= int(b&0x7F) << shift
- if b < 0x80 {
- break
- }
- }
- m.HasMultiple = bool(v != 0)
- default:
- iNdEx = preIndex
- skippy, err := skipIntrospection(dAtA[iNdEx:])
- if err != nil {
- return err
- }
- if skippy < 0 {
- return ErrInvalidLengthIntrospection
- }
- if (iNdEx + skippy) < 0 {
- return ErrInvalidLengthIntrospection
- }
- if (iNdEx + skippy) > l {
- return io.ErrUnexpectedEOF
- }
- iNdEx += skippy
- }
- }
-
- if iNdEx > l {
- return io.ErrUnexpectedEOF
- }
- return nil
-}
-func (m *Runtime) Unmarshal(dAtA []byte) error {
- l := len(dAtA)
- iNdEx := 0
- for iNdEx < l {
- preIndex := iNdEx
- var wire uint64
- for shift := uint(0); ; shift += 7 {
- if shift >= 64 {
- return ErrIntOverflowIntrospection
- }
- if iNdEx >= l {
- return io.ErrUnexpectedEOF
- }
- b := dAtA[iNdEx]
- iNdEx++
- wire |= uint64(b&0x7F) << shift
- if b < 0x80 {
- break
- }
- }
- fieldNum := int32(wire >> 3)
- wireType := int(wire & 0x7)
- if wireType == 4 {
- return fmt.Errorf("proto: Runtime: wiretype end group for non-group")
- }
- if fieldNum <= 0 {
- return fmt.Errorf("proto: Runtime: illegal tag %d (wire type %d)", fieldNum, wire)
- }
- switch fieldNum {
- case 1:
- if wireType != 2 {
- return fmt.Errorf("proto: wrong wireType = %d for field Implementation", wireType)
- }
- var stringLen uint64
- for shift := uint(0); ; shift += 7 {
- if shift >= 64 {
- return ErrIntOverflowIntrospection
- }
- if iNdEx >= l {
- return io.ErrUnexpectedEOF
- }
- b := dAtA[iNdEx]
- iNdEx++
- stringLen |= uint64(b&0x7F) << shift
- if b < 0x80 {
- break
- }
- }
- intStringLen := int(stringLen)
- if intStringLen < 0 {
- return ErrInvalidLengthIntrospection
- }
- postIndex := iNdEx + intStringLen
- if postIndex < 0 {
- return ErrInvalidLengthIntrospection
- }
- if postIndex > l {
- return io.ErrUnexpectedEOF
- }
- m.Implementation = string(dAtA[iNdEx:postIndex])
- iNdEx = postIndex
- case 2:
- if wireType != 2 {
- return fmt.Errorf("proto: wrong wireType = %d for field Version", wireType)
- }
- var stringLen uint64
- for shift := uint(0); ; shift += 7 {
- if shift >= 64 {
- return ErrIntOverflowIntrospection
- }
- if iNdEx >= l {
- return io.ErrUnexpectedEOF
- }
- b := dAtA[iNdEx]
- iNdEx++
- stringLen |= uint64(b&0x7F) << shift
- if b < 0x80 {
- break
- }
- }
- intStringLen := int(stringLen)
- if intStringLen < 0 {
- return ErrInvalidLengthIntrospection
- }
- postIndex := iNdEx + intStringLen
- if postIndex < 0 {
- return ErrInvalidLengthIntrospection
- }
- if postIndex > l {
- return io.ErrUnexpectedEOF
- }
- m.Version = string(dAtA[iNdEx:postIndex])
- iNdEx = postIndex
- case 3:
- if wireType != 2 {
- return fmt.Errorf("proto: wrong wireType = %d for field Platform", wireType)
- }
- var stringLen uint64
- for shift := uint(0); ; shift += 7 {
- if shift >= 64 {
- return ErrIntOverflowIntrospection
- }
- if iNdEx >= l {
- return io.ErrUnexpectedEOF
- }
- b := dAtA[iNdEx]
- iNdEx++
- stringLen |= uint64(b&0x7F) << shift
- if b < 0x80 {
- break
- }
- }
- intStringLen := int(stringLen)
- if intStringLen < 0 {
- return ErrInvalidLengthIntrospection
- }
- postIndex := iNdEx + intStringLen
- if postIndex < 0 {
- return ErrInvalidLengthIntrospection
- }
- if postIndex > l {
- return io.ErrUnexpectedEOF
- }
- m.Platform = string(dAtA[iNdEx:postIndex])
- iNdEx = postIndex
- case 4:
- if wireType != 2 {
- return fmt.Errorf("proto: wrong wireType = %d for field PeerId", wireType)
- }
- var stringLen uint64
- for shift := uint(0); ; shift += 7 {
- if shift >= 64 {
- return ErrIntOverflowIntrospection
- }
- if iNdEx >= l {
- return io.ErrUnexpectedEOF
- }
- b := dAtA[iNdEx]
- iNdEx++
- stringLen |= uint64(b&0x7F) << shift
- if b < 0x80 {
- break
- }
- }
- intStringLen := int(stringLen)
- if intStringLen < 0 {
- return ErrInvalidLengthIntrospection
- }
- postIndex := iNdEx + intStringLen
- if postIndex < 0 {
- return ErrInvalidLengthIntrospection
- }
- if postIndex > l {
- return io.ErrUnexpectedEOF
- }
- m.PeerId = string(dAtA[iNdEx:postIndex])
- iNdEx = postIndex
- case 7:
- if wireType != 2 {
- return fmt.Errorf("proto: wrong wireType = %d for field EventTypes", wireType)
- }
- var msglen int
- for shift := uint(0); ; shift += 7 {
- if shift >= 64 {
- return ErrIntOverflowIntrospection
- }
- if iNdEx >= l {
- return io.ErrUnexpectedEOF
- }
- b := dAtA[iNdEx]
- iNdEx++
- msglen |= int(b&0x7F) << shift
- if b < 0x80 {
- break
- }
- }
- if msglen < 0 {
- return ErrInvalidLengthIntrospection
- }
- postIndex := iNdEx + msglen
- if postIndex < 0 {
- return ErrInvalidLengthIntrospection
- }
- if postIndex > l {
- return io.ErrUnexpectedEOF
- }
- m.EventTypes = append(m.EventTypes, &EventType{})
- if err := m.EventTypes[len(m.EventTypes)-1].Unmarshal(dAtA[iNdEx:postIndex]); err != nil {
- return err
- }
- iNdEx = postIndex
- default:
- iNdEx = preIndex
- skippy, err := skipIntrospection(dAtA[iNdEx:])
- if err != nil {
- return err
- }
- if skippy < 0 {
- return ErrInvalidLengthIntrospection
- }
- if (iNdEx + skippy) < 0 {
- return ErrInvalidLengthIntrospection
- }
- if (iNdEx + skippy) > l {
- return io.ErrUnexpectedEOF
- }
- iNdEx += skippy
- }
- }
-
- if iNdEx > l {
- return io.ErrUnexpectedEOF
- }
- return nil
-}
-func (m *EndpointPair) Unmarshal(dAtA []byte) error {
- l := len(dAtA)
- iNdEx := 0
- for iNdEx < l {
- preIndex := iNdEx
- var wire uint64
- for shift := uint(0); ; shift += 7 {
- if shift >= 64 {
- return ErrIntOverflowIntrospection
- }
- if iNdEx >= l {
- return io.ErrUnexpectedEOF
- }
- b := dAtA[iNdEx]
- iNdEx++
- wire |= uint64(b&0x7F) << shift
- if b < 0x80 {
- break
- }
- }
- fieldNum := int32(wire >> 3)
- wireType := int(wire & 0x7)
- if wireType == 4 {
- return fmt.Errorf("proto: EndpointPair: wiretype end group for non-group")
- }
- if fieldNum <= 0 {
- return fmt.Errorf("proto: EndpointPair: illegal tag %d (wire type %d)", fieldNum, wire)
- }
- switch fieldNum {
- case 1:
- if wireType != 2 {
- return fmt.Errorf("proto: wrong wireType = %d for field SrcMultiaddr", wireType)
- }
- var stringLen uint64
- for shift := uint(0); ; shift += 7 {
- if shift >= 64 {
- return ErrIntOverflowIntrospection
- }
- if iNdEx >= l {
- return io.ErrUnexpectedEOF
- }
- b := dAtA[iNdEx]
- iNdEx++
- stringLen |= uint64(b&0x7F) << shift
- if b < 0x80 {
- break
- }
- }
- intStringLen := int(stringLen)
- if intStringLen < 0 {
- return ErrInvalidLengthIntrospection
- }
- postIndex := iNdEx + intStringLen
- if postIndex < 0 {
- return ErrInvalidLengthIntrospection
- }
- if postIndex > l {
- return io.ErrUnexpectedEOF
- }
- m.SrcMultiaddr = string(dAtA[iNdEx:postIndex])
- iNdEx = postIndex
- case 2:
- if wireType != 2 {
- return fmt.Errorf("proto: wrong wireType = %d for field DstMultiaddr", wireType)
- }
- var stringLen uint64
- for shift := uint(0); ; shift += 7 {
- if shift >= 64 {
- return ErrIntOverflowIntrospection
- }
- if iNdEx >= l {
- return io.ErrUnexpectedEOF
- }
- b := dAtA[iNdEx]
- iNdEx++
- stringLen |= uint64(b&0x7F) << shift
- if b < 0x80 {
- break
- }
- }
- intStringLen := int(stringLen)
- if intStringLen < 0 {
- return ErrInvalidLengthIntrospection
- }
- postIndex := iNdEx + intStringLen
- if postIndex < 0 {
- return ErrInvalidLengthIntrospection
- }
- if postIndex > l {
- return io.ErrUnexpectedEOF
- }
- m.DstMultiaddr = string(dAtA[iNdEx:postIndex])
- iNdEx = postIndex
- default:
- iNdEx = preIndex
- skippy, err := skipIntrospection(dAtA[iNdEx:])
- if err != nil {
- return err
- }
- if skippy < 0 {
- return ErrInvalidLengthIntrospection
- }
- if (iNdEx + skippy) < 0 {
- return ErrInvalidLengthIntrospection
- }
- if (iNdEx + skippy) > l {
- return io.ErrUnexpectedEOF
- }
- iNdEx += skippy
- }
- }
-
- if iNdEx > l {
- return io.ErrUnexpectedEOF
- }
- return nil
-}
-func (m *Traffic) Unmarshal(dAtA []byte) error {
- l := len(dAtA)
- iNdEx := 0
- for iNdEx < l {
- preIndex := iNdEx
- var wire uint64
- for shift := uint(0); ; shift += 7 {
- if shift >= 64 {
- return ErrIntOverflowIntrospection
- }
- if iNdEx >= l {
- return io.ErrUnexpectedEOF
- }
- b := dAtA[iNdEx]
- iNdEx++
- wire |= uint64(b&0x7F) << shift
- if b < 0x80 {
- break
- }
- }
- fieldNum := int32(wire >> 3)
- wireType := int(wire & 0x7)
- if wireType == 4 {
- return fmt.Errorf("proto: Traffic: wiretype end group for non-group")
- }
- if fieldNum <= 0 {
- return fmt.Errorf("proto: Traffic: illegal tag %d (wire type %d)", fieldNum, wire)
- }
- switch fieldNum {
- case 1:
- if wireType != 2 {
- return fmt.Errorf("proto: wrong wireType = %d for field TrafficIn", wireType)
- }
- var msglen int
- for shift := uint(0); ; shift += 7 {
- if shift >= 64 {
- return ErrIntOverflowIntrospection
- }
- if iNdEx >= l {
- return io.ErrUnexpectedEOF
- }
- b := dAtA[iNdEx]
- iNdEx++
- msglen |= int(b&0x7F) << shift
- if b < 0x80 {
- break
- }
- }
- if msglen < 0 {
- return ErrInvalidLengthIntrospection
- }
- postIndex := iNdEx + msglen
- if postIndex < 0 {
- return ErrInvalidLengthIntrospection
- }
- if postIndex > l {
- return io.ErrUnexpectedEOF
- }
- if m.TrafficIn == nil {
- m.TrafficIn = &DataGauge{}
- }
- if err := m.TrafficIn.Unmarshal(dAtA[iNdEx:postIndex]); err != nil {
- return err
- }
- iNdEx = postIndex
- case 2:
- if wireType != 2 {
- return fmt.Errorf("proto: wrong wireType = %d for field TrafficOut", wireType)
- }
- var msglen int
- for shift := uint(0); ; shift += 7 {
- if shift >= 64 {
- return ErrIntOverflowIntrospection
- }
- if iNdEx >= l {
- return io.ErrUnexpectedEOF
- }
- b := dAtA[iNdEx]
- iNdEx++
- msglen |= int(b&0x7F) << shift
- if b < 0x80 {
- break
- }
- }
- if msglen < 0 {
- return ErrInvalidLengthIntrospection
- }
- postIndex := iNdEx + msglen
- if postIndex < 0 {
- return ErrInvalidLengthIntrospection
- }
- if postIndex > l {
- return io.ErrUnexpectedEOF
- }
- if m.TrafficOut == nil {
- m.TrafficOut = &DataGauge{}
- }
- if err := m.TrafficOut.Unmarshal(dAtA[iNdEx:postIndex]); err != nil {
- return err
- }
- iNdEx = postIndex
- default:
- iNdEx = preIndex
- skippy, err := skipIntrospection(dAtA[iNdEx:])
- if err != nil {
- return err
- }
- if skippy < 0 {
- return ErrInvalidLengthIntrospection
- }
- if (iNdEx + skippy) < 0 {
- return ErrInvalidLengthIntrospection
- }
- if (iNdEx + skippy) > l {
- return io.ErrUnexpectedEOF
- }
- iNdEx += skippy
- }
- }
-
- if iNdEx > l {
- return io.ErrUnexpectedEOF
- }
- return nil
-}
-func (m *StreamList) Unmarshal(dAtA []byte) error {
- l := len(dAtA)
- iNdEx := 0
- for iNdEx < l {
- preIndex := iNdEx
- var wire uint64
- for shift := uint(0); ; shift += 7 {
- if shift >= 64 {
- return ErrIntOverflowIntrospection
- }
- if iNdEx >= l {
- return io.ErrUnexpectedEOF
- }
- b := dAtA[iNdEx]
- iNdEx++
- wire |= uint64(b&0x7F) << shift
- if b < 0x80 {
- break
- }
- }
- fieldNum := int32(wire >> 3)
- wireType := int(wire & 0x7)
- if wireType == 4 {
- return fmt.Errorf("proto: StreamList: wiretype end group for non-group")
- }
- if fieldNum <= 0 {
- return fmt.Errorf("proto: StreamList: illegal tag %d (wire type %d)", fieldNum, wire)
- }
- switch fieldNum {
- case 1:
- if wireType != 2 {
- return fmt.Errorf("proto: wrong wireType = %d for field StreamIds", wireType)
- }
- var byteLen int
- for shift := uint(0); ; shift += 7 {
- if shift >= 64 {
- return ErrIntOverflowIntrospection
- }
- if iNdEx >= l {
- return io.ErrUnexpectedEOF
- }
- b := dAtA[iNdEx]
- iNdEx++
- byteLen |= int(b&0x7F) << shift
- if b < 0x80 {
- break
- }
- }
- if byteLen < 0 {
- return ErrInvalidLengthIntrospection
- }
- postIndex := iNdEx + byteLen
- if postIndex < 0 {
- return ErrInvalidLengthIntrospection
- }
- if postIndex > l {
- return io.ErrUnexpectedEOF
- }
- m.StreamIds = append(m.StreamIds, make([]byte, postIndex-iNdEx))
- copy(m.StreamIds[len(m.StreamIds)-1], dAtA[iNdEx:postIndex])
- iNdEx = postIndex
- case 2:
- if wireType != 2 {
- return fmt.Errorf("proto: wrong wireType = %d for field Streams", wireType)
- }
- var msglen int
- for shift := uint(0); ; shift += 7 {
- if shift >= 64 {
- return ErrIntOverflowIntrospection
- }
- if iNdEx >= l {
- return io.ErrUnexpectedEOF
- }
- b := dAtA[iNdEx]
- iNdEx++
- msglen |= int(b&0x7F) << shift
- if b < 0x80 {
- break
- }
- }
- if msglen < 0 {
- return ErrInvalidLengthIntrospection
- }
- postIndex := iNdEx + msglen
- if postIndex < 0 {
- return ErrInvalidLengthIntrospection
- }
- if postIndex > l {
- return io.ErrUnexpectedEOF
- }
- m.Streams = append(m.Streams, &Stream{})
- if err := m.Streams[len(m.Streams)-1].Unmarshal(dAtA[iNdEx:postIndex]); err != nil {
- return err
- }
- iNdEx = postIndex
- default:
- iNdEx = preIndex
- skippy, err := skipIntrospection(dAtA[iNdEx:])
- if err != nil {
- return err
- }
- if skippy < 0 {
- return ErrInvalidLengthIntrospection
- }
- if (iNdEx + skippy) < 0 {
- return ErrInvalidLengthIntrospection
- }
- if (iNdEx + skippy) > l {
- return io.ErrUnexpectedEOF
- }
- iNdEx += skippy
- }
- }
-
- if iNdEx > l {
- return io.ErrUnexpectedEOF
- }
- return nil
-}
-func (m *Connection) Unmarshal(dAtA []byte) error {
- l := len(dAtA)
- iNdEx := 0
- for iNdEx < l {
- preIndex := iNdEx
- var wire uint64
- for shift := uint(0); ; shift += 7 {
- if shift >= 64 {
- return ErrIntOverflowIntrospection
- }
- if iNdEx >= l {
- return io.ErrUnexpectedEOF
- }
- b := dAtA[iNdEx]
- iNdEx++
- wire |= uint64(b&0x7F) << shift
- if b < 0x80 {
- break
- }
- }
- fieldNum := int32(wire >> 3)
- wireType := int(wire & 0x7)
- if wireType == 4 {
- return fmt.Errorf("proto: Connection: wiretype end group for non-group")
- }
- if fieldNum <= 0 {
- return fmt.Errorf("proto: Connection: illegal tag %d (wire type %d)", fieldNum, wire)
- }
- switch fieldNum {
- case 1:
- if wireType != 2 {
- return fmt.Errorf("proto: wrong wireType = %d for field Id", wireType)
- }
- var byteLen int
- for shift := uint(0); ; shift += 7 {
- if shift >= 64 {
- return ErrIntOverflowIntrospection
- }
- if iNdEx >= l {
- return io.ErrUnexpectedEOF
- }
- b := dAtA[iNdEx]
- iNdEx++
- byteLen |= int(b&0x7F) << shift
- if b < 0x80 {
- break
- }
- }
- if byteLen < 0 {
- return ErrInvalidLengthIntrospection
- }
- postIndex := iNdEx + byteLen
- if postIndex < 0 {
- return ErrInvalidLengthIntrospection
- }
- if postIndex > l {
- return io.ErrUnexpectedEOF
- }
- m.Id = append(m.Id[:0], dAtA[iNdEx:postIndex]...)
- if m.Id == nil {
- m.Id = []byte{}
- }
- iNdEx = postIndex
- case 2:
- if wireType != 2 {
- return fmt.Errorf("proto: wrong wireType = %d for field PeerId", wireType)
- }
- var stringLen uint64
- for shift := uint(0); ; shift += 7 {
- if shift >= 64 {
- return ErrIntOverflowIntrospection
- }
- if iNdEx >= l {
- return io.ErrUnexpectedEOF
- }
- b := dAtA[iNdEx]
- iNdEx++
- stringLen |= uint64(b&0x7F) << shift
- if b < 0x80 {
- break
- }
- }
- intStringLen := int(stringLen)
- if intStringLen < 0 {
- return ErrInvalidLengthIntrospection
- }
- postIndex := iNdEx + intStringLen
- if postIndex < 0 {
- return ErrInvalidLengthIntrospection
- }
- if postIndex > l {
- return io.ErrUnexpectedEOF
- }
- m.PeerId = string(dAtA[iNdEx:postIndex])
- iNdEx = postIndex
- case 3:
- if wireType != 0 {
- return fmt.Errorf("proto: wrong wireType = %d for field Status", wireType)
- }
- m.Status = 0
- for shift := uint(0); ; shift += 7 {
- if shift >= 64 {
- return ErrIntOverflowIntrospection
- }
- if iNdEx >= l {
- return io.ErrUnexpectedEOF
- }
- b := dAtA[iNdEx]
- iNdEx++
- m.Status |= Status(b&0x7F) << shift
- if b < 0x80 {
- break
- }
- }
- case 4:
- if wireType != 2 {
- return fmt.Errorf("proto: wrong wireType = %d for field TransportId", wireType)
- }
- var byteLen int
- for shift := uint(0); ; shift += 7 {
- if shift >= 64 {
- return ErrIntOverflowIntrospection
- }
- if iNdEx >= l {
- return io.ErrUnexpectedEOF
- }
- b := dAtA[iNdEx]
- iNdEx++
- byteLen |= int(b&0x7F) << shift
- if b < 0x80 {
- break
- }
- }
- if byteLen < 0 {
- return ErrInvalidLengthIntrospection
- }
- postIndex := iNdEx + byteLen
- if postIndex < 0 {
- return ErrInvalidLengthIntrospection
- }
- if postIndex > l {
- return io.ErrUnexpectedEOF
- }
- m.TransportId = append(m.TransportId[:0], dAtA[iNdEx:postIndex]...)
- if m.TransportId == nil {
- m.TransportId = []byte{}
- }
- iNdEx = postIndex
- case 5:
- if wireType != 2 {
- return fmt.Errorf("proto: wrong wireType = %d for field Endpoints", wireType)
- }
- var msglen int
- for shift := uint(0); ; shift += 7 {
- if shift >= 64 {
- return ErrIntOverflowIntrospection
- }
- if iNdEx >= l {
- return io.ErrUnexpectedEOF
- }
- b := dAtA[iNdEx]
- iNdEx++
- msglen |= int(b&0x7F) << shift
- if b < 0x80 {
- break
- }
- }
- if msglen < 0 {
- return ErrInvalidLengthIntrospection
- }
- postIndex := iNdEx + msglen
- if postIndex < 0 {
- return ErrInvalidLengthIntrospection
- }
- if postIndex > l {
- return io.ErrUnexpectedEOF
- }
- if m.Endpoints == nil {
- m.Endpoints = &EndpointPair{}
- }
- if err := m.Endpoints.Unmarshal(dAtA[iNdEx:postIndex]); err != nil {
- return err
- }
- iNdEx = postIndex
- case 6:
- if wireType != 2 {
- return fmt.Errorf("proto: wrong wireType = %d for field Timeline", wireType)
- }
- var msglen int
- for shift := uint(0); ; shift += 7 {
- if shift >= 64 {
- return ErrIntOverflowIntrospection
- }
- if iNdEx >= l {
- return io.ErrUnexpectedEOF
- }
- b := dAtA[iNdEx]
- iNdEx++
- msglen |= int(b&0x7F) << shift
- if b < 0x80 {
- break
- }
- }
- if msglen < 0 {
- return ErrInvalidLengthIntrospection
- }
- postIndex := iNdEx + msglen
- if postIndex < 0 {
- return ErrInvalidLengthIntrospection
- }
- if postIndex > l {
- return io.ErrUnexpectedEOF
- }
- if m.Timeline == nil {
- m.Timeline = &Connection_Timeline{}
- }
- if err := m.Timeline.Unmarshal(dAtA[iNdEx:postIndex]); err != nil {
- return err
- }
- iNdEx = postIndex
- case 7:
- if wireType != 0 {
- return fmt.Errorf("proto: wrong wireType = %d for field Role", wireType)
- }
- m.Role = 0
- for shift := uint(0); ; shift += 7 {
- if shift >= 64 {
- return ErrIntOverflowIntrospection
- }
- if iNdEx >= l {
- return io.ErrUnexpectedEOF
- }
- b := dAtA[iNdEx]
- iNdEx++
- m.Role |= Role(b&0x7F) << shift
- if b < 0x80 {
- break
- }
- }
- case 8:
- if wireType != 2 {
- return fmt.Errorf("proto: wrong wireType = %d for field Traffic", wireType)
- }
- var msglen int
- for shift := uint(0); ; shift += 7 {
- if shift >= 64 {
- return ErrIntOverflowIntrospection
- }
- if iNdEx >= l {
- return io.ErrUnexpectedEOF
- }
- b := dAtA[iNdEx]
- iNdEx++
- msglen |= int(b&0x7F) << shift
- if b < 0x80 {
- break
- }
- }
- if msglen < 0 {
- return ErrInvalidLengthIntrospection
- }
- postIndex := iNdEx + msglen
- if postIndex < 0 {
- return ErrInvalidLengthIntrospection
- }
- if postIndex > l {
- return io.ErrUnexpectedEOF
- }
- if m.Traffic == nil {
- m.Traffic = &Traffic{}
- }
- if err := m.Traffic.Unmarshal(dAtA[iNdEx:postIndex]); err != nil {
- return err
- }
- iNdEx = postIndex
- case 9:
- if wireType != 2 {
- return fmt.Errorf("proto: wrong wireType = %d for field Attribs", wireType)
- }
- var msglen int
- for shift := uint(0); ; shift += 7 {
- if shift >= 64 {
- return ErrIntOverflowIntrospection
- }
- if iNdEx >= l {
- return io.ErrUnexpectedEOF
- }
- b := dAtA[iNdEx]
- iNdEx++
- msglen |= int(b&0x7F) << shift
- if b < 0x80 {
- break
- }
- }
- if msglen < 0 {
- return ErrInvalidLengthIntrospection
- }
- postIndex := iNdEx + msglen
- if postIndex < 0 {
- return ErrInvalidLengthIntrospection
- }
- if postIndex > l {
- return io.ErrUnexpectedEOF
- }
- if m.Attribs == nil {
- m.Attribs = &Connection_Attributes{}
- }
- if err := m.Attribs.Unmarshal(dAtA[iNdEx:postIndex]); err != nil {
- return err
- }
- iNdEx = postIndex
- case 10:
- if wireType != 0 {
- return fmt.Errorf("proto: wrong wireType = %d for field LatencyNs", wireType)
- }
- m.LatencyNs = 0
- for shift := uint(0); ; shift += 7 {
- if shift >= 64 {
- return ErrIntOverflowIntrospection
- }
- if iNdEx >= l {
- return io.ErrUnexpectedEOF
- }
- b := dAtA[iNdEx]
- iNdEx++
- m.LatencyNs |= uint64(b&0x7F) << shift
- if b < 0x80 {
- break
- }
- }
- case 11:
- if wireType != 2 {
- return fmt.Errorf("proto: wrong wireType = %d for field Streams", wireType)
- }
- var msglen int
- for shift := uint(0); ; shift += 7 {
- if shift >= 64 {
- return ErrIntOverflowIntrospection
- }
- if iNdEx >= l {
- return io.ErrUnexpectedEOF
- }
- b := dAtA[iNdEx]
- iNdEx++
- msglen |= int(b&0x7F) << shift
- if b < 0x80 {
- break
- }
- }
- if msglen < 0 {
- return ErrInvalidLengthIntrospection
- }
- postIndex := iNdEx + msglen
- if postIndex < 0 {
- return ErrInvalidLengthIntrospection
- }
- if postIndex > l {
- return io.ErrUnexpectedEOF
- }
- if m.Streams == nil {
- m.Streams = &StreamList{}
- }
- if err := m.Streams.Unmarshal(dAtA[iNdEx:postIndex]); err != nil {
- return err
- }
- iNdEx = postIndex
- case 16:
- if wireType != 2 {
- return fmt.Errorf("proto: wrong wireType = %d for field ConnId", wireType)
- }
- var byteLen int
- for shift := uint(0); ; shift += 7 {
- if shift >= 64 {
- return ErrIntOverflowIntrospection
- }
- if iNdEx >= l {
- return io.ErrUnexpectedEOF
- }
- b := dAtA[iNdEx]
- iNdEx++
- byteLen |= int(b&0x7F) << shift
- if b < 0x80 {
- break
- }
- }
- if byteLen < 0 {
- return ErrInvalidLengthIntrospection
- }
- postIndex := iNdEx + byteLen
- if postIndex < 0 {
- return ErrInvalidLengthIntrospection
- }
- if postIndex > l {
- return io.ErrUnexpectedEOF
- }
- v := make([]byte, postIndex-iNdEx)
- copy(v, dAtA[iNdEx:postIndex])
- m.RelayedOver = &Connection_ConnId{v}
- iNdEx = postIndex
- case 17:
- if wireType != 2 {
- return fmt.Errorf("proto: wrong wireType = %d for field Conn", wireType)
- }
- var msglen int
- for shift := uint(0); ; shift += 7 {
- if shift >= 64 {
- return ErrIntOverflowIntrospection
- }
- if iNdEx >= l {
- return io.ErrUnexpectedEOF
- }
- b := dAtA[iNdEx]
- iNdEx++
- msglen |= int(b&0x7F) << shift
- if b < 0x80 {
- break
- }
- }
- if msglen < 0 {
- return ErrInvalidLengthIntrospection
- }
- postIndex := iNdEx + msglen
- if postIndex < 0 {
- return ErrInvalidLengthIntrospection
- }
- if postIndex > l {
- return io.ErrUnexpectedEOF
- }
- v := &Connection{}
- if err := v.Unmarshal(dAtA[iNdEx:postIndex]); err != nil {
- return err
- }
- m.RelayedOver = &Connection_Conn{v}
- iNdEx = postIndex
- case 99:
- if wireType != 2 {
- return fmt.Errorf("proto: wrong wireType = %d for field UserProvidedTags", wireType)
- }
- var stringLen uint64
- for shift := uint(0); ; shift += 7 {
- if shift >= 64 {
- return ErrIntOverflowIntrospection
- }
- if iNdEx >= l {
- return io.ErrUnexpectedEOF
- }
- b := dAtA[iNdEx]
- iNdEx++
- stringLen |= uint64(b&0x7F) << shift
- if b < 0x80 {
- break
- }
- }
- intStringLen := int(stringLen)
- if intStringLen < 0 {
- return ErrInvalidLengthIntrospection
- }
- postIndex := iNdEx + intStringLen
- if postIndex < 0 {
- return ErrInvalidLengthIntrospection
- }
- if postIndex > l {
- return io.ErrUnexpectedEOF
- }
- m.UserProvidedTags = append(m.UserProvidedTags, string(dAtA[iNdEx:postIndex]))
- iNdEx = postIndex
- default:
- iNdEx = preIndex
- skippy, err := skipIntrospection(dAtA[iNdEx:])
- if err != nil {
- return err
- }
- if skippy < 0 {
- return ErrInvalidLengthIntrospection
- }
- if (iNdEx + skippy) < 0 {
- return ErrInvalidLengthIntrospection
- }
- if (iNdEx + skippy) > l {
- return io.ErrUnexpectedEOF
- }
- iNdEx += skippy
- }
- }
-
- if iNdEx > l {
- return io.ErrUnexpectedEOF
- }
- return nil
-}
-func (m *Connection_Timeline) Unmarshal(dAtA []byte) error {
- l := len(dAtA)
- iNdEx := 0
- for iNdEx < l {
- preIndex := iNdEx
- var wire uint64
- for shift := uint(0); ; shift += 7 {
- if shift >= 64 {
- return ErrIntOverflowIntrospection
- }
- if iNdEx >= l {
- return io.ErrUnexpectedEOF
- }
- b := dAtA[iNdEx]
- iNdEx++
- wire |= uint64(b&0x7F) << shift
- if b < 0x80 {
- break
- }
- }
- fieldNum := int32(wire >> 3)
- wireType := int(wire & 0x7)
- if wireType == 4 {
- return fmt.Errorf("proto: Timeline: wiretype end group for non-group")
- }
- if fieldNum <= 0 {
- return fmt.Errorf("proto: Timeline: illegal tag %d (wire type %d)", fieldNum, wire)
- }
- switch fieldNum {
- case 1:
- if wireType != 0 {
- return fmt.Errorf("proto: wrong wireType = %d for field OpenTs", wireType)
- }
- m.OpenTs = 0
- for shift := uint(0); ; shift += 7 {
- if shift >= 64 {
- return ErrIntOverflowIntrospection
- }
- if iNdEx >= l {
- return io.ErrUnexpectedEOF
- }
- b := dAtA[iNdEx]
- iNdEx++
- m.OpenTs |= uint64(b&0x7F) << shift
- if b < 0x80 {
- break
- }
- }
- case 2:
- if wireType != 0 {
- return fmt.Errorf("proto: wrong wireType = %d for field UpgradedTs", wireType)
- }
- m.UpgradedTs = 0
- for shift := uint(0); ; shift += 7 {
- if shift >= 64 {
- return ErrIntOverflowIntrospection
- }
- if iNdEx >= l {
- return io.ErrUnexpectedEOF
- }
- b := dAtA[iNdEx]
- iNdEx++
- m.UpgradedTs |= uint64(b&0x7F) << shift
- if b < 0x80 {
- break
- }
- }
- case 3:
- if wireType != 0 {
- return fmt.Errorf("proto: wrong wireType = %d for field CloseTs", wireType)
- }
- m.CloseTs = 0
- for shift := uint(0); ; shift += 7 {
- if shift >= 64 {
- return ErrIntOverflowIntrospection
- }
- if iNdEx >= l {
- return io.ErrUnexpectedEOF
- }
- b := dAtA[iNdEx]
- iNdEx++
- m.CloseTs |= uint64(b&0x7F) << shift
- if b < 0x80 {
- break
- }
- }
- default:
- iNdEx = preIndex
- skippy, err := skipIntrospection(dAtA[iNdEx:])
- if err != nil {
- return err
- }
- if skippy < 0 {
- return ErrInvalidLengthIntrospection
- }
- if (iNdEx + skippy) < 0 {
- return ErrInvalidLengthIntrospection
- }
- if (iNdEx + skippy) > l {
- return io.ErrUnexpectedEOF
- }
- iNdEx += skippy
- }
- }
-
- if iNdEx > l {
- return io.ErrUnexpectedEOF
- }
- return nil
-}
-func (m *Connection_Attributes) Unmarshal(dAtA []byte) error {
- l := len(dAtA)
- iNdEx := 0
- for iNdEx < l {
- preIndex := iNdEx
- var wire uint64
- for shift := uint(0); ; shift += 7 {
- if shift >= 64 {
- return ErrIntOverflowIntrospection
- }
- if iNdEx >= l {
- return io.ErrUnexpectedEOF
- }
- b := dAtA[iNdEx]
- iNdEx++
- wire |= uint64(b&0x7F) << shift
- if b < 0x80 {
- break
- }
- }
- fieldNum := int32(wire >> 3)
- wireType := int(wire & 0x7)
- if wireType == 4 {
- return fmt.Errorf("proto: Attributes: wiretype end group for non-group")
- }
- if fieldNum <= 0 {
- return fmt.Errorf("proto: Attributes: illegal tag %d (wire type %d)", fieldNum, wire)
- }
- switch fieldNum {
- case 1:
- if wireType != 2 {
- return fmt.Errorf("proto: wrong wireType = %d for field Multiplexer", wireType)
- }
- var stringLen uint64
- for shift := uint(0); ; shift += 7 {
- if shift >= 64 {
- return ErrIntOverflowIntrospection
- }
- if iNdEx >= l {
- return io.ErrUnexpectedEOF
- }
- b := dAtA[iNdEx]
- iNdEx++
- stringLen |= uint64(b&0x7F) << shift
- if b < 0x80 {
- break
- }
- }
- intStringLen := int(stringLen)
- if intStringLen < 0 {
- return ErrInvalidLengthIntrospection
- }
- postIndex := iNdEx + intStringLen
- if postIndex < 0 {
- return ErrInvalidLengthIntrospection
- }
- if postIndex > l {
- return io.ErrUnexpectedEOF
- }
- m.Multiplexer = string(dAtA[iNdEx:postIndex])
- iNdEx = postIndex
- case 2:
- if wireType != 2 {
- return fmt.Errorf("proto: wrong wireType = %d for field Encryption", wireType)
- }
- var stringLen uint64
- for shift := uint(0); ; shift += 7 {
- if shift >= 64 {
- return ErrIntOverflowIntrospection
- }
- if iNdEx >= l {
- return io.ErrUnexpectedEOF
- }
- b := dAtA[iNdEx]
- iNdEx++
- stringLen |= uint64(b&0x7F) << shift
- if b < 0x80 {
- break
- }
- }
- intStringLen := int(stringLen)
- if intStringLen < 0 {
- return ErrInvalidLengthIntrospection
- }
- postIndex := iNdEx + intStringLen
- if postIndex < 0 {
- return ErrInvalidLengthIntrospection
- }
- if postIndex > l {
- return io.ErrUnexpectedEOF
- }
- m.Encryption = string(dAtA[iNdEx:postIndex])
- iNdEx = postIndex
- default:
- iNdEx = preIndex
- skippy, err := skipIntrospection(dAtA[iNdEx:])
- if err != nil {
- return err
- }
- if skippy < 0 {
- return ErrInvalidLengthIntrospection
- }
- if (iNdEx + skippy) < 0 {
- return ErrInvalidLengthIntrospection
- }
- if (iNdEx + skippy) > l {
- return io.ErrUnexpectedEOF
- }
- iNdEx += skippy
- }
- }
-
- if iNdEx > l {
- return io.ErrUnexpectedEOF
- }
- return nil
-}
-func (m *Stream) Unmarshal(dAtA []byte) error {
- l := len(dAtA)
- iNdEx := 0
- for iNdEx < l {
- preIndex := iNdEx
- var wire uint64
- for shift := uint(0); ; shift += 7 {
- if shift >= 64 {
- return ErrIntOverflowIntrospection
- }
- if iNdEx >= l {
- return io.ErrUnexpectedEOF
- }
- b := dAtA[iNdEx]
- iNdEx++
- wire |= uint64(b&0x7F) << shift
- if b < 0x80 {
- break
- }
- }
- fieldNum := int32(wire >> 3)
- wireType := int(wire & 0x7)
- if wireType == 4 {
- return fmt.Errorf("proto: Stream: wiretype end group for non-group")
- }
- if fieldNum <= 0 {
- return fmt.Errorf("proto: Stream: illegal tag %d (wire type %d)", fieldNum, wire)
- }
- switch fieldNum {
- case 1:
- if wireType != 2 {
- return fmt.Errorf("proto: wrong wireType = %d for field Id", wireType)
- }
- var byteLen int
- for shift := uint(0); ; shift += 7 {
- if shift >= 64 {
- return ErrIntOverflowIntrospection
- }
- if iNdEx >= l {
- return io.ErrUnexpectedEOF
- }
- b := dAtA[iNdEx]
- iNdEx++
- byteLen |= int(b&0x7F) << shift
- if b < 0x80 {
- break
- }
- }
- if byteLen < 0 {
- return ErrInvalidLengthIntrospection
- }
- postIndex := iNdEx + byteLen
- if postIndex < 0 {
- return ErrInvalidLengthIntrospection
- }
- if postIndex > l {
- return io.ErrUnexpectedEOF
- }
- m.Id = append(m.Id[:0], dAtA[iNdEx:postIndex]...)
- if m.Id == nil {
- m.Id = []byte{}
- }
- iNdEx = postIndex
- case 2:
- if wireType != 2 {
- return fmt.Errorf("proto: wrong wireType = %d for field Protocol", wireType)
- }
- var stringLen uint64
- for shift := uint(0); ; shift += 7 {
- if shift >= 64 {
- return ErrIntOverflowIntrospection
- }
- if iNdEx >= l {
- return io.ErrUnexpectedEOF
- }
- b := dAtA[iNdEx]
- iNdEx++
- stringLen |= uint64(b&0x7F) << shift
- if b < 0x80 {
- break
- }
- }
- intStringLen := int(stringLen)
- if intStringLen < 0 {
- return ErrInvalidLengthIntrospection
- }
- postIndex := iNdEx + intStringLen
- if postIndex < 0 {
- return ErrInvalidLengthIntrospection
- }
- if postIndex > l {
- return io.ErrUnexpectedEOF
- }
- m.Protocol = string(dAtA[iNdEx:postIndex])
- iNdEx = postIndex
- case 3:
- if wireType != 0 {
- return fmt.Errorf("proto: wrong wireType = %d for field Role", wireType)
- }
- m.Role = 0
- for shift := uint(0); ; shift += 7 {
- if shift >= 64 {
- return ErrIntOverflowIntrospection
- }
- if iNdEx >= l {
- return io.ErrUnexpectedEOF
- }
- b := dAtA[iNdEx]
- iNdEx++
- m.Role |= Role(b&0x7F) << shift
- if b < 0x80 {
- break
- }
- }
- case 4:
- if wireType != 2 {
- return fmt.Errorf("proto: wrong wireType = %d for field Traffic", wireType)
- }
- var msglen int
- for shift := uint(0); ; shift += 7 {
- if shift >= 64 {
- return ErrIntOverflowIntrospection
- }
- if iNdEx >= l {
- return io.ErrUnexpectedEOF
- }
- b := dAtA[iNdEx]
- iNdEx++
- msglen |= int(b&0x7F) << shift
- if b < 0x80 {
- break
- }
- }
- if msglen < 0 {
- return ErrInvalidLengthIntrospection
- }
- postIndex := iNdEx + msglen
- if postIndex < 0 {
- return ErrInvalidLengthIntrospection
- }
- if postIndex > l {
- return io.ErrUnexpectedEOF
- }
- if m.Traffic == nil {
- m.Traffic = &Traffic{}
- }
- if err := m.Traffic.Unmarshal(dAtA[iNdEx:postIndex]); err != nil {
- return err
- }
- iNdEx = postIndex
- case 5:
- if wireType != 2 {
- return fmt.Errorf("proto: wrong wireType = %d for field Conn", wireType)
- }
- var msglen int
- for shift := uint(0); ; shift += 7 {
- if shift >= 64 {
- return ErrIntOverflowIntrospection
- }
- if iNdEx >= l {
- return io.ErrUnexpectedEOF
- }
- b := dAtA[iNdEx]
- iNdEx++
- msglen |= int(b&0x7F) << shift
- if b < 0x80 {
- break
- }
- }
- if msglen < 0 {
- return ErrInvalidLengthIntrospection
- }
- postIndex := iNdEx + msglen
- if postIndex < 0 {
- return ErrInvalidLengthIntrospection
- }
- if postIndex > l {
- return io.ErrUnexpectedEOF
- }
- if m.Conn == nil {
- m.Conn = &Stream_ConnectionRef{}
- }
- if err := m.Conn.Unmarshal(dAtA[iNdEx:postIndex]); err != nil {
- return err
- }
- iNdEx = postIndex
- case 6:
- if wireType != 2 {
- return fmt.Errorf("proto: wrong wireType = %d for field Timeline", wireType)
- }
- var msglen int
- for shift := uint(0); ; shift += 7 {
- if shift >= 64 {
- return ErrIntOverflowIntrospection
- }
- if iNdEx >= l {
- return io.ErrUnexpectedEOF
- }
- b := dAtA[iNdEx]
- iNdEx++
- msglen |= int(b&0x7F) << shift
- if b < 0x80 {
- break
- }
- }
- if msglen < 0 {
- return ErrInvalidLengthIntrospection
- }
- postIndex := iNdEx + msglen
- if postIndex < 0 {
- return ErrInvalidLengthIntrospection
- }
- if postIndex > l {
- return io.ErrUnexpectedEOF
- }
- if m.Timeline == nil {
- m.Timeline = &Stream_Timeline{}
- }
- if err := m.Timeline.Unmarshal(dAtA[iNdEx:postIndex]); err != nil {
- return err
- }
- iNdEx = postIndex
- case 7:
- if wireType != 0 {
- return fmt.Errorf("proto: wrong wireType = %d for field Status", wireType)
- }
- m.Status = 0
- for shift := uint(0); ; shift += 7 {
- if shift >= 64 {
- return ErrIntOverflowIntrospection
- }
- if iNdEx >= l {
- return io.ErrUnexpectedEOF
- }
- b := dAtA[iNdEx]
- iNdEx++
- m.Status |= Status(b&0x7F) << shift
- if b < 0x80 {
- break
- }
- }
- case 16:
- if wireType != 0 {
- return fmt.Errorf("proto: wrong wireType = %d for field LatencyNs", wireType)
- }
- m.LatencyNs = 0
- for shift := uint(0); ; shift += 7 {
- if shift >= 64 {
- return ErrIntOverflowIntrospection
- }
- if iNdEx >= l {
- return io.ErrUnexpectedEOF
- }
- b := dAtA[iNdEx]
- iNdEx++
- m.LatencyNs |= uint64(b&0x7F) << shift
- if b < 0x80 {
- break
- }
- }
- case 99:
- if wireType != 2 {
- return fmt.Errorf("proto: wrong wireType = %d for field UserProvidedTags", wireType)
- }
- var stringLen uint64
- for shift := uint(0); ; shift += 7 {
- if shift >= 64 {
- return ErrIntOverflowIntrospection
- }
- if iNdEx >= l {
- return io.ErrUnexpectedEOF
- }
- b := dAtA[iNdEx]
- iNdEx++
- stringLen |= uint64(b&0x7F) << shift
- if b < 0x80 {
- break
- }
- }
- intStringLen := int(stringLen)
- if intStringLen < 0 {
- return ErrInvalidLengthIntrospection
- }
- postIndex := iNdEx + intStringLen
- if postIndex < 0 {
- return ErrInvalidLengthIntrospection
- }
- if postIndex > l {
- return io.ErrUnexpectedEOF
- }
- m.UserProvidedTags = append(m.UserProvidedTags, string(dAtA[iNdEx:postIndex]))
- iNdEx = postIndex
- default:
- iNdEx = preIndex
- skippy, err := skipIntrospection(dAtA[iNdEx:])
- if err != nil {
- return err
- }
- if skippy < 0 {
- return ErrInvalidLengthIntrospection
- }
- if (iNdEx + skippy) < 0 {
- return ErrInvalidLengthIntrospection
- }
- if (iNdEx + skippy) > l {
- return io.ErrUnexpectedEOF
- }
- iNdEx += skippy
- }
- }
-
- if iNdEx > l {
- return io.ErrUnexpectedEOF
- }
- return nil
-}
-func (m *Stream_ConnectionRef) Unmarshal(dAtA []byte) error {
- l := len(dAtA)
- iNdEx := 0
- for iNdEx < l {
- preIndex := iNdEx
- var wire uint64
- for shift := uint(0); ; shift += 7 {
- if shift >= 64 {
- return ErrIntOverflowIntrospection
- }
- if iNdEx >= l {
- return io.ErrUnexpectedEOF
- }
- b := dAtA[iNdEx]
- iNdEx++
- wire |= uint64(b&0x7F) << shift
- if b < 0x80 {
- break
- }
- }
- fieldNum := int32(wire >> 3)
- wireType := int(wire & 0x7)
- if wireType == 4 {
- return fmt.Errorf("proto: ConnectionRef: wiretype end group for non-group")
- }
- if fieldNum <= 0 {
- return fmt.Errorf("proto: ConnectionRef: illegal tag %d (wire type %d)", fieldNum, wire)
- }
- switch fieldNum {
- case 1:
- if wireType != 2 {
- return fmt.Errorf("proto: wrong wireType = %d for field Conn", wireType)
- }
- var msglen int
- for shift := uint(0); ; shift += 7 {
- if shift >= 64 {
- return ErrIntOverflowIntrospection
- }
- if iNdEx >= l {
- return io.ErrUnexpectedEOF
- }
- b := dAtA[iNdEx]
- iNdEx++
- msglen |= int(b&0x7F) << shift
- if b < 0x80 {
- break
- }
- }
- if msglen < 0 {
- return ErrInvalidLengthIntrospection
- }
- postIndex := iNdEx + msglen
- if postIndex < 0 {
- return ErrInvalidLengthIntrospection
- }
- if postIndex > l {
- return io.ErrUnexpectedEOF
- }
- v := &Connection{}
- if err := v.Unmarshal(dAtA[iNdEx:postIndex]); err != nil {
- return err
- }
- m.Connection = &Stream_ConnectionRef_Conn{v}
- iNdEx = postIndex
- case 2:
- if wireType != 2 {
- return fmt.Errorf("proto: wrong wireType = %d for field ConnId", wireType)
- }
- var byteLen int
- for shift := uint(0); ; shift += 7 {
- if shift >= 64 {
- return ErrIntOverflowIntrospection
- }
- if iNdEx >= l {
- return io.ErrUnexpectedEOF
- }
- b := dAtA[iNdEx]
- iNdEx++
- byteLen |= int(b&0x7F) << shift
- if b < 0x80 {
- break
- }
- }
- if byteLen < 0 {
- return ErrInvalidLengthIntrospection
- }
- postIndex := iNdEx + byteLen
- if postIndex < 0 {
- return ErrInvalidLengthIntrospection
- }
- if postIndex > l {
- return io.ErrUnexpectedEOF
- }
- v := make([]byte, postIndex-iNdEx)
- copy(v, dAtA[iNdEx:postIndex])
- m.Connection = &Stream_ConnectionRef_ConnId{v}
- iNdEx = postIndex
- default:
- iNdEx = preIndex
- skippy, err := skipIntrospection(dAtA[iNdEx:])
- if err != nil {
- return err
- }
- if skippy < 0 {
- return ErrInvalidLengthIntrospection
- }
- if (iNdEx + skippy) < 0 {
- return ErrInvalidLengthIntrospection
- }
- if (iNdEx + skippy) > l {
- return io.ErrUnexpectedEOF
- }
- iNdEx += skippy
- }
- }
-
- if iNdEx > l {
- return io.ErrUnexpectedEOF
- }
- return nil
-}
-func (m *Stream_Timeline) Unmarshal(dAtA []byte) error {
- l := len(dAtA)
- iNdEx := 0
- for iNdEx < l {
- preIndex := iNdEx
- var wire uint64
- for shift := uint(0); ; shift += 7 {
- if shift >= 64 {
- return ErrIntOverflowIntrospection
- }
- if iNdEx >= l {
- return io.ErrUnexpectedEOF
- }
- b := dAtA[iNdEx]
- iNdEx++
- wire |= uint64(b&0x7F) << shift
- if b < 0x80 {
- break
- }
- }
- fieldNum := int32(wire >> 3)
- wireType := int(wire & 0x7)
- if wireType == 4 {
- return fmt.Errorf("proto: Timeline: wiretype end group for non-group")
- }
- if fieldNum <= 0 {
- return fmt.Errorf("proto: Timeline: illegal tag %d (wire type %d)", fieldNum, wire)
- }
- switch fieldNum {
- case 1:
- if wireType != 0 {
- return fmt.Errorf("proto: wrong wireType = %d for field OpenTs", wireType)
- }
- m.OpenTs = 0
- for shift := uint(0); ; shift += 7 {
- if shift >= 64 {
- return ErrIntOverflowIntrospection
- }
- if iNdEx >= l {
- return io.ErrUnexpectedEOF
- }
- b := dAtA[iNdEx]
- iNdEx++
- m.OpenTs |= uint64(b&0x7F) << shift
- if b < 0x80 {
- break
- }
- }
- case 2:
- if wireType != 0 {
- return fmt.Errorf("proto: wrong wireType = %d for field CloseTs", wireType)
- }
- m.CloseTs = 0
- for shift := uint(0); ; shift += 7 {
- if shift >= 64 {
- return ErrIntOverflowIntrospection
- }
- if iNdEx >= l {
- return io.ErrUnexpectedEOF
- }
- b := dAtA[iNdEx]
- iNdEx++
- m.CloseTs |= uint64(b&0x7F) << shift
- if b < 0x80 {
- break
- }
- }
- default:
- iNdEx = preIndex
- skippy, err := skipIntrospection(dAtA[iNdEx:])
- if err != nil {
- return err
- }
- if skippy < 0 {
- return ErrInvalidLengthIntrospection
- }
- if (iNdEx + skippy) < 0 {
- return ErrInvalidLengthIntrospection
- }
- if (iNdEx + skippy) > l {
- return io.ErrUnexpectedEOF
- }
- iNdEx += skippy
- }
- }
-
- if iNdEx > l {
- return io.ErrUnexpectedEOF
- }
- return nil
-}
-func (m *DHT) Unmarshal(dAtA []byte) error {
- l := len(dAtA)
- iNdEx := 0
- for iNdEx < l {
- preIndex := iNdEx
- var wire uint64
- for shift := uint(0); ; shift += 7 {
- if shift >= 64 {
- return ErrIntOverflowIntrospection
- }
- if iNdEx >= l {
- return io.ErrUnexpectedEOF
- }
- b := dAtA[iNdEx]
- iNdEx++
- wire |= uint64(b&0x7F) << shift
- if b < 0x80 {
- break
- }
- }
- fieldNum := int32(wire >> 3)
- wireType := int(wire & 0x7)
- if wireType == 4 {
- return fmt.Errorf("proto: DHT: wiretype end group for non-group")
- }
- if fieldNum <= 0 {
- return fmt.Errorf("proto: DHT: illegal tag %d (wire type %d)", fieldNum, wire)
- }
- switch fieldNum {
- case 1:
- if wireType != 2 {
- return fmt.Errorf("proto: wrong wireType = %d for field Protocol", wireType)
- }
- var stringLen uint64
- for shift := uint(0); ; shift += 7 {
- if shift >= 64 {
- return ErrIntOverflowIntrospection
- }
- if iNdEx >= l {
- return io.ErrUnexpectedEOF
- }
- b := dAtA[iNdEx]
- iNdEx++
- stringLen |= uint64(b&0x7F) << shift
- if b < 0x80 {
- break
- }
- }
- intStringLen := int(stringLen)
- if intStringLen < 0 {
- return ErrInvalidLengthIntrospection
- }
- postIndex := iNdEx + intStringLen
- if postIndex < 0 {
- return ErrInvalidLengthIntrospection
- }
- if postIndex > l {
- return io.ErrUnexpectedEOF
- }
- m.Protocol = string(dAtA[iNdEx:postIndex])
- iNdEx = postIndex
- case 2:
- if wireType != 0 {
- return fmt.Errorf("proto: wrong wireType = %d for field Enabled", wireType)
- }
- var v int
- for shift := uint(0); ; shift += 7 {
- if shift >= 64 {
- return ErrIntOverflowIntrospection
- }
- if iNdEx >= l {
- return io.ErrUnexpectedEOF
- }
- b := dAtA[iNdEx]
- iNdEx++
- v |= int(b&0x7F) << shift
- if b < 0x80 {
- break
- }
- }
- m.Enabled = bool(v != 0)
- case 3:
- if wireType != 0 {
- return fmt.Errorf("proto: wrong wireType = %d for field StartTs", wireType)
- }
- m.StartTs = 0
- for shift := uint(0); ; shift += 7 {
- if shift >= 64 {
- return ErrIntOverflowIntrospection
- }
- if iNdEx >= l {
- return io.ErrUnexpectedEOF
- }
- b := dAtA[iNdEx]
- iNdEx++
- m.StartTs |= uint64(b&0x7F) << shift
- if b < 0x80 {
- break
- }
- }
- case 4:
- if wireType != 2 {
- return fmt.Errorf("proto: wrong wireType = %d for field Params", wireType)
- }
- var msglen int
- for shift := uint(0); ; shift += 7 {
- if shift >= 64 {
- return ErrIntOverflowIntrospection
- }
- if iNdEx >= l {
- return io.ErrUnexpectedEOF
- }
- b := dAtA[iNdEx]
- iNdEx++
- msglen |= int(b&0x7F) << shift
- if b < 0x80 {
- break
- }
- }
- if msglen < 0 {
- return ErrInvalidLengthIntrospection
- }
- postIndex := iNdEx + msglen
- if postIndex < 0 {
- return ErrInvalidLengthIntrospection
- }
- if postIndex > l {
- return io.ErrUnexpectedEOF
- }
- if m.Params == nil {
- m.Params = &DHT_Params{}
- }
- if err := m.Params.Unmarshal(dAtA[iNdEx:postIndex]); err != nil {
- return err
- }
- iNdEx = postIndex
- case 5:
- if wireType != 2 {
- return fmt.Errorf("proto: wrong wireType = %d for field Buckets", wireType)
- }
- var msglen int
- for shift := uint(0); ; shift += 7 {
- if shift >= 64 {
- return ErrIntOverflowIntrospection
- }
- if iNdEx >= l {
- return io.ErrUnexpectedEOF
- }
- b := dAtA[iNdEx]
- iNdEx++
- msglen |= int(b&0x7F) << shift
- if b < 0x80 {
- break
- }
- }
- if msglen < 0 {
- return ErrInvalidLengthIntrospection
- }
- postIndex := iNdEx + msglen
- if postIndex < 0 {
- return ErrInvalidLengthIntrospection
- }
- if postIndex > l {
- return io.ErrUnexpectedEOF
- }
- m.Buckets = append(m.Buckets, &DHT_Bucket{})
- if err := m.Buckets[len(m.Buckets)-1].Unmarshal(dAtA[iNdEx:postIndex]); err != nil {
- return err
- }
- iNdEx = postIndex
- case 6:
- if wireType != 2 {
- return fmt.Errorf("proto: wrong wireType = %d for field IncomingQueries", wireType)
- }
- var msglen int
- for shift := uint(0); ; shift += 7 {
- if shift >= 64 {
- return ErrIntOverflowIntrospection
- }
- if iNdEx >= l {
- return io.ErrUnexpectedEOF
- }
- b := dAtA[iNdEx]
- iNdEx++
- msglen |= int(b&0x7F) << shift
- if b < 0x80 {
- break
- }
- }
- if msglen < 0 {
- return ErrInvalidLengthIntrospection
- }
- postIndex := iNdEx + msglen
- if postIndex < 0 {
- return ErrInvalidLengthIntrospection
- }
- if postIndex > l {
- return io.ErrUnexpectedEOF
- }
- if m.IncomingQueries == nil {
- m.IncomingQueries = &DHT_QueryGauge{}
- }
- if err := m.IncomingQueries.Unmarshal(dAtA[iNdEx:postIndex]); err != nil {
- return err
- }
- iNdEx = postIndex
- case 7:
- if wireType != 2 {
- return fmt.Errorf("proto: wrong wireType = %d for field OutgoingQueries", wireType)
- }
- var msglen int
- for shift := uint(0); ; shift += 7 {
- if shift >= 64 {
- return ErrIntOverflowIntrospection
- }
- if iNdEx >= l {
- return io.ErrUnexpectedEOF
- }
- b := dAtA[iNdEx]
- iNdEx++
- msglen |= int(b&0x7F) << shift
- if b < 0x80 {
- break
- }
- }
- if msglen < 0 {
- return ErrInvalidLengthIntrospection
- }
- postIndex := iNdEx + msglen
- if postIndex < 0 {
- return ErrInvalidLengthIntrospection
- }
- if postIndex > l {
- return io.ErrUnexpectedEOF
- }
- if m.OutgoingQueries == nil {
- m.OutgoingQueries = &DHT_QueryGauge{}
- }
- if err := m.OutgoingQueries.Unmarshal(dAtA[iNdEx:postIndex]); err != nil {
- return err
- }
- iNdEx = postIndex
- default:
- iNdEx = preIndex
- skippy, err := skipIntrospection(dAtA[iNdEx:])
- if err != nil {
- return err
- }
- if skippy < 0 {
- return ErrInvalidLengthIntrospection
- }
- if (iNdEx + skippy) < 0 {
- return ErrInvalidLengthIntrospection
- }
- if (iNdEx + skippy) > l {
- return io.ErrUnexpectedEOF
- }
- iNdEx += skippy
- }
- }
-
- if iNdEx > l {
- return io.ErrUnexpectedEOF
- }
- return nil
-}
-func (m *DHT_Params) Unmarshal(dAtA []byte) error {
- l := len(dAtA)
- iNdEx := 0
- for iNdEx < l {
- preIndex := iNdEx
- var wire uint64
- for shift := uint(0); ; shift += 7 {
- if shift >= 64 {
- return ErrIntOverflowIntrospection
- }
- if iNdEx >= l {
- return io.ErrUnexpectedEOF
- }
- b := dAtA[iNdEx]
- iNdEx++
- wire |= uint64(b&0x7F) << shift
- if b < 0x80 {
- break
- }
- }
- fieldNum := int32(wire >> 3)
- wireType := int(wire & 0x7)
- if wireType == 4 {
- return fmt.Errorf("proto: Params: wiretype end group for non-group")
- }
- if fieldNum <= 0 {
- return fmt.Errorf("proto: Params: illegal tag %d (wire type %d)", fieldNum, wire)
- }
- switch fieldNum {
- case 1:
- if wireType != 0 {
- return fmt.Errorf("proto: wrong wireType = %d for field K", wireType)
- }
- m.K = 0
- for shift := uint(0); ; shift += 7 {
- if shift >= 64 {
- return ErrIntOverflowIntrospection
- }
- if iNdEx >= l {
- return io.ErrUnexpectedEOF
- }
- b := dAtA[iNdEx]
- iNdEx++
- m.K |= uint64(b&0x7F) << shift
- if b < 0x80 {
- break
- }
- }
- case 2:
- if wireType != 0 {
- return fmt.Errorf("proto: wrong wireType = %d for field Alpha", wireType)
- }
- m.Alpha = 0
- for shift := uint(0); ; shift += 7 {
- if shift >= 64 {
- return ErrIntOverflowIntrospection
- }
- if iNdEx >= l {
- return io.ErrUnexpectedEOF
- }
- b := dAtA[iNdEx]
- iNdEx++
- m.Alpha |= uint64(b&0x7F) << shift
- if b < 0x80 {
- break
- }
- }
- case 3:
- if wireType != 0 {
- return fmt.Errorf("proto: wrong wireType = %d for field DisjointPaths", wireType)
- }
- m.DisjointPaths = 0
- for shift := uint(0); ; shift += 7 {
- if shift >= 64 {
- return ErrIntOverflowIntrospection
- }
- if iNdEx >= l {
- return io.ErrUnexpectedEOF
- }
- b := dAtA[iNdEx]
- iNdEx++
- m.DisjointPaths |= uint64(b&0x7F) << shift
- if b < 0x80 {
- break
- }
- }
- case 4:
- if wireType != 0 {
- return fmt.Errorf("proto: wrong wireType = %d for field Beta", wireType)
- }
- m.Beta = 0
- for shift := uint(0); ; shift += 7 {
- if shift >= 64 {
- return ErrIntOverflowIntrospection
- }
- if iNdEx >= l {
- return io.ErrUnexpectedEOF
- }
- b := dAtA[iNdEx]
- iNdEx++
- m.Beta |= uint64(b&0x7F) << shift
- if b < 0x80 {
- break
- }
- }
- default:
- iNdEx = preIndex
- skippy, err := skipIntrospection(dAtA[iNdEx:])
- if err != nil {
- return err
- }
- if skippy < 0 {
- return ErrInvalidLengthIntrospection
- }
- if (iNdEx + skippy) < 0 {
- return ErrInvalidLengthIntrospection
- }
- if (iNdEx + skippy) > l {
- return io.ErrUnexpectedEOF
- }
- iNdEx += skippy
- }
- }
-
- if iNdEx > l {
- return io.ErrUnexpectedEOF
- }
- return nil
-}
-func (m *DHT_PeerInDHT) Unmarshal(dAtA []byte) error {
- l := len(dAtA)
- iNdEx := 0
- for iNdEx < l {
- preIndex := iNdEx
- var wire uint64
- for shift := uint(0); ; shift += 7 {
- if shift >= 64 {
- return ErrIntOverflowIntrospection
- }
- if iNdEx >= l {
- return io.ErrUnexpectedEOF
- }
- b := dAtA[iNdEx]
- iNdEx++
- wire |= uint64(b&0x7F) << shift
- if b < 0x80 {
- break
- }
- }
- fieldNum := int32(wire >> 3)
- wireType := int(wire & 0x7)
- if wireType == 4 {
- return fmt.Errorf("proto: PeerInDHT: wiretype end group for non-group")
- }
- if fieldNum <= 0 {
- return fmt.Errorf("proto: PeerInDHT: illegal tag %d (wire type %d)", fieldNum, wire)
- }
- switch fieldNum {
- case 1:
- if wireType != 2 {
- return fmt.Errorf("proto: wrong wireType = %d for field PeerId", wireType)
- }
- var stringLen uint64
- for shift := uint(0); ; shift += 7 {
- if shift >= 64 {
- return ErrIntOverflowIntrospection
- }
- if iNdEx >= l {
- return io.ErrUnexpectedEOF
- }
- b := dAtA[iNdEx]
- iNdEx++
- stringLen |= uint64(b&0x7F) << shift
- if b < 0x80 {
- break
- }
- }
- intStringLen := int(stringLen)
- if intStringLen < 0 {
- return ErrInvalidLengthIntrospection
- }
- postIndex := iNdEx + intStringLen
- if postIndex < 0 {
- return ErrInvalidLengthIntrospection
- }
- if postIndex > l {
- return io.ErrUnexpectedEOF
- }
- m.PeerId = string(dAtA[iNdEx:postIndex])
- iNdEx = postIndex
- case 2:
- if wireType != 0 {
- return fmt.Errorf("proto: wrong wireType = %d for field Status", wireType)
- }
- m.Status = 0
- for shift := uint(0); ; shift += 7 {
- if shift >= 64 {
- return ErrIntOverflowIntrospection
- }
- if iNdEx >= l {
- return io.ErrUnexpectedEOF
- }
- b := dAtA[iNdEx]
- iNdEx++
- m.Status |= DHT_PeerInDHT_Status(b&0x7F) << shift
- if b < 0x80 {
- break
- }
- }
- case 3:
- if wireType != 0 {
- return fmt.Errorf("proto: wrong wireType = %d for field AgeInBucket", wireType)
- }
- m.AgeInBucket = 0
- for shift := uint(0); ; shift += 7 {
- if shift >= 64 {
- return ErrIntOverflowIntrospection
- }
- if iNdEx >= l {
- return io.ErrUnexpectedEOF
- }
- b := dAtA[iNdEx]
- iNdEx++
- m.AgeInBucket |= uint32(b&0x7F) << shift
- if b < 0x80 {
- break
- }
- }
- default:
- iNdEx = preIndex
- skippy, err := skipIntrospection(dAtA[iNdEx:])
- if err != nil {
- return err
- }
- if skippy < 0 {
- return ErrInvalidLengthIntrospection
- }
- if (iNdEx + skippy) < 0 {
- return ErrInvalidLengthIntrospection
- }
- if (iNdEx + skippy) > l {
- return io.ErrUnexpectedEOF
- }
- iNdEx += skippy
- }
- }
-
- if iNdEx > l {
- return io.ErrUnexpectedEOF
- }
- return nil
-}
-func (m *DHT_Bucket) Unmarshal(dAtA []byte) error {
- l := len(dAtA)
- iNdEx := 0
- for iNdEx < l {
- preIndex := iNdEx
- var wire uint64
- for shift := uint(0); ; shift += 7 {
- if shift >= 64 {
- return ErrIntOverflowIntrospection
- }
- if iNdEx >= l {
- return io.ErrUnexpectedEOF
- }
- b := dAtA[iNdEx]
- iNdEx++
- wire |= uint64(b&0x7F) << shift
- if b < 0x80 {
- break
- }
- }
- fieldNum := int32(wire >> 3)
- wireType := int(wire & 0x7)
- if wireType == 4 {
- return fmt.Errorf("proto: Bucket: wiretype end group for non-group")
- }
- if fieldNum <= 0 {
- return fmt.Errorf("proto: Bucket: illegal tag %d (wire type %d)", fieldNum, wire)
- }
- switch fieldNum {
- case 1:
- if wireType != 0 {
- return fmt.Errorf("proto: wrong wireType = %d for field Cpl", wireType)
- }
- m.Cpl = 0
- for shift := uint(0); ; shift += 7 {
- if shift >= 64 {
- return ErrIntOverflowIntrospection
- }
- if iNdEx >= l {
- return io.ErrUnexpectedEOF
- }
- b := dAtA[iNdEx]
- iNdEx++
- m.Cpl |= uint32(b&0x7F) << shift
- if b < 0x80 {
- break
- }
- }
- case 2:
- if wireType != 2 {
- return fmt.Errorf("proto: wrong wireType = %d for field Peers", wireType)
- }
- var msglen int
- for shift := uint(0); ; shift += 7 {
- if shift >= 64 {
- return ErrIntOverflowIntrospection
- }
- if iNdEx >= l {
- return io.ErrUnexpectedEOF
- }
- b := dAtA[iNdEx]
- iNdEx++
- msglen |= int(b&0x7F) << shift
- if b < 0x80 {
- break
- }
- }
- if msglen < 0 {
- return ErrInvalidLengthIntrospection
- }
- postIndex := iNdEx + msglen
- if postIndex < 0 {
- return ErrInvalidLengthIntrospection
- }
- if postIndex > l {
- return io.ErrUnexpectedEOF
- }
- m.Peers = append(m.Peers, &DHT_PeerInDHT{})
- if err := m.Peers[len(m.Peers)-1].Unmarshal(dAtA[iNdEx:postIndex]); err != nil {
- return err
- }
- iNdEx = postIndex
- default:
- iNdEx = preIndex
- skippy, err := skipIntrospection(dAtA[iNdEx:])
- if err != nil {
- return err
- }
- if skippy < 0 {
- return ErrInvalidLengthIntrospection
- }
- if (iNdEx + skippy) < 0 {
- return ErrInvalidLengthIntrospection
- }
- if (iNdEx + skippy) > l {
- return io.ErrUnexpectedEOF
- }
- iNdEx += skippy
- }
- }
-
- if iNdEx > l {
- return io.ErrUnexpectedEOF
- }
- return nil
-}
-func (m *DHT_QueryGauge) Unmarshal(dAtA []byte) error {
- l := len(dAtA)
- iNdEx := 0
- for iNdEx < l {
- preIndex := iNdEx
- var wire uint64
- for shift := uint(0); ; shift += 7 {
- if shift >= 64 {
- return ErrIntOverflowIntrospection
- }
- if iNdEx >= l {
- return io.ErrUnexpectedEOF
- }
- b := dAtA[iNdEx]
- iNdEx++
- wire |= uint64(b&0x7F) << shift
- if b < 0x80 {
- break
- }
- }
- fieldNum := int32(wire >> 3)
- wireType := int(wire & 0x7)
- if wireType == 4 {
- return fmt.Errorf("proto: QueryGauge: wiretype end group for non-group")
- }
- if fieldNum <= 0 {
- return fmt.Errorf("proto: QueryGauge: illegal tag %d (wire type %d)", fieldNum, wire)
- }
- switch fieldNum {
- case 1:
- if wireType != 0 {
- return fmt.Errorf("proto: wrong wireType = %d for field Success", wireType)
- }
- m.Success = 0
- for shift := uint(0); ; shift += 7 {
- if shift >= 64 {
- return ErrIntOverflowIntrospection
- }
- if iNdEx >= l {
- return io.ErrUnexpectedEOF
- }
- b := dAtA[iNdEx]
- iNdEx++
- m.Success |= uint64(b&0x7F) << shift
- if b < 0x80 {
- break
- }
- }
- case 2:
- if wireType != 0 {
- return fmt.Errorf("proto: wrong wireType = %d for field Error", wireType)
- }
- m.Error = 0
- for shift := uint(0); ; shift += 7 {
- if shift >= 64 {
- return ErrIntOverflowIntrospection
- }
- if iNdEx >= l {
- return io.ErrUnexpectedEOF
- }
- b := dAtA[iNdEx]
- iNdEx++
- m.Error |= uint64(b&0x7F) << shift
- if b < 0x80 {
- break
- }
- }
- case 3:
- if wireType != 0 {
- return fmt.Errorf("proto: wrong wireType = %d for field Timeout", wireType)
- }
- m.Timeout = 0
- for shift := uint(0); ; shift += 7 {
- if shift >= 64 {
- return ErrIntOverflowIntrospection
- }
- if iNdEx >= l {
- return io.ErrUnexpectedEOF
- }
- b := dAtA[iNdEx]
- iNdEx++
- m.Timeout |= uint64(b&0x7F) << shift
- if b < 0x80 {
- break
- }
- }
- default:
- iNdEx = preIndex
- skippy, err := skipIntrospection(dAtA[iNdEx:])
- if err != nil {
- return err
- }
- if skippy < 0 {
- return ErrInvalidLengthIntrospection
- }
- if (iNdEx + skippy) < 0 {
- return ErrInvalidLengthIntrospection
- }
- if (iNdEx + skippy) > l {
- return io.ErrUnexpectedEOF
- }
- iNdEx += skippy
- }
- }
-
- if iNdEx > l {
- return io.ErrUnexpectedEOF
- }
- return nil
-}
-func (m *Subsystems) Unmarshal(dAtA []byte) error {
- l := len(dAtA)
- iNdEx := 0
- for iNdEx < l {
- preIndex := iNdEx
- var wire uint64
- for shift := uint(0); ; shift += 7 {
- if shift >= 64 {
- return ErrIntOverflowIntrospection
- }
- if iNdEx >= l {
- return io.ErrUnexpectedEOF
- }
- b := dAtA[iNdEx]
- iNdEx++
- wire |= uint64(b&0x7F) << shift
- if b < 0x80 {
- break
- }
- }
- fieldNum := int32(wire >> 3)
- wireType := int(wire & 0x7)
- if wireType == 4 {
- return fmt.Errorf("proto: Subsystems: wiretype end group for non-group")
- }
- if fieldNum <= 0 {
- return fmt.Errorf("proto: Subsystems: illegal tag %d (wire type %d)", fieldNum, wire)
- }
- switch fieldNum {
- case 1:
- if wireType != 2 {
- return fmt.Errorf("proto: wrong wireType = %d for field Connections", wireType)
- }
- var msglen int
- for shift := uint(0); ; shift += 7 {
- if shift >= 64 {
- return ErrIntOverflowIntrospection
- }
- if iNdEx >= l {
- return io.ErrUnexpectedEOF
- }
- b := dAtA[iNdEx]
- iNdEx++
- msglen |= int(b&0x7F) << shift
- if b < 0x80 {
- break
- }
- }
- if msglen < 0 {
- return ErrInvalidLengthIntrospection
- }
- postIndex := iNdEx + msglen
- if postIndex < 0 {
- return ErrInvalidLengthIntrospection
- }
- if postIndex > l {
- return io.ErrUnexpectedEOF
- }
- m.Connections = append(m.Connections, &Connection{})
- if err := m.Connections[len(m.Connections)-1].Unmarshal(dAtA[iNdEx:postIndex]); err != nil {
- return err
- }
- iNdEx = postIndex
- case 2:
- if wireType != 2 {
- return fmt.Errorf("proto: wrong wireType = %d for field Dht", wireType)
- }
- var msglen int
- for shift := uint(0); ; shift += 7 {
- if shift >= 64 {
- return ErrIntOverflowIntrospection
- }
- if iNdEx >= l {
- return io.ErrUnexpectedEOF
- }
- b := dAtA[iNdEx]
- iNdEx++
- msglen |= int(b&0x7F) << shift
- if b < 0x80 {
- break
- }
- }
- if msglen < 0 {
- return ErrInvalidLengthIntrospection
- }
- postIndex := iNdEx + msglen
- if postIndex < 0 {
- return ErrInvalidLengthIntrospection
- }
- if postIndex > l {
- return io.ErrUnexpectedEOF
- }
- if m.Dht == nil {
- m.Dht = &DHT{}
- }
- if err := m.Dht.Unmarshal(dAtA[iNdEx:postIndex]); err != nil {
- return err
- }
- iNdEx = postIndex
- default:
- iNdEx = preIndex
- skippy, err := skipIntrospection(dAtA[iNdEx:])
- if err != nil {
- return err
- }
- if skippy < 0 {
- return ErrInvalidLengthIntrospection
- }
- if (iNdEx + skippy) < 0 {
- return ErrInvalidLengthIntrospection
- }
- if (iNdEx + skippy) > l {
- return io.ErrUnexpectedEOF
- }
- iNdEx += skippy
- }
- }
-
- if iNdEx > l {
- return io.ErrUnexpectedEOF
- }
- return nil
-}
-func (m *State) Unmarshal(dAtA []byte) error {
- l := len(dAtA)
- iNdEx := 0
- for iNdEx < l {
- preIndex := iNdEx
- var wire uint64
- for shift := uint(0); ; shift += 7 {
- if shift >= 64 {
- return ErrIntOverflowIntrospection
- }
- if iNdEx >= l {
- return io.ErrUnexpectedEOF
- }
- b := dAtA[iNdEx]
- iNdEx++
- wire |= uint64(b&0x7F) << shift
- if b < 0x80 {
- break
- }
- }
- fieldNum := int32(wire >> 3)
- wireType := int(wire & 0x7)
- if wireType == 4 {
- return fmt.Errorf("proto: State: wiretype end group for non-group")
- }
- if fieldNum <= 0 {
- return fmt.Errorf("proto: State: illegal tag %d (wire type %d)", fieldNum, wire)
- }
- switch fieldNum {
- case 1:
- if wireType != 2 {
- return fmt.Errorf("proto: wrong wireType = %d for field Subsystems", wireType)
- }
- var msglen int
- for shift := uint(0); ; shift += 7 {
- if shift >= 64 {
- return ErrIntOverflowIntrospection
- }
- if iNdEx >= l {
- return io.ErrUnexpectedEOF
- }
- b := dAtA[iNdEx]
- iNdEx++
- msglen |= int(b&0x7F) << shift
- if b < 0x80 {
- break
- }
- }
- if msglen < 0 {
- return ErrInvalidLengthIntrospection
- }
- postIndex := iNdEx + msglen
- if postIndex < 0 {
- return ErrInvalidLengthIntrospection
- }
- if postIndex > l {
- return io.ErrUnexpectedEOF
- }
- if m.Subsystems == nil {
- m.Subsystems = &Subsystems{}
- }
- if err := m.Subsystems.Unmarshal(dAtA[iNdEx:postIndex]); err != nil {
- return err
- }
- iNdEx = postIndex
- case 2:
- if wireType != 2 {
- return fmt.Errorf("proto: wrong wireType = %d for field Traffic", wireType)
- }
- var msglen int
- for shift := uint(0); ; shift += 7 {
- if shift >= 64 {
- return ErrIntOverflowIntrospection
- }
- if iNdEx >= l {
- return io.ErrUnexpectedEOF
- }
- b := dAtA[iNdEx]
- iNdEx++
- msglen |= int(b&0x7F) << shift
- if b < 0x80 {
- break
- }
- }
- if msglen < 0 {
- return ErrInvalidLengthIntrospection
- }
- postIndex := iNdEx + msglen
- if postIndex < 0 {
- return ErrInvalidLengthIntrospection
- }
- if postIndex > l {
- return io.ErrUnexpectedEOF
- }
- if m.Traffic == nil {
- m.Traffic = &Traffic{}
- }
- if err := m.Traffic.Unmarshal(dAtA[iNdEx:postIndex]); err != nil {
- return err
- }
- iNdEx = postIndex
- case 3:
- if wireType != 0 {
- return fmt.Errorf("proto: wrong wireType = %d for field InstantTs", wireType)
- }
- m.InstantTs = 0
- for shift := uint(0); ; shift += 7 {
- if shift >= 64 {
- return ErrIntOverflowIntrospection
- }
- if iNdEx >= l {
- return io.ErrUnexpectedEOF
- }
- b := dAtA[iNdEx]
- iNdEx++
- m.InstantTs |= uint64(b&0x7F) << shift
- if b < 0x80 {
- break
- }
- }
- case 4:
- if wireType != 0 {
- return fmt.Errorf("proto: wrong wireType = %d for field StartTs", wireType)
- }
- m.StartTs = 0
- for shift := uint(0); ; shift += 7 {
- if shift >= 64 {
- return ErrIntOverflowIntrospection
- }
- if iNdEx >= l {
- return io.ErrUnexpectedEOF
- }
- b := dAtA[iNdEx]
- iNdEx++
- m.StartTs |= uint64(b&0x7F) << shift
- if b < 0x80 {
- break
- }
- }
- case 5:
- if wireType != 0 {
- return fmt.Errorf("proto: wrong wireType = %d for field SnapshotDurationMs", wireType)
- }
- m.SnapshotDurationMs = 0
- for shift := uint(0); ; shift += 7 {
- if shift >= 64 {
- return ErrIntOverflowIntrospection
- }
- if iNdEx >= l {
- return io.ErrUnexpectedEOF
- }
- b := dAtA[iNdEx]
- iNdEx++
- m.SnapshotDurationMs |= uint32(b&0x7F) << shift
- if b < 0x80 {
- break
- }
- }
- default:
- iNdEx = preIndex
- skippy, err := skipIntrospection(dAtA[iNdEx:])
- if err != nil {
- return err
- }
- if skippy < 0 {
- return ErrInvalidLengthIntrospection
- }
- if (iNdEx + skippy) < 0 {
- return ErrInvalidLengthIntrospection
- }
- if (iNdEx + skippy) > l {
- return io.ErrUnexpectedEOF
- }
- iNdEx += skippy
- }
- }
-
- if iNdEx > l {
- return io.ErrUnexpectedEOF
- }
- return nil
-}
-func (m *Event) Unmarshal(dAtA []byte) error {
- l := len(dAtA)
- iNdEx := 0
- for iNdEx < l {
- preIndex := iNdEx
- var wire uint64
- for shift := uint(0); ; shift += 7 {
- if shift >= 64 {
- return ErrIntOverflowIntrospection
- }
- if iNdEx >= l {
- return io.ErrUnexpectedEOF
- }
- b := dAtA[iNdEx]
- iNdEx++
- wire |= uint64(b&0x7F) << shift
- if b < 0x80 {
- break
- }
- }
- fieldNum := int32(wire >> 3)
- wireType := int(wire & 0x7)
- if wireType == 4 {
- return fmt.Errorf("proto: Event: wiretype end group for non-group")
- }
- if fieldNum <= 0 {
- return fmt.Errorf("proto: Event: illegal tag %d (wire type %d)", fieldNum, wire)
- }
- switch fieldNum {
- case 1:
- if wireType != 2 {
- return fmt.Errorf("proto: wrong wireType = %d for field Type", wireType)
- }
- var msglen int
- for shift := uint(0); ; shift += 7 {
- if shift >= 64 {
- return ErrIntOverflowIntrospection
- }
- if iNdEx >= l {
- return io.ErrUnexpectedEOF
- }
- b := dAtA[iNdEx]
- iNdEx++
- msglen |= int(b&0x7F) << shift
- if b < 0x80 {
- break
- }
- }
- if msglen < 0 {
- return ErrInvalidLengthIntrospection
- }
- postIndex := iNdEx + msglen
- if postIndex < 0 {
- return ErrInvalidLengthIntrospection
- }
- if postIndex > l {
- return io.ErrUnexpectedEOF
- }
- if m.Type == nil {
- m.Type = &EventType{}
- }
- if err := m.Type.Unmarshal(dAtA[iNdEx:postIndex]); err != nil {
- return err
- }
- iNdEx = postIndex
- case 2:
- if wireType != 0 {
- return fmt.Errorf("proto: wrong wireType = %d for field Ts", wireType)
- }
- m.Ts = 0
- for shift := uint(0); ; shift += 7 {
- if shift >= 64 {
- return ErrIntOverflowIntrospection
- }
- if iNdEx >= l {
- return io.ErrUnexpectedEOF
- }
- b := dAtA[iNdEx]
- iNdEx++
- m.Ts |= uint64(b&0x7F) << shift
- if b < 0x80 {
- break
- }
- }
- case 3:
- if wireType != 2 {
- return fmt.Errorf("proto: wrong wireType = %d for field Content", wireType)
- }
- var stringLen uint64
- for shift := uint(0); ; shift += 7 {
- if shift >= 64 {
- return ErrIntOverflowIntrospection
- }
- if iNdEx >= l {
- return io.ErrUnexpectedEOF
- }
- b := dAtA[iNdEx]
- iNdEx++
- stringLen |= uint64(b&0x7F) << shift
- if b < 0x80 {
- break
- }
- }
- intStringLen := int(stringLen)
- if intStringLen < 0 {
- return ErrInvalidLengthIntrospection
- }
- postIndex := iNdEx + intStringLen
- if postIndex < 0 {
- return ErrInvalidLengthIntrospection
- }
- if postIndex > l {
- return io.ErrUnexpectedEOF
- }
- m.Content = string(dAtA[iNdEx:postIndex])
- iNdEx = postIndex
- default:
- iNdEx = preIndex
- skippy, err := skipIntrospection(dAtA[iNdEx:])
- if err != nil {
- return err
- }
- if skippy < 0 {
- return ErrInvalidLengthIntrospection
- }
- if (iNdEx + skippy) < 0 {
- return ErrInvalidLengthIntrospection
- }
- if (iNdEx + skippy) > l {
- return io.ErrUnexpectedEOF
- }
- iNdEx += skippy
- }
- }
-
- if iNdEx > l {
- return io.ErrUnexpectedEOF
- }
- return nil
-}
-func (m *ServerMessage) Unmarshal(dAtA []byte) error {
- l := len(dAtA)
- iNdEx := 0
- for iNdEx < l {
- preIndex := iNdEx
- var wire uint64
- for shift := uint(0); ; shift += 7 {
- if shift >= 64 {
- return ErrIntOverflowIntrospection
- }
- if iNdEx >= l {
- return io.ErrUnexpectedEOF
- }
- b := dAtA[iNdEx]
- iNdEx++
- wire |= uint64(b&0x7F) << shift
- if b < 0x80 {
- break
- }
- }
- fieldNum := int32(wire >> 3)
- wireType := int(wire & 0x7)
- if wireType == 4 {
- return fmt.Errorf("proto: ServerMessage: wiretype end group for non-group")
- }
- if fieldNum <= 0 {
- return fmt.Errorf("proto: ServerMessage: illegal tag %d (wire type %d)", fieldNum, wire)
- }
- switch fieldNum {
- case 1:
- if wireType != 2 {
- return fmt.Errorf("proto: wrong wireType = %d for field Version", wireType)
- }
- var msglen int
- for shift := uint(0); ; shift += 7 {
- if shift >= 64 {
- return ErrIntOverflowIntrospection
- }
- if iNdEx >= l {
- return io.ErrUnexpectedEOF
- }
- b := dAtA[iNdEx]
- iNdEx++
- msglen |= int(b&0x7F) << shift
- if b < 0x80 {
- break
- }
- }
- if msglen < 0 {
- return ErrInvalidLengthIntrospection
- }
- postIndex := iNdEx + msglen
- if postIndex < 0 {
- return ErrInvalidLengthIntrospection
- }
- if postIndex > l {
- return io.ErrUnexpectedEOF
- }
- if m.Version == nil {
- m.Version = &Version{}
- }
- if err := m.Version.Unmarshal(dAtA[iNdEx:postIndex]); err != nil {
- return err
- }
- iNdEx = postIndex
- case 2:
- if wireType != 2 {
- return fmt.Errorf("proto: wrong wireType = %d for field State", wireType)
- }
- var msglen int
- for shift := uint(0); ; shift += 7 {
- if shift >= 64 {
- return ErrIntOverflowIntrospection
- }
- if iNdEx >= l {
- return io.ErrUnexpectedEOF
- }
- b := dAtA[iNdEx]
- iNdEx++
- msglen |= int(b&0x7F) << shift
- if b < 0x80 {
- break
- }
- }
- if msglen < 0 {
- return ErrInvalidLengthIntrospection
- }
- postIndex := iNdEx + msglen
- if postIndex < 0 {
- return ErrInvalidLengthIntrospection
- }
- if postIndex > l {
- return io.ErrUnexpectedEOF
- }
- v := &State{}
- if err := v.Unmarshal(dAtA[iNdEx:postIndex]); err != nil {
- return err
- }
- m.Payload = &ServerMessage_State{v}
- iNdEx = postIndex
- case 3:
- if wireType != 2 {
- return fmt.Errorf("proto: wrong wireType = %d for field Runtime", wireType)
- }
- var msglen int
- for shift := uint(0); ; shift += 7 {
- if shift >= 64 {
- return ErrIntOverflowIntrospection
- }
- if iNdEx >= l {
- return io.ErrUnexpectedEOF
- }
- b := dAtA[iNdEx]
- iNdEx++
- msglen |= int(b&0x7F) << shift
- if b < 0x80 {
- break
- }
- }
- if msglen < 0 {
- return ErrInvalidLengthIntrospection
- }
- postIndex := iNdEx + msglen
- if postIndex < 0 {
- return ErrInvalidLengthIntrospection
- }
- if postIndex > l {
- return io.ErrUnexpectedEOF
- }
- v := &Runtime{}
- if err := v.Unmarshal(dAtA[iNdEx:postIndex]); err != nil {
- return err
- }
- m.Payload = &ServerMessage_Runtime{v}
- iNdEx = postIndex
- case 4:
- if wireType != 2 {
- return fmt.Errorf("proto: wrong wireType = %d for field Event", wireType)
- }
- var msglen int
- for shift := uint(0); ; shift += 7 {
- if shift >= 64 {
- return ErrIntOverflowIntrospection
- }
- if iNdEx >= l {
- return io.ErrUnexpectedEOF
- }
- b := dAtA[iNdEx]
- iNdEx++
- msglen |= int(b&0x7F) << shift
- if b < 0x80 {
- break
- }
- }
- if msglen < 0 {
- return ErrInvalidLengthIntrospection
- }
- postIndex := iNdEx + msglen
- if postIndex < 0 {
- return ErrInvalidLengthIntrospection
- }
- if postIndex > l {
- return io.ErrUnexpectedEOF
- }
- v := &Event{}
- if err := v.Unmarshal(dAtA[iNdEx:postIndex]); err != nil {
- return err
- }
- m.Payload = &ServerMessage_Event{v}
- iNdEx = postIndex
- case 5:
- if wireType != 2 {
- return fmt.Errorf("proto: wrong wireType = %d for field Response", wireType)
- }
- var msglen int
- for shift := uint(0); ; shift += 7 {
- if shift >= 64 {
- return ErrIntOverflowIntrospection
- }
- if iNdEx >= l {
- return io.ErrUnexpectedEOF
- }
- b := dAtA[iNdEx]
- iNdEx++
- msglen |= int(b&0x7F) << shift
- if b < 0x80 {
- break
- }
- }
- if msglen < 0 {
- return ErrInvalidLengthIntrospection
- }
- postIndex := iNdEx + msglen
- if postIndex < 0 {
- return ErrInvalidLengthIntrospection
- }
- if postIndex > l {
- return io.ErrUnexpectedEOF
- }
- v := &CommandResponse{}
- if err := v.Unmarshal(dAtA[iNdEx:postIndex]); err != nil {
- return err
- }
- m.Payload = &ServerMessage_Response{v}
- iNdEx = postIndex
- case 6:
- if wireType != 2 {
- return fmt.Errorf("proto: wrong wireType = %d for field Notice", wireType)
- }
- var msglen int
- for shift := uint(0); ; shift += 7 {
- if shift >= 64 {
- return ErrIntOverflowIntrospection
- }
- if iNdEx >= l {
- return io.ErrUnexpectedEOF
- }
- b := dAtA[iNdEx]
- iNdEx++
- msglen |= int(b&0x7F) << shift
- if b < 0x80 {
- break
- }
- }
- if msglen < 0 {
- return ErrInvalidLengthIntrospection
- }
- postIndex := iNdEx + msglen
- if postIndex < 0 {
- return ErrInvalidLengthIntrospection
- }
- if postIndex > l {
- return io.ErrUnexpectedEOF
- }
- v := &ServerNotice{}
- if err := v.Unmarshal(dAtA[iNdEx:postIndex]); err != nil {
- return err
- }
- m.Payload = &ServerMessage_Notice{v}
- iNdEx = postIndex
- default:
- iNdEx = preIndex
- skippy, err := skipIntrospection(dAtA[iNdEx:])
- if err != nil {
- return err
- }
- if skippy < 0 {
- return ErrInvalidLengthIntrospection
- }
- if (iNdEx + skippy) < 0 {
- return ErrInvalidLengthIntrospection
- }
- if (iNdEx + skippy) > l {
- return io.ErrUnexpectedEOF
- }
- iNdEx += skippy
- }
- }
-
- if iNdEx > l {
- return io.ErrUnexpectedEOF
- }
- return nil
-}
-func (m *Configuration) Unmarshal(dAtA []byte) error {
- l := len(dAtA)
- iNdEx := 0
- for iNdEx < l {
- preIndex := iNdEx
- var wire uint64
- for shift := uint(0); ; shift += 7 {
- if shift >= 64 {
- return ErrIntOverflowIntrospection
- }
- if iNdEx >= l {
- return io.ErrUnexpectedEOF
- }
- b := dAtA[iNdEx]
- iNdEx++
- wire |= uint64(b&0x7F) << shift
- if b < 0x80 {
- break
- }
- }
- fieldNum := int32(wire >> 3)
- wireType := int(wire & 0x7)
- if wireType == 4 {
- return fmt.Errorf("proto: Configuration: wiretype end group for non-group")
- }
- if fieldNum <= 0 {
- return fmt.Errorf("proto: Configuration: illegal tag %d (wire type %d)", fieldNum, wire)
- }
- switch fieldNum {
- case 1:
- if wireType != 0 {
- return fmt.Errorf("proto: wrong wireType = %d for field RetentionPeriodMs", wireType)
- }
- m.RetentionPeriodMs = 0
- for shift := uint(0); ; shift += 7 {
- if shift >= 64 {
- return ErrIntOverflowIntrospection
- }
- if iNdEx >= l {
- return io.ErrUnexpectedEOF
- }
- b := dAtA[iNdEx]
- iNdEx++
- m.RetentionPeriodMs |= uint64(b&0x7F) << shift
- if b < 0x80 {
- break
- }
- }
- case 2:
- if wireType != 0 {
- return fmt.Errorf("proto: wrong wireType = %d for field StateSnapshotIntervalMs", wireType)
- }
- m.StateSnapshotIntervalMs = 0
- for shift := uint(0); ; shift += 7 {
- if shift >= 64 {
- return ErrIntOverflowIntrospection
- }
- if iNdEx >= l {
- return io.ErrUnexpectedEOF
- }
- b := dAtA[iNdEx]
- iNdEx++
- m.StateSnapshotIntervalMs |= uint64(b&0x7F) << shift
- if b < 0x80 {
- break
- }
- }
- default:
- iNdEx = preIndex
- skippy, err := skipIntrospection(dAtA[iNdEx:])
- if err != nil {
- return err
- }
- if skippy < 0 {
- return ErrInvalidLengthIntrospection
- }
- if (iNdEx + skippy) < 0 {
- return ErrInvalidLengthIntrospection
- }
- if (iNdEx + skippy) > l {
- return io.ErrUnexpectedEOF
- }
- iNdEx += skippy
- }
- }
-
- if iNdEx > l {
- return io.ErrUnexpectedEOF
- }
- return nil
-}
-func (m *ClientCommand) Unmarshal(dAtA []byte) error {
- l := len(dAtA)
- iNdEx := 0
- for iNdEx < l {
- preIndex := iNdEx
- var wire uint64
- for shift := uint(0); ; shift += 7 {
- if shift >= 64 {
- return ErrIntOverflowIntrospection
- }
- if iNdEx >= l {
- return io.ErrUnexpectedEOF
- }
- b := dAtA[iNdEx]
- iNdEx++
- wire |= uint64(b&0x7F) << shift
- if b < 0x80 {
- break
- }
- }
- fieldNum := int32(wire >> 3)
- wireType := int(wire & 0x7)
- if wireType == 4 {
- return fmt.Errorf("proto: ClientCommand: wiretype end group for non-group")
- }
- if fieldNum <= 0 {
- return fmt.Errorf("proto: ClientCommand: illegal tag %d (wire type %d)", fieldNum, wire)
- }
- switch fieldNum {
- case 1:
- if wireType != 2 {
- return fmt.Errorf("proto: wrong wireType = %d for field Version", wireType)
- }
- var msglen int
- for shift := uint(0); ; shift += 7 {
- if shift >= 64 {
- return ErrIntOverflowIntrospection
- }
- if iNdEx >= l {
- return io.ErrUnexpectedEOF
- }
- b := dAtA[iNdEx]
- iNdEx++
- msglen |= int(b&0x7F) << shift
- if b < 0x80 {
- break
- }
- }
- if msglen < 0 {
- return ErrInvalidLengthIntrospection
- }
- postIndex := iNdEx + msglen
- if postIndex < 0 {
- return ErrInvalidLengthIntrospection
- }
- if postIndex > l {
- return io.ErrUnexpectedEOF
- }
- if m.Version == nil {
- m.Version = &Version{}
- }
- if err := m.Version.Unmarshal(dAtA[iNdEx:postIndex]); err != nil {
- return err
- }
- iNdEx = postIndex
- case 2:
- if wireType != 0 {
- return fmt.Errorf("proto: wrong wireType = %d for field Id", wireType)
- }
- m.Id = 0
- for shift := uint(0); ; shift += 7 {
- if shift >= 64 {
- return ErrIntOverflowIntrospection
- }
- if iNdEx >= l {
- return io.ErrUnexpectedEOF
- }
- b := dAtA[iNdEx]
- iNdEx++
- m.Id |= uint64(b&0x7F) << shift
- if b < 0x80 {
- break
- }
- }
- case 3:
- if wireType != 0 {
- return fmt.Errorf("proto: wrong wireType = %d for field Command", wireType)
- }
- m.Command = 0
- for shift := uint(0); ; shift += 7 {
- if shift >= 64 {
- return ErrIntOverflowIntrospection
- }
- if iNdEx >= l {
- return io.ErrUnexpectedEOF
- }
- b := dAtA[iNdEx]
- iNdEx++
- m.Command |= ClientCommand_Command(b&0x7F) << shift
- if b < 0x80 {
- break
- }
- }
- case 4:
- if wireType != 0 {
- return fmt.Errorf("proto: wrong wireType = %d for field Source", wireType)
- }
- m.Source = 0
- for shift := uint(0); ; shift += 7 {
- if shift >= 64 {
- return ErrIntOverflowIntrospection
- }
- if iNdEx >= l {
- return io.ErrUnexpectedEOF
- }
- b := dAtA[iNdEx]
- iNdEx++
- m.Source |= ClientCommand_Source(b&0x7F) << shift
- if b < 0x80 {
- break
- }
- }
- case 5:
- if wireType != 2 {
- return fmt.Errorf("proto: wrong wireType = %d for field Config", wireType)
- }
- var msglen int
- for shift := uint(0); ; shift += 7 {
- if shift >= 64 {
- return ErrIntOverflowIntrospection
- }
- if iNdEx >= l {
- return io.ErrUnexpectedEOF
- }
- b := dAtA[iNdEx]
- iNdEx++
- msglen |= int(b&0x7F) << shift
- if b < 0x80 {
- break
- }
- }
- if msglen < 0 {
- return ErrInvalidLengthIntrospection
- }
- postIndex := iNdEx + msglen
- if postIndex < 0 {
- return ErrInvalidLengthIntrospection
- }
- if postIndex > l {
- return io.ErrUnexpectedEOF
- }
- if m.Config == nil {
- m.Config = &Configuration{}
- }
- if err := m.Config.Unmarshal(dAtA[iNdEx:postIndex]); err != nil {
- return err
- }
- iNdEx = postIndex
- default:
- iNdEx = preIndex
- skippy, err := skipIntrospection(dAtA[iNdEx:])
- if err != nil {
- return err
- }
- if skippy < 0 {
- return ErrInvalidLengthIntrospection
- }
- if (iNdEx + skippy) < 0 {
- return ErrInvalidLengthIntrospection
- }
- if (iNdEx + skippy) > l {
- return io.ErrUnexpectedEOF
- }
- iNdEx += skippy
- }
- }
-
- if iNdEx > l {
- return io.ErrUnexpectedEOF
- }
- return nil
-}
-func (m *CommandResponse) Unmarshal(dAtA []byte) error {
- l := len(dAtA)
- iNdEx := 0
- for iNdEx < l {
- preIndex := iNdEx
- var wire uint64
- for shift := uint(0); ; shift += 7 {
- if shift >= 64 {
- return ErrIntOverflowIntrospection
- }
- if iNdEx >= l {
- return io.ErrUnexpectedEOF
- }
- b := dAtA[iNdEx]
- iNdEx++
- wire |= uint64(b&0x7F) << shift
- if b < 0x80 {
- break
- }
- }
- fieldNum := int32(wire >> 3)
- wireType := int(wire & 0x7)
- if wireType == 4 {
- return fmt.Errorf("proto: CommandResponse: wiretype end group for non-group")
- }
- if fieldNum <= 0 {
- return fmt.Errorf("proto: CommandResponse: illegal tag %d (wire type %d)", fieldNum, wire)
- }
- switch fieldNum {
- case 1:
- if wireType != 0 {
- return fmt.Errorf("proto: wrong wireType = %d for field Id", wireType)
- }
- m.Id = 0
- for shift := uint(0); ; shift += 7 {
- if shift >= 64 {
- return ErrIntOverflowIntrospection
- }
- if iNdEx >= l {
- return io.ErrUnexpectedEOF
- }
- b := dAtA[iNdEx]
- iNdEx++
- m.Id |= uint64(b&0x7F) << shift
- if b < 0x80 {
- break
- }
- }
- case 2:
- if wireType != 0 {
- return fmt.Errorf("proto: wrong wireType = %d for field Result", wireType)
- }
- m.Result = 0
- for shift := uint(0); ; shift += 7 {
- if shift >= 64 {
- return ErrIntOverflowIntrospection
- }
- if iNdEx >= l {
- return io.ErrUnexpectedEOF
- }
- b := dAtA[iNdEx]
- iNdEx++
- m.Result |= CommandResponse_Result(b&0x7F) << shift
- if b < 0x80 {
- break
- }
- }
- case 3:
- if wireType != 2 {
- return fmt.Errorf("proto: wrong wireType = %d for field Error", wireType)
- }
- var stringLen uint64
- for shift := uint(0); ; shift += 7 {
- if shift >= 64 {
- return ErrIntOverflowIntrospection
- }
- if iNdEx >= l {
- return io.ErrUnexpectedEOF
- }
- b := dAtA[iNdEx]
- iNdEx++
- stringLen |= uint64(b&0x7F) << shift
- if b < 0x80 {
- break
- }
- }
- intStringLen := int(stringLen)
- if intStringLen < 0 {
- return ErrInvalidLengthIntrospection
- }
- postIndex := iNdEx + intStringLen
- if postIndex < 0 {
- return ErrInvalidLengthIntrospection
- }
- if postIndex > l {
- return io.ErrUnexpectedEOF
- }
- m.Error = string(dAtA[iNdEx:postIndex])
- iNdEx = postIndex
- case 4:
- if wireType != 2 {
- return fmt.Errorf("proto: wrong wireType = %d for field EffectiveConfig", wireType)
- }
- var msglen int
- for shift := uint(0); ; shift += 7 {
- if shift >= 64 {
- return ErrIntOverflowIntrospection
- }
- if iNdEx >= l {
- return io.ErrUnexpectedEOF
- }
- b := dAtA[iNdEx]
- iNdEx++
- msglen |= int(b&0x7F) << shift
- if b < 0x80 {
- break
- }
- }
- if msglen < 0 {
- return ErrInvalidLengthIntrospection
- }
- postIndex := iNdEx + msglen
- if postIndex < 0 {
- return ErrInvalidLengthIntrospection
- }
- if postIndex > l {
- return io.ErrUnexpectedEOF
- }
- if m.EffectiveConfig == nil {
- m.EffectiveConfig = &Configuration{}
- }
- if err := m.EffectiveConfig.Unmarshal(dAtA[iNdEx:postIndex]); err != nil {
- return err
- }
- iNdEx = postIndex
- default:
- iNdEx = preIndex
- skippy, err := skipIntrospection(dAtA[iNdEx:])
- if err != nil {
- return err
- }
- if skippy < 0 {
- return ErrInvalidLengthIntrospection
- }
- if (iNdEx + skippy) < 0 {
- return ErrInvalidLengthIntrospection
- }
- if (iNdEx + skippy) > l {
- return io.ErrUnexpectedEOF
- }
- iNdEx += skippy
- }
- }
-
- if iNdEx > l {
- return io.ErrUnexpectedEOF
- }
- return nil
-}
-func (m *ServerNotice) Unmarshal(dAtA []byte) error {
- l := len(dAtA)
- iNdEx := 0
- for iNdEx < l {
- preIndex := iNdEx
- var wire uint64
- for shift := uint(0); ; shift += 7 {
- if shift >= 64 {
- return ErrIntOverflowIntrospection
- }
- if iNdEx >= l {
- return io.ErrUnexpectedEOF
- }
- b := dAtA[iNdEx]
- iNdEx++
- wire |= uint64(b&0x7F) << shift
- if b < 0x80 {
- break
- }
- }
- fieldNum := int32(wire >> 3)
- wireType := int(wire & 0x7)
- if wireType == 4 {
- return fmt.Errorf("proto: ServerNotice: wiretype end group for non-group")
- }
- if fieldNum <= 0 {
- return fmt.Errorf("proto: ServerNotice: illegal tag %d (wire type %d)", fieldNum, wire)
- }
- switch fieldNum {
- case 1:
- if wireType != 0 {
- return fmt.Errorf("proto: wrong wireType = %d for field Kind", wireType)
- }
- m.Kind = 0
- for shift := uint(0); ; shift += 7 {
- if shift >= 64 {
- return ErrIntOverflowIntrospection
- }
- if iNdEx >= l {
- return io.ErrUnexpectedEOF
- }
- b := dAtA[iNdEx]
- iNdEx++
- m.Kind |= ServerNotice_Kind(b&0x7F) << shift
- if b < 0x80 {
- break
- }
- }
- default:
- iNdEx = preIndex
- skippy, err := skipIntrospection(dAtA[iNdEx:])
- if err != nil {
- return err
- }
- if skippy < 0 {
- return ErrInvalidLengthIntrospection
- }
- if (iNdEx + skippy) < 0 {
- return ErrInvalidLengthIntrospection
- }
- if (iNdEx + skippy) > l {
- return io.ErrUnexpectedEOF
- }
- iNdEx += skippy
- }
- }
-
- if iNdEx > l {
- return io.ErrUnexpectedEOF
- }
- return nil
-}
-func skipIntrospection(dAtA []byte) (n int, err error) {
- l := len(dAtA)
- iNdEx := 0
- depth := 0
- for iNdEx < l {
- var wire uint64
- for shift := uint(0); ; shift += 7 {
- if shift >= 64 {
- return 0, ErrIntOverflowIntrospection
- }
- if iNdEx >= l {
- return 0, io.ErrUnexpectedEOF
- }
- b := dAtA[iNdEx]
- iNdEx++
- wire |= (uint64(b) & 0x7F) << shift
- if b < 0x80 {
- break
- }
- }
- wireType := int(wire & 0x7)
- switch wireType {
- case 0:
- for shift := uint(0); ; shift += 7 {
- if shift >= 64 {
- return 0, ErrIntOverflowIntrospection
- }
- if iNdEx >= l {
- return 0, io.ErrUnexpectedEOF
- }
- iNdEx++
- if dAtA[iNdEx-1] < 0x80 {
- break
- }
- }
- case 1:
- iNdEx += 8
- case 2:
- var length int
- for shift := uint(0); ; shift += 7 {
- if shift >= 64 {
- return 0, ErrIntOverflowIntrospection
- }
- if iNdEx >= l {
- return 0, io.ErrUnexpectedEOF
- }
- b := dAtA[iNdEx]
- iNdEx++
- length |= (int(b) & 0x7F) << shift
- if b < 0x80 {
- break
- }
- }
- if length < 0 {
- return 0, ErrInvalidLengthIntrospection
- }
- iNdEx += length
- case 3:
- depth++
- case 4:
- if depth == 0 {
- return 0, ErrUnexpectedEndOfGroupIntrospection
- }
- depth--
- case 5:
- iNdEx += 4
- default:
- return 0, fmt.Errorf("proto: illegal wireType %d", wireType)
- }
- if iNdEx < 0 {
- return 0, ErrInvalidLengthIntrospection
- }
- if depth == 0 {
- return iNdEx, nil
- }
- }
- return 0, io.ErrUnexpectedEOF
-}
-
-var (
- ErrInvalidLengthIntrospection = fmt.Errorf("proto: negative length found during unmarshaling")
- ErrIntOverflowIntrospection = fmt.Errorf("proto: integer overflow")
- ErrUnexpectedEndOfGroupIntrospection = fmt.Errorf("proto: unexpected end of group")
-)
diff --git a/vendor/github.com/libp2p/go-libp2p/core/introspection/pb/introspection.proto b/vendor/github.com/libp2p/go-libp2p/core/introspection/pb/introspection.proto
deleted file mode 100644
index 144e7b995..000000000
--- a/vendor/github.com/libp2p/go-libp2p/core/introspection/pb/introspection.proto
+++ /dev/null
@@ -1,421 +0,0 @@
-syntax = "proto3";
-
-package pb;
-
-// Version of schema
-message Version {
- uint32 version = 1;
-}
-// ResultCounter is a monotonically increasing counter that reports an ok/err breakdown of the total.
-message ResultCounter {
- uint32 total = 1;
- uint32 ok = 2;
- uint32 err = 3;
-}
-
-// Moving totals over sliding time windows. Models sensible time windows,
-// we don't have to populate them all at once.
-//
-// Graphical example:
-//
-// time past -> present an event 16 min ago
-// ======================================================X================>>
-// | | 1m
-// | |---| 5m
-// | |-------------| 15m
-// |------------X---------------| 30m
-// |------------------------------------------X---------------| 60m
-message SlidingCounter {
- uint32 over_1m = 1;
- uint32 over_5m = 2;
- uint32 over_15m = 3;
- uint32 over_30m = 4;
- uint32 over_1hr = 5;
- uint32 over_2hr = 6;
- uint32 over_4hr = 7;
- uint32 over_8hr = 8;
- uint32 over_12hr = 9;
- uint32 over_24hr = 10;
-}
-
-// DataGauge reports stats for data traffic in a given direction.
-message DataGauge {
- // Cumulative bytes.
- uint64 cum_bytes = 1;
- // Cumulative packets.
- uint64 cum_packets = 2;
- // Instantaneous bandwidth measurement (bytes/second).
- uint64 inst_bw = 3;
-}
-
-// describes a type of event
-message EventType {
- // metadata about content types in event's top-level content JSON
- message EventProperty {
- // tells client how to sort, filter or display known content properties
- enum PropertyType {
- // for properties to treat as a simple primitive
- STRING = 0; // default
- NUMBER = 1;
- // for properties with special human-readable formatting
- TIME = 10;
- PEERID = 11;
- MULTIADDR = 12;
- // for complex structures like nested arrays, object trees etc
- JSON = 90;
- }
- // property name of content e.g. openTs
- string name = 1;
- // type to interpret content value as
- PropertyType type = 2;
- // if true, expect an array of values of `type`; else, singular
- bool has_multiple = 3;
- }
-
- // name of event type, e.g. PeerConnecting
- string name = 1;
- // for runtime, send property_types for all events already seen in events list
- // for events, only send property_types in the first event of a type not in runtime
- repeated EventProperty property_types = 2;
-}
-
-// Runtime encapsulates runtime info about a node.
-message Runtime {
- // e.g. go-libp2p, js-libp2p, rust-libp2p, etc.
- string implementation = 1;
- // e.g. 1.2.3.
- string version = 2;
- // e.g. Windows, Unix, macOS, Chrome, Mozilla, etc.
- string platform = 3;
- // our peer id - the peer id of the host system
- string peer_id = 4;
- // metadata describing configured event types
- repeated EventType event_types = 7;
-}
-
-// EndpointPair is a pair of multiaddrs.
-message EndpointPair {
- // the source multiaddr.
- string src_multiaddr = 1;
- // the destination multiaddr.
- string dst_multiaddr = 2;
-}
-
-// The status of a connection or stream.
-enum Status {
- ACTIVE = 0;
- CLOSED = 1;
- OPENING = 2;
- CLOSING = 3;
- ERROR = 4;
-}
-
-// Our role in a connection or stream.
-enum Role {
- INITIATOR = 0;
- RESPONDER = 1;
-}
-
-// Traffic encloses data transfer statistics.
-message Traffic {
- // snapshot of the data in metrics.
- DataGauge traffic_in = 1;
- // snapshot of the data out metrics.
- DataGauge traffic_out = 2;
-}
-
-// a list of streams, by reference or inlined.
-message StreamList {
- // NOTE: only one of the next 2 fields can appear, but proto3
- // doesn't support combining oneof and repeated.
- //
- // streams within this connection by reference.
- repeated bytes stream_ids = 1;
- // streams within this connection by inlining.
- repeated Stream streams = 2;
-}
-
-// Connection reports metrics and state of a libp2p connection.
-message Connection {
- // Timeline contains the timestamps (ms since epoch) of the well-known milestones of a connection.
- message Timeline {
- // the instant when a connection was opened on the wire.
- uint64 open_ts = 1;
- // the instant when the upgrade process (handshake, security, multiplexing) finished.
- uint64 upgraded_ts = 2;
- // the instant when this connection was terminated.
- uint64 close_ts = 3;
- }
-
- // Attributes encapsulates the attributes of this connection.
- message Attributes {
- // the multiplexer being used.
- string multiplexer = 1;
- // the encryption method being used.
- string encryption = 2;
- }
-
- // the id of this connection, not to be shown in user tooling,
- // used for (cross)referencing connections (e.g. relay).
- bytes id = 1;
- // the peer id of the other party.
- string peer_id = 2;
- // the status of this connection.
- Status status = 3;
- // a reference to the transport managing this connection.
- bytes transport_id = 4;
- // the endpoints participating in this connection.
- EndpointPair endpoints = 5;
- // the timeline of the connection, see Connection.Timeline.
- Timeline timeline = 6;
- // our role in this connection.
- Role role = 7;
- // traffic statistics.
- Traffic traffic = 8;
- // properties of this connection.
- Attributes attribs = 9;
- // the instantaneous latency of this connection in nanoseconds.
- uint64 latency_ns = 10;
- // streams within this connection.
- StreamList streams = 11;
-
- reserved 12 to 15;
-
- // if this is a relayed connection, this points to the relaying connection.
- // a default value here (empty bytes) indicates this is not a relayed connection.
- oneof relayed_over {
- bytes conn_id = 16;
- Connection conn = 17;
- }
- // user provided tags.
- repeated string user_provided_tags = 99;
-}
-
-// Stream reports metrics and state of a libp2p stream.
-message Stream {
- message ConnectionRef {
- oneof connection {
- // the parent connection inlined.
- Connection conn = 1;
- // the parent connection by reference.
- bytes conn_id = 2;
- }
- }
-
- // Timeline contains the timestamps (ms since epoch) of the well-known milestones of a stream.
- message Timeline {
- // the instant when the stream was opened.
- uint64 open_ts = 1;
- // the instant when the stream was terminated.
- uint64 close_ts = 2;
- }
-
- // the id of this stream, not to be shown in user tooling,
- // used for (cross)referencing streams.
- bytes id = 1;
- // the protocol pinned to this stream.
- string protocol = 2;
- // our role in this stream.
- Role role = 3;
- // traffic statistics.
- Traffic traffic = 4;
- // the connection this stream is hosted under.
- ConnectionRef conn = 5;
- // the timeline of the stream, see Stream.Timeline.
- Timeline timeline = 6;
- // the status of this stream.
- Status status = 7;
-
- // the instantaneous latency of this stream in nanoseconds.
- // TODO: this is hard to calculate.
- uint64 latency_ns = 16;
- // user provided tags.
- repeated string user_provided_tags = 99;
-}
-
-// DHT metrics and state.
-message DHT {
- message Params {
- // routing table bucket size.
- uint64 k = 1;
- // concurrency of asynchronous requests.
- uint64 alpha = 2;
- // number of disjoint paths to use.
- uint64 disjoint_paths = 3;
- // number of peers closest to a target that must have responded
- // in order for a given query path to complete
- uint64 beta = 4;
- }
-
- // Peer in DHT
- message PeerInDHT {
- // The DHT's relationship with this peer
- enum Status {
- // Connected, in a bucket, ready to send/receive queries
- ACTIVE = 0;
- // Not currently connected, still "in" a bucket (e.g. temporarily disconnected)
- MISSING = 1;
- // Removed from a bucket or candidate list (e.g. connection lost or too slow)
- REJECTED = 2;
- // Was reachable when last checked, waiting to join a currently-full bucket
- CANDIDATE = 3;
- }
- // the peer id of the host system
- string peer_id = 1;
- // the peer's status when data snapshot is taken
- Status status = 2;
- // age in bucket (ms)
- uint32 age_in_bucket = 3;
- }
-
- // A "k-bucket" containing peers of a certain kadamelia distance
- message Bucket {
- // CPL (Common Prefix Length) is the length of the common prefix
- // between the ids of every peer in this bucket and the DHT peer id
- uint32 cpl = 1;
- // Peers associated with this bucket
- repeated PeerInDHT peers = 2;
- // Bucket may need more fields depending on WIP remodeling
- }
-
- // Counters of query events, by status
- message QueryGauge {
- // Cumulative counter of queries with "SUCCESS" status
- uint64 success = 1;
- // Cumulative counter of queries with "ERROR" status
- uint64 error = 2;
- // Cumulative counter of queries with "TIMEOUT" status
- uint64 timeout = 3;
- }
-
- // DHT protocol name
- string protocol = 1;
- // protocol enabled.
- bool enabled = 2;
- // timestamp (ms since epoch) of start up.
- uint64 start_ts = 3;
- // params of the dht.
- Params params = 4;
- // existing, intantiated buckets and their contents
- repeated Bucket buckets = 5;
- // counts inbound queries received from other peers
- QueryGauge incoming_queries = 6;
- // counts outbound queries dispatched by this peer
- QueryGauge outgoing_queries = 7;
-}
-
-// Subsystems encapsulates all instrumented subsystems for a libp2p host.
-message Subsystems {
- // connections data, source agnostic but currently only supports the Swarm subsystem
- repeated Connection connections = 1;
- // the DHT subsystem.
- DHT dht = 2;
-}
-
-// Connections and streams output for a time interval is one of these.
-message State {
- // list of connections
- Subsystems subsystems = 1;
- // overall traffic for this peer
- Traffic traffic = 2;
- // moment this data snapshot and instantaneous values were taken
- uint64 instant_ts = 3;
- // start of included data collection (cumulative values counted from here)
- uint64 start_ts = 4;
- // length of time up to instant_ts covered by this data snapshot
- uint32 snapshot_duration_ms = 5;
-}
-
-// Event
-message Event {
- // definition of event type, containing only `name` unless this is first encounter of novel event
- EventType type = 1;
- // time this event occurred (ms since epoch)
- uint64 ts = 2;
- // stringified json; top-level keys and value types match EventProperty definitions
- string content = 3;
-}
-
-// ServerMessage wraps messages to be sent to clients to allow extension
-// based on new types of data sources
-message ServerMessage {
- // Version of this protobuf.
- Version version = 1;
- // The payload this message contains.
- oneof payload {
- State state = 2;
- Runtime runtime = 3;
- Event event = 4;
-
- CommandResponse response = 5;
- ServerNotice notice = 6;
- }
-}
-
-// Configuration encapsulates configuration fields for the protocol and commands.
-message Configuration {
- uint64 retention_period_ms = 1;
- uint64 state_snapshot_interval_ms = 2;
-}
-
-// ClientCommand is a command sent from the client to the server.
-message ClientCommand {
- enum Source {
- STATE = 0; // full state snapshot.
- RUNTIME = 1; // runtime data message.
- EVENTS = 2; // eventbus events.
- }
-
- enum Command {
- // HELLO is the first command that a client must send to greet the server.
- // Connections that do not respect this invariant will be terminated.
- HELLO = 0;
-
- // REQUEST is applicable to STATE and RUNTIME sources.
- REQUEST = 1;
-
- // PUSH streams can only be started for STATE and EVENTS sources.
- PUSH_ENABLE = 2; // enables pushing for a given source.
- PUSH_DISABLE = 3; // disables pushing for a given source.
- PUSH_PAUSE = 4; // pauses pushing for all sources.
- PUSH_RESUME = 5; // resumes pushing for all sources.
-
- // UPDATE_CONFIG requests a configuration update. The config field is
- // compulsory.
- //
- // The server reserves the right to override the requested values, and
- // will return the effective configuration in the response.
- UPDATE_CONFIG = 7;
- }
-
- Version version = 1;
- uint64 id = 2; // a unique ID for this request.
- Command command = 3;
- Source source = 4;
- Configuration config = 5;
-}
-
-// CommandResponse is a response to a command sent by the client.
-message CommandResponse {
- enum Result {
- OK = 0;
- ERR = 1;
- }
-
- uint64 id = 1; // for correlation with the request.
- Result result = 2;
- string error = 3;
-
- // effective_config is the effective configuration the server holds for
- // this connection. It is returned in response to HELLO and UPDATE_CONFIG
- // commands.
- Configuration effective_config = 4;
-}
-
-// ServerNotice represents a NOTICE sent from the server to the client.
-message ServerNotice {
- enum Kind {
- DISCARDING_EVENTS = 0;
- }
- Kind kind = 1;
-}
\ No newline at end of file
diff --git a/vendor/github.com/libp2p/go-libp2p/core/network/conn.go b/vendor/github.com/libp2p/go-libp2p/core/network/conn.go
index 8554493e2..3be8cb0d6 100644
--- a/vendor/github.com/libp2p/go-libp2p/core/network/conn.go
+++ b/vendor/github.com/libp2p/go-libp2p/core/network/conn.go
@@ -6,6 +6,7 @@ import (
ic "github.com/libp2p/go-libp2p/core/crypto"
"github.com/libp2p/go-libp2p/core/peer"
+ "github.com/libp2p/go-libp2p/core/protocol"
ma "github.com/multiformats/go-multiaddr"
)
@@ -32,6 +33,22 @@ type Conn interface {
// GetStreams returns all open streams over this conn.
GetStreams() []Stream
+
+ // IsClosed returns whether a connection is fully closed, so it can
+ // be garbage collected.
+ IsClosed() bool
+}
+
+// ConnectionState holds information about the connection.
+type ConnectionState struct {
+ // The stream multiplexer used on this connection (if any). For example: /yamux/1.0.0
+ StreamMultiplexer protocol.ID
+ // The security protocol used on this connection (if any). For example: /tls/1.0.0
+ Security protocol.ID
+ // the transport used on this connection. For example: tcp
+ Transport string
+ // indicates whether StreamMultiplexer was selected using inlined muxer negotiation
+ UsedEarlyMuxerNegotiation bool
}
// ConnSecurity is the interface that one can mix into a connection interface to
@@ -40,14 +57,14 @@ type ConnSecurity interface {
// LocalPeer returns our peer ID
LocalPeer() peer.ID
- // LocalPrivateKey returns our private key
- LocalPrivateKey() ic.PrivKey
-
// RemotePeer returns the peer ID of the remote peer.
RemotePeer() peer.ID
// RemotePublicKey returns the public key of the remote peer.
RemotePublicKey() ic.PubKey
+
+ // ConnState returns information about the connection state.
+ ConnState() ConnectionState
}
// ConnMultiaddrs is an interface mixin for connection types that provide multiaddr
diff --git a/vendor/github.com/libp2p/go-libp2p/core/network/network.go b/vendor/github.com/libp2p/go-libp2p/core/network/network.go
index bf96697d5..4cedb75d3 100644
--- a/vendor/github.com/libp2p/go-libp2p/core/network/network.go
+++ b/vendor/github.com/libp2p/go-libp2p/core/network/network.go
@@ -6,8 +6,10 @@
package network
import (
+ "bytes"
"context"
"io"
+ "sort"
"time"
"github.com/libp2p/go-libp2p/core/peer"
@@ -174,13 +176,43 @@ type Dialer interface {
// Peers returns the peers connected
Peers() []peer.ID
- // Conns returns the connections in this Netowrk
+ // Conns returns the connections in this Network
Conns() []Conn
- // ConnsToPeer returns the connections in this Netowrk for given peer.
+ // ConnsToPeer returns the connections in this Network for given peer.
ConnsToPeer(p peer.ID) []Conn
// Notify/StopNotify register and unregister a notifiee for signals
Notify(Notifiee)
StopNotify(Notifiee)
}
+
+// AddrDelay provides an address along with the delay after which the address
+// should be dialed
+type AddrDelay struct {
+ Addr ma.Multiaddr
+ Delay time.Duration
+}
+
+// DialRanker provides a schedule of dialing the provided addresses
+type DialRanker func([]ma.Multiaddr) []AddrDelay
+
+// DedupAddrs deduplicates addresses in place, leave only unique addresses.
+// It doesn't allocate.
+func DedupAddrs(addrs []ma.Multiaddr) []ma.Multiaddr {
+ if len(addrs) == 0 {
+ return addrs
+ }
+ sort.Slice(addrs, func(i, j int) bool { return bytes.Compare(addrs[i].Bytes(), addrs[j].Bytes()) < 0 })
+ idx := 1
+ for i := 1; i < len(addrs); i++ {
+ if !addrs[i-1].Equal(addrs[i]) {
+ addrs[idx] = addrs[i]
+ idx++
+ }
+ }
+ for i := idx; i < len(addrs); i++ {
+ addrs[i] = nil
+ }
+ return addrs[:idx]
+}
diff --git a/vendor/github.com/libp2p/go-libp2p/core/network/rcmgr.go b/vendor/github.com/libp2p/go-libp2p/core/network/rcmgr.go
index b60925756..524a28a8c 100644
--- a/vendor/github.com/libp2p/go-libp2p/core/network/rcmgr.go
+++ b/vendor/github.com/libp2p/go-libp2p/core/network/rcmgr.go
@@ -271,60 +271,57 @@ type ScopeStat struct {
}
// NullResourceManager is a stub for tests and initialization of default values
-var NullResourceManager ResourceManager = &nullResourceManager{}
-
-type nullResourceManager struct{}
-type nullScope struct{}
-
-var _ ResourceScope = (*nullScope)(nil)
-var _ ResourceScopeSpan = (*nullScope)(nil)
-var _ ServiceScope = (*nullScope)(nil)
-var _ ProtocolScope = (*nullScope)(nil)
-var _ PeerScope = (*nullScope)(nil)
-var _ ConnManagementScope = (*nullScope)(nil)
-var _ ConnScope = (*nullScope)(nil)
-var _ StreamManagementScope = (*nullScope)(nil)
-var _ StreamScope = (*nullScope)(nil)
+type NullResourceManager struct{}
+
+var _ ResourceScope = (*NullScope)(nil)
+var _ ResourceScopeSpan = (*NullScope)(nil)
+var _ ServiceScope = (*NullScope)(nil)
+var _ ProtocolScope = (*NullScope)(nil)
+var _ PeerScope = (*NullScope)(nil)
+var _ ConnManagementScope = (*NullScope)(nil)
+var _ ConnScope = (*NullScope)(nil)
+var _ StreamManagementScope = (*NullScope)(nil)
+var _ StreamScope = (*NullScope)(nil)
// NullScope is a stub for tests and initialization of default values
-var NullScope = &nullScope{}
+type NullScope struct{}
-func (n *nullResourceManager) ViewSystem(f func(ResourceScope) error) error {
- return f(NullScope)
+func (n *NullResourceManager) ViewSystem(f func(ResourceScope) error) error {
+ return f(&NullScope{})
}
-func (n *nullResourceManager) ViewTransient(f func(ResourceScope) error) error {
- return f(NullScope)
+func (n *NullResourceManager) ViewTransient(f func(ResourceScope) error) error {
+ return f(&NullScope{})
}
-func (n *nullResourceManager) ViewService(svc string, f func(ServiceScope) error) error {
- return f(NullScope)
+func (n *NullResourceManager) ViewService(svc string, f func(ServiceScope) error) error {
+ return f(&NullScope{})
}
-func (n *nullResourceManager) ViewProtocol(p protocol.ID, f func(ProtocolScope) error) error {
- return f(NullScope)
+func (n *NullResourceManager) ViewProtocol(p protocol.ID, f func(ProtocolScope) error) error {
+ return f(&NullScope{})
}
-func (n *nullResourceManager) ViewPeer(p peer.ID, f func(PeerScope) error) error {
- return f(NullScope)
+func (n *NullResourceManager) ViewPeer(p peer.ID, f func(PeerScope) error) error {
+ return f(&NullScope{})
}
-func (n *nullResourceManager) OpenConnection(dir Direction, usefd bool, endpoint multiaddr.Multiaddr) (ConnManagementScope, error) {
- return NullScope, nil
+func (n *NullResourceManager) OpenConnection(dir Direction, usefd bool, endpoint multiaddr.Multiaddr) (ConnManagementScope, error) {
+ return &NullScope{}, nil
}
-func (n *nullResourceManager) OpenStream(p peer.ID, dir Direction) (StreamManagementScope, error) {
- return NullScope, nil
+func (n *NullResourceManager) OpenStream(p peer.ID, dir Direction) (StreamManagementScope, error) {
+ return &NullScope{}, nil
}
-func (n *nullResourceManager) Close() error {
+func (n *NullResourceManager) Close() error {
return nil
}
-func (n *nullScope) ReserveMemory(size int, prio uint8) error { return nil }
-func (n *nullScope) ReleaseMemory(size int) {}
-func (n *nullScope) Stat() ScopeStat { return ScopeStat{} }
-func (n *nullScope) BeginSpan() (ResourceScopeSpan, error) { return NullScope, nil }
-func (n *nullScope) Done() {}
-func (n *nullScope) Name() string { return "" }
-func (n *nullScope) Protocol() protocol.ID { return "" }
-func (n *nullScope) Peer() peer.ID { return "" }
-func (n *nullScope) PeerScope() PeerScope { return NullScope }
-func (n *nullScope) SetPeer(peer.ID) error { return nil }
-func (n *nullScope) ProtocolScope() ProtocolScope { return NullScope }
-func (n *nullScope) SetProtocol(proto protocol.ID) error { return nil }
-func (n *nullScope) ServiceScope() ServiceScope { return NullScope }
-func (n *nullScope) SetService(srv string) error { return nil }
+func (n *NullScope) ReserveMemory(size int, prio uint8) error { return nil }
+func (n *NullScope) ReleaseMemory(size int) {}
+func (n *NullScope) Stat() ScopeStat { return ScopeStat{} }
+func (n *NullScope) BeginSpan() (ResourceScopeSpan, error) { return &NullScope{}, nil }
+func (n *NullScope) Done() {}
+func (n *NullScope) Name() string { return "" }
+func (n *NullScope) Protocol() protocol.ID { return "" }
+func (n *NullScope) Peer() peer.ID { return "" }
+func (n *NullScope) PeerScope() PeerScope { return &NullScope{} }
+func (n *NullScope) SetPeer(peer.ID) error { return nil }
+func (n *NullScope) ProtocolScope() ProtocolScope { return &NullScope{} }
+func (n *NullScope) SetProtocol(proto protocol.ID) error { return nil }
+func (n *NullScope) ServiceScope() ServiceScope { return &NullScope{} }
+func (n *NullScope) SetService(srv string) error { return nil }
diff --git a/vendor/github.com/libp2p/go-libp2p/core/peer/addrinfo.go b/vendor/github.com/libp2p/go-libp2p/core/peer/addrinfo.go
index 19b07a4b7..b479df9e0 100644
--- a/vendor/github.com/libp2p/go-libp2p/core/peer/addrinfo.go
+++ b/vendor/github.com/libp2p/go-libp2p/core/peer/addrinfo.go
@@ -86,7 +86,6 @@ func AddrInfoFromP2pAddr(m ma.Multiaddr) (*AddrInfo, error) {
// AddrInfoToP2pAddrs converts an AddrInfo to a list of Multiaddrs.
func AddrInfoToP2pAddrs(pi *AddrInfo) ([]ma.Multiaddr, error) {
- var addrs []ma.Multiaddr
p2ppart, err := ma.NewComponent("p2p", Encode(pi.ID))
if err != nil {
return nil, err
@@ -94,6 +93,7 @@ func AddrInfoToP2pAddrs(pi *AddrInfo) ([]ma.Multiaddr, error) {
if len(pi.Addrs) == 0 {
return []ma.Multiaddr{p2ppart}, nil
}
+ addrs := make([]ma.Multiaddr, 0, len(pi.Addrs))
for _, addr := range pi.Addrs {
addrs = append(addrs, addr.Encapsulate(p2ppart))
}
diff --git a/vendor/github.com/libp2p/go-libp2p/core/peer/pb/Makefile b/vendor/github.com/libp2p/go-libp2p/core/peer/pb/Makefile
deleted file mode 100644
index 7cf8222f8..000000000
--- a/vendor/github.com/libp2p/go-libp2p/core/peer/pb/Makefile
+++ /dev/null
@@ -1,11 +0,0 @@
-PB = $(wildcard *.proto)
-GO = $(PB:.proto=.pb.go)
-
-all: $(GO)
-
-%.pb.go: %.proto
- protoc --proto_path=$(PWD):$(PWD)/../.. --gogofaster_out=. $<
-
-clean:
- rm -f *.pb.go
- rm -f *.go
diff --git a/vendor/github.com/libp2p/go-libp2p/core/peer/pb/peer_record.pb.go b/vendor/github.com/libp2p/go-libp2p/core/peer/pb/peer_record.pb.go
index 36040c3c2..2aa8a07aa 100644
--- a/vendor/github.com/libp2p/go-libp2p/core/peer/pb/peer_record.pb.go
+++ b/vendor/github.com/libp2p/go-libp2p/core/peer/pb/peer_record.pb.go
@@ -1,27 +1,24 @@
-// Code generated by protoc-gen-gogo. DO NOT EDIT.
-// source: peer_record.proto
+// Code generated by protoc-gen-go. DO NOT EDIT.
+// versions:
+// protoc-gen-go v1.30.0
+// protoc v3.21.12
+// source: pb/peer_record.proto
-package peer_pb
+package pb
import (
- fmt "fmt"
- io "io"
- math "math"
- math_bits "math/bits"
-
- proto "github.com/gogo/protobuf/proto"
+ protoreflect "google.golang.org/protobuf/reflect/protoreflect"
+ protoimpl "google.golang.org/protobuf/runtime/protoimpl"
+ reflect "reflect"
+ sync "sync"
)
-// Reference imports to suppress errors if they are not otherwise used.
-var _ = proto.Marshal
-var _ = fmt.Errorf
-var _ = math.Inf
-
-// This is a compile-time assertion to ensure that this generated file
-// is compatible with the proto package it is being compiled against.
-// A compilation error at this line likely means your copy of the
-// proto package needs to be updated.
-const _ = proto.GoGoProtoPackageIsVersion3 // please upgrade the proto package
+const (
+ // Verify that this generated code is sufficiently up-to-date.
+ _ = protoimpl.EnforceVersion(20 - protoimpl.MinVersion)
+ // Verify that runtime/protoimpl is sufficiently up-to-date.
+ _ = protoimpl.EnforceVersion(protoimpl.MaxVersion - 20)
+)
// PeerRecord messages contain information that is useful to share with other peers.
// Currently, a PeerRecord contains the public listen addresses for a peer, but this
@@ -32,6 +29,10 @@ const _ = proto.GoGoProtoPackageIsVersion3 // please upgrade the proto package
// See https://github.com/libp2p/go-libp2p/core/record/pb/envelope.proto for
// the SignedEnvelope definition.
type PeerRecord struct {
+ state protoimpl.MessageState
+ sizeCache protoimpl.SizeCache
+ unknownFields protoimpl.UnknownFields
+
// peer_id contains a libp2p peer id in its binary representation.
PeerId []byte `protobuf:"bytes,1,opt,name=peer_id,json=peerId,proto3" json:"peer_id,omitempty"`
// seq contains a monotonically-increasing sequence counter to order PeerRecords in time.
@@ -40,56 +41,55 @@ type PeerRecord struct {
Addresses []*PeerRecord_AddressInfo `protobuf:"bytes,3,rep,name=addresses,proto3" json:"addresses,omitempty"`
}
-func (m *PeerRecord) Reset() { *m = PeerRecord{} }
-func (m *PeerRecord) String() string { return proto.CompactTextString(m) }
-func (*PeerRecord) ProtoMessage() {}
-func (*PeerRecord) Descriptor() ([]byte, []int) {
- return fileDescriptor_dc0d8059ab0ad14d, []int{0}
+func (x *PeerRecord) Reset() {
+ *x = PeerRecord{}
+ if protoimpl.UnsafeEnabled {
+ mi := &file_pb_peer_record_proto_msgTypes[0]
+ ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
+ ms.StoreMessageInfo(mi)
+ }
}
-func (m *PeerRecord) XXX_Unmarshal(b []byte) error {
- return m.Unmarshal(b)
+
+func (x *PeerRecord) String() string {
+ return protoimpl.X.MessageStringOf(x)
}
-func (m *PeerRecord) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) {
- if deterministic {
- return xxx_messageInfo_PeerRecord.Marshal(b, m, deterministic)
- } else {
- b = b[:cap(b)]
- n, err := m.MarshalToSizedBuffer(b)
- if err != nil {
- return nil, err
+
+func (*PeerRecord) ProtoMessage() {}
+
+func (x *PeerRecord) ProtoReflect() protoreflect.Message {
+ mi := &file_pb_peer_record_proto_msgTypes[0]
+ if protoimpl.UnsafeEnabled && x != nil {
+ ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
+ if ms.LoadMessageInfo() == nil {
+ ms.StoreMessageInfo(mi)
}
- return b[:n], nil
+ return ms
}
-}
-func (m *PeerRecord) XXX_Merge(src proto.Message) {
- xxx_messageInfo_PeerRecord.Merge(m, src)
-}
-func (m *PeerRecord) XXX_Size() int {
- return m.Size()
-}
-func (m *PeerRecord) XXX_DiscardUnknown() {
- xxx_messageInfo_PeerRecord.DiscardUnknown(m)
+ return mi.MessageOf(x)
}
-var xxx_messageInfo_PeerRecord proto.InternalMessageInfo
+// Deprecated: Use PeerRecord.ProtoReflect.Descriptor instead.
+func (*PeerRecord) Descriptor() ([]byte, []int) {
+ return file_pb_peer_record_proto_rawDescGZIP(), []int{0}
+}
-func (m *PeerRecord) GetPeerId() []byte {
- if m != nil {
- return m.PeerId
+func (x *PeerRecord) GetPeerId() []byte {
+ if x != nil {
+ return x.PeerId
}
return nil
}
-func (m *PeerRecord) GetSeq() uint64 {
- if m != nil {
- return m.Seq
+func (x *PeerRecord) GetSeq() uint64 {
+ if x != nil {
+ return x.Seq
}
return 0
}
-func (m *PeerRecord) GetAddresses() []*PeerRecord_AddressInfo {
- if m != nil {
- return m.Addresses
+func (x *PeerRecord) GetAddresses() []*PeerRecord_AddressInfo {
+ if x != nil {
+ return x.Addresses
}
return nil
}
@@ -97,511 +97,143 @@ func (m *PeerRecord) GetAddresses() []*PeerRecord_AddressInfo {
// AddressInfo is a wrapper around a binary multiaddr. It is defined as a
// separate message to allow us to add per-address metadata in the future.
type PeerRecord_AddressInfo struct {
- Multiaddr []byte `protobuf:"bytes,1,opt,name=multiaddr,proto3" json:"multiaddr,omitempty"`
-}
-
-func (m *PeerRecord_AddressInfo) Reset() { *m = PeerRecord_AddressInfo{} }
-func (m *PeerRecord_AddressInfo) String() string { return proto.CompactTextString(m) }
-func (*PeerRecord_AddressInfo) ProtoMessage() {}
-func (*PeerRecord_AddressInfo) Descriptor() ([]byte, []int) {
- return fileDescriptor_dc0d8059ab0ad14d, []int{0, 0}
-}
-func (m *PeerRecord_AddressInfo) XXX_Unmarshal(b []byte) error {
- return m.Unmarshal(b)
-}
-func (m *PeerRecord_AddressInfo) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) {
- if deterministic {
- return xxx_messageInfo_PeerRecord_AddressInfo.Marshal(b, m, deterministic)
- } else {
- b = b[:cap(b)]
- n, err := m.MarshalToSizedBuffer(b)
- if err != nil {
- return nil, err
- }
- return b[:n], nil
- }
-}
-func (m *PeerRecord_AddressInfo) XXX_Merge(src proto.Message) {
- xxx_messageInfo_PeerRecord_AddressInfo.Merge(m, src)
-}
-func (m *PeerRecord_AddressInfo) XXX_Size() int {
- return m.Size()
-}
-func (m *PeerRecord_AddressInfo) XXX_DiscardUnknown() {
- xxx_messageInfo_PeerRecord_AddressInfo.DiscardUnknown(m)
-}
-
-var xxx_messageInfo_PeerRecord_AddressInfo proto.InternalMessageInfo
-
-func (m *PeerRecord_AddressInfo) GetMultiaddr() []byte {
- if m != nil {
- return m.Multiaddr
- }
- return nil
-}
+ state protoimpl.MessageState
+ sizeCache protoimpl.SizeCache
+ unknownFields protoimpl.UnknownFields
-func init() {
- proto.RegisterType((*PeerRecord)(nil), "peer.pb.PeerRecord")
- proto.RegisterType((*PeerRecord_AddressInfo)(nil), "peer.pb.PeerRecord.AddressInfo")
-}
-
-func init() { proto.RegisterFile("peer_record.proto", fileDescriptor_dc0d8059ab0ad14d) }
-
-var fileDescriptor_dc0d8059ab0ad14d = []byte{
- // 189 bytes of a gzipped FileDescriptorProto
- 0x1f, 0x8b, 0x08, 0x00, 0x00, 0x00, 0x00, 0x00, 0x02, 0xff, 0xe2, 0x12, 0x2c, 0x48, 0x4d, 0x2d,
- 0x8a, 0x2f, 0x4a, 0x4d, 0xce, 0x2f, 0x4a, 0xd1, 0x2b, 0x28, 0xca, 0x2f, 0xc9, 0x17, 0x62, 0x07,
- 0x09, 0xe9, 0x15, 0x24, 0x29, 0x2d, 0x66, 0xe4, 0xe2, 0x0a, 0x48, 0x4d, 0x2d, 0x0a, 0x02, 0xcb,
- 0x0a, 0x89, 0x73, 0x81, 0x65, 0xe2, 0x33, 0x53, 0x24, 0x18, 0x15, 0x18, 0x35, 0x78, 0x82, 0xd8,
- 0x40, 0x5c, 0xcf, 0x14, 0x21, 0x01, 0x2e, 0xe6, 0xe2, 0xd4, 0x42, 0x09, 0x26, 0x05, 0x46, 0x0d,
- 0x96, 0x20, 0x10, 0x53, 0xc8, 0x96, 0x8b, 0x33, 0x31, 0x25, 0xa5, 0x28, 0xb5, 0xb8, 0x38, 0xb5,
- 0x58, 0x82, 0x59, 0x81, 0x59, 0x83, 0xdb, 0x48, 0x5e, 0x0f, 0x6a, 0xac, 0x1e, 0xc2, 0x48, 0x3d,
- 0x47, 0x88, 0x22, 0xcf, 0xbc, 0xb4, 0xfc, 0x20, 0x84, 0x0e, 0x29, 0x6d, 0x2e, 0x6e, 0x24, 0x19,
- 0x21, 0x19, 0x2e, 0xce, 0xdc, 0xd2, 0x9c, 0x92, 0x4c, 0x90, 0x02, 0xa8, 0xd5, 0x08, 0x01, 0x27,
- 0x89, 0x13, 0x8f, 0xe4, 0x18, 0x2f, 0x3c, 0x92, 0x63, 0x7c, 0xf0, 0x48, 0x8e, 0x71, 0xc2, 0x63,
- 0x39, 0x86, 0x0b, 0x8f, 0xe5, 0x18, 0x6e, 0x3c, 0x96, 0x63, 0x48, 0x62, 0x03, 0xfb, 0xc7, 0x18,
- 0x10, 0x00, 0x00, 0xff, 0xff, 0xcb, 0x99, 0x56, 0x19, 0xe4, 0x00, 0x00, 0x00,
-}
-
-func (m *PeerRecord) Marshal() (dAtA []byte, err error) {
- size := m.Size()
- dAtA = make([]byte, size)
- n, err := m.MarshalToSizedBuffer(dAtA[:size])
- if err != nil {
- return nil, err
- }
- return dAtA[:n], nil
-}
-
-func (m *PeerRecord) MarshalTo(dAtA []byte) (int, error) {
- size := m.Size()
- return m.MarshalToSizedBuffer(dAtA[:size])
-}
-
-func (m *PeerRecord) MarshalToSizedBuffer(dAtA []byte) (int, error) {
- i := len(dAtA)
- _ = i
- var l int
- _ = l
- if len(m.Addresses) > 0 {
- for iNdEx := len(m.Addresses) - 1; iNdEx >= 0; iNdEx-- {
- {
- size, err := m.Addresses[iNdEx].MarshalToSizedBuffer(dAtA[:i])
- if err != nil {
- return 0, err
- }
- i -= size
- i = encodeVarintPeerRecord(dAtA, i, uint64(size))
- }
- i--
- dAtA[i] = 0x1a
- }
- }
- if m.Seq != 0 {
- i = encodeVarintPeerRecord(dAtA, i, uint64(m.Seq))
- i--
- dAtA[i] = 0x10
- }
- if len(m.PeerId) > 0 {
- i -= len(m.PeerId)
- copy(dAtA[i:], m.PeerId)
- i = encodeVarintPeerRecord(dAtA, i, uint64(len(m.PeerId)))
- i--
- dAtA[i] = 0xa
- }
- return len(dAtA) - i, nil
+ Multiaddr []byte `protobuf:"bytes,1,opt,name=multiaddr,proto3" json:"multiaddr,omitempty"`
}
-func (m *PeerRecord_AddressInfo) Marshal() (dAtA []byte, err error) {
- size := m.Size()
- dAtA = make([]byte, size)
- n, err := m.MarshalToSizedBuffer(dAtA[:size])
- if err != nil {
- return nil, err
+func (x *PeerRecord_AddressInfo) Reset() {
+ *x = PeerRecord_AddressInfo{}
+ if protoimpl.UnsafeEnabled {
+ mi := &file_pb_peer_record_proto_msgTypes[1]
+ ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
+ ms.StoreMessageInfo(mi)
}
- return dAtA[:n], nil
}
-func (m *PeerRecord_AddressInfo) MarshalTo(dAtA []byte) (int, error) {
- size := m.Size()
- return m.MarshalToSizedBuffer(dAtA[:size])
+func (x *PeerRecord_AddressInfo) String() string {
+ return protoimpl.X.MessageStringOf(x)
}
-func (m *PeerRecord_AddressInfo) MarshalToSizedBuffer(dAtA []byte) (int, error) {
- i := len(dAtA)
- _ = i
- var l int
- _ = l
- if len(m.Multiaddr) > 0 {
- i -= len(m.Multiaddr)
- copy(dAtA[i:], m.Multiaddr)
- i = encodeVarintPeerRecord(dAtA, i, uint64(len(m.Multiaddr)))
- i--
- dAtA[i] = 0xa
- }
- return len(dAtA) - i, nil
-}
+func (*PeerRecord_AddressInfo) ProtoMessage() {}
-func encodeVarintPeerRecord(dAtA []byte, offset int, v uint64) int {
- offset -= sovPeerRecord(v)
- base := offset
- for v >= 1<<7 {
- dAtA[offset] = uint8(v&0x7f | 0x80)
- v >>= 7
- offset++
- }
- dAtA[offset] = uint8(v)
- return base
-}
-func (m *PeerRecord) Size() (n int) {
- if m == nil {
- return 0
- }
- var l int
- _ = l
- l = len(m.PeerId)
- if l > 0 {
- n += 1 + l + sovPeerRecord(uint64(l))
- }
- if m.Seq != 0 {
- n += 1 + sovPeerRecord(uint64(m.Seq))
- }
- if len(m.Addresses) > 0 {
- for _, e := range m.Addresses {
- l = e.Size()
- n += 1 + l + sovPeerRecord(uint64(l))
+func (x *PeerRecord_AddressInfo) ProtoReflect() protoreflect.Message {
+ mi := &file_pb_peer_record_proto_msgTypes[1]
+ if protoimpl.UnsafeEnabled && x != nil {
+ ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
+ if ms.LoadMessageInfo() == nil {
+ ms.StoreMessageInfo(mi)
}
+ return ms
}
- return n
+ return mi.MessageOf(x)
}
-func (m *PeerRecord_AddressInfo) Size() (n int) {
- if m == nil {
- return 0
- }
- var l int
- _ = l
- l = len(m.Multiaddr)
- if l > 0 {
- n += 1 + l + sovPeerRecord(uint64(l))
- }
- return n
-}
-
-func sovPeerRecord(x uint64) (n int) {
- return (math_bits.Len64(x|1) + 6) / 7
-}
-func sozPeerRecord(x uint64) (n int) {
- return sovPeerRecord(uint64((x << 1) ^ uint64((int64(x) >> 63))))
+// Deprecated: Use PeerRecord_AddressInfo.ProtoReflect.Descriptor instead.
+func (*PeerRecord_AddressInfo) Descriptor() ([]byte, []int) {
+ return file_pb_peer_record_proto_rawDescGZIP(), []int{0, 0}
}
-func (m *PeerRecord) Unmarshal(dAtA []byte) error {
- l := len(dAtA)
- iNdEx := 0
- for iNdEx < l {
- preIndex := iNdEx
- var wire uint64
- for shift := uint(0); ; shift += 7 {
- if shift >= 64 {
- return ErrIntOverflowPeerRecord
- }
- if iNdEx >= l {
- return io.ErrUnexpectedEOF
- }
- b := dAtA[iNdEx]
- iNdEx++
- wire |= uint64(b&0x7F) << shift
- if b < 0x80 {
- break
- }
- }
- fieldNum := int32(wire >> 3)
- wireType := int(wire & 0x7)
- if wireType == 4 {
- return fmt.Errorf("proto: PeerRecord: wiretype end group for non-group")
- }
- if fieldNum <= 0 {
- return fmt.Errorf("proto: PeerRecord: illegal tag %d (wire type %d)", fieldNum, wire)
- }
- switch fieldNum {
- case 1:
- if wireType != 2 {
- return fmt.Errorf("proto: wrong wireType = %d for field PeerId", wireType)
- }
- var byteLen int
- for shift := uint(0); ; shift += 7 {
- if shift >= 64 {
- return ErrIntOverflowPeerRecord
- }
- if iNdEx >= l {
- return io.ErrUnexpectedEOF
- }
- b := dAtA[iNdEx]
- iNdEx++
- byteLen |= int(b&0x7F) << shift
- if b < 0x80 {
- break
- }
- }
- if byteLen < 0 {
- return ErrInvalidLengthPeerRecord
- }
- postIndex := iNdEx + byteLen
- if postIndex < 0 {
- return ErrInvalidLengthPeerRecord
- }
- if postIndex > l {
- return io.ErrUnexpectedEOF
- }
- m.PeerId = append(m.PeerId[:0], dAtA[iNdEx:postIndex]...)
- if m.PeerId == nil {
- m.PeerId = []byte{}
- }
- iNdEx = postIndex
- case 2:
- if wireType != 0 {
- return fmt.Errorf("proto: wrong wireType = %d for field Seq", wireType)
- }
- m.Seq = 0
- for shift := uint(0); ; shift += 7 {
- if shift >= 64 {
- return ErrIntOverflowPeerRecord
- }
- if iNdEx >= l {
- return io.ErrUnexpectedEOF
- }
- b := dAtA[iNdEx]
- iNdEx++
- m.Seq |= uint64(b&0x7F) << shift
- if b < 0x80 {
- break
- }
- }
- case 3:
- if wireType != 2 {
- return fmt.Errorf("proto: wrong wireType = %d for field Addresses", wireType)
- }
- var msglen int
- for shift := uint(0); ; shift += 7 {
- if shift >= 64 {
- return ErrIntOverflowPeerRecord
- }
- if iNdEx >= l {
- return io.ErrUnexpectedEOF
- }
- b := dAtA[iNdEx]
- iNdEx++
- msglen |= int(b&0x7F) << shift
- if b < 0x80 {
- break
- }
- }
- if msglen < 0 {
- return ErrInvalidLengthPeerRecord
- }
- postIndex := iNdEx + msglen
- if postIndex < 0 {
- return ErrInvalidLengthPeerRecord
- }
- if postIndex > l {
- return io.ErrUnexpectedEOF
- }
- m.Addresses = append(m.Addresses, &PeerRecord_AddressInfo{})
- if err := m.Addresses[len(m.Addresses)-1].Unmarshal(dAtA[iNdEx:postIndex]); err != nil {
- return err
- }
- iNdEx = postIndex
- default:
- iNdEx = preIndex
- skippy, err := skipPeerRecord(dAtA[iNdEx:])
- if err != nil {
- return err
- }
- if skippy < 0 {
- return ErrInvalidLengthPeerRecord
- }
- if (iNdEx + skippy) < 0 {
- return ErrInvalidLengthPeerRecord
- }
- if (iNdEx + skippy) > l {
- return io.ErrUnexpectedEOF
- }
- iNdEx += skippy
- }
- }
- if iNdEx > l {
- return io.ErrUnexpectedEOF
+func (x *PeerRecord_AddressInfo) GetMultiaddr() []byte {
+ if x != nil {
+ return x.Multiaddr
}
return nil
}
-func (m *PeerRecord_AddressInfo) Unmarshal(dAtA []byte) error {
- l := len(dAtA)
- iNdEx := 0
- for iNdEx < l {
- preIndex := iNdEx
- var wire uint64
- for shift := uint(0); ; shift += 7 {
- if shift >= 64 {
- return ErrIntOverflowPeerRecord
- }
- if iNdEx >= l {
- return io.ErrUnexpectedEOF
- }
- b := dAtA[iNdEx]
- iNdEx++
- wire |= uint64(b&0x7F) << shift
- if b < 0x80 {
- break
- }
- }
- fieldNum := int32(wire >> 3)
- wireType := int(wire & 0x7)
- if wireType == 4 {
- return fmt.Errorf("proto: AddressInfo: wiretype end group for non-group")
- }
- if fieldNum <= 0 {
- return fmt.Errorf("proto: AddressInfo: illegal tag %d (wire type %d)", fieldNum, wire)
- }
- switch fieldNum {
- case 1:
- if wireType != 2 {
- return fmt.Errorf("proto: wrong wireType = %d for field Multiaddr", wireType)
- }
- var byteLen int
- for shift := uint(0); ; shift += 7 {
- if shift >= 64 {
- return ErrIntOverflowPeerRecord
- }
- if iNdEx >= l {
- return io.ErrUnexpectedEOF
- }
- b := dAtA[iNdEx]
- iNdEx++
- byteLen |= int(b&0x7F) << shift
- if b < 0x80 {
- break
- }
- }
- if byteLen < 0 {
- return ErrInvalidLengthPeerRecord
- }
- postIndex := iNdEx + byteLen
- if postIndex < 0 {
- return ErrInvalidLengthPeerRecord
- }
- if postIndex > l {
- return io.ErrUnexpectedEOF
- }
- m.Multiaddr = append(m.Multiaddr[:0], dAtA[iNdEx:postIndex]...)
- if m.Multiaddr == nil {
- m.Multiaddr = []byte{}
- }
- iNdEx = postIndex
- default:
- iNdEx = preIndex
- skippy, err := skipPeerRecord(dAtA[iNdEx:])
- if err != nil {
- return err
- }
- if skippy < 0 {
- return ErrInvalidLengthPeerRecord
- }
- if (iNdEx + skippy) < 0 {
- return ErrInvalidLengthPeerRecord
- }
- if (iNdEx + skippy) > l {
- return io.ErrUnexpectedEOF
- }
- iNdEx += skippy
- }
- }
- if iNdEx > l {
- return io.ErrUnexpectedEOF
- }
- return nil
-}
-func skipPeerRecord(dAtA []byte) (n int, err error) {
- l := len(dAtA)
- iNdEx := 0
- depth := 0
- for iNdEx < l {
- var wire uint64
- for shift := uint(0); ; shift += 7 {
- if shift >= 64 {
- return 0, ErrIntOverflowPeerRecord
- }
- if iNdEx >= l {
- return 0, io.ErrUnexpectedEOF
- }
- b := dAtA[iNdEx]
- iNdEx++
- wire |= (uint64(b) & 0x7F) << shift
- if b < 0x80 {
- break
- }
- }
- wireType := int(wire & 0x7)
- switch wireType {
- case 0:
- for shift := uint(0); ; shift += 7 {
- if shift >= 64 {
- return 0, ErrIntOverflowPeerRecord
- }
- if iNdEx >= l {
- return 0, io.ErrUnexpectedEOF
- }
- iNdEx++
- if dAtA[iNdEx-1] < 0x80 {
- break
- }
- }
- case 1:
- iNdEx += 8
- case 2:
- var length int
- for shift := uint(0); ; shift += 7 {
- if shift >= 64 {
- return 0, ErrIntOverflowPeerRecord
- }
- if iNdEx >= l {
- return 0, io.ErrUnexpectedEOF
- }
- b := dAtA[iNdEx]
- iNdEx++
- length |= (int(b) & 0x7F) << shift
- if b < 0x80 {
- break
- }
- }
- if length < 0 {
- return 0, ErrInvalidLengthPeerRecord
- }
- iNdEx += length
- case 3:
- depth++
- case 4:
- if depth == 0 {
- return 0, ErrUnexpectedEndOfGroupPeerRecord
- }
- depth--
- case 5:
- iNdEx += 4
- default:
- return 0, fmt.Errorf("proto: illegal wireType %d", wireType)
- }
- if iNdEx < 0 {
- return 0, ErrInvalidLengthPeerRecord
- }
- if depth == 0 {
- return iNdEx, nil
- }
- }
- return 0, io.ErrUnexpectedEOF
+var File_pb_peer_record_proto protoreflect.FileDescriptor
+
+var file_pb_peer_record_proto_rawDesc = []byte{
+ 0x0a, 0x14, 0x70, 0x62, 0x2f, 0x70, 0x65, 0x65, 0x72, 0x5f, 0x72, 0x65, 0x63, 0x6f, 0x72, 0x64,
+ 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x12, 0x07, 0x70, 0x65, 0x65, 0x72, 0x2e, 0x70, 0x62, 0x22,
+ 0xa3, 0x01, 0x0a, 0x0a, 0x50, 0x65, 0x65, 0x72, 0x52, 0x65, 0x63, 0x6f, 0x72, 0x64, 0x12, 0x17,
+ 0x0a, 0x07, 0x70, 0x65, 0x65, 0x72, 0x5f, 0x69, 0x64, 0x18, 0x01, 0x20, 0x01, 0x28, 0x0c, 0x52,
+ 0x06, 0x70, 0x65, 0x65, 0x72, 0x49, 0x64, 0x12, 0x10, 0x0a, 0x03, 0x73, 0x65, 0x71, 0x18, 0x02,
+ 0x20, 0x01, 0x28, 0x04, 0x52, 0x03, 0x73, 0x65, 0x71, 0x12, 0x3d, 0x0a, 0x09, 0x61, 0x64, 0x64,
+ 0x72, 0x65, 0x73, 0x73, 0x65, 0x73, 0x18, 0x03, 0x20, 0x03, 0x28, 0x0b, 0x32, 0x1f, 0x2e, 0x70,
+ 0x65, 0x65, 0x72, 0x2e, 0x70, 0x62, 0x2e, 0x50, 0x65, 0x65, 0x72, 0x52, 0x65, 0x63, 0x6f, 0x72,
+ 0x64, 0x2e, 0x41, 0x64, 0x64, 0x72, 0x65, 0x73, 0x73, 0x49, 0x6e, 0x66, 0x6f, 0x52, 0x09, 0x61,
+ 0x64, 0x64, 0x72, 0x65, 0x73, 0x73, 0x65, 0x73, 0x1a, 0x2b, 0x0a, 0x0b, 0x41, 0x64, 0x64, 0x72,
+ 0x65, 0x73, 0x73, 0x49, 0x6e, 0x66, 0x6f, 0x12, 0x1c, 0x0a, 0x09, 0x6d, 0x75, 0x6c, 0x74, 0x69,
+ 0x61, 0x64, 0x64, 0x72, 0x18, 0x01, 0x20, 0x01, 0x28, 0x0c, 0x52, 0x09, 0x6d, 0x75, 0x6c, 0x74,
+ 0x69, 0x61, 0x64, 0x64, 0x72, 0x62, 0x06, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x33,
}
var (
- ErrInvalidLengthPeerRecord = fmt.Errorf("proto: negative length found during unmarshaling")
- ErrIntOverflowPeerRecord = fmt.Errorf("proto: integer overflow")
- ErrUnexpectedEndOfGroupPeerRecord = fmt.Errorf("proto: unexpected end of group")
+ file_pb_peer_record_proto_rawDescOnce sync.Once
+ file_pb_peer_record_proto_rawDescData = file_pb_peer_record_proto_rawDesc
)
+
+func file_pb_peer_record_proto_rawDescGZIP() []byte {
+ file_pb_peer_record_proto_rawDescOnce.Do(func() {
+ file_pb_peer_record_proto_rawDescData = protoimpl.X.CompressGZIP(file_pb_peer_record_proto_rawDescData)
+ })
+ return file_pb_peer_record_proto_rawDescData
+}
+
+var file_pb_peer_record_proto_msgTypes = make([]protoimpl.MessageInfo, 2)
+var file_pb_peer_record_proto_goTypes = []interface{}{
+ (*PeerRecord)(nil), // 0: peer.pb.PeerRecord
+ (*PeerRecord_AddressInfo)(nil), // 1: peer.pb.PeerRecord.AddressInfo
+}
+var file_pb_peer_record_proto_depIdxs = []int32{
+ 1, // 0: peer.pb.PeerRecord.addresses:type_name -> peer.pb.PeerRecord.AddressInfo
+ 1, // [1:1] is the sub-list for method output_type
+ 1, // [1:1] is the sub-list for method input_type
+ 1, // [1:1] is the sub-list for extension type_name
+ 1, // [1:1] is the sub-list for extension extendee
+ 0, // [0:1] is the sub-list for field type_name
+}
+
+func init() { file_pb_peer_record_proto_init() }
+func file_pb_peer_record_proto_init() {
+ if File_pb_peer_record_proto != nil {
+ return
+ }
+ if !protoimpl.UnsafeEnabled {
+ file_pb_peer_record_proto_msgTypes[0].Exporter = func(v interface{}, i int) interface{} {
+ switch v := v.(*PeerRecord); i {
+ case 0:
+ return &v.state
+ case 1:
+ return &v.sizeCache
+ case 2:
+ return &v.unknownFields
+ default:
+ return nil
+ }
+ }
+ file_pb_peer_record_proto_msgTypes[1].Exporter = func(v interface{}, i int) interface{} {
+ switch v := v.(*PeerRecord_AddressInfo); i {
+ case 0:
+ return &v.state
+ case 1:
+ return &v.sizeCache
+ case 2:
+ return &v.unknownFields
+ default:
+ return nil
+ }
+ }
+ }
+ type x struct{}
+ out := protoimpl.TypeBuilder{
+ File: protoimpl.DescBuilder{
+ GoPackagePath: reflect.TypeOf(x{}).PkgPath(),
+ RawDescriptor: file_pb_peer_record_proto_rawDesc,
+ NumEnums: 0,
+ NumMessages: 2,
+ NumExtensions: 0,
+ NumServices: 0,
+ },
+ GoTypes: file_pb_peer_record_proto_goTypes,
+ DependencyIndexes: file_pb_peer_record_proto_depIdxs,
+ MessageInfos: file_pb_peer_record_proto_msgTypes,
+ }.Build()
+ File_pb_peer_record_proto = out.File
+ file_pb_peer_record_proto_rawDesc = nil
+ file_pb_peer_record_proto_goTypes = nil
+ file_pb_peer_record_proto_depIdxs = nil
+}
diff --git a/vendor/github.com/libp2p/go-libp2p/core/peer/peer_serde.go b/vendor/github.com/libp2p/go-libp2p/core/peer/peer_serde.go
index e3ac3f2c9..5fd1cd50c 100644
--- a/vendor/github.com/libp2p/go-libp2p/core/peer/peer_serde.go
+++ b/vendor/github.com/libp2p/go-libp2p/core/peer/peer_serde.go
@@ -40,8 +40,6 @@ func (id *ID) UnmarshalBinary(data []byte) error {
return id.Unmarshal(data)
}
-// Size implements Gogo's proto.Sizer, but we omit the compile-time assertion to avoid introducing a hard
-// dependency on gogo.
func (id ID) Size() int {
return len([]byte(id))
}
diff --git a/vendor/github.com/libp2p/go-libp2p/core/peer/record.go b/vendor/github.com/libp2p/go-libp2p/core/peer/record.go
index 9f8861af8..0fc7e552d 100644
--- a/vendor/github.com/libp2p/go-libp2p/core/peer/record.go
+++ b/vendor/github.com/libp2p/go-libp2p/core/peer/record.go
@@ -6,14 +6,16 @@ import (
"time"
"github.com/libp2p/go-libp2p/core/internal/catch"
- pb "github.com/libp2p/go-libp2p/core/peer/pb"
+ "github.com/libp2p/go-libp2p/core/peer/pb"
"github.com/libp2p/go-libp2p/core/record"
ma "github.com/multiformats/go-multiaddr"
- "github.com/gogo/protobuf/proto"
+ "google.golang.org/protobuf/proto"
)
+//go:generate protoc --proto_path=$PWD:$PWD/../.. --go_out=. --go_opt=Mpb/peer_record.proto=./pb pb/peer_record.proto
+
var _ record.Record = (*PeerRecord)(nil)
func init() {
@@ -231,7 +233,7 @@ func (r *PeerRecord) ToProtobuf() (*pb.PeerRecord, error) {
}
func addrsFromProtobuf(addrs []*pb.PeerRecord_AddressInfo) []ma.Multiaddr {
- var out []ma.Multiaddr
+ out := make([]ma.Multiaddr, 0, len(addrs))
for _, addr := range addrs {
a, err := ma.NewMultiaddrBytes(addr.Multiaddr)
if err != nil {
@@ -243,7 +245,7 @@ func addrsFromProtobuf(addrs []*pb.PeerRecord_AddressInfo) []ma.Multiaddr {
}
func addrsToProtobuf(addrs []ma.Multiaddr) []*pb.PeerRecord_AddressInfo {
- var out []*pb.PeerRecord_AddressInfo
+ out := make([]*pb.PeerRecord_AddressInfo, 0, len(addrs))
for _, addr := range addrs {
out = append(out, &pb.PeerRecord_AddressInfo{Multiaddr: addr.Bytes()})
}
diff --git a/vendor/github.com/libp2p/go-libp2p/core/peerstore/peerstore.go b/vendor/github.com/libp2p/go-libp2p/core/peerstore/peerstore.go
index 213513bcd..b63582afe 100644
--- a/vendor/github.com/libp2p/go-libp2p/core/peerstore/peerstore.go
+++ b/vendor/github.com/libp2p/go-libp2p/core/peerstore/peerstore.go
@@ -11,6 +11,7 @@ import (
ic "github.com/libp2p/go-libp2p/core/crypto"
"github.com/libp2p/go-libp2p/core/peer"
+ "github.com/libp2p/go-libp2p/core/protocol"
"github.com/libp2p/go-libp2p/core/record"
ma "github.com/multiformats/go-multiaddr"
@@ -25,11 +26,6 @@ var (
// TempAddrTTL is the ttl used for a short lived address.
TempAddrTTL = time.Minute * 2
- // ProviderAddrTTL is the TTL of an address we've received from a provider.
- // This is also a temporary address, but lasts longer. After this expires,
- // the records we return will require an extra lookup.
- ProviderAddrTTL = time.Minute * 30
-
// RecentlyConnectedAddrTTL is used when we recently connected to a peer.
// It means that we are reasonably certain of the peer's address.
RecentlyConnectedAddrTTL = time.Minute * 30
@@ -235,19 +231,19 @@ type Metrics interface {
// ProtoBook tracks the protocols supported by peers.
type ProtoBook interface {
- GetProtocols(peer.ID) ([]string, error)
- AddProtocols(peer.ID, ...string) error
- SetProtocols(peer.ID, ...string) error
- RemoveProtocols(peer.ID, ...string) error
+ GetProtocols(peer.ID) ([]protocol.ID, error)
+ AddProtocols(peer.ID, ...protocol.ID) error
+ SetProtocols(peer.ID, ...protocol.ID) error
+ RemoveProtocols(peer.ID, ...protocol.ID) error
// SupportsProtocols returns the set of protocols the peer supports from among the given protocols.
// If the returned error is not nil, the result is indeterminate.
- SupportsProtocols(peer.ID, ...string) ([]string, error)
+ SupportsProtocols(peer.ID, ...protocol.ID) ([]protocol.ID, error)
// FirstSupportedProtocol returns the first protocol that the peer supports among the given protocols.
- // If the peer does not support any of the given protocols, this function will return an empty string and a nil error.
+ // If the peer does not support any of the given protocols, this function will return an empty protocol.ID and a nil error.
// If the returned error is not nil, the result is indeterminate.
- FirstSupportedProtocol(peer.ID, ...string) (string, error)
+ FirstSupportedProtocol(peer.ID, ...protocol.ID) (protocol.ID, error)
// RemovePeer removes all protocols associated with a peer.
RemovePeer(peer.ID)
diff --git a/vendor/github.com/libp2p/go-libp2p/core/protocol/switch.go b/vendor/github.com/libp2p/go-libp2p/core/protocol/switch.go
index e00ba1ad1..683ef56fe 100644
--- a/vendor/github.com/libp2p/go-libp2p/core/protocol/switch.go
+++ b/vendor/github.com/libp2p/go-libp2p/core/protocol/switch.go
@@ -3,6 +3,8 @@ package protocol
import (
"io"
+
+ "github.com/multiformats/go-multistream"
)
// HandlerFunc is a user-provided function used by the Router to
@@ -11,7 +13,7 @@ import (
// Will be invoked with the protocol ID string as the first argument,
// which may differ from the ID used for registration if the handler
// was registered using a match function.
-type HandlerFunc = func(protocol string, rwc io.ReadWriteCloser) error
+type HandlerFunc = multistream.HandlerFunc[ID]
// Router is an interface that allows users to add and remove protocol handlers,
// which will be invoked when incoming stream requests for registered protocols
@@ -25,7 +27,7 @@ type Router interface {
// AddHandler registers the given handler to be invoked for
// an exact literal match of the given protocol ID string.
- AddHandler(protocol string, handler HandlerFunc)
+ AddHandler(protocol ID, handler HandlerFunc)
// AddHandlerWithFunc registers the given handler to be invoked
// when the provided match function returns true.
@@ -35,38 +37,27 @@ type Router interface {
// the protocol. Note that the protocol ID argument is not
// used for matching; if you want to match the protocol ID
// string exactly, you must check for it in your match function.
- AddHandlerWithFunc(protocol string, match func(string) bool, handler HandlerFunc)
+ AddHandlerWithFunc(protocol ID, match func(ID) bool, handler HandlerFunc)
// RemoveHandler removes the registered handler (if any) for the
// given protocol ID string.
- RemoveHandler(protocol string)
+ RemoveHandler(protocol ID)
// Protocols returns a list of all registered protocol ID strings.
// Note that the Router may be able to handle protocol IDs not
// included in this list if handlers were added with match functions
// using AddHandlerWithFunc.
- Protocols() []string
+ Protocols() []ID
}
// Negotiator is a component capable of reaching agreement over what protocols
// to use for inbound streams of communication.
type Negotiator interface {
- // NegotiateLazy will return the registered protocol handler to use
- // for a given inbound stream, returning as soon as the protocol has been
- // determined. Returns an error if negotiation fails.
- //
- // NegotiateLazy may return before all protocol negotiation responses have been
- // written to the stream. This is in contrast to Negotiate, which will block until
- // the Negotiator is finished with the stream.
- //
- // Deprecated: use Negotiate instead.
- NegotiateLazy(rwc io.ReadWriteCloser) (io.ReadWriteCloser, string, HandlerFunc, error)
-
// Negotiate will return the registered protocol handler to use for a given
// inbound stream, returning after the protocol has been determined and the
// Negotiator has finished using the stream for negotiation. Returns an
// error if negotiation fails.
- Negotiate(rwc io.ReadWriteCloser) (string, HandlerFunc, error)
+ Negotiate(rwc io.ReadWriteCloser) (ID, HandlerFunc, error)
// Handle calls Negotiate to determine which protocol handler to use for an
// inbound stream, then invokes the protocol handler function, passing it
diff --git a/vendor/github.com/libp2p/go-libp2p/core/record/envelope.go b/vendor/github.com/libp2p/go-libp2p/core/record/envelope.go
index 38811994b..86ad14253 100644
--- a/vendor/github.com/libp2p/go-libp2p/core/record/envelope.go
+++ b/vendor/github.com/libp2p/go-libp2p/core/record/envelope.go
@@ -8,14 +8,16 @@ import (
"github.com/libp2p/go-libp2p/core/crypto"
"github.com/libp2p/go-libp2p/core/internal/catch"
- pb "github.com/libp2p/go-libp2p/core/record/pb"
+ "github.com/libp2p/go-libp2p/core/record/pb"
pool "github.com/libp2p/go-buffer-pool"
- "github.com/gogo/protobuf/proto"
"github.com/multiformats/go-varint"
+ "google.golang.org/protobuf/proto"
)
+//go:generate protoc --proto_path=$PWD:$PWD/../.. --go_out=. --go_opt=Mpb/envelope.proto=./pb pb/envelope.proto
+
// Envelope contains an arbitrary []byte payload, signed by a libp2p peer.
//
// Envelopes are signed in the context of a particular "domain", which is a
@@ -104,11 +106,6 @@ func Seal(rec Record, privateKey crypto.PrivKey) (*Envelope, error) {
// doSomethingWithPeerRecord(peerRec)
// }
//
-// Important: you MUST check the error value before using the returned Envelope. In some error
-// cases, including when the envelope signature is invalid, both the Envelope and an error will
-// be returned. This allows you to inspect the unmarshalled but invalid Envelope. As a result,
-// you must not assume that any non-nil Envelope returned from this function is valid.
-//
// If the Envelope signature is valid, but no Record type is registered for the Envelope's
// PayloadType, ErrPayloadTypeNotRegistered will be returned, along with the Envelope and
// a nil Record.
@@ -120,12 +117,12 @@ func ConsumeEnvelope(data []byte, domain string) (envelope *Envelope, rec Record
err = e.validate(domain)
if err != nil {
- return e, nil, fmt.Errorf("failed to validate envelope: %w", err)
+ return nil, nil, fmt.Errorf("failed to validate envelope: %w", err)
}
rec, err = e.Record()
if err != nil {
- return e, nil, fmt.Errorf("failed to unmarshal envelope payload: %w", err)
+ return nil, nil, fmt.Errorf("failed to unmarshal envelope payload: %w", err)
}
return e, rec, nil
}
diff --git a/vendor/github.com/libp2p/go-libp2p/core/record/pb/Makefile b/vendor/github.com/libp2p/go-libp2p/core/record/pb/Makefile
deleted file mode 100644
index 7cf8222f8..000000000
--- a/vendor/github.com/libp2p/go-libp2p/core/record/pb/Makefile
+++ /dev/null
@@ -1,11 +0,0 @@
-PB = $(wildcard *.proto)
-GO = $(PB:.proto=.pb.go)
-
-all: $(GO)
-
-%.pb.go: %.proto
- protoc --proto_path=$(PWD):$(PWD)/../.. --gogofaster_out=. $<
-
-clean:
- rm -f *.pb.go
- rm -f *.go
diff --git a/vendor/github.com/libp2p/go-libp2p/core/record/pb/envelope.pb.go b/vendor/github.com/libp2p/go-libp2p/core/record/pb/envelope.pb.go
index a99902175..1d3c7f25b 100644
--- a/vendor/github.com/libp2p/go-libp2p/core/record/pb/envelope.pb.go
+++ b/vendor/github.com/libp2p/go-libp2p/core/record/pb/envelope.pb.go
@@ -1,28 +1,25 @@
-// Code generated by protoc-gen-gogo. DO NOT EDIT.
-// source: envelope.proto
+// Code generated by protoc-gen-go. DO NOT EDIT.
+// versions:
+// protoc-gen-go v1.30.0
+// protoc v3.21.12
+// source: pb/envelope.proto
-package record_pb
+package pb
import (
- fmt "fmt"
- io "io"
- math "math"
- math_bits "math/bits"
-
- proto "github.com/gogo/protobuf/proto"
pb "github.com/libp2p/go-libp2p/core/crypto/pb"
+ protoreflect "google.golang.org/protobuf/reflect/protoreflect"
+ protoimpl "google.golang.org/protobuf/runtime/protoimpl"
+ reflect "reflect"
+ sync "sync"
)
-// Reference imports to suppress errors if they are not otherwise used.
-var _ = proto.Marshal
-var _ = fmt.Errorf
-var _ = math.Inf
-
-// This is a compile-time assertion to ensure that this generated file
-// is compatible with the proto package it is being compiled against.
-// A compilation error at this line likely means your copy of the
-// proto package needs to be updated.
-const _ = proto.GoGoProtoPackageIsVersion3 // please upgrade the proto package
+const (
+ // Verify that this generated code is sufficiently up-to-date.
+ _ = protoimpl.EnforceVersion(20 - protoimpl.MinVersion)
+ // Verify that runtime/protoimpl is sufficiently up-to-date.
+ _ = protoimpl.EnforceVersion(protoimpl.MaxVersion - 20)
+)
// Envelope encloses a signed payload produced by a peer, along with the public
// key of the keypair it was signed with so that it can be statelessly validated
@@ -32,6 +29,10 @@ const _ = proto.GoGoProtoPackageIsVersion3 // please upgrade the proto package
// can be deserialized deterministically. Often, this byte string is a
// multicodec.
type Envelope struct {
+ state protoimpl.MessageState
+ sizeCache protoimpl.SizeCache
+ unknownFields protoimpl.UnknownFields
+
// public_key is the public key of the keypair the enclosed payload was
// signed with.
PublicKey *pb.PublicKey `protobuf:"bytes,1,opt,name=public_key,json=publicKey,proto3" json:"public_key,omitempty"`
@@ -46,460 +47,146 @@ type Envelope struct {
Signature []byte `protobuf:"bytes,5,opt,name=signature,proto3" json:"signature,omitempty"`
}
-func (m *Envelope) Reset() { *m = Envelope{} }
-func (m *Envelope) String() string { return proto.CompactTextString(m) }
-func (*Envelope) ProtoMessage() {}
-func (*Envelope) Descriptor() ([]byte, []int) {
- return fileDescriptor_ee266e8c558e9dc5, []int{0}
-}
-func (m *Envelope) XXX_Unmarshal(b []byte) error {
- return m.Unmarshal(b)
-}
-func (m *Envelope) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) {
- if deterministic {
- return xxx_messageInfo_Envelope.Marshal(b, m, deterministic)
- } else {
- b = b[:cap(b)]
- n, err := m.MarshalToSizedBuffer(b)
- if err != nil {
- return nil, err
- }
- return b[:n], nil
+func (x *Envelope) Reset() {
+ *x = Envelope{}
+ if protoimpl.UnsafeEnabled {
+ mi := &file_pb_envelope_proto_msgTypes[0]
+ ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
+ ms.StoreMessageInfo(mi)
}
}
-func (m *Envelope) XXX_Merge(src proto.Message) {
- xxx_messageInfo_Envelope.Merge(m, src)
-}
-func (m *Envelope) XXX_Size() int {
- return m.Size()
-}
-func (m *Envelope) XXX_DiscardUnknown() {
- xxx_messageInfo_Envelope.DiscardUnknown(m)
+
+func (x *Envelope) String() string {
+ return protoimpl.X.MessageStringOf(x)
}
-var xxx_messageInfo_Envelope proto.InternalMessageInfo
+func (*Envelope) ProtoMessage() {}
-func (m *Envelope) GetPublicKey() *pb.PublicKey {
- if m != nil {
- return m.PublicKey
+func (x *Envelope) ProtoReflect() protoreflect.Message {
+ mi := &file_pb_envelope_proto_msgTypes[0]
+ if protoimpl.UnsafeEnabled && x != nil {
+ ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
+ if ms.LoadMessageInfo() == nil {
+ ms.StoreMessageInfo(mi)
+ }
+ return ms
}
- return nil
+ return mi.MessageOf(x)
}
-func (m *Envelope) GetPayloadType() []byte {
- if m != nil {
- return m.PayloadType
- }
- return nil
+// Deprecated: Use Envelope.ProtoReflect.Descriptor instead.
+func (*Envelope) Descriptor() ([]byte, []int) {
+ return file_pb_envelope_proto_rawDescGZIP(), []int{0}
}
-func (m *Envelope) GetPayload() []byte {
- if m != nil {
- return m.Payload
+func (x *Envelope) GetPublicKey() *pb.PublicKey {
+ if x != nil {
+ return x.PublicKey
}
return nil
}
-func (m *Envelope) GetSignature() []byte {
- if m != nil {
- return m.Signature
+func (x *Envelope) GetPayloadType() []byte {
+ if x != nil {
+ return x.PayloadType
}
return nil
}
-func init() {
- proto.RegisterType((*Envelope)(nil), "record.pb.Envelope")
-}
-
-func init() { proto.RegisterFile("envelope.proto", fileDescriptor_ee266e8c558e9dc5) }
-
-var fileDescriptor_ee266e8c558e9dc5 = []byte{
- // 205 bytes of a gzipped FileDescriptorProto
- 0x1f, 0x8b, 0x08, 0x00, 0x00, 0x00, 0x00, 0x00, 0x02, 0xff, 0xe2, 0xe2, 0x4b, 0xcd, 0x2b, 0x4b,
- 0xcd, 0xc9, 0x2f, 0x48, 0xd5, 0x2b, 0x28, 0xca, 0x2f, 0xc9, 0x17, 0xe2, 0x2c, 0x4a, 0x4d, 0xce,
- 0x2f, 0x4a, 0xd1, 0x2b, 0x48, 0x92, 0x12, 0x4b, 0x2e, 0xaa, 0x2c, 0x28, 0xc9, 0xd7, 0x2f, 0x48,
- 0xd2, 0x87, 0xb0, 0x20, 0x4a, 0x94, 0x66, 0x31, 0x72, 0x71, 0xb8, 0x42, 0x75, 0x09, 0x19, 0x73,
- 0x71, 0x15, 0x94, 0x26, 0xe5, 0x64, 0x26, 0xc7, 0x67, 0xa7, 0x56, 0x4a, 0x30, 0x2a, 0x30, 0x6a,
- 0x70, 0x1b, 0x89, 0xe8, 0xc1, 0xd4, 0x27, 0xe9, 0x05, 0x80, 0x25, 0xbd, 0x53, 0x2b, 0x83, 0x38,
- 0x0b, 0x60, 0x4c, 0x21, 0x45, 0x2e, 0x9e, 0x82, 0xc4, 0xca, 0x9c, 0xfc, 0xc4, 0x94, 0xf8, 0x92,
- 0xca, 0x82, 0x54, 0x09, 0x26, 0x05, 0x46, 0x0d, 0x9e, 0x20, 0x6e, 0xa8, 0x58, 0x48, 0x65, 0x41,
- 0xaa, 0x90, 0x04, 0x17, 0x3b, 0x94, 0x2b, 0xc1, 0x0c, 0x96, 0x85, 0x71, 0x85, 0x64, 0xb8, 0x38,
- 0x8b, 0x33, 0xd3, 0xf3, 0x12, 0x4b, 0x4a, 0x8b, 0x52, 0x25, 0x58, 0xc1, 0x72, 0x08, 0x01, 0x27,
- 0x89, 0x13, 0x8f, 0xe4, 0x18, 0x2f, 0x3c, 0x92, 0x63, 0x7c, 0xf0, 0x48, 0x8e, 0x71, 0xc2, 0x63,
- 0x39, 0x86, 0x0b, 0x8f, 0xe5, 0x18, 0x6e, 0x3c, 0x96, 0x63, 0x48, 0x62, 0x03, 0xbb, 0xde, 0x18,
- 0x10, 0x00, 0x00, 0xff, 0xff, 0xaa, 0x0b, 0xd9, 0x6d, 0xf2, 0x00, 0x00, 0x00,
-}
-
-func (m *Envelope) Marshal() (dAtA []byte, err error) {
- size := m.Size()
- dAtA = make([]byte, size)
- n, err := m.MarshalToSizedBuffer(dAtA[:size])
- if err != nil {
- return nil, err
+func (x *Envelope) GetPayload() []byte {
+ if x != nil {
+ return x.Payload
}
- return dAtA[:n], nil
-}
-
-func (m *Envelope) MarshalTo(dAtA []byte) (int, error) {
- size := m.Size()
- return m.MarshalToSizedBuffer(dAtA[:size])
+ return nil
}
-func (m *Envelope) MarshalToSizedBuffer(dAtA []byte) (int, error) {
- i := len(dAtA)
- _ = i
- var l int
- _ = l
- if len(m.Signature) > 0 {
- i -= len(m.Signature)
- copy(dAtA[i:], m.Signature)
- i = encodeVarintEnvelope(dAtA, i, uint64(len(m.Signature)))
- i--
- dAtA[i] = 0x2a
+func (x *Envelope) GetSignature() []byte {
+ if x != nil {
+ return x.Signature
}
- if len(m.Payload) > 0 {
- i -= len(m.Payload)
- copy(dAtA[i:], m.Payload)
- i = encodeVarintEnvelope(dAtA, i, uint64(len(m.Payload)))
- i--
- dAtA[i] = 0x1a
- }
- if len(m.PayloadType) > 0 {
- i -= len(m.PayloadType)
- copy(dAtA[i:], m.PayloadType)
- i = encodeVarintEnvelope(dAtA, i, uint64(len(m.PayloadType)))
- i--
- dAtA[i] = 0x12
- }
- if m.PublicKey != nil {
- {
- size, err := m.PublicKey.MarshalToSizedBuffer(dAtA[:i])
- if err != nil {
- return 0, err
- }
- i -= size
- i = encodeVarintEnvelope(dAtA, i, uint64(size))
- }
- i--
- dAtA[i] = 0xa
- }
- return len(dAtA) - i, nil
+ return nil
}
-func encodeVarintEnvelope(dAtA []byte, offset int, v uint64) int {
- offset -= sovEnvelope(v)
- base := offset
- for v >= 1<<7 {
- dAtA[offset] = uint8(v&0x7f | 0x80)
- v >>= 7
- offset++
- }
- dAtA[offset] = uint8(v)
- return base
-}
-func (m *Envelope) Size() (n int) {
- if m == nil {
- return 0
- }
- var l int
- _ = l
- if m.PublicKey != nil {
- l = m.PublicKey.Size()
- n += 1 + l + sovEnvelope(uint64(l))
- }
- l = len(m.PayloadType)
- if l > 0 {
- n += 1 + l + sovEnvelope(uint64(l))
- }
- l = len(m.Payload)
- if l > 0 {
- n += 1 + l + sovEnvelope(uint64(l))
- }
- l = len(m.Signature)
- if l > 0 {
- n += 1 + l + sovEnvelope(uint64(l))
- }
- return n
-}
+var File_pb_envelope_proto protoreflect.FileDescriptor
-func sovEnvelope(x uint64) (n int) {
- return (math_bits.Len64(x|1) + 6) / 7
-}
-func sozEnvelope(x uint64) (n int) {
- return sovEnvelope(uint64((x << 1) ^ uint64((int64(x) >> 63))))
+var file_pb_envelope_proto_rawDesc = []byte{
+ 0x0a, 0x11, 0x70, 0x62, 0x2f, 0x65, 0x6e, 0x76, 0x65, 0x6c, 0x6f, 0x70, 0x65, 0x2e, 0x70, 0x72,
+ 0x6f, 0x74, 0x6f, 0x12, 0x09, 0x72, 0x65, 0x63, 0x6f, 0x72, 0x64, 0x2e, 0x70, 0x62, 0x1a, 0x1b,
+ 0x63, 0x6f, 0x72, 0x65, 0x2f, 0x63, 0x72, 0x79, 0x70, 0x74, 0x6f, 0x2f, 0x70, 0x62, 0x2f, 0x63,
+ 0x72, 0x79, 0x70, 0x74, 0x6f, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x22, 0x9a, 0x01, 0x0a, 0x08,
+ 0x45, 0x6e, 0x76, 0x65, 0x6c, 0x6f, 0x70, 0x65, 0x12, 0x33, 0x0a, 0x0a, 0x70, 0x75, 0x62, 0x6c,
+ 0x69, 0x63, 0x5f, 0x6b, 0x65, 0x79, 0x18, 0x01, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x14, 0x2e, 0x63,
+ 0x72, 0x79, 0x70, 0x74, 0x6f, 0x2e, 0x70, 0x62, 0x2e, 0x50, 0x75, 0x62, 0x6c, 0x69, 0x63, 0x4b,
+ 0x65, 0x79, 0x52, 0x09, 0x70, 0x75, 0x62, 0x6c, 0x69, 0x63, 0x4b, 0x65, 0x79, 0x12, 0x21, 0x0a,
+ 0x0c, 0x70, 0x61, 0x79, 0x6c, 0x6f, 0x61, 0x64, 0x5f, 0x74, 0x79, 0x70, 0x65, 0x18, 0x02, 0x20,
+ 0x01, 0x28, 0x0c, 0x52, 0x0b, 0x70, 0x61, 0x79, 0x6c, 0x6f, 0x61, 0x64, 0x54, 0x79, 0x70, 0x65,
+ 0x12, 0x18, 0x0a, 0x07, 0x70, 0x61, 0x79, 0x6c, 0x6f, 0x61, 0x64, 0x18, 0x03, 0x20, 0x01, 0x28,
+ 0x0c, 0x52, 0x07, 0x70, 0x61, 0x79, 0x6c, 0x6f, 0x61, 0x64, 0x12, 0x1c, 0x0a, 0x09, 0x73, 0x69,
+ 0x67, 0x6e, 0x61, 0x74, 0x75, 0x72, 0x65, 0x18, 0x05, 0x20, 0x01, 0x28, 0x0c, 0x52, 0x09, 0x73,
+ 0x69, 0x67, 0x6e, 0x61, 0x74, 0x75, 0x72, 0x65, 0x62, 0x06, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x33,
}
-func (m *Envelope) Unmarshal(dAtA []byte) error {
- l := len(dAtA)
- iNdEx := 0
- for iNdEx < l {
- preIndex := iNdEx
- var wire uint64
- for shift := uint(0); ; shift += 7 {
- if shift >= 64 {
- return ErrIntOverflowEnvelope
- }
- if iNdEx >= l {
- return io.ErrUnexpectedEOF
- }
- b := dAtA[iNdEx]
- iNdEx++
- wire |= uint64(b&0x7F) << shift
- if b < 0x80 {
- break
- }
- }
- fieldNum := int32(wire >> 3)
- wireType := int(wire & 0x7)
- if wireType == 4 {
- return fmt.Errorf("proto: Envelope: wiretype end group for non-group")
- }
- if fieldNum <= 0 {
- return fmt.Errorf("proto: Envelope: illegal tag %d (wire type %d)", fieldNum, wire)
- }
- switch fieldNum {
- case 1:
- if wireType != 2 {
- return fmt.Errorf("proto: wrong wireType = %d for field PublicKey", wireType)
- }
- var msglen int
- for shift := uint(0); ; shift += 7 {
- if shift >= 64 {
- return ErrIntOverflowEnvelope
- }
- if iNdEx >= l {
- return io.ErrUnexpectedEOF
- }
- b := dAtA[iNdEx]
- iNdEx++
- msglen |= int(b&0x7F) << shift
- if b < 0x80 {
- break
- }
- }
- if msglen < 0 {
- return ErrInvalidLengthEnvelope
- }
- postIndex := iNdEx + msglen
- if postIndex < 0 {
- return ErrInvalidLengthEnvelope
- }
- if postIndex > l {
- return io.ErrUnexpectedEOF
- }
- if m.PublicKey == nil {
- m.PublicKey = &pb.PublicKey{}
- }
- if err := m.PublicKey.Unmarshal(dAtA[iNdEx:postIndex]); err != nil {
- return err
- }
- iNdEx = postIndex
- case 2:
- if wireType != 2 {
- return fmt.Errorf("proto: wrong wireType = %d for field PayloadType", wireType)
- }
- var byteLen int
- for shift := uint(0); ; shift += 7 {
- if shift >= 64 {
- return ErrIntOverflowEnvelope
- }
- if iNdEx >= l {
- return io.ErrUnexpectedEOF
- }
- b := dAtA[iNdEx]
- iNdEx++
- byteLen |= int(b&0x7F) << shift
- if b < 0x80 {
- break
- }
- }
- if byteLen < 0 {
- return ErrInvalidLengthEnvelope
- }
- postIndex := iNdEx + byteLen
- if postIndex < 0 {
- return ErrInvalidLengthEnvelope
- }
- if postIndex > l {
- return io.ErrUnexpectedEOF
- }
- m.PayloadType = append(m.PayloadType[:0], dAtA[iNdEx:postIndex]...)
- if m.PayloadType == nil {
- m.PayloadType = []byte{}
- }
- iNdEx = postIndex
- case 3:
- if wireType != 2 {
- return fmt.Errorf("proto: wrong wireType = %d for field Payload", wireType)
- }
- var byteLen int
- for shift := uint(0); ; shift += 7 {
- if shift >= 64 {
- return ErrIntOverflowEnvelope
- }
- if iNdEx >= l {
- return io.ErrUnexpectedEOF
- }
- b := dAtA[iNdEx]
- iNdEx++
- byteLen |= int(b&0x7F) << shift
- if b < 0x80 {
- break
- }
- }
- if byteLen < 0 {
- return ErrInvalidLengthEnvelope
- }
- postIndex := iNdEx + byteLen
- if postIndex < 0 {
- return ErrInvalidLengthEnvelope
- }
- if postIndex > l {
- return io.ErrUnexpectedEOF
- }
- m.Payload = append(m.Payload[:0], dAtA[iNdEx:postIndex]...)
- if m.Payload == nil {
- m.Payload = []byte{}
- }
- iNdEx = postIndex
- case 5:
- if wireType != 2 {
- return fmt.Errorf("proto: wrong wireType = %d for field Signature", wireType)
- }
- var byteLen int
- for shift := uint(0); ; shift += 7 {
- if shift >= 64 {
- return ErrIntOverflowEnvelope
- }
- if iNdEx >= l {
- return io.ErrUnexpectedEOF
- }
- b := dAtA[iNdEx]
- iNdEx++
- byteLen |= int(b&0x7F) << shift
- if b < 0x80 {
- break
- }
- }
- if byteLen < 0 {
- return ErrInvalidLengthEnvelope
- }
- postIndex := iNdEx + byteLen
- if postIndex < 0 {
- return ErrInvalidLengthEnvelope
- }
- if postIndex > l {
- return io.ErrUnexpectedEOF
- }
- m.Signature = append(m.Signature[:0], dAtA[iNdEx:postIndex]...)
- if m.Signature == nil {
- m.Signature = []byte{}
- }
- iNdEx = postIndex
- default:
- iNdEx = preIndex
- skippy, err := skipEnvelope(dAtA[iNdEx:])
- if err != nil {
- return err
- }
- if skippy < 0 {
- return ErrInvalidLengthEnvelope
- }
- if (iNdEx + skippy) < 0 {
- return ErrInvalidLengthEnvelope
- }
- if (iNdEx + skippy) > l {
- return io.ErrUnexpectedEOF
- }
- iNdEx += skippy
- }
- }
- if iNdEx > l {
- return io.ErrUnexpectedEOF
- }
- return nil
-}
-func skipEnvelope(dAtA []byte) (n int, err error) {
- l := len(dAtA)
- iNdEx := 0
- depth := 0
- for iNdEx < l {
- var wire uint64
- for shift := uint(0); ; shift += 7 {
- if shift >= 64 {
- return 0, ErrIntOverflowEnvelope
- }
- if iNdEx >= l {
- return 0, io.ErrUnexpectedEOF
- }
- b := dAtA[iNdEx]
- iNdEx++
- wire |= (uint64(b) & 0x7F) << shift
- if b < 0x80 {
- break
- }
- }
- wireType := int(wire & 0x7)
- switch wireType {
- case 0:
- for shift := uint(0); ; shift += 7 {
- if shift >= 64 {
- return 0, ErrIntOverflowEnvelope
- }
- if iNdEx >= l {
- return 0, io.ErrUnexpectedEOF
- }
- iNdEx++
- if dAtA[iNdEx-1] < 0x80 {
- break
- }
- }
- case 1:
- iNdEx += 8
- case 2:
- var length int
- for shift := uint(0); ; shift += 7 {
- if shift >= 64 {
- return 0, ErrIntOverflowEnvelope
- }
- if iNdEx >= l {
- return 0, io.ErrUnexpectedEOF
- }
- b := dAtA[iNdEx]
- iNdEx++
- length |= (int(b) & 0x7F) << shift
- if b < 0x80 {
- break
- }
- }
- if length < 0 {
- return 0, ErrInvalidLengthEnvelope
- }
- iNdEx += length
- case 3:
- depth++
- case 4:
- if depth == 0 {
- return 0, ErrUnexpectedEndOfGroupEnvelope
+var (
+ file_pb_envelope_proto_rawDescOnce sync.Once
+ file_pb_envelope_proto_rawDescData = file_pb_envelope_proto_rawDesc
+)
+
+func file_pb_envelope_proto_rawDescGZIP() []byte {
+ file_pb_envelope_proto_rawDescOnce.Do(func() {
+ file_pb_envelope_proto_rawDescData = protoimpl.X.CompressGZIP(file_pb_envelope_proto_rawDescData)
+ })
+ return file_pb_envelope_proto_rawDescData
+}
+
+var file_pb_envelope_proto_msgTypes = make([]protoimpl.MessageInfo, 1)
+var file_pb_envelope_proto_goTypes = []interface{}{
+ (*Envelope)(nil), // 0: record.pb.Envelope
+ (*pb.PublicKey)(nil), // 1: crypto.pb.PublicKey
+}
+var file_pb_envelope_proto_depIdxs = []int32{
+ 1, // 0: record.pb.Envelope.public_key:type_name -> crypto.pb.PublicKey
+ 1, // [1:1] is the sub-list for method output_type
+ 1, // [1:1] is the sub-list for method input_type
+ 1, // [1:1] is the sub-list for extension type_name
+ 1, // [1:1] is the sub-list for extension extendee
+ 0, // [0:1] is the sub-list for field type_name
+}
+
+func init() { file_pb_envelope_proto_init() }
+func file_pb_envelope_proto_init() {
+ if File_pb_envelope_proto != nil {
+ return
+ }
+ if !protoimpl.UnsafeEnabled {
+ file_pb_envelope_proto_msgTypes[0].Exporter = func(v interface{}, i int) interface{} {
+ switch v := v.(*Envelope); i {
+ case 0:
+ return &v.state
+ case 1:
+ return &v.sizeCache
+ case 2:
+ return &v.unknownFields
+ default:
+ return nil
}
- depth--
- case 5:
- iNdEx += 4
- default:
- return 0, fmt.Errorf("proto: illegal wireType %d", wireType)
- }
- if iNdEx < 0 {
- return 0, ErrInvalidLengthEnvelope
- }
- if depth == 0 {
- return iNdEx, nil
}
}
- return 0, io.ErrUnexpectedEOF
+ type x struct{}
+ out := protoimpl.TypeBuilder{
+ File: protoimpl.DescBuilder{
+ GoPackagePath: reflect.TypeOf(x{}).PkgPath(),
+ RawDescriptor: file_pb_envelope_proto_rawDesc,
+ NumEnums: 0,
+ NumMessages: 1,
+ NumExtensions: 0,
+ NumServices: 0,
+ },
+ GoTypes: file_pb_envelope_proto_goTypes,
+ DependencyIndexes: file_pb_envelope_proto_depIdxs,
+ MessageInfos: file_pb_envelope_proto_msgTypes,
+ }.Build()
+ File_pb_envelope_proto = out.File
+ file_pb_envelope_proto_rawDesc = nil
+ file_pb_envelope_proto_goTypes = nil
+ file_pb_envelope_proto_depIdxs = nil
}
-
-var (
- ErrInvalidLengthEnvelope = fmt.Errorf("proto: negative length found during unmarshaling")
- ErrIntOverflowEnvelope = fmt.Errorf("proto: integer overflow")
- ErrUnexpectedEndOfGroupEnvelope = fmt.Errorf("proto: unexpected end of group")
-)
diff --git a/vendor/github.com/libp2p/go-libp2p/core/record/pb/envelope.proto b/vendor/github.com/libp2p/go-libp2p/core/record/pb/envelope.proto
index ca3555fbf..05071ccd7 100644
--- a/vendor/github.com/libp2p/go-libp2p/core/record/pb/envelope.proto
+++ b/vendor/github.com/libp2p/go-libp2p/core/record/pb/envelope.proto
@@ -2,7 +2,7 @@ syntax = "proto3";
package record.pb;
-import "crypto/pb/crypto.proto";
+import "core/crypto/pb/crypto.proto";
// Envelope encloses a signed payload produced by a peer, along with the public
// key of the keypair it was signed with so that it can be statelessly validated
diff --git a/vendor/github.com/libp2p/go-libp2p/core/sec/insecure/insecure.go b/vendor/github.com/libp2p/go-libp2p/core/sec/insecure/insecure.go
index 12bd1842b..9ed20f093 100644
--- a/vendor/github.com/libp2p/go-libp2p/core/sec/insecure/insecure.go
+++ b/vendor/github.com/libp2p/go-libp2p/core/sec/insecure/insecure.go
@@ -10,13 +10,19 @@ import (
"net"
ci "github.com/libp2p/go-libp2p/core/crypto"
+ "github.com/libp2p/go-libp2p/core/network"
"github.com/libp2p/go-libp2p/core/peer"
+ "github.com/libp2p/go-libp2p/core/protocol"
"github.com/libp2p/go-libp2p/core/sec"
- pb "github.com/libp2p/go-libp2p/core/sec/insecure/pb"
+ "github.com/libp2p/go-libp2p/core/sec/insecure/pb"
"github.com/libp2p/go-msgio"
+
+ "google.golang.org/protobuf/proto"
)
+//go:generate protoc --proto_path=$PWD:$PWD/../../.. --go_out=. --go_opt=Mpb/plaintext.proto=./pb pb/plaintext.proto
+
// ID is the multistream-select protocol ID that should be used when identifying
// this security transport.
const ID = "/plaintext/2.0.0"
@@ -27,18 +33,20 @@ const ID = "/plaintext/2.0.0"
// peer presents as their ID and public key.
// No authentication of the remote identity is performed.
type Transport struct {
- id peer.ID
- key ci.PrivKey
+ id peer.ID
+ key ci.PrivKey
+ protocolID protocol.ID
}
-// NewWithIdentity constructs a new insecure transport. The provided private key
-// is stored and returned from LocalPrivateKey to satisfy the
-// SecureTransport interface, and the public key is sent to
+var _ sec.SecureTransport = &Transport{}
+
+// NewWithIdentity constructs a new insecure transport. The public key is sent to
// remote peers. No security is provided.
-func NewWithIdentity(id peer.ID, key ci.PrivKey) *Transport {
+func NewWithIdentity(protocolID protocol.ID, id peer.ID, key ci.PrivKey) *Transport {
return &Transport{
- id: id,
- key: key,
+ protocolID: protocolID,
+ id: id,
+ key: key,
}
}
@@ -47,12 +55,6 @@ func (t *Transport) LocalPeer() peer.ID {
return t.id
}
-// LocalPrivateKey returns the local private key.
-// This key is used only for identity generation and provides no security.
-func (t *Transport) LocalPrivateKey() ci.PrivKey {
- return t.key
-}
-
// SecureInbound *pretends to secure* an inbound connection to the given peer.
// It sends the local peer's ID and public key, and receives the same from the remote peer.
// No validation is performed as to the authenticity or ownership of the provided public key,
@@ -60,19 +62,18 @@ func (t *Transport) LocalPrivateKey() ci.PrivKey {
//
// SecureInbound may fail if the remote peer sends an ID and public key that are inconsistent
// with each other, or if a network error occurs during the ID exchange.
-func (t *Transport) SecureInbound(ctx context.Context, insecure net.Conn, p peer.ID) (sec.SecureConn, error) {
+func (t *Transport) SecureInbound(_ context.Context, insecure net.Conn, p peer.ID) (sec.SecureConn, error) {
conn := &Conn{
- Conn: insecure,
- local: t.id,
- localPrivKey: t.key,
+ Conn: insecure,
+ local: t.id,
+ localPubKey: t.key.GetPublic(),
}
- err := conn.runHandshakeSync()
- if err != nil {
+ if err := conn.runHandshakeSync(); err != nil {
return nil, err
}
- if t.key != nil && p != "" && p != conn.remote {
+ if p != "" && p != conn.remote {
return nil, fmt.Errorf("remote peer sent unexpected peer ID. expected=%s received=%s", p, conn.remote)
}
@@ -87,19 +88,18 @@ func (t *Transport) SecureInbound(ctx context.Context, insecure net.Conn, p peer
// SecureOutbound may fail if the remote peer sends an ID and public key that are inconsistent
// with each other, or if the ID sent by the remote peer does not match the one dialed. It may
// also fail if a network error occurs during the ID exchange.
-func (t *Transport) SecureOutbound(ctx context.Context, insecure net.Conn, p peer.ID) (sec.SecureConn, error) {
+func (t *Transport) SecureOutbound(_ context.Context, insecure net.Conn, p peer.ID) (sec.SecureConn, error) {
conn := &Conn{
- Conn: insecure,
- local: t.id,
- localPrivKey: t.key,
+ Conn: insecure,
+ local: t.id,
+ localPubKey: t.key.GetPublic(),
}
- err := conn.runHandshakeSync()
- if err != nil {
+ if err := conn.runHandshakeSync(); err != nil {
return nil, err
}
- if t.key != nil && p != conn.remote {
+ if p != conn.remote {
return nil, fmt.Errorf("remote peer sent unexpected peer ID. expected=%s received=%s",
p, conn.remote)
}
@@ -107,15 +107,14 @@ func (t *Transport) SecureOutbound(ctx context.Context, insecure net.Conn, p pee
return conn, nil
}
+func (t *Transport) ID() protocol.ID { return t.protocolID }
+
// Conn is the connection type returned by the insecure transport.
type Conn struct {
net.Conn
- local peer.ID
- remote peer.ID
-
- localPrivKey ci.PrivKey
- remotePubKey ci.PubKey
+ local, remote peer.ID
+ localPubKey, remotePubKey ci.PubKey
}
func makeExchangeMessage(pubkey ci.PubKey) (*pb.Exchange, error) {
@@ -136,12 +135,12 @@ func makeExchangeMessage(pubkey ci.PubKey) (*pb.Exchange, error) {
func (ic *Conn) runHandshakeSync() error {
// If we were initialized without keys, behave as in plaintext/1.0.0 (do nothing)
- if ic.localPrivKey == nil {
+ if ic.localPubKey == nil {
return nil
}
// Generate an Exchange message
- msg, err := makeExchangeMessage(ic.localPrivKey.GetPublic())
+ msg, err := makeExchangeMessage(ic.localPubKey)
if err != nil {
return err
}
@@ -180,7 +179,7 @@ func (ic *Conn) runHandshakeSync() error {
func readWriteMsg(rw io.ReadWriter, out *pb.Exchange) (*pb.Exchange, error) {
const maxMessageSize = 1 << 16
- outBytes, err := out.Marshal()
+ outBytes, err := proto.Marshal(out)
if err != nil {
return nil, err
}
@@ -191,7 +190,7 @@ func readWriteMsg(rw io.ReadWriter, out *pb.Exchange) (*pb.Exchange, error) {
}()
r := msgio.NewVarintReaderSize(rw, maxMessageSize)
- msg, err1 := r.ReadMsg()
+ b, err1 := r.ReadMsg()
// Always wait for the read to finish.
err2 := <-wresult
@@ -200,11 +199,11 @@ func readWriteMsg(rw io.ReadWriter, out *pb.Exchange) (*pb.Exchange, error) {
return nil, err1
}
if err2 != nil {
- r.ReleaseMsg(msg)
+ r.ReleaseMsg(b)
return nil, err2
}
inMsg := new(pb.Exchange)
- err = inMsg.Unmarshal(msg)
+ err = proto.Unmarshal(b, inMsg)
return inMsg, err
}
@@ -225,9 +224,9 @@ func (ic *Conn) RemotePublicKey() ci.PubKey {
return ic.remotePubKey
}
-// LocalPrivateKey returns the private key for the local peer.
-func (ic *Conn) LocalPrivateKey() ci.PrivKey {
- return ic.localPrivKey
+// ConnState returns the security connection's state information.
+func (ic *Conn) ConnState() network.ConnectionState {
+ return network.ConnectionState{}
}
var _ sec.SecureTransport = (*Transport)(nil)
diff --git a/vendor/github.com/libp2p/go-libp2p/core/sec/insecure/pb/Makefile b/vendor/github.com/libp2p/go-libp2p/core/sec/insecure/pb/Makefile
deleted file mode 100644
index 4fb825a4b..000000000
--- a/vendor/github.com/libp2p/go-libp2p/core/sec/insecure/pb/Makefile
+++ /dev/null
@@ -1,11 +0,0 @@
-PB = $(wildcard *.proto)
-GO = $(PB:.proto=.pb.go)
-
-all: $(GO)
-
-%.pb.go: %.proto
- protoc --proto_path=$(GOPATH)/src:../../../crypto/pb:. --gogofaster_out=. $<
-
-clean:
- rm -f *.pb.go
- rm -f *.go
diff --git a/vendor/github.com/libp2p/go-libp2p/core/sec/insecure/pb/plaintext.pb.go b/vendor/github.com/libp2p/go-libp2p/core/sec/insecure/pb/plaintext.pb.go
index cd8719d11..16b910b4d 100644
--- a/vendor/github.com/libp2p/go-libp2p/core/sec/insecure/pb/plaintext.pb.go
+++ b/vendor/github.com/libp2p/go-libp2p/core/sec/insecure/pb/plaintext.pb.go
@@ -1,383 +1,156 @@
-// Code generated by protoc-gen-gogo. DO NOT EDIT.
-// source: plaintext.proto
+// Code generated by protoc-gen-go. DO NOT EDIT.
+// versions:
+// protoc-gen-go v1.30.0
+// protoc v3.21.12
+// source: pb/plaintext.proto
-package plaintext_pb
+package pb
import (
- fmt "fmt"
- io "io"
- math "math"
- math_bits "math/bits"
-
- proto "github.com/gogo/protobuf/proto"
pb "github.com/libp2p/go-libp2p/core/crypto/pb"
+ protoreflect "google.golang.org/protobuf/reflect/protoreflect"
+ protoimpl "google.golang.org/protobuf/runtime/protoimpl"
+ reflect "reflect"
+ sync "sync"
)
-// Reference imports to suppress errors if they are not otherwise used.
-var _ = proto.Marshal
-var _ = fmt.Errorf
-var _ = math.Inf
-
-// This is a compile-time assertion to ensure that this generated file
-// is compatible with the proto package it is being compiled against.
-// A compilation error at this line likely means your copy of the
-// proto package needs to be updated.
-const _ = proto.GoGoProtoPackageIsVersion3 // please upgrade the proto package
+const (
+ // Verify that this generated code is sufficiently up-to-date.
+ _ = protoimpl.EnforceVersion(20 - protoimpl.MinVersion)
+ // Verify that runtime/protoimpl is sufficiently up-to-date.
+ _ = protoimpl.EnforceVersion(protoimpl.MaxVersion - 20)
+)
type Exchange struct {
- Id []byte `protobuf:"bytes,1,opt,name=id" json:"id"`
+ state protoimpl.MessageState
+ sizeCache protoimpl.SizeCache
+ unknownFields protoimpl.UnknownFields
+
+ Id []byte `protobuf:"bytes,1,opt,name=id" json:"id,omitempty"`
Pubkey *pb.PublicKey `protobuf:"bytes,2,opt,name=pubkey" json:"pubkey,omitempty"`
}
-func (m *Exchange) Reset() { *m = Exchange{} }
-func (m *Exchange) String() string { return proto.CompactTextString(m) }
-func (*Exchange) ProtoMessage() {}
-func (*Exchange) Descriptor() ([]byte, []int) {
- return fileDescriptor_aba144f73931b711, []int{0}
+func (x *Exchange) Reset() {
+ *x = Exchange{}
+ if protoimpl.UnsafeEnabled {
+ mi := &file_pb_plaintext_proto_msgTypes[0]
+ ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
+ ms.StoreMessageInfo(mi)
+ }
}
-func (m *Exchange) XXX_Unmarshal(b []byte) error {
- return m.Unmarshal(b)
+
+func (x *Exchange) String() string {
+ return protoimpl.X.MessageStringOf(x)
}
-func (m *Exchange) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) {
- if deterministic {
- return xxx_messageInfo_Exchange.Marshal(b, m, deterministic)
- } else {
- b = b[:cap(b)]
- n, err := m.MarshalToSizedBuffer(b)
- if err != nil {
- return nil, err
+
+func (*Exchange) ProtoMessage() {}
+
+func (x *Exchange) ProtoReflect() protoreflect.Message {
+ mi := &file_pb_plaintext_proto_msgTypes[0]
+ if protoimpl.UnsafeEnabled && x != nil {
+ ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
+ if ms.LoadMessageInfo() == nil {
+ ms.StoreMessageInfo(mi)
}
- return b[:n], nil
+ return ms
}
-}
-func (m *Exchange) XXX_Merge(src proto.Message) {
- xxx_messageInfo_Exchange.Merge(m, src)
-}
-func (m *Exchange) XXX_Size() int {
- return m.Size()
-}
-func (m *Exchange) XXX_DiscardUnknown() {
- xxx_messageInfo_Exchange.DiscardUnknown(m)
+ return mi.MessageOf(x)
}
-var xxx_messageInfo_Exchange proto.InternalMessageInfo
+// Deprecated: Use Exchange.ProtoReflect.Descriptor instead.
+func (*Exchange) Descriptor() ([]byte, []int) {
+ return file_pb_plaintext_proto_rawDescGZIP(), []int{0}
+}
-func (m *Exchange) GetId() []byte {
- if m != nil {
- return m.Id
+func (x *Exchange) GetId() []byte {
+ if x != nil {
+ return x.Id
}
return nil
}
-func (m *Exchange) GetPubkey() *pb.PublicKey {
- if m != nil {
- return m.Pubkey
+func (x *Exchange) GetPubkey() *pb.PublicKey {
+ if x != nil {
+ return x.Pubkey
}
return nil
}
-func init() {
- proto.RegisterType((*Exchange)(nil), "plaintext.pb.Exchange")
-}
-
-func init() { proto.RegisterFile("plaintext.proto", fileDescriptor_aba144f73931b711) }
-
-var fileDescriptor_aba144f73931b711 = []byte{
- // 187 bytes of a gzipped FileDescriptorProto
- 0x1f, 0x8b, 0x08, 0x00, 0x00, 0x00, 0x00, 0x00, 0x02, 0xff, 0xe2, 0xe2, 0x2f, 0xc8, 0x49, 0xcc,
- 0xcc, 0x2b, 0x49, 0xad, 0x28, 0xd1, 0x2b, 0x28, 0xca, 0x2f, 0xc9, 0x17, 0xe2, 0x41, 0x12, 0x48,
- 0x92, 0x32, 0x4f, 0xcf, 0x2c, 0xc9, 0x28, 0x4d, 0xd2, 0x4b, 0xce, 0xcf, 0xd5, 0xcf, 0xc9, 0x4c,
- 0x2a, 0x30, 0x2a, 0xd0, 0x4f, 0xcf, 0xd7, 0x85, 0xb0, 0x74, 0x93, 0xf3, 0x8b, 0x52, 0xf5, 0x93,
- 0x8b, 0x2a, 0x0b, 0x4a, 0xf2, 0xf5, 0x0b, 0x92, 0xa0, 0x2c, 0x88, 0x31, 0x4a, 0x7e, 0x5c, 0x1c,
- 0xae, 0x15, 0xc9, 0x19, 0x89, 0x79, 0xe9, 0xa9, 0x42, 0x22, 0x5c, 0x4c, 0x99, 0x29, 0x12, 0x8c,
- 0x0a, 0x8c, 0x1a, 0x3c, 0x4e, 0x2c, 0x27, 0xee, 0xc9, 0x33, 0x04, 0x31, 0x65, 0xa6, 0x08, 0xe9,
- 0x70, 0xb1, 0x15, 0x94, 0x26, 0x65, 0xa7, 0x56, 0x4a, 0x30, 0x29, 0x30, 0x6a, 0x70, 0x1b, 0x89,
- 0xe8, 0xc1, 0x0c, 0x48, 0xd2, 0x0b, 0x28, 0x4d, 0xca, 0xc9, 0x4c, 0xf6, 0x4e, 0xad, 0x0c, 0x82,
- 0xaa, 0x71, 0x92, 0x38, 0xf1, 0x48, 0x8e, 0xf1, 0xc2, 0x23, 0x39, 0xc6, 0x07, 0x8f, 0xe4, 0x18,
- 0x27, 0x3c, 0x96, 0x63, 0xb8, 0xf0, 0x58, 0x8e, 0xe1, 0xc6, 0x63, 0x39, 0x06, 0x40, 0x00, 0x00,
- 0x00, 0xff, 0xff, 0x40, 0xde, 0x90, 0x0b, 0xc2, 0x00, 0x00, 0x00,
-}
-
-func (m *Exchange) Marshal() (dAtA []byte, err error) {
- size := m.Size()
- dAtA = make([]byte, size)
- n, err := m.MarshalToSizedBuffer(dAtA[:size])
- if err != nil {
- return nil, err
- }
- return dAtA[:n], nil
-}
+var File_pb_plaintext_proto protoreflect.FileDescriptor
-func (m *Exchange) MarshalTo(dAtA []byte) (int, error) {
- size := m.Size()
- return m.MarshalToSizedBuffer(dAtA[:size])
+var file_pb_plaintext_proto_rawDesc = []byte{
+ 0x0a, 0x12, 0x70, 0x62, 0x2f, 0x70, 0x6c, 0x61, 0x69, 0x6e, 0x74, 0x65, 0x78, 0x74, 0x2e, 0x70,
+ 0x72, 0x6f, 0x74, 0x6f, 0x12, 0x0c, 0x70, 0x6c, 0x61, 0x69, 0x6e, 0x74, 0x65, 0x78, 0x74, 0x2e,
+ 0x70, 0x62, 0x1a, 0x1b, 0x63, 0x6f, 0x72, 0x65, 0x2f, 0x63, 0x72, 0x79, 0x70, 0x74, 0x6f, 0x2f,
+ 0x70, 0x62, 0x2f, 0x63, 0x72, 0x79, 0x70, 0x74, 0x6f, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x22,
+ 0x48, 0x0a, 0x08, 0x45, 0x78, 0x63, 0x68, 0x61, 0x6e, 0x67, 0x65, 0x12, 0x0e, 0x0a, 0x02, 0x69,
+ 0x64, 0x18, 0x01, 0x20, 0x01, 0x28, 0x0c, 0x52, 0x02, 0x69, 0x64, 0x12, 0x2c, 0x0a, 0x06, 0x70,
+ 0x75, 0x62, 0x6b, 0x65, 0x79, 0x18, 0x02, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x14, 0x2e, 0x63, 0x72,
+ 0x79, 0x70, 0x74, 0x6f, 0x2e, 0x70, 0x62, 0x2e, 0x50, 0x75, 0x62, 0x6c, 0x69, 0x63, 0x4b, 0x65,
+ 0x79, 0x52, 0x06, 0x70, 0x75, 0x62, 0x6b, 0x65, 0x79,
}
-func (m *Exchange) MarshalToSizedBuffer(dAtA []byte) (int, error) {
- i := len(dAtA)
- _ = i
- var l int
- _ = l
- if m.Pubkey != nil {
- {
- size, err := m.Pubkey.MarshalToSizedBuffer(dAtA[:i])
- if err != nil {
- return 0, err
- }
- i -= size
- i = encodeVarintPlaintext(dAtA, i, uint64(size))
- }
- i--
- dAtA[i] = 0x12
- }
- if m.Id != nil {
- i -= len(m.Id)
- copy(dAtA[i:], m.Id)
- i = encodeVarintPlaintext(dAtA, i, uint64(len(m.Id)))
- i--
- dAtA[i] = 0xa
- }
- return len(dAtA) - i, nil
-}
+var (
+ file_pb_plaintext_proto_rawDescOnce sync.Once
+ file_pb_plaintext_proto_rawDescData = file_pb_plaintext_proto_rawDesc
+)
-func encodeVarintPlaintext(dAtA []byte, offset int, v uint64) int {
- offset -= sovPlaintext(v)
- base := offset
- for v >= 1<<7 {
- dAtA[offset] = uint8(v&0x7f | 0x80)
- v >>= 7
- offset++
- }
- dAtA[offset] = uint8(v)
- return base
-}
-func (m *Exchange) Size() (n int) {
- if m == nil {
- return 0
- }
- var l int
- _ = l
- if m.Id != nil {
- l = len(m.Id)
- n += 1 + l + sovPlaintext(uint64(l))
- }
- if m.Pubkey != nil {
- l = m.Pubkey.Size()
- n += 1 + l + sovPlaintext(uint64(l))
- }
- return n
+func file_pb_plaintext_proto_rawDescGZIP() []byte {
+ file_pb_plaintext_proto_rawDescOnce.Do(func() {
+ file_pb_plaintext_proto_rawDescData = protoimpl.X.CompressGZIP(file_pb_plaintext_proto_rawDescData)
+ })
+ return file_pb_plaintext_proto_rawDescData
}
-func sovPlaintext(x uint64) (n int) {
- return (math_bits.Len64(x|1) + 6) / 7
+var file_pb_plaintext_proto_msgTypes = make([]protoimpl.MessageInfo, 1)
+var file_pb_plaintext_proto_goTypes = []interface{}{
+ (*Exchange)(nil), // 0: plaintext.pb.Exchange
+ (*pb.PublicKey)(nil), // 1: crypto.pb.PublicKey
}
-func sozPlaintext(x uint64) (n int) {
- return sovPlaintext(uint64((x << 1) ^ uint64((int64(x) >> 63))))
+var file_pb_plaintext_proto_depIdxs = []int32{
+ 1, // 0: plaintext.pb.Exchange.pubkey:type_name -> crypto.pb.PublicKey
+ 1, // [1:1] is the sub-list for method output_type
+ 1, // [1:1] is the sub-list for method input_type
+ 1, // [1:1] is the sub-list for extension type_name
+ 1, // [1:1] is the sub-list for extension extendee
+ 0, // [0:1] is the sub-list for field type_name
}
-func (m *Exchange) Unmarshal(dAtA []byte) error {
- l := len(dAtA)
- iNdEx := 0
- for iNdEx < l {
- preIndex := iNdEx
- var wire uint64
- for shift := uint(0); ; shift += 7 {
- if shift >= 64 {
- return ErrIntOverflowPlaintext
- }
- if iNdEx >= l {
- return io.ErrUnexpectedEOF
- }
- b := dAtA[iNdEx]
- iNdEx++
- wire |= uint64(b&0x7F) << shift
- if b < 0x80 {
- break
- }
- }
- fieldNum := int32(wire >> 3)
- wireType := int(wire & 0x7)
- if wireType == 4 {
- return fmt.Errorf("proto: Exchange: wiretype end group for non-group")
- }
- if fieldNum <= 0 {
- return fmt.Errorf("proto: Exchange: illegal tag %d (wire type %d)", fieldNum, wire)
- }
- switch fieldNum {
- case 1:
- if wireType != 2 {
- return fmt.Errorf("proto: wrong wireType = %d for field Id", wireType)
- }
- var byteLen int
- for shift := uint(0); ; shift += 7 {
- if shift >= 64 {
- return ErrIntOverflowPlaintext
- }
- if iNdEx >= l {
- return io.ErrUnexpectedEOF
- }
- b := dAtA[iNdEx]
- iNdEx++
- byteLen |= int(b&0x7F) << shift
- if b < 0x80 {
- break
- }
- }
- if byteLen < 0 {
- return ErrInvalidLengthPlaintext
- }
- postIndex := iNdEx + byteLen
- if postIndex < 0 {
- return ErrInvalidLengthPlaintext
- }
- if postIndex > l {
- return io.ErrUnexpectedEOF
- }
- m.Id = append(m.Id[:0], dAtA[iNdEx:postIndex]...)
- if m.Id == nil {
- m.Id = []byte{}
- }
- iNdEx = postIndex
- case 2:
- if wireType != 2 {
- return fmt.Errorf("proto: wrong wireType = %d for field Pubkey", wireType)
- }
- var msglen int
- for shift := uint(0); ; shift += 7 {
- if shift >= 64 {
- return ErrIntOverflowPlaintext
- }
- if iNdEx >= l {
- return io.ErrUnexpectedEOF
- }
- b := dAtA[iNdEx]
- iNdEx++
- msglen |= int(b&0x7F) << shift
- if b < 0x80 {
- break
- }
- }
- if msglen < 0 {
- return ErrInvalidLengthPlaintext
- }
- postIndex := iNdEx + msglen
- if postIndex < 0 {
- return ErrInvalidLengthPlaintext
- }
- if postIndex > l {
- return io.ErrUnexpectedEOF
- }
- if m.Pubkey == nil {
- m.Pubkey = &pb.PublicKey{}
- }
- if err := m.Pubkey.Unmarshal(dAtA[iNdEx:postIndex]); err != nil {
- return err
- }
- iNdEx = postIndex
- default:
- iNdEx = preIndex
- skippy, err := skipPlaintext(dAtA[iNdEx:])
- if err != nil {
- return err
- }
- if skippy < 0 {
- return ErrInvalidLengthPlaintext
- }
- if (iNdEx + skippy) < 0 {
- return ErrInvalidLengthPlaintext
- }
- if (iNdEx + skippy) > l {
- return io.ErrUnexpectedEOF
- }
- iNdEx += skippy
- }
- }
- if iNdEx > l {
- return io.ErrUnexpectedEOF
+func init() { file_pb_plaintext_proto_init() }
+func file_pb_plaintext_proto_init() {
+ if File_pb_plaintext_proto != nil {
+ return
}
- return nil
-}
-func skipPlaintext(dAtA []byte) (n int, err error) {
- l := len(dAtA)
- iNdEx := 0
- depth := 0
- for iNdEx < l {
- var wire uint64
- for shift := uint(0); ; shift += 7 {
- if shift >= 64 {
- return 0, ErrIntOverflowPlaintext
- }
- if iNdEx >= l {
- return 0, io.ErrUnexpectedEOF
- }
- b := dAtA[iNdEx]
- iNdEx++
- wire |= (uint64(b) & 0x7F) << shift
- if b < 0x80 {
- break
- }
- }
- wireType := int(wire & 0x7)
- switch wireType {
- case 0:
- for shift := uint(0); ; shift += 7 {
- if shift >= 64 {
- return 0, ErrIntOverflowPlaintext
- }
- if iNdEx >= l {
- return 0, io.ErrUnexpectedEOF
- }
- iNdEx++
- if dAtA[iNdEx-1] < 0x80 {
- break
- }
- }
- case 1:
- iNdEx += 8
- case 2:
- var length int
- for shift := uint(0); ; shift += 7 {
- if shift >= 64 {
- return 0, ErrIntOverflowPlaintext
- }
- if iNdEx >= l {
- return 0, io.ErrUnexpectedEOF
- }
- b := dAtA[iNdEx]
- iNdEx++
- length |= (int(b) & 0x7F) << shift
- if b < 0x80 {
- break
- }
- }
- if length < 0 {
- return 0, ErrInvalidLengthPlaintext
+ if !protoimpl.UnsafeEnabled {
+ file_pb_plaintext_proto_msgTypes[0].Exporter = func(v interface{}, i int) interface{} {
+ switch v := v.(*Exchange); i {
+ case 0:
+ return &v.state
+ case 1:
+ return &v.sizeCache
+ case 2:
+ return &v.unknownFields
+ default:
+ return nil
}
- iNdEx += length
- case 3:
- depth++
- case 4:
- if depth == 0 {
- return 0, ErrUnexpectedEndOfGroupPlaintext
- }
- depth--
- case 5:
- iNdEx += 4
- default:
- return 0, fmt.Errorf("proto: illegal wireType %d", wireType)
- }
- if iNdEx < 0 {
- return 0, ErrInvalidLengthPlaintext
- }
- if depth == 0 {
- return iNdEx, nil
}
}
- return 0, io.ErrUnexpectedEOF
+ type x struct{}
+ out := protoimpl.TypeBuilder{
+ File: protoimpl.DescBuilder{
+ GoPackagePath: reflect.TypeOf(x{}).PkgPath(),
+ RawDescriptor: file_pb_plaintext_proto_rawDesc,
+ NumEnums: 0,
+ NumMessages: 1,
+ NumExtensions: 0,
+ NumServices: 0,
+ },
+ GoTypes: file_pb_plaintext_proto_goTypes,
+ DependencyIndexes: file_pb_plaintext_proto_depIdxs,
+ MessageInfos: file_pb_plaintext_proto_msgTypes,
+ }.Build()
+ File_pb_plaintext_proto = out.File
+ file_pb_plaintext_proto_rawDesc = nil
+ file_pb_plaintext_proto_goTypes = nil
+ file_pb_plaintext_proto_depIdxs = nil
}
-
-var (
- ErrInvalidLengthPlaintext = fmt.Errorf("proto: negative length found during unmarshaling")
- ErrIntOverflowPlaintext = fmt.Errorf("proto: integer overflow")
- ErrUnexpectedEndOfGroupPlaintext = fmt.Errorf("proto: unexpected end of group")
-)
diff --git a/vendor/github.com/libp2p/go-libp2p/core/sec/insecure/pb/plaintext.proto b/vendor/github.com/libp2p/go-libp2p/core/sec/insecure/pb/plaintext.proto
index 0e792b3cf..634100bdc 100644
--- a/vendor/github.com/libp2p/go-libp2p/core/sec/insecure/pb/plaintext.proto
+++ b/vendor/github.com/libp2p/go-libp2p/core/sec/insecure/pb/plaintext.proto
@@ -2,7 +2,7 @@ syntax = "proto2";
package plaintext.pb;
-import "github.com/libp2p/go-libp2p/core/crypto/pb/crypto.proto";
+import "core/crypto/pb/crypto.proto";
message Exchange {
optional bytes id = 1;
diff --git a/vendor/github.com/libp2p/go-libp2p/core/sec/security.go b/vendor/github.com/libp2p/go-libp2p/core/sec/security.go
index c192a56a9..83059d94c 100644
--- a/vendor/github.com/libp2p/go-libp2p/core/sec/security.go
+++ b/vendor/github.com/libp2p/go-libp2p/core/sec/security.go
@@ -7,6 +7,7 @@ import (
"github.com/libp2p/go-libp2p/core/network"
"github.com/libp2p/go-libp2p/core/peer"
+ "github.com/libp2p/go-libp2p/core/protocol"
)
// SecureConn is an authenticated, encrypted connection.
@@ -24,19 +25,7 @@ type SecureTransport interface {
// SecureOutbound secures an outbound connection.
SecureOutbound(ctx context.Context, insecure net.Conn, p peer.ID) (SecureConn, error)
-}
-// A SecureMuxer is a wrapper around SecureTransport which can select security protocols
-// and open outbound connections with simultaneous open.
-type SecureMuxer interface {
- // SecureInbound secures an inbound connection.
- // The returned boolean indicates whether the connection should be treated as a server
- // connection; in the case of SecureInbound it should always be true.
- // If p is empty, connections from any peer are accepted.
- SecureInbound(ctx context.Context, insecure net.Conn, p peer.ID) (SecureConn, bool, error)
-
- // SecureOutbound secures an outbound connection.
- // The returned boolean indicates whether the connection should be treated as a server
- // connection due to simultaneous open.
- SecureOutbound(ctx context.Context, insecure net.Conn, p peer.ID) (SecureConn, bool, error)
+ // ID is the protocol ID of the security protocol.
+ ID() protocol.ID
}
diff --git a/vendor/github.com/libp2p/go-libp2p/core/transport/transport.go b/vendor/github.com/libp2p/go-libp2p/core/transport/transport.go
index 379e9d6d4..859c6d608 100644
--- a/vendor/github.com/libp2p/go-libp2p/core/transport/transport.go
+++ b/vendor/github.com/libp2p/go-libp2p/core/transport/transport.go
@@ -4,6 +4,7 @@ package transport
import (
"context"
+ "errors"
"net"
"github.com/libp2p/go-libp2p/core/network"
@@ -77,6 +78,12 @@ type Transport interface {
Proxy() bool
}
+// Resolver can be optionally implemented by transports that want to resolve or transform the
+// multiaddr.
+type Resolver interface {
+ Resolve(ctx context.Context, maddr ma.Multiaddr) ([]ma.Multiaddr, error)
+}
+
// Listener is an interface closely resembling the net.Listener interface. The
// only real difference is that Accept() returns Conn's of the type in this
// package, and also exposes a Multiaddr method as opposed to a regular Addr
@@ -88,6 +95,9 @@ type Listener interface {
Multiaddr() ma.Multiaddr
}
+// ErrListenerClosed is returned by Listener.Accept when the listener is gracefully closed.
+var ErrListenerClosed = errors.New("listener closed")
+
// TransportNetwork is an inet.Network with methods for managing transports.
type TransportNetwork interface {
network.Network
diff --git a/vendor/github.com/libp2p/go-libp2p/defaults.go b/vendor/github.com/libp2p/go-libp2p/defaults.go
index ca7064486..c0ed6698a 100644
--- a/vendor/github.com/libp2p/go-libp2p/defaults.go
+++ b/vendor/github.com/libp2p/go-libp2p/defaults.go
@@ -15,8 +15,11 @@ import (
quic "github.com/libp2p/go-libp2p/p2p/transport/quic"
"github.com/libp2p/go-libp2p/p2p/transport/tcp"
ws "github.com/libp2p/go-libp2p/p2p/transport/websocket"
+ webtransport "github.com/libp2p/go-libp2p/p2p/transport/webtransport"
+ "github.com/prometheus/client_golang/prometheus"
"github.com/multiformats/go-multiaddr"
+ madns "github.com/multiformats/go-multiaddr-dns"
)
// DefaultSecurity is the default security option.
@@ -32,7 +35,7 @@ var DefaultSecurity = ChainOptions(
//
// Use this option when you want to *extend* the set of multiplexers used by
// libp2p instead of replacing them.
-var DefaultMuxers = Muxer("/yamux/1.0.0", yamux.DefaultTransport)
+var DefaultMuxers = Muxer(yamux.ID, yamux.DefaultTransport)
// DefaultTransports are the default libp2p transports.
//
@@ -42,6 +45,16 @@ var DefaultTransports = ChainOptions(
Transport(tcp.NewTCPTransport),
Transport(quic.NewTransport),
Transport(ws.New),
+ Transport(webtransport.New),
+)
+
+// DefaultPrivateTransports are the default libp2p transports when a PSK is supplied.
+//
+// Use this option when you want to *extend* the set of transports used by
+// libp2p instead of replacing them.
+var DefaultPrivateTransports = ChainOptions(
+ Transport(tcp.NewTCPTransport),
+ Transport(ws.New),
)
// DefaultPeerstore configures libp2p to use the default peerstore.
@@ -64,19 +77,25 @@ var RandomIdentity = func(cfg *Config) error {
// DefaultListenAddrs configures libp2p to use default listen address.
var DefaultListenAddrs = func(cfg *Config) error {
- defaultIP4ListenAddr, err := multiaddr.NewMultiaddr("/ip4/0.0.0.0/tcp/0")
- if err != nil {
- return err
+ addrs := []string{
+ "/ip4/0.0.0.0/tcp/0",
+ "/ip4/0.0.0.0/udp/0/quic",
+ "/ip4/0.0.0.0/udp/0/quic-v1",
+ "/ip4/0.0.0.0/udp/0/quic-v1/webtransport",
+ "/ip6/::/tcp/0",
+ "/ip6/::/udp/0/quic",
+ "/ip6/::/udp/0/quic-v1",
+ "/ip6/::/udp/0/quic-v1/webtransport",
}
-
- defaultIP6ListenAddr, err := multiaddr.NewMultiaddr("/ip6/::/tcp/0")
- if err != nil {
- return err
+ listenAddrs := make([]multiaddr.Multiaddr, 0, len(addrs))
+ for _, s := range addrs {
+ addr, err := multiaddr.NewMultiaddr(s)
+ if err != nil {
+ return err
+ }
+ listenAddrs = append(listenAddrs, addr)
}
- return cfg.Apply(ListenAddrs(
- defaultIP4ListenAddr,
- defaultIP6ListenAddr,
- ))
+ return cfg.Apply(ListenAddrs(listenAddrs...))
}
// DefaultEnableRelay enables relay dialing and listening by default.
@@ -106,6 +125,16 @@ var DefaultConnectionManager = func(cfg *Config) error {
return cfg.Apply(ConnectionManager(mgr))
}
+// DefaultMultiaddrResolver creates a default connection manager
+var DefaultMultiaddrResolver = func(cfg *Config) error {
+ return cfg.Apply(MultiaddrResolver(madns.DefaultResolver))
+}
+
+// DefaultPrometheusRegisterer configures libp2p to use the default registerer
+var DefaultPrometheusRegisterer = func(cfg *Config) error {
+ return cfg.Apply(PrometheusRegisterer(prometheus.DefaultRegisterer))
+}
+
// Complete list of default options and when to fallback on them.
//
// Please *DON'T* specify default options any other way. Putting this all here
@@ -119,9 +148,13 @@ var defaults = []struct {
opt: DefaultListenAddrs,
},
{
- fallback: func(cfg *Config) bool { return cfg.Transports == nil },
+ fallback: func(cfg *Config) bool { return cfg.Transports == nil && cfg.PSK == nil },
opt: DefaultTransports,
},
+ {
+ fallback: func(cfg *Config) bool { return cfg.Transports == nil && cfg.PSK != nil },
+ opt: DefaultPrivateTransports,
+ },
{
fallback: func(cfg *Config) bool { return cfg.Muxers == nil },
opt: DefaultMuxers,
@@ -150,6 +183,14 @@ var defaults = []struct {
fallback: func(cfg *Config) bool { return cfg.ConnManager == nil },
opt: DefaultConnectionManager,
},
+ {
+ fallback: func(cfg *Config) bool { return cfg.MultiaddrResolver == nil },
+ opt: DefaultMultiaddrResolver,
+ },
+ {
+ fallback: func(cfg *Config) bool { return !cfg.DisableMetrics && cfg.PrometheusRegisterer == nil },
+ opt: DefaultPrometheusRegisterer,
+ },
}
// Defaults configures libp2p to use the default options. Can be combined with
diff --git a/vendor/github.com/libp2p/go-libp2p/error_util.go b/vendor/github.com/libp2p/go-libp2p/error_util.go
deleted file mode 100644
index 86827f4ea..000000000
--- a/vendor/github.com/libp2p/go-libp2p/error_util.go
+++ /dev/null
@@ -1,17 +0,0 @@
-package libp2p
-
-import (
- "fmt"
- "runtime"
-)
-
-func traceError(err error, skip int) error {
- if err == nil {
- return nil
- }
- _, file, line, ok := runtime.Caller(skip + 1)
- if !ok {
- return err
- }
- return fmt.Errorf("%s:%d: %s", file, line, err)
-}
diff --git a/vendor/github.com/libp2p/go-libp2p/libp2p.go b/vendor/github.com/libp2p/go-libp2p/libp2p.go
index d77c3b9b2..db23253b1 100644
--- a/vendor/github.com/libp2p/go-libp2p/libp2p.go
+++ b/vendor/github.com/libp2p/go-libp2p/libp2p.go
@@ -37,8 +37,7 @@ func ChainOptions(opts ...Option) Option {
// transport protocols;
//
// - If no multiplexer configuration is provided, the node is configured by
-// default to use the "yamux/1.0.0" and "mplux/6.7.0" stream connection
-// multiplexers;
+// default to use yamux;
//
// - If no security transport is provided, the host uses the go-libp2p's noise
// and/or tls encrypted transport to encrypt all traffic;
diff --git a/vendor/github.com/libp2p/go-libp2p/limits.go b/vendor/github.com/libp2p/go-libp2p/limits.go
index cf81fa762..5871577e5 100644
--- a/vendor/github.com/libp2p/go-libp2p/limits.go
+++ b/vendor/github.com/libp2p/go-libp2p/limits.go
@@ -4,7 +4,6 @@ import (
"github.com/libp2p/go-libp2p/core/protocol"
"github.com/libp2p/go-libp2p/p2p/host/autonat"
rcmgr "github.com/libp2p/go-libp2p/p2p/host/resource-manager"
- relayv1 "github.com/libp2p/go-libp2p/p2p/protocol/circuitv1/relay"
circuit "github.com/libp2p/go-libp2p/p2p/protocol/circuitv2/proto"
relayv2 "github.com/libp2p/go-libp2p/p2p/protocol/circuitv2/relay"
"github.com/libp2p/go-libp2p/p2p/protocol/holepunch"
@@ -25,7 +24,7 @@ func SetDefaultServiceLimits(config *rcmgr.ScalingLimitConfig) {
rcmgr.BaseLimit{StreamsInbound: 16, StreamsOutbound: 16, Streams: 32, Memory: 1 << 20},
rcmgr.BaseLimitIncrease{},
)
- for _, id := range [...]protocol.ID{identify.ID, identify.IDDelta, identify.IDPush} {
+ for _, id := range [...]protocol.ID{identify.ID, identify.IDPush} {
config.AddProtocolLimit(
id,
rcmgr.BaseLimit{StreamsInbound: 64, StreamsOutbound: 64, Streams: 128, Memory: 4 << 20},
@@ -76,18 +75,6 @@ func SetDefaultServiceLimits(config *rcmgr.ScalingLimitConfig) {
rcmgr.BaseLimitIncrease{},
)
- // relay/v1
- config.AddServiceLimit(
- relayv1.ServiceName,
- rcmgr.BaseLimit{StreamsInbound: 256, StreamsOutbound: 256, Streams: 256, Memory: 16 << 20},
- rcmgr.BaseLimitIncrease{StreamsInbound: 256, StreamsOutbound: 256, Streams: 256, Memory: 16 << 20},
- )
- config.AddServicePeerLimit(
- relayv1.ServiceName,
- rcmgr.BaseLimit{StreamsInbound: 64, StreamsOutbound: 64, Streams: 64, Memory: 1 << 20},
- rcmgr.BaseLimitIncrease{},
- )
-
// relay/v2
config.AddServiceLimit(
relayv2.ServiceName,
@@ -101,7 +88,7 @@ func SetDefaultServiceLimits(config *rcmgr.ScalingLimitConfig) {
)
// circuit protocols, both client and service
- for _, proto := range [...]protocol.ID{circuit.ProtoIDv1, circuit.ProtoIDv2Hop, circuit.ProtoIDv2Stop} {
+ for _, proto := range [...]protocol.ID{circuit.ProtoIDv2Hop, circuit.ProtoIDv2Stop} {
config.AddProtocolLimit(
proto,
rcmgr.BaseLimit{StreamsInbound: 640, StreamsOutbound: 640, Streams: 640, Memory: 16 << 20},
diff --git a/vendor/github.com/libp2p/go-libp2p/options.go b/vendor/github.com/libp2p/go-libp2p/options.go
index 6ed6a94d3..a124a2e27 100644
--- a/vendor/github.com/libp2p/go-libp2p/options.go
+++ b/vendor/github.com/libp2p/go-libp2p/options.go
@@ -4,8 +4,11 @@ package libp2p
// those are in defaults.go).
import (
+ "crypto/rand"
+ "encoding/binary"
"errors"
"fmt"
+ "reflect"
"time"
"github.com/libp2p/go-libp2p/config"
@@ -16,13 +19,19 @@ import (
"github.com/libp2p/go-libp2p/core/peer"
"github.com/libp2p/go-libp2p/core/peerstore"
"github.com/libp2p/go-libp2p/core/pnet"
+ "github.com/libp2p/go-libp2p/core/protocol"
+ "github.com/libp2p/go-libp2p/core/transport"
"github.com/libp2p/go-libp2p/p2p/host/autorelay"
bhost "github.com/libp2p/go-libp2p/p2p/host/basic"
+ tptu "github.com/libp2p/go-libp2p/p2p/net/upgrader"
relayv2 "github.com/libp2p/go-libp2p/p2p/protocol/circuitv2/relay"
"github.com/libp2p/go-libp2p/p2p/protocol/holepunch"
+ "github.com/libp2p/go-libp2p/p2p/transport/quicreuse"
+ "github.com/prometheus/client_golang/prometheus"
ma "github.com/multiformats/go-multiaddr"
madns "github.com/multiformats/go-multiaddr-dns"
+ "go.uber.org/fx"
)
// ListenAddrStrings configures libp2p to listen on the given (unparsed)
@@ -61,17 +70,12 @@ func ListenAddrs(addrs ...ma.Multiaddr) Option {
// * Host
// * Network
// * Peerstore
-func Security(name string, tpt interface{}) Option {
- stpt, err := config.SecurityConstructor(tpt)
- err = traceError(err, 1)
+func Security(name string, constructor interface{}) Option {
return func(cfg *Config) error {
- if err != nil {
- return err
- }
if cfg.Insecure {
return fmt.Errorf("cannot use security transports with an insecure libp2p configuration")
}
- cfg.SecurityTransports = append(cfg.SecurityTransports, config.MsSecC{SecC: stpt, ID: name})
+ cfg.SecurityTransports = append(cfg.SecurityTransports, config.Security{ID: protocol.ID(name), Constructor: constructor})
return nil
}
}
@@ -86,25 +90,38 @@ var NoSecurity Option = func(cfg *Config) error {
return nil
}
-// Muxer configures libp2p to use the given stream multiplexer (or stream
-// multiplexer constructor).
-//
-// Name is the protocol name.
-//
-// The transport can be a constructed mux.Transport or a function taking any
-// subset of this libp2p node's:
-// * Peer ID
-// * Host
-// * Network
-// * Peerstore
-func Muxer(name string, tpt interface{}) Option {
- mtpt, err := config.MuxerConstructor(tpt)
- err = traceError(err, 1)
+// Muxer configures libp2p to use the given stream multiplexer.
+// name is the protocol name.
+func Muxer(name string, muxer network.Multiplexer) Option {
return func(cfg *Config) error {
- if err != nil {
- return err
+ cfg.Muxers = append(cfg.Muxers, tptu.StreamMuxer{Muxer: muxer, ID: protocol.ID(name)})
+ return nil
+ }
+}
+
+func QUICReuse(constructor interface{}, opts ...quicreuse.Option) Option {
+ return func(cfg *Config) error {
+ tag := `group:"quicreuseopts"`
+ typ := reflect.ValueOf(constructor).Type()
+ numParams := typ.NumIn()
+ isVariadic := typ.IsVariadic()
+
+ if !isVariadic && len(opts) > 0 {
+ return errors.New("QUICReuse constructor doesn't take any options")
+ }
+
+ var params []string
+ if isVariadic && len(opts) > 0 {
+ // If there are options, apply the tag.
+ // Since options are variadic, they have to be the last argument of the constructor.
+ params = make([]string, numParams)
+ params[len(params)-1] = tag
+ }
+
+ cfg.QUICReuse = append(cfg.QUICReuse, fx.Provide(fx.Annotate(constructor, fx.ParamTags(params...))))
+ for _, opt := range opts {
+ cfg.QUICReuse = append(cfg.QUICReuse, fx.Supply(fx.Annotate(opt, fx.ResultTags(tag))))
}
- cfg.Muxers = append(cfg.Muxers, config.MsMuxC{MuxC: mtpt, ID: name})
return nil
}
}
@@ -124,14 +141,55 @@ func Muxer(name string, tpt interface{}) Option {
// * Public Key
// * Address filter (filter.Filter)
// * Peerstore
-func Transport(tpt interface{}, opts ...interface{}) Option {
- tptc, err := config.TransportConstructor(tpt, opts...)
- err = traceError(err, 1)
+func Transport(constructor interface{}, opts ...interface{}) Option {
return func(cfg *Config) error {
- if err != nil {
- return err
+ // generate a random identifier, so that fx can associate the constructor with its options
+ b := make([]byte, 8)
+ rand.Read(b)
+ id := binary.BigEndian.Uint64(b)
+
+ tag := fmt.Sprintf(`group:"transportopt_%d"`, id)
+
+ typ := reflect.ValueOf(constructor).Type()
+ numParams := typ.NumIn()
+ isVariadic := typ.IsVariadic()
+
+ if !isVariadic && len(opts) > 0 {
+ return errors.New("transport constructor doesn't take any options")
+ }
+ if isVariadic && numParams >= 1 {
+ paramType := typ.In(numParams - 1).Elem()
+ for _, opt := range opts {
+ if typ := reflect.TypeOf(opt); !typ.AssignableTo(paramType) {
+ return fmt.Errorf("transport option of type %s not assignable to %s", typ, paramType)
+ }
+ }
+ }
+
+ var params []string
+ if isVariadic && len(opts) > 0 {
+ // If there are transport options, apply the tag.
+ // Since options are variadic, they have to be the last argument of the constructor.
+ params = make([]string, numParams)
+ params[len(params)-1] = tag
+ }
+
+ cfg.Transports = append(cfg.Transports, fx.Provide(
+ fx.Annotate(
+ constructor,
+ fx.ParamTags(params...),
+ fx.As(new(transport.Transport)),
+ fx.ResultTags(`group:"transport"`),
+ ),
+ ))
+ for _, opt := range opts {
+ cfg.Transports = append(cfg.Transports, fx.Supply(
+ fx.Annotate(
+ opt,
+ fx.ResultTags(tag),
+ ),
+ ))
}
- cfg.Transports = append(cfg.Transports, tptc)
return nil
}
}
@@ -245,10 +303,14 @@ func EnableRelayService(opts ...relayv2.Option) Option {
//
// Dependencies:
// - Relay (enabled by default)
-// - Routing (to find relays), or StaticRelays/DefaultStaticRelays.
+// - Either:
+// 1. A list of static relays
+// 2. A PeerSource function that provides a chan of relays. See `autorelay.WithPeerSource`
//
// This subsystem performs automatic address rewriting to advertise relay addresses when it
// detects that the node is publicly unreachable (e.g. behind a NAT).
+//
+// Deprecated: Use EnableAutoRelayWithStaticRelays or EnableAutoRelayWithPeerSource
func EnableAutoRelay(opts ...autorelay.Option) Option {
return func(cfg *Config) error {
cfg.EnableAutoRelay = true
@@ -257,29 +319,29 @@ func EnableAutoRelay(opts ...autorelay.Option) Option {
}
}
-// StaticRelays configures known relays for autorelay; when this option is enabled
-// then the system will use the configured relays instead of querying the DHT to
-// discover relays.
-// Deprecated: pass an autorelay.WithStaticRelays option to EnableAutoRelay.
-func StaticRelays(relays []peer.AddrInfo) Option {
+// EnableAutoRelayWithStaticRelays configures libp2p to enable the AutoRelay subsystem using
+// the provided relays as relay candidates.
+// This subsystem performs automatic address rewriting to advertise relay addresses when it
+// detects that the node is publicly unreachable (e.g. behind a NAT).
+func EnableAutoRelayWithStaticRelays(static []peer.AddrInfo, opts ...autorelay.Option) Option {
return func(cfg *Config) error {
- cfg.AutoRelayOpts = append(cfg.AutoRelayOpts, autorelay.WithStaticRelays(relays))
+ cfg.EnableAutoRelay = true
+ cfg.AutoRelayOpts = append([]autorelay.Option{autorelay.WithStaticRelays(static)}, opts...)
return nil
}
}
-// DefaultStaticRelays configures the static relays to use the known PL-operated relays.
-// Deprecated: pass autorelay.WithDefaultStaticRelays to EnableAutoRelay.
-func DefaultStaticRelays() Option {
- relays := make([]peer.AddrInfo, 0, len(autorelay.DefaultRelays))
- for _, addr := range autorelay.DefaultRelays {
- pi, err := peer.AddrInfoFromString(addr)
- if err != nil {
- panic(fmt.Sprintf("failed to initialize default static relays: %s", err))
- }
- relays = append(relays, *pi)
+// EnableAutoRelayWithPeerSource configures libp2p to enable the AutoRelay
+// subsystem using the provided PeerSource callback to get more relay
+// candidates. This subsystem performs automatic address rewriting to advertise
+// relay addresses when it detects that the node is publicly unreachable (e.g.
+// behind a NAT).
+func EnableAutoRelayWithPeerSource(peerSource autorelay.PeerSource, opts ...autorelay.Option) Option {
+ return func(cfg *Config) error {
+ cfg.EnableAutoRelay = true
+ cfg.AutoRelayOpts = append([]autorelay.Option{autorelay.WithPeerSource(peerSource)}, opts...)
+ return nil
}
- return StaticRelays(relays)
}
// ForceReachabilityPublic overrides automatic reachability detection in the AutoNAT subsystem,
@@ -410,10 +472,19 @@ var NoListenAddrs = func(cfg *Config) error {
// This will both clear any configured transports (specified in prior libp2p
// options) and prevent libp2p from applying the default transports.
var NoTransports = func(cfg *Config) error {
- cfg.Transports = []config.TptC{}
+ cfg.Transports = []fx.Option{}
return nil
}
+// ProtocolVersion sets the protocolVersion string required by the
+// libp2p Identify protocol.
+func ProtocolVersion(s string) Option {
+ return func(cfg *Config) error {
+ cfg.ProtocolVersion = s
+ return nil
+ }
+}
+
// UserAgent sets the libp2p user-agent sent along with the identify protocol
func UserAgent(userAgent string) Option {
return func(cfg *Config) error {
@@ -478,3 +549,41 @@ func WithDialTimeout(t time.Duration) Option {
return nil
}
}
+
+// DisableMetrics configures libp2p to disable prometheus metrics
+func DisableMetrics() Option {
+ return func(cfg *Config) error {
+ cfg.DisableMetrics = true
+ return nil
+ }
+}
+
+// PrometheusRegisterer configures libp2p to use reg as the Registerer for all metrics subsystems
+func PrometheusRegisterer(reg prometheus.Registerer) Option {
+ return func(cfg *Config) error {
+ if cfg.DisableMetrics {
+ return errors.New("cannot set registerer when metrics are disabled")
+ }
+ if cfg.PrometheusRegisterer != nil {
+ return errors.New("registerer already set")
+ }
+ if reg == nil {
+ return errors.New("registerer cannot be nil")
+ }
+ cfg.PrometheusRegisterer = reg
+ return nil
+ }
+}
+
+// DialRanker configures libp2p to use d as the dial ranker. To enable smart
+// dialing use `swarm.DefaultDialRanker`. use `swarm.NoDelayDialRanker` to
+// disable smart dialing.
+func DialRanker(d network.DialRanker) Option {
+ return func(cfg *Config) error {
+ if cfg.DialRanker != nil {
+ return errors.New("dial ranker already configured")
+ }
+ cfg.DialRanker = d
+ return nil
+ }
+}
diff --git a/vendor/github.com/libp2p/go-libp2p/p2p/discovery/util/util.go b/vendor/github.com/libp2p/go-libp2p/p2p/discovery/util/util.go
index 741752615..b21a676a7 100644
--- a/vendor/github.com/libp2p/go-libp2p/p2p/discovery/util/util.go
+++ b/vendor/github.com/libp2p/go-libp2p/p2p/discovery/util/util.go
@@ -14,13 +14,13 @@ var log = logging.Logger("discovery-util")
// FindPeers is a utility function that synchronously collects peers from a Discoverer.
func FindPeers(ctx context.Context, d discovery.Discoverer, ns string, opts ...discovery.Option) ([]peer.AddrInfo, error) {
- var res []peer.AddrInfo
ch, err := d.FindPeers(ctx, ns, opts...)
if err != nil {
return nil, err
}
+ res := make([]peer.AddrInfo, 0, len(ch))
for pi := range ch {
res = append(res, pi)
}
diff --git a/vendor/github.com/libp2p/go-libp2p/p2p/host/autonat/autonat.go b/vendor/github.com/libp2p/go-libp2p/p2p/host/autonat/autonat.go
index 0fd8b0ec4..fc8c6763b 100644
--- a/vendor/github.com/libp2p/go-libp2p/p2p/host/autonat/autonat.go
+++ b/vendor/github.com/libp2p/go-libp2p/p2p/host/autonat/autonat.go
@@ -2,7 +2,6 @@ package autonat
import (
"context"
- "errors"
"math/rand"
"sync/atomic"
"time"
@@ -20,6 +19,8 @@ import (
var log = logging.Logger("autonat")
+const maxConfidence = 3
+
// AmbientAutoNAT is the implementation of ambient NAT autodiscovery
type AmbientAutoNAT struct {
host host.Host
@@ -30,10 +31,10 @@ type AmbientAutoNAT struct {
ctxCancel context.CancelFunc // is closed when Close is called
backgroundRunning chan struct{} // is closed when the background go routine exits
- inboundConn chan network.Conn
- observations chan autoNATResult
+ inboundConn chan network.Conn
+ dialResponses chan error
// status is an autoNATResult reflecting current status.
- status atomic.Value
+ status atomic.Pointer[network.Reachability]
// Reflects the confidence on of the NATStatus being private, as a single
// dialback may fail for reasons unrelated to NAT.
// If it is <3, then multiple autoNAT peers may be contacted for dialback
@@ -58,11 +59,6 @@ type StaticAutoNAT struct {
service *autoNATService
}
-type autoNATResult struct {
- network.Reachability
- address ma.Multiaddr
-}
-
// New creates a new NAT autodiscovery system attached to a host
func New(h host.Host, options ...Option) (AutoNAT, error) {
var err error
@@ -111,15 +107,19 @@ func New(h host.Host, options ...Option) (AutoNAT, error) {
host: h,
config: conf,
inboundConn: make(chan network.Conn, 5),
- observations: make(chan autoNATResult, 1),
+ dialResponses: make(chan error, 1),
emitReachabilityChanged: emitReachabilityChanged,
service: service,
recentProbes: make(map[peer.ID]time.Time),
}
- as.status.Store(autoNATResult{network.ReachabilityUnknown, nil})
+ reachability := network.ReachabilityUnknown
+ as.status.Store(&reachability)
- subscriber, err := as.host.EventBus().Subscribe([]interface{}{new(event.EvtLocalAddressesUpdated), new(event.EvtPeerIdentificationCompleted)})
+ subscriber, err := as.host.EventBus().Subscribe(
+ []any{new(event.EvtLocalAddressesUpdated), new(event.EvtPeerIdentificationCompleted)},
+ eventbus.Name("autonat"),
+ )
if err != nil {
return nil, err
}
@@ -133,23 +133,16 @@ func New(h host.Host, options ...Option) (AutoNAT, error) {
// Status returns the AutoNAT observed reachability status.
func (as *AmbientAutoNAT) Status() network.Reachability {
- s := as.status.Load().(autoNATResult)
- return s.Reachability
+ s := as.status.Load()
+ return *s
}
func (as *AmbientAutoNAT) emitStatus() {
- status := as.status.Load().(autoNATResult)
- as.emitReachabilityChanged.Emit(event.EvtLocalReachabilityChanged{Reachability: status.Reachability})
-}
-
-// PublicAddr returns the publicly connectable Multiaddr of this node if one is known.
-func (as *AmbientAutoNAT) PublicAddr() (ma.Multiaddr, error) {
- s := as.status.Load().(autoNATResult)
- if s.Reachability != network.ReachabilityPublic {
- return nil, errors.New("NAT status is not public")
+ status := *as.status.Load()
+ as.emitReachabilityChanged.Emit(event.EvtLocalReachabilityChanged{Reachability: status})
+ if as.metricsTracer != nil {
+ as.metricsTracer.ReachabilityStatus(status)
}
-
- return s.address, nil
}
func ipInList(candidate ma.Multiaddr, list []ma.Multiaddr) bool {
@@ -168,7 +161,6 @@ func (as *AmbientAutoNAT) background() {
// before starting autodetection
delay := as.config.bootDelay
- var lastAddrUpdated time.Time
subChan := as.subscriber.Out()
defer as.subscriber.Close()
defer as.emitReachabilityChanged.Close()
@@ -176,15 +168,12 @@ func (as *AmbientAutoNAT) background() {
timer := time.NewTimer(delay)
defer timer.Stop()
timerRunning := true
+ retryProbe := false
for {
select {
// new inbound connection.
case conn := <-as.inboundConn:
localAddrs := as.host.Addrs()
- ca := as.status.Load().(autoNATResult)
- if ca.address != nil {
- localAddrs = append(localAddrs, ca.address)
- }
if manet.IsPublicAddr(conn.RemoteMultiaddr()) &&
!ipInList(conn.RemoteMultiaddr(), localAddrs) {
as.lastInbound = time.Now()
@@ -193,16 +182,15 @@ func (as *AmbientAutoNAT) background() {
case e := <-subChan:
switch e := e.(type) {
case event.EvtLocalAddressesUpdated:
- if !lastAddrUpdated.Add(time.Second).After(time.Now()) {
- lastAddrUpdated = time.Now()
- if as.confidence > 1 {
- as.confidence--
- }
+ // On local address update, reduce confidence from maximum so that we schedule
+ // the next probe sooner
+ if as.confidence == maxConfidence {
+ as.confidence--
}
case event.EvtPeerIdentificationCompleted:
if s, err := as.host.Peerstore().SupportsProtocols(e.Peer, AutoNATProto); err == nil && len(s) > 0 {
- currentStatus := as.status.Load().(autoNATResult)
- if currentStatus.Reachability == network.ReachabilityUnknown {
+ currentStatus := *as.status.Load()
+ if currentStatus == network.ReachabilityUnknown {
as.tryProbe(e.Peer)
}
}
@@ -211,15 +199,20 @@ func (as *AmbientAutoNAT) background() {
}
// probe finished.
- case result, ok := <-as.observations:
+ case err, ok := <-as.dialResponses:
if !ok {
return
}
- as.recordObservation(result)
+ if IsDialRefused(err) {
+ retryProbe = true
+ } else {
+ as.handleDialResponse(err)
+ }
case <-timer.C:
peer := as.getPeerToProbe()
as.tryProbe(peer)
timerRunning = false
+ retryProbe = false
case <-as.ctx.Done():
return
}
@@ -228,7 +221,7 @@ func (as *AmbientAutoNAT) background() {
if timerRunning && !timer.Stop() {
<-timer.C
}
- timer.Reset(as.scheduleProbe())
+ timer.Reset(as.scheduleProbe(retryProbe))
timerRunning = true
}
}
@@ -243,14 +236,15 @@ func (as *AmbientAutoNAT) cleanupRecentProbes() {
}
// scheduleProbe calculates when the next probe should be scheduled for.
-func (as *AmbientAutoNAT) scheduleProbe() time.Duration {
+func (as *AmbientAutoNAT) scheduleProbe(retryProbe bool) time.Duration {
// Our baseline is a probe every 'AutoNATRefreshInterval'
// This is modulated by:
- // * if we are in an unknown state, or have low confidence, that should drop to 'AutoNATRetryInterval'
+ // * if we are in an unknown state, have low confidence, or we want to retry because a probe was refused that
+ // should drop to 'AutoNATRetryInterval'
// * recent inbound connections (implying continued connectivity) should decrease the retry when public
// * recent inbound connections when not public mean we should try more actively to see if we're public.
fixedNow := time.Now()
- currentStatus := as.status.Load().(autoNATResult)
+ currentStatus := *as.status.Load()
nextProbe := fixedNow
// Don't look for peers in the peer store more than once per second.
@@ -262,13 +256,15 @@ func (as *AmbientAutoNAT) scheduleProbe() time.Duration {
}
if !as.lastProbe.IsZero() {
untilNext := as.config.refreshInterval
- if currentStatus.Reachability == network.ReachabilityUnknown {
+ if retryProbe {
untilNext = as.config.retryInterval
- } else if as.confidence < 3 {
+ } else if currentStatus == network.ReachabilityUnknown {
untilNext = as.config.retryInterval
- } else if currentStatus.Reachability == network.ReachabilityPublic && as.lastInbound.After(as.lastProbe) {
+ } else if as.confidence < maxConfidence {
+ untilNext = as.config.retryInterval
+ } else if currentStatus == network.ReachabilityPublic && as.lastInbound.After(as.lastProbe) {
untilNext *= 2
- } else if currentStatus.Reachability != network.ReachabilityPublic && as.lastInbound.After(as.lastProbe) {
+ } else if currentStatus != network.ReachabilityPublic && as.lastInbound.After(as.lastProbe) {
untilNext /= 5
}
@@ -276,72 +272,86 @@ func (as *AmbientAutoNAT) scheduleProbe() time.Duration {
nextProbe = as.lastProbe.Add(untilNext)
}
}
-
+ if as.metricsTracer != nil {
+ as.metricsTracer.NextProbeTime(nextProbe)
+ }
return nextProbe.Sub(fixedNow)
}
-// Update the current status based on an observed result.
-func (as *AmbientAutoNAT) recordObservation(observation autoNATResult) {
- currentStatus := as.status.Load().(autoNATResult)
- if observation.Reachability == network.ReachabilityPublic {
- log.Debugf("NAT status is public")
+// handleDialResponse updates the current status based on dial response.
+func (as *AmbientAutoNAT) handleDialResponse(dialErr error) {
+ var observation network.Reachability
+ switch {
+ case dialErr == nil:
+ observation = network.ReachabilityPublic
+ case IsDialError(dialErr):
+ observation = network.ReachabilityPrivate
+ default:
+ observation = network.ReachabilityUnknown
+ }
+
+ as.recordObservation(observation)
+}
+
+// recordObservation updates NAT status and confidence
+func (as *AmbientAutoNAT) recordObservation(observation network.Reachability) {
+
+ currentStatus := *as.status.Load()
+
+ if observation == network.ReachabilityPublic {
changed := false
- if currentStatus.Reachability != network.ReachabilityPublic {
+ if currentStatus != network.ReachabilityPublic {
+ // Aggressively switch to public from other states ignoring confidence
+ log.Debugf("NAT status is public")
+
// we are flipping our NATStatus, so confidence drops to 0
as.confidence = 0
if as.service != nil {
as.service.Enable()
}
changed = true
- } else if as.confidence < 3 {
+ } else if as.confidence < maxConfidence {
as.confidence++
}
- if observation.address != nil {
- if !changed && currentStatus.address != nil && !observation.address.Equal(currentStatus.address) {
- as.confidence--
- }
- if currentStatus.address == nil || !observation.address.Equal(currentStatus.address) {
- changed = true
- }
- as.status.Store(observation)
- }
- if observation.address != nil && changed {
+ as.status.Store(&observation)
+ if changed {
as.emitStatus()
}
- } else if observation.Reachability == network.ReachabilityPrivate {
- log.Debugf("NAT status is private")
- if currentStatus.Reachability == network.ReachabilityPublic {
+ } else if observation == network.ReachabilityPrivate {
+ if currentStatus != network.ReachabilityPrivate {
if as.confidence > 0 {
as.confidence--
} else {
+ log.Debugf("NAT status is private")
+
// we are flipping our NATStatus, so confidence drops to 0
as.confidence = 0
- as.status.Store(observation)
+ as.status.Store(&observation)
if as.service != nil {
as.service.Disable()
}
as.emitStatus()
}
- } else if as.confidence < 3 {
+ } else if as.confidence < maxConfidence {
as.confidence++
- as.status.Store(observation)
- if currentStatus.Reachability != network.ReachabilityPrivate {
- as.emitStatus()
- }
+ as.status.Store(&observation)
}
} else if as.confidence > 0 {
// don't just flip to unknown, reduce confidence first
as.confidence--
} else {
log.Debugf("NAT status is unknown")
- as.status.Store(autoNATResult{network.ReachabilityUnknown, nil})
- if currentStatus.Reachability != network.ReachabilityUnknown {
+ as.status.Store(&observation)
+ if currentStatus != network.ReachabilityUnknown {
if as.service != nil {
as.service.Enable()
}
as.emitStatus()
}
}
+ if as.metricsTracer != nil {
+ as.metricsTracer.ReachabilityStatusConfidence(as.confidence)
+ }
}
func (as *AmbientAutoNAT) tryProbe(p peer.ID) bool {
@@ -369,27 +379,15 @@ func (as *AmbientAutoNAT) tryProbe(p peer.ID) bool {
}
func (as *AmbientAutoNAT) probe(pi *peer.AddrInfo) {
- cli := NewAutoNATClient(as.host, as.config.addressFunc)
+ cli := NewAutoNATClient(as.host, as.config.addressFunc, as.metricsTracer)
ctx, cancel := context.WithTimeout(as.ctx, as.config.requestTimeout)
defer cancel()
- a, err := cli.DialBack(ctx, pi.ID)
-
- var result autoNATResult
- switch {
- case err == nil:
- log.Debugf("Dialback through %s successful; public address is %s", pi.ID.Pretty(), a.String())
- result.Reachability = network.ReachabilityPublic
- result.address = a
- case IsDialError(err):
- log.Debugf("Dialback through %s failed", pi.ID.Pretty())
- result.Reachability = network.ReachabilityPrivate
- default:
- result.Reachability = network.ReachabilityUnknown
- }
+ err := cli.DialBack(ctx, pi.ID)
+ log.Debugf("Dialback through peer %s completed: err: %s", pi.ID, err)
select {
- case as.observations <- result:
+ case as.dialResponses <- err:
case <-as.ctx.Done():
return
}
@@ -427,8 +425,7 @@ func (as *AmbientAutoNAT) getPeerToProbe() peer.ID {
return ""
}
- shufflePeers(candidates)
- return candidates[0]
+ return candidates[rand.Intn(len(candidates))]
}
func (as *AmbientAutoNAT) Close() error {
@@ -440,26 +437,11 @@ func (as *AmbientAutoNAT) Close() error {
return nil
}
-func shufflePeers(peers []peer.ID) {
- for i := range peers {
- j := rand.Intn(i + 1)
- peers[i], peers[j] = peers[j], peers[i]
- }
-}
-
// Status returns the AutoNAT observed reachability status.
func (s *StaticAutoNAT) Status() network.Reachability {
return s.reachability
}
-// PublicAddr returns the publicly connectable Multiaddr of this node if one is known.
-func (s *StaticAutoNAT) PublicAddr() (ma.Multiaddr, error) {
- if s.reachability != network.ReachabilityPublic {
- return nil, errors.New("NAT status is not public")
- }
- return nil, errors.New("no available address")
-}
-
func (s *StaticAutoNAT) Close() error {
if s.service != nil {
s.service.Disable()
diff --git a/vendor/github.com/libp2p/go-libp2p/p2p/host/autonat/client.go b/vendor/github.com/libp2p/go-libp2p/p2p/host/autonat/client.go
index 3edbeb50e..fa0e03bc5 100644
--- a/vendor/github.com/libp2p/go-libp2p/p2p/host/autonat/client.go
+++ b/vendor/github.com/libp2p/go-libp2p/p2p/host/autonat/client.go
@@ -8,24 +8,24 @@ import (
"github.com/libp2p/go-libp2p/core/host"
"github.com/libp2p/go-libp2p/core/network"
"github.com/libp2p/go-libp2p/core/peer"
- pb "github.com/libp2p/go-libp2p/p2p/host/autonat/pb"
+ "github.com/libp2p/go-libp2p/p2p/host/autonat/pb"
- "github.com/libp2p/go-msgio/protoio"
- ma "github.com/multiformats/go-multiaddr"
+ "github.com/libp2p/go-msgio/pbio"
)
// NewAutoNATClient creates a fresh instance of an AutoNATClient
// If addrFunc is nil, h.Addrs will be used
-func NewAutoNATClient(h host.Host, addrFunc AddrFunc) Client {
+func NewAutoNATClient(h host.Host, addrFunc AddrFunc, mt MetricsTracer) Client {
if addrFunc == nil {
addrFunc = h.Addrs
}
- return &client{h: h, addrFunc: addrFunc}
+ return &client{h: h, addrFunc: addrFunc, mt: mt}
}
type client struct {
h host.Host
addrFunc AddrFunc
+ mt MetricsTracer
}
// DialBack asks peer p to dial us back on all addresses returned by the addrFunc.
@@ -34,22 +34,22 @@ type client struct {
// Note: A returned error Message_E_DIAL_ERROR does not imply that the server
// actually performed a dial attempt. Servers that run a version < v0.20.0 also
// return Message_E_DIAL_ERROR if the dial was skipped due to the dialPolicy.
-func (c *client) DialBack(ctx context.Context, p peer.ID) (ma.Multiaddr, error) {
+func (c *client) DialBack(ctx context.Context, p peer.ID) error {
s, err := c.h.NewStream(ctx, p, AutoNATProto)
if err != nil {
- return nil, err
+ return err
}
if err := s.Scope().SetService(ServiceName); err != nil {
log.Debugf("error attaching stream to autonat service: %s", err)
s.Reset()
- return nil, err
+ return err
}
if err := s.Scope().ReserveMemory(maxMsgSize, network.ReservationPriorityAlways); err != nil {
log.Debugf("error reserving memory for autonat stream: %s", err)
s.Reset()
- return nil, err
+ return err
}
defer s.Scope().ReleaseMemory(maxMsgSize)
@@ -58,32 +58,34 @@ func (c *client) DialBack(ctx context.Context, p peer.ID) (ma.Multiaddr, error)
// don't care about being nice.
defer s.Close()
- r := protoio.NewDelimitedReader(s, maxMsgSize)
- w := protoio.NewDelimitedWriter(s)
+ r := pbio.NewDelimitedReader(s, maxMsgSize)
+ w := pbio.NewDelimitedWriter(s)
req := newDialMessage(peer.AddrInfo{ID: c.h.ID(), Addrs: c.addrFunc()})
if err := w.WriteMsg(req); err != nil {
s.Reset()
- return nil, err
+ return err
}
var res pb.Message
if err := r.ReadMsg(&res); err != nil {
s.Reset()
- return nil, err
+ return err
}
if res.GetType() != pb.Message_DIAL_RESPONSE {
s.Reset()
- return nil, fmt.Errorf("unexpected response: %s", res.GetType().String())
+ return fmt.Errorf("unexpected response: %s", res.GetType().String())
}
status := res.GetDialResponse().GetStatus()
+ if c.mt != nil {
+ c.mt.ReceivedDialResponse(status)
+ }
switch status {
case pb.Message_OK:
- addr := res.GetDialResponse().GetAddr()
- return ma.NewMultiaddrBytes(addr)
+ return nil
default:
- return nil, Error{Status: status, Text: res.GetDialResponse().GetStatusText()}
+ return Error{Status: status, Text: res.GetDialResponse().GetStatusText()}
}
}
diff --git a/vendor/github.com/libp2p/go-libp2p/p2p/host/autonat/interface.go b/vendor/github.com/libp2p/go-libp2p/p2p/host/autonat/interface.go
index f4c89beab..9bf3bfe52 100644
--- a/vendor/github.com/libp2p/go-libp2p/p2p/host/autonat/interface.go
+++ b/vendor/github.com/libp2p/go-libp2p/p2p/host/autonat/interface.go
@@ -14,9 +14,6 @@ import (
type AutoNAT interface {
// Status returns the current NAT status
Status() network.Reachability
- // PublicAddr returns the public dial address when NAT status is public and an
- // error otherwise
- PublicAddr() (ma.Multiaddr, error)
io.Closer
}
@@ -24,7 +21,7 @@ type AutoNAT interface {
type Client interface {
// DialBack requests from a peer providing AutoNAT services to test dial back
// and report the address on a successful connection.
- DialBack(ctx context.Context, p peer.ID) (ma.Multiaddr, error)
+ DialBack(ctx context.Context, p peer.ID) error
}
// AddrFunc is a function returning the candidate addresses for the local host.
diff --git a/vendor/github.com/libp2p/go-libp2p/p2p/host/autonat/metrics.go b/vendor/github.com/libp2p/go-libp2p/p2p/host/autonat/metrics.go
new file mode 100644
index 000000000..4207d4e7d
--- /dev/null
+++ b/vendor/github.com/libp2p/go-libp2p/p2p/host/autonat/metrics.go
@@ -0,0 +1,162 @@
+package autonat
+
+import (
+ "time"
+
+ "github.com/libp2p/go-libp2p/core/network"
+ "github.com/libp2p/go-libp2p/p2p/host/autonat/pb"
+ "github.com/libp2p/go-libp2p/p2p/metricshelper"
+ "github.com/prometheus/client_golang/prometheus"
+)
+
+const metricNamespace = "libp2p_autonat"
+
+var (
+ reachabilityStatus = prometheus.NewGauge(
+ prometheus.GaugeOpts{
+ Namespace: metricNamespace,
+ Name: "reachability_status",
+ Help: "Current node reachability",
+ },
+ )
+ reachabilityStatusConfidence = prometheus.NewGauge(
+ prometheus.GaugeOpts{
+ Namespace: metricNamespace,
+ Name: "reachability_status_confidence",
+ Help: "Node reachability status confidence",
+ },
+ )
+ receivedDialResponseTotal = prometheus.NewCounterVec(
+ prometheus.CounterOpts{
+ Namespace: metricNamespace,
+ Name: "received_dial_response_total",
+ Help: "Count of dial responses for client",
+ },
+ []string{"response_status"},
+ )
+ outgoingDialResponseTotal = prometheus.NewCounterVec(
+ prometheus.CounterOpts{
+ Namespace: metricNamespace,
+ Name: "outgoing_dial_response_total",
+ Help: "Count of dial responses for server",
+ },
+ []string{"response_status"},
+ )
+ outgoingDialRefusedTotal = prometheus.NewCounterVec(
+ prometheus.CounterOpts{
+ Namespace: metricNamespace,
+ Name: "outgoing_dial_refused_total",
+ Help: "Count of dial requests refused by server",
+ },
+ []string{"refusal_reason"},
+ )
+ nextProbeTimestamp = prometheus.NewGauge(
+ prometheus.GaugeOpts{
+ Namespace: metricNamespace,
+ Name: "next_probe_timestamp",
+ Help: "Time of next probe",
+ },
+ )
+ collectors = []prometheus.Collector{
+ reachabilityStatus,
+ reachabilityStatusConfidence,
+ receivedDialResponseTotal,
+ outgoingDialResponseTotal,
+ outgoingDialRefusedTotal,
+ nextProbeTimestamp,
+ }
+)
+
+type MetricsTracer interface {
+ ReachabilityStatus(status network.Reachability)
+ ReachabilityStatusConfidence(confidence int)
+ ReceivedDialResponse(status pb.Message_ResponseStatus)
+ OutgoingDialResponse(status pb.Message_ResponseStatus)
+ OutgoingDialRefused(reason string)
+ NextProbeTime(t time.Time)
+}
+
+func getResponseStatus(status pb.Message_ResponseStatus) string {
+ var s string
+ switch status {
+ case pb.Message_OK:
+ s = "ok"
+ case pb.Message_E_DIAL_ERROR:
+ s = "dial error"
+ case pb.Message_E_DIAL_REFUSED:
+ s = "dial refused"
+ case pb.Message_E_BAD_REQUEST:
+ s = "bad request"
+ case pb.Message_E_INTERNAL_ERROR:
+ s = "internal error"
+ default:
+ s = "unknown"
+ }
+ return s
+}
+
+const (
+ rate_limited = "rate limited"
+ dial_blocked = "dial blocked"
+ no_valid_address = "no valid address"
+)
+
+type metricsTracer struct{}
+
+var _ MetricsTracer = &metricsTracer{}
+
+type metricsTracerSetting struct {
+ reg prometheus.Registerer
+}
+
+type MetricsTracerOption func(*metricsTracerSetting)
+
+func WithRegisterer(reg prometheus.Registerer) MetricsTracerOption {
+ return func(s *metricsTracerSetting) {
+ if reg != nil {
+ s.reg = reg
+ }
+ }
+}
+
+func NewMetricsTracer(opts ...MetricsTracerOption) MetricsTracer {
+ setting := &metricsTracerSetting{reg: prometheus.DefaultRegisterer}
+ for _, opt := range opts {
+ opt(setting)
+ }
+ metricshelper.RegisterCollectors(setting.reg, collectors...)
+ return &metricsTracer{}
+}
+
+func (mt *metricsTracer) ReachabilityStatus(status network.Reachability) {
+ reachabilityStatus.Set(float64(status))
+}
+
+func (mt *metricsTracer) ReachabilityStatusConfidence(confidence int) {
+ reachabilityStatusConfidence.Set(float64(confidence))
+}
+
+func (mt *metricsTracer) ReceivedDialResponse(status pb.Message_ResponseStatus) {
+ tags := metricshelper.GetStringSlice()
+ defer metricshelper.PutStringSlice(tags)
+ *tags = append(*tags, getResponseStatus(status))
+ receivedDialResponseTotal.WithLabelValues(*tags...).Inc()
+}
+
+func (mt *metricsTracer) OutgoingDialResponse(status pb.Message_ResponseStatus) {
+ tags := metricshelper.GetStringSlice()
+ defer metricshelper.PutStringSlice(tags)
+ *tags = append(*tags, getResponseStatus(status))
+ outgoingDialResponseTotal.WithLabelValues(*tags...).Inc()
+}
+
+func (mt *metricsTracer) OutgoingDialRefused(reason string) {
+ tags := metricshelper.GetStringSlice()
+ defer metricshelper.PutStringSlice(tags)
+ *tags = append(*tags, reason)
+ outgoingDialRefusedTotal.WithLabelValues(*tags...).Inc()
+}
+
+func (mt *metricsTracer) NextProbeTime(t time.Time) {
+ nextProbeTimestamp.Set(float64(t.Unix()))
+}
diff --git a/vendor/github.com/libp2p/go-libp2p/p2p/host/autonat/options.go b/vendor/github.com/libp2p/go-libp2p/p2p/host/autonat/options.go
index 0935dc233..8e653f816 100644
--- a/vendor/github.com/libp2p/go-libp2p/p2p/host/autonat/options.go
+++ b/vendor/github.com/libp2p/go-libp2p/p2p/host/autonat/options.go
@@ -17,6 +17,7 @@ type config struct {
dialer network.Network
forceReachability bool
reachability network.Reachability
+ metricsTracer MetricsTracer
// client
bootDelay time.Duration
@@ -142,3 +143,11 @@ func WithPeerThrottling(amount int) Option {
return nil
}
}
+
+// WithMetricsTracer uses mt to track autonat metrics
+func WithMetricsTracer(mt MetricsTracer) Option {
+ return func(c *config) error {
+ c.metricsTracer = mt
+ return nil
+ }
+}
diff --git a/vendor/github.com/libp2p/go-libp2p/p2p/host/autonat/pb/Makefile b/vendor/github.com/libp2p/go-libp2p/p2p/host/autonat/pb/Makefile
deleted file mode 100644
index dd21e878f..000000000
--- a/vendor/github.com/libp2p/go-libp2p/p2p/host/autonat/pb/Makefile
+++ /dev/null
@@ -1,6 +0,0 @@
-pbgos := $(patsubst %.proto,%.pb.go,$(wildcard *.proto))
-
-all: $(pbgos)
-
-%.pb.go: %.proto
- protoc --gogofast_out=. --proto_path=$(GOPATH)/src:. $<
diff --git a/vendor/github.com/libp2p/go-libp2p/p2p/host/autonat/pb/autonat.pb.go b/vendor/github.com/libp2p/go-libp2p/p2p/host/autonat/pb/autonat.pb.go
index a22b5e99e..2764883f4 100644
--- a/vendor/github.com/libp2p/go-libp2p/p2p/host/autonat/pb/autonat.pb.go
+++ b/vendor/github.com/libp2p/go-libp2p/p2p/host/autonat/pb/autonat.pb.go
@@ -1,26 +1,24 @@
-// Code generated by protoc-gen-gogo. DO NOT EDIT.
-// source: autonat.proto
+// Code generated by protoc-gen-go. DO NOT EDIT.
+// versions:
+// protoc-gen-go v1.30.0
+// protoc v3.21.12
+// source: pb/autonat.proto
-package autonat_pb
+package pb
import (
- fmt "fmt"
- proto "github.com/gogo/protobuf/proto"
- io "io"
- math "math"
- math_bits "math/bits"
+ protoreflect "google.golang.org/protobuf/reflect/protoreflect"
+ protoimpl "google.golang.org/protobuf/runtime/protoimpl"
+ reflect "reflect"
+ sync "sync"
)
-// Reference imports to suppress errors if they are not otherwise used.
-var _ = proto.Marshal
-var _ = fmt.Errorf
-var _ = math.Inf
-
-// This is a compile-time assertion to ensure that this generated file
-// is compatible with the proto package it is being compiled against.
-// A compilation error at this line likely means your copy of the
-// proto package needs to be updated.
-const _ = proto.GoGoProtoPackageIsVersion3 // please upgrade the proto package
+const (
+ // Verify that this generated code is sufficiently up-to-date.
+ _ = protoimpl.EnforceVersion(20 - protoimpl.MinVersion)
+ // Verify that runtime/protoimpl is sufficiently up-to-date.
+ _ = protoimpl.EnforceVersion(protoimpl.MaxVersion - 20)
+)
type Message_MessageType int32
@@ -29,15 +27,17 @@ const (
Message_DIAL_RESPONSE Message_MessageType = 1
)
-var Message_MessageType_name = map[int32]string{
- 0: "DIAL",
- 1: "DIAL_RESPONSE",
-}
-
-var Message_MessageType_value = map[string]int32{
- "DIAL": 0,
- "DIAL_RESPONSE": 1,
-}
+// Enum value maps for Message_MessageType.
+var (
+ Message_MessageType_name = map[int32]string{
+ 0: "DIAL",
+ 1: "DIAL_RESPONSE",
+ }
+ Message_MessageType_value = map[string]int32{
+ "DIAL": 0,
+ "DIAL_RESPONSE": 1,
+ }
+)
func (x Message_MessageType) Enum() *Message_MessageType {
p := new(Message_MessageType)
@@ -46,20 +46,34 @@ func (x Message_MessageType) Enum() *Message_MessageType {
}
func (x Message_MessageType) String() string {
- return proto.EnumName(Message_MessageType_name, int32(x))
+ return protoimpl.X.EnumStringOf(x.Descriptor(), protoreflect.EnumNumber(x))
+}
+
+func (Message_MessageType) Descriptor() protoreflect.EnumDescriptor {
+ return file_pb_autonat_proto_enumTypes[0].Descriptor()
+}
+
+func (Message_MessageType) Type() protoreflect.EnumType {
+ return &file_pb_autonat_proto_enumTypes[0]
+}
+
+func (x Message_MessageType) Number() protoreflect.EnumNumber {
+ return protoreflect.EnumNumber(x)
}
-func (x *Message_MessageType) UnmarshalJSON(data []byte) error {
- value, err := proto.UnmarshalJSONEnum(Message_MessageType_value, data, "Message_MessageType")
+// Deprecated: Do not use.
+func (x *Message_MessageType) UnmarshalJSON(b []byte) error {
+ num, err := protoimpl.X.UnmarshalJSONEnum(x.Descriptor(), b)
if err != nil {
return err
}
- *x = Message_MessageType(value)
+ *x = Message_MessageType(num)
return nil
}
+// Deprecated: Use Message_MessageType.Descriptor instead.
func (Message_MessageType) EnumDescriptor() ([]byte, []int) {
- return fileDescriptor_a04e278ef61ac07a, []int{0, 0}
+ return file_pb_autonat_proto_rawDescGZIP(), []int{0, 0}
}
type Message_ResponseStatus int32
@@ -72,21 +86,23 @@ const (
Message_E_INTERNAL_ERROR Message_ResponseStatus = 300
)
-var Message_ResponseStatus_name = map[int32]string{
- 0: "OK",
- 100: "E_DIAL_ERROR",
- 101: "E_DIAL_REFUSED",
- 200: "E_BAD_REQUEST",
- 300: "E_INTERNAL_ERROR",
-}
-
-var Message_ResponseStatus_value = map[string]int32{
- "OK": 0,
- "E_DIAL_ERROR": 100,
- "E_DIAL_REFUSED": 101,
- "E_BAD_REQUEST": 200,
- "E_INTERNAL_ERROR": 300,
-}
+// Enum value maps for Message_ResponseStatus.
+var (
+ Message_ResponseStatus_name = map[int32]string{
+ 0: "OK",
+ 100: "E_DIAL_ERROR",
+ 101: "E_DIAL_REFUSED",
+ 200: "E_BAD_REQUEST",
+ 300: "E_INTERNAL_ERROR",
+ }
+ Message_ResponseStatus_value = map[string]int32{
+ "OK": 0,
+ "E_DIAL_ERROR": 100,
+ "E_DIAL_REFUSED": 101,
+ "E_BAD_REQUEST": 200,
+ "E_INTERNAL_ERROR": 300,
+ }
+)
func (x Message_ResponseStatus) Enum() *Message_ResponseStatus {
p := new(Message_ResponseStatus)
@@ -95,1152 +111,414 @@ func (x Message_ResponseStatus) Enum() *Message_ResponseStatus {
}
func (x Message_ResponseStatus) String() string {
- return proto.EnumName(Message_ResponseStatus_name, int32(x))
+ return protoimpl.X.EnumStringOf(x.Descriptor(), protoreflect.EnumNumber(x))
+}
+
+func (Message_ResponseStatus) Descriptor() protoreflect.EnumDescriptor {
+ return file_pb_autonat_proto_enumTypes[1].Descriptor()
}
-func (x *Message_ResponseStatus) UnmarshalJSON(data []byte) error {
- value, err := proto.UnmarshalJSONEnum(Message_ResponseStatus_value, data, "Message_ResponseStatus")
+func (Message_ResponseStatus) Type() protoreflect.EnumType {
+ return &file_pb_autonat_proto_enumTypes[1]
+}
+
+func (x Message_ResponseStatus) Number() protoreflect.EnumNumber {
+ return protoreflect.EnumNumber(x)
+}
+
+// Deprecated: Do not use.
+func (x *Message_ResponseStatus) UnmarshalJSON(b []byte) error {
+ num, err := protoimpl.X.UnmarshalJSONEnum(x.Descriptor(), b)
if err != nil {
return err
}
- *x = Message_ResponseStatus(value)
+ *x = Message_ResponseStatus(num)
return nil
}
+// Deprecated: Use Message_ResponseStatus.Descriptor instead.
func (Message_ResponseStatus) EnumDescriptor() ([]byte, []int) {
- return fileDescriptor_a04e278ef61ac07a, []int{0, 1}
+ return file_pb_autonat_proto_rawDescGZIP(), []int{0, 1}
}
type Message struct {
- Type *Message_MessageType `protobuf:"varint,1,opt,name=type,enum=autonat.pb.Message_MessageType" json:"type,omitempty"`
- Dial *Message_Dial `protobuf:"bytes,2,opt,name=dial" json:"dial,omitempty"`
- DialResponse *Message_DialResponse `protobuf:"bytes,3,opt,name=dialResponse" json:"dialResponse,omitempty"`
- XXX_NoUnkeyedLiteral struct{} `json:"-"`
- XXX_unrecognized []byte `json:"-"`
- XXX_sizecache int32 `json:"-"`
-}
+ state protoimpl.MessageState
+ sizeCache protoimpl.SizeCache
+ unknownFields protoimpl.UnknownFields
-func (m *Message) Reset() { *m = Message{} }
-func (m *Message) String() string { return proto.CompactTextString(m) }
-func (*Message) ProtoMessage() {}
-func (*Message) Descriptor() ([]byte, []int) {
- return fileDescriptor_a04e278ef61ac07a, []int{0}
-}
-func (m *Message) XXX_Unmarshal(b []byte) error {
- return m.Unmarshal(b)
-}
-func (m *Message) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) {
- if deterministic {
- return xxx_messageInfo_Message.Marshal(b, m, deterministic)
- } else {
- b = b[:cap(b)]
- n, err := m.MarshalToSizedBuffer(b)
- if err != nil {
- return nil, err
- }
- return b[:n], nil
- }
-}
-func (m *Message) XXX_Merge(src proto.Message) {
- xxx_messageInfo_Message.Merge(m, src)
-}
-func (m *Message) XXX_Size() int {
- return m.Size()
+ Type *Message_MessageType `protobuf:"varint,1,opt,name=type,enum=autonat.pb.Message_MessageType" json:"type,omitempty"`
+ Dial *Message_Dial `protobuf:"bytes,2,opt,name=dial" json:"dial,omitempty"`
+ DialResponse *Message_DialResponse `protobuf:"bytes,3,opt,name=dialResponse" json:"dialResponse,omitempty"`
}
-func (m *Message) XXX_DiscardUnknown() {
- xxx_messageInfo_Message.DiscardUnknown(m)
-}
-
-var xxx_messageInfo_Message proto.InternalMessageInfo
-func (m *Message) GetType() Message_MessageType {
- if m != nil && m.Type != nil {
- return *m.Type
+func (x *Message) Reset() {
+ *x = Message{}
+ if protoimpl.UnsafeEnabled {
+ mi := &file_pb_autonat_proto_msgTypes[0]
+ ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
+ ms.StoreMessageInfo(mi)
}
- return Message_DIAL
}
-func (m *Message) GetDial() *Message_Dial {
- if m != nil {
- return m.Dial
- }
- return nil
+func (x *Message) String() string {
+ return protoimpl.X.MessageStringOf(x)
}
-func (m *Message) GetDialResponse() *Message_DialResponse {
- if m != nil {
- return m.DialResponse
+func (*Message) ProtoMessage() {}
+
+func (x *Message) ProtoReflect() protoreflect.Message {
+ mi := &file_pb_autonat_proto_msgTypes[0]
+ if protoimpl.UnsafeEnabled && x != nil {
+ ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
+ if ms.LoadMessageInfo() == nil {
+ ms.StoreMessageInfo(mi)
+ }
+ return ms
}
- return nil
+ return mi.MessageOf(x)
}
-type Message_PeerInfo struct {
- Id []byte `protobuf:"bytes,1,opt,name=id" json:"id,omitempty"`
- Addrs [][]byte `protobuf:"bytes,2,rep,name=addrs" json:"addrs,omitempty"`
- XXX_NoUnkeyedLiteral struct{} `json:"-"`
- XXX_unrecognized []byte `json:"-"`
- XXX_sizecache int32 `json:"-"`
+// Deprecated: Use Message.ProtoReflect.Descriptor instead.
+func (*Message) Descriptor() ([]byte, []int) {
+ return file_pb_autonat_proto_rawDescGZIP(), []int{0}
}
-func (m *Message_PeerInfo) Reset() { *m = Message_PeerInfo{} }
-func (m *Message_PeerInfo) String() string { return proto.CompactTextString(m) }
-func (*Message_PeerInfo) ProtoMessage() {}
-func (*Message_PeerInfo) Descriptor() ([]byte, []int) {
- return fileDescriptor_a04e278ef61ac07a, []int{0, 0}
-}
-func (m *Message_PeerInfo) XXX_Unmarshal(b []byte) error {
- return m.Unmarshal(b)
-}
-func (m *Message_PeerInfo) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) {
- if deterministic {
- return xxx_messageInfo_Message_PeerInfo.Marshal(b, m, deterministic)
- } else {
- b = b[:cap(b)]
- n, err := m.MarshalToSizedBuffer(b)
- if err != nil {
- return nil, err
- }
- return b[:n], nil
+func (x *Message) GetType() Message_MessageType {
+ if x != nil && x.Type != nil {
+ return *x.Type
}
-}
-func (m *Message_PeerInfo) XXX_Merge(src proto.Message) {
- xxx_messageInfo_Message_PeerInfo.Merge(m, src)
-}
-func (m *Message_PeerInfo) XXX_Size() int {
- return m.Size()
-}
-func (m *Message_PeerInfo) XXX_DiscardUnknown() {
- xxx_messageInfo_Message_PeerInfo.DiscardUnknown(m)
+ return Message_DIAL
}
-var xxx_messageInfo_Message_PeerInfo proto.InternalMessageInfo
-
-func (m *Message_PeerInfo) GetId() []byte {
- if m != nil {
- return m.Id
+func (x *Message) GetDial() *Message_Dial {
+ if x != nil {
+ return x.Dial
}
return nil
}
-func (m *Message_PeerInfo) GetAddrs() [][]byte {
- if m != nil {
- return m.Addrs
+func (x *Message) GetDialResponse() *Message_DialResponse {
+ if x != nil {
+ return x.DialResponse
}
return nil
}
-type Message_Dial struct {
- Peer *Message_PeerInfo `protobuf:"bytes,1,opt,name=peer" json:"peer,omitempty"`
- XXX_NoUnkeyedLiteral struct{} `json:"-"`
- XXX_unrecognized []byte `json:"-"`
- XXX_sizecache int32 `json:"-"`
-}
+type Message_PeerInfo struct {
+ state protoimpl.MessageState
+ sizeCache protoimpl.SizeCache
+ unknownFields protoimpl.UnknownFields
-func (m *Message_Dial) Reset() { *m = Message_Dial{} }
-func (m *Message_Dial) String() string { return proto.CompactTextString(m) }
-func (*Message_Dial) ProtoMessage() {}
-func (*Message_Dial) Descriptor() ([]byte, []int) {
- return fileDescriptor_a04e278ef61ac07a, []int{0, 1}
-}
-func (m *Message_Dial) XXX_Unmarshal(b []byte) error {
- return m.Unmarshal(b)
-}
-func (m *Message_Dial) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) {
- if deterministic {
- return xxx_messageInfo_Message_Dial.Marshal(b, m, deterministic)
- } else {
- b = b[:cap(b)]
- n, err := m.MarshalToSizedBuffer(b)
- if err != nil {
- return nil, err
- }
- return b[:n], nil
- }
-}
-func (m *Message_Dial) XXX_Merge(src proto.Message) {
- xxx_messageInfo_Message_Dial.Merge(m, src)
-}
-func (m *Message_Dial) XXX_Size() int {
- return m.Size()
-}
-func (m *Message_Dial) XXX_DiscardUnknown() {
- xxx_messageInfo_Message_Dial.DiscardUnknown(m)
+ Id []byte `protobuf:"bytes,1,opt,name=id" json:"id,omitempty"`
+ Addrs [][]byte `protobuf:"bytes,2,rep,name=addrs" json:"addrs,omitempty"`
}
-var xxx_messageInfo_Message_Dial proto.InternalMessageInfo
-
-func (m *Message_Dial) GetPeer() *Message_PeerInfo {
- if m != nil {
- return m.Peer
+func (x *Message_PeerInfo) Reset() {
+ *x = Message_PeerInfo{}
+ if protoimpl.UnsafeEnabled {
+ mi := &file_pb_autonat_proto_msgTypes[1]
+ ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
+ ms.StoreMessageInfo(mi)
}
- return nil
}
-type Message_DialResponse struct {
- Status *Message_ResponseStatus `protobuf:"varint,1,opt,name=status,enum=autonat.pb.Message_ResponseStatus" json:"status,omitempty"`
- StatusText *string `protobuf:"bytes,2,opt,name=statusText" json:"statusText,omitempty"`
- Addr []byte `protobuf:"bytes,3,opt,name=addr" json:"addr,omitempty"`
- XXX_NoUnkeyedLiteral struct{} `json:"-"`
- XXX_unrecognized []byte `json:"-"`
- XXX_sizecache int32 `json:"-"`
+func (x *Message_PeerInfo) String() string {
+ return protoimpl.X.MessageStringOf(x)
}
-func (m *Message_DialResponse) Reset() { *m = Message_DialResponse{} }
-func (m *Message_DialResponse) String() string { return proto.CompactTextString(m) }
-func (*Message_DialResponse) ProtoMessage() {}
-func (*Message_DialResponse) Descriptor() ([]byte, []int) {
- return fileDescriptor_a04e278ef61ac07a, []int{0, 2}
-}
-func (m *Message_DialResponse) XXX_Unmarshal(b []byte) error {
- return m.Unmarshal(b)
-}
-func (m *Message_DialResponse) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) {
- if deterministic {
- return xxx_messageInfo_Message_DialResponse.Marshal(b, m, deterministic)
- } else {
- b = b[:cap(b)]
- n, err := m.MarshalToSizedBuffer(b)
- if err != nil {
- return nil, err
+func (*Message_PeerInfo) ProtoMessage() {}
+
+func (x *Message_PeerInfo) ProtoReflect() protoreflect.Message {
+ mi := &file_pb_autonat_proto_msgTypes[1]
+ if protoimpl.UnsafeEnabled && x != nil {
+ ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
+ if ms.LoadMessageInfo() == nil {
+ ms.StoreMessageInfo(mi)
}
- return b[:n], nil
+ return ms
}
-}
-func (m *Message_DialResponse) XXX_Merge(src proto.Message) {
- xxx_messageInfo_Message_DialResponse.Merge(m, src)
-}
-func (m *Message_DialResponse) XXX_Size() int {
- return m.Size()
-}
-func (m *Message_DialResponse) XXX_DiscardUnknown() {
- xxx_messageInfo_Message_DialResponse.DiscardUnknown(m)
+ return mi.MessageOf(x)
}
-var xxx_messageInfo_Message_DialResponse proto.InternalMessageInfo
-
-func (m *Message_DialResponse) GetStatus() Message_ResponseStatus {
- if m != nil && m.Status != nil {
- return *m.Status
- }
- return Message_OK
+// Deprecated: Use Message_PeerInfo.ProtoReflect.Descriptor instead.
+func (*Message_PeerInfo) Descriptor() ([]byte, []int) {
+ return file_pb_autonat_proto_rawDescGZIP(), []int{0, 0}
}
-func (m *Message_DialResponse) GetStatusText() string {
- if m != nil && m.StatusText != nil {
- return *m.StatusText
+func (x *Message_PeerInfo) GetId() []byte {
+ if x != nil {
+ return x.Id
}
- return ""
+ return nil
}
-func (m *Message_DialResponse) GetAddr() []byte {
- if m != nil {
- return m.Addr
+func (x *Message_PeerInfo) GetAddrs() [][]byte {
+ if x != nil {
+ return x.Addrs
}
return nil
}
-func init() {
- proto.RegisterEnum("autonat.pb.Message_MessageType", Message_MessageType_name, Message_MessageType_value)
- proto.RegisterEnum("autonat.pb.Message_ResponseStatus", Message_ResponseStatus_name, Message_ResponseStatus_value)
- proto.RegisterType((*Message)(nil), "autonat.pb.Message")
- proto.RegisterType((*Message_PeerInfo)(nil), "autonat.pb.Message.PeerInfo")
- proto.RegisterType((*Message_Dial)(nil), "autonat.pb.Message.Dial")
- proto.RegisterType((*Message_DialResponse)(nil), "autonat.pb.Message.DialResponse")
-}
+type Message_Dial struct {
+ state protoimpl.MessageState
+ sizeCache protoimpl.SizeCache
+ unknownFields protoimpl.UnknownFields
-func init() { proto.RegisterFile("autonat.proto", fileDescriptor_a04e278ef61ac07a) }
-
-var fileDescriptor_a04e278ef61ac07a = []byte{
- // 372 bytes of a gzipped FileDescriptorProto
- 0x1f, 0x8b, 0x08, 0x00, 0x00, 0x00, 0x00, 0x00, 0x02, 0xff, 0x74, 0x90, 0xcf, 0x8a, 0xda, 0x50,
- 0x14, 0xc6, 0xbd, 0x31, 0xb5, 0xf6, 0x18, 0xc3, 0xed, 0xa1, 0x85, 0x20, 0x25, 0x0d, 0x59, 0x49,
- 0x29, 0x22, 0x76, 0x53, 0xba, 0x53, 0x72, 0x0b, 0xd2, 0x56, 0xed, 0x49, 0x5c, 0x87, 0x94, 0xdc,
- 0x0e, 0x01, 0x31, 0x21, 0x89, 0x30, 0x6e, 0xe6, 0x89, 0x66, 0x3b, 0xef, 0xe0, 0x72, 0x1e, 0x61,
- 0xf0, 0x49, 0x86, 0x5c, 0xa3, 0xa3, 0xe0, 0xac, 0xce, 0x1f, 0x7e, 0xdf, 0x39, 0x1f, 0x1f, 0x74,
- 0xa3, 0x4d, 0x99, 0xae, 0xa3, 0x72, 0x90, 0xe5, 0x69, 0x99, 0x22, 0x9c, 0xc6, 0x7f, 0xee, 0x83,
- 0x0e, 0x6f, 0xff, 0xc8, 0xa2, 0x88, 0x6e, 0x24, 0x7e, 0x03, 0xbd, 0xdc, 0x66, 0xd2, 0x62, 0x0e,
- 0xeb, 0x9b, 0xa3, 0xcf, 0x83, 0x17, 0x6c, 0x50, 0x23, 0xc7, 0x1a, 0x6c, 0x33, 0x49, 0x0a, 0xc6,
- 0xaf, 0xa0, 0xc7, 0x49, 0xb4, 0xb2, 0x34, 0x87, 0xf5, 0x3b, 0x23, 0xeb, 0x9a, 0xc8, 0x4b, 0xa2,
- 0x15, 0x29, 0x0a, 0x3d, 0x30, 0xaa, 0x4a, 0xb2, 0xc8, 0xd2, 0x75, 0x21, 0xad, 0xa6, 0x52, 0x39,
- 0xaf, 0xaa, 0x6a, 0x8e, 0x2e, 0x54, 0xbd, 0x21, 0xb4, 0x17, 0x52, 0xe6, 0xd3, 0xf5, 0xff, 0x14,
- 0x4d, 0xd0, 0x92, 0x58, 0x59, 0x36, 0x48, 0x4b, 0x62, 0xfc, 0x00, 0x6f, 0xa2, 0x38, 0xce, 0x0b,
- 0x4b, 0x73, 0x9a, 0x7d, 0x83, 0x0e, 0x43, 0xef, 0x3b, 0xe8, 0xd5, 0x3d, 0x1c, 0x82, 0x9e, 0x49,
- 0x99, 0x2b, 0xbe, 0x33, 0xfa, 0x74, 0xed, 0xef, 0xf1, 0x32, 0x29, 0xb2, 0x77, 0x07, 0xc6, 0xb9,
- 0x13, 0xfc, 0x01, 0xad, 0xa2, 0x8c, 0xca, 0x4d, 0x51, 0xc7, 0xe4, 0x5e, 0xbb, 0x71, 0xa4, 0x7d,
- 0x45, 0x52, 0xad, 0x40, 0x1b, 0xe0, 0xd0, 0x05, 0xf2, 0xb6, 0x54, 0x89, 0xbd, 0xa3, 0xb3, 0x0d,
- 0x22, 0xe8, 0x95, 0x5d, 0x95, 0x8a, 0x41, 0xaa, 0x77, 0xbf, 0x40, 0xe7, 0x2c, 0x74, 0x6c, 0x83,
- 0xee, 0x4d, 0xc7, 0xbf, 0x79, 0x03, 0xdf, 0x43, 0xb7, 0xea, 0x42, 0x12, 0xfe, 0x62, 0x3e, 0xf3,
- 0x05, 0x67, 0x6e, 0x02, 0xe6, 0xe5, 0x67, 0x6c, 0x81, 0x36, 0xff, 0xc5, 0x1b, 0xc8, 0xc1, 0x10,
- 0xa1, 0xc2, 0x05, 0xd1, 0x9c, 0x78, 0x8c, 0x08, 0x66, 0xbd, 0x21, 0xf1, 0x73, 0xe9, 0x0b, 0x8f,
- 0x4b, 0x44, 0xe8, 0x8a, 0x70, 0x32, 0xf6, 0x42, 0x12, 0x7f, 0x97, 0xc2, 0x0f, 0xf8, 0x8e, 0xe1,
- 0x47, 0xe0, 0x22, 0x9c, 0xce, 0x02, 0x41, 0xb3, 0x93, 0xfa, 0x5e, 0x9b, 0x18, 0xbb, 0xbd, 0xcd,
- 0x1e, 0xf7, 0x36, 0x7b, 0xda, 0xdb, 0xec, 0x39, 0x00, 0x00, 0xff, 0xff, 0x8e, 0xe2, 0x93, 0x4e,
- 0x61, 0x02, 0x00, 0x00,
+ Peer *Message_PeerInfo `protobuf:"bytes,1,opt,name=peer" json:"peer,omitempty"`
}
-func (m *Message) Marshal() (dAtA []byte, err error) {
- size := m.Size()
- dAtA = make([]byte, size)
- n, err := m.MarshalToSizedBuffer(dAtA[:size])
- if err != nil {
- return nil, err
+func (x *Message_Dial) Reset() {
+ *x = Message_Dial{}
+ if protoimpl.UnsafeEnabled {
+ mi := &file_pb_autonat_proto_msgTypes[2]
+ ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
+ ms.StoreMessageInfo(mi)
}
- return dAtA[:n], nil
}
-func (m *Message) MarshalTo(dAtA []byte) (int, error) {
- size := m.Size()
- return m.MarshalToSizedBuffer(dAtA[:size])
+func (x *Message_Dial) String() string {
+ return protoimpl.X.MessageStringOf(x)
}
-func (m *Message) MarshalToSizedBuffer(dAtA []byte) (int, error) {
- i := len(dAtA)
- _ = i
- var l int
- _ = l
- if m.XXX_unrecognized != nil {
- i -= len(m.XXX_unrecognized)
- copy(dAtA[i:], m.XXX_unrecognized)
- }
- if m.DialResponse != nil {
- {
- size, err := m.DialResponse.MarshalToSizedBuffer(dAtA[:i])
- if err != nil {
- return 0, err
- }
- i -= size
- i = encodeVarintAutonat(dAtA, i, uint64(size))
- }
- i--
- dAtA[i] = 0x1a
- }
- if m.Dial != nil {
- {
- size, err := m.Dial.MarshalToSizedBuffer(dAtA[:i])
- if err != nil {
- return 0, err
- }
- i -= size
- i = encodeVarintAutonat(dAtA, i, uint64(size))
- }
- i--
- dAtA[i] = 0x12
- }
- if m.Type != nil {
- i = encodeVarintAutonat(dAtA, i, uint64(*m.Type))
- i--
- dAtA[i] = 0x8
- }
- return len(dAtA) - i, nil
-}
+func (*Message_Dial) ProtoMessage() {}
-func (m *Message_PeerInfo) Marshal() (dAtA []byte, err error) {
- size := m.Size()
- dAtA = make([]byte, size)
- n, err := m.MarshalToSizedBuffer(dAtA[:size])
- if err != nil {
- return nil, err
- }
- return dAtA[:n], nil
-}
-
-func (m *Message_PeerInfo) MarshalTo(dAtA []byte) (int, error) {
- size := m.Size()
- return m.MarshalToSizedBuffer(dAtA[:size])
-}
-
-func (m *Message_PeerInfo) MarshalToSizedBuffer(dAtA []byte) (int, error) {
- i := len(dAtA)
- _ = i
- var l int
- _ = l
- if m.XXX_unrecognized != nil {
- i -= len(m.XXX_unrecognized)
- copy(dAtA[i:], m.XXX_unrecognized)
- }
- if len(m.Addrs) > 0 {
- for iNdEx := len(m.Addrs) - 1; iNdEx >= 0; iNdEx-- {
- i -= len(m.Addrs[iNdEx])
- copy(dAtA[i:], m.Addrs[iNdEx])
- i = encodeVarintAutonat(dAtA, i, uint64(len(m.Addrs[iNdEx])))
- i--
- dAtA[i] = 0x12
+func (x *Message_Dial) ProtoReflect() protoreflect.Message {
+ mi := &file_pb_autonat_proto_msgTypes[2]
+ if protoimpl.UnsafeEnabled && x != nil {
+ ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
+ if ms.LoadMessageInfo() == nil {
+ ms.StoreMessageInfo(mi)
}
+ return ms
}
- if m.Id != nil {
- i -= len(m.Id)
- copy(dAtA[i:], m.Id)
- i = encodeVarintAutonat(dAtA, i, uint64(len(m.Id)))
- i--
- dAtA[i] = 0xa
- }
- return len(dAtA) - i, nil
-}
-
-func (m *Message_Dial) Marshal() (dAtA []byte, err error) {
- size := m.Size()
- dAtA = make([]byte, size)
- n, err := m.MarshalToSizedBuffer(dAtA[:size])
- if err != nil {
- return nil, err
- }
- return dAtA[:n], nil
+ return mi.MessageOf(x)
}
-func (m *Message_Dial) MarshalTo(dAtA []byte) (int, error) {
- size := m.Size()
- return m.MarshalToSizedBuffer(dAtA[:size])
+// Deprecated: Use Message_Dial.ProtoReflect.Descriptor instead.
+func (*Message_Dial) Descriptor() ([]byte, []int) {
+ return file_pb_autonat_proto_rawDescGZIP(), []int{0, 1}
}
-func (m *Message_Dial) MarshalToSizedBuffer(dAtA []byte) (int, error) {
- i := len(dAtA)
- _ = i
- var l int
- _ = l
- if m.XXX_unrecognized != nil {
- i -= len(m.XXX_unrecognized)
- copy(dAtA[i:], m.XXX_unrecognized)
- }
- if m.Peer != nil {
- {
- size, err := m.Peer.MarshalToSizedBuffer(dAtA[:i])
- if err != nil {
- return 0, err
- }
- i -= size
- i = encodeVarintAutonat(dAtA, i, uint64(size))
- }
- i--
- dAtA[i] = 0xa
+func (x *Message_Dial) GetPeer() *Message_PeerInfo {
+ if x != nil {
+ return x.Peer
}
- return len(dAtA) - i, nil
+ return nil
}
-func (m *Message_DialResponse) Marshal() (dAtA []byte, err error) {
- size := m.Size()
- dAtA = make([]byte, size)
- n, err := m.MarshalToSizedBuffer(dAtA[:size])
- if err != nil {
- return nil, err
- }
- return dAtA[:n], nil
-}
+type Message_DialResponse struct {
+ state protoimpl.MessageState
+ sizeCache protoimpl.SizeCache
+ unknownFields protoimpl.UnknownFields
-func (m *Message_DialResponse) MarshalTo(dAtA []byte) (int, error) {
- size := m.Size()
- return m.MarshalToSizedBuffer(dAtA[:size])
+ Status *Message_ResponseStatus `protobuf:"varint,1,opt,name=status,enum=autonat.pb.Message_ResponseStatus" json:"status,omitempty"`
+ StatusText *string `protobuf:"bytes,2,opt,name=statusText" json:"statusText,omitempty"`
+ Addr []byte `protobuf:"bytes,3,opt,name=addr" json:"addr,omitempty"`
}
-func (m *Message_DialResponse) MarshalToSizedBuffer(dAtA []byte) (int, error) {
- i := len(dAtA)
- _ = i
- var l int
- _ = l
- if m.XXX_unrecognized != nil {
- i -= len(m.XXX_unrecognized)
- copy(dAtA[i:], m.XXX_unrecognized)
- }
- if m.Addr != nil {
- i -= len(m.Addr)
- copy(dAtA[i:], m.Addr)
- i = encodeVarintAutonat(dAtA, i, uint64(len(m.Addr)))
- i--
- dAtA[i] = 0x1a
+func (x *Message_DialResponse) Reset() {
+ *x = Message_DialResponse{}
+ if protoimpl.UnsafeEnabled {
+ mi := &file_pb_autonat_proto_msgTypes[3]
+ ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
+ ms.StoreMessageInfo(mi)
}
- if m.StatusText != nil {
- i -= len(*m.StatusText)
- copy(dAtA[i:], *m.StatusText)
- i = encodeVarintAutonat(dAtA, i, uint64(len(*m.StatusText)))
- i--
- dAtA[i] = 0x12
- }
- if m.Status != nil {
- i = encodeVarintAutonat(dAtA, i, uint64(*m.Status))
- i--
- dAtA[i] = 0x8
- }
- return len(dAtA) - i, nil
}
-func encodeVarintAutonat(dAtA []byte, offset int, v uint64) int {
- offset -= sovAutonat(v)
- base := offset
- for v >= 1<<7 {
- dAtA[offset] = uint8(v&0x7f | 0x80)
- v >>= 7
- offset++
- }
- dAtA[offset] = uint8(v)
- return base
-}
-func (m *Message) Size() (n int) {
- if m == nil {
- return 0
- }
- var l int
- _ = l
- if m.Type != nil {
- n += 1 + sovAutonat(uint64(*m.Type))
- }
- if m.Dial != nil {
- l = m.Dial.Size()
- n += 1 + l + sovAutonat(uint64(l))
- }
- if m.DialResponse != nil {
- l = m.DialResponse.Size()
- n += 1 + l + sovAutonat(uint64(l))
- }
- if m.XXX_unrecognized != nil {
- n += len(m.XXX_unrecognized)
- }
- return n
+func (x *Message_DialResponse) String() string {
+ return protoimpl.X.MessageStringOf(x)
}
-func (m *Message_PeerInfo) Size() (n int) {
- if m == nil {
- return 0
- }
- var l int
- _ = l
- if m.Id != nil {
- l = len(m.Id)
- n += 1 + l + sovAutonat(uint64(l))
- }
- if len(m.Addrs) > 0 {
- for _, b := range m.Addrs {
- l = len(b)
- n += 1 + l + sovAutonat(uint64(l))
+func (*Message_DialResponse) ProtoMessage() {}
+
+func (x *Message_DialResponse) ProtoReflect() protoreflect.Message {
+ mi := &file_pb_autonat_proto_msgTypes[3]
+ if protoimpl.UnsafeEnabled && x != nil {
+ ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
+ if ms.LoadMessageInfo() == nil {
+ ms.StoreMessageInfo(mi)
}
+ return ms
}
- if m.XXX_unrecognized != nil {
- n += len(m.XXX_unrecognized)
- }
- return n
+ return mi.MessageOf(x)
}
-func (m *Message_Dial) Size() (n int) {
- if m == nil {
- return 0
- }
- var l int
- _ = l
- if m.Peer != nil {
- l = m.Peer.Size()
- n += 1 + l + sovAutonat(uint64(l))
- }
- if m.XXX_unrecognized != nil {
- n += len(m.XXX_unrecognized)
- }
- return n
+// Deprecated: Use Message_DialResponse.ProtoReflect.Descriptor instead.
+func (*Message_DialResponse) Descriptor() ([]byte, []int) {
+ return file_pb_autonat_proto_rawDescGZIP(), []int{0, 2}
}
-func (m *Message_DialResponse) Size() (n int) {
- if m == nil {
- return 0
- }
- var l int
- _ = l
- if m.Status != nil {
- n += 1 + sovAutonat(uint64(*m.Status))
- }
- if m.StatusText != nil {
- l = len(*m.StatusText)
- n += 1 + l + sovAutonat(uint64(l))
+func (x *Message_DialResponse) GetStatus() Message_ResponseStatus {
+ if x != nil && x.Status != nil {
+ return *x.Status
}
- if m.Addr != nil {
- l = len(m.Addr)
- n += 1 + l + sovAutonat(uint64(l))
- }
- if m.XXX_unrecognized != nil {
- n += len(m.XXX_unrecognized)
- }
- return n
-}
-
-func sovAutonat(x uint64) (n int) {
- return (math_bits.Len64(x|1) + 6) / 7
-}
-func sozAutonat(x uint64) (n int) {
- return sovAutonat(uint64((x << 1) ^ uint64((int64(x) >> 63))))
+ return Message_OK
}
-func (m *Message) Unmarshal(dAtA []byte) error {
- l := len(dAtA)
- iNdEx := 0
- for iNdEx < l {
- preIndex := iNdEx
- var wire uint64
- for shift := uint(0); ; shift += 7 {
- if shift >= 64 {
- return ErrIntOverflowAutonat
- }
- if iNdEx >= l {
- return io.ErrUnexpectedEOF
- }
- b := dAtA[iNdEx]
- iNdEx++
- wire |= uint64(b&0x7F) << shift
- if b < 0x80 {
- break
- }
- }
- fieldNum := int32(wire >> 3)
- wireType := int(wire & 0x7)
- if wireType == 4 {
- return fmt.Errorf("proto: Message: wiretype end group for non-group")
- }
- if fieldNum <= 0 {
- return fmt.Errorf("proto: Message: illegal tag %d (wire type %d)", fieldNum, wire)
- }
- switch fieldNum {
- case 1:
- if wireType != 0 {
- return fmt.Errorf("proto: wrong wireType = %d for field Type", wireType)
- }
- var v Message_MessageType
- for shift := uint(0); ; shift += 7 {
- if shift >= 64 {
- return ErrIntOverflowAutonat
- }
- if iNdEx >= l {
- return io.ErrUnexpectedEOF
- }
- b := dAtA[iNdEx]
- iNdEx++
- v |= Message_MessageType(b&0x7F) << shift
- if b < 0x80 {
- break
- }
- }
- m.Type = &v
- case 2:
- if wireType != 2 {
- return fmt.Errorf("proto: wrong wireType = %d for field Dial", wireType)
- }
- var msglen int
- for shift := uint(0); ; shift += 7 {
- if shift >= 64 {
- return ErrIntOverflowAutonat
- }
- if iNdEx >= l {
- return io.ErrUnexpectedEOF
- }
- b := dAtA[iNdEx]
- iNdEx++
- msglen |= int(b&0x7F) << shift
- if b < 0x80 {
- break
- }
- }
- if msglen < 0 {
- return ErrInvalidLengthAutonat
- }
- postIndex := iNdEx + msglen
- if postIndex < 0 {
- return ErrInvalidLengthAutonat
- }
- if postIndex > l {
- return io.ErrUnexpectedEOF
- }
- if m.Dial == nil {
- m.Dial = &Message_Dial{}
- }
- if err := m.Dial.Unmarshal(dAtA[iNdEx:postIndex]); err != nil {
- return err
- }
- iNdEx = postIndex
- case 3:
- if wireType != 2 {
- return fmt.Errorf("proto: wrong wireType = %d for field DialResponse", wireType)
- }
- var msglen int
- for shift := uint(0); ; shift += 7 {
- if shift >= 64 {
- return ErrIntOverflowAutonat
- }
- if iNdEx >= l {
- return io.ErrUnexpectedEOF
- }
- b := dAtA[iNdEx]
- iNdEx++
- msglen |= int(b&0x7F) << shift
- if b < 0x80 {
- break
- }
- }
- if msglen < 0 {
- return ErrInvalidLengthAutonat
- }
- postIndex := iNdEx + msglen
- if postIndex < 0 {
- return ErrInvalidLengthAutonat
- }
- if postIndex > l {
- return io.ErrUnexpectedEOF
- }
- if m.DialResponse == nil {
- m.DialResponse = &Message_DialResponse{}
- }
- if err := m.DialResponse.Unmarshal(dAtA[iNdEx:postIndex]); err != nil {
- return err
- }
- iNdEx = postIndex
- default:
- iNdEx = preIndex
- skippy, err := skipAutonat(dAtA[iNdEx:])
- if err != nil {
- return err
- }
- if skippy < 0 {
- return ErrInvalidLengthAutonat
- }
- if (iNdEx + skippy) < 0 {
- return ErrInvalidLengthAutonat
- }
- if (iNdEx + skippy) > l {
- return io.ErrUnexpectedEOF
- }
- m.XXX_unrecognized = append(m.XXX_unrecognized, dAtA[iNdEx:iNdEx+skippy]...)
- iNdEx += skippy
- }
- }
- if iNdEx > l {
- return io.ErrUnexpectedEOF
+func (x *Message_DialResponse) GetStatusText() string {
+ if x != nil && x.StatusText != nil {
+ return *x.StatusText
}
- return nil
+ return ""
}
-func (m *Message_PeerInfo) Unmarshal(dAtA []byte) error {
- l := len(dAtA)
- iNdEx := 0
- for iNdEx < l {
- preIndex := iNdEx
- var wire uint64
- for shift := uint(0); ; shift += 7 {
- if shift >= 64 {
- return ErrIntOverflowAutonat
- }
- if iNdEx >= l {
- return io.ErrUnexpectedEOF
- }
- b := dAtA[iNdEx]
- iNdEx++
- wire |= uint64(b&0x7F) << shift
- if b < 0x80 {
- break
- }
- }
- fieldNum := int32(wire >> 3)
- wireType := int(wire & 0x7)
- if wireType == 4 {
- return fmt.Errorf("proto: PeerInfo: wiretype end group for non-group")
- }
- if fieldNum <= 0 {
- return fmt.Errorf("proto: PeerInfo: illegal tag %d (wire type %d)", fieldNum, wire)
- }
- switch fieldNum {
- case 1:
- if wireType != 2 {
- return fmt.Errorf("proto: wrong wireType = %d for field Id", wireType)
- }
- var byteLen int
- for shift := uint(0); ; shift += 7 {
- if shift >= 64 {
- return ErrIntOverflowAutonat
- }
- if iNdEx >= l {
- return io.ErrUnexpectedEOF
- }
- b := dAtA[iNdEx]
- iNdEx++
- byteLen |= int(b&0x7F) << shift
- if b < 0x80 {
- break
- }
- }
- if byteLen < 0 {
- return ErrInvalidLengthAutonat
- }
- postIndex := iNdEx + byteLen
- if postIndex < 0 {
- return ErrInvalidLengthAutonat
- }
- if postIndex > l {
- return io.ErrUnexpectedEOF
- }
- m.Id = append(m.Id[:0], dAtA[iNdEx:postIndex]...)
- if m.Id == nil {
- m.Id = []byte{}
- }
- iNdEx = postIndex
- case 2:
- if wireType != 2 {
- return fmt.Errorf("proto: wrong wireType = %d for field Addrs", wireType)
- }
- var byteLen int
- for shift := uint(0); ; shift += 7 {
- if shift >= 64 {
- return ErrIntOverflowAutonat
- }
- if iNdEx >= l {
- return io.ErrUnexpectedEOF
- }
- b := dAtA[iNdEx]
- iNdEx++
- byteLen |= int(b&0x7F) << shift
- if b < 0x80 {
- break
- }
- }
- if byteLen < 0 {
- return ErrInvalidLengthAutonat
- }
- postIndex := iNdEx + byteLen
- if postIndex < 0 {
- return ErrInvalidLengthAutonat
- }
- if postIndex > l {
- return io.ErrUnexpectedEOF
- }
- m.Addrs = append(m.Addrs, make([]byte, postIndex-iNdEx))
- copy(m.Addrs[len(m.Addrs)-1], dAtA[iNdEx:postIndex])
- iNdEx = postIndex
- default:
- iNdEx = preIndex
- skippy, err := skipAutonat(dAtA[iNdEx:])
- if err != nil {
- return err
- }
- if skippy < 0 {
- return ErrInvalidLengthAutonat
- }
- if (iNdEx + skippy) < 0 {
- return ErrInvalidLengthAutonat
- }
- if (iNdEx + skippy) > l {
- return io.ErrUnexpectedEOF
- }
- m.XXX_unrecognized = append(m.XXX_unrecognized, dAtA[iNdEx:iNdEx+skippy]...)
- iNdEx += skippy
- }
- }
- if iNdEx > l {
- return io.ErrUnexpectedEOF
+func (x *Message_DialResponse) GetAddr() []byte {
+ if x != nil {
+ return x.Addr
}
return nil
}
-func (m *Message_Dial) Unmarshal(dAtA []byte) error {
- l := len(dAtA)
- iNdEx := 0
- for iNdEx < l {
- preIndex := iNdEx
- var wire uint64
- for shift := uint(0); ; shift += 7 {
- if shift >= 64 {
- return ErrIntOverflowAutonat
- }
- if iNdEx >= l {
- return io.ErrUnexpectedEOF
- }
- b := dAtA[iNdEx]
- iNdEx++
- wire |= uint64(b&0x7F) << shift
- if b < 0x80 {
- break
- }
- }
- fieldNum := int32(wire >> 3)
- wireType := int(wire & 0x7)
- if wireType == 4 {
- return fmt.Errorf("proto: Dial: wiretype end group for non-group")
- }
- if fieldNum <= 0 {
- return fmt.Errorf("proto: Dial: illegal tag %d (wire type %d)", fieldNum, wire)
- }
- switch fieldNum {
- case 1:
- if wireType != 2 {
- return fmt.Errorf("proto: wrong wireType = %d for field Peer", wireType)
- }
- var msglen int
- for shift := uint(0); ; shift += 7 {
- if shift >= 64 {
- return ErrIntOverflowAutonat
- }
- if iNdEx >= l {
- return io.ErrUnexpectedEOF
- }
- b := dAtA[iNdEx]
- iNdEx++
- msglen |= int(b&0x7F) << shift
- if b < 0x80 {
- break
- }
- }
- if msglen < 0 {
- return ErrInvalidLengthAutonat
- }
- postIndex := iNdEx + msglen
- if postIndex < 0 {
- return ErrInvalidLengthAutonat
- }
- if postIndex > l {
- return io.ErrUnexpectedEOF
- }
- if m.Peer == nil {
- m.Peer = &Message_PeerInfo{}
- }
- if err := m.Peer.Unmarshal(dAtA[iNdEx:postIndex]); err != nil {
- return err
- }
- iNdEx = postIndex
- default:
- iNdEx = preIndex
- skippy, err := skipAutonat(dAtA[iNdEx:])
- if err != nil {
- return err
- }
- if skippy < 0 {
- return ErrInvalidLengthAutonat
- }
- if (iNdEx + skippy) < 0 {
- return ErrInvalidLengthAutonat
- }
- if (iNdEx + skippy) > l {
- return io.ErrUnexpectedEOF
- }
- m.XXX_unrecognized = append(m.XXX_unrecognized, dAtA[iNdEx:iNdEx+skippy]...)
- iNdEx += skippy
- }
- }
- if iNdEx > l {
- return io.ErrUnexpectedEOF
- }
- return nil
+var File_pb_autonat_proto protoreflect.FileDescriptor
+
+var file_pb_autonat_proto_rawDesc = []byte{
+ 0x0a, 0x10, 0x70, 0x62, 0x2f, 0x61, 0x75, 0x74, 0x6f, 0x6e, 0x61, 0x74, 0x2e, 0x70, 0x72, 0x6f,
+ 0x74, 0x6f, 0x12, 0x0a, 0x61, 0x75, 0x74, 0x6f, 0x6e, 0x61, 0x74, 0x2e, 0x70, 0x62, 0x22, 0xb5,
+ 0x04, 0x0a, 0x07, 0x4d, 0x65, 0x73, 0x73, 0x61, 0x67, 0x65, 0x12, 0x33, 0x0a, 0x04, 0x74, 0x79,
+ 0x70, 0x65, 0x18, 0x01, 0x20, 0x01, 0x28, 0x0e, 0x32, 0x1f, 0x2e, 0x61, 0x75, 0x74, 0x6f, 0x6e,
+ 0x61, 0x74, 0x2e, 0x70, 0x62, 0x2e, 0x4d, 0x65, 0x73, 0x73, 0x61, 0x67, 0x65, 0x2e, 0x4d, 0x65,
+ 0x73, 0x73, 0x61, 0x67, 0x65, 0x54, 0x79, 0x70, 0x65, 0x52, 0x04, 0x74, 0x79, 0x70, 0x65, 0x12,
+ 0x2c, 0x0a, 0x04, 0x64, 0x69, 0x61, 0x6c, 0x18, 0x02, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x18, 0x2e,
+ 0x61, 0x75, 0x74, 0x6f, 0x6e, 0x61, 0x74, 0x2e, 0x70, 0x62, 0x2e, 0x4d, 0x65, 0x73, 0x73, 0x61,
+ 0x67, 0x65, 0x2e, 0x44, 0x69, 0x61, 0x6c, 0x52, 0x04, 0x64, 0x69, 0x61, 0x6c, 0x12, 0x44, 0x0a,
+ 0x0c, 0x64, 0x69, 0x61, 0x6c, 0x52, 0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x18, 0x03, 0x20,
+ 0x01, 0x28, 0x0b, 0x32, 0x20, 0x2e, 0x61, 0x75, 0x74, 0x6f, 0x6e, 0x61, 0x74, 0x2e, 0x70, 0x62,
+ 0x2e, 0x4d, 0x65, 0x73, 0x73, 0x61, 0x67, 0x65, 0x2e, 0x44, 0x69, 0x61, 0x6c, 0x52, 0x65, 0x73,
+ 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x52, 0x0c, 0x64, 0x69, 0x61, 0x6c, 0x52, 0x65, 0x73, 0x70, 0x6f,
+ 0x6e, 0x73, 0x65, 0x1a, 0x30, 0x0a, 0x08, 0x50, 0x65, 0x65, 0x72, 0x49, 0x6e, 0x66, 0x6f, 0x12,
+ 0x0e, 0x0a, 0x02, 0x69, 0x64, 0x18, 0x01, 0x20, 0x01, 0x28, 0x0c, 0x52, 0x02, 0x69, 0x64, 0x12,
+ 0x14, 0x0a, 0x05, 0x61, 0x64, 0x64, 0x72, 0x73, 0x18, 0x02, 0x20, 0x03, 0x28, 0x0c, 0x52, 0x05,
+ 0x61, 0x64, 0x64, 0x72, 0x73, 0x1a, 0x38, 0x0a, 0x04, 0x44, 0x69, 0x61, 0x6c, 0x12, 0x30, 0x0a,
+ 0x04, 0x70, 0x65, 0x65, 0x72, 0x18, 0x01, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x1c, 0x2e, 0x61, 0x75,
+ 0x74, 0x6f, 0x6e, 0x61, 0x74, 0x2e, 0x70, 0x62, 0x2e, 0x4d, 0x65, 0x73, 0x73, 0x61, 0x67, 0x65,
+ 0x2e, 0x50, 0x65, 0x65, 0x72, 0x49, 0x6e, 0x66, 0x6f, 0x52, 0x04, 0x70, 0x65, 0x65, 0x72, 0x1a,
+ 0x7e, 0x0a, 0x0c, 0x44, 0x69, 0x61, 0x6c, 0x52, 0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x12,
+ 0x3a, 0x0a, 0x06, 0x73, 0x74, 0x61, 0x74, 0x75, 0x73, 0x18, 0x01, 0x20, 0x01, 0x28, 0x0e, 0x32,
+ 0x22, 0x2e, 0x61, 0x75, 0x74, 0x6f, 0x6e, 0x61, 0x74, 0x2e, 0x70, 0x62, 0x2e, 0x4d, 0x65, 0x73,
+ 0x73, 0x61, 0x67, 0x65, 0x2e, 0x52, 0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x53, 0x74, 0x61,
+ 0x74, 0x75, 0x73, 0x52, 0x06, 0x73, 0x74, 0x61, 0x74, 0x75, 0x73, 0x12, 0x1e, 0x0a, 0x0a, 0x73,
+ 0x74, 0x61, 0x74, 0x75, 0x73, 0x54, 0x65, 0x78, 0x74, 0x18, 0x02, 0x20, 0x01, 0x28, 0x09, 0x52,
+ 0x0a, 0x73, 0x74, 0x61, 0x74, 0x75, 0x73, 0x54, 0x65, 0x78, 0x74, 0x12, 0x12, 0x0a, 0x04, 0x61,
+ 0x64, 0x64, 0x72, 0x18, 0x03, 0x20, 0x01, 0x28, 0x0c, 0x52, 0x04, 0x61, 0x64, 0x64, 0x72, 0x22,
+ 0x2a, 0x0a, 0x0b, 0x4d, 0x65, 0x73, 0x73, 0x61, 0x67, 0x65, 0x54, 0x79, 0x70, 0x65, 0x12, 0x08,
+ 0x0a, 0x04, 0x44, 0x49, 0x41, 0x4c, 0x10, 0x00, 0x12, 0x11, 0x0a, 0x0d, 0x44, 0x49, 0x41, 0x4c,
+ 0x5f, 0x52, 0x45, 0x53, 0x50, 0x4f, 0x4e, 0x53, 0x45, 0x10, 0x01, 0x22, 0x69, 0x0a, 0x0e, 0x52,
+ 0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x53, 0x74, 0x61, 0x74, 0x75, 0x73, 0x12, 0x06, 0x0a,
+ 0x02, 0x4f, 0x4b, 0x10, 0x00, 0x12, 0x10, 0x0a, 0x0c, 0x45, 0x5f, 0x44, 0x49, 0x41, 0x4c, 0x5f,
+ 0x45, 0x52, 0x52, 0x4f, 0x52, 0x10, 0x64, 0x12, 0x12, 0x0a, 0x0e, 0x45, 0x5f, 0x44, 0x49, 0x41,
+ 0x4c, 0x5f, 0x52, 0x45, 0x46, 0x55, 0x53, 0x45, 0x44, 0x10, 0x65, 0x12, 0x12, 0x0a, 0x0d, 0x45,
+ 0x5f, 0x42, 0x41, 0x44, 0x5f, 0x52, 0x45, 0x51, 0x55, 0x45, 0x53, 0x54, 0x10, 0xc8, 0x01, 0x12,
+ 0x15, 0x0a, 0x10, 0x45, 0x5f, 0x49, 0x4e, 0x54, 0x45, 0x52, 0x4e, 0x41, 0x4c, 0x5f, 0x45, 0x52,
+ 0x52, 0x4f, 0x52, 0x10, 0xac, 0x02,
}
-func (m *Message_DialResponse) Unmarshal(dAtA []byte) error {
- l := len(dAtA)
- iNdEx := 0
- for iNdEx < l {
- preIndex := iNdEx
- var wire uint64
- for shift := uint(0); ; shift += 7 {
- if shift >= 64 {
- return ErrIntOverflowAutonat
- }
- if iNdEx >= l {
- return io.ErrUnexpectedEOF
- }
- b := dAtA[iNdEx]
- iNdEx++
- wire |= uint64(b&0x7F) << shift
- if b < 0x80 {
- break
+
+var (
+ file_pb_autonat_proto_rawDescOnce sync.Once
+ file_pb_autonat_proto_rawDescData = file_pb_autonat_proto_rawDesc
+)
+
+func file_pb_autonat_proto_rawDescGZIP() []byte {
+ file_pb_autonat_proto_rawDescOnce.Do(func() {
+ file_pb_autonat_proto_rawDescData = protoimpl.X.CompressGZIP(file_pb_autonat_proto_rawDescData)
+ })
+ return file_pb_autonat_proto_rawDescData
+}
+
+var file_pb_autonat_proto_enumTypes = make([]protoimpl.EnumInfo, 2)
+var file_pb_autonat_proto_msgTypes = make([]protoimpl.MessageInfo, 4)
+var file_pb_autonat_proto_goTypes = []interface{}{
+ (Message_MessageType)(0), // 0: autonat.pb.Message.MessageType
+ (Message_ResponseStatus)(0), // 1: autonat.pb.Message.ResponseStatus
+ (*Message)(nil), // 2: autonat.pb.Message
+ (*Message_PeerInfo)(nil), // 3: autonat.pb.Message.PeerInfo
+ (*Message_Dial)(nil), // 4: autonat.pb.Message.Dial
+ (*Message_DialResponse)(nil), // 5: autonat.pb.Message.DialResponse
+}
+var file_pb_autonat_proto_depIdxs = []int32{
+ 0, // 0: autonat.pb.Message.type:type_name -> autonat.pb.Message.MessageType
+ 4, // 1: autonat.pb.Message.dial:type_name -> autonat.pb.Message.Dial
+ 5, // 2: autonat.pb.Message.dialResponse:type_name -> autonat.pb.Message.DialResponse
+ 3, // 3: autonat.pb.Message.Dial.peer:type_name -> autonat.pb.Message.PeerInfo
+ 1, // 4: autonat.pb.Message.DialResponse.status:type_name -> autonat.pb.Message.ResponseStatus
+ 5, // [5:5] is the sub-list for method output_type
+ 5, // [5:5] is the sub-list for method input_type
+ 5, // [5:5] is the sub-list for extension type_name
+ 5, // [5:5] is the sub-list for extension extendee
+ 0, // [0:5] is the sub-list for field type_name
+}
+
+func init() { file_pb_autonat_proto_init() }
+func file_pb_autonat_proto_init() {
+ if File_pb_autonat_proto != nil {
+ return
+ }
+ if !protoimpl.UnsafeEnabled {
+ file_pb_autonat_proto_msgTypes[0].Exporter = func(v interface{}, i int) interface{} {
+ switch v := v.(*Message); i {
+ case 0:
+ return &v.state
+ case 1:
+ return &v.sizeCache
+ case 2:
+ return &v.unknownFields
+ default:
+ return nil
}
}
- fieldNum := int32(wire >> 3)
- wireType := int(wire & 0x7)
- if wireType == 4 {
- return fmt.Errorf("proto: DialResponse: wiretype end group for non-group")
- }
- if fieldNum <= 0 {
- return fmt.Errorf("proto: DialResponse: illegal tag %d (wire type %d)", fieldNum, wire)
- }
- switch fieldNum {
- case 1:
- if wireType != 0 {
- return fmt.Errorf("proto: wrong wireType = %d for field Status", wireType)
+ file_pb_autonat_proto_msgTypes[1].Exporter = func(v interface{}, i int) interface{} {
+ switch v := v.(*Message_PeerInfo); i {
+ case 0:
+ return &v.state
+ case 1:
+ return &v.sizeCache
+ case 2:
+ return &v.unknownFields
+ default:
+ return nil
}
- var v Message_ResponseStatus
- for shift := uint(0); ; shift += 7 {
- if shift >= 64 {
- return ErrIntOverflowAutonat
- }
- if iNdEx >= l {
- return io.ErrUnexpectedEOF
- }
- b := dAtA[iNdEx]
- iNdEx++
- v |= Message_ResponseStatus(b&0x7F) << shift
- if b < 0x80 {
- break
- }
- }
- m.Status = &v
- case 2:
- if wireType != 2 {
- return fmt.Errorf("proto: wrong wireType = %d for field StatusText", wireType)
- }
- var stringLen uint64
- for shift := uint(0); ; shift += 7 {
- if shift >= 64 {
- return ErrIntOverflowAutonat
- }
- if iNdEx >= l {
- return io.ErrUnexpectedEOF
- }
- b := dAtA[iNdEx]
- iNdEx++
- stringLen |= uint64(b&0x7F) << shift
- if b < 0x80 {
- break
- }
- }
- intStringLen := int(stringLen)
- if intStringLen < 0 {
- return ErrInvalidLengthAutonat
- }
- postIndex := iNdEx + intStringLen
- if postIndex < 0 {
- return ErrInvalidLengthAutonat
- }
- if postIndex > l {
- return io.ErrUnexpectedEOF
- }
- s := string(dAtA[iNdEx:postIndex])
- m.StatusText = &s
- iNdEx = postIndex
- case 3:
- if wireType != 2 {
- return fmt.Errorf("proto: wrong wireType = %d for field Addr", wireType)
- }
- var byteLen int
- for shift := uint(0); ; shift += 7 {
- if shift >= 64 {
- return ErrIntOverflowAutonat
- }
- if iNdEx >= l {
- return io.ErrUnexpectedEOF
- }
- b := dAtA[iNdEx]
- iNdEx++
- byteLen |= int(b&0x7F) << shift
- if b < 0x80 {
- break
- }
- }
- if byteLen < 0 {
- return ErrInvalidLengthAutonat
- }
- postIndex := iNdEx + byteLen
- if postIndex < 0 {
- return ErrInvalidLengthAutonat
- }
- if postIndex > l {
- return io.ErrUnexpectedEOF
- }
- m.Addr = append(m.Addr[:0], dAtA[iNdEx:postIndex]...)
- if m.Addr == nil {
- m.Addr = []byte{}
- }
- iNdEx = postIndex
- default:
- iNdEx = preIndex
- skippy, err := skipAutonat(dAtA[iNdEx:])
- if err != nil {
- return err
- }
- if skippy < 0 {
- return ErrInvalidLengthAutonat
- }
- if (iNdEx + skippy) < 0 {
- return ErrInvalidLengthAutonat
- }
- if (iNdEx + skippy) > l {
- return io.ErrUnexpectedEOF
- }
- m.XXX_unrecognized = append(m.XXX_unrecognized, dAtA[iNdEx:iNdEx+skippy]...)
- iNdEx += skippy
}
- }
-
- if iNdEx > l {
- return io.ErrUnexpectedEOF
- }
- return nil
-}
-func skipAutonat(dAtA []byte) (n int, err error) {
- l := len(dAtA)
- iNdEx := 0
- depth := 0
- for iNdEx < l {
- var wire uint64
- for shift := uint(0); ; shift += 7 {
- if shift >= 64 {
- return 0, ErrIntOverflowAutonat
- }
- if iNdEx >= l {
- return 0, io.ErrUnexpectedEOF
- }
- b := dAtA[iNdEx]
- iNdEx++
- wire |= (uint64(b) & 0x7F) << shift
- if b < 0x80 {
- break
+ file_pb_autonat_proto_msgTypes[2].Exporter = func(v interface{}, i int) interface{} {
+ switch v := v.(*Message_Dial); i {
+ case 0:
+ return &v.state
+ case 1:
+ return &v.sizeCache
+ case 2:
+ return &v.unknownFields
+ default:
+ return nil
}
}
- wireType := int(wire & 0x7)
- switch wireType {
- case 0:
- for shift := uint(0); ; shift += 7 {
- if shift >= 64 {
- return 0, ErrIntOverflowAutonat
- }
- if iNdEx >= l {
- return 0, io.ErrUnexpectedEOF
- }
- iNdEx++
- if dAtA[iNdEx-1] < 0x80 {
- break
- }
- }
- case 1:
- iNdEx += 8
- case 2:
- var length int
- for shift := uint(0); ; shift += 7 {
- if shift >= 64 {
- return 0, ErrIntOverflowAutonat
- }
- if iNdEx >= l {
- return 0, io.ErrUnexpectedEOF
- }
- b := dAtA[iNdEx]
- iNdEx++
- length |= (int(b) & 0x7F) << shift
- if b < 0x80 {
- break
- }
+ file_pb_autonat_proto_msgTypes[3].Exporter = func(v interface{}, i int) interface{} {
+ switch v := v.(*Message_DialResponse); i {
+ case 0:
+ return &v.state
+ case 1:
+ return &v.sizeCache
+ case 2:
+ return &v.unknownFields
+ default:
+ return nil
}
- if length < 0 {
- return 0, ErrInvalidLengthAutonat
- }
- iNdEx += length
- case 3:
- depth++
- case 4:
- if depth == 0 {
- return 0, ErrUnexpectedEndOfGroupAutonat
- }
- depth--
- case 5:
- iNdEx += 4
- default:
- return 0, fmt.Errorf("proto: illegal wireType %d", wireType)
- }
- if iNdEx < 0 {
- return 0, ErrInvalidLengthAutonat
- }
- if depth == 0 {
- return iNdEx, nil
}
}
- return 0, io.ErrUnexpectedEOF
+ type x struct{}
+ out := protoimpl.TypeBuilder{
+ File: protoimpl.DescBuilder{
+ GoPackagePath: reflect.TypeOf(x{}).PkgPath(),
+ RawDescriptor: file_pb_autonat_proto_rawDesc,
+ NumEnums: 2,
+ NumMessages: 4,
+ NumExtensions: 0,
+ NumServices: 0,
+ },
+ GoTypes: file_pb_autonat_proto_goTypes,
+ DependencyIndexes: file_pb_autonat_proto_depIdxs,
+ EnumInfos: file_pb_autonat_proto_enumTypes,
+ MessageInfos: file_pb_autonat_proto_msgTypes,
+ }.Build()
+ File_pb_autonat_proto = out.File
+ file_pb_autonat_proto_rawDesc = nil
+ file_pb_autonat_proto_goTypes = nil
+ file_pb_autonat_proto_depIdxs = nil
}
-
-var (
- ErrInvalidLengthAutonat = fmt.Errorf("proto: negative length found during unmarshaling")
- ErrIntOverflowAutonat = fmt.Errorf("proto: integer overflow")
- ErrUnexpectedEndOfGroupAutonat = fmt.Errorf("proto: unexpected end of group")
-)
diff --git a/vendor/github.com/libp2p/go-libp2p/p2p/host/autonat/proto.go b/vendor/github.com/libp2p/go-libp2p/p2p/host/autonat/proto.go
index 93f273cd6..5bb2de064 100644
--- a/vendor/github.com/libp2p/go-libp2p/p2p/host/autonat/proto.go
+++ b/vendor/github.com/libp2p/go-libp2p/p2p/host/autonat/proto.go
@@ -2,11 +2,13 @@ package autonat
import (
"github.com/libp2p/go-libp2p/core/peer"
- pb "github.com/libp2p/go-libp2p/p2p/host/autonat/pb"
+ "github.com/libp2p/go-libp2p/p2p/host/autonat/pb"
ma "github.com/multiformats/go-multiaddr"
)
+//go:generate protoc --proto_path=$PWD:$PWD/../../.. --go_out=. --go_opt=Mpb/autonat.proto=./pb pb/autonat.proto
+
// AutoNATProto identifies the autonat service protocol
const AutoNATProto = "/libp2p/autonat/1.0.0"
diff --git a/vendor/github.com/libp2p/go-libp2p/p2p/host/autonat/svc.go b/vendor/github.com/libp2p/go-libp2p/p2p/host/autonat/svc.go
index 10136a7de..98b421c9b 100644
--- a/vendor/github.com/libp2p/go-libp2p/p2p/host/autonat/svc.go
+++ b/vendor/github.com/libp2p/go-libp2p/p2p/host/autonat/svc.go
@@ -10,9 +10,10 @@ import (
"github.com/libp2p/go-libp2p/core/network"
"github.com/libp2p/go-libp2p/core/peer"
"github.com/libp2p/go-libp2p/core/peerstore"
- pb "github.com/libp2p/go-libp2p/p2p/host/autonat/pb"
+ "github.com/libp2p/go-libp2p/p2p/host/autonat/pb"
+
+ "github.com/libp2p/go-msgio/pbio"
- "github.com/libp2p/go-msgio/protoio"
ma "github.com/multiformats/go-multiaddr"
)
@@ -69,8 +70,8 @@ func (as *autoNATService) handleStream(s network.Stream) {
pid := s.Conn().RemotePeer()
log.Debugf("New stream from %s", pid.Pretty())
- r := protoio.NewDelimitedReader(s, maxMsgSize)
- w := protoio.NewDelimitedWriter(s)
+ r := pbio.NewDelimitedReader(s, maxMsgSize)
+ w := pbio.NewDelimitedWriter(s)
var req pb.Message
var res pb.Message
@@ -99,6 +100,9 @@ func (as *autoNATService) handleStream(s network.Stream) {
s.Reset()
return
}
+ if as.config.metricsTracer != nil {
+ as.config.metricsTracer.OutgoingDialResponse(res.GetDialResponse().GetStatus())
+ }
}
func (as *autoNATService) handleDial(p peer.ID, obsaddr ma.Multiaddr, mpi *pb.Message_PeerInfo) *pb.Message_DialResponse {
@@ -125,6 +129,9 @@ func (as *autoNATService) handleDial(p peer.ID, obsaddr ma.Multiaddr, mpi *pb.Me
// need to know their public IP address, and it needs to be different from our public IP
// address.
if as.config.dialPolicy.skipDial(obsaddr) {
+ if as.config.metricsTracer != nil {
+ as.config.metricsTracer.OutgoingDialRefused(dial_blocked)
+ }
// Note: versions < v0.20.0 return Message_E_DIAL_ERROR here, thus we can not rely on this error code.
return newDialResponseError(pb.Message_E_DIAL_REFUSED, "refusing to dial peer with blocked observed address")
}
@@ -187,6 +194,9 @@ func (as *autoNATService) handleDial(p peer.ID, obsaddr ma.Multiaddr, mpi *pb.Me
}
if len(addrs) == 0 {
+ if as.config.metricsTracer != nil {
+ as.config.metricsTracer.OutgoingDialRefused(no_valid_address)
+ }
// Note: versions < v0.20.0 return Message_E_DIAL_ERROR here, thus we can not rely on this error code.
return newDialResponseError(pb.Message_E_DIAL_REFUSED, "no dialable addresses")
}
@@ -201,6 +211,9 @@ func (as *autoNATService) doDial(pi peer.AddrInfo) *pb.Message_DialResponse {
if count >= as.config.throttlePeerMax || (as.config.throttleGlobalMax > 0 &&
as.globalReqs >= as.config.throttleGlobalMax) {
as.mx.Unlock()
+ if as.config.metricsTracer != nil {
+ as.config.metricsTracer.OutgoingDialRefused(rate_limited)
+ }
return newDialResponseError(pb.Message_E_DIAL_REFUSED, "too many dials")
}
as.reqs[pi.ID] = count + 1
diff --git a/vendor/github.com/libp2p/go-libp2p/p2p/host/autorelay/autorelay.go b/vendor/github.com/libp2p/go-libp2p/p2p/host/autorelay/autorelay.go
index e4e3568ef..590079853 100644
--- a/vendor/github.com/libp2p/go-libp2p/p2p/host/autorelay/autorelay.go
+++ b/vendor/github.com/libp2p/go-libp2p/p2p/host/autorelay/autorelay.go
@@ -2,12 +2,14 @@ package autorelay
import (
"context"
+ "errors"
"sync"
"github.com/libp2p/go-libp2p/core/event"
"github.com/libp2p/go-libp2p/core/host"
"github.com/libp2p/go-libp2p/core/network"
basic "github.com/libp2p/go-libp2p/p2p/host/basic"
+ "github.com/libp2p/go-libp2p/p2p/host/eventbus"
logging "github.com/ipfs/go-log/v2"
ma "github.com/multiformats/go-multiaddr"
@@ -29,6 +31,8 @@ type AutoRelay struct {
host host.Host
addrsF basic.AddrsFactory
+
+ metricsTracer MetricsTracer
}
func NewAutoRelay(bhost *basic.BasicHost, opts ...Option) (*AutoRelay, error) {
@@ -46,18 +50,22 @@ func NewAutoRelay(bhost *basic.BasicHost, opts ...Option) (*AutoRelay, error) {
r.ctx, r.ctxCancel = context.WithCancel(context.Background())
r.conf = &conf
r.relayFinder = newRelayFinder(bhost, conf.peerSource, &conf)
+ r.metricsTracer = &wrappedMetricsTracer{conf.metricsTracer}
bhost.AddrsFactory = r.hostAddrs
+ return r, nil
+}
+
+func (r *AutoRelay) Start() {
r.refCount.Add(1)
go func() {
defer r.refCount.Done()
r.background()
}()
- return r, nil
}
func (r *AutoRelay) background() {
- subReachability, err := r.host.EventBus().Subscribe(new(event.EvtLocalReachabilityChanged))
+ subReachability, err := r.host.EventBus().Subscribe(new(event.EvtLocalReachabilityChanged), eventbus.Name("autorelay (background)"))
if err != nil {
log.Debug("failed to subscribe to the EvtLocalReachabilityChanged")
return
@@ -76,11 +84,17 @@ func (r *AutoRelay) background() {
evt := ev.(event.EvtLocalReachabilityChanged)
switch evt.Reachability {
case network.ReachabilityPrivate, network.ReachabilityUnknown:
- if err := r.relayFinder.Start(); err != nil {
+ err := r.relayFinder.Start()
+ if errors.Is(err, errAlreadyRunning) {
+ log.Debug("tried to start already running relay finder")
+ } else if err != nil {
log.Errorw("failed to start relay finder", "error", err)
+ } else {
+ r.metricsTracer.RelayFinderStatus(true)
}
case network.ReachabilityPublic:
r.relayFinder.Stop()
+ r.metricsTracer.RelayFinderStatus(false)
}
r.mx.Lock()
r.status = evt.Reachability
diff --git a/vendor/github.com/libp2p/go-libp2p/p2p/host/autorelay/host.go b/vendor/github.com/libp2p/go-libp2p/p2p/host/autorelay/host.go
index 740ca2362..c6bd9c570 100644
--- a/vendor/github.com/libp2p/go-libp2p/p2p/host/autorelay/host.go
+++ b/vendor/github.com/libp2p/go-libp2p/p2p/host/autorelay/host.go
@@ -14,6 +14,10 @@ func (h *AutoRelayHost) Close() error {
return h.Host.Close()
}
+func (h *AutoRelayHost) Start() {
+ h.ar.Start()
+}
+
func NewAutoRelayHost(h host.Host, ar *AutoRelay) *AutoRelayHost {
return &AutoRelayHost{Host: h, ar: ar}
}
diff --git a/vendor/github.com/libp2p/go-libp2p/p2p/host/autorelay/metrics.go b/vendor/github.com/libp2p/go-libp2p/p2p/host/autorelay/metrics.go
new file mode 100644
index 000000000..8028655b0
--- /dev/null
+++ b/vendor/github.com/libp2p/go-libp2p/p2p/host/autorelay/metrics.go
@@ -0,0 +1,373 @@
+package autorelay
+
+import (
+ "errors"
+
+ "github.com/libp2p/go-libp2p/p2p/metricshelper"
+ "github.com/libp2p/go-libp2p/p2p/protocol/circuitv2/client"
+ pbv2 "github.com/libp2p/go-libp2p/p2p/protocol/circuitv2/pb"
+ "github.com/prometheus/client_golang/prometheus"
+)
+
+const metricNamespace = "libp2p_autorelay"
+
+var (
+ status = prometheus.NewGauge(prometheus.GaugeOpts{
+ Namespace: metricNamespace,
+ Name: "status",
+ Help: "relay finder active",
+ })
+ reservationsOpenedTotal = prometheus.NewCounter(
+ prometheus.CounterOpts{
+ Namespace: metricNamespace,
+ Name: "reservations_opened_total",
+ Help: "Reservations Opened",
+ },
+ )
+ reservationsClosedTotal = prometheus.NewCounter(
+ prometheus.CounterOpts{
+ Namespace: metricNamespace,
+ Name: "reservations_closed_total",
+ Help: "Reservations Closed",
+ },
+ )
+ reservationRequestsOutcomeTotal = prometheus.NewCounterVec(
+ prometheus.CounterOpts{
+ Namespace: metricNamespace,
+ Name: "reservation_requests_outcome_total",
+ Help: "Reservation Request Outcome",
+ },
+ []string{"request_type", "outcome"},
+ )
+
+ relayAddressesUpdatedTotal = prometheus.NewCounter(
+ prometheus.CounterOpts{
+ Namespace: metricNamespace,
+ Name: "relay_addresses_updated_total",
+ Help: "Relay Addresses Updated Count",
+ },
+ )
+ relayAddressesCount = prometheus.NewGauge(
+ prometheus.GaugeOpts{
+ Namespace: metricNamespace,
+ Name: "relay_addresses_count",
+ Help: "Relay Addresses Count",
+ },
+ )
+
+ candidatesCircuitV2SupportTotal = prometheus.NewCounterVec(
+ prometheus.CounterOpts{
+ Namespace: metricNamespace,
+ Name: "candidates_circuit_v2_support_total",
+ Help: "Candidiates supporting circuit v2",
+ },
+ []string{"support"},
+ )
+ candidatesTotal = prometheus.NewCounterVec(
+ prometheus.CounterOpts{
+ Namespace: metricNamespace,
+ Name: "candidates_total",
+ Help: "Candidates Total",
+ },
+ []string{"type"},
+ )
+ candLoopState = prometheus.NewGauge(
+ prometheus.GaugeOpts{
+ Namespace: metricNamespace,
+ Name: "candidate_loop_state",
+ Help: "Candidate Loop State",
+ },
+ )
+
+ scheduledWorkTime = prometheus.NewGaugeVec(
+ prometheus.GaugeOpts{
+ Namespace: metricNamespace,
+ Name: "scheduled_work_time",
+ Help: "Scheduled Work Times",
+ },
+ []string{"work_type"},
+ )
+
+ desiredReservations = prometheus.NewGauge(
+ prometheus.GaugeOpts{
+ Namespace: metricNamespace,
+ Name: "desired_reservations",
+ Help: "Desired Reservations",
+ },
+ )
+
+ collectors = []prometheus.Collector{
+ status,
+ reservationsOpenedTotal,
+ reservationsClosedTotal,
+ reservationRequestsOutcomeTotal,
+ relayAddressesUpdatedTotal,
+ relayAddressesCount,
+ candidatesCircuitV2SupportTotal,
+ candidatesTotal,
+ candLoopState,
+ scheduledWorkTime,
+ desiredReservations,
+ }
+)
+
+type candidateLoopState int
+
+const (
+ peerSourceRateLimited candidateLoopState = iota
+ waitingOnPeerChan
+ waitingForTrigger
+ stopped
+)
+
+// MetricsTracer is the interface for tracking metrics for autorelay
+type MetricsTracer interface {
+ RelayFinderStatus(isActive bool)
+
+ ReservationEnded(cnt int)
+ ReservationOpened(cnt int)
+ ReservationRequestFinished(isRefresh bool, err error)
+
+ RelayAddressCount(int)
+ RelayAddressUpdated()
+
+ CandidateChecked(supportsCircuitV2 bool)
+ CandidateAdded(cnt int)
+ CandidateRemoved(cnt int)
+ CandidateLoopState(state candidateLoopState)
+
+ ScheduledWorkUpdated(scheduledWork *scheduledWorkTimes)
+
+ DesiredReservations(int)
+}
+
+type metricsTracer struct{}
+
+var _ MetricsTracer = &metricsTracer{}
+
+type metricsTracerSetting struct {
+ reg prometheus.Registerer
+}
+
+type MetricsTracerOption func(*metricsTracerSetting)
+
+func WithRegisterer(reg prometheus.Registerer) MetricsTracerOption {
+ return func(s *metricsTracerSetting) {
+ if reg != nil {
+ s.reg = reg
+ }
+ }
+}
+
+func NewMetricsTracer(opts ...MetricsTracerOption) MetricsTracer {
+ setting := &metricsTracerSetting{reg: prometheus.DefaultRegisterer}
+ for _, opt := range opts {
+ opt(setting)
+ }
+ metricshelper.RegisterCollectors(setting.reg, collectors...)
+
+ // Initialise these counters to 0 otherwise the first reservation requests aren't handled
+ // correctly when using promql increse function
+ reservationRequestsOutcomeTotal.WithLabelValues("refresh", "success")
+ reservationRequestsOutcomeTotal.WithLabelValues("new", "success")
+ candidatesCircuitV2SupportTotal.WithLabelValues("yes")
+ candidatesCircuitV2SupportTotal.WithLabelValues("no")
+ return &metricsTracer{}
+}
+
+func (mt *metricsTracer) RelayFinderStatus(isActive bool) {
+ if isActive {
+ status.Set(1)
+ } else {
+ status.Set(0)
+ }
+}
+
+func (mt *metricsTracer) ReservationEnded(cnt int) {
+ reservationsClosedTotal.Add(float64(cnt))
+}
+
+func (mt *metricsTracer) ReservationOpened(cnt int) {
+ reservationsOpenedTotal.Add(float64(cnt))
+}
+
+func (mt *metricsTracer) ReservationRequestFinished(isRefresh bool, err error) {
+ tags := metricshelper.GetStringSlice()
+ defer metricshelper.PutStringSlice(tags)
+
+ if isRefresh {
+ *tags = append(*tags, "refresh")
+ } else {
+ *tags = append(*tags, "new")
+ }
+ *tags = append(*tags, getReservationRequestStatus(err))
+ reservationRequestsOutcomeTotal.WithLabelValues(*tags...).Inc()
+
+ if !isRefresh && err == nil {
+ reservationsOpenedTotal.Inc()
+ }
+}
+
+func (mt *metricsTracer) RelayAddressUpdated() {
+ relayAddressesUpdatedTotal.Inc()
+}
+
+func (mt *metricsTracer) RelayAddressCount(cnt int) {
+ relayAddressesCount.Set(float64(cnt))
+}
+
+func (mt *metricsTracer) CandidateChecked(supportsCircuitV2 bool) {
+ tags := metricshelper.GetStringSlice()
+ defer metricshelper.PutStringSlice(tags)
+ if supportsCircuitV2 {
+ *tags = append(*tags, "yes")
+ } else {
+ *tags = append(*tags, "no")
+ }
+ candidatesCircuitV2SupportTotal.WithLabelValues(*tags...).Inc()
+}
+
+func (mt *metricsTracer) CandidateAdded(cnt int) {
+ tags := metricshelper.GetStringSlice()
+ defer metricshelper.PutStringSlice(tags)
+ *tags = append(*tags, "added")
+ candidatesTotal.WithLabelValues(*tags...).Add(float64(cnt))
+}
+
+func (mt *metricsTracer) CandidateRemoved(cnt int) {
+ tags := metricshelper.GetStringSlice()
+ defer metricshelper.PutStringSlice(tags)
+ *tags = append(*tags, "removed")
+ candidatesTotal.WithLabelValues(*tags...).Add(float64(cnt))
+}
+
+func (mt *metricsTracer) CandidateLoopState(state candidateLoopState) {
+ candLoopState.Set(float64(state))
+}
+
+func (mt *metricsTracer) ScheduledWorkUpdated(scheduledWork *scheduledWorkTimes) {
+ tags := metricshelper.GetStringSlice()
+ defer metricshelper.PutStringSlice(tags)
+
+ *tags = append(*tags, "allowed peer source call")
+ scheduledWorkTime.WithLabelValues(*tags...).Set(float64(scheduledWork.nextAllowedCallToPeerSource.Unix()))
+ *tags = (*tags)[:0]
+
+ *tags = append(*tags, "reservation refresh")
+ scheduledWorkTime.WithLabelValues(*tags...).Set(float64(scheduledWork.nextRefresh.Unix()))
+ *tags = (*tags)[:0]
+
+ *tags = append(*tags, "clear backoff")
+ scheduledWorkTime.WithLabelValues(*tags...).Set(float64(scheduledWork.nextBackoff.Unix()))
+ *tags = (*tags)[:0]
+
+ *tags = append(*tags, "old candidate check")
+ scheduledWorkTime.WithLabelValues(*tags...).Set(float64(scheduledWork.nextOldCandidateCheck.Unix()))
+}
+
+func (mt *metricsTracer) DesiredReservations(cnt int) {
+ desiredReservations.Set(float64(cnt))
+}
+
+func getReservationRequestStatus(err error) string {
+ if err == nil {
+ return "success"
+ }
+
+ status := "err other"
+ var re client.ReservationError
+ if errors.As(err, &re) {
+ switch re.Status {
+ case pbv2.Status_CONNECTION_FAILED:
+ return "connection failed"
+ case pbv2.Status_MALFORMED_MESSAGE:
+ return "malformed message"
+ case pbv2.Status_RESERVATION_REFUSED:
+ return "reservation refused"
+ case pbv2.Status_PERMISSION_DENIED:
+ return "permission denied"
+ case pbv2.Status_RESOURCE_LIMIT_EXCEEDED:
+ return "resource limit exceeded"
+ }
+ }
+ return status
+}
+
+// wrappedMetricsTracer wraps MetricsTracer and ignores all calls when mt is nil
+type wrappedMetricsTracer struct {
+ mt MetricsTracer
+}
+
+var _ MetricsTracer = &wrappedMetricsTracer{}
+
+func (mt *wrappedMetricsTracer) RelayFinderStatus(isActive bool) {
+ if mt.mt != nil {
+ mt.mt.RelayFinderStatus(isActive)
+ }
+}
+
+func (mt *wrappedMetricsTracer) ReservationEnded(cnt int) {
+ if mt.mt != nil {
+ mt.mt.ReservationEnded(cnt)
+ }
+}
+
+func (mt *wrappedMetricsTracer) ReservationOpened(cnt int) {
+ if mt.mt != nil {
+ mt.mt.ReservationOpened(cnt)
+ }
+}
+
+func (mt *wrappedMetricsTracer) ReservationRequestFinished(isRefresh bool, err error) {
+ if mt.mt != nil {
+ mt.mt.ReservationRequestFinished(isRefresh, err)
+ }
+}
+
+func (mt *wrappedMetricsTracer) RelayAddressUpdated() {
+ if mt.mt != nil {
+ mt.mt.RelayAddressUpdated()
+ }
+}
+
+func (mt *wrappedMetricsTracer) RelayAddressCount(cnt int) {
+ if mt.mt != nil {
+ mt.mt.RelayAddressCount(cnt)
+ }
+}
+
+func (mt *wrappedMetricsTracer) CandidateChecked(supportsCircuitV2 bool) {
+ if mt.mt != nil {
+ mt.mt.CandidateChecked(supportsCircuitV2)
+ }
+}
+
+func (mt *wrappedMetricsTracer) CandidateAdded(cnt int) {
+ if mt.mt != nil {
+ mt.mt.CandidateAdded(cnt)
+ }
+}
+
+func (mt *wrappedMetricsTracer) CandidateRemoved(cnt int) {
+ if mt.mt != nil {
+ mt.mt.CandidateRemoved(cnt)
+ }
+}
+
+func (mt *wrappedMetricsTracer) ScheduledWorkUpdated(scheduledWork *scheduledWorkTimes) {
+ if mt.mt != nil {
+ mt.mt.ScheduledWorkUpdated(scheduledWork)
+ }
+}
+
+func (mt *wrappedMetricsTracer) DesiredReservations(cnt int) {
+ if mt.mt != nil {
+ mt.mt.DesiredReservations(cnt)
+ }
+}
+
+func (mt *wrappedMetricsTracer) CandidateLoopState(state candidateLoopState) {
+ if mt.mt != nil {
+ mt.mt.CandidateLoopState(state)
+ }
+}
diff --git a/vendor/github.com/libp2p/go-libp2p/p2p/host/autorelay/options.go b/vendor/github.com/libp2p/go-libp2p/p2p/host/autorelay/options.go
index e62f129de..26ba92018 100644
--- a/vendor/github.com/libp2p/go-libp2p/p2p/host/autorelay/options.go
+++ b/vendor/github.com/libp2p/go-libp2p/p2p/host/autorelay/options.go
@@ -1,21 +1,31 @@
package autorelay
import (
+ "context"
"errors"
- "fmt"
"time"
"github.com/libp2p/go-libp2p/core/peer"
-
- "github.com/benbjohnson/clock"
)
+// AutoRelay will call this function when it needs new candidates because it is
+// not connected to the desired number of relays or we get disconnected from one
+// of the relays. Implementations must send *at most* numPeers, and close the
+// channel when they don't intend to provide any more peers. AutoRelay will not
+// call the callback again until the channel is closed. Implementations should
+// send new peers, but may send peers they sent before. AutoRelay implements a
+// per-peer backoff (see WithBackoff). See WithMinInterval for setting the
+// minimum interval between calls to the callback. The context.Context passed
+// may be canceled when AutoRelay feels satisfied, it will be canceled when the
+// node is shutting down. If the context is canceled you MUST close the output
+// channel at some point.
+type PeerSource func(ctx context.Context, num int) <-chan peer.AddrInfo
+
type config struct {
- clock clock.Clock
- peerSource func(num int) <-chan peer.AddrInfo
+ clock ClockWithInstantTimer
+ peerSource PeerSource
// minimum interval used to call the peerSource callback
- minInterval time.Duration
- staticRelays []peer.AddrInfo
+ minInterval time.Duration
// see WithMinCandidates
minCandidates int
// see WithMaxCandidates
@@ -30,85 +40,60 @@ type config struct {
// see WithMaxCandidateAge
maxCandidateAge time.Duration
setMinCandidates bool
- enableCircuitV1 bool
+ // see WithMetricsTracer
+ metricsTracer MetricsTracer
}
var defaultConfig = config{
- clock: clock.New(),
+ clock: RealClock{},
minCandidates: 4,
maxCandidates: 20,
bootDelay: 3 * time.Minute,
backoff: time.Hour,
desiredRelays: 2,
maxCandidateAge: 30 * time.Minute,
+ minInterval: 30 * time.Second,
}
var (
- errStaticRelaysMinCandidates = errors.New("cannot use WithMinCandidates and WithStaticRelays")
- errStaticRelaysPeerSource = errors.New("cannot use WithPeerSource and WithStaticRelays")
+ errAlreadyHavePeerSource = errors.New("can only use a single WithPeerSource or WithStaticRelays")
)
-// DefaultRelays are the known PL-operated v1 relays; will be decommissioned in 2022.
-var DefaultRelays = []string{
- "/ip4/147.75.80.110/tcp/4001/p2p/QmbFgm5zan8P6eWWmeyfncR5feYEMPbht5b1FW1C37aQ7y",
- "/ip4/147.75.80.110/udp/4001/quic/p2p/QmbFgm5zan8P6eWWmeyfncR5feYEMPbht5b1FW1C37aQ7y",
- "/ip4/147.75.195.153/tcp/4001/p2p/QmW9m57aiBDHAkKj9nmFSEn7ZqrcF1fZS4bipsTCHburei",
- "/ip4/147.75.195.153/udp/4001/quic/p2p/QmW9m57aiBDHAkKj9nmFSEn7ZqrcF1fZS4bipsTCHburei",
- "/ip4/147.75.70.221/tcp/4001/p2p/Qme8g49gm3q4Acp7xWBKg3nAa9fxZ1YmyDJdyGgoG6LsXh",
- "/ip4/147.75.70.221/udp/4001/quic/p2p/Qme8g49gm3q4Acp7xWBKg3nAa9fxZ1YmyDJdyGgoG6LsXh",
-}
-
-var defaultStaticRelays []peer.AddrInfo
-
-func init() {
- for _, s := range DefaultRelays {
- pi, err := peer.AddrInfoFromString(s)
- if err != nil {
- panic(fmt.Sprintf("failed to initialize default static relays: %s", err))
- }
- defaultStaticRelays = append(defaultStaticRelays, *pi)
- }
-}
-
type Option func(*config) error
func WithStaticRelays(static []peer.AddrInfo) Option {
return func(c *config) error {
- if c.setMinCandidates {
- return errStaticRelaysMinCandidates
- }
if c.peerSource != nil {
- return errStaticRelaysPeerSource
+ return errAlreadyHavePeerSource
}
- if len(c.staticRelays) > 0 {
- return errors.New("can't set static relays, static relays already configured")
- }
- c.minCandidates = len(static)
- c.staticRelays = static
+
+ WithPeerSource(func(ctx context.Context, numPeers int) <-chan peer.AddrInfo {
+ if len(static) < numPeers {
+ numPeers = len(static)
+ }
+ c := make(chan peer.AddrInfo, numPeers)
+ defer close(c)
+
+ for i := 0; i < numPeers; i++ {
+ c <- static[i]
+ }
+ return c
+ })(c)
+ WithMinCandidates(len(static))(c)
+ WithMaxCandidates(len(static))(c)
+ WithNumRelays(len(static))(c)
+
return nil
}
}
-func WithDefaultStaticRelays() Option {
- return WithStaticRelays(defaultStaticRelays)
-}
-
// WithPeerSource defines a callback for AutoRelay to query for more relay candidates.
-// AutoRelay will call this function when it needs new candidates is connected to the desired number of
-// relays, and it has enough candidates (in case we get disconnected from one of the relays).
-// Implementations must send *at most* numPeers, and close the channel when they don't intend to provide
-// any more peers.
-// AutoRelay will not call the callback again until the channel is closed.
-// Implementations should send new peers, but may send peers they sent before. AutoRelay implements
-// a per-peer backoff (see WithBackoff).
-// minInterval is the minimum interval this callback is called with, even if AutoRelay needs new candidates.
-func WithPeerSource(f func(numPeers int) <-chan peer.AddrInfo, minInterval time.Duration) Option {
+func WithPeerSource(f PeerSource) Option {
return func(c *config) error {
- if len(c.staticRelays) > 0 {
- return errStaticRelaysPeerSource
+ if c.peerSource != nil {
+ return errAlreadyHavePeerSource
}
c.peerSource = f
- c.minInterval = minInterval
return nil
}
}
@@ -137,9 +122,6 @@ func WithMaxCandidates(n int) Option {
// This is to make sure that we don't just randomly connect to the first candidate that we discover.
func WithMinCandidates(n int) Option {
return func(c *config) error {
- if len(c.staticRelays) > 0 {
- return errStaticRelaysMinCandidates
- }
if n > c.maxCandidates {
n = c.maxCandidates
}
@@ -168,14 +150,6 @@ func WithBackoff(d time.Duration) Option {
}
}
-// WithCircuitV1Support enables support for circuit v1 relays.
-func WithCircuitV1Support() Option {
- return func(c *config) error {
- c.enableCircuitV1 = true
- return nil
- }
-}
-
// WithMaxCandidateAge sets the maximum age of a candidate.
// When we are connected to the desired number of relays, we don't ask the peer source for new candidates.
// This can lead to AutoRelay's candidate list becoming outdated, and means we won't be able
@@ -188,9 +162,72 @@ func WithMaxCandidateAge(d time.Duration) Option {
}
}
-func WithClock(cl clock.Clock) Option {
+// InstantTimer is a timer that triggers at some instant rather than some duration
+type InstantTimer interface {
+ Reset(d time.Time) bool
+ Stop() bool
+ Ch() <-chan time.Time
+}
+
+// ClockWithInstantTimer is a clock that can create timers that trigger at some
+// instant rather than some duration
+type ClockWithInstantTimer interface {
+ Now() time.Time
+ Since(t time.Time) time.Duration
+ InstantTimer(when time.Time) InstantTimer
+}
+
+type RealTimer struct{ t *time.Timer }
+
+var _ InstantTimer = (*RealTimer)(nil)
+
+func (t RealTimer) Ch() <-chan time.Time {
+ return t.t.C
+}
+
+func (t RealTimer) Reset(d time.Time) bool {
+ return t.t.Reset(time.Until(d))
+}
+
+func (t RealTimer) Stop() bool {
+ return t.t.Stop()
+}
+
+type RealClock struct{}
+
+var _ ClockWithInstantTimer = RealClock{}
+
+func (RealClock) Now() time.Time {
+ return time.Now()
+}
+func (RealClock) Since(t time.Time) time.Duration {
+ return time.Since(t)
+}
+func (RealClock) InstantTimer(when time.Time) InstantTimer {
+ t := time.NewTimer(time.Until(when))
+ return &RealTimer{t}
+}
+
+func WithClock(cl ClockWithInstantTimer) Option {
return func(c *config) error {
c.clock = cl
return nil
}
}
+
+// WithMinInterval sets the minimum interval after which peerSource callback will be called for more
+// candidates even if AutoRelay needs new candidates.
+func WithMinInterval(interval time.Duration) Option {
+ return func(c *config) error {
+ c.minInterval = interval
+ return nil
+ }
+}
+
+// WithMetricsTracer configures autorelay to use mt to track metrics
+func WithMetricsTracer(mt MetricsTracer) Option {
+ return func(c *config) error {
+ c.metricsTracer = mt
+ return nil
+ }
+}
diff --git a/vendor/github.com/libp2p/go-libp2p/p2p/host/autorelay/relay_finder.go b/vendor/github.com/libp2p/go-libp2p/p2p/host/autorelay/relay_finder.go
index a62947d38..3133b7a51 100644
--- a/vendor/github.com/libp2p/go-libp2p/p2p/host/autorelay/relay_finder.go
+++ b/vendor/github.com/libp2p/go-libp2p/p2p/host/autorelay/relay_finder.go
@@ -14,7 +14,7 @@ import (
"github.com/libp2p/go-libp2p/core/network"
"github.com/libp2p/go-libp2p/core/peer"
basic "github.com/libp2p/go-libp2p/p2p/host/basic"
- relayv1 "github.com/libp2p/go-libp2p/p2p/protocol/circuitv1/relay"
+ "github.com/libp2p/go-libp2p/p2p/host/eventbus"
circuitv2 "github.com/libp2p/go-libp2p/p2p/protocol/circuitv2/client"
circuitv2_proto "github.com/libp2p/go-libp2p/p2p/protocol/circuitv2/proto"
@@ -22,13 +22,10 @@ import (
manet "github.com/multiformats/go-multiaddr/net"
)
-const (
- protoIDv1 = string(relayv1.ProtoID)
- protoIDv2 = string(circuitv2_proto.ProtoIDv2Hop)
-)
+const protoIDv2 = circuitv2_proto.ProtoIDv2Hop
// Terminology:
-// Candidate: Once we connect to a node and it supports (v1 / v2) relay protocol,
+// Candidate: Once we connect to a node and it supports relay protocol,
// we call it a candidate, and consider using it as a relay.
// Relay: Out of the list of candidates, we select a relay to connect to.
// Currently, we just randomly select a candidate, but we can employ more sophisticated
@@ -59,7 +56,7 @@ type relayFinder struct {
ctxCancel context.CancelFunc
ctxCancelMx sync.Mutex
- peerSource func(int) <-chan peer.AddrInfo
+ peerSource PeerSource
candidateFound chan struct{} // receives every time we find a new relay candidate
candidateMx sync.Mutex
@@ -76,13 +73,23 @@ type relayFinder struct {
relayUpdated chan struct{}
relayMx sync.Mutex
- relays map[peer.ID]*circuitv2.Reservation // rsvp will be nil if it is a v1 relay
+ relays map[peer.ID]*circuitv2.Reservation
cachedAddrs []ma.Multiaddr
cachedAddrsExpiry time.Time
+
+ // A channel that triggers a run of `runScheduledWork`.
+ triggerRunScheduledWork chan struct{}
+ metricsTracer MetricsTracer
}
-func newRelayFinder(host *basic.BasicHost, peerSource func(int) <-chan peer.AddrInfo, conf *config) *relayFinder {
+var errAlreadyRunning = errors.New("relayFinder already running")
+
+func newRelayFinder(host *basic.BasicHost, peerSource PeerSource, conf *config) *relayFinder {
+ if peerSource == nil {
+ panic("Can not create a new relayFinder. Need a Peer Source fn or a list of static relays. Refer to the documentation around `libp2p.EnableAutoRelay`")
+ }
+
return &relayFinder{
bootTime: conf.clock.Now(),
host: host,
@@ -93,25 +100,28 @@ func newRelayFinder(host *basic.BasicHost, peerSource func(int) <-chan peer.Addr
candidateFound: make(chan struct{}, 1),
maybeConnectToRelayTrigger: make(chan struct{}, 1),
maybeRequestNewCandidates: make(chan struct{}, 1),
+ triggerRunScheduledWork: make(chan struct{}, 1),
relays: make(map[peer.ID]*circuitv2.Reservation),
relayUpdated: make(chan struct{}, 1),
+ metricsTracer: &wrappedMetricsTracer{conf.metricsTracer},
}
}
+type scheduledWorkTimes struct {
+ leastFrequentInterval time.Duration
+ nextRefresh time.Time
+ nextBackoff time.Time
+ nextOldCandidateCheck time.Time
+ nextAllowedCallToPeerSource time.Time
+}
+
func (rf *relayFinder) background(ctx context.Context) {
- if rf.usesStaticRelay() {
- rf.refCount.Add(1)
- go func() {
- defer rf.refCount.Done()
- rf.handleStaticRelays(ctx)
- }()
- } else {
- rf.refCount.Add(1)
- go func() {
- defer rf.refCount.Done()
- rf.findNodes(ctx)
- }()
- }
+ peerSourceRateLimiter := make(chan struct{}, 1)
+ rf.refCount.Add(1)
+ go func() {
+ defer rf.refCount.Done()
+ rf.findNodes(ctx, peerSourceRateLimiter)
+ }()
rf.refCount.Add(1)
go func() {
@@ -119,26 +129,42 @@ func (rf *relayFinder) background(ctx context.Context) {
rf.handleNewCandidates(ctx)
}()
- subConnectedness, err := rf.host.EventBus().Subscribe(new(event.EvtPeerConnectednessChanged))
+ subConnectedness, err := rf.host.EventBus().Subscribe(new(event.EvtPeerConnectednessChanged), eventbus.Name("autorelay (relay finder)"))
if err != nil {
log.Error("failed to subscribe to the EvtPeerConnectednessChanged")
return
}
defer subConnectedness.Close()
- bootDelayTimer := rf.conf.clock.Timer(rf.conf.bootDelay)
+ now := rf.conf.clock.Now()
+ bootDelayTimer := rf.conf.clock.InstantTimer(now.Add(rf.conf.bootDelay))
defer bootDelayTimer.Stop()
- refreshTicker := rf.conf.clock.Ticker(rsvpRefreshInterval)
- defer refreshTicker.Stop()
- backoffTicker := rf.conf.clock.Ticker(rf.conf.backoff / 5)
- defer backoffTicker.Stop()
- oldCandidateTicker := rf.conf.clock.Ticker(rf.conf.maxCandidateAge / 5)
- defer oldCandidateTicker.Stop()
- for {
- // when true, we need to identify push
- var push bool
+ // This is the least frequent event. It's our fallback timer if we don't have any other work to do.
+ leastFrequentInterval := rf.conf.minInterval
+ // Check if leastFrequentInterval is 0 to avoid busy looping
+ if rf.conf.backoff > leastFrequentInterval || leastFrequentInterval == 0 {
+ leastFrequentInterval = rf.conf.backoff
+ }
+ if rf.conf.maxCandidateAge > leastFrequentInterval || leastFrequentInterval == 0 {
+ leastFrequentInterval = rf.conf.maxCandidateAge
+ }
+ if rsvpRefreshInterval > leastFrequentInterval || leastFrequentInterval == 0 {
+ leastFrequentInterval = rsvpRefreshInterval
+ }
+
+ scheduledWork := &scheduledWorkTimes{
+ leastFrequentInterval: leastFrequentInterval,
+ nextRefresh: now.Add(rsvpRefreshInterval),
+ nextBackoff: now.Add(rf.conf.backoff),
+ nextOldCandidateCheck: now.Add(rf.conf.maxCandidateAge),
+ nextAllowedCallToPeerSource: now.Add(-time.Second), // allow immediately
+ }
+ workTimer := rf.conf.clock.InstantTimer(rf.runScheduledWork(ctx, now, scheduledWork, peerSourceRateLimiter))
+ defer workTimer.Stop()
+
+ for {
select {
case ev, ok := <-subConnectedness.Out():
if !ok {
@@ -148,6 +174,8 @@ func (rf *relayFinder) background(ctx context.Context) {
if evt.Connectedness != network.NotConnected {
continue
}
+ push := false
+
rf.relayMx.Lock()
if rf.usingRelay(evt.Peer) { // we were disconnected from a relay
log.Debugw("disconnected from relay", "id", evt.Peer)
@@ -157,85 +185,182 @@ func (rf *relayFinder) background(ctx context.Context) {
push = true
}
rf.relayMx.Unlock()
+
+ if push {
+ rf.clearCachedAddrsAndSignalAddressChange()
+ rf.metricsTracer.ReservationEnded(1)
+ }
case <-rf.candidateFound:
rf.notifyMaybeConnectToRelay()
- case <-bootDelayTimer.C:
+ case <-bootDelayTimer.Ch():
rf.notifyMaybeConnectToRelay()
case <-rf.relayUpdated:
- push = true
- case now := <-refreshTicker.C:
- push = rf.refreshReservations(ctx, now)
- case now := <-backoffTicker.C:
- rf.candidateMx.Lock()
- for id, t := range rf.backoff {
- if !t.Add(rf.conf.backoff).After(now) {
- log.Debugw("removing backoff for node", "id", id)
- delete(rf.backoff, id)
- }
- }
- rf.candidateMx.Unlock()
- case now := <-oldCandidateTicker.C:
- var deleted bool
- rf.candidateMx.Lock()
- for id, cand := range rf.candidates {
- if !cand.added.Add(rf.conf.maxCandidateAge).After(now) {
- deleted = true
- log.Debugw("deleting candidate due to age", "id", id)
- delete(rf.candidates, id)
- }
- }
- rf.candidateMx.Unlock()
- if deleted {
- rf.notifyMaybeNeedNewCandidates()
- }
+ rf.clearCachedAddrsAndSignalAddressChange()
+ case now := <-workTimer.Ch():
+ // Note: `now` is not guaranteed to be the current time. It's the time
+ // that the timer was fired. This is okay because we'll schedule
+ // future work at a specific time.
+ nextTime := rf.runScheduledWork(ctx, now, scheduledWork, peerSourceRateLimiter)
+ workTimer.Reset(nextTime)
+ case <-rf.triggerRunScheduledWork:
+ // Ignore the next time because we aren't scheduling any future work here
+ _ = rf.runScheduledWork(ctx, rf.conf.clock.Now(), scheduledWork, peerSourceRateLimiter)
case <-ctx.Done():
return
}
+ }
+}
- if push {
- rf.relayMx.Lock()
- rf.cachedAddrs = nil
- rf.relayMx.Unlock()
- rf.host.SignalAddressChange()
+func (rf *relayFinder) clearCachedAddrsAndSignalAddressChange() {
+ rf.relayMx.Lock()
+ rf.cachedAddrs = nil
+ rf.relayMx.Unlock()
+ rf.host.SignalAddressChange()
+
+ rf.metricsTracer.RelayAddressUpdated()
+}
+
+func (rf *relayFinder) runScheduledWork(ctx context.Context, now time.Time, scheduledWork *scheduledWorkTimes, peerSourceRateLimiter chan<- struct{}) time.Time {
+ nextTime := now.Add(scheduledWork.leastFrequentInterval)
+
+ if now.After(scheduledWork.nextRefresh) {
+ scheduledWork.nextRefresh = now.Add(rsvpRefreshInterval)
+ if rf.refreshReservations(ctx, now) {
+ rf.clearCachedAddrsAndSignalAddressChange()
+ }
+ }
+
+ if now.After(scheduledWork.nextBackoff) {
+ scheduledWork.nextBackoff = rf.clearBackoff(now)
+ }
+
+ if now.After(scheduledWork.nextOldCandidateCheck) {
+ scheduledWork.nextOldCandidateCheck = rf.clearOldCandidates(now)
+ }
+
+ if now.After(scheduledWork.nextAllowedCallToPeerSource) {
+ select {
+ case peerSourceRateLimiter <- struct{}{}:
+ scheduledWork.nextAllowedCallToPeerSource = now.Add(rf.conf.minInterval)
+ if scheduledWork.nextAllowedCallToPeerSource.Before(nextTime) {
+ nextTime = scheduledWork.nextAllowedCallToPeerSource
+ }
+ default:
+ }
+ } else {
+ // We still need to schedule this work if it's sooner than nextTime
+ if scheduledWork.nextAllowedCallToPeerSource.Before(nextTime) {
+ nextTime = scheduledWork.nextAllowedCallToPeerSource
+ }
+ }
+
+ // Find the next time we need to run scheduled work.
+ if scheduledWork.nextRefresh.Before(nextTime) {
+ nextTime = scheduledWork.nextRefresh
+ }
+ if scheduledWork.nextBackoff.Before(nextTime) {
+ nextTime = scheduledWork.nextBackoff
+ }
+ if scheduledWork.nextOldCandidateCheck.Before(nextTime) {
+ nextTime = scheduledWork.nextOldCandidateCheck
+ }
+ if nextTime == now {
+ // Only happens in CI with a mock clock
+ nextTime = nextTime.Add(1) // avoids an infinite loop
+ }
+
+ rf.metricsTracer.ScheduledWorkUpdated(scheduledWork)
+
+ return nextTime
+}
+
+// clearOldCandidates clears old candidates from the map. Returns the next time
+// to run this function.
+func (rf *relayFinder) clearOldCandidates(now time.Time) time.Time {
+ // If we don't have any candidates, we should run this again in rf.conf.maxCandidateAge.
+ nextTime := now.Add(rf.conf.maxCandidateAge)
+
+ var deleted bool
+ rf.candidateMx.Lock()
+ defer rf.candidateMx.Unlock()
+ for id, cand := range rf.candidates {
+ expiry := cand.added.Add(rf.conf.maxCandidateAge)
+ if expiry.After(now) {
+ if expiry.Before(nextTime) {
+ nextTime = expiry
+ }
+ } else {
+ log.Debugw("deleting candidate due to age", "id", id)
+ deleted = true
+ rf.removeCandidate(id)
+ }
+ }
+ if deleted {
+ rf.notifyMaybeNeedNewCandidates()
+ }
+
+ return nextTime
+}
+
+// clearBackoff clears old backoff entries from the map. Returns the next time
+// to run this function.
+func (rf *relayFinder) clearBackoff(now time.Time) time.Time {
+ nextTime := now.Add(rf.conf.backoff)
+
+ rf.candidateMx.Lock()
+ defer rf.candidateMx.Unlock()
+ for id, t := range rf.backoff {
+ expiry := t.Add(rf.conf.backoff)
+ if expiry.After(now) {
+ if expiry.Before(nextTime) {
+ nextTime = expiry
+ }
+ } else {
+ log.Debugw("removing backoff for node", "id", id)
+ delete(rf.backoff, id)
}
}
+
+ return nextTime
}
// findNodes accepts nodes from the channel and tests if they support relaying.
// It is run on both public and private nodes.
// It garbage collects old entries, so that nodes doesn't overflow.
// This makes sure that as soon as we need to find relay candidates, we have them available.
-func (rf *relayFinder) findNodes(ctx context.Context) {
- peerChan := rf.peerSource(rf.conf.maxCandidates)
+// peerSourceRateLimiter is used to limit how often we call the peer source.
+func (rf *relayFinder) findNodes(ctx context.Context, peerSourceRateLimiter <-chan struct{}) {
+ var peerChan <-chan peer.AddrInfo
var wg sync.WaitGroup
- lastCallToPeerSource := rf.conf.clock.Now()
-
- timer := newTimer(rf.conf.clock)
for {
rf.candidateMx.Lock()
numCandidates := len(rf.candidates)
rf.candidateMx.Unlock()
- if peerChan == nil {
- now := rf.conf.clock.Now()
- nextAllowedCallToPeerSource := lastCallToPeerSource.Add(rf.conf.minInterval).Sub(now)
- if numCandidates < rf.conf.minCandidates {
- log.Debugw("not enough candidates. Resetting timer", "num", numCandidates, "desired", rf.conf.minCandidates)
- timer.Reset(nextAllowedCallToPeerSource)
+ if peerChan == nil && numCandidates < rf.conf.minCandidates {
+ rf.metricsTracer.CandidateLoopState(peerSourceRateLimited)
+
+ select {
+ case <-peerSourceRateLimiter:
+ peerChan = rf.peerSource(ctx, rf.conf.maxCandidates)
+ select {
+ case rf.triggerRunScheduledWork <- struct{}{}:
+ default:
+ }
+ case <-ctx.Done():
+ return
}
}
+ if peerChan == nil {
+ rf.metricsTracer.CandidateLoopState(waitingForTrigger)
+ } else {
+ rf.metricsTracer.CandidateLoopState(waitingOnPeerChan)
+ }
+
select {
case <-rf.maybeRequestNewCandidates:
continue
- case now := <-timer.Chan():
- timer.SetRead()
- if peerChan != nil {
- // We're still reading peers from the peerChan. No need to query for more peers now.
- continue
- }
- lastCallToPeerSource = now
- peerChan = rf.peerSource(rf.conf.maxCandidates)
case pi, ok := <-peerChan:
if !ok {
wg.Wait()
@@ -265,28 +390,12 @@ func (rf *relayFinder) findNodes(ctx context.Context) {
}
}()
case <-ctx.Done():
+ rf.metricsTracer.CandidateLoopState(stopped)
return
}
}
}
-func (rf *relayFinder) handleStaticRelays(ctx context.Context) {
- sem := make(chan struct{}, 4)
- var wg sync.WaitGroup
- wg.Add(len(rf.conf.staticRelays))
- for _, pi := range rf.conf.staticRelays {
- sem <- struct{}{}
- go func(pi peer.AddrInfo) {
- defer wg.Done()
- defer func() { <-sem }()
- rf.handleNewNode(ctx, pi)
- }(pi)
- }
- wg.Wait()
- log.Debug("processed all static relays")
- rf.notifyNewCandidate()
-}
-
func (rf *relayFinder) notifyMaybeConnectToRelay() {
select {
case rf.maybeConnectToRelayTrigger <- struct{}{}:
@@ -308,7 +417,7 @@ func (rf *relayFinder) notifyNewCandidate() {
}
}
-// handleNewNode tests if a peer supports circuit v1 or v2.
+// handleNewNode tests if a peer supports circuit v2.
// This method is only run on private nodes.
// If a peer does, it is added to the candidates map.
// Note that just supporting the protocol doesn't guarantee that we can also obtain a reservation.
@@ -325,24 +434,31 @@ func (rf *relayFinder) handleNewNode(ctx context.Context, pi peer.AddrInfo) (add
supportsV2, err := rf.tryNode(ctx, pi)
if err != nil {
log.Debugf("node %s not accepted as a candidate: %s", pi.ID, err)
+ if err == errProtocolNotSupported {
+ rf.metricsTracer.CandidateChecked(false)
+ }
return false
}
+ rf.metricsTracer.CandidateChecked(true)
+
rf.candidateMx.Lock()
if len(rf.candidates) > rf.conf.maxCandidates {
rf.candidateMx.Unlock()
return false
}
log.Debugw("node supports relay protocol", "peer", pi.ID, "supports circuit v2", supportsV2)
- rf.candidates[pi.ID] = &candidate{
+ rf.addCandidate(&candidate{
added: rf.conf.clock.Now(),
ai: pi,
supportsRelayV2: supportsV2,
- }
+ })
rf.candidateMx.Unlock()
return true
}
-// tryNode checks if a peer actually supports either circuit v1 or circuit v2.
+var errProtocolNotSupported = errors.New("doesn't speak circuit v2")
+
+// tryNode checks if a peer actually supports either circuit v2.
// It does not modify any internal state.
func (rf *relayFinder) tryNode(ctx context.Context, pi peer.AddrInfo) (supportsRelayV2 bool, err error) {
if err := rf.host.Connect(ctx, pi); err != nil {
@@ -377,61 +493,26 @@ func (rf *relayFinder) tryNode(ctx context.Context, pi peer.AddrInfo) (supportsR
return false, ctx.Err()
}
- protos, err := rf.host.Peerstore().SupportsProtocols(pi.ID, protoIDv1, protoIDv2)
+ protos, err := rf.host.Peerstore().SupportsProtocols(pi.ID, protoIDv2)
if err != nil {
return false, fmt.Errorf("error checking relay protocol support for peer %s: %w", pi.ID, err)
}
-
- // If the node speaks both, prefer circuit v2
- var maybeSupportsV1, supportsV2 bool
- for _, proto := range protos {
- switch proto {
- case protoIDv1:
- maybeSupportsV1 = true
- case protoIDv2:
- supportsV2 = true
- }
- }
-
- if supportsV2 {
- return true, nil
- }
-
- if !rf.conf.enableCircuitV1 && !supportsV2 {
- return false, errors.New("doesn't speak circuit v2")
- }
- if !maybeSupportsV1 && !supportsV2 {
- return false, errors.New("doesn't speak circuit v1 or v2")
- }
-
- // The node *may* support circuit v1.
- supportsV1, err := relayv1.CanHop(ctx, rf.host, pi.ID)
- if err != nil {
- return false, fmt.Errorf("CanHop failed: %w", err)
+ if len(protos) == 0 {
+ return false, errProtocolNotSupported
}
- if !supportsV1 {
- return false, errors.New("doesn't speak circuit v1 or v2")
- }
- return false, nil
+ return true, nil
}
// When a new node that could be a relay is found, we receive a notification on the maybeConnectToRelayTrigger chan.
// This function makes sure that we only run one instance of maybeConnectToRelay at once, and buffers
// exactly one more trigger event to run maybeConnectToRelay.
func (rf *relayFinder) handleNewCandidates(ctx context.Context) {
- sem := make(chan struct{}, 1)
for {
select {
case <-ctx.Done():
return
case <-rf.maybeConnectToRelayTrigger:
- select {
- case <-ctx.Done():
- return
- case sem <- struct{}{}:
- }
rf.maybeConnectToRelay(ctx)
- <-sem
}
}
}
@@ -446,7 +527,7 @@ func (rf *relayFinder) maybeConnectToRelay(ctx context.Context) {
}
rf.candidateMx.Lock()
- if !rf.usesStaticRelay() && len(rf.relays) == 0 && len(rf.candidates) < rf.conf.minCandidates && rf.conf.clock.Since(rf.bootTime) < rf.conf.bootDelay {
+ if len(rf.relays) == 0 && len(rf.candidates) < rf.conf.minCandidates && rf.conf.clock.Since(rf.bootTime) < rf.conf.bootDelay {
// During the startup phase, we don't want to connect to the first candidate that we find.
// Instead, we wait until we've found at least minCandidates, and then select the best of those.
// However, if that takes too long (longer than bootDelay), we still go ahead.
@@ -469,7 +550,7 @@ func (rf *relayFinder) maybeConnectToRelay(ctx context.Context) {
rf.relayMx.Unlock()
if usingRelay {
rf.candidateMx.Lock()
- delete(rf.candidates, id)
+ rf.removeCandidate(id)
rf.candidateMx.Unlock()
rf.notifyMaybeNeedNewCandidates()
continue
@@ -478,6 +559,7 @@ func (rf *relayFinder) maybeConnectToRelay(ctx context.Context) {
if err != nil {
log.Debugw("failed to connect to relay", "peer", id, "error", err)
rf.notifyMaybeNeedNewCandidates()
+ rf.metricsTracer.ReservationRequestFinished(false, err)
continue
}
log.Debugw("adding new relay", "id", id)
@@ -494,6 +576,8 @@ func (rf *relayFinder) maybeConnectToRelay(ctx context.Context) {
default:
}
+ rf.metricsTracer.ReservationRequestFinished(false, nil)
+
if numRelays >= rf.conf.desiredRelays {
break
}
@@ -512,7 +596,7 @@ func (rf *relayFinder) connectToRelay(ctx context.Context, cand *candidate) (*ci
if rf.host.Network().Connectedness(id) != network.Connected {
if err := rf.host.Connect(ctx, cand.ai); err != nil {
rf.candidateMx.Lock()
- delete(rf.candidates, cand.ai.ID)
+ rf.removeCandidate(cand.ai.ID)
rf.candidateMx.Unlock()
return nil, fmt.Errorf("failed to connect: %w", err)
}
@@ -529,7 +613,7 @@ func (rf *relayFinder) connectToRelay(ctx context.Context, cand *candidate) (*ci
}
}
rf.candidateMx.Lock()
- delete(rf.candidates, id)
+ rf.removeCandidate(id)
rf.candidateMx.Unlock()
return rsvp, err
}
@@ -540,15 +624,17 @@ func (rf *relayFinder) refreshReservations(ctx context.Context, now time.Time) b
// find reservations about to expire and refresh them in parallel
g := new(errgroup.Group)
for p, rsvp := range rf.relays {
- if rsvp == nil { // this is a circuit v1 relay, there is no reservation
- continue
- }
if now.Add(rsvpExpirationSlack).Before(rsvp.Expiration) {
continue
}
p := p
- g.Go(func() error { return rf.refreshRelayReservation(ctx, p) })
+ g.Go(func() error {
+ err := rf.refreshRelayReservation(ctx, p)
+ rf.metricsTracer.ReservationRequestFinished(true, err)
+
+ return err
+ })
}
rf.relayMx.Unlock()
@@ -560,19 +646,22 @@ func (rf *relayFinder) refreshRelayReservation(ctx context.Context, p peer.ID) e
rsvp, err := circuitv2.Reserve(ctx, rf.host, peer.AddrInfo{ID: p})
rf.relayMx.Lock()
- defer rf.relayMx.Unlock()
-
if err != nil {
log.Debugw("failed to refresh relay slot reservation", "relay", p, "error", err)
-
+ _, exists := rf.relays[p]
delete(rf.relays, p)
// unprotect the connection
rf.host.ConnManager().Unprotect(p, autorelayTag)
+ rf.relayMx.Unlock()
+ if exists {
+ rf.metricsTracer.ReservationEnded(1)
+ }
return err
}
log.Debugw("refreshed relay slot reservation", "relay", p)
rf.relays[p] = rsvp
+ rf.relayMx.Unlock()
return nil
}
@@ -582,12 +671,32 @@ func (rf *relayFinder) usingRelay(p peer.ID) bool {
return ok
}
+// addCandidates adds a candidate to the candidates set. Assumes caller holds candidateMx mutex
+func (rf *relayFinder) addCandidate(cand *candidate) {
+ _, exists := rf.candidates[cand.ai.ID]
+ rf.candidates[cand.ai.ID] = cand
+ if !exists {
+ rf.metricsTracer.CandidateAdded(1)
+ }
+}
+
+func (rf *relayFinder) removeCandidate(id peer.ID) {
+ _, exists := rf.candidates[id]
+ if exists {
+ delete(rf.candidates, id)
+ rf.metricsTracer.CandidateRemoved(1)
+ }
+}
+
// selectCandidates returns an ordered slice of relay candidates.
// Callers should attempt to obtain reservations with the candidates in this order.
func (rf *relayFinder) selectCandidates() []*candidate {
+ now := rf.conf.clock.Now()
candidates := make([]*candidate, 0, len(rf.candidates))
for _, cand := range rf.candidates {
- candidates = append(candidates, cand)
+ if cand.added.Add(rf.conf.maxCandidateAge).After(now) {
+ candidates = append(candidates, cand)
+ }
}
// TODO: better relay selection strategy; this just selects random relays,
@@ -623,9 +732,10 @@ func (rf *relayFinder) relayAddrs(addrs []ma.Multiaddr) []ma.Multiaddr {
}
// add relay specific addrs to the list
+ relayAddrCnt := 0
for p := range rf.relays {
addrs := cleanupAddressSet(rf.host.Peerstore().Addrs(p))
-
+ relayAddrCnt += len(addrs)
circuit := ma.StringCast(fmt.Sprintf("/p2p/%s/p2p-circuit", p.Pretty()))
for _, addr := range addrs {
pub := addr.Encapsulate(circuit)
@@ -636,20 +746,20 @@ func (rf *relayFinder) relayAddrs(addrs []ma.Multiaddr) []ma.Multiaddr {
rf.cachedAddrs = raddrs
rf.cachedAddrsExpiry = rf.conf.clock.Now().Add(30 * time.Second)
+ rf.metricsTracer.RelayAddressCount(relayAddrCnt)
return raddrs
}
-func (rf *relayFinder) usesStaticRelay() bool {
- return len(rf.conf.staticRelays) > 0
-}
-
func (rf *relayFinder) Start() error {
rf.ctxCancelMx.Lock()
defer rf.ctxCancelMx.Unlock()
if rf.ctxCancel != nil {
- return errors.New("relayFinder already running")
+ return errAlreadyRunning
}
log.Debug("starting relay finder")
+
+ rf.initMetrics()
+
ctx, cancel := context.WithCancel(context.Background())
rf.ctxCancel = cancel
rf.refCount.Add(1)
@@ -669,5 +779,32 @@ func (rf *relayFinder) Stop() error {
}
rf.refCount.Wait()
rf.ctxCancel = nil
+
+ rf.resetMetrics()
return nil
}
+
+func (rf *relayFinder) initMetrics() {
+ rf.metricsTracer.DesiredReservations(rf.conf.desiredRelays)
+
+ rf.relayMx.Lock()
+ rf.metricsTracer.ReservationOpened(len(rf.relays))
+ rf.relayMx.Unlock()
+
+ rf.candidateMx.Lock()
+ rf.metricsTracer.CandidateAdded(len(rf.candidates))
+ rf.candidateMx.Unlock()
+}
+
+func (rf *relayFinder) resetMetrics() {
+ rf.relayMx.Lock()
+ rf.metricsTracer.ReservationEnded(len(rf.relays))
+ rf.relayMx.Unlock()
+
+ rf.candidateMx.Lock()
+ rf.metricsTracer.CandidateRemoved(len(rf.candidates))
+ rf.candidateMx.Unlock()
+
+ rf.metricsTracer.RelayAddressCount(0)
+ rf.metricsTracer.ScheduledWorkUpdated(&scheduledWorkTimes{})
+}
diff --git a/vendor/github.com/libp2p/go-libp2p/p2p/host/autorelay/timer.go b/vendor/github.com/libp2p/go-libp2p/p2p/host/autorelay/timer.go
deleted file mode 100644
index b55445530..000000000
--- a/vendor/github.com/libp2p/go-libp2p/p2p/host/autorelay/timer.go
+++ /dev/null
@@ -1,42 +0,0 @@
-package autorelay
-
-import (
- "time"
-
- "github.com/benbjohnson/clock"
-)
-
-type timer struct {
- timer *clock.Timer
- running bool
- read bool
-}
-
-func newTimer(cl clock.Clock) *timer {
- t := cl.Timer(100 * time.Hour) // There's no way to initialize a stopped timer
- t.Stop()
- return &timer{timer: t}
-}
-
-func (t *timer) Chan() <-chan time.Time {
- return t.timer.C
-}
-
-func (t *timer) Stop() {
- if !t.running {
- return
- }
- if !t.timer.Stop() && !t.read {
- <-t.timer.C
- }
- t.read = false
-}
-
-func (t *timer) SetRead() {
- t.read = true
-}
-
-func (t *timer) Reset(d time.Duration) {
- t.Stop()
- t.timer.Reset(d)
-}
diff --git a/vendor/github.com/libp2p/go-libp2p/p2p/host/basic/basic_host.go b/vendor/github.com/libp2p/go-libp2p/p2p/host/basic/basic_host.go
index d3642bd81..37cfa1099 100644
--- a/vendor/github.com/libp2p/go-libp2p/p2p/host/basic/basic_host.go
+++ b/vendor/github.com/libp2p/go-libp2p/p2p/host/basic/basic_host.go
@@ -18,30 +18,27 @@ import (
"github.com/libp2p/go-libp2p/core/peerstore"
"github.com/libp2p/go-libp2p/core/protocol"
"github.com/libp2p/go-libp2p/core/record"
+ "github.com/libp2p/go-libp2p/core/transport"
"github.com/libp2p/go-libp2p/p2p/host/autonat"
"github.com/libp2p/go-libp2p/p2p/host/eventbus"
"github.com/libp2p/go-libp2p/p2p/host/pstoremanager"
"github.com/libp2p/go-libp2p/p2p/host/relaysvc"
- inat "github.com/libp2p/go-libp2p/p2p/net/nat"
relayv2 "github.com/libp2p/go-libp2p/p2p/protocol/circuitv2/relay"
"github.com/libp2p/go-libp2p/p2p/protocol/holepunch"
"github.com/libp2p/go-libp2p/p2p/protocol/identify"
"github.com/libp2p/go-libp2p/p2p/protocol/ping"
+ libp2pwebtransport "github.com/libp2p/go-libp2p/p2p/transport/webtransport"
+ "github.com/prometheus/client_golang/prometheus"
"github.com/libp2p/go-netroute"
logging "github.com/ipfs/go-log/v2"
-
ma "github.com/multiformats/go-multiaddr"
madns "github.com/multiformats/go-multiaddr-dns"
manet "github.com/multiformats/go-multiaddr/net"
msmux "github.com/multiformats/go-multistream"
)
-// The maximum number of address resolution steps we'll perform for a single
-// peer (for all addresses).
-const maxAddressResolution = 32
-
// addrChangeTickrInterval is the interval between two address change ticks.
var addrChangeTickrInterval = 5 * time.Second
@@ -74,7 +71,7 @@ type BasicHost struct {
network network.Network
psManager *pstoremanager.PeerstoreManager
- mux *msmux.MultistreamMuxer
+ mux *msmux.MultistreamMuxer[protocol.ID]
ids identify.IDService
hps *holepunch.Service
pings *ping.PingService
@@ -111,8 +108,11 @@ var _ host.Host = (*BasicHost)(nil)
// HostOpts holds options that can be passed to NewHost in order to
// customize construction of the *BasicHost.
type HostOpts struct {
+ // EventBus sets the event bus. Will construct a new event bus if omitted.
+ EventBus event.Bus
+
// MultistreamMuxer is essential for the *BasicHost and will use a sensible default value if omitted.
- MultistreamMuxer *msmux.MultistreamMuxer
+ MultistreamMuxer *msmux.MultistreamMuxer[protocol.ID]
// NegotiationTimeout determines the read and write timeouts on streams.
// If 0 or omitted, it will use DefaultNegotiationTimeout.
@@ -145,6 +145,9 @@ type HostOpts struct {
// UserAgent sets the user-agent for the host.
UserAgent string
+ // ProtocolVersion sets the protocol version for the host.
+ ProtocolVersion string
+
// DisableSignedPeerRecord disables the generation of Signed Peer Records on this host.
DisableSignedPeerRecord bool
@@ -152,28 +155,36 @@ type HostOpts struct {
EnableHolePunching bool
// HolePunchingOptions are options for the hole punching service
HolePunchingOptions []holepunch.Option
+
+ // EnableMetrics enables the metrics subsystems
+ EnableMetrics bool
+ // PrometheusRegisterer is the PrometheusRegisterer used for metrics
+ PrometheusRegisterer prometheus.Registerer
}
// NewHost constructs a new *BasicHost and activates it by attaching its stream and connection handlers to the given inet.Network.
func NewHost(n network.Network, opts *HostOpts) (*BasicHost, error) {
- eventBus := eventbus.NewBus()
- psManager, err := pstoremanager.NewPeerstoreManager(n.Peerstore(), eventBus)
+ if opts == nil {
+ opts = &HostOpts{}
+ }
+ if opts.EventBus == nil {
+ opts.EventBus = eventbus.NewBus()
+ }
+
+ psManager, err := pstoremanager.NewPeerstoreManager(n.Peerstore(), opts.EventBus)
if err != nil {
return nil, err
}
hostCtx, cancel := context.WithCancel(context.Background())
- if opts == nil {
- opts = &HostOpts{}
- }
h := &BasicHost{
network: n,
psManager: psManager,
- mux: msmux.NewMultistreamMuxer(),
+ mux: msmux.NewMultistreamMuxer[protocol.ID](),
negtimeout: DefaultNegotiationTimeout,
AddrsFactory: DefaultAddrsFactory,
maResolver: madns.DefaultResolver,
- eventbus: eventBus,
+ eventbus: opts.EventBus,
addrChangeChan: make(chan struct{}, 1),
ctx: hostCtx,
ctxCancel: cancel,
@@ -182,17 +193,12 @@ func NewHost(n network.Network, opts *HostOpts) (*BasicHost, error) {
h.updateLocalIpAddr()
- if h.emitters.evtLocalProtocolsUpdated, err = h.eventbus.Emitter(&event.EvtLocalProtocolsUpdated{}); err != nil {
+ if h.emitters.evtLocalProtocolsUpdated, err = h.eventbus.Emitter(&event.EvtLocalProtocolsUpdated{}, eventbus.Stateful); err != nil {
return nil, err
}
if h.emitters.evtLocalAddrsUpdated, err = h.eventbus.Emitter(&event.EvtLocalAddressesUpdated{}, eventbus.Stateful); err != nil {
return nil, err
}
- evtPeerConnectednessChanged, err := h.eventbus.Emitter(&event.EvtPeerConnectednessChanged{})
- if err != nil {
- return nil, err
- }
- h.Network().Notify(newPeerConnectWatcher(evtPeerConnectednessChanged))
if !h.disableSignedPeerRecord {
cab, ok := peerstore.GetCertifiedAddrBook(n.Peerstore())
@@ -224,17 +230,33 @@ func NewHost(n network.Network, opts *HostOpts) (*BasicHost, error) {
h.mux = opts.MultistreamMuxer
}
+ idOpts := []identify.Option{
+ identify.UserAgent(opts.UserAgent),
+ identify.ProtocolVersion(opts.ProtocolVersion),
+ }
+
// we can't set this as a default above because it depends on the *BasicHost.
if h.disableSignedPeerRecord {
- h.ids, err = identify.NewIDService(h, identify.UserAgent(opts.UserAgent), identify.DisableSignedPeerRecord())
- } else {
- h.ids, err = identify.NewIDService(h, identify.UserAgent(opts.UserAgent))
+ idOpts = append(idOpts, identify.DisableSignedPeerRecord())
+ }
+ if opts.EnableMetrics {
+ idOpts = append(idOpts,
+ identify.WithMetricsTracer(
+ identify.NewMetricsTracer(identify.WithRegisterer(opts.PrometheusRegisterer))))
}
+
+ h.ids, err = identify.NewIDService(h, idOpts...)
if err != nil {
return nil, fmt.Errorf("failed to create Identify service: %s", err)
}
if opts.EnableHolePunching {
+ if opts.EnableMetrics {
+ hpOpts := []holepunch.Option{
+ holepunch.WithMetricsTracer(holepunch.NewMetricsTracer(holepunch.WithRegisterer(opts.PrometheusRegisterer)))}
+ opts.HolePunchingOptions = append(hpOpts, opts.HolePunchingOptions...)
+
+ }
h.hps, err = holepunch.NewService(h, h.ids, opts.HolePunchingOptions...)
if err != nil {
return nil, fmt.Errorf("failed to create hole punch service: %w", err)
@@ -265,6 +287,13 @@ func NewHost(n network.Network, opts *HostOpts) (*BasicHost, error) {
}
if opts.EnableRelayService {
+ if opts.EnableMetrics {
+ // Prefer explicitly provided metrics tracer
+ metricsOpt := []relayv2.Option{
+ relayv2.WithMetricsTracer(
+ relayv2.NewMetricsTracer(relayv2.WithRegisterer(opts.PrometheusRegisterer)))}
+ opts.RelayServiceOpts = append(metricsOpt, opts.RelayServiceOpts...)
+ }
h.relayManager = relaysvc.NewRelayManager(h, opts.RelayServiceOpts...)
}
@@ -359,6 +388,7 @@ func (h *BasicHost) updateLocalIpAddr() {
func (h *BasicHost) Start() {
h.psManager.Start()
h.refCount.Add(1)
+ h.ids.Start()
go h.background()
}
@@ -399,13 +429,13 @@ func (h *BasicHost) newStreamHandler(s network.Stream) {
}
}
- if err := s.SetProtocol(protocol.ID(protoID)); err != nil {
+ if err := s.SetProtocol(protoID); err != nil {
log.Debugf("error setting stream protocol: %s", err)
s.Reset()
return
}
- log.Debugf("protocol negotiation took %s", took)
+ log.Debugf("negotiated: %s (took %s)", protoID, took)
go handle(protoID, s)
}
@@ -563,9 +593,8 @@ func (h *BasicHost) EventBus() event.Bus {
//
// (Threadsafe)
func (h *BasicHost) SetStreamHandler(pid protocol.ID, handler network.StreamHandler) {
- h.Mux().AddHandler(string(pid), func(p string, rwc io.ReadWriteCloser) error {
+ h.Mux().AddHandler(pid, func(p protocol.ID, rwc io.ReadWriteCloser) error {
is := rwc.(network.Stream)
- is.SetProtocol(protocol.ID(p))
handler(is)
return nil
})
@@ -576,10 +605,9 @@ func (h *BasicHost) SetStreamHandler(pid protocol.ID, handler network.StreamHand
// SetStreamHandlerMatch sets the protocol handler on the Host's Mux
// using a matching function to do protocol comparisons
-func (h *BasicHost) SetStreamHandlerMatch(pid protocol.ID, m func(string) bool, handler network.StreamHandler) {
- h.Mux().AddHandlerWithFunc(string(pid), m, func(p string, rwc io.ReadWriteCloser) error {
+func (h *BasicHost) SetStreamHandlerMatch(pid protocol.ID, m func(protocol.ID) bool, handler network.StreamHandler) {
+ h.Mux().AddHandlerWithFunc(pid, m, func(p protocol.ID, rwc io.ReadWriteCloser) error {
is := rwc.(network.Stream)
- is.SetProtocol(protocol.ID(p))
handler(is)
return nil
})
@@ -590,7 +618,7 @@ func (h *BasicHost) SetStreamHandlerMatch(pid protocol.ID, m func(string) bool,
// RemoveStreamHandler returns ..
func (h *BasicHost) RemoveStreamHandler(pid protocol.ID) {
- h.Mux().RemoveHandler(string(pid))
+ h.Mux().RemoveHandler(pid)
h.emitters.evtLocalProtocolsUpdated.Emit(event.EvtLocalProtocolsUpdated{
Removed: []protocol.ID{pid},
})
@@ -612,8 +640,13 @@ func (h *BasicHost) NewStream(ctx context.Context, p peer.ID, pids ...protocol.I
}
}
- s, err := h.Network().NewStream(ctx, p)
+ s, err := h.Network().NewStream(network.WithNoDial(ctx, "already dialed"), p)
if err != nil {
+ // TODO: It would be nicer to get the actual error from the swarm,
+ // but this will require some more work.
+ if errors.Is(err, network.ErrNoConn) {
+ return nil, errors.New("connection failed")
+ }
return nil, err
}
@@ -629,9 +662,7 @@ func (h *BasicHost) NewStream(ctx context.Context, p peer.ID, pids ...protocol.I
return nil, ctx.Err()
}
- pidStrings := protocol.ConvertToStrings(pids)
-
- pref, err := h.preferredProtocol(p, pidStrings)
+ pref, err := h.preferredProtocol(p, pids)
if err != nil {
_ = s.Reset()
return nil, err
@@ -639,7 +670,7 @@ func (h *BasicHost) NewStream(ctx context.Context, p peer.ID, pids ...protocol.I
if pref != "" {
s.SetProtocol(pref)
- lzcon := msmux.NewMSSelect(s, string(pref))
+ lzcon := msmux.NewMSSelect(s, pref)
return &streamWrapper{
Stream: s,
rw: lzcon,
@@ -647,10 +678,10 @@ func (h *BasicHost) NewStream(ctx context.Context, p peer.ID, pids ...protocol.I
}
// Negotiate the protocol in the background, obeying the context.
- var selected string
+ var selected protocol.ID
errCh := make(chan error, 1)
go func() {
- selected, err = msmux.SelectOneOf(pidStrings, s)
+ selected, err = msmux.SelectOneOf(pids, s)
errCh <- err
}()
select {
@@ -666,13 +697,12 @@ func (h *BasicHost) NewStream(ctx context.Context, p peer.ID, pids ...protocol.I
return nil, ctx.Err()
}
- selpid := protocol.ID(selected)
- s.SetProtocol(selpid)
+ s.SetProtocol(selected)
h.Peerstore().AddProtocols(p, selected)
return s, nil
}
-func (h *BasicHost) preferredProtocol(p peer.ID, pids []string) (protocol.ID, error) {
+func (h *BasicHost) preferredProtocol(p peer.ID, pids []protocol.ID) (protocol.ID, error) {
supported, err := h.Peerstore().SupportsProtocols(p, pids...)
if err != nil {
return "", err
@@ -680,7 +710,7 @@ func (h *BasicHost) preferredProtocol(p peer.ID, pids []string) (protocol.ID, er
var out protocol.ID
if len(supported) > 0 {
- out = protocol.ID(supported[0])
+ out = supported[0]
}
return out, nil
}
@@ -701,77 +731,9 @@ func (h *BasicHost) Connect(ctx context.Context, pi peer.AddrInfo) error {
}
}
- resolved, err := h.resolveAddrs(ctx, h.Peerstore().PeerInfo(pi.ID))
- if err != nil {
- return err
- }
- h.Peerstore().AddAddrs(pi.ID, resolved, peerstore.TempAddrTTL)
-
return h.dialPeer(ctx, pi.ID)
}
-func (h *BasicHost) resolveAddrs(ctx context.Context, pi peer.AddrInfo) ([]ma.Multiaddr, error) {
- proto := ma.ProtocolWithCode(ma.P_P2P).Name
- p2paddr, err := ma.NewMultiaddr("/" + proto + "/" + pi.ID.Pretty())
- if err != nil {
- return nil, err
- }
-
- resolveSteps := 0
-
- // Recursively resolve all addrs.
- //
- // While the toResolve list is non-empty:
- // * Pop an address off.
- // * If the address is fully resolved, add it to the resolved list.
- // * Otherwise, resolve it and add the results to the "to resolve" list.
- toResolve := append(([]ma.Multiaddr)(nil), pi.Addrs...)
- resolved := make([]ma.Multiaddr, 0, len(pi.Addrs))
- for len(toResolve) > 0 {
- // pop the last addr off.
- addr := toResolve[len(toResolve)-1]
- toResolve = toResolve[:len(toResolve)-1]
-
- // if it's resolved, add it to the resolved list.
- if !madns.Matches(addr) {
- resolved = append(resolved, addr)
- continue
- }
-
- resolveSteps++
-
- // We've resolved too many addresses. We can keep all the fully
- // resolved addresses but we'll need to skip the rest.
- if resolveSteps >= maxAddressResolution {
- log.Warnf(
- "peer %s asked us to resolve too many addresses: %s/%s",
- pi.ID,
- resolveSteps,
- maxAddressResolution,
- )
- continue
- }
-
- // otherwise, resolve it
- reqaddr := addr.Encapsulate(p2paddr)
- resaddrs, err := h.maResolver.Resolve(ctx, reqaddr)
- if err != nil {
- log.Infof("error resolving %s: %s", reqaddr, err)
- }
-
- // add the results to the toResolve list.
- for _, res := range resaddrs {
- pi, err := peer.AddrInfoFromP2pAddr(res)
- if err != nil {
- log.Infof("error parsing %s: %s", res, err)
- }
- toResolve = append(toResolve, pi.Addrs...)
- }
- }
-
- return resolved, nil
-}
-
// dialPeer opens a connection to peer, and makes sure to identify
// the connection once it has been opened.
func (h *BasicHost) dialPeer(ctx context.Context, p peer.ID) error {
@@ -803,21 +765,56 @@ func (h *BasicHost) ConnManager() connmgr.ConnManager {
// Addrs returns listening addresses that are safe to announce to the network.
// The output is the same as AllAddrs, but processed by AddrsFactory.
func (h *BasicHost) Addrs() []ma.Multiaddr {
- return h.AddrsFactory(h.AllAddrs())
+ // This is a temporary workaround/hack that fixes #2233. Once we have a
+ // proper address pipeline, rework this. See the issue for more context.
+ type transportForListeninger interface {
+ TransportForListening(a ma.Multiaddr) transport.Transport
+ }
+
+ type addCertHasher interface {
+ AddCertHashes(m ma.Multiaddr) (ma.Multiaddr, bool)
+ }
+
+ addrs := h.AddrsFactory(h.AllAddrs())
+
+ s, ok := h.Network().(transportForListeninger)
+ if !ok {
+ return addrs
+ }
+
+ // Copy addrs slice since we'll be modifying it.
+ addrsOld := addrs
+ addrs = make([]ma.Multiaddr, len(addrsOld))
+ copy(addrs, addrsOld)
+
+ for i, addr := range addrs {
+ if ok, n := libp2pwebtransport.IsWebtransportMultiaddr(addr); ok && n == 0 {
+ t := s.TransportForListening(addr)
+ tpt, ok := t.(addCertHasher)
+ if !ok {
+ continue
+ }
+ addrWithCerthash, added := tpt.AddCertHashes(addr)
+ addrs[i] = addrWithCerthash
+ if !added {
+ log.Debug("Couldn't add certhashes to webtransport multiaddr because we aren't listening on webtransport")
+ }
+ }
+ }
+ return addrs
}
-// mergeAddrs merges input address lists, leave only unique addresses
-func dedupAddrs(addrs []ma.Multiaddr) (uniqueAddrs []ma.Multiaddr) {
- exists := make(map[string]bool)
- for _, addr := range addrs {
- k := string(addr.Bytes())
- if exists[k] {
- continue
+// NormalizeMultiaddr returns a multiaddr suitable for equality checks.
+// If the multiaddr is a webtransport component, it removes the certhashes.
+func (h *BasicHost) NormalizeMultiaddr(addr ma.Multiaddr) ma.Multiaddr {
+ if ok, n := libp2pwebtransport.IsWebtransportMultiaddr(addr); ok && n > 0 {
+ out := addr
+ for i := 0; i < n; i++ {
+ out, _ = ma.SplitLast(out)
}
- exists[k] = true
- uniqueAddrs = append(uniqueAddrs, addr)
+ return out
}
- return uniqueAddrs
+ return addr
}
// AllAddrs returns all the addresses of BasicHost at this moment in time.
@@ -831,7 +828,6 @@ func (h *BasicHost) AllAddrs() []ma.Multiaddr {
h.addrMu.RLock()
filteredIfaceAddrs := h.filteredInterfaceAddrs
allIfaceAddrs := h.allInterfaceAddrs
- autonat := h.autoNat
h.addrMu.RUnlock()
// Iterate over all _unresolved_ listen addresses, resolving our primary
@@ -845,114 +841,20 @@ func (h *BasicHost) AllAddrs() []ma.Multiaddr {
finalAddrs = append(finalAddrs, resolved...)
}
- // add autonat PublicAddr Consider the following scenario
- // For example, it is deployed on a cloud server,
- // it provides an elastic ip accessible to the public network,
- // but not have an external network card,
- // so net.InterfaceAddrs() not has the public ip
- // The host can indeed be dialed !!!
- if autonat != nil {
- publicAddr, _ := autonat.PublicAddr()
- if publicAddr != nil {
- finalAddrs = append(finalAddrs, publicAddr)
- }
- }
-
- finalAddrs = dedupAddrs(finalAddrs)
+ finalAddrs = network.DedupAddrs(finalAddrs)
- var natMappings []inat.Mapping
-
- // natmgr is nil if we do not use nat option;
- // h.natmgr.NAT() is nil if not ready, or no nat is available.
- if h.natmgr != nil && h.natmgr.NAT() != nil {
- natMappings = h.natmgr.NAT().Mappings()
- }
-
- if len(natMappings) > 0 {
+ // use nat mappings if we have them
+ if h.natmgr != nil && h.natmgr.HasDiscoveredNAT() {
// We have successfully mapped ports on our NAT. Use those
// instead of observed addresses (mostly).
-
- // First, generate a mapping table.
- // protocol -> internal port -> external addr
- ports := make(map[string]map[int]net.Addr)
- for _, m := range natMappings {
- addr, err := m.ExternalAddr()
- if err != nil {
- // mapping not ready yet.
- continue
- }
- protoPorts, ok := ports[m.Protocol()]
- if !ok {
- protoPorts = make(map[int]net.Addr)
- ports[m.Protocol()] = protoPorts
- }
- protoPorts[m.InternalPort()] = addr
- }
-
// Next, apply this mapping to our addresses.
for _, listen := range listenAddrs {
- found := false
- transport, rest := ma.SplitFunc(listen, func(c ma.Component) bool {
- if found {
- return true
- }
- switch c.Protocol().Code {
- case ma.P_TCP, ma.P_UDP:
- found = true
- }
- return false
- })
- if !manet.IsThinWaist(transport) {
- continue
- }
-
- naddr, err := manet.ToNetAddr(transport)
- if err != nil {
- log.Error("error parsing net multiaddr %q: %s", transport, err)
- continue
- }
-
- var (
- ip net.IP
- iport int
- protocol string
- )
- switch naddr := naddr.(type) {
- case *net.TCPAddr:
- ip = naddr.IP
- iport = naddr.Port
- protocol = "tcp"
- case *net.UDPAddr:
- ip = naddr.IP
- iport = naddr.Port
- protocol = "udp"
- default:
- continue
- }
-
- if !ip.IsGlobalUnicast() && !ip.IsUnspecified() {
- // We only map global unicast & unspecified addresses ports.
- // Not broadcast, multicast, etc.
- continue
- }
-
- mappedAddr, ok := ports[protocol][iport]
- if !ok {
- // Not mapped.
- continue
- }
-
- mappedMaddr, err := manet.FromNetAddr(mappedAddr)
- if err != nil {
- log.Errorf("mapped addr can't be turned into a multiaddr %q: %s", mappedAddr, err)
+ extMaddr := h.natmgr.GetMapping(listen)
+ if extMaddr == nil {
+ // not mapped
continue
}
- extMaddr := mappedMaddr
- if rest != nil {
- extMaddr = ma.Join(extMaddr, rest)
- }
-
// if the router reported a sane address
if !manet.IsIPUnspecified(extMaddr) {
// Add in the mapped addr.
@@ -1008,8 +910,83 @@ func (h *BasicHost) AllAddrs() []ma.Multiaddr {
}
finalAddrs = append(finalAddrs, observedAddrs...)
}
+ finalAddrs = network.DedupAddrs(finalAddrs)
+ finalAddrs = inferWebtransportAddrsFromQuic(finalAddrs)
+
+ return finalAddrs
+}
+
+var wtComponent = ma.StringCast("/webtransport")
+
+// inferWebtransportAddrsFromQuic infers more webtransport addresses from QUIC addresses.
+// This is useful when we discover our public QUIC address, but haven't discovered our public WebTransport addrs.
+// If we see that we are listening on the same port for QUIC and WebTransport,
+// we can be pretty sure that the WebTransport addr will be reachable if the
+// QUIC one is.
+// We assume the input is deduped.
+func inferWebtransportAddrsFromQuic(in []ma.Multiaddr) []ma.Multiaddr {
+ // We need to check if we are listening on the same ip+port for QUIC and WebTransport.
+ // If not, there's nothing to do since we can't infer anything.
+
+ // Count the number of QUIC addrs, this will let us allocate just once at the beginning.
+ quicAddrCount := 0
+ for _, addr := range in {
+ if _, lastComponent := ma.SplitLast(addr); lastComponent.Protocol().Code == ma.P_QUIC_V1 {
+ quicAddrCount++
+ }
+ }
+ quicOrWebtransportAddrs := make(map[string]struct{}, quicAddrCount)
+ webtransportAddrs := make(map[string]struct{}, quicAddrCount)
+ foundSameListeningAddr := false
+ for _, addr := range in {
+ isWebtransport, numCertHashes := libp2pwebtransport.IsWebtransportMultiaddr(addr)
+ if isWebtransport {
+ for i := 0; i < numCertHashes; i++ {
+ // Remove certhashes
+ addr, _ = ma.SplitLast(addr)
+ }
+ webtransportAddrs[addr.String()] = struct{}{}
+ // Remove webtransport component, now it's a multiaddr that ends in /quic-v1
+ addr, _ = ma.SplitLast(addr)
+ }
+
+ if _, lastComponent := ma.SplitLast(addr); lastComponent.Protocol().Code == ma.P_QUIC_V1 {
+ addrStr := addr.String()
+ if _, ok := quicOrWebtransportAddrs[addrStr]; ok {
+ foundSameListeningAddr = true
+ } else {
+ quicOrWebtransportAddrs[addrStr] = struct{}{}
+ }
+ }
+ }
+
+ if !foundSameListeningAddr {
+ return in
+ }
+
+ if len(webtransportAddrs) == 0 {
+ // No webtransport addresses, we aren't listening on any webtransport
+ // address, so we shouldn't add any.
+ return in
+ }
+
+ out := make([]ma.Multiaddr, 0, len(in)+(quicAddrCount-len(webtransportAddrs)))
+ for _, addr := range in {
+ // Add all the original addresses
+ out = append(out, addr)
+ if _, lastComponent := ma.SplitLast(addr); lastComponent.Protocol().Code == ma.P_QUIC_V1 {
+ // Convert quic to webtransport
+ addr = addr.Encapsulate(wtComponent)
+ if _, ok := webtransportAddrs[addr.String()]; ok {
+ // We already have this address
+ continue
+ }
+ // Add the new inferred address
+ out = append(out, addr)
+ }
+ }
- return dedupAddrs(finalAddrs)
+ return out
}
// SetAutoNat sets the autonat service for the host.
diff --git a/vendor/github.com/libp2p/go-libp2p/p2p/host/basic/mocks.go b/vendor/github.com/libp2p/go-libp2p/p2p/host/basic/mocks.go
new file mode 100644
index 000000000..3ad4d4e90
--- /dev/null
+++ b/vendor/github.com/libp2p/go-libp2p/p2p/host/basic/mocks.go
@@ -0,0 +1,6 @@
+//go:build gomock || generate
+
+package basichost
+
+//go:generate sh -c "go run github.com/golang/mock/mockgen -build_flags=\"-tags=gomock\" -package basichost -destination mock_nat_test.go github.com/libp2p/go-libp2p/p2p/host/basic NAT"
+type NAT nat
diff --git a/vendor/github.com/libp2p/go-libp2p/p2p/host/basic/natmgr.go b/vendor/github.com/libp2p/go-libp2p/p2p/host/basic/natmgr.go
index 782c116d4..8e8fbea34 100644
--- a/vendor/github.com/libp2p/go-libp2p/p2p/host/basic/natmgr.go
+++ b/vendor/github.com/libp2p/go-libp2p/p2p/host/basic/natmgr.go
@@ -4,6 +4,7 @@ import (
"context"
"io"
"net"
+ "net/netip"
"strconv"
"sync"
"time"
@@ -12,24 +13,38 @@ import (
inat "github.com/libp2p/go-libp2p/p2p/net/nat"
ma "github.com/multiformats/go-multiaddr"
+ manet "github.com/multiformats/go-multiaddr/net"
)
// NATManager is a simple interface to manage NAT devices.
+// It listens Listen and ListenClose notifications from the network.Network,
+// and tries to obtain port mappings for those.
type NATManager interface {
- // NAT gets the NAT device managed by the NAT manager.
- NAT() *inat.NAT
-
- // Ready receives a notification when the NAT device is ready for use.
- Ready() <-chan struct{}
-
+ GetMapping(ma.Multiaddr) ma.Multiaddr
+ HasDiscoveredNAT() bool
io.Closer
}
// NewNATManager creates a NAT manager.
func NewNATManager(net network.Network) NATManager {
- return newNatManager(net)
+ return newNATManager(net)
}
+type entry struct {
+ protocol string
+ port int
+}
+
+type nat interface {
+ AddMapping(ctx context.Context, protocol string, port int) error
+ RemoveMapping(ctx context.Context, protocol string, port int) error
+ GetMapping(protocol string, port int) (netip.AddrPort, bool)
+ io.Closer
+}
+
+// so we can mock it in tests
+var discoverNAT = func(ctx context.Context) (nat, error) { return inat.DiscoverNAT(ctx) }
+
// natManager takes care of adding + removing port mappings to the nat.
// Initialized with the host if it has a NATPortMap option enabled.
// natManager receives signals from the network, and check on nat mappings:
@@ -39,22 +54,25 @@ func NewNATManager(net network.Network) NATManager {
type natManager struct {
net network.Network
natMx sync.RWMutex
- nat *inat.NAT
+ nat nat
+
+ syncFlag chan struct{} // cap: 1
- ready chan struct{} // closed once the nat is ready to process port mappings
- syncFlag chan struct{}
+ tracked map[entry]bool // the bool is only used in doSync and has no meaning outside of that function
refCount sync.WaitGroup
+ ctx context.Context
ctxCancel context.CancelFunc
}
-func newNatManager(net network.Network) *natManager {
+func newNATManager(net network.Network) *natManager {
ctx, cancel := context.WithCancel(context.Background())
nmgr := &natManager{
net: net,
- ready: make(chan struct{}),
syncFlag: make(chan struct{}, 1),
+ ctx: ctx,
ctxCancel: cancel,
+ tracked: make(map[entry]bool),
}
nmgr.refCount.Add(1)
go nmgr.background(ctx)
@@ -69,10 +87,10 @@ func (nmgr *natManager) Close() error {
return nil
}
-// Ready returns a channel which will be closed when the NAT has been found
-// and is ready to be used, or the search process is done.
-func (nmgr *natManager) Ready() <-chan struct{} {
- return nmgr.ready
+func (nmgr *natManager) HasDiscoveredNAT() bool {
+ nmgr.natMx.RLock()
+ defer nmgr.natMx.RUnlock()
+ return nmgr.nat != nil
}
func (nmgr *natManager) background(ctx context.Context) {
@@ -80,25 +98,24 @@ func (nmgr *natManager) background(ctx context.Context) {
defer func() {
nmgr.natMx.Lock()
+ defer nmgr.natMx.Unlock()
+
if nmgr.nat != nil {
nmgr.nat.Close()
}
- nmgr.natMx.Unlock()
}()
discoverCtx, cancel := context.WithTimeout(ctx, 10*time.Second)
defer cancel()
- natInstance, err := inat.DiscoverNAT(discoverCtx)
+ natInstance, err := discoverNAT(discoverCtx)
if err != nil {
log.Info("DiscoverNAT error:", err)
- close(nmgr.ready)
return
}
nmgr.natMx.Lock()
nmgr.nat = natInstance
nmgr.natMx.Unlock()
- close(nmgr.ready)
// sign natManager up for network notifications
// we need to sign up here to avoid missing some notifs
@@ -127,10 +144,10 @@ func (nmgr *natManager) sync() {
// doSync syncs the current NAT mappings, removing any outdated mappings and adding any
// new mappings.
func (nmgr *natManager) doSync() {
- ports := map[string]map[int]bool{
- "tcp": {},
- "udp": {},
+ for e := range nmgr.tracked {
+ nmgr.tracked[e] = false
}
+ var newAddresses []entry
for _, maddr := range nmgr.net.ListenAddresses() {
// Strip the IP
maIP, rest := ma.SplitFirst(maddr)
@@ -144,10 +161,9 @@ func (nmgr *natManager) doSync() {
continue
}
- // Only bother if we're listening on a
- // unicast/unspecified IP.
+ // Only bother if we're listening on an unicast / unspecified IP.
ip := net.IP(maIP.RawValue())
- if !(ip.IsGlobalUnicast() || ip.IsUnspecified()) {
+ if !ip.IsGlobalUnicast() && !ip.IsUnspecified() {
continue
}
@@ -166,74 +182,118 @@ func (nmgr *natManager) doSync() {
default:
continue
}
-
port, err := strconv.ParseUint(proto.Value(), 10, 16)
if err != nil {
// bug in multiaddr
panic(err)
}
- ports[protocol][int(port)] = false
+ e := entry{protocol: protocol, port: int(port)}
+ if _, ok := nmgr.tracked[e]; ok {
+ nmgr.tracked[e] = true
+ } else {
+ newAddresses = append(newAddresses, e)
+ }
}
var wg sync.WaitGroup
defer wg.Wait()
// Close old mappings
- for _, m := range nmgr.nat.Mappings() {
- mappedPort := m.InternalPort()
- if _, ok := ports[m.Protocol()][mappedPort]; !ok {
- // No longer need this mapping.
- wg.Add(1)
- go func(m inat.Mapping) {
- defer wg.Done()
- m.Close()
- }(m)
- } else {
- // already mapped
- ports[m.Protocol()][mappedPort] = true
+ for e, v := range nmgr.tracked {
+ if !v {
+ nmgr.nat.RemoveMapping(nmgr.ctx, e.protocol, e.port)
+ delete(nmgr.tracked, e)
}
}
// Create new mappings.
- for proto, pports := range ports {
- for port, mapped := range pports {
- if mapped {
- continue
- }
- wg.Add(1)
- go func(proto string, port int) {
- defer wg.Done()
- _, err := nmgr.nat.NewMapping(proto, port)
- if err != nil {
- log.Errorf("failed to port-map %s port %d: %s", proto, port, err)
- }
- }(proto, port)
+ for _, e := range newAddresses {
+ if err := nmgr.nat.AddMapping(nmgr.ctx, e.protocol, e.port); err != nil {
+ log.Errorf("failed to port-map %s port %d: %s", e.protocol, e.port, err)
}
+ nmgr.tracked[e] = false
}
}
-// NAT returns the natManager's nat object. this may be nil, if
-// (a) the search process is still ongoing, or (b) the search process
-// found no nat. Clients must check whether the return value is nil.
-func (nmgr *natManager) NAT() *inat.NAT {
+func (nmgr *natManager) GetMapping(addr ma.Multiaddr) ma.Multiaddr {
nmgr.natMx.Lock()
defer nmgr.natMx.Unlock()
- return nmgr.nat
-}
-type nmgrNetNotifiee natManager
+ if nmgr.nat == nil { // NAT not yet initialized
+ return nil
+ }
-func (nn *nmgrNetNotifiee) natManager() *natManager {
- return (*natManager)(nn)
-}
+ var found bool
+ var proto int // ma.P_TCP or ma.P_UDP
+ transport, rest := ma.SplitFunc(addr, func(c ma.Component) bool {
+ if found {
+ return true
+ }
+ proto = c.Protocol().Code
+ found = proto == ma.P_TCP || proto == ma.P_UDP
+ return false
+ })
+ if !manet.IsThinWaist(transport) {
+ return nil
+ }
-func (nn *nmgrNetNotifiee) Listen(n network.Network, addr ma.Multiaddr) {
- nn.natManager().sync()
-}
+ naddr, err := manet.ToNetAddr(transport)
+ if err != nil {
+ log.Error("error parsing net multiaddr %q: %s", transport, err)
+ return nil
+ }
+
+ var (
+ ip net.IP
+ port int
+ protocol string
+ )
+ switch naddr := naddr.(type) {
+ case *net.TCPAddr:
+ ip = naddr.IP
+ port = naddr.Port
+ protocol = "tcp"
+ case *net.UDPAddr:
+ ip = naddr.IP
+ port = naddr.Port
+ protocol = "udp"
+ default:
+ return nil
+ }
+
+ if !ip.IsGlobalUnicast() && !ip.IsUnspecified() {
+ // We only map global unicast & unspecified addresses ports, not broadcast, multicast, etc.
+ return nil
+ }
+
+ extAddr, ok := nmgr.nat.GetMapping(protocol, port)
+ if !ok {
+ return nil
+ }
-func (nn *nmgrNetNotifiee) ListenClose(n network.Network, addr ma.Multiaddr) {
- nn.natManager().sync()
+ var mappedAddr net.Addr
+ switch naddr.(type) {
+ case *net.TCPAddr:
+ mappedAddr = net.TCPAddrFromAddrPort(extAddr)
+ case *net.UDPAddr:
+ mappedAddr = net.UDPAddrFromAddrPort(extAddr)
+ }
+ mappedMaddr, err := manet.FromNetAddr(mappedAddr)
+ if err != nil {
+ log.Errorf("mapped addr can't be turned into a multiaddr %q: %s", mappedAddr, err)
+ return nil
+ }
+ extMaddr := mappedMaddr
+ if rest != nil {
+ extMaddr = ma.Join(extMaddr, rest)
+ }
+ return extMaddr
}
-func (nn *nmgrNetNotifiee) Connected(network.Network, network.Conn) {}
-func (nn *nmgrNetNotifiee) Disconnected(network.Network, network.Conn) {}
+type nmgrNetNotifiee natManager
+
+func (nn *nmgrNetNotifiee) natManager() *natManager { return (*natManager)(nn) }
+func (nn *nmgrNetNotifiee) Listen(network.Network, ma.Multiaddr) { nn.natManager().sync() }
+func (nn *nmgrNetNotifiee) ListenClose(n network.Network, addr ma.Multiaddr) { nn.natManager().sync() }
+func (nn *nmgrNetNotifiee) Connected(network.Network, network.Conn) {}
+func (nn *nmgrNetNotifiee) Disconnected(network.Network, network.Conn) {}
diff --git a/vendor/github.com/libp2p/go-libp2p/p2p/host/basic/peer_connectedness.go b/vendor/github.com/libp2p/go-libp2p/p2p/host/basic/peer_connectedness.go
deleted file mode 100644
index bfc46ed8f..000000000
--- a/vendor/github.com/libp2p/go-libp2p/p2p/host/basic/peer_connectedness.go
+++ /dev/null
@@ -1,71 +0,0 @@
-package basichost
-
-import (
- "sync"
-
- "github.com/libp2p/go-libp2p/core/event"
- "github.com/libp2p/go-libp2p/core/network"
- "github.com/libp2p/go-libp2p/core/peer"
-
- ma "github.com/multiformats/go-multiaddr"
-)
-
-type peerConnectWatcher struct {
- emitter event.Emitter
-
- mutex sync.Mutex
- connected map[peer.ID]struct{}
-}
-
-var _ network.Notifiee = &peerConnectWatcher{}
-
-func newPeerConnectWatcher(emitter event.Emitter) *peerConnectWatcher {
- return &peerConnectWatcher{
- emitter: emitter,
- connected: make(map[peer.ID]struct{}),
- }
-}
-
-func (w *peerConnectWatcher) Listen(network.Network, ma.Multiaddr) {}
-func (w *peerConnectWatcher) ListenClose(network.Network, ma.Multiaddr) {}
-
-func (w *peerConnectWatcher) Connected(n network.Network, conn network.Conn) {
- p := conn.RemotePeer()
- w.handleTransition(p, n.Connectedness(p))
-}
-
-func (w *peerConnectWatcher) Disconnected(n network.Network, conn network.Conn) {
- p := conn.RemotePeer()
- w.handleTransition(p, n.Connectedness(p))
-}
-
-func (w *peerConnectWatcher) handleTransition(p peer.ID, state network.Connectedness) {
- if changed := w.checkTransition(p, state); !changed {
- return
- }
- w.emitter.Emit(event.EvtPeerConnectednessChanged{
- Peer: p,
- Connectedness: state,
- })
-}
-
-func (w *peerConnectWatcher) checkTransition(p peer.ID, state network.Connectedness) bool {
- w.mutex.Lock()
- defer w.mutex.Unlock()
- switch state {
- case network.Connected:
- if _, ok := w.connected[p]; ok {
- return false
- }
- w.connected[p] = struct{}{}
- return true
- case network.NotConnected:
- if _, ok := w.connected[p]; ok {
- delete(w.connected, p)
- return true
- }
- return false
- default:
- return false
- }
-}
diff --git a/vendor/github.com/libp2p/go-libp2p/p2p/host/blank/blank.go b/vendor/github.com/libp2p/go-libp2p/p2p/host/blank/blank.go
index 16753eb0d..9f3daeff2 100644
--- a/vendor/github.com/libp2p/go-libp2p/p2p/host/blank/blank.go
+++ b/vendor/github.com/libp2p/go-libp2p/p2p/host/blank/blank.go
@@ -27,7 +27,7 @@ var log = logging.Logger("blankhost")
// BlankHost is the thinnest implementation of the host.Host interface
type BlankHost struct {
n network.Network
- mux *mstream.MultistreamMuxer
+ mux *mstream.MultistreamMuxer[protocol.ID]
cmgr connmgr.ConnManager
eventbus event.Bus
emitters struct {
@@ -65,10 +65,10 @@ func NewBlankHost(n network.Network, options ...Option) *BlankHost {
bh := &BlankHost{
n: n,
cmgr: cfg.cmgr,
- mux: mstream.NewMultistreamMuxer(),
+ mux: mstream.NewMultistreamMuxer[protocol.ID](),
}
if bh.eventbus == nil {
- bh.eventbus = eventbus.NewBus()
+ bh.eventbus = eventbus.NewBus(eventbus.WithMetricsTracer(eventbus.NewMetricsTracer()))
}
// subscribe the connection manager to network notifications (has no effect with NullConnMgr)
@@ -78,11 +78,6 @@ func NewBlankHost(n network.Network, options ...Option) *BlankHost {
if bh.emitters.evtLocalProtocolsUpdated, err = bh.eventbus.Emitter(&event.EvtLocalProtocolsUpdated{}); err != nil {
return nil
}
- evtPeerConnectednessChanged, err := bh.eventbus.Emitter(&event.EvtPeerConnectednessChanged{})
- if err != nil {
- return nil
- }
- n.Notify(newPeerConnectWatcher(evtPeerConnectednessChanged))
n.SetStreamHandler(bh.newStreamHandler)
@@ -158,35 +153,29 @@ func (bh *BlankHost) NewStream(ctx context.Context, p peer.ID, protos ...protoco
return nil, err
}
- protoStrs := make([]string, len(protos))
- for i, pid := range protos {
- protoStrs[i] = string(pid)
- }
-
- selected, err := mstream.SelectOneOf(protoStrs, s)
+ selected, err := mstream.SelectOneOf(protos, s)
if err != nil {
s.Reset()
return nil, err
}
- selpid := protocol.ID(selected)
- s.SetProtocol(selpid)
+ s.SetProtocol(selected)
bh.Peerstore().AddProtocols(p, selected)
return s, nil
}
func (bh *BlankHost) RemoveStreamHandler(pid protocol.ID) {
- bh.Mux().RemoveHandler(string(pid))
+ bh.Mux().RemoveHandler(pid)
bh.emitters.evtLocalProtocolsUpdated.Emit(event.EvtLocalProtocolsUpdated{
Removed: []protocol.ID{pid},
})
}
func (bh *BlankHost) SetStreamHandler(pid protocol.ID, handler network.StreamHandler) {
- bh.Mux().AddHandler(string(pid), func(p string, rwc io.ReadWriteCloser) error {
+ bh.Mux().AddHandler(pid, func(p protocol.ID, rwc io.ReadWriteCloser) error {
is := rwc.(network.Stream)
- is.SetProtocol(protocol.ID(p))
+ is.SetProtocol(p)
handler(is)
return nil
})
@@ -195,10 +184,10 @@ func (bh *BlankHost) SetStreamHandler(pid protocol.ID, handler network.StreamHan
})
}
-func (bh *BlankHost) SetStreamHandlerMatch(pid protocol.ID, m func(string) bool, handler network.StreamHandler) {
- bh.Mux().AddHandlerWithFunc(string(pid), m, func(p string, rwc io.ReadWriteCloser) error {
+func (bh *BlankHost) SetStreamHandlerMatch(pid protocol.ID, m func(protocol.ID) bool, handler network.StreamHandler) {
+ bh.Mux().AddHandlerWithFunc(pid, m, func(p protocol.ID, rwc io.ReadWriteCloser) error {
is := rwc.(network.Stream)
- is.SetProtocol(protocol.ID(p))
+ is.SetProtocol(p)
handler(is)
return nil
})
@@ -216,7 +205,7 @@ func (bh *BlankHost) newStreamHandler(s network.Stream) {
return
}
- s.SetProtocol(protocol.ID(protoID))
+ s.SetProtocol(protoID)
go handle(protoID, s)
}
diff --git a/vendor/github.com/libp2p/go-libp2p/p2p/host/blank/peer_connectedness.go b/vendor/github.com/libp2p/go-libp2p/p2p/host/blank/peer_connectedness.go
deleted file mode 100644
index 4f70540fc..000000000
--- a/vendor/github.com/libp2p/go-libp2p/p2p/host/blank/peer_connectedness.go
+++ /dev/null
@@ -1,71 +0,0 @@
-package blankhost
-
-import (
- "sync"
-
- "github.com/libp2p/go-libp2p/core/event"
- "github.com/libp2p/go-libp2p/core/network"
- "github.com/libp2p/go-libp2p/core/peer"
-
- ma "github.com/multiformats/go-multiaddr"
-)
-
-type peerConnectWatcher struct {
- emitter event.Emitter
-
- mutex sync.Mutex
- connected map[peer.ID]struct{}
-}
-
-var _ network.Notifiee = &peerConnectWatcher{}
-
-func newPeerConnectWatcher(emitter event.Emitter) *peerConnectWatcher {
- return &peerConnectWatcher{
- emitter: emitter,
- connected: make(map[peer.ID]struct{}),
- }
-}
-
-func (w *peerConnectWatcher) Listen(network.Network, ma.Multiaddr) {}
-func (w *peerConnectWatcher) ListenClose(network.Network, ma.Multiaddr) {}
-
-func (w *peerConnectWatcher) Connected(n network.Network, conn network.Conn) {
- p := conn.RemotePeer()
- w.handleTransition(p, n.Connectedness(p))
-}
-
-func (w *peerConnectWatcher) Disconnected(n network.Network, conn network.Conn) {
- p := conn.RemotePeer()
- w.handleTransition(p, n.Connectedness(p))
-}
-
-func (w *peerConnectWatcher) handleTransition(p peer.ID, state network.Connectedness) {
- if changed := w.checkTransition(p, state); !changed {
- return
- }
- w.emitter.Emit(event.EvtPeerConnectednessChanged{
- Peer: p,
- Connectedness: state,
- })
-}
-
-func (w *peerConnectWatcher) checkTransition(p peer.ID, state network.Connectedness) bool {
- w.mutex.Lock()
- defer w.mutex.Unlock()
- switch state {
- case network.Connected:
- if _, ok := w.connected[p]; ok {
- return false
- }
- w.connected[p] = struct{}{}
- return true
- case network.NotConnected:
- if _, ok := w.connected[p]; ok {
- delete(w.connected, p)
- return true
- }
- return false
- default:
- return false
- }
-}
diff --git a/vendor/github.com/libp2p/go-libp2p/p2p/host/eventbus/LICENSE-APACHE b/vendor/github.com/libp2p/go-libp2p/p2p/host/eventbus/LICENSE-APACHE
deleted file mode 100644
index 14478a3b6..000000000
--- a/vendor/github.com/libp2p/go-libp2p/p2p/host/eventbus/LICENSE-APACHE
+++ /dev/null
@@ -1,5 +0,0 @@
-Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with the License. You may obtain a copy of the License at
-
-http://www.apache.org/licenses/LICENSE-2.0
-
-Unless required by applicable law or agreed to in writing, software distributed under the License is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the License for the specific language governing permissions and limitations under the License.
diff --git a/vendor/github.com/libp2p/go-libp2p/p2p/host/eventbus/basic.go b/vendor/github.com/libp2p/go-libp2p/p2p/host/eventbus/basic.go
index 6ab6c410a..42365a791 100644
--- a/vendor/github.com/libp2p/go-libp2p/p2p/host/eventbus/basic.go
+++ b/vendor/github.com/libp2p/go-libp2p/p2p/host/eventbus/basic.go
@@ -15,46 +15,56 @@ import (
// basicBus is a type-based event delivery system
type basicBus struct {
- lk sync.RWMutex
- nodes map[reflect.Type]*node
- wildcard *wildcardNode
+ lk sync.RWMutex
+ nodes map[reflect.Type]*node
+ wildcard *wildcardNode
+ metricsTracer MetricsTracer
}
var _ event.Bus = (*basicBus)(nil)
type emitter struct {
- n *node
- w *wildcardNode
- typ reflect.Type
- closed int32
- dropper func(reflect.Type)
+ n *node
+ w *wildcardNode
+ typ reflect.Type
+ closed atomic.Bool
+ dropper func(reflect.Type)
+ metricsTracer MetricsTracer
}
func (e *emitter) Emit(evt interface{}) error {
- if atomic.LoadInt32(&e.closed) != 0 {
+ if e.closed.Load() {
return fmt.Errorf("emitter is closed")
}
+
e.n.emit(evt)
e.w.emit(evt)
+ if e.metricsTracer != nil {
+ e.metricsTracer.EventEmitted(e.typ)
+ }
return nil
}
func (e *emitter) Close() error {
- if !atomic.CompareAndSwapInt32(&e.closed, 0, 1) {
+ if !e.closed.CompareAndSwap(false, true) {
return fmt.Errorf("closed an emitter more than once")
}
- if atomic.AddInt32(&e.n.nEmitters, -1) == 0 {
+ if e.n.nEmitters.Add(-1) == 0 {
e.dropper(e.typ)
}
return nil
}
-func NewBus() event.Bus {
- return &basicBus{
+func NewBus(opts ...Option) event.Bus {
+ bus := &basicBus{
nodes: map[reflect.Type]*node{},
- wildcard: new(wildcardNode),
+ wildcard: &wildcardNode{},
+ }
+ for _, opt := range opts {
+ opt(bus)
}
+ return bus
}
func (b *basicBus) withNode(typ reflect.Type, cb func(*node), async func(*node)) {
@@ -62,7 +72,7 @@ func (b *basicBus) withNode(typ reflect.Type, cb func(*node), async func(*node))
n, ok := b.nodes[typ]
if !ok {
- n = newNode(typ)
+ n = newNode(typ, b.metricsTracer)
b.nodes[typ] = n
}
@@ -90,7 +100,7 @@ func (b *basicBus) tryDropNode(typ reflect.Type) {
}
n.lk.Lock()
- if atomic.LoadInt32(&n.nEmitters) > 0 || len(n.sinks) > 0 {
+ if n.nEmitters.Load() > 0 || len(n.sinks) > 0 {
n.lk.Unlock()
b.lk.Unlock()
return // still in use
@@ -102,8 +112,10 @@ func (b *basicBus) tryDropNode(typ reflect.Type) {
}
type wildcardSub struct {
- ch chan interface{}
- w *wildcardNode
+ ch chan interface{}
+ w *wildcardNode
+ metricsTracer MetricsTracer
+ name string
}
func (w *wildcardSub) Out() <-chan interface{} {
@@ -112,13 +124,31 @@ func (w *wildcardSub) Out() <-chan interface{} {
func (w *wildcardSub) Close() error {
w.w.removeSink(w.ch)
+ if w.metricsTracer != nil {
+ w.metricsTracer.RemoveSubscriber(reflect.TypeOf(event.WildcardSubscription))
+ }
return nil
}
+func (w *wildcardSub) Name() string {
+ return w.name
+}
+
+type namedSink struct {
+ name string
+ ch chan interface{}
+}
+
type sub struct {
- ch chan interface{}
- nodes []*node
- dropper func(reflect.Type)
+ ch chan interface{}
+ nodes []*node
+ dropper func(reflect.Type)
+ metricsTracer MetricsTracer
+ name string
+}
+
+func (s *sub) Name() string {
+ return s.name
}
func (s *sub) Out() <-chan interface{} {
@@ -137,14 +167,18 @@ func (s *sub) Close() error {
n.lk.Lock()
for i := 0; i < len(n.sinks); i++ {
- if n.sinks[i] == s.ch {
+ if n.sinks[i].ch == s.ch {
n.sinks[i], n.sinks[len(n.sinks)-1] = n.sinks[len(n.sinks)-1], nil
n.sinks = n.sinks[:len(n.sinks)-1]
+
+ if s.metricsTracer != nil {
+ s.metricsTracer.RemoveSubscriber(n.typ)
+ }
break
}
}
- tryDrop := len(n.sinks) == 0 && atomic.LoadInt32(&n.nEmitters) == 0
+ tryDrop := len(n.sinks) == 0 && n.nEmitters.Load() == 0
n.lk.Unlock()
@@ -162,7 +196,7 @@ var _ event.Subscription = (*sub)(nil)
// publishers to get blocked. CancelFunc is guaranteed to return after last send
// to the channel
func (b *basicBus) Subscribe(evtTypes interface{}, opts ...event.SubscriptionOpt) (_ event.Subscription, err error) {
- settings := subSettingsDefault
+ settings := newSubSettings()
for _, opt := range opts {
if err := opt(&settings); err != nil {
return nil, err
@@ -171,10 +205,12 @@ func (b *basicBus) Subscribe(evtTypes interface{}, opts ...event.SubscriptionOpt
if evtTypes == event.WildcardSubscription {
out := &wildcardSub{
- ch: make(chan interface{}, settings.buffer),
- w: b.wildcard,
+ ch: make(chan interface{}, settings.buffer),
+ w: b.wildcard,
+ metricsTracer: b.metricsTracer,
+ name: settings.name,
}
- b.wildcard.addSink(out.ch)
+ b.wildcard.addSink(&namedSink{ch: out.ch, name: out.name})
return out, nil
}
@@ -195,7 +231,9 @@ func (b *basicBus) Subscribe(evtTypes interface{}, opts ...event.SubscriptionOpt
ch: make(chan interface{}, settings.buffer),
nodes: make([]*node, len(types)),
- dropper: b.tryDropNode,
+ dropper: b.tryDropNode,
+ metricsTracer: b.metricsTracer,
+ name: settings.name,
}
for _, etyp := range types {
@@ -208,8 +246,11 @@ func (b *basicBus) Subscribe(evtTypes interface{}, opts ...event.SubscriptionOpt
typ := reflect.TypeOf(etyp)
b.withNode(typ.Elem(), func(n *node) {
- n.sinks = append(n.sinks, out.ch)
+ n.sinks = append(n.sinks, &namedSink{ch: out.ch, name: out.name})
out.nodes[i] = n
+ if b.metricsTracer != nil {
+ b.metricsTracer.AddSubscriber(typ.Elem())
+ }
}, func(n *node) {
if n.keepLast {
l := n.last
@@ -253,9 +294,9 @@ func (b *basicBus) Emitter(evtType interface{}, opts ...event.EmitterOpt) (e eve
typ = typ.Elem()
b.withNode(typ, func(n *node) {
- atomic.AddInt32(&n.nEmitters, 1)
+ n.nEmitters.Add(1)
n.keepLast = n.keepLast || settings.makeStateful
- e = &emitter{n: n, typ: typ, dropper: b.tryDropNode, w: b.wildcard}
+ e = &emitter{n: n, typ: typ, dropper: b.tryDropNode, w: b.wildcard, metricsTracer: b.metricsTracer}
}, nil)
return
}
@@ -278,22 +319,27 @@ func (b *basicBus) GetAllEventTypes() []reflect.Type {
type wildcardNode struct {
sync.RWMutex
- nSinks int32
- sinks []chan interface{}
+ nSinks atomic.Int32
+ sinks []*namedSink
+ metricsTracer MetricsTracer
}
-func (n *wildcardNode) addSink(ch chan interface{}) {
- atomic.AddInt32(&n.nSinks, 1) // ok to do outside the lock
+func (n *wildcardNode) addSink(sink *namedSink) {
+ n.nSinks.Add(1) // ok to do outside the lock
n.Lock()
- n.sinks = append(n.sinks, ch)
+ n.sinks = append(n.sinks, sink)
n.Unlock()
+
+ if n.metricsTracer != nil {
+ n.metricsTracer.AddSubscriber(reflect.TypeOf(event.WildcardSubscription))
+ }
}
func (n *wildcardNode) removeSink(ch chan interface{}) {
- atomic.AddInt32(&n.nSinks, -1) // ok to do outside the lock
+ n.nSinks.Add(-1) // ok to do outside the lock
n.Lock()
for i := 0; i < len(n.sinks); i++ {
- if n.sinks[i] == ch {
+ if n.sinks[i].ch == ch {
n.sinks[i], n.sinks[len(n.sinks)-1] = n.sinks[len(n.sinks)-1], nil
n.sinks = n.sinks[:len(n.sinks)-1]
break
@@ -303,13 +349,18 @@ func (n *wildcardNode) removeSink(ch chan interface{}) {
}
func (n *wildcardNode) emit(evt interface{}) {
- if atomic.LoadInt32(&n.nSinks) == 0 {
+ if n.nSinks.Load() == 0 {
return
}
n.RLock()
- for _, ch := range n.sinks {
- ch <- evt
+ for _, sink := range n.sinks {
+
+ // Sending metrics before sending on channel allows us to
+ // record channel full events before blocking
+ sendSubscriberMetrics(n.metricsTracer, sink)
+
+ sink.ch <- evt
}
n.RUnlock()
}
@@ -321,17 +372,19 @@ type node struct {
typ reflect.Type
// emitter ref count
- nEmitters int32
+ nEmitters atomic.Int32
keepLast bool
last interface{}
- sinks []chan interface{}
+ sinks []*namedSink
+ metricsTracer MetricsTracer
}
-func newNode(typ reflect.Type) *node {
+func newNode(typ reflect.Type, metricsTracer MetricsTracer) *node {
return &node{
- typ: typ,
+ typ: typ,
+ metricsTracer: metricsTracer,
}
}
@@ -346,8 +399,20 @@ func (n *node) emit(evt interface{}) {
n.last = evt
}
- for _, ch := range n.sinks {
- ch <- evt
+ for _, sink := range n.sinks {
+
+ // Sending metrics before sending on channel allows us to
+ // record channel full events before blocking
+ sendSubscriberMetrics(n.metricsTracer, sink)
+ sink.ch <- evt
}
n.lk.Unlock()
}
+
+func sendSubscriberMetrics(metricsTracer MetricsTracer, sink *namedSink) {
+ if metricsTracer != nil {
+ metricsTracer.SubscriberQueueLength(sink.name, len(sink.ch)+1)
+ metricsTracer.SubscriberQueueFull(sink.name, len(sink.ch)+1 >= cap(sink.ch))
+ metricsTracer.SubscriberEventQueued(sink.name)
+ }
+}
diff --git a/vendor/github.com/libp2p/go-libp2p/p2p/host/eventbus/basic_metrics.go b/vendor/github.com/libp2p/go-libp2p/p2p/host/eventbus/basic_metrics.go
new file mode 100644
index 000000000..8e7b1e88d
--- /dev/null
+++ b/vendor/github.com/libp2p/go-libp2p/p2p/host/eventbus/basic_metrics.go
@@ -0,0 +1,164 @@
+package eventbus
+
+import (
+ "reflect"
+ "strings"
+
+ "github.com/libp2p/go-libp2p/p2p/metricshelper"
+
+ "github.com/prometheus/client_golang/prometheus"
+)
+
+const metricNamespace = "libp2p_eventbus"
+
+var (
+ eventsEmitted = prometheus.NewCounterVec(
+ prometheus.CounterOpts{
+ Namespace: metricNamespace,
+ Name: "events_emitted_total",
+ Help: "Events Emitted",
+ },
+ []string{"event"},
+ )
+ totalSubscribers = prometheus.NewGaugeVec(
+ prometheus.GaugeOpts{
+ Namespace: metricNamespace,
+ Name: "subscribers_total",
+ Help: "Number of subscribers for an event type",
+ },
+ []string{"event"},
+ )
+ subscriberQueueLength = prometheus.NewGaugeVec(
+ prometheus.GaugeOpts{
+ Namespace: metricNamespace,
+ Name: "subscriber_queue_length",
+ Help: "Subscriber queue length",
+ },
+ []string{"subscriber_name"},
+ )
+ subscriberQueueFull = prometheus.NewGaugeVec(
+ prometheus.GaugeOpts{
+ Namespace: metricNamespace,
+ Name: "subscriber_queue_full",
+ Help: "Subscriber Queue completely full",
+ },
+ []string{"subscriber_name"},
+ )
+ subscriberEventQueued = prometheus.NewCounterVec(
+ prometheus.CounterOpts{
+ Namespace: metricNamespace,
+ Name: "subscriber_event_queued",
+ Help: "Event Queued for subscriber",
+ },
+ []string{"subscriber_name"},
+ )
+ collectors = []prometheus.Collector{
+ eventsEmitted,
+ totalSubscribers,
+ subscriberQueueLength,
+ subscriberQueueFull,
+ subscriberEventQueued,
+ }
+)
+
+// MetricsTracer tracks metrics for the eventbus subsystem
+type MetricsTracer interface {
+
+ // EventEmitted counts the total number of events grouped by event type
+ EventEmitted(typ reflect.Type)
+
+ // AddSubscriber adds a subscriber for the event type
+ AddSubscriber(typ reflect.Type)
+
+ // RemoveSubscriber removes a subscriber for the event type
+ RemoveSubscriber(typ reflect.Type)
+
+ // SubscriberQueueLength is the length of the subscribers channel
+ SubscriberQueueLength(name string, n int)
+
+	// SubscriberQueueFull tracks whether a subscriber's channel is full
+ SubscriberQueueFull(name string, isFull bool)
+
+ // SubscriberEventQueued counts the total number of events grouped by subscriber
+ SubscriberEventQueued(name string)
+}
+
+type metricsTracer struct{}
+
+var _ MetricsTracer = &metricsTracer{}
+
+type metricsTracerSetting struct {
+ reg prometheus.Registerer
+}
+
+type MetricsTracerOption func(*metricsTracerSetting)
+
+func WithRegisterer(reg prometheus.Registerer) MetricsTracerOption {
+ return func(s *metricsTracerSetting) {
+ if reg != nil {
+ s.reg = reg
+ }
+ }
+}
+
+func NewMetricsTracer(opts ...MetricsTracerOption) MetricsTracer {
+ setting := &metricsTracerSetting{reg: prometheus.DefaultRegisterer}
+ for _, opt := range opts {
+ opt(setting)
+ }
+ metricshelper.RegisterCollectors(setting.reg, collectors...)
+ return &metricsTracer{}
+}
+
+func (m *metricsTracer) EventEmitted(typ reflect.Type) {
+ tags := metricshelper.GetStringSlice()
+ defer metricshelper.PutStringSlice(tags)
+
+ *tags = append(*tags, strings.TrimPrefix(typ.String(), "event."))
+ eventsEmitted.WithLabelValues(*tags...).Inc()
+}
+
+func (m *metricsTracer) AddSubscriber(typ reflect.Type) {
+ tags := metricshelper.GetStringSlice()
+ defer metricshelper.PutStringSlice(tags)
+
+ *tags = append(*tags, strings.TrimPrefix(typ.String(), "event."))
+ totalSubscribers.WithLabelValues(*tags...).Inc()
+}
+
+func (m *metricsTracer) RemoveSubscriber(typ reflect.Type) {
+ tags := metricshelper.GetStringSlice()
+ defer metricshelper.PutStringSlice(tags)
+
+ *tags = append(*tags, strings.TrimPrefix(typ.String(), "event."))
+ totalSubscribers.WithLabelValues(*tags...).Dec()
+}
+
+func (m *metricsTracer) SubscriberQueueLength(name string, n int) {
+ tags := metricshelper.GetStringSlice()
+ defer metricshelper.PutStringSlice(tags)
+
+ *tags = append(*tags, name)
+ subscriberQueueLength.WithLabelValues(*tags...).Set(float64(n))
+}
+
+func (m *metricsTracer) SubscriberQueueFull(name string, isFull bool) {
+ tags := metricshelper.GetStringSlice()
+ defer metricshelper.PutStringSlice(tags)
+
+ *tags = append(*tags, name)
+ observer := subscriberQueueFull.WithLabelValues(*tags...)
+ if isFull {
+ observer.Set(1)
+ } else {
+ observer.Set(0)
+ }
+}
+
+func (m *metricsTracer) SubscriberEventQueued(name string) {
+ tags := metricshelper.GetStringSlice()
+ defer metricshelper.PutStringSlice(tags)
+
+ *tags = append(*tags, name)
+ subscriberEventQueued.WithLabelValues(*tags...).Inc()
+}
diff --git a/vendor/github.com/libp2p/go-libp2p/p2p/host/eventbus/opts.go b/vendor/github.com/libp2p/go-libp2p/p2p/host/eventbus/opts.go
index a8eae6f2f..837a0683f 100644
--- a/vendor/github.com/libp2p/go-libp2p/p2p/host/eventbus/opts.go
+++ b/vendor/github.com/libp2p/go-libp2p/p2p/host/eventbus/opts.go
@@ -1,13 +1,44 @@
package eventbus
+import (
+ "fmt"
+ "runtime"
+ "strings"
+ "sync/atomic"
+)
+
type subSettings struct {
buffer int
+ name string
}
+var subCnt atomic.Int64
+
var subSettingsDefault = subSettings{
buffer: 16,
}
+// newSubSettings returns the settings for a new subscriber
+// The default naming strategy is sub-<file-name>-L<line-number>
+func newSubSettings() subSettings {
+ settings := subSettingsDefault
+ _, file, line, ok := runtime.Caller(2) // skip=1 is eventbus.Subscriber
+ if ok {
+ file = strings.TrimPrefix(file, "github.com/")
+ // remove the version number from the path, for example
+		// go-libp2p-package@v0.x.y-some-hash-123/file.go will be shortened to go-libp2p-package/file.go
+ if idx1 := strings.Index(file, "@"); idx1 != -1 {
+ if idx2 := strings.Index(file[idx1:], "/"); idx2 != -1 {
+ file = file[:idx1] + file[idx1+idx2:]
+ }
+ }
+ settings.name = fmt.Sprintf("%s-L%d", file, line)
+ } else {
+ settings.name = fmt.Sprintf("subscriber-%d", subCnt.Add(1))
+ }
+ return settings
+}
+
func BufSize(n int) func(interface{}) error {
return func(s interface{}) error {
s.(*subSettings).buffer = n
@@ -15,6 +46,13 @@ func BufSize(n int) func(interface{}) error {
}
}
+func Name(name string) func(interface{}) error {
+ return func(s interface{}) error {
+ s.(*subSettings).name = name
+ return nil
+ }
+}
+
type emitterSettings struct {
makeStateful bool
}
@@ -30,3 +68,12 @@ func Stateful(s interface{}) error {
s.(*emitterSettings).makeStateful = true
return nil
}
+
+type Option func(*basicBus)
+
+func WithMetricsTracer(metricsTracer MetricsTracer) Option {
+ return func(bus *basicBus) {
+ bus.metricsTracer = metricsTracer
+ bus.wildcard.metricsTracer = metricsTracer
+ }
+}
diff --git a/vendor/github.com/libp2p/go-libp2p/p2p/host/peerstore/pstoremem/addr_book.go b/vendor/github.com/libp2p/go-libp2p/p2p/host/peerstore/pstoremem/addr_book.go
index 28aa6d4d3..67f9f9146 100644
--- a/vendor/github.com/libp2p/go-libp2p/p2p/host/peerstore/pstoremem/addr_book.go
+++ b/vendor/github.com/libp2p/go-libp2p/p2p/host/peerstore/pstoremem/addr_book.go
@@ -46,7 +46,10 @@ type addrSegment struct {
}
func (segments *addrSegments) get(p peer.ID) *addrSegment {
- return segments[byte(p[len(p)-1])]
+ if len(p) == 0 { // it's not terribly useful to use an empty peer ID, but at least we should not panic
+ return segments[0]
+ }
+ return segments[uint8(p[len(p)-1])]
}
type clock interface {
@@ -235,11 +238,16 @@ func (mab *memoryAddrBook) addAddrsUnlocked(s *addrSegment, p peer.ID, addrs []m
exp := mab.clock.Now().Add(ttl)
for _, addr := range addrs {
+ // Remove suffix of /p2p/peer-id from address
+ addr, addrPid := peer.SplitAddr(addr)
if addr == nil {
- log.Warnw("was passed nil multiaddr", "peer", p)
+ log.Warnw("Was passed nil multiaddr", "peer", p)
+ continue
+ }
+ if addrPid != "" && addrPid != p {
+ log.Warnf("Was passed p2p address with a different peerId. found: %s, expected: %s", addrPid, p)
continue
}
-
// find the highest TTL and Expiry time between
// existing records and function args
a, found := amap[string(addr.Bytes())] // won't allocate.
@@ -280,10 +288,15 @@ func (mab *memoryAddrBook) SetAddrs(p peer.ID, addrs []ma.Multiaddr, ttl time.Du
exp := mab.clock.Now().Add(ttl)
for _, addr := range addrs {
+ addr, addrPid := peer.SplitAddr(addr)
if addr == nil {
log.Warnw("was passed nil multiaddr", "peer", p)
continue
}
+ if addrPid != "" && addrPid != p {
+ log.Warnf("was passed p2p address with a different peerId, found: %s wanted: %s", addrPid, p)
+ continue
+ }
aBytes := addr.Bytes()
key := string(aBytes)
diff --git a/vendor/github.com/libp2p/go-libp2p/p2p/host/peerstore/pstoremem/protobook.go b/vendor/github.com/libp2p/go-libp2p/p2p/host/peerstore/pstoremem/protobook.go
index 7a955c076..0000f97ff 100644
--- a/vendor/github.com/libp2p/go-libp2p/p2p/host/peerstore/pstoremem/protobook.go
+++ b/vendor/github.com/libp2p/go-libp2p/p2p/host/peerstore/pstoremem/protobook.go
@@ -6,11 +6,12 @@ import (
"github.com/libp2p/go-libp2p/core/peer"
pstore "github.com/libp2p/go-libp2p/core/peerstore"
+ "github.com/libp2p/go-libp2p/core/protocol"
)
type protoSegment struct {
sync.RWMutex
- protocols map[peer.ID]map[string]struct{}
+ protocols map[peer.ID]map[protocol.ID]struct{}
}
type protoSegments [256]*protoSegment
@@ -27,7 +28,7 @@ type memoryProtoBook struct {
maxProtos int
lk sync.RWMutex
- interned map[string]string
+ interned map[protocol.ID]protocol.ID
}
var _ pstore.ProtoBook = (*memoryProtoBook)(nil)
@@ -43,11 +44,11 @@ func WithMaxProtocols(num int) ProtoBookOption {
func NewProtoBook(opts ...ProtoBookOption) (*memoryProtoBook, error) {
pb := &memoryProtoBook{
- interned: make(map[string]string, 256),
+ interned: make(map[protocol.ID]protocol.ID, 256),
segments: func() (ret protoSegments) {
for i := range ret {
ret[i] = &protoSegment{
- protocols: make(map[peer.ID]map[string]struct{}),
+ protocols: make(map[peer.ID]map[protocol.ID]struct{}),
}
}
return ret
@@ -63,7 +64,7 @@ func NewProtoBook(opts ...ProtoBookOption) (*memoryProtoBook, error) {
return pb, nil
}
-func (pb *memoryProtoBook) internProtocol(proto string) string {
+func (pb *memoryProtoBook) internProtocol(proto protocol.ID) protocol.ID {
// check if it is interned with the read lock
pb.lk.RLock()
interned, ok := pb.interned[proto]
@@ -87,12 +88,12 @@ func (pb *memoryProtoBook) internProtocol(proto string) string {
return proto
}
-func (pb *memoryProtoBook) SetProtocols(p peer.ID, protos ...string) error {
+func (pb *memoryProtoBook) SetProtocols(p peer.ID, protos ...protocol.ID) error {
if len(protos) > pb.maxProtos {
return errTooManyProtocols
}
- newprotos := make(map[string]struct{}, len(protos))
+ newprotos := make(map[protocol.ID]struct{}, len(protos))
for _, proto := range protos {
newprotos[pb.internProtocol(proto)] = struct{}{}
}
@@ -105,14 +106,14 @@ func (pb *memoryProtoBook) SetProtocols(p peer.ID, protos ...string) error {
return nil
}
-func (pb *memoryProtoBook) AddProtocols(p peer.ID, protos ...string) error {
+func (pb *memoryProtoBook) AddProtocols(p peer.ID, protos ...protocol.ID) error {
s := pb.segments.get(p)
s.Lock()
defer s.Unlock()
protomap, ok := s.protocols[p]
if !ok {
- protomap = make(map[string]struct{})
+ protomap = make(map[protocol.ID]struct{})
s.protocols[p] = protomap
}
if len(protomap)+len(protos) > pb.maxProtos {
@@ -125,12 +126,12 @@ func (pb *memoryProtoBook) AddProtocols(p peer.ID, protos ...string) error {
return nil
}
-func (pb *memoryProtoBook) GetProtocols(p peer.ID) ([]string, error) {
+func (pb *memoryProtoBook) GetProtocols(p peer.ID) ([]protocol.ID, error) {
s := pb.segments.get(p)
s.RLock()
defer s.RUnlock()
- out := make([]string, 0, len(s.protocols[p]))
+ out := make([]protocol.ID, 0, len(s.protocols[p]))
for k := range s.protocols[p] {
out = append(out, k)
}
@@ -138,7 +139,7 @@ func (pb *memoryProtoBook) GetProtocols(p peer.ID) ([]string, error) {
return out, nil
}
-func (pb *memoryProtoBook) RemoveProtocols(p peer.ID, protos ...string) error {
+func (pb *memoryProtoBook) RemoveProtocols(p peer.ID, protos ...protocol.ID) error {
s := pb.segments.get(p)
s.Lock()
defer s.Unlock()
@@ -155,12 +156,12 @@ func (pb *memoryProtoBook) RemoveProtocols(p peer.ID, protos ...string) error {
return nil
}
-func (pb *memoryProtoBook) SupportsProtocols(p peer.ID, protos ...string) ([]string, error) {
+func (pb *memoryProtoBook) SupportsProtocols(p peer.ID, protos ...protocol.ID) ([]protocol.ID, error) {
s := pb.segments.get(p)
s.RLock()
defer s.RUnlock()
- out := make([]string, 0, len(protos))
+ out := make([]protocol.ID, 0, len(protos))
for _, proto := range protos {
if _, ok := s.protocols[p][proto]; ok {
out = append(out, proto)
@@ -170,7 +171,7 @@ func (pb *memoryProtoBook) SupportsProtocols(p peer.ID, protos ...string) ([]str
return out, nil
}
-func (pb *memoryProtoBook) FirstSupportedProtocol(p peer.ID, protos ...string) (string, error) {
+func (pb *memoryProtoBook) FirstSupportedProtocol(p peer.ID, protos ...protocol.ID) (protocol.ID, error) {
s := pb.segments.get(p)
s.RLock()
defer s.RUnlock()
diff --git a/vendor/github.com/libp2p/go-libp2p/p2p/host/pstoremanager/pstoremanager.go b/vendor/github.com/libp2p/go-libp2p/p2p/host/pstoremanager/pstoremanager.go
index f8382c709..d9550f494 100644
--- a/vendor/github.com/libp2p/go-libp2p/p2p/host/pstoremanager/pstoremanager.go
+++ b/vendor/github.com/libp2p/go-libp2p/p2p/host/pstoremanager/pstoremanager.go
@@ -9,6 +9,7 @@ import (
"github.com/libp2p/go-libp2p/core/network"
"github.com/libp2p/go-libp2p/core/peer"
"github.com/libp2p/go-libp2p/core/peerstore"
+ "github.com/libp2p/go-libp2p/p2p/host/eventbus"
logging "github.com/ipfs/go-log/v2"
)
@@ -68,7 +69,7 @@ func NewPeerstoreManager(pstore peerstore.Peerstore, eventBus event.Bus, opts ..
func (m *PeerstoreManager) Start() {
ctx, cancel := context.WithCancel(context.Background())
m.cancel = cancel
- sub, err := m.eventBus.Subscribe(&event.EvtPeerConnectednessChanged{})
+ sub, err := m.eventBus.Subscribe(&event.EvtPeerConnectednessChanged{}, eventbus.Name("pstoremanager"))
if err != nil {
log.Warnf("subscription failed. Peerstore manager not activated. Error: %s", err)
return
@@ -108,7 +109,8 @@ func (m *PeerstoreManager) background(ctx context.Context, sub event.Subscriptio
// If we reconnect to the peer before we've cleared the information, keep it.
delete(disconnected, p)
}
- case now := <-ticker.C:
+ case <-ticker.C:
+ now := time.Now()
for p, disconnectTime := range disconnected {
if disconnectTime.Add(m.gracePeriod).Before(now) {
m.pstore.RemovePeer(p)
diff --git a/vendor/github.com/libp2p/go-libp2p/p2p/host/relaysvc/relay.go b/vendor/github.com/libp2p/go-libp2p/p2p/host/relaysvc/relay.go
index a36e20e00..f9bbc7588 100644
--- a/vendor/github.com/libp2p/go-libp2p/p2p/host/relaysvc/relay.go
+++ b/vendor/github.com/libp2p/go-libp2p/p2p/host/relaysvc/relay.go
@@ -4,11 +4,11 @@ import (
"context"
"sync"
- relayv2 "github.com/libp2p/go-libp2p/p2p/protocol/circuitv2/relay"
-
"github.com/libp2p/go-libp2p/core/event"
"github.com/libp2p/go-libp2p/core/host"
"github.com/libp2p/go-libp2p/core/network"
+ "github.com/libp2p/go-libp2p/p2p/host/eventbus"
+ relayv2 "github.com/libp2p/go-libp2p/p2p/protocol/circuitv2/relay"
)
type RelayManager struct {
@@ -44,7 +44,7 @@ func (m *RelayManager) background(ctx context.Context) {
m.mutex.Unlock()
}()
- subReachability, _ := m.host.EventBus().Subscribe(new(event.EvtLocalReachabilityChanged))
+ subReachability, _ := m.host.EventBus().Subscribe(new(event.EvtLocalReachabilityChanged), eventbus.Name("relaysvc"))
defer subReachability.Close()
for {
@@ -65,14 +65,19 @@ func (m *RelayManager) background(ctx context.Context) {
func (m *RelayManager) reachabilityChanged(r network.Reachability) error {
switch r {
case network.ReachabilityPublic:
+ m.mutex.Lock()
+ defer m.mutex.Unlock()
+ // This could happen if two consecutive EvtLocalReachabilityChanged report the same reachability.
+ // This shouldn't happen, but it's safer to double-check.
+ if m.relay != nil {
+ return nil
+ }
relay, err := relayv2.New(m.host, m.opts...)
if err != nil {
return err
}
- m.mutex.Lock()
- defer m.mutex.Unlock()
m.relay = relay
- case network.ReachabilityPrivate:
+ default:
m.mutex.Lock()
defer m.mutex.Unlock()
if m.relay != nil {
diff --git a/vendor/github.com/libp2p/go-libp2p/p2p/host/resource-manager/README.md b/vendor/github.com/libp2p/go-libp2p/p2p/host/resource-manager/README.md
index 2d2ab3350..85926100f 100644
--- a/vendor/github.com/libp2p/go-libp2p/p2p/host/resource-manager/README.md
+++ b/vendor/github.com/libp2p/go-libp2p/p2p/host/resource-manager/README.md
@@ -28,14 +28,27 @@ scalingLimits := rcmgr.DefaultLimits
// Add limits around included libp2p protocols
libp2p.SetDefaultServiceLimits(&scalingLimits)
-// Turn the scaling limits into a static set of limits using `.AutoScale`. This
+// Turn the scaling limits into a concrete set of limits using `.AutoScale`. This
// scales the limits proportional to your system memory.
-limits := scalingLimits.AutoScale()
+scaledDefaultLimits := scalingLimits.AutoScale()
+
+// Tweak certain settings
+cfg := rcmgr.PartialLimitConfig{
+ System: rcmgr.ResourceLimits{
+ // Allow unlimited outbound streams
+ StreamsOutbound: rcmgr.Unlimited,
+ },
+ // Everything else is default. The exact values will come from `scaledDefaultLimits` above.
+}
+
+// Create our limits by using our cfg and replacing the default values with values from `scaledDefaultLimits`
+limits := cfg.Build(scaledDefaultLimits)
// The resource manager expects a limiter, se we create one from our limits.
limiter := rcmgr.NewFixedLimiter(limits)
-// (Optional if you want metrics) Construct the OpenCensus metrics reporter.
+// (Optional if you want metrics)
+rcmgrObs.MustRegisterWith(prometheus.DefaultRegisterer)
str, err := rcmgrObs.NewStatsTraceReporter()
if err != nil {
panic(err)
@@ -51,6 +64,54 @@ if err != nil {
host, err := libp2p.New(libp2p.ResourceManager(rm))
```
+### Saving the limits config
+The easiest way to save the defined limits is to serialize the `PartialLimitConfig`
+type as JSON.
+
+```go
+noisyNeighbor, _ := peer.Decode("QmVvtzcZgCkMnSFf2dnrBPXrWuNFWNM9J3MpZQCvWPuVZf")
+cfg := rcmgr.PartialLimitConfig{
+ System: &rcmgr.ResourceLimits{
+ // Allow unlimited outbound streams
+ StreamsOutbound: rcmgr.Unlimited,
+ },
+ Peer: map[peer.ID]rcmgr.ResourceLimits{
+ noisyNeighbor: {
+ // No inbound connections from this peer
+ ConnsInbound: rcmgr.BlockAllLimit,
+ // But let me open connections to them
+ Conns: rcmgr.DefaultLimit,
+ ConnsOutbound: rcmgr.DefaultLimit,
+ // No inbound streams from this peer
+ StreamsInbound: rcmgr.BlockAllLimit,
+ // And let me open unlimited (by me) outbound streams (the peer may have their own limits on me)
+ StreamsOutbound: rcmgr.Unlimited,
+ },
+ },
+}
+jsonBytes, _ := json.Marshal(&cfg)
+
+// string(jsonBytes)
+// {
+// "System": {
+// "StreamsOutbound": "unlimited"
+// },
+// "Peer": {
+// "QmVvtzcZgCkMnSFf2dnrBPXrWuNFWNM9J3MpZQCvWPuVZf": {
+// "StreamsInbound": "blockAll",
+// "StreamsOutbound": "unlimited",
+// "ConnsInbound": "blockAll"
+// }
+// }
+// }
+```
+
+This will omit defaults from the JSON output. It will also serialize the
+blockAll, and unlimited values explicitly.
+
+The `Memory` field is serialized as a string to workaround the JSON limitation
+of 32 bit integers (`Memory` is an int64).
+
## Basic Resources
### Memory
@@ -278,7 +339,7 @@ This is done using the `ScalingLimitConfig`. For every scope, this configuration
struct defines the absolutely bare minimum limits, and an (optional) increase of
these limits, which will be applied on nodes that have sufficient memory.
-A `ScalingLimitConfig` can be converted into a `LimitConfig` (which can then be
+A `ScalingLimitConfig` can be converted into a `ConcreteLimitConfig` (which can then be
used to initialize a fixed limiter with `NewFixedLimiter`) by calling the `Scale` method.
The `Scale` method takes two parameters: the amount of memory and the number of file
descriptors that an application is willing to dedicate to libp2p.
@@ -346,7 +407,7 @@ go-libp2p process. For the default definitions see [`DefaultLimits` and
If the defaults seem mostly okay, but you want to adjust one facet you can
simply copy the default struct object and update the field you want to change. You can
-apply changes to a `BaseLimit`, `BaseLimitIncrease`, and `LimitConfig` with
+apply changes to a `BaseLimit`, `BaseLimitIncrease`, and `ConcreteLimitConfig` with
`.Apply`.
Example
@@ -386,7 +447,7 @@ Example Log:
The log line above is an example log line that gets emitted if you enable debug
logging in the resource manager. You can do this by setting the environment
-variable `GOLOG_LOG_LEVEL="rcmgr=info"`. By default only the error is
+variable `GOLOG_LOG_LEVEL="rcmgr=debug"`. By default only the error is
returned to the caller, and nothing is logged by the resource manager itself.
The log line message (and returned error) will tell you which resource limit was
@@ -427,10 +488,10 @@ your limits often. This could be a sign that you need to raise your limits
(your process is more intensive than you originally thought) or that you need
to fix something in your application (surely you don't need over 1000 streams?).
-There are OpenCensus metrics that can be hooked up to the resource manager. See
+There are Prometheus metrics that can be hooked up to the resource manager. See
`obs/stats_test.go` for an example on how to enable this, and `DefaultViews` in
`stats.go` for recommended views. These metrics can be hooked up to Prometheus
-or any other OpenCensus supported platform.
+or any other platform that can scrape a prometheus endpoint.
There is also an included Grafana dashboard to help kickstart your
observability into the resource manager. Find more information about it at
@@ -449,7 +510,7 @@ Look at `WithAllowlistedMultiaddrs` and its example in the GoDoc to learn more.
## ConnManager vs Resource Manager
go-libp2p already includes a [connection
-manager](https://pkg.go.dev/github.com/libp2p/go-libp2p-core/connmgr#ConnManager),
+manager](https://pkg.go.dev/github.com/libp2p/go-libp2p/core/connmgr#ConnManager),
so what's the difference between the `ConnManager` and the `ResourceManager`?
ConnManager:
diff --git a/vendor/github.com/libp2p/go-libp2p/p2p/host/resource-manager/error.go b/vendor/github.com/libp2p/go-libp2p/p2p/host/resource-manager/error.go
index 4b02672b8..1e87e00aa 100644
--- a/vendor/github.com/libp2p/go-libp2p/p2p/host/resource-manager/error.go
+++ b/vendor/github.com/libp2p/go-libp2p/p2p/host/resource-manager/error.go
@@ -6,13 +6,13 @@ import (
"github.com/libp2p/go-libp2p/core/network"
)
-type errStreamOrConnLimitExceeded struct {
+type ErrStreamOrConnLimitExceeded struct {
current, attempted, limit int
err error
}
-func (e *errStreamOrConnLimitExceeded) Error() string { return e.err.Error() }
-func (e *errStreamOrConnLimitExceeded) Unwrap() error { return e.err }
+func (e *ErrStreamOrConnLimitExceeded) Error() string { return e.err.Error() }
+func (e *ErrStreamOrConnLimitExceeded) Unwrap() error { return e.err }
// edge may be "" if this is not an edge error
func logValuesStreamLimit(scope, edge string, dir network.Direction, stat network.ScopeStat, err error) []interface{} {
@@ -22,7 +22,7 @@ func logValuesStreamLimit(scope, edge string, dir network.Direction, stat networ
logValues = append(logValues, "edge", edge)
}
logValues = append(logValues, "direction", dir)
- var e *errStreamOrConnLimitExceeded
+ var e *ErrStreamOrConnLimitExceeded
if errors.As(err, &e) {
logValues = append(logValues,
"current", e.current,
@@ -41,7 +41,7 @@ func logValuesConnLimit(scope, edge string, dir network.Direction, usefd bool, s
logValues = append(logValues, "edge", edge)
}
logValues = append(logValues, "direction", dir, "usefd", usefd)
- var e *errStreamOrConnLimitExceeded
+ var e *ErrStreamOrConnLimitExceeded
if errors.As(err, &e) {
logValues = append(logValues,
"current", e.current,
@@ -52,14 +52,14 @@ func logValuesConnLimit(scope, edge string, dir network.Direction, usefd bool, s
return append(logValues, "stat", stat, "error", err)
}
-type errMemoryLimitExceeded struct {
+type ErrMemoryLimitExceeded struct {
current, attempted, limit int64
priority uint8
err error
}
-func (e *errMemoryLimitExceeded) Error() string { return e.err.Error() }
-func (e *errMemoryLimitExceeded) Unwrap() error { return e.err }
+func (e *ErrMemoryLimitExceeded) Error() string { return e.err.Error() }
+func (e *ErrMemoryLimitExceeded) Unwrap() error { return e.err }
// edge may be "" if this is not an edge error
func logValuesMemoryLimit(scope, edge string, stat network.ScopeStat, err error) []interface{} {
@@ -68,7 +68,7 @@ func logValuesMemoryLimit(scope, edge string, stat network.ScopeStat, err error)
if edge != "" {
logValues = append(logValues, "edge", edge)
}
- var e *errMemoryLimitExceeded
+ var e *ErrMemoryLimitExceeded
if errors.As(err, &e) {
logValues = append(logValues,
"current", e.current,
diff --git a/vendor/github.com/libp2p/go-libp2p/p2p/host/resource-manager/extapi.go b/vendor/github.com/libp2p/go-libp2p/p2p/host/resource-manager/extapi.go
index 302678e19..03edcd79e 100644
--- a/vendor/github.com/libp2p/go-libp2p/p2p/host/resource-manager/extapi.go
+++ b/vendor/github.com/libp2p/go-libp2p/p2p/host/resource-manager/extapi.go
@@ -87,7 +87,7 @@ func (r *resourceManager) ListProtocols() []protocol.ID {
}
sort.Slice(result, func(i, j int) bool {
- return strings.Compare(string(result[i]), string(result[j])) < 0
+ return result[i] < result[j]
})
return result
diff --git a/vendor/github.com/libp2p/go-libp2p/p2p/host/resource-manager/limit.go b/vendor/github.com/libp2p/go-libp2p/p2p/host/resource-manager/limit.go
index 7d0823b1e..ef7fcdc9b 100644
--- a/vendor/github.com/libp2p/go-libp2p/p2p/host/resource-manager/limit.go
+++ b/vendor/github.com/libp2p/go-libp2p/p2p/host/resource-manager/limit.go
@@ -12,6 +12,7 @@ package rcmgr
import (
"encoding/json"
"io"
+ "math"
"github.com/libp2p/go-libp2p/core/network"
"github.com/libp2p/go-libp2p/core/peer"
@@ -56,7 +57,7 @@ func NewDefaultLimiterFromJSON(in io.Reader) (Limiter, error) {
}
// NewLimiterFromJSON creates a new limiter by parsing a json configuration.
-func NewLimiterFromJSON(in io.Reader, defaults LimitConfig) (Limiter, error) {
+func NewLimiterFromJSON(in io.Reader, defaults ConcreteLimitConfig) (Limiter, error) {
cfg, err := readLimiterConfigFromJSON(in, defaults)
if err != nil {
return nil, err
@@ -64,37 +65,67 @@ func NewLimiterFromJSON(in io.Reader, defaults LimitConfig) (Limiter, error) {
return &fixedLimiter{cfg}, nil
}
-func readLimiterConfigFromJSON(in io.Reader, defaults LimitConfig) (LimitConfig, error) {
- var cfg LimitConfig
+func readLimiterConfigFromJSON(in io.Reader, defaults ConcreteLimitConfig) (ConcreteLimitConfig, error) {
+ var cfg PartialLimitConfig
if err := json.NewDecoder(in).Decode(&cfg); err != nil {
- return LimitConfig{}, err
+ return ConcreteLimitConfig{}, err
}
- cfg.Apply(defaults)
- return cfg, nil
+ return cfg.Build(defaults), nil
}
// fixedLimiter is a limiter with fixed limits.
type fixedLimiter struct {
- LimitConfig
+ ConcreteLimitConfig
}
var _ Limiter = (*fixedLimiter)(nil)
-func NewFixedLimiter(conf LimitConfig) Limiter {
+func NewFixedLimiter(conf ConcreteLimitConfig) Limiter {
log.Debugw("initializing new limiter with config", "limits", conf)
- return &fixedLimiter{LimitConfig: conf}
+ return &fixedLimiter{conf}
}
// BaseLimit is a mixin type for basic resource limits.
type BaseLimit struct {
- Streams int
- StreamsInbound int
- StreamsOutbound int
- Conns int
- ConnsInbound int
- ConnsOutbound int
- FD int
- Memory int64
+ Streams int `json:",omitempty"`
+ StreamsInbound int `json:",omitempty"`
+ StreamsOutbound int `json:",omitempty"`
+ Conns int `json:",omitempty"`
+ ConnsInbound int `json:",omitempty"`
+ ConnsOutbound int `json:",omitempty"`
+ FD int `json:",omitempty"`
+ Memory int64 `json:",omitempty"`
+}
+
+func valueOrBlockAll(n int) LimitVal {
+ if n == 0 {
+ return BlockAllLimit
+ } else if n == math.MaxInt {
+ return Unlimited
+ }
+ return LimitVal(n)
+}
+func valueOrBlockAll64(n int64) LimitVal64 {
+ if n == 0 {
+ return BlockAllLimit64
+ } else if n == math.MaxInt {
+ return Unlimited64
+ }
+ return LimitVal64(n)
+}
+
+// ToResourceLimits converts the BaseLimit to a ResourceLimits
+func (l BaseLimit) ToResourceLimits() ResourceLimits {
+ return ResourceLimits{
+ Streams: valueOrBlockAll(l.Streams),
+ StreamsInbound: valueOrBlockAll(l.StreamsInbound),
+ StreamsOutbound: valueOrBlockAll(l.StreamsOutbound),
+ Conns: valueOrBlockAll(l.Conns),
+ ConnsInbound: valueOrBlockAll(l.ConnsInbound),
+ ConnsOutbound: valueOrBlockAll(l.ConnsOutbound),
+ FD: valueOrBlockAll(l.FD),
+ Memory: valueOrBlockAll64(l.Memory),
+ }
}
// Apply overwrites all zero-valued limits with the values of l2
@@ -128,16 +159,16 @@ func (l *BaseLimit) Apply(l2 BaseLimit) {
// BaseLimitIncrease is the increase per GiB of allowed memory.
type BaseLimitIncrease struct {
- Streams int
- StreamsInbound int
- StreamsOutbound int
- Conns int
- ConnsInbound int
- ConnsOutbound int
+ Streams int `json:",omitempty"`
+ StreamsInbound int `json:",omitempty"`
+ StreamsOutbound int `json:",omitempty"`
+ Conns int `json:",omitempty"`
+ ConnsInbound int `json:",omitempty"`
+ ConnsOutbound int `json:",omitempty"`
// Memory is in bytes. Values over 1<<30 (1GiB) don't make sense.
- Memory int64
+ Memory int64 `json:",omitempty"`
// FDFraction is expected to be >= 0 and <= 1.
- FDFraction float64
+ FDFraction float64 `json:",omitempty"`
}
// Apply overwrites all zero-valued limits with the values of l2
@@ -169,7 +200,7 @@ func (l *BaseLimitIncrease) Apply(l2 BaseLimitIncrease) {
}
}
-func (l *BaseLimit) GetStreamLimit(dir network.Direction) int {
+func (l BaseLimit) GetStreamLimit(dir network.Direction) int {
if dir == network.DirInbound {
return l.StreamsInbound
} else {
@@ -177,11 +208,11 @@ func (l *BaseLimit) GetStreamLimit(dir network.Direction) int {
}
}
-func (l *BaseLimit) GetStreamTotalLimit() int {
+func (l BaseLimit) GetStreamTotalLimit() int {
return l.Streams
}
-func (l *BaseLimit) GetConnLimit(dir network.Direction) int {
+func (l BaseLimit) GetConnLimit(dir network.Direction) int {
if dir == network.DirInbound {
return l.ConnsInbound
} else {
@@ -189,78 +220,78 @@ func (l *BaseLimit) GetConnLimit(dir network.Direction) int {
}
}
-func (l *BaseLimit) GetConnTotalLimit() int {
+func (l BaseLimit) GetConnTotalLimit() int {
return l.Conns
}
-func (l *BaseLimit) GetFDLimit() int {
+func (l BaseLimit) GetFDLimit() int {
return l.FD
}
-func (l *BaseLimit) GetMemoryLimit() int64 {
+func (l BaseLimit) GetMemoryLimit() int64 {
return l.Memory
}
func (l *fixedLimiter) GetSystemLimits() Limit {
- return &l.System
+ return &l.system
}
func (l *fixedLimiter) GetTransientLimits() Limit {
- return &l.Transient
+ return &l.transient
}
func (l *fixedLimiter) GetAllowlistedSystemLimits() Limit {
- return &l.AllowlistedSystem
+ return &l.allowlistedSystem
}
func (l *fixedLimiter) GetAllowlistedTransientLimits() Limit {
- return &l.AllowlistedTransient
+ return &l.allowlistedTransient
}
func (l *fixedLimiter) GetServiceLimits(svc string) Limit {
- sl, ok := l.Service[svc]
+ sl, ok := l.service[svc]
if !ok {
- return &l.ServiceDefault
+ return &l.serviceDefault
}
return &sl
}
func (l *fixedLimiter) GetServicePeerLimits(svc string) Limit {
- pl, ok := l.ServicePeer[svc]
+ pl, ok := l.servicePeer[svc]
if !ok {
- return &l.ServicePeerDefault
+ return &l.servicePeerDefault
}
return &pl
}
func (l *fixedLimiter) GetProtocolLimits(proto protocol.ID) Limit {
- pl, ok := l.Protocol[proto]
+ pl, ok := l.protocol[proto]
if !ok {
- return &l.ProtocolDefault
+ return &l.protocolDefault
}
return &pl
}
func (l *fixedLimiter) GetProtocolPeerLimits(proto protocol.ID) Limit {
- pl, ok := l.ProtocolPeer[proto]
+ pl, ok := l.protocolPeer[proto]
if !ok {
- return &l.ProtocolPeerDefault
+ return &l.protocolPeerDefault
}
return &pl
}
func (l *fixedLimiter) GetPeerLimits(p peer.ID) Limit {
- pl, ok := l.Peer[p]
+ pl, ok := l.peer[p]
if !ok {
- return &l.PeerDefault
+ return &l.peerDefault
}
return &pl
}
func (l *fixedLimiter) GetStreamLimits(_ peer.ID) Limit {
- return &l.Stream
+ return &l.stream
}
func (l *fixedLimiter) GetConnLimits() Limit {
- return &l.Conn
+ return &l.conn
}
diff --git a/vendor/github.com/libp2p/go-libp2p/p2p/host/resource-manager/limit_config_test.backwards-compat.json b/vendor/github.com/libp2p/go-libp2p/p2p/host/resource-manager/limit_config_test.backwards-compat.json
new file mode 100644
index 000000000..b1a5e9ecb
--- /dev/null
+++ b/vendor/github.com/libp2p/go-libp2p/p2p/host/resource-manager/limit_config_test.backwards-compat.json
@@ -0,0 +1,45 @@
+{
+ "System": {
+ "Memory": 65536,
+ "Conns": 16,
+ "ConnsInbound": 8,
+ "ConnsOutbound": 16,
+ "FD": 16
+ },
+ "ServiceDefault": {
+ "Memory": 8765
+ },
+ "Service": {
+ "A": {
+ "Memory": 8192
+ },
+ "B": {}
+ },
+ "ServicePeerDefault": {
+ "Memory": 2048
+ },
+ "ServicePeer": {
+ "A": {
+ "Memory": 4096
+ }
+ },
+ "ProtocolDefault": {
+ "Memory": 2048
+ },
+ "ProtocolPeerDefault": {
+ "Memory": 1024
+ },
+ "Protocol": {
+ "/A": {
+ "Memory": 8192
+ }
+ },
+ "PeerDefault": {
+ "Memory": 4096
+ },
+ "Peer": {
+ "12D3KooWPFH2Bx2tPfw6RLxN8k2wh47GRXgkt9yrAHU37zFwHWzS": {
+ "Memory": 4097
+ }
+ }
+}
\ No newline at end of file
diff --git a/vendor/github.com/libp2p/go-libp2p/p2p/host/resource-manager/limit_defaults.go b/vendor/github.com/libp2p/go-libp2p/p2p/host/resource-manager/limit_defaults.go
index 3a973d99a..e7489c45d 100644
--- a/vendor/github.com/libp2p/go-libp2p/p2p/host/resource-manager/limit_defaults.go
+++ b/vendor/github.com/libp2p/go-libp2p/p2p/host/resource-manager/limit_defaults.go
@@ -2,8 +2,11 @@ package rcmgr
import (
"encoding/json"
+ "fmt"
"math"
+ "strconv"
+ "github.com/libp2p/go-libp2p/core/network"
"github.com/libp2p/go-libp2p/core/peer"
"github.com/libp2p/go-libp2p/core/protocol"
@@ -108,148 +111,468 @@ func (cfg *ScalingLimitConfig) AddProtocolPeerLimit(proto protocol.ID, base Base
}
}
-type LimitConfig struct {
- System BaseLimit `json:",omitempty"`
- Transient BaseLimit `json:",omitempty"`
+type LimitVal int
+
+const (
+ // DefaultLimit is the default value for resources. The exact value depends on the context, but will get values from `DefaultLimits`.
+ DefaultLimit LimitVal = 0
+ // Unlimited is the value for unlimited resources. An arbitrarily high number will also work.
+ Unlimited LimitVal = -1
+ // BlockAllLimit is the LimitVal for allowing no amount of resources.
+ BlockAllLimit LimitVal = -2
+)
+
+func (l LimitVal) MarshalJSON() ([]byte, error) {
+ if l == Unlimited {
+ return json.Marshal("unlimited")
+ } else if l == DefaultLimit {
+ return json.Marshal("default")
+ } else if l == BlockAllLimit {
+ return json.Marshal("blockAll")
+ }
+ return json.Marshal(int(l))
+}
+
+func (l *LimitVal) UnmarshalJSON(b []byte) error {
+ if string(b) == `"default"` {
+ *l = DefaultLimit
+ return nil
+ } else if string(b) == `"unlimited"` {
+ *l = Unlimited
+ return nil
+ } else if string(b) == `"blockAll"` {
+ *l = BlockAllLimit
+ return nil
+ }
+
+ var val int
+ if err := json.Unmarshal(b, &val); err != nil {
+ return err
+ }
+
+ if val == 0 {
+ // If there is an explicit 0 in the JSON we should interpret this as block all.
+ *l = BlockAllLimit
+ return nil
+ }
+
+ *l = LimitVal(val)
+ return nil
+}
+
+func (l LimitVal) Build(defaultVal int) int {
+ if l == DefaultLimit {
+ return defaultVal
+ }
+ if l == Unlimited {
+ return math.MaxInt
+ }
+ if l == BlockAllLimit {
+ return 0
+ }
+ return int(l)
+}
+
+type LimitVal64 int64
+
+const (
+	// DefaultLimit64 is the default value for 64-bit resources.
+	DefaultLimit64 LimitVal64 = 0
+	// Unlimited64 is the value for unlimited 64-bit resources.
+	Unlimited64 LimitVal64 = -1
+	// BlockAllLimit64 is the LimitVal64 for allowing no amount of resources.
+ BlockAllLimit64 LimitVal64 = -2
+)
+
+func (l LimitVal64) MarshalJSON() ([]byte, error) {
+ if l == Unlimited64 {
+ return json.Marshal("unlimited")
+ } else if l == DefaultLimit64 {
+ return json.Marshal("default")
+ } else if l == BlockAllLimit64 {
+ return json.Marshal("blockAll")
+ }
+
+ // Convert this to a string because JSON doesn't support 64-bit integers.
+ return json.Marshal(strconv.FormatInt(int64(l), 10))
+}
+
+func (l *LimitVal64) UnmarshalJSON(b []byte) error {
+ if string(b) == `"default"` {
+ *l = DefaultLimit64
+ return nil
+ } else if string(b) == `"unlimited"` {
+ *l = Unlimited64
+ return nil
+ } else if string(b) == `"blockAll"` {
+ *l = BlockAllLimit64
+ return nil
+ }
+
+ var val string
+ if err := json.Unmarshal(b, &val); err != nil {
+ // Is this an integer? Possible because of backwards compatibility.
+ var val int
+ if err := json.Unmarshal(b, &val); err != nil {
+ return fmt.Errorf("failed to unmarshal limit value: %w", err)
+ }
+
+ if val == 0 {
+ // If there is an explicit 0 in the JSON we should interpret this as block all.
+ *l = BlockAllLimit64
+ return nil
+ }
+
+ *l = LimitVal64(val)
+ return nil
+ }
+
+ i, err := strconv.ParseInt(val, 10, 64)
+ if err != nil {
+ return err
+ }
+
+ if i == 0 {
+ // If there is an explicit 0 in the JSON we should interpret this as block all.
+ *l = BlockAllLimit64
+ return nil
+ }
+
+ *l = LimitVal64(i)
+ return nil
+}
+
+func (l LimitVal64) Build(defaultVal int64) int64 {
+ if l == DefaultLimit64 {
+ return defaultVal
+ }
+ if l == Unlimited64 {
+ return math.MaxInt64
+ }
+ if l == BlockAllLimit64 {
+ return 0
+ }
+ return int64(l)
+}
+
+// ResourceLimits is the type for basic resource limits.
+type ResourceLimits struct {
+ Streams LimitVal `json:",omitempty"`
+ StreamsInbound LimitVal `json:",omitempty"`
+ StreamsOutbound LimitVal `json:",omitempty"`
+ Conns LimitVal `json:",omitempty"`
+ ConnsInbound LimitVal `json:",omitempty"`
+ ConnsOutbound LimitVal `json:",omitempty"`
+ FD LimitVal `json:",omitempty"`
+ Memory LimitVal64 `json:",omitempty"`
+}
+
+func (l *ResourceLimits) IsDefault() bool {
+ if l == nil {
+ return true
+ }
+
+ if l.Streams == DefaultLimit &&
+ l.StreamsInbound == DefaultLimit &&
+ l.StreamsOutbound == DefaultLimit &&
+ l.Conns == DefaultLimit &&
+ l.ConnsInbound == DefaultLimit &&
+ l.ConnsOutbound == DefaultLimit &&
+ l.FD == DefaultLimit &&
+ l.Memory == DefaultLimit64 {
+ return true
+ }
+ return false
+}
+
+func (l *ResourceLimits) ToMaybeNilPtr() *ResourceLimits {
+ if l.IsDefault() {
+ return nil
+ }
+ return l
+}
+
+// Apply overwrites all default limits with the values of l2
+func (l *ResourceLimits) Apply(l2 ResourceLimits) {
+ if l.Streams == DefaultLimit {
+ l.Streams = l2.Streams
+ }
+ if l.StreamsInbound == DefaultLimit {
+ l.StreamsInbound = l2.StreamsInbound
+ }
+ if l.StreamsOutbound == DefaultLimit {
+ l.StreamsOutbound = l2.StreamsOutbound
+ }
+ if l.Conns == DefaultLimit {
+ l.Conns = l2.Conns
+ }
+ if l.ConnsInbound == DefaultLimit {
+ l.ConnsInbound = l2.ConnsInbound
+ }
+ if l.ConnsOutbound == DefaultLimit {
+ l.ConnsOutbound = l2.ConnsOutbound
+ }
+ if l.FD == DefaultLimit {
+ l.FD = l2.FD
+ }
+ if l.Memory == DefaultLimit64 {
+ l.Memory = l2.Memory
+ }
+}
+
+func (l *ResourceLimits) Build(defaults Limit) BaseLimit {
+ if l == nil {
+ return BaseLimit{
+ Streams: defaults.GetStreamTotalLimit(),
+ StreamsInbound: defaults.GetStreamLimit(network.DirInbound),
+ StreamsOutbound: defaults.GetStreamLimit(network.DirOutbound),
+ Conns: defaults.GetConnTotalLimit(),
+ ConnsInbound: defaults.GetConnLimit(network.DirInbound),
+ ConnsOutbound: defaults.GetConnLimit(network.DirOutbound),
+ FD: defaults.GetFDLimit(),
+ Memory: defaults.GetMemoryLimit(),
+ }
+ }
+
+ return BaseLimit{
+ Streams: l.Streams.Build(defaults.GetStreamTotalLimit()),
+ StreamsInbound: l.StreamsInbound.Build(defaults.GetStreamLimit(network.DirInbound)),
+ StreamsOutbound: l.StreamsOutbound.Build(defaults.GetStreamLimit(network.DirOutbound)),
+ Conns: l.Conns.Build(defaults.GetConnTotalLimit()),
+ ConnsInbound: l.ConnsInbound.Build(defaults.GetConnLimit(network.DirInbound)),
+ ConnsOutbound: l.ConnsOutbound.Build(defaults.GetConnLimit(network.DirOutbound)),
+ FD: l.FD.Build(defaults.GetFDLimit()),
+ Memory: l.Memory.Build(defaults.GetMemoryLimit()),
+ }
+}
+
+type PartialLimitConfig struct {
+ System ResourceLimits `json:",omitempty"`
+ Transient ResourceLimits `json:",omitempty"`
// Limits that are applied to resources with an allowlisted multiaddr.
// These will only be used if the normal System & Transient limits are
// reached.
- AllowlistedSystem BaseLimit `json:",omitempty"`
- AllowlistedTransient BaseLimit `json:",omitempty"`
+ AllowlistedSystem ResourceLimits `json:",omitempty"`
+ AllowlistedTransient ResourceLimits `json:",omitempty"`
- ServiceDefault BaseLimit `json:",omitempty"`
- Service map[string]BaseLimit `json:",omitempty"`
+ ServiceDefault ResourceLimits `json:",omitempty"`
+ Service map[string]ResourceLimits `json:",omitempty"`
- ServicePeerDefault BaseLimit `json:",omitempty"`
- ServicePeer map[string]BaseLimit `json:",omitempty"`
+ ServicePeerDefault ResourceLimits `json:",omitempty"`
+ ServicePeer map[string]ResourceLimits `json:",omitempty"`
- ProtocolDefault BaseLimit `json:",omitempty"`
- Protocol map[protocol.ID]BaseLimit `json:",omitempty"`
+ ProtocolDefault ResourceLimits `json:",omitempty"`
+ Protocol map[protocol.ID]ResourceLimits `json:",omitempty"`
- ProtocolPeerDefault BaseLimit `json:",omitempty"`
- ProtocolPeer map[protocol.ID]BaseLimit `json:",omitempty"`
+ ProtocolPeerDefault ResourceLimits `json:",omitempty"`
+ ProtocolPeer map[protocol.ID]ResourceLimits `json:",omitempty"`
- PeerDefault BaseLimit `json:",omitempty"`
- Peer map[peer.ID]BaseLimit `json:",omitempty"`
+ PeerDefault ResourceLimits `json:",omitempty"`
+ Peer map[peer.ID]ResourceLimits `json:",omitempty"`
- Conn BaseLimit `json:",omitempty"`
- Stream BaseLimit `json:",omitempty"`
+ Conn ResourceLimits `json:",omitempty"`
+ Stream ResourceLimits `json:",omitempty"`
}
-func (cfg *LimitConfig) MarshalJSON() ([]byte, error) {
+func (cfg *PartialLimitConfig) MarshalJSON() ([]byte, error) {
// we want to marshal the encoded peer id
- encodedPeerMap := make(map[string]BaseLimit, len(cfg.Peer))
+ encodedPeerMap := make(map[string]ResourceLimits, len(cfg.Peer))
for p, v := range cfg.Peer {
encodedPeerMap[p.String()] = v
}
- type Alias LimitConfig
+ type Alias PartialLimitConfig
return json.Marshal(&struct {
*Alias
- Peer map[string]BaseLimit `json:",omitempty"`
+ // String so we can have the properly marshalled peer id
+ Peer map[string]ResourceLimits `json:",omitempty"`
+
+ // The rest of the fields as pointers so that we omit empty values in the serialized result
+ System *ResourceLimits `json:",omitempty"`
+ Transient *ResourceLimits `json:",omitempty"`
+ AllowlistedSystem *ResourceLimits `json:",omitempty"`
+ AllowlistedTransient *ResourceLimits `json:",omitempty"`
+
+ ServiceDefault *ResourceLimits `json:",omitempty"`
+
+ ServicePeerDefault *ResourceLimits `json:",omitempty"`
+
+ ProtocolDefault *ResourceLimits `json:",omitempty"`
+
+ ProtocolPeerDefault *ResourceLimits `json:",omitempty"`
+
+ PeerDefault *ResourceLimits `json:",omitempty"`
+
+ Conn *ResourceLimits `json:",omitempty"`
+ Stream *ResourceLimits `json:",omitempty"`
}{
Alias: (*Alias)(cfg),
Peer: encodedPeerMap,
+
+ System: cfg.System.ToMaybeNilPtr(),
+ Transient: cfg.Transient.ToMaybeNilPtr(),
+ AllowlistedSystem: cfg.AllowlistedSystem.ToMaybeNilPtr(),
+ AllowlistedTransient: cfg.AllowlistedTransient.ToMaybeNilPtr(),
+ ServiceDefault: cfg.ServiceDefault.ToMaybeNilPtr(),
+ ServicePeerDefault: cfg.ServicePeerDefault.ToMaybeNilPtr(),
+ ProtocolDefault: cfg.ProtocolDefault.ToMaybeNilPtr(),
+ ProtocolPeerDefault: cfg.ProtocolPeerDefault.ToMaybeNilPtr(),
+ PeerDefault: cfg.PeerDefault.ToMaybeNilPtr(),
+ Conn: cfg.Conn.ToMaybeNilPtr(),
+ Stream: cfg.Stream.ToMaybeNilPtr(),
})
}
-func (cfg *LimitConfig) Apply(c LimitConfig) {
+func applyResourceLimitsMap[K comparable](this *map[K]ResourceLimits, other map[K]ResourceLimits, fallbackDefault ResourceLimits) {
+ for k, l := range *this {
+ r := fallbackDefault
+ if l2, ok := other[k]; ok {
+ r = l2
+ }
+ l.Apply(r)
+ (*this)[k] = l
+ }
+ if *this == nil && other != nil {
+ *this = make(map[K]ResourceLimits)
+ }
+ for k, l := range other {
+ if _, ok := (*this)[k]; !ok {
+ (*this)[k] = l
+ }
+ }
+}
+
+func (cfg *PartialLimitConfig) Apply(c PartialLimitConfig) {
cfg.System.Apply(c.System)
cfg.Transient.Apply(c.Transient)
cfg.AllowlistedSystem.Apply(c.AllowlistedSystem)
cfg.AllowlistedTransient.Apply(c.AllowlistedTransient)
cfg.ServiceDefault.Apply(c.ServiceDefault)
+ cfg.ServicePeerDefault.Apply(c.ServicePeerDefault)
cfg.ProtocolDefault.Apply(c.ProtocolDefault)
cfg.ProtocolPeerDefault.Apply(c.ProtocolPeerDefault)
cfg.PeerDefault.Apply(c.PeerDefault)
cfg.Conn.Apply(c.Conn)
cfg.Stream.Apply(c.Stream)
- // TODO: the following could be solved a lot nicer, if only we could use generics
- for s, l := range cfg.Service {
- r := cfg.ServiceDefault
- if l2, ok := c.Service[s]; ok {
- r = l2
- }
- l.Apply(r)
- cfg.Service[s] = l
- }
- if c.Service != nil && cfg.Service == nil {
- cfg.Service = make(map[string]BaseLimit)
- }
- for s, l := range c.Service {
- if _, ok := cfg.Service[s]; !ok {
- cfg.Service[s] = l
- }
- }
+ applyResourceLimitsMap(&cfg.Service, c.Service, cfg.ServiceDefault)
+ applyResourceLimitsMap(&cfg.ServicePeer, c.ServicePeer, cfg.ServicePeerDefault)
+ applyResourceLimitsMap(&cfg.Protocol, c.Protocol, cfg.ProtocolDefault)
+ applyResourceLimitsMap(&cfg.ProtocolPeer, c.ProtocolPeer, cfg.ProtocolPeerDefault)
+ applyResourceLimitsMap(&cfg.Peer, c.Peer, cfg.PeerDefault)
+}
- for s, l := range cfg.ServicePeer {
- r := cfg.ServicePeerDefault
- if l2, ok := c.ServicePeer[s]; ok {
- r = l2
- }
- l.Apply(r)
- cfg.ServicePeer[s] = l
- }
- if c.ServicePeer != nil && cfg.ServicePeer == nil {
- cfg.ServicePeer = make(map[string]BaseLimit)
- }
- for s, l := range c.ServicePeer {
- if _, ok := cfg.ServicePeer[s]; !ok {
- cfg.ServicePeer[s] = l
- }
- }
+func (cfg PartialLimitConfig) Build(defaults ConcreteLimitConfig) ConcreteLimitConfig {
+ out := defaults
+
+ out.system = cfg.System.Build(defaults.system)
+ out.transient = cfg.Transient.Build(defaults.transient)
+ out.allowlistedSystem = cfg.AllowlistedSystem.Build(defaults.allowlistedSystem)
+ out.allowlistedTransient = cfg.AllowlistedTransient.Build(defaults.allowlistedTransient)
+ out.serviceDefault = cfg.ServiceDefault.Build(defaults.serviceDefault)
+ out.servicePeerDefault = cfg.ServicePeerDefault.Build(defaults.servicePeerDefault)
+ out.protocolDefault = cfg.ProtocolDefault.Build(defaults.protocolDefault)
+ out.protocolPeerDefault = cfg.ProtocolPeerDefault.Build(defaults.protocolPeerDefault)
+ out.peerDefault = cfg.PeerDefault.Build(defaults.peerDefault)
+ out.conn = cfg.Conn.Build(defaults.conn)
+ out.stream = cfg.Stream.Build(defaults.stream)
+
+ out.service = buildMapWithDefault(cfg.Service, defaults.service, out.serviceDefault)
+ out.servicePeer = buildMapWithDefault(cfg.ServicePeer, defaults.servicePeer, out.servicePeerDefault)
+ out.protocol = buildMapWithDefault(cfg.Protocol, defaults.protocol, out.protocolDefault)
+ out.protocolPeer = buildMapWithDefault(cfg.ProtocolPeer, defaults.protocolPeer, out.protocolPeerDefault)
+ out.peer = buildMapWithDefault(cfg.Peer, defaults.peer, out.peerDefault)
+
+ return out
+}
- for s, l := range cfg.Protocol {
- r := cfg.ProtocolDefault
- if l2, ok := c.Protocol[s]; ok {
- r = l2
- }
- l.Apply(r)
- cfg.Protocol[s] = l
- }
- if c.Protocol != nil && cfg.Protocol == nil {
- cfg.Protocol = make(map[protocol.ID]BaseLimit)
- }
- for s, l := range c.Protocol {
- if _, ok := cfg.Protocol[s]; !ok {
- cfg.Protocol[s] = l
- }
+func buildMapWithDefault[K comparable](definedLimits map[K]ResourceLimits, defaults map[K]BaseLimit, fallbackDefault BaseLimit) map[K]BaseLimit {
+ if definedLimits == nil && defaults == nil {
+ return nil
}
- for s, l := range cfg.ProtocolPeer {
- r := cfg.ProtocolPeerDefault
- if l2, ok := c.ProtocolPeer[s]; ok {
- r = l2
- }
- l.Apply(r)
- cfg.ProtocolPeer[s] = l
- }
- if c.ProtocolPeer != nil && cfg.ProtocolPeer == nil {
- cfg.ProtocolPeer = make(map[protocol.ID]BaseLimit)
+ out := make(map[K]BaseLimit)
+ for k, l := range defaults {
+ out[k] = l
}
- for s, l := range c.ProtocolPeer {
- if _, ok := cfg.ProtocolPeer[s]; !ok {
- cfg.ProtocolPeer[s] = l
+
+ for k, l := range definedLimits {
+ if defaultForKey, ok := out[k]; ok {
+ out[k] = l.Build(defaultForKey)
+ } else {
+ out[k] = l.Build(fallbackDefault)
}
}
- for s, l := range cfg.Peer {
- r := cfg.PeerDefault
- if l2, ok := c.Peer[s]; ok {
- r = l2
- }
- l.Apply(r)
- cfg.Peer[s] = l
+ return out
+}
+
+// ConcreteLimitConfig is similar to PartialLimitConfig, but all values are defined.
+// There is no unset "default" value. Commonly constructed by calling
+// PartialLimitConfig.Build(rcmgr.DefaultLimits.AutoScale())
+type ConcreteLimitConfig struct {
+ system BaseLimit
+ transient BaseLimit
+
+ // Limits that are applied to resources with an allowlisted multiaddr.
+ // These will only be used if the normal System & Transient limits are
+ // reached.
+ allowlistedSystem BaseLimit
+ allowlistedTransient BaseLimit
+
+ serviceDefault BaseLimit
+ service map[string]BaseLimit
+
+ servicePeerDefault BaseLimit
+ servicePeer map[string]BaseLimit
+
+ protocolDefault BaseLimit
+ protocol map[protocol.ID]BaseLimit
+
+ protocolPeerDefault BaseLimit
+ protocolPeer map[protocol.ID]BaseLimit
+
+ peerDefault BaseLimit
+ peer map[peer.ID]BaseLimit
+
+ conn BaseLimit
+ stream BaseLimit
+}
+
+func resourceLimitsMapFromBaseLimitMap[K comparable](baseLimitMap map[K]BaseLimit) map[K]ResourceLimits {
+ if baseLimitMap == nil {
+ return nil
}
- if c.Peer != nil && cfg.Peer == nil {
- cfg.Peer = make(map[peer.ID]BaseLimit)
+
+ out := make(map[K]ResourceLimits)
+ for k, l := range baseLimitMap {
+ out[k] = l.ToResourceLimits()
}
- for s, l := range c.Peer {
- if _, ok := cfg.Peer[s]; !ok {
- cfg.Peer[s] = l
- }
+
+ return out
+}
+
+// ToPartialLimitConfig converts a ConcreteLimitConfig to a PartialLimitConfig.
+// The returned PartialLimitConfig will have no default values.
+func (cfg ConcreteLimitConfig) ToPartialLimitConfig() PartialLimitConfig {
+ return PartialLimitConfig{
+ System: cfg.system.ToResourceLimits(),
+ Transient: cfg.transient.ToResourceLimits(),
+ AllowlistedSystem: cfg.allowlistedSystem.ToResourceLimits(),
+ AllowlistedTransient: cfg.allowlistedTransient.ToResourceLimits(),
+ ServiceDefault: cfg.serviceDefault.ToResourceLimits(),
+ Service: resourceLimitsMapFromBaseLimitMap(cfg.service),
+ ServicePeerDefault: cfg.servicePeerDefault.ToResourceLimits(),
+ ServicePeer: resourceLimitsMapFromBaseLimitMap(cfg.servicePeer),
+ ProtocolDefault: cfg.protocolDefault.ToResourceLimits(),
+ Protocol: resourceLimitsMapFromBaseLimitMap(cfg.protocol),
+ ProtocolPeerDefault: cfg.protocolPeerDefault.ToResourceLimits(),
+ ProtocolPeer: resourceLimitsMapFromBaseLimitMap(cfg.protocolPeer),
+ PeerDefault: cfg.peerDefault.ToResourceLimits(),
+ Peer: resourceLimitsMapFromBaseLimitMap(cfg.peer),
+ Conn: cfg.conn.ToResourceLimits(),
+ Stream: cfg.stream.ToResourceLimits(),
}
}
@@ -257,54 +580,54 @@ func (cfg *LimitConfig) Apply(c LimitConfig) {
// memory is the amount of memory that the stack is allowed to consume,
// for a dedicated node it's recommended to use 1/8 of the installed system memory.
// If memory is smaller than 128 MB, the base configuration will be used.
-func (cfg *ScalingLimitConfig) Scale(memory int64, numFD int) LimitConfig {
- lc := LimitConfig{
- System: scale(cfg.SystemBaseLimit, cfg.SystemLimitIncrease, memory, numFD),
- Transient: scale(cfg.TransientBaseLimit, cfg.TransientLimitIncrease, memory, numFD),
- AllowlistedSystem: scale(cfg.AllowlistedSystemBaseLimit, cfg.AllowlistedSystemLimitIncrease, memory, numFD),
- AllowlistedTransient: scale(cfg.AllowlistedTransientBaseLimit, cfg.AllowlistedTransientLimitIncrease, memory, numFD),
- ServiceDefault: scale(cfg.ServiceBaseLimit, cfg.ServiceLimitIncrease, memory, numFD),
- ServicePeerDefault: scale(cfg.ServicePeerBaseLimit, cfg.ServicePeerLimitIncrease, memory, numFD),
- ProtocolDefault: scale(cfg.ProtocolBaseLimit, cfg.ProtocolLimitIncrease, memory, numFD),
- ProtocolPeerDefault: scale(cfg.ProtocolPeerBaseLimit, cfg.ProtocolPeerLimitIncrease, memory, numFD),
- PeerDefault: scale(cfg.PeerBaseLimit, cfg.PeerLimitIncrease, memory, numFD),
- Conn: scale(cfg.ConnBaseLimit, cfg.ConnLimitIncrease, memory, numFD),
- Stream: scale(cfg.StreamBaseLimit, cfg.ConnLimitIncrease, memory, numFD),
+func (cfg *ScalingLimitConfig) Scale(memory int64, numFD int) ConcreteLimitConfig {
+ lc := ConcreteLimitConfig{
+ system: scale(cfg.SystemBaseLimit, cfg.SystemLimitIncrease, memory, numFD),
+ transient: scale(cfg.TransientBaseLimit, cfg.TransientLimitIncrease, memory, numFD),
+ allowlistedSystem: scale(cfg.AllowlistedSystemBaseLimit, cfg.AllowlistedSystemLimitIncrease, memory, numFD),
+ allowlistedTransient: scale(cfg.AllowlistedTransientBaseLimit, cfg.AllowlistedTransientLimitIncrease, memory, numFD),
+ serviceDefault: scale(cfg.ServiceBaseLimit, cfg.ServiceLimitIncrease, memory, numFD),
+ servicePeerDefault: scale(cfg.ServicePeerBaseLimit, cfg.ServicePeerLimitIncrease, memory, numFD),
+ protocolDefault: scale(cfg.ProtocolBaseLimit, cfg.ProtocolLimitIncrease, memory, numFD),
+ protocolPeerDefault: scale(cfg.ProtocolPeerBaseLimit, cfg.ProtocolPeerLimitIncrease, memory, numFD),
+ peerDefault: scale(cfg.PeerBaseLimit, cfg.PeerLimitIncrease, memory, numFD),
+ conn: scale(cfg.ConnBaseLimit, cfg.ConnLimitIncrease, memory, numFD),
+ stream: scale(cfg.StreamBaseLimit, cfg.ConnLimitIncrease, memory, numFD),
}
if cfg.ServiceLimits != nil {
- lc.Service = make(map[string]BaseLimit)
+ lc.service = make(map[string]BaseLimit)
for svc, l := range cfg.ServiceLimits {
- lc.Service[svc] = scale(l.BaseLimit, l.BaseLimitIncrease, memory, numFD)
+ lc.service[svc] = scale(l.BaseLimit, l.BaseLimitIncrease, memory, numFD)
}
}
if cfg.ProtocolLimits != nil {
- lc.Protocol = make(map[protocol.ID]BaseLimit)
+ lc.protocol = make(map[protocol.ID]BaseLimit)
for proto, l := range cfg.ProtocolLimits {
- lc.Protocol[proto] = scale(l.BaseLimit, l.BaseLimitIncrease, memory, numFD)
+ lc.protocol[proto] = scale(l.BaseLimit, l.BaseLimitIncrease, memory, numFD)
}
}
if cfg.PeerLimits != nil {
- lc.Peer = make(map[peer.ID]BaseLimit)
+ lc.peer = make(map[peer.ID]BaseLimit)
for p, l := range cfg.PeerLimits {
- lc.Peer[p] = scale(l.BaseLimit, l.BaseLimitIncrease, memory, numFD)
+ lc.peer[p] = scale(l.BaseLimit, l.BaseLimitIncrease, memory, numFD)
}
}
if cfg.ServicePeerLimits != nil {
- lc.ServicePeer = make(map[string]BaseLimit)
+ lc.servicePeer = make(map[string]BaseLimit)
for svc, l := range cfg.ServicePeerLimits {
- lc.ServicePeer[svc] = scale(l.BaseLimit, l.BaseLimitIncrease, memory, numFD)
+ lc.servicePeer[svc] = scale(l.BaseLimit, l.BaseLimitIncrease, memory, numFD)
}
}
if cfg.ProtocolPeerLimits != nil {
- lc.ProtocolPeer = make(map[protocol.ID]BaseLimit)
+ lc.protocolPeer = make(map[protocol.ID]BaseLimit)
for p, l := range cfg.ProtocolPeerLimits {
- lc.ProtocolPeer[p] = scale(l.BaseLimit, l.BaseLimitIncrease, memory, numFD)
+ lc.protocolPeer[p] = scale(l.BaseLimit, l.BaseLimitIncrease, memory, numFD)
}
}
return lc
}
-func (cfg *ScalingLimitConfig) AutoScale() LimitConfig {
+func (cfg *ScalingLimitConfig) AutoScale() ConcreteLimitConfig {
return cfg.Scale(
int64(memory.TotalMemory())/8,
getNumFDs()/2,
@@ -491,7 +814,10 @@ var DefaultLimits = ScalingLimitConfig{
},
PeerBaseLimit: BaseLimit{
- ConnsInbound: 4,
+ // 8 for now so that it matches the number of concurrent dials we may do
+ // in swarm_dial.go. With future smart dialing work we should bring this
+ // down
+ ConnsInbound: 8,
ConnsOutbound: 8,
Conns: 8,
StreamsInbound: 256,
@@ -514,7 +840,7 @@ var DefaultLimits = ScalingLimitConfig{
ConnsOutbound: 1,
Conns: 1,
FD: 1,
- Memory: 1 << 20,
+ Memory: 32 << 20,
},
StreamBaseLimit: BaseLimit{
@@ -536,18 +862,18 @@ var infiniteBaseLimit = BaseLimit{
Memory: math.MaxInt64,
}
-// InfiniteLimits are a limiter configuration that uses infinite limits, thus effectively not limiting anything.
+// InfiniteLimits are a limiter configuration that uses unlimited limits, thus effectively not limiting anything.
// Keep in mind that the operating system limits the number of file descriptors that an application can use.
-var InfiniteLimits = LimitConfig{
- System: infiniteBaseLimit,
- Transient: infiniteBaseLimit,
- AllowlistedSystem: infiniteBaseLimit,
- AllowlistedTransient: infiniteBaseLimit,
- ServiceDefault: infiniteBaseLimit,
- ServicePeerDefault: infiniteBaseLimit,
- ProtocolDefault: infiniteBaseLimit,
- ProtocolPeerDefault: infiniteBaseLimit,
- PeerDefault: infiniteBaseLimit,
- Conn: infiniteBaseLimit,
- Stream: infiniteBaseLimit,
+var InfiniteLimits = ConcreteLimitConfig{
+ system: infiniteBaseLimit,
+ transient: infiniteBaseLimit,
+ allowlistedSystem: infiniteBaseLimit,
+ allowlistedTransient: infiniteBaseLimit,
+ serviceDefault: infiniteBaseLimit,
+ servicePeerDefault: infiniteBaseLimit,
+ protocolDefault: infiniteBaseLimit,
+ protocolPeerDefault: infiniteBaseLimit,
+ peerDefault: infiniteBaseLimit,
+ conn: infiniteBaseLimit,
+ stream: infiniteBaseLimit,
}
diff --git a/vendor/github.com/libp2p/go-libp2p/p2p/host/resource-manager/obs/stats.go b/vendor/github.com/libp2p/go-libp2p/p2p/host/resource-manager/obs/stats.go
index 2b0b5cb72..bae3f0998 100644
--- a/vendor/github.com/libp2p/go-libp2p/p2p/host/resource-manager/obs/stats.go
+++ b/vendor/github.com/libp2p/go-libp2p/p2p/host/resource-manager/obs/stats.go
@@ -1,72 +1,132 @@
package obs
import (
- "context"
"strings"
rcmgr "github.com/libp2p/go-libp2p/p2p/host/resource-manager"
-
- "go.opencensus.io/stats"
- "go.opencensus.io/stats/view"
- "go.opencensus.io/tag"
+ "github.com/libp2p/go-libp2p/p2p/metricshelper"
+ "github.com/prometheus/client_golang/prometheus"
)
-var (
- metricNamespace = "rcmgr/"
- conns = stats.Int64(metricNamespace+"connections", "Number of Connections", stats.UnitDimensionless)
-
- peerConns = stats.Int64(metricNamespace+"peer/connections", "Number of connections this peer has", stats.UnitDimensionless)
- peerConnsNegative = stats.Int64(metricNamespace+"peer/connections_negative", "Number of connections this peer had. This is used to get the current connection number per peer histogram by subtracting this from the peer/connections histogram", stats.UnitDimensionless)
-
- streams = stats.Int64(metricNamespace+"streams", "Number of Streams", stats.UnitDimensionless)
-
- peerStreams = stats.Int64(metricNamespace+"peer/streams", "Number of streams this peer has", stats.UnitDimensionless)
- peerStreamsNegative = stats.Int64(metricNamespace+"peer/streams_negative", "Number of streams this peer had. This is used to get the current streams number per peer histogram by subtracting this from the peer/streams histogram", stats.UnitDimensionless)
-
- memory = stats.Int64(metricNamespace+"memory", "Amount of memory reserved as reported to the Resource Manager", stats.UnitDimensionless)
- peerMemory = stats.Int64(metricNamespace+"peer/memory", "Amount of memory currently reseved for peer", stats.UnitDimensionless)
- peerMemoryNegative = stats.Int64(metricNamespace+"peer/memory_negative", "Amount of memory previously reseved for peer. This is used to get the current memory per peer histogram by subtracting this from the peer/memory histogram", stats.UnitDimensionless)
-
- connMemory = stats.Int64(metricNamespace+"conn/memory", "Amount of memory currently reseved for the connection", stats.UnitDimensionless)
- connMemoryNegative = stats.Int64(metricNamespace+"conn/memory_negative", "Amount of memory previously reseved for the connection. This is used to get the current memory per connection histogram by subtracting this from the conn/memory histogram", stats.UnitDimensionless)
-
- fds = stats.Int64(metricNamespace+"fds", "Number of fds as reported to the Resource Manager", stats.UnitDimensionless)
-
- blockedResources = stats.Int64(metricNamespace+"blocked_resources", "Number of resource requests blocked", stats.UnitDimensionless)
-)
+const metricNamespace = "libp2p_rcmgr"
var (
- directionTag, _ = tag.NewKey("dir")
- scopeTag, _ = tag.NewKey("scope")
- serviceTag, _ = tag.NewKey("service")
- protocolTag, _ = tag.NewKey("protocol")
- resourceTag, _ = tag.NewKey("resource")
-)
-var (
- ConnView = &view.View{Measure: conns, Aggregation: view.Sum(), TagKeys: []tag.Key{directionTag, scopeTag}}
+ // Conns
+ conns = prometheus.NewGaugeVec(prometheus.GaugeOpts{
+ Namespace: metricNamespace,
+ Name: "connections",
+ Help: "Number of Connections",
+ }, []string{"dir", "scope"})
- oneTenThenExpDistribution = []float64{
- 1.1, 2.1, 3.1, 4.1, 5.1, 6.1, 7.1, 8.1, 9.1, 10.1, 16.1, 32.1, 64.1, 128.1, 256.1,
- }
+ connsInboundSystem = conns.With(prometheus.Labels{"dir": "inbound", "scope": "system"})
+ connsInboundTransient = conns.With(prometheus.Labels{"dir": "inbound", "scope": "transient"})
+ connsOutboundSystem = conns.With(prometheus.Labels{"dir": "outbound", "scope": "system"})
+ connsOutboundTransient = conns.With(prometheus.Labels{"dir": "outbound", "scope": "transient"})
- PeerConnsView = &view.View{
- Measure: peerConns,
- Aggregation: view.Distribution(oneTenThenExpDistribution...),
- TagKeys: []tag.Key{directionTag},
- }
- PeerConnsNegativeView = &view.View{
- Measure: peerConnsNegative,
- Aggregation: view.Distribution(oneTenThenExpDistribution...),
- TagKeys: []tag.Key{directionTag},
+ oneTenThenExpDistributionBuckets = []float64{
+ 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 16, 32, 64, 128, 256,
}
- StreamView = &view.View{Measure: streams, Aggregation: view.Sum(), TagKeys: []tag.Key{directionTag, scopeTag, serviceTag, protocolTag}}
- PeerStreamsView = &view.View{Measure: peerStreams, Aggregation: view.Distribution(oneTenThenExpDistribution...), TagKeys: []tag.Key{directionTag}}
- PeerStreamNegativeView = &view.View{Measure: peerStreamsNegative, Aggregation: view.Distribution(oneTenThenExpDistribution...), TagKeys: []tag.Key{directionTag}}
-
- MemoryView = &view.View{Measure: memory, Aggregation: view.Sum(), TagKeys: []tag.Key{scopeTag, serviceTag, protocolTag}}
+ // PeerConns
+ peerConns = prometheus.NewHistogramVec(prometheus.HistogramOpts{
+ Namespace: metricNamespace,
+ Name: "peer_connections",
+ Buckets: oneTenThenExpDistributionBuckets,
+ Help: "Number of connections this peer has",
+ }, []string{"dir"})
+ peerConnsInbound = peerConns.With(prometheus.Labels{"dir": "inbound"})
+ peerConnsOutbound = peerConns.With(prometheus.Labels{"dir": "outbound"})
+
+ // Lets us build a histogram of our current state. See https://github.com/libp2p/go-libp2p-resource-manager/pull/54#discussion_r911244757 for more information.
+ previousPeerConns = prometheus.NewHistogramVec(prometheus.HistogramOpts{
+ Namespace: metricNamespace,
+ Name: "previous_peer_connections",
+ Buckets: oneTenThenExpDistributionBuckets,
+ Help: "Number of connections this peer previously had. This is used to get the current connection number per peer histogram by subtracting this from the peer_connections histogram",
+ }, []string{"dir"})
+ previousPeerConnsInbound = previousPeerConns.With(prometheus.Labels{"dir": "inbound"})
+ previousPeerConnsOutbound = previousPeerConns.With(prometheus.Labels{"dir": "outbound"})
+
+ // Streams
+ streams = prometheus.NewGaugeVec(prometheus.GaugeOpts{
+ Namespace: metricNamespace,
+ Name: "streams",
+ Help: "Number of Streams",
+ }, []string{"dir", "scope", "protocol"})
+
+ peerStreams = prometheus.NewHistogramVec(prometheus.HistogramOpts{
+ Namespace: metricNamespace,
+ Name: "peer_streams",
+ Buckets: oneTenThenExpDistributionBuckets,
+ Help: "Number of streams this peer has",
+ }, []string{"dir"})
+ peerStreamsInbound = peerStreams.With(prometheus.Labels{"dir": "inbound"})
+ peerStreamsOutbound = peerStreams.With(prometheus.Labels{"dir": "outbound"})
+
+ previousPeerStreams = prometheus.NewHistogramVec(prometheus.HistogramOpts{
+ Namespace: metricNamespace,
+ Name: "previous_peer_streams",
+ Buckets: oneTenThenExpDistributionBuckets,
+ Help: "Number of streams this peer has",
+ }, []string{"dir"})
+ previousPeerStreamsInbound = previousPeerStreams.With(prometheus.Labels{"dir": "inbound"})
+ previousPeerStreamsOutbound = previousPeerStreams.With(prometheus.Labels{"dir": "outbound"})
+
+ // Memory
+ memory = prometheus.NewGaugeVec(prometheus.GaugeOpts{
+ Namespace: metricNamespace,
+ Name: "memory",
+ Help: "Amount of memory reserved as reported to the Resource Manager",
+ }, []string{"scope", "protocol"})
+
+ // PeerMemory
+ peerMemory = prometheus.NewHistogram(prometheus.HistogramOpts{
+ Namespace: metricNamespace,
+ Name: "peer_memory",
+ Buckets: memDistribution,
+ Help: "How many peers have reserved this bucket of memory, as reported to the Resource Manager",
+ })
+ previousPeerMemory = prometheus.NewHistogram(prometheus.HistogramOpts{
+ Namespace: metricNamespace,
+ Name: "previous_peer_memory",
+ Buckets: memDistribution,
+ Help: "How many peers have previously reserved this bucket of memory, as reported to the Resource Manager",
+ })
+
+ // ConnMemory
+ connMemory = prometheus.NewHistogram(prometheus.HistogramOpts{
+ Namespace: metricNamespace,
+ Name: "conn_memory",
+ Buckets: memDistribution,
+ Help: "How many conns have reserved this bucket of memory, as reported to the Resource Manager",
+ })
+ previousConnMemory = prometheus.NewHistogram(prometheus.HistogramOpts{
+ Namespace: metricNamespace,
+ Name: "previous_conn_memory",
+ Buckets: memDistribution,
+ Help: "How many conns have previously reserved this bucket of memory, as reported to the Resource Manager",
+ })
+
+ // FDs
+ fds = prometheus.NewGaugeVec(prometheus.GaugeOpts{
+ Namespace: metricNamespace,
+ Name: "fds",
+ Help: "Number of file descriptors reserved as reported to the Resource Manager",
+ }, []string{"scope"})
+
+ fdsSystem = fds.With(prometheus.Labels{"scope": "system"})
+ fdsTransient = fds.With(prometheus.Labels{"scope": "transient"})
+
+ // Blocked resources
+ blockedResources = prometheus.NewGaugeVec(prometheus.GaugeOpts{
+ Namespace: metricNamespace,
+ Name: "blocked_resources",
+ Help: "Number of blocked resources",
+ }, []string{"dir", "scope", "resource"})
+)
+var (
memDistribution = []float64{
1 << 10, // 1KB
4 << 10, // 4KB
@@ -79,49 +139,26 @@ var (
2 << 30, // 2GB
4 << 30, // 4GB
}
- PeerMemoryView = &view.View{
- Measure: peerMemory,
- Aggregation: view.Distribution(memDistribution...),
- }
- PeerMemoryNegativeView = &view.View{
- Measure: peerMemoryNegative,
- Aggregation: view.Distribution(memDistribution...),
- }
-
- // Not setup yet. Memory isn't attached to a given connection.
- ConnMemoryView = &view.View{
- Measure: connMemory,
- Aggregation: view.Distribution(memDistribution...),
- }
- ConnMemoryNegativeView = &view.View{
- Measure: connMemoryNegative,
- Aggregation: view.Distribution(memDistribution...),
- }
-
- FDsView = &view.View{Measure: fds, Aggregation: view.Sum(), TagKeys: []tag.Key{scopeTag}}
-
- BlockedResourcesView = &view.View{
- Measure: blockedResources,
- Aggregation: view.Sum(),
- TagKeys: []tag.Key{scopeTag, resourceTag},
- }
)
-var DefaultViews []*view.View = []*view.View{
- ConnView,
- PeerConnsView,
- PeerConnsNegativeView,
- FDsView,
-
- StreamView,
- PeerStreamsView,
- PeerStreamNegativeView,
-
- MemoryView,
- PeerMemoryView,
- PeerMemoryNegativeView,
-
- BlockedResourcesView,
+func MustRegisterWith(reg prometheus.Registerer) {
+ reg.MustRegister(
+ conns,
+ peerConns,
+ previousPeerConns,
+ streams,
+ peerStreams,
+
+ previousPeerStreams,
+
+ memory,
+ peerMemory,
+ previousPeerMemory,
+ connMemory,
+ previousConnMemory,
+ fds,
+ blockedResources,
+ )
}
// StatsTraceReporter reports stats on the resource manager using its traces.
@@ -133,11 +170,17 @@ func NewStatsTraceReporter() (StatsTraceReporter, error) {
}
func (r StatsTraceReporter) ConsumeEvent(evt rcmgr.TraceEvt) {
- ctx := context.Background()
+ tags := metricshelper.GetStringSlice()
+ defer metricshelper.PutStringSlice(tags)
+ r.consumeEventWithLabelSlice(evt, tags)
+}
+
+// Separate func so that we can test that this function does not allocate. The sync.Pool may allocate.
+func (r StatsTraceReporter) consumeEventWithLabelSlice(evt rcmgr.TraceEvt, tags *[]string) {
switch evt.Type {
case rcmgr.TraceAddStreamEvt, rcmgr.TraceRemoveStreamEvt:
- if p := rcmgr.ParsePeerScopeName(evt.Name); p.Validate() == nil {
+ if p := rcmgr.PeerStrInScopeName(evt.Name); p != "" {
// Aggregated peer stats. Counts how many peers have N number of streams open.
// Uses two buckets aggregations. One to count how many streams the
// peer has now. The other to count the negative value, or how many
@@ -148,10 +191,10 @@ func (r StatsTraceReporter) ConsumeEvent(evt rcmgr.TraceEvt) {
peerStreamsOut := int64(evt.StreamsOut)
if oldStreamsOut != peerStreamsOut {
if oldStreamsOut != 0 {
- stats.RecordWithTags(ctx, []tag.Mutator{tag.Upsert(directionTag, "outbound")}, peerStreamsNegative.M(oldStreamsOut))
+ previousPeerStreamsOutbound.Observe(float64(oldStreamsOut))
}
if peerStreamsOut != 0 {
- stats.RecordWithTags(ctx, []tag.Mutator{tag.Upsert(directionTag, "outbound")}, peerStreams.M(peerStreamsOut))
+ peerStreamsOutbound.Observe(float64(peerStreamsOut))
}
}
@@ -159,46 +202,50 @@ func (r StatsTraceReporter) ConsumeEvent(evt rcmgr.TraceEvt) {
peerStreamsIn := int64(evt.StreamsIn)
if oldStreamsIn != peerStreamsIn {
if oldStreamsIn != 0 {
- stats.RecordWithTags(ctx, []tag.Mutator{tag.Upsert(directionTag, "inbound")}, peerStreamsNegative.M(oldStreamsIn))
+ previousPeerStreamsInbound.Observe(float64(oldStreamsIn))
}
if peerStreamsIn != 0 {
- stats.RecordWithTags(ctx, []tag.Mutator{tag.Upsert(directionTag, "inbound")}, peerStreams.M(peerStreamsIn))
+ peerStreamsInbound.Observe(float64(peerStreamsIn))
}
}
} else {
- var tags []tag.Mutator
- if rcmgr.IsSystemScope(evt.Name) || rcmgr.IsTransientScope(evt.Name) {
- tags = append(tags, tag.Upsert(scopeTag, evt.Name))
- } else if svc := rcmgr.ParseServiceScopeName(evt.Name); svc != "" {
- tags = append(tags, tag.Upsert(scopeTag, "service"), tag.Upsert(serviceTag, svc))
- } else if proto := rcmgr.ParseProtocolScopeName(evt.Name); proto != "" {
- tags = append(tags, tag.Upsert(scopeTag, "protocol"), tag.Upsert(protocolTag, proto))
- } else {
- // Not measuring connscope, servicepeer and protocolpeer. Lots of data, and
- // you can use aggregated peer stats + service stats to infer
- // this.
- break
- }
-
if evt.DeltaOut != 0 {
- stats.RecordWithTags(
- ctx,
- append([]tag.Mutator{tag.Upsert(directionTag, "outbound")}, tags...),
- streams.M(int64(evt.DeltaOut)),
- )
+ if rcmgr.IsSystemScope(evt.Name) || rcmgr.IsTransientScope(evt.Name) {
+ *tags = (*tags)[:0]
+ *tags = append(*tags, "outbound", evt.Name, "")
+ streams.WithLabelValues(*tags...).Set(float64(evt.StreamsOut))
+ } else if proto := rcmgr.ParseProtocolScopeName(evt.Name); proto != "" {
+ *tags = (*tags)[:0]
+ *tags = append(*tags, "outbound", "protocol", proto)
+ streams.WithLabelValues(*tags...).Set(float64(evt.StreamsOut))
+ } else {
+ // Not measuring service scope, connscope, servicepeer and protocolpeer. Lots of data, and
+ // you can use aggregated peer stats + service stats to infer
+ // this.
+ break
+ }
}
if evt.DeltaIn != 0 {
- stats.RecordWithTags(
- ctx,
- append([]tag.Mutator{tag.Upsert(directionTag, "inbound")}, tags...),
- streams.M(int64(evt.DeltaIn)),
- )
+ if rcmgr.IsSystemScope(evt.Name) || rcmgr.IsTransientScope(evt.Name) {
+ *tags = (*tags)[:0]
+ *tags = append(*tags, "inbound", evt.Name, "")
+ streams.WithLabelValues(*tags...).Set(float64(evt.StreamsIn))
+ } else if proto := rcmgr.ParseProtocolScopeName(evt.Name); proto != "" {
+ *tags = (*tags)[:0]
+ *tags = append(*tags, "inbound", "protocol", proto)
+ streams.WithLabelValues(*tags...).Set(float64(evt.StreamsIn))
+ } else {
+ // Not measuring service scope, connscope, servicepeer and protocolpeer. Lots of data, and
+ // you can use aggregated peer stats + service stats to infer
+ // this.
+ break
+ }
}
}
case rcmgr.TraceAddConnEvt, rcmgr.TraceRemoveConnEvt:
- if p := rcmgr.ParsePeerScopeName(evt.Name); p.Validate() == nil {
+ if p := rcmgr.PeerStrInScopeName(evt.Name); p != "" {
// Aggregated peer stats. Counts how many peers have N number of connections.
// Uses two buckets aggregations. One to count how many streams the
// peer has now. The other to count the negative value, or how many
@@ -209,10 +256,10 @@ func (r StatsTraceReporter) ConsumeEvent(evt rcmgr.TraceEvt) {
connsOut := int64(evt.ConnsOut)
if oldConnsOut != connsOut {
if oldConnsOut != 0 {
- stats.RecordWithTags(ctx, []tag.Mutator{tag.Upsert(directionTag, "outbound")}, peerConnsNegative.M(oldConnsOut))
+ previousPeerConnsOutbound.Observe(float64(oldConnsOut))
}
if connsOut != 0 {
- stats.RecordWithTags(ctx, []tag.Mutator{tag.Upsert(directionTag, "outbound")}, peerConns.M(connsOut))
+ peerConnsOutbound.Observe(float64(connsOut))
}
}
@@ -220,88 +267,72 @@ func (r StatsTraceReporter) ConsumeEvent(evt rcmgr.TraceEvt) {
connsIn := int64(evt.ConnsIn)
if oldConnsIn != connsIn {
if oldConnsIn != 0 {
- stats.RecordWithTags(ctx, []tag.Mutator{tag.Upsert(directionTag, "inbound")}, peerConnsNegative.M(oldConnsIn))
+ previousPeerConnsInbound.Observe(float64(oldConnsIn))
}
if connsIn != 0 {
- stats.RecordWithTags(ctx, []tag.Mutator{tag.Upsert(directionTag, "inbound")}, peerConns.M(connsIn))
+ peerConnsInbound.Observe(float64(connsIn))
}
}
} else {
- var tags []tag.Mutator
- if rcmgr.IsSystemScope(evt.Name) || rcmgr.IsTransientScope(evt.Name) {
- tags = append(tags, tag.Upsert(scopeTag, evt.Name))
- } else if rcmgr.IsConnScope(evt.Name) {
+ if rcmgr.IsConnScope(evt.Name) {
// Not measuring this. I don't think it's useful.
break
- } else {
- // This could be a span
- break
}
- if evt.DeltaOut != 0 {
- stats.RecordWithTags(
- ctx,
- append([]tag.Mutator{tag.Upsert(directionTag, "outbound")}, tags...),
- conns.M(int64(evt.DeltaOut)),
- )
- }
-
- if evt.DeltaIn != 0 {
- stats.RecordWithTags(
- ctx,
- append([]tag.Mutator{tag.Upsert(directionTag, "inbound")}, tags...),
- conns.M(int64(evt.DeltaIn)),
- )
+ if rcmgr.IsSystemScope(evt.Name) {
+ connsInboundSystem.Set(float64(evt.ConnsIn))
+ connsOutboundSystem.Set(float64(evt.ConnsOut))
+ } else if rcmgr.IsTransientScope(evt.Name) {
+ connsInboundTransient.Set(float64(evt.ConnsIn))
+ connsOutboundTransient.Set(float64(evt.ConnsOut))
}
// Represents the delta in fds
if evt.Delta != 0 {
- stats.RecordWithTags(
- ctx,
- tags,
- fds.M(int64(evt.Delta)),
- )
+ if rcmgr.IsSystemScope(evt.Name) {
+ fdsSystem.Set(float64(evt.FD))
+ } else if rcmgr.IsTransientScope(evt.Name) {
+ fdsTransient.Set(float64(evt.FD))
+ }
}
}
+
case rcmgr.TraceReserveMemoryEvt, rcmgr.TraceReleaseMemoryEvt:
- if p := rcmgr.ParsePeerScopeName(evt.Name); p.Validate() == nil {
+ if p := rcmgr.PeerStrInScopeName(evt.Name); p != "" {
oldMem := evt.Memory - evt.Delta
if oldMem != evt.Memory {
if oldMem != 0 {
- stats.Record(ctx, peerMemoryNegative.M(oldMem))
+ previousPeerMemory.Observe(float64(oldMem))
}
if evt.Memory != 0 {
- stats.Record(ctx, peerMemory.M(evt.Memory))
+ peerMemory.Observe(float64(evt.Memory))
}
}
} else if rcmgr.IsConnScope(evt.Name) {
oldMem := evt.Memory - evt.Delta
if oldMem != evt.Memory {
if oldMem != 0 {
- stats.Record(ctx, connMemoryNegative.M(oldMem))
+ previousConnMemory.Observe(float64(oldMem))
}
if evt.Memory != 0 {
- stats.Record(ctx, connMemory.M(evt.Memory))
+ connMemory.Observe(float64(evt.Memory))
}
}
} else {
- var tags []tag.Mutator
if rcmgr.IsSystemScope(evt.Name) || rcmgr.IsTransientScope(evt.Name) {
- tags = append(tags, tag.Upsert(scopeTag, evt.Name))
- } else if svc := rcmgr.ParseServiceScopeName(evt.Name); svc != "" {
- tags = append(tags, tag.Upsert(scopeTag, "service"), tag.Upsert(serviceTag, svc))
+ *tags = (*tags)[:0]
+ *tags = append(*tags, evt.Name, "")
+ memory.WithLabelValues(*tags...).Set(float64(evt.Memory))
} else if proto := rcmgr.ParseProtocolScopeName(evt.Name); proto != "" {
- tags = append(tags, tag.Upsert(scopeTag, "protocol"), tag.Upsert(protocolTag, proto))
+ *tags = (*tags)[:0]
+ *tags = append(*tags, "protocol", proto)
+ memory.WithLabelValues(*tags...).Set(float64(evt.Memory))
} else {
// Not measuring connscope, servicepeer and protocolpeer. Lots of data, and
// you can use aggregated peer stats + service stats to infer
// this.
break
}
-
- if evt.Delta != 0 {
- stats.RecordWithTags(ctx, tags, memory.M(int64(evt.Delta)))
- }
}
case rcmgr.TraceBlockAddConnEvt, rcmgr.TraceBlockAddStreamEvt, rcmgr.TraceBlockReserveMemoryEvt:
@@ -314,31 +345,40 @@ func (r StatsTraceReporter) ConsumeEvent(evt rcmgr.TraceEvt) {
resource = "memory"
}
+ scopeName := evt.Name
// Only the top scopeName. We don't want to get the peerid here.
- scopeName := strings.SplitN(evt.Name, ":", 2)[0]
+ // Using indexes and slices to avoid allocating.
+ scopeSplitIdx := strings.IndexByte(scopeName, ':')
+ if scopeSplitIdx != -1 {
+ scopeName = evt.Name[0:scopeSplitIdx]
+ }
// Drop the connection or stream id
- scopeName = strings.SplitN(scopeName, "-", 2)[0]
-
- // If something else gets added here, make sure to update the size hint
- // below when we make `tagsWithDir`.
- tags := []tag.Mutator{tag.Upsert(scopeTag, scopeName), tag.Upsert(resourceTag, resource)}
+ idSplitIdx := strings.IndexByte(scopeName, '-')
+ if idSplitIdx != -1 {
+ scopeName = scopeName[0:idSplitIdx]
+ }
if evt.DeltaIn != 0 {
- tagsWithDir := make([]tag.Mutator, 0, 3)
- tagsWithDir = append(tagsWithDir, tag.Insert(directionTag, "inbound"))
- tagsWithDir = append(tagsWithDir, tags...)
- stats.RecordWithTags(ctx, tagsWithDir[0:], blockedResources.M(int64(1)))
+ *tags = (*tags)[:0]
+ *tags = append(*tags, "inbound", scopeName, resource)
+ blockedResources.WithLabelValues(*tags...).Add(float64(evt.DeltaIn))
}
if evt.DeltaOut != 0 {
- tagsWithDir := make([]tag.Mutator, 0, 3)
- tagsWithDir = append(tagsWithDir, tag.Insert(directionTag, "outbound"))
- tagsWithDir = append(tagsWithDir, tags...)
- stats.RecordWithTags(ctx, tagsWithDir, blockedResources.M(int64(1)))
+ *tags = (*tags)[:0]
+ *tags = append(*tags, "outbound", scopeName, resource)
+ blockedResources.WithLabelValues(*tags...).Add(float64(evt.DeltaOut))
}
- if evt.Delta != 0 {
- stats.RecordWithTags(ctx, tags, blockedResources.M(1))
+ if evt.Delta != 0 && resource == "connection" {
+ // This represents fds blocked
+ *tags = (*tags)[:0]
+ *tags = append(*tags, "", scopeName, "fd")
+ blockedResources.WithLabelValues(*tags...).Add(float64(evt.Delta))
+ } else if evt.Delta != 0 {
+ *tags = (*tags)[:0]
+ *tags = append(*tags, "", scopeName, resource)
+ blockedResources.WithLabelValues(*tags...).Add(float64(evt.Delta))
}
}
}
diff --git a/vendor/github.com/libp2p/go-libp2p/p2p/host/resource-manager/rcmgr.go b/vendor/github.com/libp2p/go-libp2p/p2p/host/resource-manager/rcmgr.go
index 03d100a63..7f15bb768 100644
--- a/vendor/github.com/libp2p/go-libp2p/p2p/host/resource-manager/rcmgr.go
+++ b/vendor/github.com/libp2p/go-libp2p/p2p/host/resource-manager/rcmgr.go
@@ -517,40 +517,20 @@ func peerScopeName(p peer.ID) string {
return fmt.Sprintf("peer:%s", p)
}
-// ParsePeerScopeName returns "" if name is not a peerScopeName
-func ParsePeerScopeName(name string) peer.ID {
+// PeerStrInScopeName returns "" if name is not a peerScopeName. Returns a string to avoid allocating a peer ID object
+func PeerStrInScopeName(name string) string {
if !strings.HasPrefix(name, "peer:") || IsSpan(name) {
return ""
}
- parts := strings.SplitN(name, "peer:", 2)
- if len(parts) != 2 {
- return ""
- }
- p, err := peer.Decode(parts[1])
- if err != nil {
+ // Index to avoid allocating a new string
+ peerSplitIdx := strings.Index(name, "peer:")
+ if peerSplitIdx == -1 {
return ""
}
+ p := (name[peerSplitIdx+len("peer:"):])
return p
}
-// ParseServiceScopeName returns the service name if name is a serviceScopeName.
-// Otherwise returns ""
-func ParseServiceScopeName(name string) string {
- if strings.HasPrefix(name, "service:") && !IsSpan(name) {
- if strings.Contains(name, "peer:") {
- // This is a service peer scope
- return ""
- }
- parts := strings.SplitN(name, ":", 2)
- if len(parts) != 2 {
- return ""
- }
-
- return parts[1]
- }
- return ""
-}
-
// ParseProtocolScopeName returns the service name if name is a serviceScopeName.
// Otherwise returns ""
func ParseProtocolScopeName(name string) string {
@@ -559,12 +539,13 @@ func ParseProtocolScopeName(name string) string {
// This is a protocol peer scope
return ""
}
- parts := strings.SplitN(name, ":", 2)
- if len(parts) != 2 {
- return ("")
- }
- return parts[1]
+ // Index to avoid allocating a new string
+ separatorIdx := strings.Index(name, ":")
+ if separatorIdx == -1 {
+ return ""
+ }
+ return name[separatorIdx+1:]
}
return ""
}
diff --git a/vendor/github.com/libp2p/go-libp2p/p2p/host/resource-manager/scope.go b/vendor/github.com/libp2p/go-libp2p/p2p/host/resource-manager/scope.go
index 872f0eb68..60089c3a5 100644
--- a/vendor/github.com/libp2p/go-libp2p/p2p/host/resource-manager/scope.go
+++ b/vendor/github.com/libp2p/go-libp2p/p2p/host/resource-manager/scope.go
@@ -2,6 +2,8 @@ package rcmgr
import (
"fmt"
+ "math"
+ "math/big"
"strings"
"sync"
@@ -79,15 +81,56 @@ func IsSpan(name string) bool {
return strings.Contains(name, ".span-")
}
+func addInt64WithOverflow(a int64, b int64) (c int64, ok bool) {
+ c = a + b
+ return c, (c > a) == (b > 0)
+}
+
+// mulInt64WithOverflow checks for overflow in multiplying two int64s. See
+// https://groups.google.com/g/golang-nuts/c/h5oSN5t3Au4/m/KaNQREhZh0QJ
+func mulInt64WithOverflow(a, b int64) (c int64, ok bool) {
+ const mostPositive = 1<<63 - 1
+ const mostNegative = -(mostPositive + 1)
+ c = a * b
+ if a == 0 || b == 0 || a == 1 || b == 1 {
+ return c, true
+ }
+ if a == mostNegative || b == mostNegative {
+ return c, false
+ }
+ return c, c/b == a
+}
+
// Resources implementation
func (rc *resources) checkMemory(rsvp int64, prio uint8) error {
- // overflow check; this also has the side effect that we cannot reserve negative memory.
- newmem := rc.memory + rsvp
+ if rsvp < 0 {
+ return fmt.Errorf("can't reserve negative memory. rsvp=%v", rsvp)
+ }
+
limit := rc.limit.GetMemoryLimit()
- threshold := (1 + int64(prio)) * limit / 256
+ if limit == math.MaxInt64 {
+ // Special case where we've set max limits.
+ return nil
+ }
+
+ newmem, addOk := addInt64WithOverflow(rc.memory, rsvp)
+
+ threshold, mulOk := mulInt64WithOverflow(1+int64(prio), limit)
+ if !mulOk {
+ thresholdBig := big.NewInt(limit)
+ thresholdBig = thresholdBig.Mul(thresholdBig, big.NewInt(1+int64(prio)))
+ thresholdBig.Rsh(thresholdBig, 8) // Divide 256
+ if !thresholdBig.IsInt64() {
+ // Shouldn't happen since the threshold can only be <= limit
+ threshold = limit
+ }
+ threshold = thresholdBig.Int64()
+ } else {
+ threshold = threshold / 256
+ }
- if newmem > threshold {
- return &errMemoryLimitExceeded{
+ if !addOk || newmem > threshold {
+ return &ErrMemoryLimitExceeded{
current: rc.memory,
attempted: rsvp,
limit: limit,
@@ -128,7 +171,7 @@ func (rc *resources) addStreams(incount, outcount int) error {
if incount > 0 {
limit := rc.limit.GetStreamLimit(network.DirInbound)
if rc.nstreamsIn+incount > limit {
- return &errStreamOrConnLimitExceeded{
+ return &ErrStreamOrConnLimitExceeded{
current: rc.nstreamsIn,
attempted: incount,
limit: limit,
@@ -139,7 +182,7 @@ func (rc *resources) addStreams(incount, outcount int) error {
if outcount > 0 {
limit := rc.limit.GetStreamLimit(network.DirOutbound)
if rc.nstreamsOut+outcount > limit {
- return &errStreamOrConnLimitExceeded{
+ return &ErrStreamOrConnLimitExceeded{
current: rc.nstreamsOut,
attempted: outcount,
limit: limit,
@@ -149,7 +192,7 @@ func (rc *resources) addStreams(incount, outcount int) error {
}
if limit := rc.limit.GetStreamTotalLimit(); rc.nstreamsIn+incount+rc.nstreamsOut+outcount > limit {
- return &errStreamOrConnLimitExceeded{
+ return &ErrStreamOrConnLimitExceeded{
current: rc.nstreamsIn + rc.nstreamsOut,
attempted: incount + outcount,
limit: limit,
@@ -201,7 +244,7 @@ func (rc *resources) addConns(incount, outcount, fdcount int) error {
if incount > 0 {
limit := rc.limit.GetConnLimit(network.DirInbound)
if rc.nconnsIn+incount > limit {
- return &errStreamOrConnLimitExceeded{
+ return &ErrStreamOrConnLimitExceeded{
current: rc.nconnsIn,
attempted: incount,
limit: limit,
@@ -212,7 +255,7 @@ func (rc *resources) addConns(incount, outcount, fdcount int) error {
if outcount > 0 {
limit := rc.limit.GetConnLimit(network.DirOutbound)
if rc.nconnsOut+outcount > limit {
- return &errStreamOrConnLimitExceeded{
+ return &ErrStreamOrConnLimitExceeded{
current: rc.nconnsOut,
attempted: outcount,
limit: limit,
@@ -222,7 +265,7 @@ func (rc *resources) addConns(incount, outcount, fdcount int) error {
}
if connLimit := rc.limit.GetConnTotalLimit(); rc.nconnsIn+incount+rc.nconnsOut+outcount > connLimit {
- return &errStreamOrConnLimitExceeded{
+ return &ErrStreamOrConnLimitExceeded{
current: rc.nconnsIn + rc.nconnsOut,
attempted: incount + outcount,
limit: connLimit,
@@ -232,7 +275,7 @@ func (rc *resources) addConns(incount, outcount, fdcount int) error {
if fdcount > 0 {
limit := rc.limit.GetFDLimit()
if rc.nfd+fdcount > limit {
- return &errStreamOrConnLimitExceeded{
+ return &ErrStreamOrConnLimitExceeded{
current: rc.nfd,
attempted: fdcount,
limit: limit,
diff --git a/vendor/github.com/libp2p/go-libp2p/p2p/host/resource-manager/sys_unix.go b/vendor/github.com/libp2p/go-libp2p/p2p/host/resource-manager/sys_unix.go
index 75d4f7f4f..50042c1fe 100644
--- a/vendor/github.com/libp2p/go-libp2p/p2p/host/resource-manager/sys_unix.go
+++ b/vendor/github.com/libp2p/go-libp2p/p2p/host/resource-manager/sys_unix.go
@@ -1,5 +1,4 @@
//go:build linux || darwin
-// +build linux darwin
package rcmgr
diff --git a/vendor/github.com/libp2p/go-libp2p/p2p/host/routed/routed.go b/vendor/github.com/libp2p/go-libp2p/p2p/host/routed/routed.go
index 55c8d3474..eb8e58ee7 100644
--- a/vendor/github.com/libp2p/go-libp2p/p2p/host/routed/routed.go
+++ b/vendor/github.com/libp2p/go-libp2p/p2p/host/routed/routed.go
@@ -107,7 +107,39 @@ func (rh *RoutedHost) Connect(ctx context.Context, pi peer.AddrInfo) error {
// if we're here, we got some addrs. let's use our wrapped host to connect.
pi.Addrs = addrs
- return rh.host.Connect(ctx, pi)
+ if cerr := rh.host.Connect(ctx, pi); cerr != nil {
+ // We couldn't connect. Let's check if we have the most
+ // up-to-date addresses for the given peer. If there
+ // are addresses we didn't know about previously, we
+ // try to connect again.
+ newAddrs, err := rh.findPeerAddrs(ctx, pi.ID)
+ if err != nil {
+ log.Debugf("failed to find more peer addresses %s: %s", pi.ID, err)
+ return cerr
+ }
+
+ // Build lookup map
+ lookup := make(map[string]struct{}, len(addrs))
+ for _, addr := range addrs {
+ lookup[string(addr.Bytes())] = struct{}{}
+ }
+
+ // if there's any address that's not in the previous set
+ // of addresses, try to connect again. If all addresses
+ // where known previously we return the original error.
+ for _, newAddr := range newAddrs {
+ if _, found := lookup[string(newAddr.Bytes())]; found {
+ continue
+ }
+
+ pi.Addrs = newAddrs
+ return rh.host.Connect(ctx, pi)
+ }
+ // No appropriate new address found.
+ // Return the original dial error.
+ return cerr
+ }
+ return nil
}
func (rh *RoutedHost) findPeerAddrs(ctx context.Context, id peer.ID) ([]ma.Multiaddr, error) {
@@ -157,7 +189,7 @@ func (rh *RoutedHost) SetStreamHandler(pid protocol.ID, handler network.StreamHa
rh.host.SetStreamHandler(pid, handler)
}
-func (rh *RoutedHost) SetStreamHandlerMatch(pid protocol.ID, m func(string) bool, handler network.StreamHandler) {
+func (rh *RoutedHost) SetStreamHandlerMatch(pid protocol.ID, m func(protocol.ID) bool, handler network.StreamHandler) {
rh.host.SetStreamHandlerMatch(pid, m, handler)
}
diff --git a/vendor/github.com/libp2p/go-libp2p/p2p/metricshelper/conn.go b/vendor/github.com/libp2p/go-libp2p/p2p/metricshelper/conn.go
new file mode 100644
index 000000000..ef367ac9b
--- /dev/null
+++ b/vendor/github.com/libp2p/go-libp2p/p2p/metricshelper/conn.go
@@ -0,0 +1,29 @@
+package metricshelper
+
+import ma "github.com/multiformats/go-multiaddr"
+
+var transports = [...]int{ma.P_CIRCUIT, ma.P_WEBRTC, ma.P_WEBTRANSPORT, ma.P_QUIC, ma.P_QUIC_V1, ma.P_WSS, ma.P_WS, ma.P_TCP}
+
+func GetTransport(a ma.Multiaddr) string {
+ for _, t := range transports {
+ if _, err := a.ValueForProtocol(t); err == nil {
+ return ma.ProtocolWithCode(t).Name
+ }
+ }
+ return "other"
+}
+
+func GetIPVersion(addr ma.Multiaddr) string {
+ version := "unknown"
+ ma.ForEach(addr, func(c ma.Component) bool {
+ if c.Protocol().Code == ma.P_IP4 {
+ version = "ip4"
+ return false
+ } else if c.Protocol().Code == ma.P_IP6 {
+ version = "ip6"
+ return false
+ }
+ return true
+ })
+ return version
+}
diff --git a/vendor/github.com/libp2p/go-libp2p/p2p/metricshelper/dir.go b/vendor/github.com/libp2p/go-libp2p/p2p/metricshelper/dir.go
new file mode 100644
index 000000000..2f89b951c
--- /dev/null
+++ b/vendor/github.com/libp2p/go-libp2p/p2p/metricshelper/dir.go
@@ -0,0 +1,14 @@
+package metricshelper
+
+import "github.com/libp2p/go-libp2p/core/network"
+
+func GetDirection(dir network.Direction) string {
+ switch dir {
+ case network.DirOutbound:
+ return "outbound"
+ case network.DirInbound:
+ return "inbound"
+ default:
+ return "unknown"
+ }
+}
diff --git a/vendor/github.com/libp2p/go-libp2p/p2p/metricshelper/pool.go b/vendor/github.com/libp2p/go-libp2p/p2p/metricshelper/pool.go
new file mode 100644
index 000000000..3290ed5a0
--- /dev/null
+++ b/vendor/github.com/libp2p/go-libp2p/p2p/metricshelper/pool.go
@@ -0,0 +1,26 @@
+package metricshelper
+
+import (
+ "fmt"
+ "sync"
+)
+
+const capacity = 8
+
+var stringPool = sync.Pool{New: func() any {
+ s := make([]string, 0, capacity)
+ return &s
+}}
+
+func GetStringSlice() *[]string {
+ s := stringPool.Get().(*[]string)
+ *s = (*s)[:0]
+ return s
+}
+
+func PutStringSlice(s *[]string) {
+ if c := cap(*s); c < capacity {
+ panic(fmt.Sprintf("expected a string slice with capacity 8 or greater, got %d", c))
+ }
+ stringPool.Put(s)
+}
diff --git a/vendor/github.com/libp2p/go-libp2p/p2p/metricshelper/registerer.go b/vendor/github.com/libp2p/go-libp2p/p2p/metricshelper/registerer.go
new file mode 100644
index 000000000..99027c0db
--- /dev/null
+++ b/vendor/github.com/libp2p/go-libp2p/p2p/metricshelper/registerer.go
@@ -0,0 +1,20 @@
+package metricshelper
+
+import (
+ "errors"
+
+ "github.com/prometheus/client_golang/prometheus"
+)
+
+// RegisterCollectors registers the collectors with reg ignoring
+// reregistration error and panics on any other error
+func RegisterCollectors(reg prometheus.Registerer, collectors ...prometheus.Collector) {
+ for _, c := range collectors {
+ err := reg.Register(c)
+ if err != nil {
+ if ok := errors.As(err, &prometheus.AlreadyRegisteredError{}); !ok {
+ panic(err)
+ }
+ }
+ }
+}
diff --git a/vendor/github.com/libp2p/go-libp2p/p2p/muxer/muxer-multistream/multistream.go b/vendor/github.com/libp2p/go-libp2p/p2p/muxer/muxer-multistream/multistream.go
deleted file mode 100644
index bf9d41630..000000000
--- a/vendor/github.com/libp2p/go-libp2p/p2p/muxer/muxer-multistream/multistream.go
+++ /dev/null
@@ -1,75 +0,0 @@
-// Package muxer_multistream implements a peerstream transport using
-// go-multistream to select the underlying stream muxer
-package muxer_multistream
-
-import (
- "fmt"
- "net"
- "time"
-
- "github.com/libp2p/go-libp2p/core/network"
-
- mss "github.com/multiformats/go-multistream"
-)
-
-var DefaultNegotiateTimeout = time.Second * 60
-
-type Transport struct {
- mux *mss.MultistreamMuxer
-
- tpts map[string]network.Multiplexer
-
- NegotiateTimeout time.Duration
-
- OrderPreference []string
-}
-
-func NewBlankTransport() *Transport {
- return &Transport{
- mux: mss.NewMultistreamMuxer(),
- tpts: make(map[string]network.Multiplexer),
- NegotiateTimeout: DefaultNegotiateTimeout,
- }
-}
-
-func (t *Transport) AddTransport(path string, tpt network.Multiplexer) {
- t.mux.AddHandler(path, nil)
- t.tpts[path] = tpt
- t.OrderPreference = append(t.OrderPreference, path)
-}
-
-func (t *Transport) NewConn(nc net.Conn, isServer bool, scope network.PeerScope) (network.MuxedConn, error) {
- if t.NegotiateTimeout != 0 {
- if err := nc.SetDeadline(time.Now().Add(t.NegotiateTimeout)); err != nil {
- return nil, err
- }
- }
-
- var proto string
- if isServer {
- selected, _, err := t.mux.Negotiate(nc)
- if err != nil {
- return nil, err
- }
- proto = selected
- } else {
- selected, err := mss.SelectOneOf(t.OrderPreference, nc)
- if err != nil {
- return nil, err
- }
- proto = selected
- }
-
- if t.NegotiateTimeout != 0 {
- if err := nc.SetDeadline(time.Time{}); err != nil {
- return nil, err
- }
- }
-
- tpt, ok := t.tpts[proto]
- if !ok {
- return nil, fmt.Errorf("selected protocol we don't have a transport for")
- }
-
- return tpt.NewConn(nc, isServer, scope)
-}
diff --git a/vendor/github.com/libp2p/go-libp2p/p2p/muxer/yamux/conn.go b/vendor/github.com/libp2p/go-libp2p/p2p/muxer/yamux/conn.go
index 30f9f351b..40c4af405 100644
--- a/vendor/github.com/libp2p/go-libp2p/p2p/muxer/yamux/conn.go
+++ b/vendor/github.com/libp2p/go-libp2p/p2p/muxer/yamux/conn.go
@@ -5,7 +5,7 @@ import (
"github.com/libp2p/go-libp2p/core/network"
- "github.com/libp2p/go-yamux/v3"
+ "github.com/libp2p/go-yamux/v4"
)
// conn implements mux.MuxedConn over yamux.Session.
diff --git a/vendor/github.com/libp2p/go-libp2p/p2p/muxer/yamux/stream.go b/vendor/github.com/libp2p/go-libp2p/p2p/muxer/yamux/stream.go
index e6a3a04bc..b50bc0bb8 100644
--- a/vendor/github.com/libp2p/go-libp2p/p2p/muxer/yamux/stream.go
+++ b/vendor/github.com/libp2p/go-libp2p/p2p/muxer/yamux/stream.go
@@ -5,7 +5,7 @@ import (
"github.com/libp2p/go-libp2p/core/network"
- "github.com/libp2p/go-yamux/v3"
+ "github.com/libp2p/go-yamux/v4"
)
// stream implements mux.MuxedStream over yamux.Stream.
diff --git a/vendor/github.com/libp2p/go-libp2p/p2p/muxer/yamux/transport.go b/vendor/github.com/libp2p/go-libp2p/p2p/muxer/yamux/transport.go
index bba53d7ac..327383633 100644
--- a/vendor/github.com/libp2p/go-libp2p/p2p/muxer/yamux/transport.go
+++ b/vendor/github.com/libp2p/go-libp2p/p2p/muxer/yamux/transport.go
@@ -7,11 +7,13 @@ import (
"github.com/libp2p/go-libp2p/core/network"
- "github.com/libp2p/go-yamux/v3"
+ "github.com/libp2p/go-yamux/v4"
)
var DefaultTransport *Transport
+const ID = "/yamux/1.0.0"
+
func init() {
config := yamux.DefaultConfig()
// We've bumped this to 16MiB as this critically limits throughput.
@@ -38,12 +40,17 @@ type Transport yamux.Config
var _ network.Multiplexer = &Transport{}
func (t *Transport) NewConn(nc net.Conn, isServer bool, scope network.PeerScope) (network.MuxedConn, error) {
+ var newSpan func() (yamux.MemoryManager, error)
+ if scope != nil {
+ newSpan = func() (yamux.MemoryManager, error) { return scope.BeginSpan() }
+ }
+
var s *yamux.Session
var err error
if isServer {
- s, err = yamux.Server(nc, t.Config(), scope)
+ s, err = yamux.Server(nc, t.Config(), newSpan)
} else {
- s, err = yamux.Client(nc, t.Config(), scope)
+ s, err = yamux.Client(nc, t.Config(), newSpan)
}
if err != nil {
return nil, err
diff --git a/vendor/github.com/libp2p/go-libp2p/p2p/net/conn-security-multistream/ssms.go b/vendor/github.com/libp2p/go-libp2p/p2p/net/conn-security-multistream/ssms.go
deleted file mode 100644
index 595d8dfde..000000000
--- a/vendor/github.com/libp2p/go-libp2p/p2p/net/conn-security-multistream/ssms.go
+++ /dev/null
@@ -1,110 +0,0 @@
-package csms
-
-import (
- "context"
- "fmt"
- "log"
- "net"
-
- "github.com/libp2p/go-libp2p/core/peer"
- "github.com/libp2p/go-libp2p/core/sec"
-
- mss "github.com/multiformats/go-multistream"
-)
-
-// SSMuxer is a multistream stream security transport multiplexer.
-//
-// SSMuxer is safe to use without initialization. However, it's not safe to move
-// after use.
-type SSMuxer struct {
- mux mss.MultistreamMuxer
- tpts map[string]sec.SecureTransport
- OrderPreference []string
-}
-
-var _ sec.SecureMuxer = (*SSMuxer)(nil)
-
-// AddTransport adds a stream security transport to this multistream muxer.
-//
-// This method is *not* thread-safe. It should be called only when initializing
-// the SSMuxer.
-func (sm *SSMuxer) AddTransport(path string, transport sec.SecureTransport) {
- if sm.tpts == nil {
- sm.tpts = make(map[string]sec.SecureTransport, 1)
- }
-
- sm.mux.AddHandler(path, nil)
- sm.tpts[path] = transport
- sm.OrderPreference = append(sm.OrderPreference, path)
-}
-
-// SecureInbound secures an inbound connection using this multistream
-// multiplexed stream security transport.
-func (sm *SSMuxer) SecureInbound(ctx context.Context, insecure net.Conn, p peer.ID) (sec.SecureConn, bool, error) {
- tpt, _, err := sm.selectProto(ctx, insecure, true)
- if err != nil {
- return nil, false, err
- }
- sconn, err := tpt.SecureInbound(ctx, insecure, p)
- return sconn, true, err
-}
-
-// SecureOutbound secures an outbound connection using this multistream
-// multiplexed stream security transport.
-func (sm *SSMuxer) SecureOutbound(ctx context.Context, insecure net.Conn, p peer.ID) (sec.SecureConn, bool, error) {
- tpt, server, err := sm.selectProto(ctx, insecure, false)
- if err != nil {
- return nil, false, err
- }
-
- var sconn sec.SecureConn
- if server {
- sconn, err = tpt.SecureInbound(ctx, insecure, p)
- if err != nil {
- return nil, false, fmt.Errorf("failed to secure inbound connection: %s", err)
- }
- // ensure the correct peer connected to us
- if sconn.RemotePeer() != p {
- sconn.Close()
- log.Printf("Handshake failed to properly authenticate peer. Authenticated %s, expected %s.", sconn.RemotePeer(), p)
- return nil, false, fmt.Errorf("unexpected peer")
- }
- } else {
- sconn, err = tpt.SecureOutbound(ctx, insecure, p)
- }
-
- return sconn, server, err
-}
-
-func (sm *SSMuxer) selectProto(ctx context.Context, insecure net.Conn, server bool) (sec.SecureTransport, bool, error) {
- var proto string
- var err error
- var iamserver bool
- done := make(chan struct{})
- go func() {
- defer close(done)
- if server {
- iamserver = true
- proto, _, err = sm.mux.Negotiate(insecure)
- } else {
- proto, iamserver, err = mss.SelectWithSimopenOrFail(sm.OrderPreference, insecure)
- }
- }()
-
- select {
- case <-done:
- if err != nil {
- return nil, false, err
- }
- if tpt, ok := sm.tpts[proto]; ok {
- return tpt, iamserver, nil
- }
- return nil, false, fmt.Errorf("selected unknown security transport")
- case <-ctx.Done():
- // We *must* do this. We have outstanding work on the connection
- // and it's no longer safe to use.
- insecure.Close()
- <-done // wait to stop using the connection.
- return nil, false, ctx.Err()
- }
-}
diff --git a/vendor/github.com/libp2p/go-libp2p/p2p/net/connmgr/connmgr.go b/vendor/github.com/libp2p/go-libp2p/p2p/net/connmgr/connmgr.go
index 22b83c44e..b42a122fa 100644
--- a/vendor/github.com/libp2p/go-libp2p/p2p/net/connmgr/connmgr.go
+++ b/vendor/github.com/libp2p/go-libp2p/p2p/net/connmgr/connmgr.go
@@ -7,6 +7,7 @@ import (
"sync/atomic"
"time"
+ "github.com/benbjohnson/clock"
"github.com/libp2p/go-libp2p/core/connmgr"
"github.com/libp2p/go-libp2p/core/network"
"github.com/libp2p/go-libp2p/core/peer"
@@ -27,6 +28,8 @@ var log = logging.Logger("connmgr")
type BasicConnMgr struct {
*decayer
+ clock clock.Clock
+
cfg *config
segments segments
@@ -35,7 +38,7 @@ type BasicConnMgr struct {
// channel-based semaphore that enforces only a single trim is in progress
trimMutex sync.Mutex
- connCount int32
+ connCount atomic.Int32
// to be accessed atomically. This is mimicking the implementation of a sync.Once.
// Take care of correct alignment when modifying this struct.
trimCount uint64
@@ -59,14 +62,21 @@ type segment struct {
peers map[peer.ID]*peerInfo
}
-type segments [256]*segment
+type segments struct {
+ // bucketsMu is used to prevent deadlocks when concurrent processes try to
+ // grab multiple segment locks at once. If you need multiple segment locks
+ // at once, you should grab this lock first. You may release this lock once
+ // you have the segment locks.
+ bucketsMu sync.Mutex
+ buckets [256]*segment
+}
func (ss *segments) get(p peer.ID) *segment {
- return ss[byte(p[len(p)-1])]
+ return ss.buckets[byte(p[len(p)-1])]
}
func (ss *segments) countPeers() (count int) {
- for _, seg := range ss {
+ for _, seg := range ss.buckets {
seg.Lock()
count += len(seg.peers)
seg.Unlock()
@@ -74,7 +84,7 @@ func (ss *segments) countPeers() (count int) {
return count
}
-func (s *segment) tagInfoFor(p peer.ID) *peerInfo {
+func (s *segment) tagInfoFor(p peer.ID, now time.Time) *peerInfo {
pi, ok := s.peers[p]
if ok {
return pi
@@ -82,7 +92,7 @@ func (s *segment) tagInfoFor(p peer.ID) *peerInfo {
// create a temporary peer to buffer early tags before the Connected notification arrives.
pi = &peerInfo{
id: p,
- firstSeen: time.Now(), // this timestamp will be updated when the first Connected notification arrives.
+ firstSeen: now, // this timestamp will be updated when the first Connected notification arrives.
temp: true,
tags: make(map[string]int),
decaying: make(map[*decayingTag]*connmgr.DecayingValue),
@@ -102,6 +112,7 @@ func NewConnManager(low, hi int, opts ...Option) (*BasicConnMgr, error) {
lowWater: low,
gracePeriod: time.Minute,
silencePeriod: 10 * time.Second,
+ clock: clock.New(),
}
for _, o := range opts {
if err := o(cfg); err != nil {
@@ -116,16 +127,17 @@ func NewConnManager(low, hi int, opts ...Option) (*BasicConnMgr, error) {
cm := &BasicConnMgr{
cfg: cfg,
+ clock: cfg.clock,
protected: make(map[peer.ID]map[string]struct{}, 16),
- segments: func() (ret segments) {
- for i := range ret {
- ret[i] = &segment{
- peers: make(map[peer.ID]*peerInfo),
- }
- }
- return ret
- }(),
+ segments: segments{},
}
+
+ for i := range cm.segments.buckets {
+ cm.segments.buckets[i] = &segment{
+ peers: make(map[peer.ID]*peerInfo),
+ }
+ }
+
cm.ctx, cm.cancel = context.WithCancel(context.Background())
if cfg.emergencyTrim {
@@ -146,7 +158,7 @@ func NewConnManager(low, hi int, opts ...Option) (*BasicConnMgr, error) {
// We don't pay attention to the silence period or the grace period.
// We try to not kill protected connections, but if that turns out to be necessary, not connection is safe!
func (cm *BasicConnMgr) memoryEmergency() {
- connCount := int(atomic.LoadInt32(&cm.connCount))
+ connCount := int(cm.connCount.Load())
target := connCount - cm.cfg.lowWater
if target < 0 {
log.Warnw("Low on memory, but we only have a few connections", "num", connCount, "low watermark", cm.cfg.lowWater)
@@ -167,7 +179,7 @@ func (cm *BasicConnMgr) memoryEmergency() {
// finally, update the last trim time.
cm.lastTrimMu.Lock()
- cm.lastTrim = time.Now()
+ cm.lastTrim = cm.clock.Now()
cm.lastTrimMu.Unlock()
}
@@ -241,23 +253,32 @@ type peerInfo struct {
firstSeen time.Time // timestamp when we began tracking this peer.
}
-type peerInfos []peerInfo
+type peerInfos []*peerInfo
-func (p peerInfos) SortByValue() {
+// SortByValueAndStreams sorts peerInfos by their value and stream count. It
+// will sort peers with no streams before those with streams (all else being
+// equal). If `sortByMoreStreams` is true it will sort peers with more streams
+// before those with fewer streams. This is useful to prioritize freeing memory.
+func (p peerInfos) SortByValueAndStreams(segments *segments, sortByMoreStreams bool) {
sort.Slice(p, func(i, j int) bool {
left, right := p[i], p[j]
- // temporary peers are preferred for pruning.
- if left.temp != right.temp {
- return left.temp
+
+ // Grab this lock so that we can grab both segment locks below without deadlocking.
+ segments.bucketsMu.Lock()
+
+ // lock this to protect from concurrent modifications from connect/disconnect events
+ leftSegment := segments.get(left.id)
+ leftSegment.Lock()
+ defer leftSegment.Unlock()
+
+ rightSegment := segments.get(right.id)
+ if leftSegment != rightSegment {
+ // These two peers are not in the same segment, lets get the lock
+ rightSegment.Lock()
+ defer rightSegment.Unlock()
}
- // otherwise, compare by value.
- return left.value < right.value
- })
-}
+ segments.bucketsMu.Unlock()
-func (p peerInfos) SortByValueAndStreams() {
- sort.Slice(p, func(i, j int) bool {
- left, right := p[i], p[j]
// temporary peers are preferred for pruning.
if left.temp != right.temp {
return left.temp
@@ -278,12 +299,21 @@ func (p peerInfos) SortByValueAndStreams() {
}
leftIncoming, leftStreams := incomingAndStreams(left.conns)
rightIncoming, rightStreams := incomingAndStreams(right.conns)
+ // prefer closing inactive connections (no streams open)
+ if rightStreams != leftStreams && (leftStreams == 0 || rightStreams == 0) {
+ return leftStreams < rightStreams
+ }
// incoming connections are preferred for pruning
if leftIncoming != rightIncoming {
return leftIncoming
}
- // prune connections with a higher number of streams first
- return rightStreams < leftStreams
+
+ if sortByMoreStreams {
+ // prune connections with a higher number of streams first
+ return rightStreams < leftStreams
+ } else {
+ return leftStreams < rightStreams
+ }
})
}
@@ -310,13 +340,13 @@ func (cm *BasicConnMgr) background() {
interval = cm.cfg.silencePeriod
}
- ticker := time.NewTicker(interval)
+ ticker := cm.clock.Ticker(interval)
defer ticker.Stop()
for {
select {
case <-ticker.C:
- if atomic.LoadInt32(&cm.connCount) < int32(cm.cfg.highWater) {
+ if cm.connCount.Load() < int32(cm.cfg.highWater) {
// Below high water, skip.
continue
}
@@ -335,7 +365,7 @@ func (cm *BasicConnMgr) doTrim() {
if count == atomic.LoadUint64(&cm.trimCount) {
cm.trim()
cm.lastTrimMu.Lock()
- cm.lastTrim = time.Now()
+ cm.lastTrim = cm.clock.Now()
cm.lastTrimMu.Unlock()
atomic.AddUint64(&cm.trimCount, 1)
}
@@ -345,7 +375,7 @@ func (cm *BasicConnMgr) doTrim() {
func (cm *BasicConnMgr) trim() {
// do the actual trim.
for _, c := range cm.getConnsToClose() {
- log.Infow("closing conn", "peer", c.RemotePeer())
+ log.Debugw("closing conn", "peer", c.RemotePeer())
c.Close()
}
}
@@ -354,31 +384,34 @@ func (cm *BasicConnMgr) getConnsToCloseEmergency(target int) []network.Conn {
candidates := make(peerInfos, 0, cm.segments.countPeers())
cm.plk.RLock()
- for _, s := range cm.segments {
+ for _, s := range cm.segments.buckets {
s.Lock()
for id, inf := range s.peers {
if _, ok := cm.protected[id]; ok {
// skip over protected peer.
continue
}
- candidates = append(candidates, *inf)
+ candidates = append(candidates, inf)
}
s.Unlock()
}
cm.plk.RUnlock()
// Sort peers according to their value.
- candidates.SortByValueAndStreams()
+ candidates.SortByValueAndStreams(&cm.segments, true)
selected := make([]network.Conn, 0, target+10)
for _, inf := range candidates {
if target <= 0 {
break
}
+ s := cm.segments.get(inf.id)
+ s.Lock()
for c := range inf.conns {
selected = append(selected, c)
}
target -= len(inf.conns)
+ s.Unlock()
}
if len(selected) >= target {
// We found enough connections that were not protected.
@@ -389,24 +422,28 @@ func (cm *BasicConnMgr) getConnsToCloseEmergency(target int) []network.Conn {
// We have no choice but to kill some protected connections.
candidates = candidates[:0]
cm.plk.RLock()
- for _, s := range cm.segments {
+ for _, s := range cm.segments.buckets {
s.Lock()
for _, inf := range s.peers {
- candidates = append(candidates, *inf)
+ candidates = append(candidates, inf)
}
s.Unlock()
}
cm.plk.RUnlock()
- candidates.SortByValueAndStreams()
+ candidates.SortByValueAndStreams(&cm.segments, true)
for _, inf := range candidates {
if target <= 0 {
break
}
+ // lock this to protect from concurrent modifications from connect/disconnect events
+ s := cm.segments.get(inf.id)
+ s.Lock()
for c := range inf.conns {
selected = append(selected, c)
}
target -= len(inf.conns)
+ s.Unlock()
}
return selected
}
@@ -419,17 +456,17 @@ func (cm *BasicConnMgr) getConnsToClose() []network.Conn {
return nil
}
- if int(atomic.LoadInt32(&cm.connCount)) <= cm.cfg.lowWater {
+ if int(cm.connCount.Load()) <= cm.cfg.lowWater {
log.Info("open connection count below limit")
return nil
}
candidates := make(peerInfos, 0, cm.segments.countPeers())
var ncandidates int
- gracePeriodStart := time.Now().Add(-cm.cfg.gracePeriod)
+ gracePeriodStart := cm.clock.Now().Add(-cm.cfg.gracePeriod)
cm.plk.RLock()
- for _, s := range cm.segments {
+ for _, s := range cm.segments.buckets {
s.Lock()
for id, inf := range s.peers {
if _, ok := cm.protected[id]; ok {
@@ -442,7 +479,7 @@ func (cm *BasicConnMgr) getConnsToClose() []network.Conn {
}
// note that we're copying the entry here,
// but since inf.conns is a map, it will still point to the original object
- candidates = append(candidates, *inf)
+ candidates = append(candidates, inf)
ncandidates += len(inf.conns)
}
s.Unlock()
@@ -459,7 +496,7 @@ func (cm *BasicConnMgr) getConnsToClose() []network.Conn {
}
// Sort peers according to their value.
- candidates.SortByValue()
+ candidates.SortByValueAndStreams(&cm.segments, false)
target := ncandidates - cm.cfg.lowWater
@@ -528,7 +565,7 @@ func (cm *BasicConnMgr) TagPeer(p peer.ID, tag string, val int) {
s.Lock()
defer s.Unlock()
- pi := s.tagInfoFor(p)
+ pi := s.tagInfoFor(p, cm.clock.Now())
// Update the total value of the peer.
pi.value += val - pi.tags[tag]
@@ -558,7 +595,7 @@ func (cm *BasicConnMgr) UpsertTag(p peer.ID, tag string, upsert func(int) int) {
s.Lock()
defer s.Unlock()
- pi := s.tagInfoFor(p)
+ pi := s.tagInfoFor(p, cm.clock.Now())
oldval := pi.tags[tag]
newval := upsert(oldval)
@@ -595,7 +632,7 @@ func (cm *BasicConnMgr) GetInfo() CMInfo {
LowWater: cm.cfg.lowWater,
LastTrim: lastTrim,
GracePeriod: cm.cfg.gracePeriod,
- ConnCount: int(atomic.LoadInt32(&cm.connCount)),
+ ConnCount: int(cm.connCount.Load()),
}
}
@@ -628,7 +665,7 @@ func (nn *cmNotifee) Connected(n network.Network, c network.Conn) {
if !ok {
pinfo = &peerInfo{
id: id,
- firstSeen: time.Now(),
+ firstSeen: cm.clock.Now(),
tags: make(map[string]int),
decaying: make(map[*decayingTag]*connmgr.DecayingValue),
conns: make(map[network.Conn]time.Time),
@@ -639,7 +676,7 @@ func (nn *cmNotifee) Connected(n network.Network, c network.Conn) {
// Connected notification arrived: flip the temporary flag, and update the firstSeen
// timestamp to the real one.
pinfo.temp = false
- pinfo.firstSeen = time.Now()
+ pinfo.firstSeen = cm.clock.Now()
}
_, ok = pinfo.conns[c]
@@ -648,8 +685,8 @@ func (nn *cmNotifee) Connected(n network.Network, c network.Conn) {
return
}
- pinfo.conns[c] = time.Now()
- atomic.AddInt32(&cm.connCount, 1)
+ pinfo.conns[c] = cm.clock.Now()
+ cm.connCount.Add(1)
}
// Disconnected is called by notifiers to inform that an existing connection has been closed or terminated.
@@ -678,7 +715,7 @@ func (nn *cmNotifee) Disconnected(n network.Network, c network.Conn) {
if len(cinf.conns) == 0 {
delete(s.peers, p)
}
- atomic.AddInt32(&cm.connCount, -1)
+ cm.connCount.Add(-1)
}
// Listen is no-op in this implementation.
diff --git a/vendor/github.com/libp2p/go-libp2p/p2p/net/connmgr/decay.go b/vendor/github.com/libp2p/go-libp2p/p2p/net/connmgr/decay.go
index 9841c2f1f..bdac0bef7 100644
--- a/vendor/github.com/libp2p/go-libp2p/p2p/net/connmgr/decay.go
+++ b/vendor/github.com/libp2p/go-libp2p/p2p/net/connmgr/decay.go
@@ -38,7 +38,7 @@ type decayer struct {
knownTags map[string]*decayingTag
// lastTick stores the last time the decayer ticked. Guarded by atomic.
- lastTick atomic.Value
+ lastTick atomic.Pointer[time.Time]
// bumpTagCh queues bump commands to be processed by the loop.
bumpTagCh chan bumpCmd
@@ -89,7 +89,8 @@ func NewDecayer(cfg *DecayerCfg, mgr *BasicConnMgr) (*decayer, error) {
doneCh: make(chan struct{}),
}
- d.lastTick.Store(d.clock.Now())
+ now := d.clock.Now()
+ d.lastTick.Store(&now)
// kick things off.
go d.process()
@@ -116,7 +117,7 @@ func (d *decayer) RegisterDecayingTag(name string, interval time.Duration, decay
"some precision may be lost", name, interval, d.cfg.Resolution)
}
- lastTick := d.lastTick.Load().(time.Time)
+ lastTick := d.lastTick.Load()
tag := &decayingTag{
trkr: d,
name: name,
@@ -156,14 +157,14 @@ func (d *decayer) process() {
var (
bmp bumpCmd
- now time.Time
visit = make(map[*decayingTag]struct{})
)
for {
select {
- case now = <-ticker.C:
- d.lastTick.Store(now)
+ case <-ticker.C:
+ now := d.clock.Now()
+ d.lastTick.Store(&now)
d.tagsMu.Lock()
for _, tag := range d.knownTags {
@@ -177,7 +178,7 @@ func (d *decayer) process() {
d.tagsMu.Unlock()
// Visit each peer, and decay tags that need to be decayed.
- for _, s := range d.mgr.segments {
+ for _, s := range d.mgr.segments.buckets {
s.Lock()
// Entered a segment that contains peers. Process each peer.
@@ -221,7 +222,7 @@ func (d *decayer) process() {
s := d.mgr.segments.get(peer)
s.Lock()
- p := s.tagInfoFor(peer)
+ p := s.tagInfoFor(peer, d.clock.Now())
v, ok := p.decaying[tag]
if !ok {
v = &connmgr.DecayingValue{
@@ -244,7 +245,7 @@ func (d *decayer) process() {
s := d.mgr.segments.get(rm.peer)
s.Lock()
- p := s.tagInfoFor(rm.peer)
+ p := s.tagInfoFor(rm.peer, d.clock.Now())
v, ok := p.decaying[rm.tag]
if !ok {
s.Unlock()
@@ -261,7 +262,7 @@ func (d *decayer) process() {
d.tagsMu.Unlock()
// Remove the tag from all peers that had it in the connmgr.
- for _, s := range d.mgr.segments {
+ for _, s := range d.mgr.segments.buckets {
// visit all segments, and attempt to remove the tag from all the peers it stores.
s.Lock()
for _, p := range s.peers {
@@ -291,8 +292,8 @@ type decayingTag struct {
bumpFn connmgr.BumpFn
// closed marks this tag as closed, so that if it's bumped after being
- // closed, we can return an error. 0 = false; 1 = true; guarded by atomic.
- closed int32
+ // closed, we can return an error.
+ closed atomic.Bool
}
var _ connmgr.DecayingTag = (*decayingTag)(nil)
@@ -307,7 +308,7 @@ func (t *decayingTag) Interval() time.Duration {
// Bump bumps a tag for this peer.
func (t *decayingTag) Bump(p peer.ID, delta int) error {
- if atomic.LoadInt32(&t.closed) == 1 {
+ if t.closed.Load() {
return fmt.Errorf("decaying tag %s had been closed; no further bumps are accepted", t.name)
}
@@ -324,7 +325,7 @@ func (t *decayingTag) Bump(p peer.ID, delta int) error {
}
func (t *decayingTag) Remove(p peer.ID) error {
- if atomic.LoadInt32(&t.closed) == 1 {
+ if t.closed.Load() {
return fmt.Errorf("decaying tag %s had been closed; no further removals are accepted", t.name)
}
@@ -341,7 +342,7 @@ func (t *decayingTag) Remove(p peer.ID) error {
}
func (t *decayingTag) Close() error {
- if !atomic.CompareAndSwapInt32(&t.closed, 0, 1) {
+ if !t.closed.CompareAndSwap(false, true) {
log.Warnf("duplicate decaying tag closure: %s; skipping", t.name)
return nil
}
diff --git a/vendor/github.com/libp2p/go-libp2p/p2p/net/connmgr/options.go b/vendor/github.com/libp2p/go-libp2p/p2p/net/connmgr/options.go
index 76b4ef386..cde1fd792 100644
--- a/vendor/github.com/libp2p/go-libp2p/p2p/net/connmgr/options.go
+++ b/vendor/github.com/libp2p/go-libp2p/p2p/net/connmgr/options.go
@@ -3,6 +3,8 @@ package connmgr
import (
"errors"
"time"
+
+ "github.com/benbjohnson/clock"
)
// config is the configuration struct for the basic connection manager.
@@ -13,6 +15,7 @@ type config struct {
silencePeriod time.Duration
decayer *DecayerCfg
emergencyTrim bool
+ clock clock.Clock
}
// Option represents an option for the basic connection manager.
@@ -26,6 +29,14 @@ func DecayerConfig(opts *DecayerCfg) Option {
}
}
+// WithClock sets the internal clock impl
+func WithClock(c clock.Clock) Option {
+ return func(cfg *config) error {
+ cfg.clock = c
+ return nil
+ }
+}
+
// WithGracePeriod sets the grace period.
// The grace period is the time a newly opened connection is given before it becomes
// subject to pruning.
diff --git a/vendor/github.com/libp2p/go-libp2p/p2p/net/nat/mapping.go b/vendor/github.com/libp2p/go-libp2p/p2p/net/nat/mapping.go
deleted file mode 100644
index f9b508e4e..000000000
--- a/vendor/github.com/libp2p/go-libp2p/p2p/net/nat/mapping.go
+++ /dev/null
@@ -1,119 +0,0 @@
-package nat
-
-import (
- "fmt"
- "net"
- "sync"
- "time"
-)
-
-// Mapping represents a port mapping in a NAT.
-type Mapping interface {
- // NAT returns the NAT object this Mapping belongs to.
- NAT() *NAT
-
- // Protocol returns the protocol of this port mapping. This is either
- // "tcp" or "udp" as no other protocols are likely to be NAT-supported.
- Protocol() string
-
- // InternalPort returns the internal device port. Mapping will continue to
- // try to map InternalPort() to an external facing port.
- InternalPort() int
-
- // ExternalPort returns the external facing port. If the mapping is not
- // established, port will be 0
- ExternalPort() int
-
- // ExternalAddr returns the external facing address. If the mapping is not
- // established, addr will be nil, and and ErrNoMapping will be returned.
- ExternalAddr() (addr net.Addr, err error)
-
- // Close closes the port mapping
- Close() error
-}
-
-// keeps republishing
-type mapping struct {
- sync.Mutex // guards all fields
-
- nat *NAT
- proto string
- intport int
- extport int
-
- cached net.IP
- cacheTime time.Time
- cacheLk sync.Mutex
-}
-
-func (m *mapping) NAT() *NAT {
- m.Lock()
- defer m.Unlock()
- return m.nat
-}
-
-func (m *mapping) Protocol() string {
- m.Lock()
- defer m.Unlock()
- return m.proto
-}
-
-func (m *mapping) InternalPort() int {
- m.Lock()
- defer m.Unlock()
- return m.intport
-}
-
-func (m *mapping) ExternalPort() int {
- m.Lock()
- defer m.Unlock()
- return m.extport
-}
-
-func (m *mapping) setExternalPort(p int) {
- m.Lock()
- defer m.Unlock()
- m.extport = p
-}
-
-func (m *mapping) ExternalAddr() (net.Addr, error) {
- m.cacheLk.Lock()
- defer m.cacheLk.Unlock()
- oport := m.ExternalPort()
- if oport == 0 {
- // dont even try right now.
- return nil, ErrNoMapping
- }
-
- if time.Since(m.cacheTime) >= CacheTime {
- m.nat.natmu.Lock()
- cval, err := m.nat.nat.GetExternalAddress()
- m.nat.natmu.Unlock()
-
- if err != nil {
- return nil, err
- }
-
- m.cached = cval
- m.cacheTime = time.Now()
- }
- switch m.Protocol() {
- case "tcp":
- return &net.TCPAddr{
- IP: m.cached,
- Port: oport,
- }, nil
- case "udp":
- return &net.UDPAddr{
- IP: m.cached,
- Port: oport,
- }, nil
- default:
- panic(fmt.Sprintf("invalid protocol %q", m.Protocol()))
- }
-}
-
-func (m *mapping) Close() error {
- m.nat.removeMapping(m)
- return nil
-}
diff --git a/vendor/github.com/libp2p/go-libp2p/p2p/net/nat/nat.go b/vendor/github.com/libp2p/go-libp2p/p2p/net/nat/nat.go
index e2656f8bc..28ffd4a5b 100644
--- a/vendor/github.com/libp2p/go-libp2p/p2p/net/nat/nat.go
+++ b/vendor/github.com/libp2p/go-libp2p/p2p/net/nat/nat.go
@@ -4,6 +4,7 @@ import (
"context"
"errors"
"fmt"
+ "net/netip"
"sync"
"time"
@@ -19,18 +20,30 @@ var log = logging.Logger("nat")
// MappingDuration is a default port mapping duration.
// Port mappings are renewed every (MappingDuration / 3)
-const MappingDuration = time.Second * 60
+const MappingDuration = time.Minute
// CacheTime is the time a mapping will cache an external address for
-const CacheTime = time.Second * 15
+const CacheTime = 15 * time.Second
-// DiscoverNAT looks for a NAT device in the network and
-// returns an object that can manage port mappings.
+type entry struct {
+ protocol string
+ port int
+}
+
+// so we can mock it in tests
+var discoverGateway = nat.DiscoverGateway
+
+// DiscoverNAT looks for a NAT device in the network and returns an object that can manage port mappings.
func DiscoverNAT(ctx context.Context) (*NAT, error) {
- natInstance, err := nat.DiscoverGateway(ctx)
+ natInstance, err := discoverGateway(ctx)
if err != nil {
return nil, err
}
+ var extAddr netip.Addr
+ extIP, err := natInstance.GetExternalAddress()
+ if err == nil {
+ extAddr, _ = netip.AddrFromSlice(extIP)
+ }
// Log the device addr.
addr, err := natInstance.GetDeviceAddress()
@@ -40,7 +53,20 @@ func DiscoverNAT(ctx context.Context) (*NAT, error) {
log.Debug("DiscoverGateway address:", addr)
}
- return newNAT(natInstance), nil
+ ctx, cancel := context.WithCancel(context.Background())
+ nat := &NAT{
+ nat: natInstance,
+ extAddr: extAddr,
+ mappings: make(map[entry]int),
+ ctx: ctx,
+ ctxCancel: cancel,
+ }
+ nat.refCount.Add(1)
+ go func() {
+ defer nat.refCount.Done()
+ nat.background()
+ }()
+ return nat, nil
}
// NAT is an object that manages address port mappings in
@@ -50,6 +76,8 @@ func DiscoverNAT(ctx context.Context) (*NAT, error) {
type NAT struct {
natmu sync.Mutex
nat nat.NAT
+ // External IP of the NAT. Will be renewed periodically (every CacheTime).
+ extAddr netip.Addr
refCount sync.WaitGroup
ctx context.Context
@@ -57,17 +85,7 @@ type NAT struct {
mappingmu sync.RWMutex // guards mappings
closed bool
- mappings map[*mapping]struct{}
-}
-
-func newNAT(realNAT nat.NAT) *NAT {
- ctx, cancel := context.WithCancel(context.Background())
- return &NAT{
- nat: realNAT,
- mappings: make(map[*mapping]struct{}),
- ctx: ctx,
- ctxCancel: cancel,
- }
+ mappings map[entry]int
}
// Close shuts down all port mappings. NAT can no longer be used.
@@ -81,99 +99,141 @@ func (nat *NAT) Close() error {
return nil
}
-// Mappings returns a slice of all NAT mappings
-func (nat *NAT) Mappings() []Mapping {
+func (nat *NAT) GetMapping(protocol string, port int) (addr netip.AddrPort, found bool) {
nat.mappingmu.Lock()
- maps2 := make([]Mapping, 0, len(nat.mappings))
- for m := range nat.mappings {
- maps2 = append(maps2, m)
+ defer nat.mappingmu.Unlock()
+
+ if !nat.extAddr.IsValid() {
+ return netip.AddrPort{}, false
}
- nat.mappingmu.Unlock()
- return maps2
+ extPort, found := nat.mappings[entry{protocol: protocol, port: port}]
+ if !found {
+ return netip.AddrPort{}, false
+ }
+ return netip.AddrPortFrom(nat.extAddr, uint16(extPort)), true
}
-// NewMapping attempts to construct a mapping on protocol and internal port
-// It will also periodically renew the mapping until the returned Mapping
-// -- or its parent NAT -- is Closed.
+// AddMapping attempts to construct a mapping on protocol and internal port.
+// It blocks until a mapping was established. Once added, it periodically renews the mapping.
//
// May not succeed, and mappings may change over time;
// NAT devices may not respect our port requests, and even lie.
-// Clients should not store the mapped results, but rather always
-// poll our object for the latest mappings.
-func (nat *NAT) NewMapping(protocol string, port int) (Mapping, error) {
- if nat == nil {
- return nil, fmt.Errorf("no nat available")
- }
-
+func (nat *NAT) AddMapping(ctx context.Context, protocol string, port int) error {
switch protocol {
case "tcp", "udp":
default:
- return nil, fmt.Errorf("invalid protocol: %s", protocol)
- }
-
- m := &mapping{
- intport: port,
- nat: nat,
- proto: protocol,
+ return fmt.Errorf("invalid protocol: %s", protocol)
}
nat.mappingmu.Lock()
+ defer nat.mappingmu.Unlock()
+
if nat.closed {
- nat.mappingmu.Unlock()
- return nil, errors.New("closed")
+ return errors.New("closed")
}
- nat.mappings[m] = struct{}{}
- nat.refCount.Add(1)
- nat.mappingmu.Unlock()
- go nat.refreshMappings(m)
// do it once synchronously, so first mapping is done right away, and before exiting,
// allowing users -- in the optimistic case -- to use results right after.
- nat.establishMapping(m)
- return m, nil
+ extPort := nat.establishMapping(ctx, protocol, port)
+ nat.mappings[entry{protocol: protocol, port: port}] = extPort
+ return nil
}
-func (nat *NAT) removeMapping(m *mapping) {
+// RemoveMapping removes a port mapping.
+// It blocks until the NAT has removed the mapping.
+func (nat *NAT) RemoveMapping(ctx context.Context, protocol string, port int) error {
nat.mappingmu.Lock()
- delete(nat.mappings, m)
- nat.mappingmu.Unlock()
- nat.natmu.Lock()
- nat.nat.DeletePortMapping(m.Protocol(), m.InternalPort())
- nat.natmu.Unlock()
+ defer nat.mappingmu.Unlock()
+
+ switch protocol {
+ case "tcp", "udp":
+ e := entry{protocol: protocol, port: port}
+ if _, ok := nat.mappings[e]; ok {
+ delete(nat.mappings, e)
+ return nat.nat.DeletePortMapping(ctx, protocol, port)
+ }
+ return errors.New("unknown mapping")
+ default:
+ return fmt.Errorf("invalid protocol: %s", protocol)
+ }
}
-func (nat *NAT) refreshMappings(m *mapping) {
- defer nat.refCount.Done()
- t := time.NewTicker(MappingDuration / 3)
+func (nat *NAT) background() {
+ const mappingUpdate = MappingDuration / 3
+
+ now := time.Now()
+ nextMappingUpdate := now.Add(mappingUpdate)
+ nextAddrUpdate := now.Add(CacheTime)
+
+ t := time.NewTimer(minTime(nextMappingUpdate, nextAddrUpdate).Sub(now)) // don't use a ticker here. We don't know how long establishing the mappings takes.
defer t.Stop()
+ var in []entry
+ var out []int // port numbers
for {
select {
- case <-t.C:
- nat.establishMapping(m)
+ case now := <-t.C:
+ if now.After(nextMappingUpdate) {
+ in = in[:0]
+ out = out[:0]
+ nat.mappingmu.Lock()
+ for e := range nat.mappings {
+ in = append(in, e)
+ }
+ nat.mappingmu.Unlock()
+ // Establishing the mapping involves network requests.
+ // Don't hold the mutex, just save the ports.
+ for _, e := range in {
+ out = append(out, nat.establishMapping(nat.ctx, e.protocol, e.port))
+ }
+ nat.mappingmu.Lock()
+ for i, p := range in {
+ if _, ok := nat.mappings[p]; !ok {
+ continue // entry might have been deleted
+ }
+ nat.mappings[p] = out[i]
+ }
+ nat.mappingmu.Unlock()
+ nextMappingUpdate = time.Now().Add(mappingUpdate)
+ }
+ if now.After(nextAddrUpdate) {
+ var extAddr netip.Addr
+ extIP, err := nat.nat.GetExternalAddress()
+ if err == nil {
+ extAddr, _ = netip.AddrFromSlice(extIP)
+ }
+ nat.extAddr = extAddr
+ nextAddrUpdate = time.Now().Add(CacheTime)
+ }
+ t.Reset(time.Until(minTime(nextAddrUpdate, nextMappingUpdate)))
case <-nat.ctx.Done():
- m.Close()
+ nat.mappingmu.Lock()
+ ctx, cancel := context.WithTimeout(context.Background(), 10*time.Second)
+ defer cancel()
+ for e := range nat.mappings {
+ delete(nat.mappings, e)
+ nat.nat.DeletePortMapping(ctx, e.protocol, e.port)
+ }
+ nat.mappingmu.Unlock()
return
}
}
}
-func (nat *NAT) establishMapping(m *mapping) {
- oldport := m.ExternalPort()
-
- log.Debugf("Attempting port map: %s/%d", m.Protocol(), m.InternalPort())
+func (nat *NAT) establishMapping(ctx context.Context, protocol string, internalPort int) (externalPort int) {
+ log.Debugf("Attempting port map: %s/%d", protocol, internalPort)
const comment = "libp2p"
nat.natmu.Lock()
- newport, err := nat.nat.AddPortMapping(m.Protocol(), m.InternalPort(), comment, MappingDuration)
+ var err error
+ externalPort, err = nat.nat.AddPortMapping(ctx, protocol, internalPort, comment, MappingDuration)
if err != nil {
// Some hardware does not support mappings with timeout, so try that
- newport, err = nat.nat.AddPortMapping(m.Protocol(), m.InternalPort(), comment, 0)
+ externalPort, err = nat.nat.AddPortMapping(ctx, protocol, internalPort, comment, 0)
}
nat.natmu.Unlock()
- if err != nil || newport == 0 {
- m.setExternalPort(0) // clear mapping
+ if err != nil || externalPort == 0 {
// TODO: log.Event
if err != nil {
log.Warnf("failed to establish port mapping: %s", err)
@@ -182,12 +242,16 @@ func (nat *NAT) establishMapping(m *mapping) {
}
// we do not close if the mapping failed,
// because it may work again next time.
- return
+ return 0
}
- m.setExternalPort(newport)
- log.Debugf("NAT Mapping: %d --> %d (%s)", m.ExternalPort(), m.InternalPort(), m.Protocol())
- if oldport != 0 && newport != oldport {
- log.Debugf("failed to renew same port mapping: ch %d -> %d", oldport, newport)
+ log.Debugf("NAT Mapping: %d --> %d (%s)", externalPort, internalPort, protocol)
+ return externalPort
+}
+
+func minTime(a, b time.Time) time.Time {
+ if a.Before(b) {
+ return a
}
+ return b
}
diff --git a/vendor/github.com/libp2p/go-libp2p/p2p/net/swarm/clock.go b/vendor/github.com/libp2p/go-libp2p/p2p/net/swarm/clock.go
new file mode 100644
index 000000000..6b63ac9c8
--- /dev/null
+++ b/vendor/github.com/libp2p/go-libp2p/p2p/net/swarm/clock.go
@@ -0,0 +1,49 @@
+package swarm
+
+import "time"
+
+// InstantTimer is a timer that triggers at some instant rather than some duration
+type InstantTimer interface {
+ Reset(d time.Time) bool
+ Stop() bool
+ Ch() <-chan time.Time
+}
+
+// Clock is a clock that can create timers that trigger at some
+// instant rather than some duration
+type Clock interface {
+ Now() time.Time
+ Since(t time.Time) time.Duration
+ InstantTimer(when time.Time) InstantTimer
+}
+
+type RealTimer struct{ t *time.Timer }
+
+var _ InstantTimer = (*RealTimer)(nil)
+
+func (t RealTimer) Ch() <-chan time.Time {
+ return t.t.C
+}
+
+func (t RealTimer) Reset(d time.Time) bool {
+ return t.t.Reset(time.Until(d))
+}
+
+func (t RealTimer) Stop() bool {
+ return t.t.Stop()
+}
+
+type RealClock struct{}
+
+var _ Clock = RealClock{}
+
+func (RealClock) Now() time.Time {
+ return time.Now()
+}
+func (RealClock) Since(t time.Time) time.Duration {
+ return time.Since(t)
+}
+func (RealClock) InstantTimer(when time.Time) InstantTimer {
+ t := time.NewTimer(time.Until(when))
+ return &RealTimer{t}
+}
diff --git a/vendor/github.com/libp2p/go-libp2p/p2p/net/swarm/dial_ranker.go b/vendor/github.com/libp2p/go-libp2p/p2p/net/swarm/dial_ranker.go
new file mode 100644
index 000000000..479db7ff9
--- /dev/null
+++ b/vendor/github.com/libp2p/go-libp2p/p2p/net/swarm/dial_ranker.go
@@ -0,0 +1,170 @@
+package swarm
+
+import (
+ "sort"
+ "strconv"
+ "time"
+
+ "github.com/libp2p/go-libp2p/core/network"
+ ma "github.com/multiformats/go-multiaddr"
+ manet "github.com/multiformats/go-multiaddr/net"
+)
+
+// The 250ms value is from happy eyeballs RFC 8305. This is a rough estimate of 1 RTT
+const (
+ // duration by which TCP dials are delayed relative to QUIC dial
+ PublicTCPDelay = 250 * time.Millisecond
+ PrivateTCPDelay = 30 * time.Millisecond
+
+ // duration by which QUIC dials are delayed relative to first QUIC dial
+ PublicQUICDelay = 250 * time.Millisecond
+ PrivateQUICDelay = 30 * time.Millisecond
+
+ // RelayDelay is the duration by which relay dials are delayed relative to direct addresses
+ RelayDelay = 250 * time.Millisecond
+)
+
+// NoDelayDialRanker ranks addresses with no delay. This is useful for simultaneous connect requests.
+func NoDelayDialRanker(addrs []ma.Multiaddr) []network.AddrDelay {
+ return getAddrDelay(addrs, 0, 0, 0)
+}
+
+// DefaultDialRanker determines the ranking of outgoing connection attempts.
+//
+// Addresses are grouped into four distinct groups:
+//
+// - private addresses (localhost and local networks (RFC 1918))
+// - public IPv4 addresses
+// - public IPv6 addresses
+// - relay addresses
+//
+// Within each group, the addresses are ranked according to the ranking logic described below.
+// We then dial addresses according to this ranking, with short timeouts applied between dial attempts.
+// This ranking logic dramatically reduces the number of simultaneous dial attempts, while introducing
+// no additional latency in the vast majority of cases.
+//
+// The private, public IPv4 and public IPv6 groups are dialed in parallel.
+// Dialing relay addresses is delayed by RelayDelay (250 ms), if we have any non-relay alternatives.
+//
+// In a future iteration, IPv6 will be given a headstart over IPv4, as recommended by Happy Eyeballs RFC 8305.
+// This is not enabled yet, since some ISPs are still IPv4-only, and dialing IPv6 addresses will therefore
+// always fail.
+// The correct solution is to detect this situation, and not attempt to dial IPv6 addresses at all.
+// IPv6 blackhole detection is tracked in https://github.com/libp2p/go-libp2p/issues/1605.
+//
+// Within each group (private, public IPv4, public IPv6, relay addresses) we apply the following
+// ranking logic:
+//
+// 1. If two QUIC addresses are present, dial the QUIC address with the lowest port first:
+// This is more likely to be the listen port. After this we dial the rest of the QUIC addresses delayed by
+// 250ms (PublicQUICDelay) for public addresses, and 30ms (PrivateQUICDelay) for local addresses.
+// 2. If a QUIC or WebTransport address is present, TCP addresses dials are delayed relative to the last QUIC dial:
+// We prefer to end up with a QUIC connection. For public addresses, the delay introduced is 250ms (PublicTCPDelay),
+// and for private addresses 30ms (PrivateTCPDelay).
+func DefaultDialRanker(addrs []ma.Multiaddr) []network.AddrDelay {
+ relay, addrs := filterAddrs(addrs, isRelayAddr)
+ pvt, addrs := filterAddrs(addrs, manet.IsPrivateAddr)
+ ip4, addrs := filterAddrs(addrs, func(a ma.Multiaddr) bool { return isProtocolAddr(a, ma.P_IP4) })
+ ip6, addrs := filterAddrs(addrs, func(a ma.Multiaddr) bool { return isProtocolAddr(a, ma.P_IP6) })
+
+ var relayOffset time.Duration = 0
+ if len(ip4) > 0 || len(ip6) > 0 {
+ // if there is a public direct address available delay relay dials
+ relayOffset = RelayDelay
+ }
+
+ res := make([]network.AddrDelay, 0, len(addrs))
+ for i := 0; i < len(addrs); i++ {
+ res = append(res, network.AddrDelay{Addr: addrs[i], Delay: 0})
+ }
+ res = append(res, getAddrDelay(pvt, PrivateTCPDelay, PrivateQUICDelay, 0)...)
+ res = append(res, getAddrDelay(ip4, PublicTCPDelay, PublicQUICDelay, 0)...)
+ res = append(res, getAddrDelay(ip6, PublicTCPDelay, PublicQUICDelay, 0)...)
+ res = append(res, getAddrDelay(relay, PublicTCPDelay, PublicQUICDelay, relayOffset)...)
+ return res
+}
+
+// getAddrDelay ranks a group of addresses (private, ip4, ip6) according to the ranking logic
+// explained in DefaultDialRanker.
+// offset is used to delay all addresses by a fixed duration. This is useful for delaying all relay
+// addresses relative to direct addresses
+func getAddrDelay(addrs []ma.Multiaddr, tcpDelay time.Duration, quicDelay time.Duration,
+ offset time.Duration) []network.AddrDelay {
+ sort.Slice(addrs, func(i, j int) bool { return score(addrs[i]) < score(addrs[j]) })
+
+ res := make([]network.AddrDelay, 0, len(addrs))
+ quicCount := 0
+ for _, a := range addrs {
+ delay := offset
+ switch {
+ case isProtocolAddr(a, ma.P_QUIC) || isProtocolAddr(a, ma.P_QUIC_V1):
+ // For QUIC addresses we dial a single address first and then wait for QUICDelay
+ // After QUICDelay we dial the rest of the QUIC addresses
+ if quicCount > 0 {
+ delay += quicDelay
+ }
+ quicCount++
+ case isProtocolAddr(a, ma.P_TCP):
+ if quicCount >= 2 {
+ delay += 2 * quicDelay
+ } else if quicCount == 1 {
+ delay += tcpDelay
+ }
+ }
+ res = append(res, network.AddrDelay{Addr: a, Delay: delay})
+ }
+ return res
+}
+
+// score scores a multiaddress for dialing delay. lower is better
+func score(a ma.Multiaddr) int {
+ // the lower 16 bits of the result are the relevant port
+ // the higher bits rank the protocol
+ // low ports are ranked higher because they're more likely to
+ // be listen addresses
+ if _, err := a.ValueForProtocol(ma.P_WEBTRANSPORT); err == nil {
+ p, _ := a.ValueForProtocol(ma.P_UDP)
+ pi, _ := strconv.Atoi(p) // cannot error
+ return pi + (1 << 18)
+ }
+ if _, err := a.ValueForProtocol(ma.P_QUIC); err == nil {
+ p, _ := a.ValueForProtocol(ma.P_UDP)
+ pi, _ := strconv.Atoi(p) // cannot error
+ return pi + (1 << 17)
+ }
+ if _, err := a.ValueForProtocol(ma.P_QUIC_V1); err == nil {
+ p, _ := a.ValueForProtocol(ma.P_UDP)
+ pi, _ := strconv.Atoi(p) // cannot error
+ return pi
+ }
+
+ if p, err := a.ValueForProtocol(ma.P_TCP); err == nil {
+ pi, _ := strconv.Atoi(p) // cannot error
+ return pi + (1 << 19)
+ }
+ return (1 << 30)
+}
+
+func isProtocolAddr(a ma.Multiaddr, p int) bool {
+ found := false
+ ma.ForEach(a, func(c ma.Component) bool {
+ if c.Protocol().Code == p {
+ found = true
+ return false
+ }
+ return true
+ })
+ return found
+}
+
+// filterAddrs filters an address slice in place
+func filterAddrs(addrs []ma.Multiaddr, f func(a ma.Multiaddr) bool) (filtered, rest []ma.Multiaddr) {
+ j := 0
+ for i := 0; i < len(addrs); i++ {
+ if f(addrs[i]) {
+ addrs[i], addrs[j] = addrs[j], addrs[i]
+ j++
+ }
+ }
+ return addrs[:j], addrs[j:]
+}
diff --git a/vendor/github.com/libp2p/go-libp2p/p2p/net/swarm/dial_worker.go b/vendor/github.com/libp2p/go-libp2p/p2p/net/swarm/dial_worker.go
index ff339d2f2..5688494f4 100644
--- a/vendor/github.com/libp2p/go-libp2p/p2p/net/swarm/dial_worker.go
+++ b/vendor/github.com/libp2p/go-libp2p/p2p/net/swarm/dial_worker.go
@@ -2,13 +2,14 @@ package swarm
import (
"context"
+ "math"
"sync"
+ "time"
"github.com/libp2p/go-libp2p/core/network"
"github.com/libp2p/go-libp2p/core/peer"
ma "github.com/multiformats/go-multiaddr"
- manet "github.com/multiformats/go-multiaddr/net"
)
// /////////////////////////////////////////////////////////////////////////////////
@@ -16,78 +17,159 @@ import (
// TODO explain how all this works
// ////////////////////////////////////////////////////////////////////////////////
+// dialRequest is structure used to request dials to the peer associated with a
+// worker loop
type dialRequest struct {
- ctx context.Context
+ // ctx is the context that may be used for the request
+ // if another concurrent request is made, any of the concurrent request's ctx may be used for
+ // dials to the peer's addresses
+ // ctx for simultaneous connect requests have higher priority than normal requests
+ ctx context.Context
+ // resch is the channel used to send the response for this query
resch chan dialResponse
}
+// dialResponse is the response sent to dialRequests on the request's resch channel
type dialResponse struct {
+ // conn is the connection to the peer on success
conn *Conn
- err error
+ // err is the error in dialing the peer
+ // nil on connection success
+ err error
}
+// pendRequest is used to track progress on a dialRequest.
type pendRequest struct {
- req dialRequest // the original request
- err *DialError // dial error accumulator
- addrs map[ma.Multiaddr]struct{} // pending addr dials
+ // req is the original dialRequest
+ req dialRequest
+ // err comprises errors of all failed dials
+ err *DialError
+ // addrs are the addresses on which we are waiting for pending dials
+ // At the time of creation addrs is initialised to all the addresses of the peer. On a failed dial,
+ // the addr is removed from the map and err is updated. On a successful dial, the dialRequest is
+ // completed and response is sent with the connection
+ addrs map[string]struct{}
}
+// addrDial tracks dials to a particular multiaddress.
type addrDial struct {
- addr ma.Multiaddr
- ctx context.Context
- conn *Conn
- err error
+ // addr is the address dialed
+ addr ma.Multiaddr
+ // ctx is the context used for dialing the address
+ ctx context.Context
+ // conn is the established connection on success
+ conn *Conn
+ // err is the err on dialing the address
+ err error
+ // requests is the list of pendRequests interested in this dial
+ // the value in the slice is the request number assigned to this request by the dialWorker
requests []int
- dialed bool
+ // dialed indicates whether we have triggered the dial to the address
+ dialed bool
+ // createdAt is the time this struct was created
+ createdAt time.Time
+ // dialRankingDelay is the delay in dialing this address introduced by the ranking logic
+ dialRankingDelay time.Duration
}
+// dialWorker synchronises concurrent dials to a peer. It ensures that we make at most one dial to a
+// peer's address
type dialWorker struct {
- s *Swarm
- peer peer.ID
- reqch <-chan dialRequest
- reqno int
- requests map[int]*pendRequest
- pending map[ma.Multiaddr]*addrDial
- resch chan dialResult
+ s *Swarm
+ peer peer.ID
+ // reqch is used to send dial requests to the worker. close reqch to end the worker loop
+ reqch <-chan dialRequest
+ // reqno is the request number used to track different dialRequests for a peer.
+ // Each incoming request is assigned a reqno. This reqno is used in pendingRequests and in
+ // addrDial objects in trackedDials to track this request
+ reqno int
+ // pendingRequests maps reqno to the pendRequest object for a dialRequest
+ pendingRequests map[int]*pendRequest
+ // trackedDials tracks dials to the peers addresses. An entry here is used to ensure that
+ // we dial an address at most once
+ trackedDials map[string]*addrDial
+ // resch is used to receive response for dials to the peers addresses.
+ resch chan dialResult
connected bool // true when a connection has been successfully established
- nextDial []ma.Multiaddr
-
- // ready when we have more addresses to dial (nextDial is not empty)
- triggerDial <-chan struct{}
-
// for testing
wg sync.WaitGroup
+ cl Clock
}
-func newDialWorker(s *Swarm, p peer.ID, reqch <-chan dialRequest) *dialWorker {
+func newDialWorker(s *Swarm, p peer.ID, reqch <-chan dialRequest, cl Clock) *dialWorker {
+ if cl == nil {
+ cl = RealClock{}
+ }
return &dialWorker{
- s: s,
- peer: p,
- reqch: reqch,
- requests: make(map[int]*pendRequest),
- pending: make(map[ma.Multiaddr]*addrDial),
- resch: make(chan dialResult),
+ s: s,
+ peer: p,
+ reqch: reqch,
+ pendingRequests: make(map[int]*pendRequest),
+ trackedDials: make(map[string]*addrDial),
+ resch: make(chan dialResult),
+ cl: cl,
}
}
+// loop implements the core dial worker loop. Requests are received on w.reqch.
+// The loop exits when w.reqch is closed.
func (w *dialWorker) loop() {
w.wg.Add(1)
defer w.wg.Done()
defer w.s.limiter.clearAllPeerDials(w.peer)
- // used to signal readiness to dial and completion of the dial
- ready := make(chan struct{})
- close(ready)
+ // dq is used to pace dials to different addresses of the peer
+ dq := newDialQueue()
+ // dialsInFlight is the number of dials in flight.
+ dialsInFlight := 0
+
+ startTime := w.cl.Now()
+ // dialTimer is the dialTimer used to trigger dials
+ dialTimer := w.cl.InstantTimer(startTime.Add(math.MaxInt64))
+ timerRunning := true
+ // scheduleNextDial updates timer for triggering the next dial
+ scheduleNextDial := func() {
+ if timerRunning && !dialTimer.Stop() {
+ <-dialTimer.Ch()
+ }
+ timerRunning = false
+ if dq.len() > 0 {
+ if dialsInFlight == 0 && !w.connected {
+ // if there are no dials in flight, trigger the next dials immediately
+ dialTimer.Reset(startTime)
+ } else {
+ dialTimer.Reset(startTime.Add(dq.top().Delay))
+ }
+ timerRunning = true
+ }
+ }
+ // totalDials is used to track number of dials made by this worker for metrics
+ totalDials := 0
loop:
for {
+ // The loop has three parts
+ // 1. Input requests are received on w.reqch. If a suitable connection is not available we create
+ // a pendRequest object to track the dialRequest and add the addresses to dq.
+ // 2. Addresses from the dialQueue are dialed at appropriate time intervals depending on delay logic.
+ // We are notified of the completion of these dials on w.resch.
+ // 3. Responses for dials are received on w.resch. On receiving a response, we update the pendRequests
+ // interested in dials on this address.
+
select {
case req, ok := <-w.reqch:
if !ok {
+ if w.s.metricsTracer != nil {
+ w.s.metricsTracer.DialCompleted(w.connected, totalDials)
+ }
return
}
+ // We have received a new request. If we do not have a suitable connection,
+ // track this dialRequest with a pendRequest.
+ // Enqueue the peer's addresses relevant to this request in dq and
+ // track dials to the addresses relevant to this request.
c, err := w.s.bestAcceptableConnToPeer(req.ctx, w.peer)
if c != nil || err != nil {
@@ -101,29 +183,34 @@ loop:
continue loop
}
- // at this point, len(addrs) > 0 or else it would be error from addrsForDial
- // ranke them to process in order
- addrs = w.rankAddrs(addrs)
+ // get the delays to dial these addrs from the swarms dialRanker
+ simConnect, _, _ := network.GetSimultaneousConnect(req.ctx)
+ addrRanking := w.rankAddrs(addrs, simConnect)
+ addrDelay := make(map[string]time.Duration, len(addrRanking))
// create the pending request object
pr := &pendRequest{
req: req,
err: &DialError{Peer: w.peer},
- addrs: make(map[ma.Multiaddr]struct{}),
+ addrs: make(map[string]struct{}, len(addrRanking)),
}
- for _, a := range addrs {
- pr.addrs[a] = struct{}{}
+ for _, adelay := range addrRanking {
+ pr.addrs[string(adelay.Addr.Bytes())] = struct{}{}
+ addrDelay[string(adelay.Addr.Bytes())] = adelay.Delay
}
- // check if any of the addrs has been successfully dialed and accumulate
- // errors from complete dials while collecting new addrs to dial/join
+ // Check if dials to any of the addrs have completed already
+ // If they have errored, record the error in pr. If they have succeeded,
+ // respond with the connection.
+ // If they are pending, add them to tojoin.
+ // If we haven't seen any of the addresses before, add them to todial.
var todial []ma.Multiaddr
var tojoin []*addrDial
- for _, a := range addrs {
- ad, ok := w.pending[a]
+ for _, adelay := range addrRanking {
+ ad, ok := w.trackedDials[string(adelay.Addr.Bytes())]
if !ok {
- todial = append(todial, a)
+ todial = append(todial, adelay.Addr)
continue
}
@@ -135,8 +222,8 @@ loop:
if ad.err != nil {
// dial to this addr errored, accumulate the error
- pr.err.recordErr(a, ad.err)
- delete(pr.addrs, a)
+ pr.err.recordErr(ad.addr, ad.err)
+ delete(pr.addrs, string(ad.addr.Bytes()))
continue
}
@@ -150,52 +237,91 @@ loop:
continue loop
}
- // the request has some pending or new dials, track it and schedule new dials
+ // The request has some pending or new dials. We assign this request a request number.
+ // This value of w.reqno is used to track this request in all the structures
w.reqno++
- w.requests[w.reqno] = pr
+ w.pendingRequests[w.reqno] = pr
for _, ad := range tojoin {
if !ad.dialed {
+ // we haven't dialed this address. update the ad.ctx to have simultaneous connect values
+ // set correctly
if simConnect, isClient, reason := network.GetSimultaneousConnect(req.ctx); simConnect {
if simConnect, _, _ := network.GetSimultaneousConnect(ad.ctx); !simConnect {
ad.ctx = network.WithSimultaneousConnect(ad.ctx, isClient, reason)
+ // update the element in dq to use the simultaneous connect delay.
+ dq.Add(network.AddrDelay{
+ Addr: ad.addr,
+ Delay: addrDelay[string(ad.addr.Bytes())],
+ })
}
}
}
+ // add the request to the addrDial
ad.requests = append(ad.requests, w.reqno)
}
if len(todial) > 0 {
+ now := time.Now()
+ // these are new addresses, track them and add them to dq
for _, a := range todial {
- w.pending[a] = &addrDial{addr: a, ctx: req.ctx, requests: []int{w.reqno}}
+ w.trackedDials[string(a.Bytes())] = &addrDial{
+ addr: a,
+ ctx: req.ctx,
+ requests: []int{w.reqno},
+ createdAt: now,
+ }
+ dq.Add(network.AddrDelay{Addr: a, Delay: addrDelay[string(a.Bytes())]})
}
-
- w.nextDial = append(w.nextDial, todial...)
- w.nextDial = w.rankAddrs(w.nextDial)
-
- // trigger a new dial now to account for the new addrs we added
- w.triggerDial = ready
}
-
- case <-w.triggerDial:
- for _, addr := range w.nextDial {
+ // setup dialTimer for updates to dq
+ scheduleNextDial()
+
+ case <-dialTimer.Ch():
+ // It's time to dial the next batch of addresses.
+ // We don't check the delay of the addresses received from the queue here
+ // because if the timer triggered before the delay, it means that all
+ // the inflight dials have errored and we should dial the next batch of
+ // addresses
+ now := time.Now()
+ for _, adelay := range dq.NextBatch() {
// spawn the dial
- ad := w.pending[addr]
- err := w.s.dialNextAddr(ad.ctx, w.peer, addr, w.resch)
+ ad, ok := w.trackedDials[string(adelay.Addr.Bytes())]
+ if !ok {
+ log.Errorf("SWARM BUG: no entry for address %s in trackedDials", adelay.Addr)
+ continue
+ }
+ ad.dialed = true
+ ad.dialRankingDelay = now.Sub(ad.createdAt)
+ err := w.s.dialNextAddr(ad.ctx, w.peer, ad.addr, w.resch)
if err != nil {
+ // the actual dial happens in a different go routine. An err here
+ // only happens in case of backoff. handle that.
w.dispatchError(ad, err)
+ } else {
+ // the dial was successful. update inflight dials
+ dialsInFlight++
+ totalDials++
}
}
-
- w.nextDial = nil
- w.triggerDial = nil
+ timerRunning = false
+ // schedule more dials
+ scheduleNextDial()
case res := <-w.resch:
- if res.Conn != nil {
- w.connected = true
- }
+ // A dial to an address has completed.
+ // Update all requests waiting on this address. On success, complete the request.
+ // On error, record the error
- ad := w.pending[res.Addr]
+ dialsInFlight--
+ ad, ok := w.trackedDials[string(res.Addr.Bytes())]
+ if !ok {
+ log.Errorf("SWARM BUG: no entry for address %s in trackedDials", res.Addr)
+ if res.Conn != nil {
+ res.Conn.Close()
+ }
+ continue
+ }
if res.Conn != nil {
// we got a connection, add it to the swarm
@@ -207,21 +333,27 @@ loop:
continue loop
}
- // dispatch to still pending requests
+ // request succeeded, respond to all pending requests
for _, reqno := range ad.requests {
- pr, ok := w.requests[reqno]
+ pr, ok := w.pendingRequests[reqno]
if !ok {
- // it has already dispatched a connection
+ // some other dial for this request succeeded before this one
continue
}
-
pr.req.resch <- dialResponse{conn: conn}
- delete(w.requests, reqno)
+ delete(w.pendingRequests, reqno)
}
ad.conn = conn
ad.requests = nil
+ if !w.connected {
+ w.connected = true
+ if w.s.metricsTracer != nil {
+ w.s.metricsTracer.DialRankingDelay(ad.dialRankingDelay)
+ }
+ }
+
continue loop
}
@@ -231,8 +363,11 @@ loop:
// for consistency with the old dialer behavior.
w.s.backf.AddBackoff(w.peer, res.Addr)
}
-
w.dispatchError(ad, res.Err)
+ // Only schedule next dial on error.
+ // If we scheduleNextDial on success, we will end up making one dial more than
+ // required because the final successful dial will spawn one more dial
+ scheduleNextDial()
}
}
}
@@ -241,16 +376,16 @@ loop:
func (w *dialWorker) dispatchError(ad *addrDial, err error) {
ad.err = err
for _, reqno := range ad.requests {
- pr, ok := w.requests[reqno]
+ pr, ok := w.pendingRequests[reqno]
if !ok {
- // has already been dispatched
+ // some other dial for this request succeeded before this one
continue
}
// accumulate the error
pr.err.recordErr(ad.addr, err)
- delete(pr.addrs, ad.addr)
+ delete(pr.addrs, string(ad.addr.Bytes()))
if len(pr.addrs) == 0 {
// all addrs have erred, dispatch dial error
// but first do a last one check in case an acceptable connection has landed from
@@ -261,7 +396,7 @@ func (w *dialWorker) dispatchError(ad *addrDial, err error) {
} else {
pr.req.resch <- dialResponse{err: pr.err}
}
- delete(w.requests, reqno)
+ delete(w.pendingRequests, reqno)
}
}
@@ -271,46 +406,82 @@ func (w *dialWorker) dispatchError(ad *addrDial, err error) {
// this is necessary to support active listen scenarios, where a new dial comes in while
// another dial is in progress, and needs to do a direct connection without inhibitions from
// dial backoff.
- // it is also necessary to preserve consisent behaviour with the old dialer -- TestDialBackoff
- // regresses without this.
if err == ErrDialBackoff {
- delete(w.pending, ad.addr)
+ delete(w.trackedDials, string(ad.addr.Bytes()))
}
}
-// ranks addresses in descending order of preference for dialing, with the following rules:
-// NonRelay > Relay
-// NonWS > WS
-// Private > Public
-// UDP > TCP
-func (w *dialWorker) rankAddrs(addrs []ma.Multiaddr) []ma.Multiaddr {
- addrTier := func(a ma.Multiaddr) (tier int) {
- if isRelayAddr(a) {
- tier |= 0b1000
- }
- if isExpensiveAddr(a) {
- tier |= 0b0100
- }
- if !manet.IsPrivateAddr(a) {
- tier |= 0b0010
- }
- if isFdConsumingAddr(a) {
- tier |= 0b0001
+// rankAddrs ranks addresses for dialing. if it's a simConnect request we
+// dial all addresses immediately without any delay
+func (w *dialWorker) rankAddrs(addrs []ma.Multiaddr, isSimConnect bool) []network.AddrDelay {
+ if isSimConnect {
+ return NoDelayDialRanker(addrs)
+ }
+ return w.s.dialRanker(addrs)
+}
+
+// dialQueue is a priority queue used to schedule dials
+type dialQueue struct {
+ // q contains dials ordered by delay
+ q []network.AddrDelay
+}
+
+// newDialQueue returns a new dialQueue
+func newDialQueue() *dialQueue {
+ return &dialQueue{q: make([]network.AddrDelay, 0, 16)}
+}
+
+// Add adds adelay to the queue. If another element exists in the queue with
+// the same address, it replaces that element.
+func (dq *dialQueue) Add(adelay network.AddrDelay) {
+ for i := 0; i < dq.len(); i++ {
+ if dq.q[i].Addr.Equal(adelay.Addr) {
+ if dq.q[i].Delay == adelay.Delay {
+ // existing element is the same. nothing to do
+ return
+ }
+ // remove the element
+ copy(dq.q[i:], dq.q[i+1:])
+ dq.q = dq.q[:len(dq.q)-1]
+ break
}
+ }
- return tier
+ for i := 0; i < dq.len(); i++ {
+ if dq.q[i].Delay > adelay.Delay {
+ dq.q = append(dq.q, network.AddrDelay{}) // extend the slice
+ copy(dq.q[i+1:], dq.q[i:])
+ dq.q[i] = adelay
+ return
+ }
}
+ dq.q = append(dq.q, adelay)
+}
- tiers := make([][]ma.Multiaddr, 16)
- for _, a := range addrs {
- tier := addrTier(a)
- tiers[tier] = append(tiers[tier], a)
+// NextBatch returns all the elements in the queue with the highest priority
+func (dq *dialQueue) NextBatch() []network.AddrDelay {
+ if dq.len() == 0 {
+ return nil
}
- result := make([]ma.Multiaddr, 0, len(addrs))
- for _, tier := range tiers {
- result = append(result, tier...)
+ // i is the index of the second highest priority element
+ var i int
+ for i = 0; i < dq.len(); i++ {
+ if dq.q[i].Delay != dq.q[0].Delay {
+ break
+ }
}
+ res := dq.q[:i]
+ dq.q = dq.q[i:]
+ return res
+}
+
+// top returns the top element of the queue
+func (dq *dialQueue) top() network.AddrDelay {
+ return dq.q[0]
+}
- return result
+// len returns the number of elements in the queue
+func (dq *dialQueue) len() int {
+ return len(dq.q)
}
diff --git a/vendor/github.com/libp2p/go-libp2p/p2p/net/swarm/swarm.go b/vendor/github.com/libp2p/go-libp2p/p2p/net/swarm/swarm.go
index 80d67eb97..d4815ef60 100644
--- a/vendor/github.com/libp2p/go-libp2p/p2p/net/swarm/swarm.go
+++ b/vendor/github.com/libp2p/go-libp2p/p2p/net/swarm/swarm.go
@@ -11,6 +11,7 @@ import (
"time"
"github.com/libp2p/go-libp2p/core/connmgr"
+ "github.com/libp2p/go-libp2p/core/event"
"github.com/libp2p/go-libp2p/core/metrics"
"github.com/libp2p/go-libp2p/core/network"
"github.com/libp2p/go-libp2p/core/peer"
@@ -19,6 +20,7 @@ import (
logging "github.com/ipfs/go-log/v2"
ma "github.com/multiformats/go-multiaddr"
+ madns "github.com/multiformats/go-multiaddr-dns"
)
const (
@@ -54,6 +56,14 @@ func WithConnectionGater(gater connmgr.ConnectionGater) Option {
}
}
+// WithMultiaddrResolver sets a custom multiaddress resolver
+func WithMultiaddrResolver(maResolver *madns.Resolver) Option {
+ return func(s *Swarm) error {
+ s.maResolver = maResolver
+ return nil
+ }
+}
+
// WithMetrics sets a metrics reporter
func WithMetrics(reporter metrics.Reporter) Option {
return func(s *Swarm) error {
@@ -62,6 +72,13 @@ func WithMetrics(reporter metrics.Reporter) Option {
}
}
+func WithMetricsTracer(t MetricsTracer) Option {
+ return func(s *Swarm) error {
+ s.metricsTracer = t
+ return nil
+ }
+}
+
func WithDialTimeout(t time.Duration) Option {
return func(s *Swarm) error {
s.dialTimeout = t
@@ -83,6 +100,14 @@ func WithResourceManager(m network.ResourceManager) Option {
}
}
+// WithDialRanker configures swarm to use d as the DialRanker
+func WithDialRanker(d network.DialRanker) Option {
+ return func(s *Swarm) error {
+ s.dialRanker = d
+ return nil
+ }
+}
+
// Swarm is a connection muxer, allowing connections to other peers to
// be opened and closed, while still using the same Chan for all
// communication. The Chan sends/receives Messages, which note the
@@ -95,6 +120,8 @@ type Swarm struct {
// down before continuing.
refs sync.WaitGroup
+ emitter event.Emitter
+
rcmgr network.ResourceManager
local peer.ID
@@ -127,8 +154,10 @@ type Swarm struct {
m map[int]transport.Transport
}
+ maResolver *madns.Resolver
+
// stream handlers
- streamh atomic.Value
+ streamh atomic.Pointer[network.StreamHandler]
// dialing helpers
dsync *dialSync
@@ -140,19 +169,29 @@ type Swarm struct {
ctx context.Context // is canceled when Close is called
ctxCancel context.CancelFunc
- bwc metrics.Reporter
+ bwc metrics.Reporter
+ metricsTracer MetricsTracer
+
+ dialRanker network.DialRanker
}
// NewSwarm constructs a Swarm.
-func NewSwarm(local peer.ID, peers peerstore.Peerstore, opts ...Option) (*Swarm, error) {
+func NewSwarm(local peer.ID, peers peerstore.Peerstore, eventBus event.Bus, opts ...Option) (*Swarm, error) {
+ emitter, err := eventBus.Emitter(new(event.EvtPeerConnectednessChanged))
+ if err != nil {
+ return nil, err
+ }
ctx, cancel := context.WithCancel(context.Background())
s := &Swarm{
local: local,
peers: peers,
+ emitter: emitter,
ctx: ctx,
ctxCancel: cancel,
dialTimeout: defaultDialTimeout,
dialTimeoutLocal: defaultDialTimeoutLocal,
+ maResolver: madns.DefaultResolver,
+ dialRanker: DefaultDialRanker,
}
s.conns.m = make(map[peer.ID][]*Conn)
@@ -166,7 +205,7 @@ func NewSwarm(local peer.ID, peers peerstore.Peerstore, opts ...Option) (*Swarm,
}
}
if s.rcmgr == nil {
- s.rcmgr = network.NullResourceManager
+ s.rcmgr = &network.NullResourceManager{}
}
s.dsync = newDialSync(s.dialWorkerLoop)
@@ -183,6 +222,8 @@ func (s *Swarm) Close() error {
func (s *Swarm) close() {
s.ctxCancel()
+ s.emitter.Close()
+
// Prevents new connections and/or listeners from being added to the swarm.
s.listeners.Lock()
listeners := s.listeners.m
@@ -199,7 +240,7 @@ func (s *Swarm) close() {
for l := range listeners {
go func(l transport.Listener) {
- if err := l.Close(); err != nil {
+ if err := l.Close(); err != nil && err != transport.ErrListenerClosed {
log.Errorf("error when shutting down listener: %s", err)
}
}(l)
@@ -225,8 +266,14 @@ func (s *Swarm) close() {
s.transports.m = nil
s.transports.Unlock()
- var wg sync.WaitGroup
+ // Dedup transports that may be listening on multiple protocols
+ transportsToClose := make(map[transport.Transport]struct{}, len(transports))
for _, t := range transports {
+ transportsToClose[t] = struct{}{}
+ }
+
+ var wg sync.WaitGroup
+ for t := range transportsToClose {
if closer, ok := t.(io.Closer); ok {
wg.Add(1)
go func(c io.Closer) {
@@ -293,6 +340,7 @@ func (s *Swarm) addConn(tc transport.CapableConn, dir network.Direction) (*Conn,
}
c.streams.m = make(map[*Stream]struct{})
+ isFirstConnection := len(s.conns.m[p]) == 0
s.conns.m[p] = append(s.conns.m[p], c)
// Add two swarm refs:
@@ -305,6 +353,15 @@ func (s *Swarm) addConn(tc transport.CapableConn, dir network.Direction) (*Conn,
c.notifyLk.Lock()
s.conns.Unlock()
+ // Emit event after releasing `s.conns` lock so that a consumer can still
+ // use swarm methods that need the `s.conns` lock.
+ if isFirstConnection {
+ s.emitter.Emit(event.EvtPeerConnectednessChanged{
+ Peer: p,
+ Connectedness: network.Connected,
+ })
+ }
+
s.notifyAll(func(f network.Notifiee) {
f.Connected(s, c)
})
@@ -321,13 +378,16 @@ func (s *Swarm) Peerstore() peerstore.Peerstore {
// SetStreamHandler assigns the handler for new streams.
func (s *Swarm) SetStreamHandler(handler network.StreamHandler) {
- s.streamh.Store(handler)
+ s.streamh.Store(&handler)
}
// StreamHandler gets the handler for new streams.
func (s *Swarm) StreamHandler() network.StreamHandler {
- handler, _ := s.streamh.Load().(network.StreamHandler)
- return handler
+ handler := s.streamh.Load()
+ if handler == nil {
+ return nil
+ }
+ return *handler
}
// NewStream creates a new stream on any available connection to peer, dialing
@@ -581,21 +641,33 @@ func (s *Swarm) removeConn(c *Conn) {
p := c.RemotePeer()
s.conns.Lock()
- defer s.conns.Unlock()
+
cs := s.conns.m[p]
+
+ if len(cs) == 1 {
+ delete(s.conns.m, p)
+ s.conns.Unlock()
+
+ // Emit event after releasing `s.conns` lock so that a consumer can still
+ // use swarm methods that need the `s.conns` lock.
+ s.emitter.Emit(event.EvtPeerConnectednessChanged{
+ Peer: p,
+ Connectedness: network.NotConnected,
+ })
+ return
+ }
+
+ defer s.conns.Unlock()
+
for i, ci := range cs {
if ci == c {
- if len(cs) == 1 {
- delete(s.conns.m, p)
- } else {
- // NOTE: We're intentionally preserving order.
- // This way, connections to a peer are always
- // sorted oldest to newest.
- copy(cs[i:], cs[i+1:])
- cs[len(cs)-1] = nil
- s.conns.m[p] = cs[:len(cs)-1]
- }
- return
+ // NOTE: We're intentionally preserving order.
+ // This way, connections to a peer are always
+ // sorted oldest to newest.
+ copy(cs[i:], cs[i+1:])
+ cs[len(cs)-1] = nil
+ s.conns.m[p] = cs[:len(cs)-1]
+ break
}
}
}
@@ -612,3 +684,34 @@ func (s *Swarm) ResourceManager() network.ResourceManager {
// Swarm is a Network.
var _ network.Network = (*Swarm)(nil)
var _ transport.TransportNetwork = (*Swarm)(nil)
+
+type connWithMetrics struct {
+ transport.CapableConn
+ opened time.Time
+ dir network.Direction
+ metricsTracer MetricsTracer
+}
+
+func wrapWithMetrics(capableConn transport.CapableConn, metricsTracer MetricsTracer, opened time.Time, dir network.Direction) connWithMetrics {
+ c := connWithMetrics{CapableConn: capableConn, opened: opened, dir: dir, metricsTracer: metricsTracer}
+ c.metricsTracer.OpenedConnection(c.dir, capableConn.RemotePublicKey(), capableConn.ConnState(), capableConn.LocalMultiaddr())
+ return c
+}
+
+func (c connWithMetrics) completedHandshake() {
+ c.metricsTracer.CompletedHandshake(time.Since(c.opened), c.ConnState(), c.LocalMultiaddr())
+}
+
+func (c connWithMetrics) Close() error {
+ c.metricsTracer.ClosedConnection(c.dir, time.Since(c.opened), c.ConnState(), c.LocalMultiaddr())
+ return c.CapableConn.Close()
+}
+
+func (c connWithMetrics) Stat() network.ConnStats {
+ if cs, ok := c.CapableConn.(network.ConnStat); ok {
+ return cs.Stat()
+ }
+ return network.ConnStats{}
+}
+
+var _ network.ConnStat = connWithMetrics{}
diff --git a/vendor/github.com/libp2p/go-libp2p/p2p/net/swarm/swarm_addr.go b/vendor/github.com/libp2p/go-libp2p/p2p/net/swarm/swarm_addr.go
index 8d088e76d..b2e3e4e8a 100644
--- a/vendor/github.com/libp2p/go-libp2p/p2p/net/swarm/swarm_addr.go
+++ b/vendor/github.com/libp2p/go-libp2p/p2p/net/swarm/swarm_addr.go
@@ -16,7 +16,7 @@ func (s *Swarm) ListenAddresses() []ma.Multiaddr {
}
func (s *Swarm) listenAddressesNoLock() []ma.Multiaddr {
- addrs := make([]ma.Multiaddr, 0, len(s.listeners.m))
+ addrs := make([]ma.Multiaddr, 0, len(s.listeners.m)+10) // A bit extra so we may avoid an extra allocation in the for loop below.
for l := range s.listeners.m {
addrs = append(addrs, l.Multiaddr())
}
diff --git a/vendor/github.com/libp2p/go-libp2p/p2p/net/swarm/swarm_conn.go b/vendor/github.com/libp2p/go-libp2p/p2p/net/swarm/swarm_conn.go
index 779ee3737..e770381a2 100644
--- a/vendor/github.com/libp2p/go-libp2p/p2p/net/swarm/swarm_conn.go
+++ b/vendor/github.com/libp2p/go-libp2p/p2p/net/swarm/swarm_conn.go
@@ -43,6 +43,10 @@ type Conn struct {
var _ network.Conn = &Conn{}
+func (c *Conn) IsClosed() bool {
+ return c.conn.IsClosed()
+}
+
func (c *Conn) ID() string {
// format: -
return fmt.Sprintf("%s-%d", c.RemotePeer().Pretty()[0:10], c.id)
@@ -126,6 +130,7 @@ func (c *Conn) start() {
// We only get an error here when the swarm is closed or closing.
if err != nil {
+ scope.Done()
return
}
@@ -168,16 +173,17 @@ func (c *Conn) RemotePeer() peer.ID {
return c.conn.RemotePeer()
}
-// LocalPrivateKey is the public key of the peer on this side
-func (c *Conn) LocalPrivateKey() ic.PrivKey {
- return c.conn.LocalPrivateKey()
-}
-
// RemotePublicKey is the public key of the peer on the remote side
func (c *Conn) RemotePublicKey() ic.PubKey {
return c.conn.RemotePublicKey()
}
+// ConnState is the security connection state. including early data result.
+// Empty if not supported.
+func (c *Conn) ConnState() network.ConnectionState {
+ return c.conn.ConnState()
+}
+
// Stat returns metadata pertaining to this connection
func (c *Conn) Stat() network.ConnStats {
c.streams.Lock()
@@ -197,11 +203,20 @@ func (c *Conn) NewStream(ctx context.Context) (network.Stream, error) {
if err != nil {
return nil, err
}
- ts, err := c.conn.OpenStream(ctx)
+
+ s, err := c.openAndAddStream(ctx, scope)
if err != nil {
scope.Done()
return nil, err
}
+ return s, nil
+}
+
+func (c *Conn) openAndAddStream(ctx context.Context, scope network.StreamManagementScope) (network.Stream, error) {
+ ts, err := c.conn.OpenStream(ctx)
+ if err != nil {
+ return nil, err
+ }
return c.addStream(ts, network.DirOutbound, scope)
}
@@ -210,7 +225,6 @@ func (c *Conn) addStream(ts network.MuxedStream, dir network.Direction, scope ne
// Are we still online?
if c.streams.m == nil {
c.streams.Unlock()
- scope.Done()
ts.Reset()
return nil, ErrConnClosed
}
diff --git a/vendor/github.com/libp2p/go-libp2p/p2p/net/swarm/swarm_dial.go b/vendor/github.com/libp2p/go-libp2p/p2p/net/swarm/swarm_dial.go
index 0e232a188..f0c941320 100644
--- a/vendor/github.com/libp2p/go-libp2p/p2p/net/swarm/swarm_dial.go
+++ b/vendor/github.com/libp2p/go-libp2p/p2p/net/swarm/swarm_dial.go
@@ -4,18 +4,25 @@ import (
"context"
"errors"
"fmt"
+ "net/netip"
+ "strconv"
"sync"
"time"
"github.com/libp2p/go-libp2p/core/canonicallog"
"github.com/libp2p/go-libp2p/core/network"
"github.com/libp2p/go-libp2p/core/peer"
+ "github.com/libp2p/go-libp2p/core/peerstore"
"github.com/libp2p/go-libp2p/core/transport"
-
ma "github.com/multiformats/go-multiaddr"
+ madns "github.com/multiformats/go-multiaddr-dns"
manet "github.com/multiformats/go-multiaddr/net"
)
+// The maximum number of address resolution steps we'll perform for a single
+// peer (for all addresses).
+const maxAddressResolution = 32
+
// Diagram of dial sync:
//
// many callers of Dial() synched w. dials many addrs results to callers
@@ -68,33 +75,12 @@ const ConcurrentFdDials = 160
// per peer
var DefaultPerPeerRateLimit = 8
-// dialbackoff is a struct used to avoid over-dialing the same, dead peers.
-// Whenever we totally time out on a peer (all three attempts), we add them
-// to dialbackoff. Then, whenevers goroutines would _wait_ (dialsync), they
-// check dialbackoff. If it's there, they don't wait and exit promptly with
-// an error. (the single goroutine that is actually dialing continues to
-// dial). If a dial is successful, the peer is removed from backoff.
-// Example:
-//
-// for {
-// if ok, wait := dialsync.Lock(p); !ok {
-// if backoff.Backoff(p) {
-// return errDialFailed
-// }
-// <-wait
-// continue
-// }
-// defer dialsync.Unlock(p)
-// c, err := actuallyDial(p)
-// if err != nil {
-// dialbackoff.AddBackoff(p)
-// continue
-// }
-// dialbackoff.Clear(p)
-// }
-//
-
-// DialBackoff is a type for tracking peer dial backoffs.
+// DialBackoff is a type for tracking peer dial backoffs. Dialbackoff is used to
+// avoid over-dialing the same, dead peers. Whenever we totally time out on all
+// addresses of a peer, we add the addresses to DialBackoff. Then, whenever we
+// attempt to dial the peer again, we check each address for backoff. If it's on
+// backoff, we don't dial the address and exit promptly. If a dial is
+// successful, the peer and all its addresses are removed from backoff.
//
// * It's safe to use its zero value.
// * It's thread-safe.
@@ -132,8 +118,8 @@ func (db *DialBackoff) background(ctx context.Context) {
// Backoff returns whether the client should backoff from dialing
// peer p at address addr
func (db *DialBackoff) Backoff(p peer.ID, addr ma.Multiaddr) (backoff bool) {
- db.lock.Lock()
- defer db.lock.Unlock()
+ db.lock.RLock()
+ defer db.lock.RUnlock()
ap, found := db.entries[p][string(addr.Bytes())]
return found && time.Now().Before(ap.until)
@@ -148,9 +134,7 @@ var BackoffCoef = time.Second
// BackoffMax is the maximum backoff time (default: 5m).
var BackoffMax = time.Minute * 5
-// AddBackoff lets other nodes know that we've entered backoff with
-// peer p, so dialers should not wait unnecessarily. We still will
-// attempt to dial with one goroutine, in case we get through.
+// AddBackoff adds peer's address to backoff.
//
// Backoff is not exponential, it's quadratic and computed according to the
// following formula:
@@ -262,6 +246,13 @@ func (s *Swarm) dialPeer(ctx context.Context, p peer.ID) (*Conn, error) {
conn, err = s.dsync.Dial(ctx, p)
if err == nil {
+ // Ensure we connected to the correct peer.
+ // This was most likely already checked by the security protocol, but it doesn't hurt to do it again here.
+ if conn.RemotePeer() != p {
+ conn.Close()
+ log.Errorw("Handshake failed to properly authenticate peer", "authenticated", conn.RemotePeer(), "expected", p)
+ return nil, fmt.Errorf("unexpected peer")
+ }
return conn, nil
}
@@ -282,7 +273,7 @@ func (s *Swarm) dialPeer(ctx context.Context, p peer.ID) (*Conn, error) {
// dialWorkerLoop synchronizes and executes concurrent dials to a single peer
func (s *Swarm) dialWorkerLoop(p peer.ID, reqch <-chan dialRequest) {
- w := newDialWorker(s, p, reqch)
+ w := newDialWorker(s, p, reqch, nil)
w.loop()
}
@@ -292,18 +283,108 @@ func (s *Swarm) addrsForDial(ctx context.Context, p peer.ID) ([]ma.Multiaddr, er
return nil, ErrNoAddresses
}
- goodAddrs := s.filterKnownUndialables(p, peerAddrs)
+ peerAddrsAfterTransportResolved := make([]ma.Multiaddr, 0, len(peerAddrs))
+ for _, a := range peerAddrs {
+ tpt := s.TransportForDialing(a)
+ resolver, ok := tpt.(transport.Resolver)
+ if ok {
+ resolvedAddrs, err := resolver.Resolve(ctx, a)
+ if err != nil {
+ log.Warnf("Failed to resolve multiaddr %s by transport %v: %v", a, tpt, err)
+ continue
+ }
+ peerAddrsAfterTransportResolved = append(peerAddrsAfterTransportResolved, resolvedAddrs...)
+ } else {
+ peerAddrsAfterTransportResolved = append(peerAddrsAfterTransportResolved, a)
+ }
+ }
+
+ // Resolve dns or dnsaddrs
+ resolved, err := s.resolveAddrs(ctx, peer.AddrInfo{
+ ID: p,
+ Addrs: peerAddrsAfterTransportResolved,
+ })
+ if err != nil {
+ return nil, err
+ }
+
+ goodAddrs := s.filterKnownUndialables(p, resolved)
if forceDirect, _ := network.GetForceDirectDial(ctx); forceDirect {
goodAddrs = ma.FilterAddrs(goodAddrs, s.nonProxyAddr)
}
+ goodAddrs = network.DedupAddrs(goodAddrs)
if len(goodAddrs) == 0 {
return nil, ErrNoGoodAddresses
}
+ s.peers.AddAddrs(p, goodAddrs, peerstore.TempAddrTTL)
+
return goodAddrs, nil
}
+func (s *Swarm) resolveAddrs(ctx context.Context, pi peer.AddrInfo) ([]ma.Multiaddr, error) {
+ proto := ma.ProtocolWithCode(ma.P_P2P).Name
+ p2paddr, err := ma.NewMultiaddr("/" + proto + "/" + pi.ID.Pretty())
+ if err != nil {
+ return nil, err
+ }
+
+ resolveSteps := 0
+
+ // Recursively resolve all addrs.
+ //
+ // While the toResolve list is non-empty:
+ // * Pop an address off.
+ // * If the address is fully resolved, add it to the resolved list.
+ // * Otherwise, resolve it and add the results to the "to resolve" list.
+ toResolve := append(([]ma.Multiaddr)(nil), pi.Addrs...)
+ resolved := make([]ma.Multiaddr, 0, len(pi.Addrs))
+ for len(toResolve) > 0 {
+ // pop the last addr off.
+ addr := toResolve[len(toResolve)-1]
+ toResolve = toResolve[:len(toResolve)-1]
+
+ // if it's resolved, add it to the resolved list.
+ if !madns.Matches(addr) {
+ resolved = append(resolved, addr)
+ continue
+ }
+
+ resolveSteps++
+
+ // We've resolved too many addresses. We can keep all the fully
+ // resolved addresses but we'll need to skip the rest.
+ if resolveSteps >= maxAddressResolution {
+ log.Warnf(
+ "peer %s asked us to resolve too many addresses: %s/%s",
+ pi.ID,
+ resolveSteps,
+ maxAddressResolution,
+ )
+ continue
+ }
+
+ // otherwise, resolve it
+ reqaddr := addr.Encapsulate(p2paddr)
+ resaddrs, err := s.maResolver.Resolve(ctx, reqaddr)
+ if err != nil {
+ log.Infof("error resolving %s: %s", reqaddr, err)
+ }
+
+ // add the results to the toResolve list.
+ for _, res := range resaddrs {
+ pi, err := peer.AddrInfoFromP2pAddr(res)
+ if err != nil {
+ log.Infof("error parsing %s: %s", res, err)
+ }
+ toResolve = append(toResolve, pi.Addrs...)
+ }
+ }
+
+ return resolved, nil
+}
+
func (s *Swarm) dialNextAddr(ctx context.Context, p peer.ID, addr ma.Multiaddr, resch chan dialResult) error {
// check the dial backoff
if forceDirect, _ := network.GetForceDirectDial(ctx); !forceDirect {
@@ -331,22 +412,33 @@ func (s *Swarm) nonProxyAddr(addr ma.Multiaddr) bool {
// filterKnownUndialables takes a list of multiaddrs, and removes those
// that we definitely don't want to dial: addresses configured to be blocked,
// IPv6 link-local addresses, addresses without a dial-capable transport,
-// and addresses that we know to be our own.
-// This is an optimization to avoid wasting time on dials that we know are going to fail.
+// addresses that we know to be our own, and addresses with a better transport
+// available. This is an optimization to avoid wasting time on dials that we
+// know are going to fail or for which we have a better alternative.
func (s *Swarm) filterKnownUndialables(p peer.ID, addrs []ma.Multiaddr) []ma.Multiaddr {
lisAddrs, _ := s.InterfaceListenAddresses()
var ourAddrs []ma.Multiaddr
for _, addr := range lisAddrs {
- protos := addr.Protocols()
// we're only sure about filtering out /ip4 and /ip6 addresses, so far
- if protos[0].Code == ma.P_IP4 || protos[0].Code == ma.P_IP6 {
- ourAddrs = append(ourAddrs, addr)
- }
+ ma.ForEach(addr, func(c ma.Component) bool {
+ if c.Protocol().Code == ma.P_IP4 || c.Protocol().Code == ma.P_IP6 {
+ ourAddrs = append(ourAddrs, addr)
+ }
+ return false
+ })
}
+ // The order of these two filters is important. If we can only dial /webtransport,
+ // we don't want to filter /webtransport addresses out because the peer had a /quic-v1
+ // address
+
+ // filter addresses we cannot dial
+ addrs = ma.FilterAddrs(addrs, s.canDial)
+ // filter low priority addresses among the addresses we can dial
+ addrs = filterLowPriorityAddresses(addrs)
+
return ma.FilterAddrs(addrs,
func(addr ma.Multiaddr) bool { return !ma.Contains(ourAddrs, addr) },
- s.canDial,
// TODO: Consider allowing link-local addresses
func(addr ma.Multiaddr) bool { return !manet.IsIP6LinkLocal(addr) },
func(addr ma.Multiaddr) bool {
@@ -378,6 +470,11 @@ func (s *Swarm) dialAddr(ctx context.Context, p peer.ID, addr ma.Multiaddr) (tra
if s.local == p {
return nil, ErrDialToSelf
}
+ // Check before we start work
+ if err := ctx.Err(); err != nil {
+ log.Debugf("%s swarm not dialing. Context cancelled: %v. %s %s", s.local, err, p, addr)
+ return nil, err
+ }
log.Debugf("%s swarm dialing %s %s", s.local, p, addr)
tpt := s.TransportForDialing(addr)
@@ -385,11 +482,20 @@ func (s *Swarm) dialAddr(ctx context.Context, p peer.ID, addr ma.Multiaddr) (tra
return nil, ErrNoTransport
}
+ start := time.Now()
connC, err := tpt.Dial(ctx, addr, p)
if err != nil {
+ if s.metricsTracer != nil {
+ s.metricsTracer.FailedDialing(addr, err)
+ }
return nil, err
}
canonicallog.LogPeerStatus(100, connC.RemotePeer(), connC.RemoteMultiaddr(), "connection_status", "established", "dir", "outbound")
+ if s.metricsTracer != nil {
+ connWithMetrics := wrapWithMetrics(connC, s.metricsTracer, start, network.DirOutbound)
+ connWithMetrics.completedHandshake()
+ connC = connWithMetrics
+ }
// Trust the transport? Yeah... right.
if connC.RemotePeer() != p {
@@ -424,14 +530,84 @@ func isFdConsumingAddr(addr ma.Multiaddr) bool {
return err1 == nil || err2 == nil
}
-func isExpensiveAddr(addr ma.Multiaddr) bool {
- _, wsErr := addr.ValueForProtocol(ma.P_WS)
- _, wssErr := addr.ValueForProtocol(ma.P_WSS)
- _, wtErr := addr.ValueForProtocol(ma.P_WEBTRANSPORT)
- return wsErr == nil || wssErr == nil || wtErr == nil
-}
-
func isRelayAddr(addr ma.Multiaddr) bool {
_, err := addr.ValueForProtocol(ma.P_CIRCUIT)
return err == nil
}
+
+// filterLowPriorityAddresses removes addresses inplace for which we have a better alternative
+// 1. If a /quic-v1 address is present, filter out /quic and /webtransport address on the same 2-tuple:
+// QUIC v1 is preferred over the deprecated QUIC draft-29, and given the choice, we prefer using
+// raw QUIC over using WebTransport.
+// 2. If a /tcp address is present, filter out /ws or /wss addresses on the same 2-tuple:
+// We prefer using raw TCP over using WebSocket.
+func filterLowPriorityAddresses(addrs []ma.Multiaddr) []ma.Multiaddr {
+ // make a map of QUIC v1 and TCP AddrPorts.
+ quicV1Addr := make(map[netip.AddrPort]struct{})
+ tcpAddr := make(map[netip.AddrPort]struct{})
+ for _, a := range addrs {
+ switch {
+ case isProtocolAddr(a, ma.P_WEBTRANSPORT):
+ case isProtocolAddr(a, ma.P_QUIC_V1):
+ ap, err := addrPort(a, ma.P_UDP)
+ if err != nil {
+ continue
+ }
+ quicV1Addr[ap] = struct{}{}
+ case isProtocolAddr(a, ma.P_WS) || isProtocolAddr(a, ma.P_WSS):
+ case isProtocolAddr(a, ma.P_TCP):
+ ap, err := addrPort(a, ma.P_TCP)
+ if err != nil {
+ continue
+ }
+ tcpAddr[ap] = struct{}{}
+ }
+ }
+
+ i := 0
+ for _, a := range addrs {
+ switch {
+ case isProtocolAddr(a, ma.P_WEBTRANSPORT) || isProtocolAddr(a, ma.P_QUIC):
+ ap, err := addrPort(a, ma.P_UDP)
+ if err != nil {
+ break
+ }
+ if _, ok := quicV1Addr[ap]; ok {
+ continue
+ }
+ case isProtocolAddr(a, ma.P_WS) || isProtocolAddr(a, ma.P_WSS):
+ ap, err := addrPort(a, ma.P_TCP)
+ if err != nil {
+ break
+ }
+ if _, ok := tcpAddr[ap]; ok {
+ continue
+ }
+ }
+ addrs[i] = a
+ i++
+ }
+ return addrs[:i]
+}
+
+// addrPort returns the ip and port for a. p should be either ma.P_TCP or ma.P_UDP.
+// a must be an (ip, TCP) or (ip, udp) address.
+func addrPort(a ma.Multiaddr, p int) (netip.AddrPort, error) {
+ ip, err := manet.ToIP(a)
+ if err != nil {
+ return netip.AddrPort{}, err
+ }
+ port, err := a.ValueForProtocol(p)
+ if err != nil {
+ return netip.AddrPort{}, err
+ }
+ pi, err := strconv.Atoi(port)
+ if err != nil {
+ return netip.AddrPort{}, err
+ }
+ addr, ok := netip.AddrFromSlice(ip)
+ if !ok {
+ return netip.AddrPort{}, fmt.Errorf("failed to parse IP %s", ip)
+ }
+ return netip.AddrPortFrom(addr, uint16(pi)), nil
+}
diff --git a/vendor/github.com/libp2p/go-libp2p/p2p/net/swarm/swarm_listen.go b/vendor/github.com/libp2p/go-libp2p/p2p/net/swarm/swarm_listen.go
index 044da2e8d..0905e8451 100644
--- a/vendor/github.com/libp2p/go-libp2p/p2p/net/swarm/swarm_listen.go
+++ b/vendor/github.com/libp2p/go-libp2p/p2p/net/swarm/swarm_listen.go
@@ -1,6 +1,7 @@
package swarm
import (
+ "errors"
"fmt"
"time"
@@ -37,9 +38,13 @@ func (s *Swarm) Listen(addrs ...ma.Multiaddr) error {
return nil
}
-// ListenClose stop and delete listeners for all of the given addresses.
+// ListenClose stops and deletes listeners for all of the given addresses. If
+// any address belongs to one of the addresses a Listener provides, then the
+// Listener will close for *all* addresses it provides. For example if you close
+// an address with `/quic`, then the QUIC listener will close and also close
+// any `/quic-v1` address.
func (s *Swarm) ListenClose(addrs ...ma.Multiaddr) {
- var listenersToClose []transport.Listener
+ listenersToClose := make(map[transport.Listener]struct{}, len(addrs))
s.listeners.Lock()
for l := range s.listeners.m {
@@ -48,12 +53,12 @@ func (s *Swarm) ListenClose(addrs ...ma.Multiaddr) {
}
delete(s.listeners.m, l)
- listenersToClose = append(listenersToClose, l)
+ listenersToClose[l] = struct{}{}
}
s.listeners.cacheEOL = time.Time{}
s.listeners.Unlock()
- for _, l := range listenersToClose {
+ for l := range listenersToClose {
l.Close()
}
}
@@ -123,9 +128,15 @@ func (s *Swarm) AddListenAddr(a ma.Multiaddr) error {
for {
c, err := list.Accept()
if err != nil {
+ if !errors.Is(err, transport.ErrListenerClosed) {
+ log.Errorf("swarm listener for %s accept error: %s", a, err)
+ }
return
}
canonicallog.LogPeerStatus(100, c.RemotePeer(), c.RemoteMultiaddr(), "connection_status", "established", "dir", "inbound")
+ if s.metricsTracer != nil {
+ c = wrapWithMetrics(c, s.metricsTracer, time.Now(), network.DirInbound)
+ }
log.Debugf("swarm listener accepted connection: %s <-> %s", c.LocalMultiaddr(), c.RemoteMultiaddr())
s.refs.Add(1)
@@ -149,7 +160,7 @@ func (s *Swarm) AddListenAddr(a ma.Multiaddr) error {
func containsMultiaddr(addrs []ma.Multiaddr, addr ma.Multiaddr) bool {
for _, a := range addrs {
- if addr == a {
+ if addr.Equal(a) {
return true
}
}
diff --git a/vendor/github.com/libp2p/go-libp2p/p2p/net/swarm/swarm_metrics.go b/vendor/github.com/libp2p/go-libp2p/p2p/net/swarm/swarm_metrics.go
new file mode 100644
index 000000000..3110217f8
--- /dev/null
+++ b/vendor/github.com/libp2p/go-libp2p/p2p/net/swarm/swarm_metrics.go
@@ -0,0 +1,237 @@
+package swarm
+
+import (
+ "context"
+ "errors"
+ "net"
+ "strings"
+ "time"
+
+ "github.com/libp2p/go-libp2p/core/crypto"
+ "github.com/libp2p/go-libp2p/core/network"
+ "github.com/libp2p/go-libp2p/p2p/metricshelper"
+
+ ma "github.com/multiformats/go-multiaddr"
+
+ "github.com/prometheus/client_golang/prometheus"
+)
+
+const metricNamespace = "libp2p_swarm"
+
+var (
+ connsOpened = prometheus.NewCounterVec(
+ prometheus.CounterOpts{
+ Namespace: metricNamespace,
+ Name: "connections_opened_total",
+ Help: "Connections Opened",
+ },
+ []string{"dir", "transport", "security", "muxer", "early_muxer", "ip_version"},
+ )
+ keyTypes = prometheus.NewCounterVec(
+ prometheus.CounterOpts{
+ Namespace: metricNamespace,
+ Name: "key_types_total",
+ Help: "key type",
+ },
+ []string{"dir", "key_type"},
+ )
+ connsClosed = prometheus.NewCounterVec(
+ prometheus.CounterOpts{
+ Namespace: metricNamespace,
+ Name: "connections_closed_total",
+ Help: "Connections Closed",
+ },
+ []string{"dir", "transport", "security", "muxer", "early_muxer", "ip_version"},
+ )
+ dialError = prometheus.NewCounterVec(
+ prometheus.CounterOpts{
+ Namespace: metricNamespace,
+ Name: "dial_errors_total",
+ Help: "Dial Error",
+ },
+ []string{"transport", "error", "ip_version"},
+ )
+ connDuration = prometheus.NewHistogramVec(
+ prometheus.HistogramOpts{
+ Namespace: metricNamespace,
+ Name: "connection_duration_seconds",
+ Help: "Duration of a Connection",
+ Buckets: prometheus.ExponentialBuckets(1.0/16, 2, 25), // up to 24 days
+ },
+ []string{"dir", "transport", "security", "muxer", "early_muxer", "ip_version"},
+ )
+ connHandshakeLatency = prometheus.NewHistogramVec(
+ prometheus.HistogramOpts{
+ Namespace: metricNamespace,
+ Name: "handshake_latency_seconds",
+ Help: "Duration of the libp2p Handshake",
+ Buckets: prometheus.ExponentialBuckets(0.001, 1.3, 35),
+ },
+ []string{"transport", "security", "muxer", "early_muxer", "ip_version"},
+ )
+ dialsPerPeer = prometheus.NewCounterVec(
+ prometheus.CounterOpts{
+ Namespace: metricNamespace,
+ Name: "dials_per_peer_total",
+ Help: "Number of addresses dialed per peer",
+ },
+ []string{"outcome", "num_dials"},
+ )
+ dialRankingDelay = prometheus.NewHistogram(
+ prometheus.HistogramOpts{
+ Namespace: metricNamespace,
+ Name: "dial_ranking_delay_seconds",
+ Help: "delay introduced by the dial ranking logic",
+ Buckets: []float64{0.001, 0.01, 0.05, 0.1, 0.2, 0.3, 0.4, 0.5, 0.75, 1, 2},
+ },
+ )
+ collectors = []prometheus.Collector{
+ connsOpened,
+ keyTypes,
+ connsClosed,
+ dialError,
+ connDuration,
+ connHandshakeLatency,
+ dialsPerPeer,
+ dialRankingDelay,
+ }
+)
+
+type MetricsTracer interface {
+ OpenedConnection(network.Direction, crypto.PubKey, network.ConnectionState, ma.Multiaddr)
+ ClosedConnection(network.Direction, time.Duration, network.ConnectionState, ma.Multiaddr)
+ CompletedHandshake(time.Duration, network.ConnectionState, ma.Multiaddr)
+ FailedDialing(ma.Multiaddr, error)
+ DialCompleted(success bool, totalDials int)
+ DialRankingDelay(d time.Duration)
+}
+
+type metricsTracer struct{}
+
+var _ MetricsTracer = &metricsTracer{}
+
+type metricsTracerSetting struct {
+ reg prometheus.Registerer
+}
+
+type MetricsTracerOption func(*metricsTracerSetting)
+
+func WithRegisterer(reg prometheus.Registerer) MetricsTracerOption {
+ return func(s *metricsTracerSetting) {
+ if reg != nil {
+ s.reg = reg
+ }
+ }
+}
+
+func NewMetricsTracer(opts ...MetricsTracerOption) MetricsTracer {
+ setting := &metricsTracerSetting{reg: prometheus.DefaultRegisterer}
+ for _, opt := range opts {
+ opt(setting)
+ }
+ metricshelper.RegisterCollectors(setting.reg, collectors...)
+ return &metricsTracer{}
+}
+
+func appendConnectionState(tags []string, cs network.ConnectionState) []string {
+ if cs.Transport == "" {
+ // This shouldn't happen, unless the transport doesn't properly set the Transport field in the ConnectionState.
+ tags = append(tags, "unknown")
+ } else {
+ tags = append(tags, string(cs.Transport))
+ }
+ // These might be empty, depending on the transport.
+ // For example, QUIC doesn't set security nor muxer.
+ tags = append(tags, string(cs.Security))
+ tags = append(tags, string(cs.StreamMultiplexer))
+
+ earlyMuxer := "false"
+ if cs.UsedEarlyMuxerNegotiation {
+ earlyMuxer = "true"
+ }
+ tags = append(tags, earlyMuxer)
+ return tags
+}
+
+func (m *metricsTracer) OpenedConnection(dir network.Direction, p crypto.PubKey, cs network.ConnectionState, laddr ma.Multiaddr) {
+ tags := metricshelper.GetStringSlice()
+ defer metricshelper.PutStringSlice(tags)
+
+ *tags = append(*tags, metricshelper.GetDirection(dir))
+ *tags = appendConnectionState(*tags, cs)
+ *tags = append(*tags, metricshelper.GetIPVersion(laddr))
+ connsOpened.WithLabelValues(*tags...).Inc()
+
+ *tags = (*tags)[:0]
+ *tags = append(*tags, metricshelper.GetDirection(dir))
+ *tags = append(*tags, p.Type().String())
+ keyTypes.WithLabelValues(*tags...).Inc()
+}
+
+func (m *metricsTracer) ClosedConnection(dir network.Direction, duration time.Duration, cs network.ConnectionState, laddr ma.Multiaddr) {
+ tags := metricshelper.GetStringSlice()
+ defer metricshelper.PutStringSlice(tags)
+
+ *tags = append(*tags, metricshelper.GetDirection(dir))
+ *tags = appendConnectionState(*tags, cs)
+ *tags = append(*tags, metricshelper.GetIPVersion(laddr))
+ connsClosed.WithLabelValues(*tags...).Inc()
+ connDuration.WithLabelValues(*tags...).Observe(duration.Seconds())
+}
+
+func (m *metricsTracer) CompletedHandshake(t time.Duration, cs network.ConnectionState, laddr ma.Multiaddr) {
+ tags := metricshelper.GetStringSlice()
+ defer metricshelper.PutStringSlice(tags)
+
+ *tags = appendConnectionState(*tags, cs)
+ *tags = append(*tags, metricshelper.GetIPVersion(laddr))
+ connHandshakeLatency.WithLabelValues(*tags...).Observe(t.Seconds())
+}
+
+func (m *metricsTracer) FailedDialing(addr ma.Multiaddr, err error) {
+ transport := metricshelper.GetTransport(addr)
+ e := "other"
+ if errors.Is(err, context.Canceled) {
+ e = "canceled"
+ } else if errors.Is(err, context.DeadlineExceeded) {
+ e = "deadline"
+ } else {
+ nerr, ok := err.(net.Error)
+ if ok && nerr.Timeout() {
+ e = "timeout"
+ } else if strings.Contains(err.Error(), "connect: connection refused") {
+ e = "connection refused"
+ }
+ }
+
+ tags := metricshelper.GetStringSlice()
+ defer metricshelper.PutStringSlice(tags)
+
+ *tags = append(*tags, transport, e)
+ *tags = append(*tags, metricshelper.GetIPVersion(addr))
+ dialError.WithLabelValues(*tags...).Inc()
+}
+
+func (m *metricsTracer) DialCompleted(success bool, totalDials int) {
+ tags := metricshelper.GetStringSlice()
+ defer metricshelper.PutStringSlice(tags)
+ if success {
+ *tags = append(*tags, "success")
+ } else {
+ *tags = append(*tags, "failed")
+ }
+
+ numDialLabels := [...]string{"0", "1", "2", "3", "4", "5", ">=6"}
+ var numDials string
+ if totalDials < len(numDialLabels) {
+ numDials = numDialLabels[totalDials]
+ } else {
+ numDials = numDialLabels[len(numDialLabels)-1]
+ }
+ *tags = append(*tags, numDials)
+ dialsPerPeer.WithLabelValues(*tags...).Inc()
+}
+
+func (m *metricsTracer) DialRankingDelay(d time.Duration) {
+ dialRankingDelay.Observe(d.Seconds())
+}
diff --git a/vendor/github.com/libp2p/go-libp2p/p2p/net/swarm/swarm_stream.go b/vendor/github.com/libp2p/go-libp2p/p2p/net/swarm/swarm_stream.go
index 7a5bb2750..d372bcd8e 100644
--- a/vendor/github.com/libp2p/go-libp2p/p2p/net/swarm/swarm_stream.go
+++ b/vendor/github.com/libp2p/go-libp2p/p2p/net/swarm/swarm_stream.go
@@ -24,7 +24,7 @@ type Stream struct {
closeOnce sync.Once
- protocol atomic.Value
+ protocol atomic.Pointer[protocol.ID]
stat network.Stats
}
@@ -108,9 +108,11 @@ func (s *Stream) remove() {
// Protocol returns the protocol negotiated on this stream (if set).
func (s *Stream) Protocol() protocol.ID {
- // Ignore type error. It means that the protocol is unset.
- p, _ := s.protocol.Load().(protocol.ID)
- return p
+ p := s.protocol.Load()
+ if p == nil {
+ return ""
+ }
+ return *p
}
// SetProtocol sets the protocol for this stream.
@@ -123,7 +125,7 @@ func (s *Stream) SetProtocol(p protocol.ID) error {
return err
}
- s.protocol.Store(p)
+ s.protocol.Store(&p)
return nil
}
diff --git a/vendor/github.com/libp2p/go-libp2p/p2p/net/swarm/swarm_transport.go b/vendor/github.com/libp2p/go-libp2p/p2p/net/swarm/swarm_transport.go
index 96a3a8e2c..924f0384a 100644
--- a/vendor/github.com/libp2p/go-libp2p/p2p/net/swarm/swarm_transport.go
+++ b/vendor/github.com/libp2p/go-libp2p/p2p/net/swarm/swarm_transport.go
@@ -19,6 +19,7 @@ func (s *Swarm) TransportForDialing(a ma.Multiaddr) transport.Transport {
s.transports.RLock()
defer s.transports.RUnlock()
+
if len(s.transports.m) == 0 {
// make sure we're not just shutting down.
if s.transports.m != nil {
@@ -26,18 +27,15 @@ func (s *Swarm) TransportForDialing(a ma.Multiaddr) transport.Transport {
}
return nil
}
-
- for _, p := range protocols {
- transport, ok := s.transports.m[p.Code]
- if !ok {
- continue
- }
- if transport.Proxy() {
- return transport
+ if isRelayAddr(a) {
+ return s.transports.m[ma.P_CIRCUIT]
+ }
+ for _, t := range s.transports.m {
+ if t.CanDial(a) {
+ return t
}
}
-
- return s.transports.m[protocols[len(protocols)-1].Code]
+ return nil
}
// TransportForListening retrieves the appropriate transport for listening on
diff --git a/vendor/github.com/libp2p/go-libp2p/p2p/net/upgrader/conn.go b/vendor/github.com/libp2p/go-libp2p/p2p/net/upgrader/conn.go
index 2fa7af64b..1c23a01ae 100644
--- a/vendor/github.com/libp2p/go-libp2p/p2p/net/upgrader/conn.go
+++ b/vendor/github.com/libp2p/go-libp2p/p2p/net/upgrader/conn.go
@@ -4,6 +4,7 @@ import (
"fmt"
"github.com/libp2p/go-libp2p/core/network"
+ "github.com/libp2p/go-libp2p/core/protocol"
"github.com/libp2p/go-libp2p/core/transport"
)
@@ -14,6 +15,10 @@ type transportConn struct {
transport transport.Transport
scope network.ConnManagementScope
stat network.ConnStats
+
+ muxer protocol.ID
+ security protocol.ID
+ usedEarlyMuxerNegotiation bool
}
var _ transport.CapableConn = &transportConn{}
@@ -49,3 +54,12 @@ func (t *transportConn) Close() error {
defer t.scope.Done()
return t.MuxedConn.Close()
}
+
+func (t *transportConn) ConnState() network.ConnectionState {
+ return network.ConnectionState{
+ StreamMultiplexer: t.muxer,
+ Security: t.security,
+ Transport: "tcp",
+ UsedEarlyMuxerNegotiation: t.usedEarlyMuxerNegotiation,
+ }
+}
diff --git a/vendor/github.com/libp2p/go-libp2p/p2p/net/upgrader/listener.go b/vendor/github.com/libp2p/go-libp2p/p2p/net/upgrader/listener.go
index c07299c1a..0871d2f5a 100644
--- a/vendor/github.com/libp2p/go-libp2p/p2p/net/upgrader/listener.go
+++ b/vendor/github.com/libp2p/go-libp2p/p2p/net/upgrader/listener.go
@@ -3,6 +3,7 @@ package upgrader
import (
"context"
"fmt"
+ "strings"
"sync"
"github.com/libp2p/go-libp2p/core/network"
@@ -165,6 +166,9 @@ func (l *listener) Accept() (transport.CapableConn, error) {
return c, nil
}
}
+ if strings.Contains(l.err.Error(), "use of closed network connection") {
+ return nil, transport.ErrListenerClosed
+ }
return nil, l.err
}
diff --git a/vendor/github.com/libp2p/go-libp2p/p2p/net/upgrader/upgrader.go b/vendor/github.com/libp2p/go-libp2p/p2p/net/upgrader/upgrader.go
index 58347865a..d18c16ea0 100644
--- a/vendor/github.com/libp2p/go-libp2p/p2p/net/upgrader/upgrader.go
+++ b/vendor/github.com/libp2p/go-libp2p/p2p/net/upgrader/upgrader.go
@@ -11,11 +11,13 @@ import (
"github.com/libp2p/go-libp2p/core/network"
"github.com/libp2p/go-libp2p/core/peer"
ipnet "github.com/libp2p/go-libp2p/core/pnet"
+ "github.com/libp2p/go-libp2p/core/protocol"
"github.com/libp2p/go-libp2p/core/sec"
"github.com/libp2p/go-libp2p/core/transport"
"github.com/libp2p/go-libp2p/p2p/net/pnet"
manet "github.com/multiformats/go-multiaddr/net"
+ mss "github.com/multiformats/go-multistream"
)
// ErrNilPeer is returned when attempting to upgrade an outbound connection
@@ -25,17 +27,13 @@ var ErrNilPeer = errors.New("nil peer")
// AcceptQueueLength is the number of connections to fully setup before not accepting any new connections
var AcceptQueueLength = 16
-const defaultAcceptTimeout = 15 * time.Second
+const (
+ defaultAcceptTimeout = 15 * time.Second
+ defaultNegotiateTimeout = 60 * time.Second
+)
type Option func(*upgrader) error
-func WithPSK(psk ipnet.PSK) Option {
- return func(u *upgrader) error {
- u.psk = psk
- return nil
- }
-}
-
func WithAcceptTimeout(t time.Duration) Option {
return func(u *upgrader) error {
u.acceptTimeout = t
@@ -43,30 +41,26 @@ func WithAcceptTimeout(t time.Duration) Option {
}
}
-func WithConnectionGater(g connmgr.ConnectionGater) Option {
- return func(u *upgrader) error {
- u.connGater = g
- return nil
- }
-}
-
-func WithResourceManager(m network.ResourceManager) Option {
- return func(u *upgrader) error {
- u.rcmgr = m
- return nil
- }
+type StreamMuxer struct {
+ ID protocol.ID
+ Muxer network.Multiplexer
}
// Upgrader is a multistream upgrader that can upgrade an underlying connection
// to a full transport connection (secure and multiplexed).
type upgrader struct {
- secure sec.SecureMuxer
- muxer network.Multiplexer
-
psk ipnet.PSK
connGater connmgr.ConnectionGater
rcmgr network.ResourceManager
+ muxerMuxer *mss.MultistreamMuxer[protocol.ID]
+ muxers []StreamMuxer
+ muxerIDs []protocol.ID
+
+ security []sec.SecureTransport
+ securityMuxer *mss.MultistreamMuxer[protocol.ID]
+ securityIDs []protocol.ID
+
// AcceptTimeout is the maximum duration an Accept is allowed to take.
// This includes the time between accepting the raw network connection,
// protocol selection as well as the handshake, if applicable.
@@ -77,11 +71,16 @@ type upgrader struct {
var _ transport.Upgrader = &upgrader{}
-func New(secureMuxer sec.SecureMuxer, muxer network.Multiplexer, opts ...Option) (transport.Upgrader, error) {
+func New(security []sec.SecureTransport, muxers []StreamMuxer, psk ipnet.PSK, rcmgr network.ResourceManager, connGater connmgr.ConnectionGater, opts ...Option) (transport.Upgrader, error) {
u := &upgrader{
- secure: secureMuxer,
- muxer: muxer,
acceptTimeout: defaultAcceptTimeout,
+ rcmgr: rcmgr,
+ connGater: connGater,
+ psk: psk,
+ muxerMuxer: mss.NewMultistreamMuxer[protocol.ID](),
+ muxers: muxers,
+ security: security,
+ securityMuxer: mss.NewMultistreamMuxer[protocol.ID](),
}
for _, opt := range opts {
if err := opt(u); err != nil {
@@ -89,7 +88,17 @@ func New(secureMuxer sec.SecureMuxer, muxer network.Multiplexer, opts ...Option)
}
}
if u.rcmgr == nil {
- u.rcmgr = network.NullResourceManager
+ u.rcmgr = &network.NullResourceManager{}
+ }
+ u.muxerIDs = make([]protocol.ID, 0, len(muxers))
+ for _, m := range muxers {
+ u.muxerMuxer.AddHandler(m.ID, nil)
+ u.muxerIDs = append(u.muxerIDs, m.ID)
+ }
+ u.securityIDs = make([]protocol.ID, 0, len(security))
+ for _, s := range security {
+ u.securityMuxer.AddHandler(s.ID(), nil)
+ u.securityIDs = append(u.securityIDs, s.ID())
}
return u, nil
}
@@ -135,7 +144,7 @@ func (u *upgrader) upgrade(ctx context.Context, t transport.Transport, maconn ma
pconn, err := pnet.NewProtectedConn(u.psk, conn)
if err != nil {
conn.Close()
- return nil, fmt.Errorf("failed to setup private network protector: %s", err)
+ return nil, fmt.Errorf("failed to setup private network protector: %w", err)
}
conn = pconn
} else if ipnet.ForcePrivateNetwork {
@@ -143,10 +152,10 @@ func (u *upgrader) upgrade(ctx context.Context, t transport.Transport, maconn ma
return nil, ipnet.ErrNotInPrivateNetwork
}
- sconn, server, err := u.setupSecurity(ctx, conn, p, dir)
+ sconn, security, server, err := u.setupSecurity(ctx, conn, p, dir)
if err != nil {
conn.Close()
- return nil, fmt.Errorf("failed to negotiate security protocol: %s", err)
+ return nil, fmt.Errorf("failed to negotiate security protocol: %w", err)
}
// call the connection gater, if one is registered.
@@ -155,7 +164,7 @@ func (u *upgrader) upgrade(ctx context.Context, t transport.Transport, maconn ma
log.Errorw("failed to close connection", "peer", p, "addr", maconn.RemoteMultiaddr(), "error", err)
}
return nil, fmt.Errorf("gater rejected connection with peer %s and addr %s with direction %d",
- sconn.RemotePeer().Pretty(), maconn.RemoteMultiaddr(), dir)
+ sconn.RemotePeer(), maconn.RemoteMultiaddr(), dir)
}
// Only call SetPeer if it hasn't already been set -- this can happen when we don't know
// the peer in advance and in some bug scenarios.
@@ -166,53 +175,174 @@ func (u *upgrader) upgrade(ctx context.Context, t transport.Transport, maconn ma
log.Errorw("failed to close connection", "peer", p, "addr", maconn.RemoteMultiaddr(), "error", err)
}
return nil, fmt.Errorf("resource manager connection with peer %s and addr %s with direction %d",
- sconn.RemotePeer().Pretty(), maconn.RemoteMultiaddr(), dir)
+ sconn.RemotePeer(), maconn.RemoteMultiaddr(), dir)
}
}
- smconn, err := u.setupMuxer(ctx, sconn, server, connScope.PeerScope())
+ muxer, smconn, err := u.setupMuxer(ctx, sconn, server, connScope.PeerScope())
if err != nil {
sconn.Close()
- return nil, fmt.Errorf("failed to negotiate stream multiplexer: %s", err)
+ return nil, fmt.Errorf("failed to negotiate stream multiplexer: %w", err)
}
tc := &transportConn{
- MuxedConn: smconn,
- ConnMultiaddrs: maconn,
- ConnSecurity: sconn,
- transport: t,
- stat: stat,
- scope: connScope,
+ MuxedConn: smconn,
+ ConnMultiaddrs: maconn,
+ ConnSecurity: sconn,
+ transport: t,
+ stat: stat,
+ scope: connScope,
+ muxer: muxer,
+ security: security,
+ usedEarlyMuxerNegotiation: sconn.ConnState().UsedEarlyMuxerNegotiation,
}
return tc, nil
}
-func (u *upgrader) setupSecurity(ctx context.Context, conn net.Conn, p peer.ID, dir network.Direction) (sec.SecureConn, bool, error) {
- if dir == network.DirInbound {
- return u.secure.SecureInbound(ctx, conn, p)
+func (u *upgrader) setupSecurity(ctx context.Context, conn net.Conn, p peer.ID, dir network.Direction) (sec.SecureConn, protocol.ID, bool, error) {
+ isServer := dir == network.DirInbound
+ var st sec.SecureTransport
+ var err error
+ st, isServer, err = u.negotiateSecurity(ctx, conn, isServer)
+ if err != nil {
+ return nil, "", false, err
}
- return u.secure.SecureOutbound(ctx, conn, p)
+ if isServer {
+ sconn, err := st.SecureInbound(ctx, conn, p)
+ return sconn, st.ID(), true, err
+ }
+ sconn, err := st.SecureOutbound(ctx, conn, p)
+ return sconn, st.ID(), false, err
}
-func (u *upgrader) setupMuxer(ctx context.Context, conn net.Conn, server bool, scope network.PeerScope) (network.MuxedConn, error) {
- // TODO: The muxer should take a context.
- done := make(chan struct{})
+func (u *upgrader) negotiateMuxer(nc net.Conn, isServer bool) (*StreamMuxer, error) {
+ if err := nc.SetDeadline(time.Now().Add(defaultNegotiateTimeout)); err != nil {
+ return nil, err
+ }
- var smconn network.MuxedConn
- var err error
+ var proto protocol.ID
+ if isServer {
+ selected, _, err := u.muxerMuxer.Negotiate(nc)
+ if err != nil {
+ return nil, err
+ }
+ proto = selected
+ } else {
+ selected, err := mss.SelectOneOf(u.muxerIDs, nc)
+ if err != nil {
+ return nil, err
+ }
+ proto = selected
+ }
+
+ if err := nc.SetDeadline(time.Time{}); err != nil {
+ return nil, err
+ }
+
+ if m := u.getMuxerByID(proto); m != nil {
+ return m, nil
+ }
+ return nil, fmt.Errorf("selected protocol we don't have a transport for")
+}
+
+func (u *upgrader) getMuxerByID(id protocol.ID) *StreamMuxer {
+ for _, m := range u.muxers {
+ if m.ID == id {
+ return &m
+ }
+ }
+ return nil
+}
+
+func (u *upgrader) setupMuxer(ctx context.Context, conn sec.SecureConn, server bool, scope network.PeerScope) (protocol.ID, network.MuxedConn, error) {
+ muxerSelected := conn.ConnState().StreamMultiplexer
+ // Use muxer selected from security handshake if available. Otherwise fall back to multistream-selection.
+ if len(muxerSelected) > 0 {
+ m := u.getMuxerByID(muxerSelected)
+ if m == nil {
+ return "", nil, fmt.Errorf("selected a muxer we don't know: %s", muxerSelected)
+ }
+ c, err := m.Muxer.NewConn(conn, server, scope)
+ if err != nil {
+ return "", nil, err
+ }
+ return muxerSelected, c, nil
+ }
+
+ type result struct {
+ smconn network.MuxedConn
+ muxerID protocol.ID
+ err error
+ }
+
+ done := make(chan result, 1)
+ // TODO: The muxer should take a context.
go func() {
- defer close(done)
- smconn, err = u.muxer.NewConn(conn, server, scope)
+ m, err := u.negotiateMuxer(conn, server)
+ if err != nil {
+ done <- result{err: err}
+ return
+ }
+ smconn, err := m.Muxer.NewConn(conn, server, scope)
+ done <- result{smconn: smconn, muxerID: m.ID, err: err}
}()
select {
- case <-done:
- return smconn, err
+ case r := <-done:
+ return r.muxerID, r.smconn, r.err
case <-ctx.Done():
// interrupt this process
conn.Close()
// wait to finish
<-done
- return nil, ctx.Err()
+ return "", nil, ctx.Err()
+ }
+}
+
+func (u *upgrader) getSecurityByID(id protocol.ID) sec.SecureTransport {
+ for _, s := range u.security {
+ if s.ID() == id {
+ return s
+ }
+ }
+ return nil
+}
+
+func (u *upgrader) negotiateSecurity(ctx context.Context, insecure net.Conn, server bool) (sec.SecureTransport, bool, error) {
+ type result struct {
+ proto protocol.ID
+ iamserver bool
+ err error
+ }
+
+ done := make(chan result, 1)
+ go func() {
+ if server {
+ var r result
+ r.iamserver = true
+ r.proto, _, r.err = u.securityMuxer.Negotiate(insecure)
+ done <- r
+ return
+ }
+ var r result
+ r.proto, r.iamserver, r.err = mss.SelectWithSimopenOrFail(u.securityIDs, insecure)
+ done <- r
+ }()
+
+ select {
+ case r := <-done:
+ if r.err != nil {
+ return nil, false, r.err
+ }
+ if s := u.getSecurityByID(r.proto); s != nil {
+ return s, r.iamserver, nil
+ }
+ return nil, false, fmt.Errorf("selected unknown security transport: %s", r.proto)
+ case <-ctx.Done():
+ // We *must* do this. We have outstanding work on the connection
+ // and it's no longer safe to use.
+ insecure.Close()
+ <-done // wait to stop using the connection.
+ return nil, false, ctx.Err()
}
}
diff --git a/vendor/github.com/libp2p/go-libp2p/p2p/protocol/circuitv1/pb/Makefile b/vendor/github.com/libp2p/go-libp2p/p2p/protocol/circuitv1/pb/Makefile
deleted file mode 100644
index fd1106850..000000000
--- a/vendor/github.com/libp2p/go-libp2p/p2p/protocol/circuitv1/pb/Makefile
+++ /dev/null
@@ -1,11 +0,0 @@
-PB = $(wildcard *.proto)
-GO = $(PB:.proto=.pb.go)
-
-all: $(GO)
-
-%.pb.go: %.proto
- protoc --gogofast_out=. $<
-
-clean:
- rm -f *.pb.go
- rm -f *.go
diff --git a/vendor/github.com/libp2p/go-libp2p/p2p/protocol/circuitv1/pb/circuitv1.pb.go b/vendor/github.com/libp2p/go-libp2p/p2p/protocol/circuitv1/pb/circuitv1.pb.go
deleted file mode 100644
index 1715f4193..000000000
--- a/vendor/github.com/libp2p/go-libp2p/p2p/protocol/circuitv1/pb/circuitv1.pb.go
+++ /dev/null
@@ -1,868 +0,0 @@
-// Code generated by protoc-gen-gogo. DO NOT EDIT.
-// source: circuitv1.proto
-
-package circuitv1_pb
-
-import (
- fmt "fmt"
- github_com_gogo_protobuf_proto "github.com/gogo/protobuf/proto"
- proto "github.com/gogo/protobuf/proto"
- io "io"
- math "math"
- math_bits "math/bits"
-)
-
-// Reference imports to suppress errors if they are not otherwise used.
-var _ = proto.Marshal
-var _ = fmt.Errorf
-var _ = math.Inf
-
-// This is a compile-time assertion to ensure that this generated file
-// is compatible with the proto package it is being compiled against.
-// A compilation error at this line likely means your copy of the
-// proto package needs to be updated.
-const _ = proto.GoGoProtoPackageIsVersion3 // please upgrade the proto package
-
-type CircuitRelay_Status int32
-
-const (
- CircuitRelay_SUCCESS CircuitRelay_Status = 100
- CircuitRelay_HOP_SRC_ADDR_TOO_LONG CircuitRelay_Status = 220
- CircuitRelay_HOP_DST_ADDR_TOO_LONG CircuitRelay_Status = 221
- CircuitRelay_HOP_SRC_MULTIADDR_INVALID CircuitRelay_Status = 250
- CircuitRelay_HOP_DST_MULTIADDR_INVALID CircuitRelay_Status = 251
- CircuitRelay_HOP_NO_CONN_TO_DST CircuitRelay_Status = 260
- CircuitRelay_HOP_CANT_DIAL_DST CircuitRelay_Status = 261
- CircuitRelay_HOP_CANT_OPEN_DST_STREAM CircuitRelay_Status = 262
- CircuitRelay_HOP_CANT_SPEAK_RELAY CircuitRelay_Status = 270
- CircuitRelay_HOP_CANT_RELAY_TO_SELF CircuitRelay_Status = 280
- CircuitRelay_STOP_SRC_ADDR_TOO_LONG CircuitRelay_Status = 320
- CircuitRelay_STOP_DST_ADDR_TOO_LONG CircuitRelay_Status = 321
- CircuitRelay_STOP_SRC_MULTIADDR_INVALID CircuitRelay_Status = 350
- CircuitRelay_STOP_DST_MULTIADDR_INVALID CircuitRelay_Status = 351
- CircuitRelay_STOP_RELAY_REFUSED CircuitRelay_Status = 390
- CircuitRelay_MALFORMED_MESSAGE CircuitRelay_Status = 400
-)
-
-var CircuitRelay_Status_name = map[int32]string{
- 100: "SUCCESS",
- 220: "HOP_SRC_ADDR_TOO_LONG",
- 221: "HOP_DST_ADDR_TOO_LONG",
- 250: "HOP_SRC_MULTIADDR_INVALID",
- 251: "HOP_DST_MULTIADDR_INVALID",
- 260: "HOP_NO_CONN_TO_DST",
- 261: "HOP_CANT_DIAL_DST",
- 262: "HOP_CANT_OPEN_DST_STREAM",
- 270: "HOP_CANT_SPEAK_RELAY",
- 280: "HOP_CANT_RELAY_TO_SELF",
- 320: "STOP_SRC_ADDR_TOO_LONG",
- 321: "STOP_DST_ADDR_TOO_LONG",
- 350: "STOP_SRC_MULTIADDR_INVALID",
- 351: "STOP_DST_MULTIADDR_INVALID",
- 390: "STOP_RELAY_REFUSED",
- 400: "MALFORMED_MESSAGE",
-}
-
-var CircuitRelay_Status_value = map[string]int32{
- "SUCCESS": 100,
- "HOP_SRC_ADDR_TOO_LONG": 220,
- "HOP_DST_ADDR_TOO_LONG": 221,
- "HOP_SRC_MULTIADDR_INVALID": 250,
- "HOP_DST_MULTIADDR_INVALID": 251,
- "HOP_NO_CONN_TO_DST": 260,
- "HOP_CANT_DIAL_DST": 261,
- "HOP_CANT_OPEN_DST_STREAM": 262,
- "HOP_CANT_SPEAK_RELAY": 270,
- "HOP_CANT_RELAY_TO_SELF": 280,
- "STOP_SRC_ADDR_TOO_LONG": 320,
- "STOP_DST_ADDR_TOO_LONG": 321,
- "STOP_SRC_MULTIADDR_INVALID": 350,
- "STOP_DST_MULTIADDR_INVALID": 351,
- "STOP_RELAY_REFUSED": 390,
- "MALFORMED_MESSAGE": 400,
-}
-
-func (x CircuitRelay_Status) Enum() *CircuitRelay_Status {
- p := new(CircuitRelay_Status)
- *p = x
- return p
-}
-
-func (x CircuitRelay_Status) String() string {
- return proto.EnumName(CircuitRelay_Status_name, int32(x))
-}
-
-func (x *CircuitRelay_Status) UnmarshalJSON(data []byte) error {
- value, err := proto.UnmarshalJSONEnum(CircuitRelay_Status_value, data, "CircuitRelay_Status")
- if err != nil {
- return err
- }
- *x = CircuitRelay_Status(value)
- return nil
-}
-
-func (CircuitRelay_Status) EnumDescriptor() ([]byte, []int) {
- return fileDescriptor_129c008e7addef67, []int{0, 0}
-}
-
-type CircuitRelay_Type int32
-
-const (
- CircuitRelay_HOP CircuitRelay_Type = 1
- CircuitRelay_STOP CircuitRelay_Type = 2
- CircuitRelay_STATUS CircuitRelay_Type = 3
- CircuitRelay_CAN_HOP CircuitRelay_Type = 4
-)
-
-var CircuitRelay_Type_name = map[int32]string{
- 1: "HOP",
- 2: "STOP",
- 3: "STATUS",
- 4: "CAN_HOP",
-}
-
-var CircuitRelay_Type_value = map[string]int32{
- "HOP": 1,
- "STOP": 2,
- "STATUS": 3,
- "CAN_HOP": 4,
-}
-
-func (x CircuitRelay_Type) Enum() *CircuitRelay_Type {
- p := new(CircuitRelay_Type)
- *p = x
- return p
-}
-
-func (x CircuitRelay_Type) String() string {
- return proto.EnumName(CircuitRelay_Type_name, int32(x))
-}
-
-func (x *CircuitRelay_Type) UnmarshalJSON(data []byte) error {
- value, err := proto.UnmarshalJSONEnum(CircuitRelay_Type_value, data, "CircuitRelay_Type")
- if err != nil {
- return err
- }
- *x = CircuitRelay_Type(value)
- return nil
-}
-
-func (CircuitRelay_Type) EnumDescriptor() ([]byte, []int) {
- return fileDescriptor_129c008e7addef67, []int{0, 1}
-}
-
-type CircuitRelay struct {
- Type *CircuitRelay_Type `protobuf:"varint,1,opt,name=type,enum=circuitv1.pb.CircuitRelay_Type" json:"type,omitempty"`
- SrcPeer *CircuitRelay_Peer `protobuf:"bytes,2,opt,name=srcPeer" json:"srcPeer,omitempty"`
- DstPeer *CircuitRelay_Peer `protobuf:"bytes,3,opt,name=dstPeer" json:"dstPeer,omitempty"`
- Code *CircuitRelay_Status `protobuf:"varint,4,opt,name=code,enum=circuitv1.pb.CircuitRelay_Status" json:"code,omitempty"`
- XXX_NoUnkeyedLiteral struct{} `json:"-"`
- XXX_unrecognized []byte `json:"-"`
- XXX_sizecache int32 `json:"-"`
-}
-
-func (m *CircuitRelay) Reset() { *m = CircuitRelay{} }
-func (m *CircuitRelay) String() string { return proto.CompactTextString(m) }
-func (*CircuitRelay) ProtoMessage() {}
-func (*CircuitRelay) Descriptor() ([]byte, []int) {
- return fileDescriptor_129c008e7addef67, []int{0}
-}
-func (m *CircuitRelay) XXX_Unmarshal(b []byte) error {
- return m.Unmarshal(b)
-}
-func (m *CircuitRelay) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) {
- if deterministic {
- return xxx_messageInfo_CircuitRelay.Marshal(b, m, deterministic)
- } else {
- b = b[:cap(b)]
- n, err := m.MarshalToSizedBuffer(b)
- if err != nil {
- return nil, err
- }
- return b[:n], nil
- }
-}
-func (m *CircuitRelay) XXX_Merge(src proto.Message) {
- xxx_messageInfo_CircuitRelay.Merge(m, src)
-}
-func (m *CircuitRelay) XXX_Size() int {
- return m.Size()
-}
-func (m *CircuitRelay) XXX_DiscardUnknown() {
- xxx_messageInfo_CircuitRelay.DiscardUnknown(m)
-}
-
-var xxx_messageInfo_CircuitRelay proto.InternalMessageInfo
-
-func (m *CircuitRelay) GetType() CircuitRelay_Type {
- if m != nil && m.Type != nil {
- return *m.Type
- }
- return CircuitRelay_HOP
-}
-
-func (m *CircuitRelay) GetSrcPeer() *CircuitRelay_Peer {
- if m != nil {
- return m.SrcPeer
- }
- return nil
-}
-
-func (m *CircuitRelay) GetDstPeer() *CircuitRelay_Peer {
- if m != nil {
- return m.DstPeer
- }
- return nil
-}
-
-func (m *CircuitRelay) GetCode() CircuitRelay_Status {
- if m != nil && m.Code != nil {
- return *m.Code
- }
- return CircuitRelay_SUCCESS
-}
-
-type CircuitRelay_Peer struct {
- Id []byte `protobuf:"bytes,1,req,name=id" json:"id,omitempty"`
- Addrs [][]byte `protobuf:"bytes,2,rep,name=addrs" json:"addrs,omitempty"`
- XXX_NoUnkeyedLiteral struct{} `json:"-"`
- XXX_unrecognized []byte `json:"-"`
- XXX_sizecache int32 `json:"-"`
-}
-
-func (m *CircuitRelay_Peer) Reset() { *m = CircuitRelay_Peer{} }
-func (m *CircuitRelay_Peer) String() string { return proto.CompactTextString(m) }
-func (*CircuitRelay_Peer) ProtoMessage() {}
-func (*CircuitRelay_Peer) Descriptor() ([]byte, []int) {
- return fileDescriptor_129c008e7addef67, []int{0, 0}
-}
-func (m *CircuitRelay_Peer) XXX_Unmarshal(b []byte) error {
- return m.Unmarshal(b)
-}
-func (m *CircuitRelay_Peer) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) {
- if deterministic {
- return xxx_messageInfo_CircuitRelay_Peer.Marshal(b, m, deterministic)
- } else {
- b = b[:cap(b)]
- n, err := m.MarshalToSizedBuffer(b)
- if err != nil {
- return nil, err
- }
- return b[:n], nil
- }
-}
-func (m *CircuitRelay_Peer) XXX_Merge(src proto.Message) {
- xxx_messageInfo_CircuitRelay_Peer.Merge(m, src)
-}
-func (m *CircuitRelay_Peer) XXX_Size() int {
- return m.Size()
-}
-func (m *CircuitRelay_Peer) XXX_DiscardUnknown() {
- xxx_messageInfo_CircuitRelay_Peer.DiscardUnknown(m)
-}
-
-var xxx_messageInfo_CircuitRelay_Peer proto.InternalMessageInfo
-
-func (m *CircuitRelay_Peer) GetId() []byte {
- if m != nil {
- return m.Id
- }
- return nil
-}
-
-func (m *CircuitRelay_Peer) GetAddrs() [][]byte {
- if m != nil {
- return m.Addrs
- }
- return nil
-}
-
-func init() {
- proto.RegisterEnum("circuitv1.pb.CircuitRelay_Status", CircuitRelay_Status_name, CircuitRelay_Status_value)
- proto.RegisterEnum("circuitv1.pb.CircuitRelay_Type", CircuitRelay_Type_name, CircuitRelay_Type_value)
- proto.RegisterType((*CircuitRelay)(nil), "circuitv1.pb.CircuitRelay")
- proto.RegisterType((*CircuitRelay_Peer)(nil), "circuitv1.pb.CircuitRelay.Peer")
-}
-
-func init() { proto.RegisterFile("circuitv1.proto", fileDescriptor_129c008e7addef67) }
-
-var fileDescriptor_129c008e7addef67 = []byte{
- // 475 bytes of a gzipped FileDescriptorProto
- 0x1f, 0x8b, 0x08, 0x00, 0x00, 0x00, 0x00, 0x00, 0x02, 0xff, 0x8c, 0x92, 0x4d, 0x6f, 0xd3, 0x30,
- 0x18, 0x80, 0x65, 0x27, 0xb4, 0xe8, 0x5d, 0x35, 0x8c, 0x35, 0x46, 0x56, 0x44, 0x57, 0x7a, 0xea,
- 0x01, 0x55, 0x62, 0x88, 0x03, 0x47, 0x93, 0xb8, 0x5b, 0x45, 0x1a, 0x57, 0xb6, 0x8b, 0xc4, 0xc9,
- 0x2a, 0x4d, 0x0e, 0x95, 0x90, 0x5a, 0xa5, 0x19, 0x52, 0xef, 0xb0, 0x23, 0xe2, 0x06, 0x3f, 0x07,
- 0x38, 0x71, 0xe4, 0x07, 0xf0, 0xa5, 0xfe, 0x0c, 0xb8, 0x20, 0xbb, 0x34, 0xab, 0xe8, 0x34, 0xed,
- 0xd8, 0xf7, 0x79, 0x1e, 0xd7, 0x79, 0x13, 0xb8, 0x31, 0x9e, 0xe4, 0xe3, 0xd3, 0x49, 0xf1, 0xea,
- 0x41, 0x67, 0x96, 0x4f, 0x8b, 0x29, 0xad, 0x6d, 0x0c, 0x5e, 0xb4, 0xde, 0x57, 0xa0, 0x16, 0xae,
- 0x06, 0x32, 0x7b, 0x39, 0x5a, 0xd0, 0x87, 0xe0, 0x17, 0x8b, 0x59, 0x16, 0xa0, 0x26, 0x6a, 0xef,
- 0x1e, 0x1d, 0x76, 0x36, 0xed, 0xce, 0xa6, 0xd9, 0xd1, 0x8b, 0x59, 0x26, 0x9d, 0x4c, 0x1f, 0x43,
- 0x75, 0x9e, 0x8f, 0x07, 0x59, 0x96, 0x07, 0xb8, 0x89, 0xda, 0x3b, 0x97, 0x76, 0x56, 0x93, 0x6b,
- 0xdf, 0xa6, 0xe9, 0xbc, 0x70, 0xa9, 0x77, 0xc5, 0xf4, 0x9f, 0x4f, 0x1f, 0x81, 0x3f, 0x9e, 0xa6,
- 0x59, 0xe0, 0xbb, 0xab, 0xde, 0xbb, 0xa4, 0x53, 0xc5, 0xa8, 0x38, 0x9d, 0x4b, 0xa7, 0xd7, 0xef,
- 0x83, 0xef, 0xf2, 0x5d, 0xc0, 0x93, 0x34, 0x40, 0x4d, 0xdc, 0xae, 0x49, 0x3c, 0x49, 0xe9, 0x1e,
- 0x5c, 0x1b, 0xa5, 0x69, 0x3e, 0x0f, 0x70, 0xd3, 0x6b, 0xd7, 0xe4, 0xea, 0x47, 0xeb, 0xb3, 0x07,
- 0x95, 0x55, 0x4e, 0x77, 0xa0, 0xaa, 0x86, 0x61, 0xc8, 0x95, 0x22, 0x29, 0xad, 0xc3, 0xad, 0x13,
- 0x31, 0x30, 0x4a, 0x86, 0x86, 0x45, 0x91, 0x34, 0x5a, 0x08, 0x13, 0x8b, 0xe4, 0x98, 0x7c, 0x43,
- 0x6b, 0x16, 0x29, 0xfd, 0x1f, 0xfb, 0x8e, 0x68, 0x03, 0x0e, 0xd6, 0x5d, 0x7f, 0x18, 0xeb, 0x9e,
- 0x13, 0x7a, 0xc9, 0x33, 0x16, 0xf7, 0x22, 0xf2, 0xbb, 0xe4, 0xb6, 0xdd, 0xe6, 0x7f, 0x10, 0xbd,
- 0x0d, 0xd4, 0xf2, 0x44, 0x98, 0x50, 0x24, 0x89, 0xd1, 0xc2, 0xaa, 0xe4, 0x35, 0xa6, 0xfb, 0x70,
- 0xd3, 0x82, 0x90, 0x25, 0xda, 0x44, 0x3d, 0x16, 0xbb, 0xf9, 0x1b, 0x4c, 0xef, 0x42, 0x50, 0xce,
- 0xc5, 0x80, 0x27, 0xee, 0x68, 0xa5, 0x25, 0x67, 0x7d, 0x72, 0x86, 0xe9, 0x01, 0xec, 0x95, 0x58,
- 0x0d, 0x38, 0x7b, 0x6a, 0x24, 0x8f, 0xd9, 0x73, 0xf2, 0x16, 0xd3, 0x3b, 0xb0, 0x5f, 0x22, 0x37,
- 0xb4, 0xff, 0xa6, 0x78, 0xdc, 0x25, 0x1f, 0x1c, 0x54, 0xfa, 0xc2, 0x05, 0x7c, 0x3c, 0x87, 0xdb,
- 0x1b, 0xf8, 0x84, 0xe9, 0x21, 0xd4, 0xcb, 0x72, 0xfb, 0x11, 0x7f, 0x9c, 0x0b, 0x17, 0xef, 0xe0,
- 0x27, 0xb6, 0x3b, 0x70, 0xc2, 0xea, 0x52, 0x92, 0x77, 0x87, 0x8a, 0x47, 0xe4, 0xcc, 0xb3, 0x3b,
- 0xe8, 0xb3, 0xb8, 0x2b, 0x64, 0x9f, 0x47, 0xa6, 0xcf, 0x95, 0x62, 0xc7, 0x9c, 0xbc, 0xf3, 0x5a,
- 0x47, 0xe0, 0xdb, 0xaf, 0x95, 0x56, 0xc1, 0x3b, 0x11, 0x03, 0x82, 0xe8, 0x75, 0xf0, 0xed, 0x09,
- 0x04, 0x53, 0x80, 0x8a, 0xd2, 0x4c, 0x0f, 0x15, 0xf1, 0xec, 0x0b, 0x0e, 0x59, 0x62, 0xac, 0xe2,
- 0x3f, 0xa9, 0x7d, 0x59, 0x36, 0xd0, 0xd7, 0x65, 0x03, 0xfd, 0x5a, 0x36, 0xd0, 0xdf, 0x00, 0x00,
- 0x00, 0xff, 0xff, 0x52, 0xcc, 0x98, 0x82, 0x47, 0x03, 0x00, 0x00,
-}
-
-func (m *CircuitRelay) Marshal() (dAtA []byte, err error) {
- size := m.Size()
- dAtA = make([]byte, size)
- n, err := m.MarshalToSizedBuffer(dAtA[:size])
- if err != nil {
- return nil, err
- }
- return dAtA[:n], nil
-}
-
-func (m *CircuitRelay) MarshalTo(dAtA []byte) (int, error) {
- size := m.Size()
- return m.MarshalToSizedBuffer(dAtA[:size])
-}
-
-func (m *CircuitRelay) MarshalToSizedBuffer(dAtA []byte) (int, error) {
- i := len(dAtA)
- _ = i
- var l int
- _ = l
- if m.XXX_unrecognized != nil {
- i -= len(m.XXX_unrecognized)
- copy(dAtA[i:], m.XXX_unrecognized)
- }
- if m.Code != nil {
- i = encodeVarintCircuitv1(dAtA, i, uint64(*m.Code))
- i--
- dAtA[i] = 0x20
- }
- if m.DstPeer != nil {
- {
- size, err := m.DstPeer.MarshalToSizedBuffer(dAtA[:i])
- if err != nil {
- return 0, err
- }
- i -= size
- i = encodeVarintCircuitv1(dAtA, i, uint64(size))
- }
- i--
- dAtA[i] = 0x1a
- }
- if m.SrcPeer != nil {
- {
- size, err := m.SrcPeer.MarshalToSizedBuffer(dAtA[:i])
- if err != nil {
- return 0, err
- }
- i -= size
- i = encodeVarintCircuitv1(dAtA, i, uint64(size))
- }
- i--
- dAtA[i] = 0x12
- }
- if m.Type != nil {
- i = encodeVarintCircuitv1(dAtA, i, uint64(*m.Type))
- i--
- dAtA[i] = 0x8
- }
- return len(dAtA) - i, nil
-}
-
-func (m *CircuitRelay_Peer) Marshal() (dAtA []byte, err error) {
- size := m.Size()
- dAtA = make([]byte, size)
- n, err := m.MarshalToSizedBuffer(dAtA[:size])
- if err != nil {
- return nil, err
- }
- return dAtA[:n], nil
-}
-
-func (m *CircuitRelay_Peer) MarshalTo(dAtA []byte) (int, error) {
- size := m.Size()
- return m.MarshalToSizedBuffer(dAtA[:size])
-}
-
-func (m *CircuitRelay_Peer) MarshalToSizedBuffer(dAtA []byte) (int, error) {
- i := len(dAtA)
- _ = i
- var l int
- _ = l
- if m.XXX_unrecognized != nil {
- i -= len(m.XXX_unrecognized)
- copy(dAtA[i:], m.XXX_unrecognized)
- }
- if len(m.Addrs) > 0 {
- for iNdEx := len(m.Addrs) - 1; iNdEx >= 0; iNdEx-- {
- i -= len(m.Addrs[iNdEx])
- copy(dAtA[i:], m.Addrs[iNdEx])
- i = encodeVarintCircuitv1(dAtA, i, uint64(len(m.Addrs[iNdEx])))
- i--
- dAtA[i] = 0x12
- }
- }
- if m.Id == nil {
- return 0, github_com_gogo_protobuf_proto.NewRequiredNotSetError("id")
- } else {
- i -= len(m.Id)
- copy(dAtA[i:], m.Id)
- i = encodeVarintCircuitv1(dAtA, i, uint64(len(m.Id)))
- i--
- dAtA[i] = 0xa
- }
- return len(dAtA) - i, nil
-}
-
-func encodeVarintCircuitv1(dAtA []byte, offset int, v uint64) int {
- offset -= sovCircuitv1(v)
- base := offset
- for v >= 1<<7 {
- dAtA[offset] = uint8(v&0x7f | 0x80)
- v >>= 7
- offset++
- }
- dAtA[offset] = uint8(v)
- return base
-}
-func (m *CircuitRelay) Size() (n int) {
- if m == nil {
- return 0
- }
- var l int
- _ = l
- if m.Type != nil {
- n += 1 + sovCircuitv1(uint64(*m.Type))
- }
- if m.SrcPeer != nil {
- l = m.SrcPeer.Size()
- n += 1 + l + sovCircuitv1(uint64(l))
- }
- if m.DstPeer != nil {
- l = m.DstPeer.Size()
- n += 1 + l + sovCircuitv1(uint64(l))
- }
- if m.Code != nil {
- n += 1 + sovCircuitv1(uint64(*m.Code))
- }
- if m.XXX_unrecognized != nil {
- n += len(m.XXX_unrecognized)
- }
- return n
-}
-
-func (m *CircuitRelay_Peer) Size() (n int) {
- if m == nil {
- return 0
- }
- var l int
- _ = l
- if m.Id != nil {
- l = len(m.Id)
- n += 1 + l + sovCircuitv1(uint64(l))
- }
- if len(m.Addrs) > 0 {
- for _, b := range m.Addrs {
- l = len(b)
- n += 1 + l + sovCircuitv1(uint64(l))
- }
- }
- if m.XXX_unrecognized != nil {
- n += len(m.XXX_unrecognized)
- }
- return n
-}
-
-func sovCircuitv1(x uint64) (n int) {
- return (math_bits.Len64(x|1) + 6) / 7
-}
-func sozCircuitv1(x uint64) (n int) {
- return sovCircuitv1(uint64((x << 1) ^ uint64((int64(x) >> 63))))
-}
-func (m *CircuitRelay) Unmarshal(dAtA []byte) error {
- l := len(dAtA)
- iNdEx := 0
- for iNdEx < l {
- preIndex := iNdEx
- var wire uint64
- for shift := uint(0); ; shift += 7 {
- if shift >= 64 {
- return ErrIntOverflowCircuitv1
- }
- if iNdEx >= l {
- return io.ErrUnexpectedEOF
- }
- b := dAtA[iNdEx]
- iNdEx++
- wire |= uint64(b&0x7F) << shift
- if b < 0x80 {
- break
- }
- }
- fieldNum := int32(wire >> 3)
- wireType := int(wire & 0x7)
- if wireType == 4 {
- return fmt.Errorf("proto: CircuitRelay: wiretype end group for non-group")
- }
- if fieldNum <= 0 {
- return fmt.Errorf("proto: CircuitRelay: illegal tag %d (wire type %d)", fieldNum, wire)
- }
- switch fieldNum {
- case 1:
- if wireType != 0 {
- return fmt.Errorf("proto: wrong wireType = %d for field Type", wireType)
- }
- var v CircuitRelay_Type
- for shift := uint(0); ; shift += 7 {
- if shift >= 64 {
- return ErrIntOverflowCircuitv1
- }
- if iNdEx >= l {
- return io.ErrUnexpectedEOF
- }
- b := dAtA[iNdEx]
- iNdEx++
- v |= CircuitRelay_Type(b&0x7F) << shift
- if b < 0x80 {
- break
- }
- }
- m.Type = &v
- case 2:
- if wireType != 2 {
- return fmt.Errorf("proto: wrong wireType = %d for field SrcPeer", wireType)
- }
- var msglen int
- for shift := uint(0); ; shift += 7 {
- if shift >= 64 {
- return ErrIntOverflowCircuitv1
- }
- if iNdEx >= l {
- return io.ErrUnexpectedEOF
- }
- b := dAtA[iNdEx]
- iNdEx++
- msglen |= int(b&0x7F) << shift
- if b < 0x80 {
- break
- }
- }
- if msglen < 0 {
- return ErrInvalidLengthCircuitv1
- }
- postIndex := iNdEx + msglen
- if postIndex < 0 {
- return ErrInvalidLengthCircuitv1
- }
- if postIndex > l {
- return io.ErrUnexpectedEOF
- }
- if m.SrcPeer == nil {
- m.SrcPeer = &CircuitRelay_Peer{}
- }
- if err := m.SrcPeer.Unmarshal(dAtA[iNdEx:postIndex]); err != nil {
- return err
- }
- iNdEx = postIndex
- case 3:
- if wireType != 2 {
- return fmt.Errorf("proto: wrong wireType = %d for field DstPeer", wireType)
- }
- var msglen int
- for shift := uint(0); ; shift += 7 {
- if shift >= 64 {
- return ErrIntOverflowCircuitv1
- }
- if iNdEx >= l {
- return io.ErrUnexpectedEOF
- }
- b := dAtA[iNdEx]
- iNdEx++
- msglen |= int(b&0x7F) << shift
- if b < 0x80 {
- break
- }
- }
- if msglen < 0 {
- return ErrInvalidLengthCircuitv1
- }
- postIndex := iNdEx + msglen
- if postIndex < 0 {
- return ErrInvalidLengthCircuitv1
- }
- if postIndex > l {
- return io.ErrUnexpectedEOF
- }
- if m.DstPeer == nil {
- m.DstPeer = &CircuitRelay_Peer{}
- }
- if err := m.DstPeer.Unmarshal(dAtA[iNdEx:postIndex]); err != nil {
- return err
- }
- iNdEx = postIndex
- case 4:
- if wireType != 0 {
- return fmt.Errorf("proto: wrong wireType = %d for field Code", wireType)
- }
- var v CircuitRelay_Status
- for shift := uint(0); ; shift += 7 {
- if shift >= 64 {
- return ErrIntOverflowCircuitv1
- }
- if iNdEx >= l {
- return io.ErrUnexpectedEOF
- }
- b := dAtA[iNdEx]
- iNdEx++
- v |= CircuitRelay_Status(b&0x7F) << shift
- if b < 0x80 {
- break
- }
- }
- m.Code = &v
- default:
- iNdEx = preIndex
- skippy, err := skipCircuitv1(dAtA[iNdEx:])
- if err != nil {
- return err
- }
- if skippy < 0 {
- return ErrInvalidLengthCircuitv1
- }
- if (iNdEx + skippy) < 0 {
- return ErrInvalidLengthCircuitv1
- }
- if (iNdEx + skippy) > l {
- return io.ErrUnexpectedEOF
- }
- m.XXX_unrecognized = append(m.XXX_unrecognized, dAtA[iNdEx:iNdEx+skippy]...)
- iNdEx += skippy
- }
- }
-
- if iNdEx > l {
- return io.ErrUnexpectedEOF
- }
- return nil
-}
-func (m *CircuitRelay_Peer) Unmarshal(dAtA []byte) error {
- var hasFields [1]uint64
- l := len(dAtA)
- iNdEx := 0
- for iNdEx < l {
- preIndex := iNdEx
- var wire uint64
- for shift := uint(0); ; shift += 7 {
- if shift >= 64 {
- return ErrIntOverflowCircuitv1
- }
- if iNdEx >= l {
- return io.ErrUnexpectedEOF
- }
- b := dAtA[iNdEx]
- iNdEx++
- wire |= uint64(b&0x7F) << shift
- if b < 0x80 {
- break
- }
- }
- fieldNum := int32(wire >> 3)
- wireType := int(wire & 0x7)
- if wireType == 4 {
- return fmt.Errorf("proto: Peer: wiretype end group for non-group")
- }
- if fieldNum <= 0 {
- return fmt.Errorf("proto: Peer: illegal tag %d (wire type %d)", fieldNum, wire)
- }
- switch fieldNum {
- case 1:
- if wireType != 2 {
- return fmt.Errorf("proto: wrong wireType = %d for field Id", wireType)
- }
- var byteLen int
- for shift := uint(0); ; shift += 7 {
- if shift >= 64 {
- return ErrIntOverflowCircuitv1
- }
- if iNdEx >= l {
- return io.ErrUnexpectedEOF
- }
- b := dAtA[iNdEx]
- iNdEx++
- byteLen |= int(b&0x7F) << shift
- if b < 0x80 {
- break
- }
- }
- if byteLen < 0 {
- return ErrInvalidLengthCircuitv1
- }
- postIndex := iNdEx + byteLen
- if postIndex < 0 {
- return ErrInvalidLengthCircuitv1
- }
- if postIndex > l {
- return io.ErrUnexpectedEOF
- }
- m.Id = append(m.Id[:0], dAtA[iNdEx:postIndex]...)
- if m.Id == nil {
- m.Id = []byte{}
- }
- iNdEx = postIndex
- hasFields[0] |= uint64(0x00000001)
- case 2:
- if wireType != 2 {
- return fmt.Errorf("proto: wrong wireType = %d for field Addrs", wireType)
- }
- var byteLen int
- for shift := uint(0); ; shift += 7 {
- if shift >= 64 {
- return ErrIntOverflowCircuitv1
- }
- if iNdEx >= l {
- return io.ErrUnexpectedEOF
- }
- b := dAtA[iNdEx]
- iNdEx++
- byteLen |= int(b&0x7F) << shift
- if b < 0x80 {
- break
- }
- }
- if byteLen < 0 {
- return ErrInvalidLengthCircuitv1
- }
- postIndex := iNdEx + byteLen
- if postIndex < 0 {
- return ErrInvalidLengthCircuitv1
- }
- if postIndex > l {
- return io.ErrUnexpectedEOF
- }
- m.Addrs = append(m.Addrs, make([]byte, postIndex-iNdEx))
- copy(m.Addrs[len(m.Addrs)-1], dAtA[iNdEx:postIndex])
- iNdEx = postIndex
- default:
- iNdEx = preIndex
- skippy, err := skipCircuitv1(dAtA[iNdEx:])
- if err != nil {
- return err
- }
- if skippy < 0 {
- return ErrInvalidLengthCircuitv1
- }
- if (iNdEx + skippy) < 0 {
- return ErrInvalidLengthCircuitv1
- }
- if (iNdEx + skippy) > l {
- return io.ErrUnexpectedEOF
- }
- m.XXX_unrecognized = append(m.XXX_unrecognized, dAtA[iNdEx:iNdEx+skippy]...)
- iNdEx += skippy
- }
- }
- if hasFields[0]&uint64(0x00000001) == 0 {
- return github_com_gogo_protobuf_proto.NewRequiredNotSetError("id")
- }
-
- if iNdEx > l {
- return io.ErrUnexpectedEOF
- }
- return nil
-}
-func skipCircuitv1(dAtA []byte) (n int, err error) {
- l := len(dAtA)
- iNdEx := 0
- depth := 0
- for iNdEx < l {
- var wire uint64
- for shift := uint(0); ; shift += 7 {
- if shift >= 64 {
- return 0, ErrIntOverflowCircuitv1
- }
- if iNdEx >= l {
- return 0, io.ErrUnexpectedEOF
- }
- b := dAtA[iNdEx]
- iNdEx++
- wire |= (uint64(b) & 0x7F) << shift
- if b < 0x80 {
- break
- }
- }
- wireType := int(wire & 0x7)
- switch wireType {
- case 0:
- for shift := uint(0); ; shift += 7 {
- if shift >= 64 {
- return 0, ErrIntOverflowCircuitv1
- }
- if iNdEx >= l {
- return 0, io.ErrUnexpectedEOF
- }
- iNdEx++
- if dAtA[iNdEx-1] < 0x80 {
- break
- }
- }
- case 1:
- iNdEx += 8
- case 2:
- var length int
- for shift := uint(0); ; shift += 7 {
- if shift >= 64 {
- return 0, ErrIntOverflowCircuitv1
- }
- if iNdEx >= l {
- return 0, io.ErrUnexpectedEOF
- }
- b := dAtA[iNdEx]
- iNdEx++
- length |= (int(b) & 0x7F) << shift
- if b < 0x80 {
- break
- }
- }
- if length < 0 {
- return 0, ErrInvalidLengthCircuitv1
- }
- iNdEx += length
- case 3:
- depth++
- case 4:
- if depth == 0 {
- return 0, ErrUnexpectedEndOfGroupCircuitv1
- }
- depth--
- case 5:
- iNdEx += 4
- default:
- return 0, fmt.Errorf("proto: illegal wireType %d", wireType)
- }
- if iNdEx < 0 {
- return 0, ErrInvalidLengthCircuitv1
- }
- if depth == 0 {
- return iNdEx, nil
- }
- }
- return 0, io.ErrUnexpectedEOF
-}
-
-var (
- ErrInvalidLengthCircuitv1 = fmt.Errorf("proto: negative length found during unmarshaling")
- ErrIntOverflowCircuitv1 = fmt.Errorf("proto: integer overflow")
- ErrUnexpectedEndOfGroupCircuitv1 = fmt.Errorf("proto: unexpected end of group")
-)
diff --git a/vendor/github.com/libp2p/go-libp2p/p2p/protocol/circuitv1/pb/circuitv1.proto b/vendor/github.com/libp2p/go-libp2p/p2p/protocol/circuitv1/pb/circuitv1.proto
deleted file mode 100644
index c591f0751..000000000
--- a/vendor/github.com/libp2p/go-libp2p/p2p/protocol/circuitv1/pb/circuitv1.proto
+++ /dev/null
@@ -1,44 +0,0 @@
-syntax = "proto2";
-
-package circuitv1.pb;
-
-message CircuitRelay {
-
- enum Status {
- SUCCESS = 100;
- HOP_SRC_ADDR_TOO_LONG = 220;
- HOP_DST_ADDR_TOO_LONG = 221;
- HOP_SRC_MULTIADDR_INVALID = 250;
- HOP_DST_MULTIADDR_INVALID = 251;
- HOP_NO_CONN_TO_DST = 260;
- HOP_CANT_DIAL_DST = 261;
- HOP_CANT_OPEN_DST_STREAM = 262;
- HOP_CANT_SPEAK_RELAY = 270;
- HOP_CANT_RELAY_TO_SELF = 280;
- STOP_SRC_ADDR_TOO_LONG = 320;
- STOP_DST_ADDR_TOO_LONG = 321;
- STOP_SRC_MULTIADDR_INVALID = 350;
- STOP_DST_MULTIADDR_INVALID = 351;
- STOP_RELAY_REFUSED = 390;
- MALFORMED_MESSAGE = 400;
- }
-
- enum Type { // RPC identifier, either HOP, STOP or STATUS
- HOP = 1;
- STOP = 2;
- STATUS = 3;
- CAN_HOP = 4;
- }
-
- message Peer {
- required bytes id = 1; // peer id
- repeated bytes addrs = 2; // peer's known addresses
- }
-
- optional Type type = 1; // Type of the message
-
- optional Peer srcPeer = 2; // srcPeer and dstPeer are used when Type is HOP or STOP
- optional Peer dstPeer = 3;
-
- optional Status code = 4; // Status code, used when Type is STATUS
-}
diff --git a/vendor/github.com/libp2p/go-libp2p/p2p/protocol/circuitv1/relay/options.go b/vendor/github.com/libp2p/go-libp2p/p2p/protocol/circuitv1/relay/options.go
deleted file mode 100644
index bfd2ed895..000000000
--- a/vendor/github.com/libp2p/go-libp2p/p2p/protocol/circuitv1/relay/options.go
+++ /dev/null
@@ -1,46 +0,0 @@
-package relay
-
-import (
- "github.com/libp2p/go-libp2p/core/peer"
-)
-
-type Resources struct {
- // MaxCircuits is the maximum number of active relay connections
- MaxCircuits int
-
- // MaxCircuitsPerPeer is the maximum number of active relay connections per peer
- MaxCircuitsPerPeer int
-
- // BufferSize is the buffer size for relaying in each direction
- BufferSize int
-}
-
-func DefaultResources() Resources {
- return Resources{
- MaxCircuits: 1024,
- MaxCircuitsPerPeer: 64,
- BufferSize: 4096,
- }
-}
-
-type ACLFilter interface {
- AllowHop(src, dest peer.ID) bool
-}
-
-type Option func(r *Relay) error
-
-// WithResources specifies resource limits for the relay
-func WithResources(rc Resources) Option {
- return func(r *Relay) error {
- r.rc = rc
- return nil
- }
-}
-
-// WithACL specifies an ACLFilter for access control
-func WithACL(acl ACLFilter) Option {
- return func(r *Relay) error {
- r.acl = acl
- return nil
- }
-}
diff --git a/vendor/github.com/libp2p/go-libp2p/p2p/protocol/circuitv1/relay/relay.go b/vendor/github.com/libp2p/go-libp2p/p2p/protocol/circuitv1/relay/relay.go
deleted file mode 100644
index 3b6f7adc8..000000000
--- a/vendor/github.com/libp2p/go-libp2p/p2p/protocol/circuitv1/relay/relay.go
+++ /dev/null
@@ -1,452 +0,0 @@
-package relay
-
-import (
- "context"
- "fmt"
- "io"
- "sync"
- "sync/atomic"
- "time"
-
- "github.com/libp2p/go-libp2p/core/host"
- "github.com/libp2p/go-libp2p/core/network"
- "github.com/libp2p/go-libp2p/core/peer"
- pb "github.com/libp2p/go-libp2p/p2p/protocol/circuitv1/pb"
- "github.com/libp2p/go-libp2p/p2p/protocol/circuitv2/util"
-
- logging "github.com/ipfs/go-log/v2"
- pool "github.com/libp2p/go-buffer-pool"
- ma "github.com/multiformats/go-multiaddr"
-)
-
-var log = logging.Logger("relay")
-
-const (
- ProtoID = "/libp2p/circuit/relay/0.1.0"
-
- ServiceName = "libp2p.relay/v1"
-
- StreamTimeout = time.Minute
- ConnectTimeout = 30 * time.Second
- HandshakeTimeout = time.Minute
-
- relayHopTag = "relay-v1-hop"
- relayHopTagValue = 2
-
- maxMessageSize = 4096
-)
-
-type Relay struct {
- closed int32
- ctx context.Context
- cancel context.CancelFunc
-
- host host.Host
- rc Resources
- acl ACLFilter
- scope network.ResourceScopeSpan
-
- mx sync.Mutex
- conns map[peer.ID]int
- active int
-}
-
-func NewRelay(h host.Host, opts ...Option) (*Relay, error) {
- r := &Relay{
- host: h,
- rc: DefaultResources(),
- conns: make(map[peer.ID]int),
- }
- r.ctx, r.cancel = context.WithCancel(context.Background())
-
- for _, opt := range opts {
- err := opt(r)
- if err != nil {
- return nil, fmt.Errorf("error applying relay option: %w", err)
- }
- }
-
- // get a scope for memory reservations at service level
- err := h.Network().ResourceManager().ViewService(ServiceName,
- func(s network.ServiceScope) error {
- var err error
- r.scope, err = s.BeginSpan()
- return err
- })
- if err != nil {
- return nil, err
- }
-
- h.SetStreamHandler(ProtoID, r.handleStream)
-
- return r, nil
-}
-
-func (r *Relay) Close() error {
- if atomic.CompareAndSwapInt32(&r.closed, 0, 1) {
- r.host.RemoveStreamHandler(ProtoID)
- r.scope.Done()
- r.cancel()
- }
- return nil
-}
-
-func (r *Relay) handleStream(s network.Stream) {
- log.Debugf("new relay stream from: %s", s.Conn().RemotePeer())
-
- if err := s.Scope().SetService(ServiceName); err != nil {
- log.Debugf("error attaching stream to relay service: %s", err)
- s.Reset()
- return
- }
-
- if err := s.Scope().ReserveMemory(maxMessageSize, network.ReservationPriorityAlways); err != nil {
- log.Debugf("error reserving memory for stream: %s", err)
- s.Reset()
- return
- }
- defer s.Scope().ReleaseMemory(maxMessageSize)
-
- rd := util.NewDelimitedReader(s, maxMessageSize)
- defer rd.Close()
-
- s.SetReadDeadline(time.Now().Add(StreamTimeout))
-
- var msg pb.CircuitRelay
-
- err := rd.ReadMsg(&msg)
- if err != nil {
- r.handleError(s, pb.CircuitRelay_MALFORMED_MESSAGE)
- return
- }
- s.SetReadDeadline(time.Time{})
-
- switch msg.GetType() {
- case pb.CircuitRelay_HOP:
- r.handleHopStream(s, &msg)
- case pb.CircuitRelay_CAN_HOP:
- r.handleCanHop(s, &msg)
- case pb.CircuitRelay_STOP:
- r.handleError(s, pb.CircuitRelay_STOP_RELAY_REFUSED)
- default:
- log.Warnf("unexpected relay handshake: %d", msg.GetType())
- r.handleError(s, pb.CircuitRelay_MALFORMED_MESSAGE)
- }
-}
-
-func (r *Relay) handleHopStream(s network.Stream, msg *pb.CircuitRelay) {
- span, err := r.scope.BeginSpan()
- if err != nil {
- log.Debugf("failed to begin relay transaction: %s", err)
- r.handleError(s, pb.CircuitRelay_HOP_CANT_SPEAK_RELAY)
- return
- }
-
- fail := func(code pb.CircuitRelay_Status) {
- span.Done()
- r.handleError(s, code)
- }
-
- // reserve buffers for the relay
- if err := span.ReserveMemory(2*r.rc.BufferSize, network.ReservationPriorityHigh); err != nil {
- log.Debugf("error reserving memory for relay: %s", err)
- fail(pb.CircuitRelay_HOP_CANT_SPEAK_RELAY)
- return
- }
-
- src, err := peerToPeerInfo(msg.GetSrcPeer())
- if err != nil {
- fail(pb.CircuitRelay_HOP_SRC_MULTIADDR_INVALID)
- return
- }
-
- if src.ID != s.Conn().RemotePeer() {
- fail(pb.CircuitRelay_HOP_SRC_MULTIADDR_INVALID)
- return
- }
-
- dest, err := peerToPeerInfo(msg.GetDstPeer())
- if err != nil {
- fail(pb.CircuitRelay_HOP_DST_MULTIADDR_INVALID)
- return
- }
-
- if dest.ID == r.host.ID() {
- fail(pb.CircuitRelay_HOP_CANT_RELAY_TO_SELF)
- return
- }
-
- if r.acl != nil && !r.acl.AllowHop(src.ID, dest.ID) {
- log.Debugf("refusing hop from %s to %s; ACL refused", src.ID, dest.ID)
- fail(pb.CircuitRelay_HOP_CANT_SPEAK_RELAY)
- return
- }
-
- r.mx.Lock()
- if r.active >= r.rc.MaxCircuits {
- r.mx.Unlock()
- log.Debugf("refusing connection from %s to %s; too many active circuits", src.ID, dest.ID)
- fail(pb.CircuitRelay_HOP_CANT_SPEAK_RELAY)
- return
- }
-
- srcConns := r.conns[src.ID]
- if srcConns >= r.rc.MaxCircuitsPerPeer {
- r.mx.Unlock()
- log.Debugf("refusing connection from %s to %s; too many connections from %s", src.ID, dest.ID, src)
- fail(pb.CircuitRelay_HOP_CANT_SPEAK_RELAY)
- return
- }
-
- destConns := r.conns[dest.ID]
- if destConns >= r.rc.MaxCircuitsPerPeer {
- r.mx.Unlock()
- log.Debugf("refusing connection from %s to %s; too many connecitons to %s", src.ID, dest.ID, dest.ID)
- fail(pb.CircuitRelay_HOP_CANT_SPEAK_RELAY)
- return
- }
-
- r.active++
- r.addConn(src.ID)
- r.addConn(src.ID)
- r.mx.Unlock()
-
- cleanup := func() {
- span.Done()
- r.mx.Lock()
- r.active--
- r.rmConn(src.ID)
- r.rmConn(dest.ID)
- r.mx.Unlock()
- }
-
- // open stream
- ctx, cancel := context.WithTimeout(r.ctx, ConnectTimeout)
- defer cancel()
-
- ctx = network.WithNoDial(ctx, "relay hop")
- bs, err := r.host.NewStream(ctx, dest.ID, ProtoID)
- if err != nil {
- log.Debugf("error opening relay stream to %s: %s", dest.ID.Pretty(), err.Error())
- if err == network.ErrNoConn {
- r.handleError(s, pb.CircuitRelay_HOP_NO_CONN_TO_DST)
- } else {
- r.handleError(s, pb.CircuitRelay_HOP_CANT_DIAL_DST)
- }
- cleanup()
- return
- }
-
- fail = func(code pb.CircuitRelay_Status) {
- bs.Reset()
- cleanup()
- r.handleError(s, code)
- }
-
- if err := bs.Scope().SetService(ServiceName); err != nil {
- log.Debugf("error attaching stream to relay service: %s", err)
- fail(pb.CircuitRelay_HOP_CANT_SPEAK_RELAY)
- return
- }
-
- // stop handshake
- if err := bs.Scope().ReserveMemory(maxMessageSize, network.ReservationPriorityAlways); err != nil {
- log.Debugf("failed to reserve memory for stream: %s", err)
- fail(pb.CircuitRelay_HOP_CANT_SPEAK_RELAY)
- return
- }
- defer bs.Scope().ReleaseMemory(maxMessageSize)
-
- rd := util.NewDelimitedReader(bs, maxMessageSize)
- wr := util.NewDelimitedWriter(bs)
- defer rd.Close()
-
- // set handshake deadline
- bs.SetDeadline(time.Now().Add(HandshakeTimeout))
-
- msg.Type = pb.CircuitRelay_STOP.Enum()
-
- err = wr.WriteMsg(msg)
- if err != nil {
- log.Debugf("error writing stop handshake: %s", err.Error())
- fail(pb.CircuitRelay_HOP_CANT_OPEN_DST_STREAM)
- return
- }
-
- msg.Reset()
-
- err = rd.ReadMsg(msg)
- if err != nil {
- log.Debugf("error reading stop response: %s", err.Error())
- fail(pb.CircuitRelay_HOP_CANT_OPEN_DST_STREAM)
- return
- }
-
- if msg.GetType() != pb.CircuitRelay_STATUS {
- log.Debugf("unexpected relay stop response: not a status message (%d)", msg.GetType())
- fail(pb.CircuitRelay_HOP_CANT_OPEN_DST_STREAM)
- return
- }
-
- if msg.GetCode() != pb.CircuitRelay_SUCCESS {
- log.Debugf("relay stop failure: %d", msg.GetCode())
- fail(msg.GetCode())
- return
- }
-
- err = r.writeResponse(s, pb.CircuitRelay_SUCCESS)
- if err != nil {
- log.Debugf("error writing relay response: %s", err.Error())
- bs.Reset()
- s.Reset()
- cleanup()
- return
- }
-
- // relay connection
- log.Infof("relaying connection between %s and %s", src.ID.Pretty(), dest.ID.Pretty())
-
- // reset deadline
- bs.SetDeadline(time.Time{})
-
- goroutines := new(int32)
- *goroutines = 2
- done := func() {
- if atomic.AddInt32(goroutines, -1) == 0 {
- s.Close()
- bs.Close()
- cleanup()
- }
- }
-
- go r.relayConn(s, bs, src.ID, dest.ID, done)
- go r.relayConn(bs, s, dest.ID, src.ID, done)
-}
-
-func (r *Relay) addConn(p peer.ID) {
- conns := r.conns[p]
- conns++
- r.conns[p] = conns
- if conns == 1 {
- r.host.ConnManager().TagPeer(p, relayHopTag, relayHopTagValue)
- }
-}
-
-func (r *Relay) rmConn(p peer.ID) {
- conns := r.conns[p]
- conns--
- if conns > 0 {
- r.conns[p] = conns
- } else {
- delete(r.conns, p)
- r.host.ConnManager().UntagPeer(p, relayHopTag)
- }
-}
-
-func (r *Relay) relayConn(src, dest network.Stream, srcID, destID peer.ID, done func()) {
- defer done()
-
- buf := pool.Get(r.rc.BufferSize)
- defer pool.Put(buf)
-
- count, err := io.CopyBuffer(dest, src, buf)
- if err != nil {
- log.Debugf("relay copy error: %s", err)
- // Reset both.
- src.Reset()
- dest.Reset()
- } else {
- // propagate the close
- dest.CloseWrite()
- }
-
- log.Debugf("relayed %d bytes from %s to %s", count, srcID, destID)
-}
-
-func (r *Relay) handleCanHop(s network.Stream, msg *pb.CircuitRelay) {
- err := r.writeResponse(s, pb.CircuitRelay_SUCCESS)
-
- if err != nil {
- s.Reset()
- log.Debugf("error writing relay response: %s", err.Error())
- } else {
- s.Close()
- }
-}
-
-func (r *Relay) handleError(s network.Stream, code pb.CircuitRelay_Status) {
- log.Warnf("relay error: %s", code)
- err := r.writeResponse(s, code)
- if err != nil {
- s.Reset()
- log.Debugf("error writing relay response: %s", err.Error())
- } else {
- s.Close()
- }
-}
-
-// Queries a peer for support of hop relay
-func CanHop(ctx context.Context, host host.Host, id peer.ID) (bool, error) {
- s, err := host.NewStream(ctx, id, ProtoID)
- if err != nil {
- return false, err
- }
- defer s.Close()
-
- rd := util.NewDelimitedReader(s, maxMessageSize)
- wr := util.NewDelimitedWriter(s)
- defer rd.Close()
-
- var msg pb.CircuitRelay
-
- msg.Type = pb.CircuitRelay_CAN_HOP.Enum()
-
- if err := wr.WriteMsg(&msg); err != nil {
- s.Reset()
- return false, err
- }
-
- msg.Reset()
-
- if err := rd.ReadMsg(&msg); err != nil {
- s.Reset()
- return false, err
- }
-
- if msg.GetType() != pb.CircuitRelay_STATUS {
- return false, fmt.Errorf("unexpected relay response; not a status message (%d)", msg.GetType())
- }
-
- return msg.GetCode() == pb.CircuitRelay_SUCCESS, nil
-}
-
-func (r *Relay) writeResponse(s network.Stream, code pb.CircuitRelay_Status) error {
- wr := util.NewDelimitedWriter(s)
-
- var msg pb.CircuitRelay
- msg.Type = pb.CircuitRelay_STATUS.Enum()
- msg.Code = code.Enum()
-
- return wr.WriteMsg(&msg)
-}
-
-func peerToPeerInfo(p *pb.CircuitRelay_Peer) (peer.AddrInfo, error) {
- if p == nil {
- return peer.AddrInfo{}, fmt.Errorf("nil peer")
- }
-
- id, err := peer.IDFromBytes(p.Id)
- if err != nil {
- return peer.AddrInfo{}, err
- }
-
- addrs := make([]ma.Multiaddr, 0, len(p.Addrs))
- for _, addrBytes := range p.Addrs {
- a, err := ma.NewMultiaddrBytes(addrBytes)
- if err == nil {
- addrs = append(addrs, a)
- }
- }
-
- return peer.AddrInfo{ID: id, Addrs: addrs}, nil
-}
diff --git a/vendor/github.com/libp2p/go-libp2p/p2p/protocol/circuitv2/client/client.go b/vendor/github.com/libp2p/go-libp2p/p2p/protocol/circuitv2/client/client.go
index aa302e7e1..c22436bca 100644
--- a/vendor/github.com/libp2p/go-libp2p/p2p/protocol/circuitv2/client/client.go
+++ b/vendor/github.com/libp2p/go-libp2p/p2p/protocol/circuitv2/client/client.go
@@ -66,13 +66,11 @@ func New(h host.Host, upgrader transport.Upgrader) (*Client, error) {
// Start registers the circuit (client) protocol stream handlers
func (c *Client) Start() {
- c.host.SetStreamHandler(proto.ProtoIDv1, c.handleStreamV1)
c.host.SetStreamHandler(proto.ProtoIDv2Stop, c.handleStreamV2)
}
func (c *Client) Close() error {
c.ctxCancel()
- c.host.RemoveStreamHandler(proto.ProtoIDv1)
c.host.RemoveStreamHandler(proto.ProtoIDv2Stop)
return nil
}
diff --git a/vendor/github.com/libp2p/go-libp2p/p2p/protocol/circuitv2/client/conn.go b/vendor/github.com/libp2p/go-libp2p/p2p/protocol/circuitv2/client/conn.go
index 23bde93d5..ed01be3b3 100644
--- a/vendor/github.com/libp2p/go-libp2p/p2p/protocol/circuitv2/client/conn.go
+++ b/vendor/github.com/libp2p/go-libp2p/p2p/protocol/circuitv2/client/conn.go
@@ -7,6 +7,7 @@ import (
"github.com/libp2p/go-libp2p/core/network"
"github.com/libp2p/go-libp2p/core/peer"
+ tpt "github.com/libp2p/go-libp2p/core/transport"
ma "github.com/multiformats/go-multiaddr"
manet "github.com/multiformats/go-multiaddr/net"
@@ -79,7 +80,7 @@ func (c *Conn) RemoteMultiaddr() ma.Multiaddr {
// TODO: We should be able to do this directly without converting to/from a string.
relayAddr, err := ma.NewComponent(
ma.ProtocolWithCode(ma.P_P2P).Name,
- c.stream.Conn().RemotePeer().Pretty(),
+ c.stream.Conn().RemotePeer().String(),
)
if err != nil {
panic(err)
@@ -102,8 +103,8 @@ func (c *Conn) LocalAddr() net.Addr {
func (c *Conn) RemoteAddr() net.Addr {
return &NetAddr{
- Relay: c.stream.Conn().RemotePeer().Pretty(),
- Remote: c.remote.ID.Pretty(),
+ Relay: c.stream.Conn().RemotePeer().String(),
+ Remote: c.remote.ID.String(),
}
}
@@ -143,3 +144,20 @@ func (c *Conn) untagHop() {
delete(c.client.hopCount, p)
}
}
+
+type capableConnWithStat interface {
+ tpt.CapableConn
+ network.ConnStat
+}
+
+type capableConn struct {
+ capableConnWithStat
+}
+
+var transportName = ma.ProtocolWithCode(ma.P_CIRCUIT).Name
+
+func (c capableConn) ConnState() network.ConnectionState {
+ return network.ConnectionState{
+ Transport: transportName,
+ }
+}
diff --git a/vendor/github.com/libp2p/go-libp2p/p2p/protocol/circuitv2/client/dial.go b/vendor/github.com/libp2p/go-libp2p/p2p/protocol/circuitv2/client/dial.go
index 2e5fc73b5..ecf5d3a51 100644
--- a/vendor/github.com/libp2p/go-libp2p/p2p/protocol/circuitv2/client/dial.go
+++ b/vendor/github.com/libp2p/go-libp2p/p2p/protocol/circuitv2/client/dial.go
@@ -8,7 +8,6 @@ import (
"github.com/libp2p/go-libp2p/core/network"
"github.com/libp2p/go-libp2p/core/peer"
"github.com/libp2p/go-libp2p/core/peerstore"
- pbv1 "github.com/libp2p/go-libp2p/p2p/protocol/circuitv1/pb"
pbv2 "github.com/libp2p/go-libp2p/p2p/protocol/circuitv2/pb"
"github.com/libp2p/go-libp2p/p2p/protocol/circuitv2/proto"
"github.com/libp2p/go-libp2p/p2p/protocol/circuitv2/util"
@@ -124,25 +123,14 @@ func (c *Client) dialPeer(ctx context.Context, relay, dest peer.AddrInfo) (*Conn
dialCtx, cancel := context.WithTimeout(ctx, DialRelayTimeout)
defer cancel()
- s, err := c.host.NewStream(dialCtx, relay.ID, proto.ProtoIDv2Hop, proto.ProtoIDv1)
+ s, err := c.host.NewStream(dialCtx, relay.ID, proto.ProtoIDv2Hop)
if err != nil {
return nil, fmt.Errorf("error opening hop stream to relay: %w", err)
}
-
- switch s.Protocol() {
- case proto.ProtoIDv2Hop:
- return c.connectV2(s, dest)
-
- case proto.ProtoIDv1:
- return c.connectV1(s, dest)
-
- default:
- s.Reset()
- return nil, fmt.Errorf("unexpected stream protocol: %s", s.Protocol())
- }
+ return c.connect(s, dest)
}
-func (c *Client) connectV2(s network.Stream, dest peer.AddrInfo) (*Conn, error) {
+func (c *Client) connect(s network.Stream, dest peer.AddrInfo) (*Conn, error) {
if err := s.Scope().ReserveMemory(maxMessageSize, network.ReservationPriorityAlways); err != nil {
s.Reset()
return nil, err
@@ -199,52 +187,3 @@ func (c *Client) connectV2(s network.Stream, dest peer.AddrInfo) (*Conn, error)
return &Conn{stream: s, remote: dest, stat: stat, client: c}, nil
}
-
-func (c *Client) connectV1(s network.Stream, dest peer.AddrInfo) (*Conn, error) {
- if err := s.Scope().ReserveMemory(maxMessageSize, network.ReservationPriorityAlways); err != nil {
- s.Reset()
- return nil, err
- }
- defer s.Scope().ReleaseMemory(maxMessageSize)
-
- rd := util.NewDelimitedReader(s, maxMessageSize)
- wr := util.NewDelimitedWriter(s)
- defer rd.Close()
-
- var msg pbv1.CircuitRelay
-
- msg.Type = pbv1.CircuitRelay_HOP.Enum()
- msg.SrcPeer = util.PeerInfoToPeerV1(c.host.Peerstore().PeerInfo(c.host.ID()))
- msg.DstPeer = util.PeerInfoToPeerV1(dest)
-
- s.SetDeadline(time.Now().Add(DialTimeout))
-
- err := wr.WriteMsg(&msg)
- if err != nil {
- s.Reset()
- return nil, err
- }
-
- msg.Reset()
-
- err = rd.ReadMsg(&msg)
- if err != nil {
- s.Reset()
- return nil, err
- }
-
- s.SetDeadline(time.Time{})
-
- if msg.GetType() != pbv1.CircuitRelay_STATUS {
- s.Reset()
- return nil, newRelayError("unexpected relay response; not a status message (%d)", msg.GetType())
- }
-
- status := msg.GetCode()
- if status != pbv1.CircuitRelay_SUCCESS {
- s.Reset()
- return nil, newRelayError("error opening relay circuit: %s (%d)", pbv1.CircuitRelay_Status_name[int32(status)], status)
- }
-
- return &Conn{stream: s, remote: dest, client: c}, nil
-}
diff --git a/vendor/github.com/libp2p/go-libp2p/p2p/protocol/circuitv2/client/handlers.go b/vendor/github.com/libp2p/go-libp2p/p2p/protocol/circuitv2/client/handlers.go
index ef50b8e82..6b5361b12 100644
--- a/vendor/github.com/libp2p/go-libp2p/p2p/protocol/circuitv2/client/handlers.go
+++ b/vendor/github.com/libp2p/go-libp2p/p2p/protocol/circuitv2/client/handlers.go
@@ -4,7 +4,6 @@ import (
"time"
"github.com/libp2p/go-libp2p/core/network"
- pbv1 "github.com/libp2p/go-libp2p/p2p/protocol/circuitv1/pb"
pbv2 "github.com/libp2p/go-libp2p/p2p/protocol/circuitv2/pb"
"github.com/libp2p/go-libp2p/p2p/protocol/circuitv2/util"
)
@@ -87,85 +86,3 @@ func (c *Client) handleStreamV2(s network.Stream) {
handleError(pbv2.Status_CONNECTION_FAILED)
}
}
-
-func (c *Client) handleStreamV1(s network.Stream) {
- log.Debugf("new relay/v1 stream from: %s", s.Conn().RemotePeer())
-
- s.SetReadDeadline(time.Now().Add(StreamTimeout))
-
- rd := util.NewDelimitedReader(s, maxMessageSize)
- defer rd.Close()
-
- writeResponse := func(status pbv1.CircuitRelay_Status) error {
- wr := util.NewDelimitedWriter(s)
-
- var msg pbv1.CircuitRelay
- msg.Type = pbv1.CircuitRelay_STATUS.Enum()
- msg.Code = status.Enum()
-
- return wr.WriteMsg(&msg)
- }
-
- handleError := func(status pbv1.CircuitRelay_Status) {
- log.Debugf("protocol error: %s (%d)", pbv1.CircuitRelay_Status_name[int32(status)], status)
- err := writeResponse(status)
- if err != nil {
- s.Reset()
- log.Debugf("error writing circuit response: %s", err.Error())
- } else {
- s.Close()
- }
- }
-
- var msg pbv1.CircuitRelay
-
- err := rd.ReadMsg(&msg)
- if err != nil {
- handleError(pbv1.CircuitRelay_MALFORMED_MESSAGE)
- return
- }
- // reset stream deadline as message has been read
- s.SetReadDeadline(time.Time{})
-
- switch msg.GetType() {
- case pbv1.CircuitRelay_STOP:
-
- case pbv1.CircuitRelay_HOP:
- handleError(pbv1.CircuitRelay_HOP_CANT_SPEAK_RELAY)
- return
-
- case pbv1.CircuitRelay_CAN_HOP:
- handleError(pbv1.CircuitRelay_HOP_CANT_SPEAK_RELAY)
- return
-
- default:
- log.Debugf("unexpected relay handshake: %d", msg.GetType())
- handleError(pbv1.CircuitRelay_MALFORMED_MESSAGE)
- return
- }
-
- src, err := util.PeerToPeerInfoV1(msg.GetSrcPeer())
- if err != nil {
- handleError(pbv1.CircuitRelay_STOP_SRC_MULTIADDR_INVALID)
- return
- }
-
- dst, err := util.PeerToPeerInfoV1(msg.GetDstPeer())
- if err != nil || dst.ID != c.host.ID() {
- handleError(pbv1.CircuitRelay_STOP_DST_MULTIADDR_INVALID)
- return
- }
-
- log.Debugf("incoming relay connection from: %s", src.ID)
-
- select {
- case c.incoming <- accept{
- conn: &Conn{stream: s, remote: src, client: c},
- writeResponse: func() error {
- return writeResponse(pbv1.CircuitRelay_SUCCESS)
- },
- }:
- case <-time.After(AcceptTimeout):
- handleError(pbv1.CircuitRelay_STOP_RELAY_REFUSED)
- }
-}
diff --git a/vendor/github.com/libp2p/go-libp2p/p2p/protocol/circuitv2/client/listen.go b/vendor/github.com/libp2p/go-libp2p/p2p/protocol/circuitv2/client/listen.go
index 0d44ac726..6f5050c2a 100644
--- a/vendor/github.com/libp2p/go-libp2p/p2p/protocol/circuitv2/client/listen.go
+++ b/vendor/github.com/libp2p/go-libp2p/p2p/protocol/circuitv2/client/listen.go
@@ -1,9 +1,9 @@
package client
import (
- "errors"
"net"
+ "github.com/libp2p/go-libp2p/core/transport"
ma "github.com/multiformats/go-multiaddr"
manet "github.com/multiformats/go-multiaddr/net"
)
@@ -33,7 +33,7 @@ func (l *Listener) Accept() (manet.Conn, error) {
return evt.conn, nil
case <-l.ctx.Done():
- return nil, errors.New("circuit v2 client closed")
+ return nil, transport.ErrListenerClosed
}
}
}
diff --git a/vendor/github.com/libp2p/go-libp2p/p2p/protocol/circuitv2/client/reservation.go b/vendor/github.com/libp2p/go-libp2p/p2p/protocol/circuitv2/client/reservation.go
index 1cd451ad1..dbb924193 100644
--- a/vendor/github.com/libp2p/go-libp2p/p2p/protocol/circuitv2/client/reservation.go
+++ b/vendor/github.com/libp2p/go-libp2p/p2p/protocol/circuitv2/client/reservation.go
@@ -37,6 +37,27 @@ type Reservation struct {
Voucher *proto.ReservationVoucher
}
+// ReservationError is the error returned on failure to reserve a slot in the relay
+type ReservationError struct {
+
+ // Status is the status returned by the relay for rejecting the reservation
+ // request. It is set to pbv2.Status_CONNECTION_FAILED on other failures
+ Status pbv2.Status
+
+ // Reason is the reason for reservation failure
+ Reason string
+
+ err error
+}
+
+func (re ReservationError) Error() string {
+ return fmt.Sprintf("reservation error: status: %s reason: %s err: %s", pbv2.Status_name[int32(re.Status)], re.Reason, re.err)
+}
+
+func (re ReservationError) Unwrap() error {
+ return re.err
+}
+
// Reserve reserves a slot in a relay and returns the reservation information.
// Clients must reserve slots in order for the relay to relay connections to them.
func Reserve(ctx context.Context, h host.Host, ai peer.AddrInfo) (*Reservation, error) {
@@ -46,7 +67,7 @@ func Reserve(ctx context.Context, h host.Host, ai peer.AddrInfo) (*Reservation,
s, err := h.NewStream(ctx, ai.ID, proto.ProtoIDv2Hop)
if err != nil {
- return nil, err
+ return nil, ReservationError{Status: pbv2.Status_CONNECTION_FAILED, Reason: "failed to open stream", err: err}
}
defer s.Close()
@@ -61,33 +82,39 @@ func Reserve(ctx context.Context, h host.Host, ai peer.AddrInfo) (*Reservation,
if err := wr.WriteMsg(&msg); err != nil {
s.Reset()
- return nil, fmt.Errorf("error writing reservation message: %w", err)
+ return nil, ReservationError{Status: pbv2.Status_CONNECTION_FAILED, Reason: "error writing reservation message", err: err}
}
msg.Reset()
if err := rd.ReadMsg(&msg); err != nil {
s.Reset()
- return nil, fmt.Errorf("error reading reservation response message: %w", err)
+ return nil, ReservationError{Status: pbv2.Status_CONNECTION_FAILED, Reason: "error reading reservation response message", err: err}
}
if msg.GetType() != pbv2.HopMessage_STATUS {
- return nil, fmt.Errorf("unexpected relay response: not a status message (%d)", msg.GetType())
+ return nil, ReservationError{
+ Status: pbv2.Status_MALFORMED_MESSAGE,
+ Reason: fmt.Sprintf("unexpected relay response: not a status message (%d)", msg.GetType()),
+ }
}
if status := msg.GetStatus(); status != pbv2.Status_OK {
- return nil, fmt.Errorf("reservation failed: %s (%d)", pbv2.Status_name[int32(status)], status)
+ return nil, ReservationError{Status: msg.GetStatus(), Reason: "reservation failed"}
}
rsvp := msg.GetReservation()
if rsvp == nil {
- return nil, fmt.Errorf("missing reservation info")
+ return nil, ReservationError{Status: pbv2.Status_MALFORMED_MESSAGE, Reason: "missing reservation info"}
}
result := &Reservation{}
result.Expiration = time.Unix(int64(rsvp.GetExpire()), 0)
if result.Expiration.Before(time.Now()) {
- return nil, fmt.Errorf("received reservation with expiration date in the past: %s", result.Expiration)
+ return nil, ReservationError{
+ Status: pbv2.Status_MALFORMED_MESSAGE,
+ Reason: fmt.Sprintf("received reservation with expiration date in the past: %s", result.Expiration),
+ }
}
addrs := rsvp.GetAddrs()
@@ -105,12 +132,19 @@ func Reserve(ctx context.Context, h host.Host, ai peer.AddrInfo) (*Reservation,
if voucherBytes != nil {
_, rec, err := record.ConsumeEnvelope(voucherBytes, proto.RecordDomain)
if err != nil {
- return nil, fmt.Errorf("error consuming voucher envelope: %w", err)
+ return nil, ReservationError{
+ Status: pbv2.Status_MALFORMED_MESSAGE,
+ Reason: fmt.Sprintf("error consuming voucher envelope: %s", err),
+ err: err,
+ }
}
voucher, ok := rec.(*proto.ReservationVoucher)
if !ok {
- return nil, fmt.Errorf("unexpected voucher record type: %+T", rec)
+ return nil, ReservationError{
+ Status: pbv2.Status_MALFORMED_MESSAGE,
+ Reason: fmt.Sprintf("unexpected voucher record type: %+T", rec),
+ }
}
result.Voucher = voucher
}
diff --git a/vendor/github.com/libp2p/go-libp2p/p2p/protocol/circuitv2/client/transport.go b/vendor/github.com/libp2p/go-libp2p/p2p/protocol/circuitv2/client/transport.go
index 97fc1ce1a..e08d55707 100644
--- a/vendor/github.com/libp2p/go-libp2p/p2p/protocol/circuitv2/client/transport.go
+++ b/vendor/github.com/libp2p/go-libp2p/p2p/protocol/circuitv2/client/transport.go
@@ -53,17 +53,28 @@ func (c *Client) Dial(ctx context.Context, a ma.Multiaddr, p peer.ID) (transport
if err != nil {
return nil, err
}
- if err := connScope.SetPeer(p); err != nil {
+ conn, err := c.dialAndUpgrade(ctx, a, p, connScope)
+ if err != nil {
connScope.Done()
return nil, err
}
+ return conn, nil
+}
+
+func (c *Client) dialAndUpgrade(ctx context.Context, a ma.Multiaddr, p peer.ID, connScope network.ConnManagementScope) (transport.CapableConn, error) {
+ if err := connScope.SetPeer(p); err != nil {
+ return nil, err
+ }
conn, err := c.dial(ctx, a, p)
if err != nil {
- connScope.Done()
return nil, err
}
conn.tagHop()
- return c.upgrader.Upgrade(ctx, c, conn, network.DirOutbound, p, connScope)
+ cc, err := c.upgrader.Upgrade(ctx, c, conn, network.DirOutbound, p, connScope)
+ if err != nil {
+ return nil, err
+ }
+ return capableConn{cc.(capableConnWithStat)}, nil
}
func (c *Client) CanDial(addr ma.Multiaddr) bool {
diff --git a/vendor/github.com/libp2p/go-libp2p/p2p/protocol/circuitv2/pb/Makefile b/vendor/github.com/libp2p/go-libp2p/p2p/protocol/circuitv2/pb/Makefile
deleted file mode 100644
index c360a6fb9..000000000
--- a/vendor/github.com/libp2p/go-libp2p/p2p/protocol/circuitv2/pb/Makefile
+++ /dev/null
@@ -1,11 +0,0 @@
-PB = $(wildcard *.proto)
-GO = $(PB:.proto=.pb.go)
-
-all: $(GO)
-
-%.pb.go: %.proto
- protoc --gogofast_out=. $<
-
-clean:
- rm -f *.pb.go
- rm -f *.go
diff --git a/vendor/github.com/libp2p/go-libp2p/p2p/protocol/circuitv2/pb/circuit.pb.go b/vendor/github.com/libp2p/go-libp2p/p2p/protocol/circuitv2/pb/circuit.pb.go
index 9cbff1ac0..d4d285a30 100644
--- a/vendor/github.com/libp2p/go-libp2p/p2p/protocol/circuitv2/pb/circuit.pb.go
+++ b/vendor/github.com/libp2p/go-libp2p/p2p/protocol/circuitv2/pb/circuit.pb.go
@@ -1,31 +1,30 @@
-// Code generated by protoc-gen-gogo. DO NOT EDIT.
-// source: circuit.proto
+// Code generated by protoc-gen-go. DO NOT EDIT.
+// versions:
+// protoc-gen-go v1.30.0
+// protoc v3.21.12
+// source: pb/circuit.proto
-package circuit_pb
+package pb
import (
- fmt "fmt"
- github_com_gogo_protobuf_proto "github.com/gogo/protobuf/proto"
- proto "github.com/gogo/protobuf/proto"
- io "io"
- math "math"
- math_bits "math/bits"
+ protoreflect "google.golang.org/protobuf/reflect/protoreflect"
+ protoimpl "google.golang.org/protobuf/runtime/protoimpl"
+ reflect "reflect"
+ sync "sync"
)
-// Reference imports to suppress errors if they are not otherwise used.
-var _ = proto.Marshal
-var _ = fmt.Errorf
-var _ = math.Inf
-
-// This is a compile-time assertion to ensure that this generated file
-// is compatible with the proto package it is being compiled against.
-// A compilation error at this line likely means your copy of the
-// proto package needs to be updated.
-const _ = proto.GoGoProtoPackageIsVersion3 // please upgrade the proto package
+const (
+ // Verify that this generated code is sufficiently up-to-date.
+ _ = protoimpl.EnforceVersion(20 - protoimpl.MinVersion)
+ // Verify that runtime/protoimpl is sufficiently up-to-date.
+ _ = protoimpl.EnforceVersion(protoimpl.MaxVersion - 20)
+)
type Status int32
const (
+ // zero value field required for proto3 compatibility
+ Status_UNUSED Status = 0
Status_OK Status = 100
Status_RESERVATION_REFUSED Status = 200
Status_RESOURCE_LIMIT_EXCEEDED Status = 201
@@ -36,27 +35,31 @@ const (
Status_UNEXPECTED_MESSAGE Status = 401
)
-var Status_name = map[int32]string{
- 100: "OK",
- 200: "RESERVATION_REFUSED",
- 201: "RESOURCE_LIMIT_EXCEEDED",
- 202: "PERMISSION_DENIED",
- 203: "CONNECTION_FAILED",
- 204: "NO_RESERVATION",
- 400: "MALFORMED_MESSAGE",
- 401: "UNEXPECTED_MESSAGE",
-}
-
-var Status_value = map[string]int32{
- "OK": 100,
- "RESERVATION_REFUSED": 200,
- "RESOURCE_LIMIT_EXCEEDED": 201,
- "PERMISSION_DENIED": 202,
- "CONNECTION_FAILED": 203,
- "NO_RESERVATION": 204,
- "MALFORMED_MESSAGE": 400,
- "UNEXPECTED_MESSAGE": 401,
-}
+// Enum value maps for Status.
+var (
+ Status_name = map[int32]string{
+ 0: "UNUSED",
+ 100: "OK",
+ 200: "RESERVATION_REFUSED",
+ 201: "RESOURCE_LIMIT_EXCEEDED",
+ 202: "PERMISSION_DENIED",
+ 203: "CONNECTION_FAILED",
+ 204: "NO_RESERVATION",
+ 400: "MALFORMED_MESSAGE",
+ 401: "UNEXPECTED_MESSAGE",
+ }
+ Status_value = map[string]int32{
+ "UNUSED": 0,
+ "OK": 100,
+ "RESERVATION_REFUSED": 200,
+ "RESOURCE_LIMIT_EXCEEDED": 201,
+ "PERMISSION_DENIED": 202,
+ "CONNECTION_FAILED": 203,
+ "NO_RESERVATION": 204,
+ "MALFORMED_MESSAGE": 400,
+ "UNEXPECTED_MESSAGE": 401,
+ }
+)
func (x Status) Enum() *Status {
p := new(Status)
@@ -65,20 +68,24 @@ func (x Status) Enum() *Status {
}
func (x Status) String() string {
- return proto.EnumName(Status_name, int32(x))
+ return protoimpl.X.EnumStringOf(x.Descriptor(), protoreflect.EnumNumber(x))
}
-func (x *Status) UnmarshalJSON(data []byte) error {
- value, err := proto.UnmarshalJSONEnum(Status_value, data, "Status")
- if err != nil {
- return err
- }
- *x = Status(value)
- return nil
+func (Status) Descriptor() protoreflect.EnumDescriptor {
+ return file_pb_circuit_proto_enumTypes[0].Descriptor()
}
+func (Status) Type() protoreflect.EnumType {
+ return &file_pb_circuit_proto_enumTypes[0]
+}
+
+func (x Status) Number() protoreflect.EnumNumber {
+ return protoreflect.EnumNumber(x)
+}
+
+// Deprecated: Use Status.Descriptor instead.
func (Status) EnumDescriptor() ([]byte, []int) {
- return fileDescriptor_ed01bbc211f15e47, []int{0}
+ return file_pb_circuit_proto_rawDescGZIP(), []int{0}
}
type HopMessage_Type int32
@@ -89,17 +96,19 @@ const (
HopMessage_STATUS HopMessage_Type = 2
)
-var HopMessage_Type_name = map[int32]string{
- 0: "RESERVE",
- 1: "CONNECT",
- 2: "STATUS",
-}
-
-var HopMessage_Type_value = map[string]int32{
- "RESERVE": 0,
- "CONNECT": 1,
- "STATUS": 2,
-}
+// Enum value maps for HopMessage_Type.
+var (
+ HopMessage_Type_name = map[int32]string{
+ 0: "RESERVE",
+ 1: "CONNECT",
+ 2: "STATUS",
+ }
+ HopMessage_Type_value = map[string]int32{
+ "RESERVE": 0,
+ "CONNECT": 1,
+ "STATUS": 2,
+ }
+)
func (x HopMessage_Type) Enum() *HopMessage_Type {
p := new(HopMessage_Type)
@@ -108,20 +117,24 @@ func (x HopMessage_Type) Enum() *HopMessage_Type {
}
func (x HopMessage_Type) String() string {
- return proto.EnumName(HopMessage_Type_name, int32(x))
+ return protoimpl.X.EnumStringOf(x.Descriptor(), protoreflect.EnumNumber(x))
}
-func (x *HopMessage_Type) UnmarshalJSON(data []byte) error {
- value, err := proto.UnmarshalJSONEnum(HopMessage_Type_value, data, "HopMessage_Type")
- if err != nil {
- return err
- }
- *x = HopMessage_Type(value)
- return nil
+func (HopMessage_Type) Descriptor() protoreflect.EnumDescriptor {
+ return file_pb_circuit_proto_enumTypes[1].Descriptor()
}
+func (HopMessage_Type) Type() protoreflect.EnumType {
+ return &file_pb_circuit_proto_enumTypes[1]
+}
+
+func (x HopMessage_Type) Number() protoreflect.EnumNumber {
+ return protoreflect.EnumNumber(x)
+}
+
+// Deprecated: Use HopMessage_Type.Descriptor instead.
func (HopMessage_Type) EnumDescriptor() ([]byte, []int) {
- return fileDescriptor_ed01bbc211f15e47, []int{0, 0}
+ return file_pb_circuit_proto_rawDescGZIP(), []int{0, 0}
}
type StopMessage_Type int32
@@ -131,15 +144,17 @@ const (
StopMessage_STATUS StopMessage_Type = 1
)
-var StopMessage_Type_name = map[int32]string{
- 0: "CONNECT",
- 1: "STATUS",
-}
-
-var StopMessage_Type_value = map[string]int32{
- "CONNECT": 0,
- "STATUS": 1,
-}
+// Enum value maps for StopMessage_Type.
+var (
+ StopMessage_Type_name = map[int32]string{
+ 0: "CONNECT",
+ 1: "STATUS",
+ }
+ StopMessage_Type_value = map[string]int32{
+ "CONNECT": 0,
+ "STATUS": 1,
+ }
+)
func (x StopMessage_Type) Enum() *StopMessage_Type {
p := new(StopMessage_Type)
@@ -148,1611 +163,565 @@ func (x StopMessage_Type) Enum() *StopMessage_Type {
}
func (x StopMessage_Type) String() string {
- return proto.EnumName(StopMessage_Type_name, int32(x))
+ return protoimpl.X.EnumStringOf(x.Descriptor(), protoreflect.EnumNumber(x))
}
-func (x *StopMessage_Type) UnmarshalJSON(data []byte) error {
- value, err := proto.UnmarshalJSONEnum(StopMessage_Type_value, data, "StopMessage_Type")
- if err != nil {
- return err
- }
- *x = StopMessage_Type(value)
- return nil
+func (StopMessage_Type) Descriptor() protoreflect.EnumDescriptor {
+ return file_pb_circuit_proto_enumTypes[2].Descriptor()
}
-func (StopMessage_Type) EnumDescriptor() ([]byte, []int) {
- return fileDescriptor_ed01bbc211f15e47, []int{1, 0}
+func (StopMessage_Type) Type() protoreflect.EnumType {
+ return &file_pb_circuit_proto_enumTypes[2]
}
-type HopMessage struct {
- Type *HopMessage_Type `protobuf:"varint,1,req,name=type,enum=circuit.pb.HopMessage_Type" json:"type,omitempty"`
- Peer *Peer `protobuf:"bytes,2,opt,name=peer" json:"peer,omitempty"`
- Reservation *Reservation `protobuf:"bytes,3,opt,name=reservation" json:"reservation,omitempty"`
- Limit *Limit `protobuf:"bytes,4,opt,name=limit" json:"limit,omitempty"`
- Status *Status `protobuf:"varint,5,opt,name=status,enum=circuit.pb.Status" json:"status,omitempty"`
- XXX_NoUnkeyedLiteral struct{} `json:"-"`
- XXX_unrecognized []byte `json:"-"`
- XXX_sizecache int32 `json:"-"`
-}
-
-func (m *HopMessage) Reset() { *m = HopMessage{} }
-func (m *HopMessage) String() string { return proto.CompactTextString(m) }
-func (*HopMessage) ProtoMessage() {}
-func (*HopMessage) Descriptor() ([]byte, []int) {
- return fileDescriptor_ed01bbc211f15e47, []int{0}
-}
-func (m *HopMessage) XXX_Unmarshal(b []byte) error {
- return m.Unmarshal(b)
-}
-func (m *HopMessage) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) {
- if deterministic {
- return xxx_messageInfo_HopMessage.Marshal(b, m, deterministic)
- } else {
- b = b[:cap(b)]
- n, err := m.MarshalToSizedBuffer(b)
- if err != nil {
- return nil, err
- }
- return b[:n], nil
- }
-}
-func (m *HopMessage) XXX_Merge(src proto.Message) {
- xxx_messageInfo_HopMessage.Merge(m, src)
-}
-func (m *HopMessage) XXX_Size() int {
- return m.Size()
-}
-func (m *HopMessage) XXX_DiscardUnknown() {
- xxx_messageInfo_HopMessage.DiscardUnknown(m)
+func (x StopMessage_Type) Number() protoreflect.EnumNumber {
+ return protoreflect.EnumNumber(x)
}
-var xxx_messageInfo_HopMessage proto.InternalMessageInfo
-
-func (m *HopMessage) GetType() HopMessage_Type {
- if m != nil && m.Type != nil {
- return *m.Type
- }
- return HopMessage_RESERVE
+// Deprecated: Use StopMessage_Type.Descriptor instead.
+func (StopMessage_Type) EnumDescriptor() ([]byte, []int) {
+ return file_pb_circuit_proto_rawDescGZIP(), []int{1, 0}
}
-func (m *HopMessage) GetPeer() *Peer {
- if m != nil {
- return m.Peer
- }
- return nil
-}
+type HopMessage struct {
+ state protoimpl.MessageState
+ sizeCache protoimpl.SizeCache
+ unknownFields protoimpl.UnknownFields
-func (m *HopMessage) GetReservation() *Reservation {
- if m != nil {
- return m.Reservation
- }
- return nil
+ // This field is marked optional for backwards compatibility with proto2.
+ // Users should make sure to always set this.
+ Type *HopMessage_Type `protobuf:"varint,1,opt,name=type,proto3,enum=circuit.pb.HopMessage_Type,oneof" json:"type,omitempty"`
+ Peer *Peer `protobuf:"bytes,2,opt,name=peer,proto3,oneof" json:"peer,omitempty"`
+ Reservation *Reservation `protobuf:"bytes,3,opt,name=reservation,proto3,oneof" json:"reservation,omitempty"`
+ Limit *Limit `protobuf:"bytes,4,opt,name=limit,proto3,oneof" json:"limit,omitempty"`
+ Status *Status `protobuf:"varint,5,opt,name=status,proto3,enum=circuit.pb.Status,oneof" json:"status,omitempty"`
}
-func (m *HopMessage) GetLimit() *Limit {
- if m != nil {
- return m.Limit
+func (x *HopMessage) Reset() {
+ *x = HopMessage{}
+ if protoimpl.UnsafeEnabled {
+ mi := &file_pb_circuit_proto_msgTypes[0]
+ ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
+ ms.StoreMessageInfo(mi)
}
- return nil
}
-func (m *HopMessage) GetStatus() Status {
- if m != nil && m.Status != nil {
- return *m.Status
- }
- return Status_OK
+func (x *HopMessage) String() string {
+ return protoimpl.X.MessageStringOf(x)
}
-type StopMessage struct {
- Type *StopMessage_Type `protobuf:"varint,1,req,name=type,enum=circuit.pb.StopMessage_Type" json:"type,omitempty"`
- Peer *Peer `protobuf:"bytes,2,opt,name=peer" json:"peer,omitempty"`
- Limit *Limit `protobuf:"bytes,3,opt,name=limit" json:"limit,omitempty"`
- Status *Status `protobuf:"varint,4,opt,name=status,enum=circuit.pb.Status" json:"status,omitempty"`
- XXX_NoUnkeyedLiteral struct{} `json:"-"`
- XXX_unrecognized []byte `json:"-"`
- XXX_sizecache int32 `json:"-"`
-}
-
-func (m *StopMessage) Reset() { *m = StopMessage{} }
-func (m *StopMessage) String() string { return proto.CompactTextString(m) }
-func (*StopMessage) ProtoMessage() {}
-func (*StopMessage) Descriptor() ([]byte, []int) {
- return fileDescriptor_ed01bbc211f15e47, []int{1}
-}
-func (m *StopMessage) XXX_Unmarshal(b []byte) error {
- return m.Unmarshal(b)
-}
-func (m *StopMessage) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) {
- if deterministic {
- return xxx_messageInfo_StopMessage.Marshal(b, m, deterministic)
- } else {
- b = b[:cap(b)]
- n, err := m.MarshalToSizedBuffer(b)
- if err != nil {
- return nil, err
+func (*HopMessage) ProtoMessage() {}
+
+func (x *HopMessage) ProtoReflect() protoreflect.Message {
+ mi := &file_pb_circuit_proto_msgTypes[0]
+ if protoimpl.UnsafeEnabled && x != nil {
+ ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
+ if ms.LoadMessageInfo() == nil {
+ ms.StoreMessageInfo(mi)
}
- return b[:n], nil
+ return ms
}
-}
-func (m *StopMessage) XXX_Merge(src proto.Message) {
- xxx_messageInfo_StopMessage.Merge(m, src)
-}
-func (m *StopMessage) XXX_Size() int {
- return m.Size()
-}
-func (m *StopMessage) XXX_DiscardUnknown() {
- xxx_messageInfo_StopMessage.DiscardUnknown(m)
+ return mi.MessageOf(x)
}
-var xxx_messageInfo_StopMessage proto.InternalMessageInfo
+// Deprecated: Use HopMessage.ProtoReflect.Descriptor instead.
+func (*HopMessage) Descriptor() ([]byte, []int) {
+ return file_pb_circuit_proto_rawDescGZIP(), []int{0}
+}
-func (m *StopMessage) GetType() StopMessage_Type {
- if m != nil && m.Type != nil {
- return *m.Type
+func (x *HopMessage) GetType() HopMessage_Type {
+ if x != nil && x.Type != nil {
+ return *x.Type
}
- return StopMessage_CONNECT
+ return HopMessage_RESERVE
}
-func (m *StopMessage) GetPeer() *Peer {
- if m != nil {
- return m.Peer
+func (x *HopMessage) GetPeer() *Peer {
+ if x != nil {
+ return x.Peer
}
return nil
}
-func (m *StopMessage) GetLimit() *Limit {
- if m != nil {
- return m.Limit
+func (x *HopMessage) GetReservation() *Reservation {
+ if x != nil {
+ return x.Reservation
}
return nil
}
-func (m *StopMessage) GetStatus() Status {
- if m != nil && m.Status != nil {
- return *m.Status
+func (x *HopMessage) GetLimit() *Limit {
+ if x != nil {
+ return x.Limit
}
- return Status_OK
-}
-
-type Peer struct {
- Id []byte `protobuf:"bytes,1,req,name=id" json:"id,omitempty"`
- Addrs [][]byte `protobuf:"bytes,2,rep,name=addrs" json:"addrs,omitempty"`
- XXX_NoUnkeyedLiteral struct{} `json:"-"`
- XXX_unrecognized []byte `json:"-"`
- XXX_sizecache int32 `json:"-"`
+ return nil
}
-func (m *Peer) Reset() { *m = Peer{} }
-func (m *Peer) String() string { return proto.CompactTextString(m) }
-func (*Peer) ProtoMessage() {}
-func (*Peer) Descriptor() ([]byte, []int) {
- return fileDescriptor_ed01bbc211f15e47, []int{2}
-}
-func (m *Peer) XXX_Unmarshal(b []byte) error {
- return m.Unmarshal(b)
-}
-func (m *Peer) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) {
- if deterministic {
- return xxx_messageInfo_Peer.Marshal(b, m, deterministic)
- } else {
- b = b[:cap(b)]
- n, err := m.MarshalToSizedBuffer(b)
- if err != nil {
- return nil, err
- }
- return b[:n], nil
+func (x *HopMessage) GetStatus() Status {
+ if x != nil && x.Status != nil {
+ return *x.Status
}
-}
-func (m *Peer) XXX_Merge(src proto.Message) {
- xxx_messageInfo_Peer.Merge(m, src)
-}
-func (m *Peer) XXX_Size() int {
- return m.Size()
-}
-func (m *Peer) XXX_DiscardUnknown() {
- xxx_messageInfo_Peer.DiscardUnknown(m)
+ return Status_UNUSED
}
-var xxx_messageInfo_Peer proto.InternalMessageInfo
+type StopMessage struct {
+ state protoimpl.MessageState
+ sizeCache protoimpl.SizeCache
+ unknownFields protoimpl.UnknownFields
-func (m *Peer) GetId() []byte {
- if m != nil {
- return m.Id
- }
- return nil
+ // This field is marked optional for backwards compatibility with proto2.
+ // Users should make sure to always set this.
+ Type *StopMessage_Type `protobuf:"varint,1,opt,name=type,proto3,enum=circuit.pb.StopMessage_Type,oneof" json:"type,omitempty"`
+ Peer *Peer `protobuf:"bytes,2,opt,name=peer,proto3,oneof" json:"peer,omitempty"`
+ Limit *Limit `protobuf:"bytes,3,opt,name=limit,proto3,oneof" json:"limit,omitempty"`
+ Status *Status `protobuf:"varint,4,opt,name=status,proto3,enum=circuit.pb.Status,oneof" json:"status,omitempty"`
}
-func (m *Peer) GetAddrs() [][]byte {
- if m != nil {
- return m.Addrs
+func (x *StopMessage) Reset() {
+ *x = StopMessage{}
+ if protoimpl.UnsafeEnabled {
+ mi := &file_pb_circuit_proto_msgTypes[1]
+ ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
+ ms.StoreMessageInfo(mi)
}
- return nil
}
-type Reservation struct {
- Expire *uint64 `protobuf:"varint,1,req,name=expire" json:"expire,omitempty"`
- Addrs [][]byte `protobuf:"bytes,2,rep,name=addrs" json:"addrs,omitempty"`
- Voucher []byte `protobuf:"bytes,3,opt,name=voucher" json:"voucher,omitempty"`
- XXX_NoUnkeyedLiteral struct{} `json:"-"`
- XXX_unrecognized []byte `json:"-"`
- XXX_sizecache int32 `json:"-"`
+func (x *StopMessage) String() string {
+ return protoimpl.X.MessageStringOf(x)
}
-func (m *Reservation) Reset() { *m = Reservation{} }
-func (m *Reservation) String() string { return proto.CompactTextString(m) }
-func (*Reservation) ProtoMessage() {}
-func (*Reservation) Descriptor() ([]byte, []int) {
- return fileDescriptor_ed01bbc211f15e47, []int{3}
-}
-func (m *Reservation) XXX_Unmarshal(b []byte) error {
- return m.Unmarshal(b)
-}
-func (m *Reservation) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) {
- if deterministic {
- return xxx_messageInfo_Reservation.Marshal(b, m, deterministic)
- } else {
- b = b[:cap(b)]
- n, err := m.MarshalToSizedBuffer(b)
- if err != nil {
- return nil, err
+func (*StopMessage) ProtoMessage() {}
+
+func (x *StopMessage) ProtoReflect() protoreflect.Message {
+ mi := &file_pb_circuit_proto_msgTypes[1]
+ if protoimpl.UnsafeEnabled && x != nil {
+ ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
+ if ms.LoadMessageInfo() == nil {
+ ms.StoreMessageInfo(mi)
}
- return b[:n], nil
+ return ms
}
-}
-func (m *Reservation) XXX_Merge(src proto.Message) {
- xxx_messageInfo_Reservation.Merge(m, src)
-}
-func (m *Reservation) XXX_Size() int {
- return m.Size()
-}
-func (m *Reservation) XXX_DiscardUnknown() {
- xxx_messageInfo_Reservation.DiscardUnknown(m)
+ return mi.MessageOf(x)
}
-var xxx_messageInfo_Reservation proto.InternalMessageInfo
+// Deprecated: Use StopMessage.ProtoReflect.Descriptor instead.
+func (*StopMessage) Descriptor() ([]byte, []int) {
+ return file_pb_circuit_proto_rawDescGZIP(), []int{1}
+}
-func (m *Reservation) GetExpire() uint64 {
- if m != nil && m.Expire != nil {
- return *m.Expire
+func (x *StopMessage) GetType() StopMessage_Type {
+ if x != nil && x.Type != nil {
+ return *x.Type
}
- return 0
+ return StopMessage_CONNECT
}
-func (m *Reservation) GetAddrs() [][]byte {
- if m != nil {
- return m.Addrs
+func (x *StopMessage) GetPeer() *Peer {
+ if x != nil {
+ return x.Peer
}
return nil
}
-func (m *Reservation) GetVoucher() []byte {
- if m != nil {
- return m.Voucher
+func (x *StopMessage) GetLimit() *Limit {
+ if x != nil {
+ return x.Limit
}
return nil
}
-type Limit struct {
- Duration *uint32 `protobuf:"varint,1,opt,name=duration" json:"duration,omitempty"`
- Data *uint64 `protobuf:"varint,2,opt,name=data" json:"data,omitempty"`
- XXX_NoUnkeyedLiteral struct{} `json:"-"`
- XXX_unrecognized []byte `json:"-"`
- XXX_sizecache int32 `json:"-"`
-}
-
-func (m *Limit) Reset() { *m = Limit{} }
-func (m *Limit) String() string { return proto.CompactTextString(m) }
-func (*Limit) ProtoMessage() {}
-func (*Limit) Descriptor() ([]byte, []int) {
- return fileDescriptor_ed01bbc211f15e47, []int{4}
-}
-func (m *Limit) XXX_Unmarshal(b []byte) error {
- return m.Unmarshal(b)
-}
-func (m *Limit) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) {
- if deterministic {
- return xxx_messageInfo_Limit.Marshal(b, m, deterministic)
- } else {
- b = b[:cap(b)]
- n, err := m.MarshalToSizedBuffer(b)
- if err != nil {
- return nil, err
- }
- return b[:n], nil
+func (x *StopMessage) GetStatus() Status {
+ if x != nil && x.Status != nil {
+ return *x.Status
}
-}
-func (m *Limit) XXX_Merge(src proto.Message) {
- xxx_messageInfo_Limit.Merge(m, src)
-}
-func (m *Limit) XXX_Size() int {
- return m.Size()
-}
-func (m *Limit) XXX_DiscardUnknown() {
- xxx_messageInfo_Limit.DiscardUnknown(m)
+ return Status_UNUSED
}
-var xxx_messageInfo_Limit proto.InternalMessageInfo
+type Peer struct {
+ state protoimpl.MessageState
+ sizeCache protoimpl.SizeCache
+ unknownFields protoimpl.UnknownFields
-func (m *Limit) GetDuration() uint32 {
- if m != nil && m.Duration != nil {
- return *m.Duration
- }
- return 0
+ // This field is marked optional for backwards compatibility with proto2.
+ // Users should make sure to always set this.
+ Id []byte `protobuf:"bytes,1,opt,name=id,proto3,oneof" json:"id,omitempty"`
+ Addrs [][]byte `protobuf:"bytes,2,rep,name=addrs,proto3" json:"addrs,omitempty"`
}
-func (m *Limit) GetData() uint64 {
- if m != nil && m.Data != nil {
- return *m.Data
+func (x *Peer) Reset() {
+ *x = Peer{}
+ if protoimpl.UnsafeEnabled {
+ mi := &file_pb_circuit_proto_msgTypes[2]
+ ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
+ ms.StoreMessageInfo(mi)
}
- return 0
}
-func init() {
- proto.RegisterEnum("circuit.pb.Status", Status_name, Status_value)
- proto.RegisterEnum("circuit.pb.HopMessage_Type", HopMessage_Type_name, HopMessage_Type_value)
- proto.RegisterEnum("circuit.pb.StopMessage_Type", StopMessage_Type_name, StopMessage_Type_value)
- proto.RegisterType((*HopMessage)(nil), "circuit.pb.HopMessage")
- proto.RegisterType((*StopMessage)(nil), "circuit.pb.StopMessage")
- proto.RegisterType((*Peer)(nil), "circuit.pb.Peer")
- proto.RegisterType((*Reservation)(nil), "circuit.pb.Reservation")
- proto.RegisterType((*Limit)(nil), "circuit.pb.Limit")
-}
-
-func init() { proto.RegisterFile("circuit.proto", fileDescriptor_ed01bbc211f15e47) }
-
-var fileDescriptor_ed01bbc211f15e47 = []byte{
- // 514 bytes of a gzipped FileDescriptorProto
- 0x1f, 0x8b, 0x08, 0x00, 0x00, 0x00, 0x00, 0x00, 0x02, 0xff, 0xa4, 0x92, 0xcf, 0x8a, 0xd3, 0x50,
- 0x18, 0xc5, 0xe7, 0xa6, 0x69, 0x47, 0xbe, 0x76, 0x4a, 0xe6, 0x1b, 0x99, 0x06, 0x1d, 0x6a, 0x29,
- 0x82, 0x65, 0x90, 0x2a, 0xdd, 0x88, 0xcb, 0xda, 0x7c, 0xd5, 0x60, 0x93, 0x94, 0x7b, 0x53, 0x99,
- 0x5d, 0x89, 0xcd, 0x45, 0x03, 0x6a, 0x43, 0x92, 0x0e, 0xce, 0x5b, 0xe8, 0x23, 0xf8, 0x22, 0xae,
- 0xc7, 0x3f, 0x0b, 0xf7, 0x6e, 0xa4, 0x4f, 0x22, 0xb9, 0xe9, 0xb4, 0x19, 0x10, 0x14, 0xdc, 0xf5,
- 0xdc, 0x73, 0x0e, 0xb7, 0xbf, 0x73, 0x03, 0x07, 0x8b, 0x28, 0x59, 0xac, 0xa2, 0xac, 0x1f, 0x27,
- 0xcb, 0x6c, 0x89, 0xb0, 0x95, 0x2f, 0xbb, 0x9f, 0x34, 0x80, 0x67, 0xcb, 0xd8, 0x91, 0x69, 0x1a,
- 0xbc, 0x92, 0xf8, 0x00, 0xf4, 0xec, 0x22, 0x96, 0x26, 0xeb, 0x68, 0xbd, 0xe6, 0xe0, 0x76, 0x7f,
- 0x97, 0xec, 0xef, 0x52, 0x7d, 0xff, 0x22, 0x96, 0x5c, 0x05, 0xf1, 0x2e, 0xe8, 0xb1, 0x94, 0x89,
- 0xa9, 0x75, 0x58, 0xaf, 0x3e, 0x30, 0xca, 0x85, 0xa9, 0x94, 0x09, 0x57, 0x2e, 0x3e, 0x86, 0x7a,
- 0x22, 0x53, 0x99, 0x9c, 0x07, 0x59, 0xb4, 0x7c, 0x67, 0x56, 0x54, 0xb8, 0x55, 0x0e, 0xf3, 0x9d,
- 0xcd, 0xcb, 0x59, 0xbc, 0x07, 0xd5, 0x37, 0xd1, 0xdb, 0x28, 0x33, 0x75, 0x55, 0x3a, 0x2c, 0x97,
- 0x26, 0xb9, 0xc1, 0x0b, 0x1f, 0x4f, 0xa1, 0x96, 0x66, 0x41, 0xb6, 0x4a, 0xcd, 0x6a, 0x87, 0xf5,
- 0x9a, 0x03, 0x2c, 0x27, 0x85, 0x72, 0xf8, 0x26, 0xd1, 0xbd, 0x0f, 0x7a, 0xce, 0x80, 0x75, 0xd8,
- 0xe7, 0x24, 0x88, 0xbf, 0x20, 0x63, 0x2f, 0x17, 0x23, 0xcf, 0x75, 0x69, 0xe4, 0x1b, 0x0c, 0x01,
- 0x6a, 0xc2, 0x1f, 0xfa, 0x33, 0x61, 0x68, 0xdd, 0x9f, 0x0c, 0xea, 0x22, 0xdb, 0x8d, 0xf4, 0xf0,
- 0xda, 0x48, 0x27, 0xd7, 0xef, 0xf9, 0x8f, 0x95, 0xb6, 0xa8, 0x95, 0x7f, 0x46, 0xd5, 0xff, 0x8a,
- 0x7a, 0x67, 0x87, 0x7a, 0x45, 0xb7, 0x57, 0xa2, 0x63, 0xf9, 0x16, 0xf9, 0x7f, 0xc0, 0x26, 0x68,
- 0x51, 0xa8, 0x98, 0x1a, 0x5c, 0x8b, 0x42, 0xbc, 0x09, 0xd5, 0x20, 0x0c, 0x93, 0xd4, 0xd4, 0x3a,
- 0x95, 0x5e, 0x83, 0x17, 0xa2, 0x3b, 0x83, 0x7a, 0xe9, 0xa9, 0xf0, 0x18, 0x6a, 0xf2, 0x7d, 0x1c,
- 0x25, 0xc5, 0x18, 0x3a, 0xdf, 0xa8, 0x3f, 0x97, 0xd1, 0x84, 0xfd, 0xf3, 0xe5, 0x6a, 0xf1, 0x5a,
- 0x26, 0x0a, 0xb1, 0xc1, 0xaf, 0x64, 0xf7, 0x11, 0x54, 0x15, 0x21, 0xde, 0x82, 0x1b, 0xe1, 0x2a,
- 0x29, 0x3e, 0x13, 0xd6, 0x61, 0xbd, 0x03, 0xbe, 0xd5, 0x88, 0xa0, 0x87, 0x41, 0x16, 0xa8, 0x15,
- 0x75, 0xae, 0x7e, 0x9f, 0x7e, 0x66, 0x50, 0x2b, 0x88, 0xb1, 0x06, 0x9a, 0xf7, 0xdc, 0x08, 0xd1,
- 0x84, 0xa3, 0xe2, 0x51, 0x87, 0xbe, 0xed, 0xb9, 0x73, 0x4e, 0xe3, 0x99, 0x20, 0xcb, 0xb8, 0x64,
- 0x78, 0x02, 0x2d, 0x4e, 0xc2, 0x9b, 0xf1, 0x11, 0xcd, 0x27, 0xb6, 0x63, 0xfb, 0x73, 0x3a, 0x1b,
- 0x11, 0x59, 0x64, 0x19, 0x5f, 0x18, 0x1e, 0xc3, 0xe1, 0x94, 0xb8, 0x63, 0x0b, 0x91, 0xd7, 0x2c,
- 0x72, 0x6d, 0xb2, 0x8c, 0xaf, 0xea, 0x7c, 0xb3, 0x5c, 0x7e, 0x3e, 0x1e, 0xda, 0x13, 0xb2, 0x8c,
- 0x6f, 0x0c, 0x8f, 0xa0, 0xe9, 0x7a, 0xf3, 0xd2, 0x55, 0xc6, 0x77, 0x15, 0x76, 0x86, 0x93, 0xb1,
- 0xc7, 0x1d, 0xb2, 0xe6, 0x0e, 0x09, 0x31, 0x7c, 0x4a, 0xc6, 0x87, 0x0a, 0xb6, 0x00, 0x67, 0x2e,
- 0x9d, 0x4d, 0x69, 0xe4, 0x97, 0x8c, 0x8f, 0x95, 0x27, 0x8d, 0xcb, 0x75, 0x9b, 0xfd, 0x58, 0xb7,
- 0xd9, 0xaf, 0x75, 0x9b, 0xfd, 0x0e, 0x00, 0x00, 0xff, 0xff, 0x63, 0x19, 0x1e, 0x6c, 0xaa, 0x03,
- 0x00, 0x00,
-}
-
-func (m *HopMessage) Marshal() (dAtA []byte, err error) {
- size := m.Size()
- dAtA = make([]byte, size)
- n, err := m.MarshalToSizedBuffer(dAtA[:size])
- if err != nil {
- return nil, err
- }
- return dAtA[:n], nil
+func (x *Peer) String() string {
+ return protoimpl.X.MessageStringOf(x)
}
-func (m *HopMessage) MarshalTo(dAtA []byte) (int, error) {
- size := m.Size()
- return m.MarshalToSizedBuffer(dAtA[:size])
-}
+func (*Peer) ProtoMessage() {}
-func (m *HopMessage) MarshalToSizedBuffer(dAtA []byte) (int, error) {
- i := len(dAtA)
- _ = i
- var l int
- _ = l
- if m.XXX_unrecognized != nil {
- i -= len(m.XXX_unrecognized)
- copy(dAtA[i:], m.XXX_unrecognized)
- }
- if m.Status != nil {
- i = encodeVarintCircuit(dAtA, i, uint64(*m.Status))
- i--
- dAtA[i] = 0x28
- }
- if m.Limit != nil {
- {
- size, err := m.Limit.MarshalToSizedBuffer(dAtA[:i])
- if err != nil {
- return 0, err
- }
- i -= size
- i = encodeVarintCircuit(dAtA, i, uint64(size))
- }
- i--
- dAtA[i] = 0x22
- }
- if m.Reservation != nil {
- {
- size, err := m.Reservation.MarshalToSizedBuffer(dAtA[:i])
- if err != nil {
- return 0, err
- }
- i -= size
- i = encodeVarintCircuit(dAtA, i, uint64(size))
+func (x *Peer) ProtoReflect() protoreflect.Message {
+ mi := &file_pb_circuit_proto_msgTypes[2]
+ if protoimpl.UnsafeEnabled && x != nil {
+ ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
+ if ms.LoadMessageInfo() == nil {
+ ms.StoreMessageInfo(mi)
}
- i--
- dAtA[i] = 0x1a
+ return ms
}
- if m.Peer != nil {
- {
- size, err := m.Peer.MarshalToSizedBuffer(dAtA[:i])
- if err != nil {
- return 0, err
- }
- i -= size
- i = encodeVarintCircuit(dAtA, i, uint64(size))
- }
- i--
- dAtA[i] = 0x12
- }
- if m.Type == nil {
- return 0, github_com_gogo_protobuf_proto.NewRequiredNotSetError("type")
- } else {
- i = encodeVarintCircuit(dAtA, i, uint64(*m.Type))
- i--
- dAtA[i] = 0x8
- }
- return len(dAtA) - i, nil
+ return mi.MessageOf(x)
}
-func (m *StopMessage) Marshal() (dAtA []byte, err error) {
- size := m.Size()
- dAtA = make([]byte, size)
- n, err := m.MarshalToSizedBuffer(dAtA[:size])
- if err != nil {
- return nil, err
- }
- return dAtA[:n], nil
-}
-
-func (m *StopMessage) MarshalTo(dAtA []byte) (int, error) {
- size := m.Size()
- return m.MarshalToSizedBuffer(dAtA[:size])
+// Deprecated: Use Peer.ProtoReflect.Descriptor instead.
+func (*Peer) Descriptor() ([]byte, []int) {
+ return file_pb_circuit_proto_rawDescGZIP(), []int{2}
}
-func (m *StopMessage) MarshalToSizedBuffer(dAtA []byte) (int, error) {
- i := len(dAtA)
- _ = i
- var l int
- _ = l
- if m.XXX_unrecognized != nil {
- i -= len(m.XXX_unrecognized)
- copy(dAtA[i:], m.XXX_unrecognized)
- }
- if m.Status != nil {
- i = encodeVarintCircuit(dAtA, i, uint64(*m.Status))
- i--
- dAtA[i] = 0x20
- }
- if m.Limit != nil {
- {
- size, err := m.Limit.MarshalToSizedBuffer(dAtA[:i])
- if err != nil {
- return 0, err
- }
- i -= size
- i = encodeVarintCircuit(dAtA, i, uint64(size))
- }
- i--
- dAtA[i] = 0x1a
- }
- if m.Peer != nil {
- {
- size, err := m.Peer.MarshalToSizedBuffer(dAtA[:i])
- if err != nil {
- return 0, err
- }
- i -= size
- i = encodeVarintCircuit(dAtA, i, uint64(size))
- }
- i--
- dAtA[i] = 0x12
- }
- if m.Type == nil {
- return 0, github_com_gogo_protobuf_proto.NewRequiredNotSetError("type")
- } else {
- i = encodeVarintCircuit(dAtA, i, uint64(*m.Type))
- i--
- dAtA[i] = 0x8
+func (x *Peer) GetId() []byte {
+ if x != nil {
+ return x.Id
}
- return len(dAtA) - i, nil
+ return nil
}
-func (m *Peer) Marshal() (dAtA []byte, err error) {
- size := m.Size()
- dAtA = make([]byte, size)
- n, err := m.MarshalToSizedBuffer(dAtA[:size])
- if err != nil {
- return nil, err
+func (x *Peer) GetAddrs() [][]byte {
+ if x != nil {
+ return x.Addrs
}
- return dAtA[:n], nil
+ return nil
}
-func (m *Peer) MarshalTo(dAtA []byte) (int, error) {
- size := m.Size()
- return m.MarshalToSizedBuffer(dAtA[:size])
-}
+type Reservation struct {
+ state protoimpl.MessageState
+ sizeCache protoimpl.SizeCache
+ unknownFields protoimpl.UnknownFields
-func (m *Peer) MarshalToSizedBuffer(dAtA []byte) (int, error) {
- i := len(dAtA)
- _ = i
- var l int
- _ = l
- if m.XXX_unrecognized != nil {
- i -= len(m.XXX_unrecognized)
- copy(dAtA[i:], m.XXX_unrecognized)
- }
- if len(m.Addrs) > 0 {
- for iNdEx := len(m.Addrs) - 1; iNdEx >= 0; iNdEx-- {
- i -= len(m.Addrs[iNdEx])
- copy(dAtA[i:], m.Addrs[iNdEx])
- i = encodeVarintCircuit(dAtA, i, uint64(len(m.Addrs[iNdEx])))
- i--
- dAtA[i] = 0x12
- }
- }
- if m.Id == nil {
- return 0, github_com_gogo_protobuf_proto.NewRequiredNotSetError("id")
- } else {
- i -= len(m.Id)
- copy(dAtA[i:], m.Id)
- i = encodeVarintCircuit(dAtA, i, uint64(len(m.Id)))
- i--
- dAtA[i] = 0xa
- }
- return len(dAtA) - i, nil
+ // This field is marked optional for backwards compatibility with proto2.
+ // Users should make sure to always set this.
+ Expire *uint64 `protobuf:"varint,1,opt,name=expire,proto3,oneof" json:"expire,omitempty"` // Unix expiration time (UTC)
+ Addrs [][]byte `protobuf:"bytes,2,rep,name=addrs,proto3" json:"addrs,omitempty"` // relay addrs for reserving peer
+ Voucher []byte `protobuf:"bytes,3,opt,name=voucher,proto3,oneof" json:"voucher,omitempty"` // reservation voucher
}
-func (m *Reservation) Marshal() (dAtA []byte, err error) {
- size := m.Size()
- dAtA = make([]byte, size)
- n, err := m.MarshalToSizedBuffer(dAtA[:size])
- if err != nil {
- return nil, err
+func (x *Reservation) Reset() {
+ *x = Reservation{}
+ if protoimpl.UnsafeEnabled {
+ mi := &file_pb_circuit_proto_msgTypes[3]
+ ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
+ ms.StoreMessageInfo(mi)
}
- return dAtA[:n], nil
}
-func (m *Reservation) MarshalTo(dAtA []byte) (int, error) {
- size := m.Size()
- return m.MarshalToSizedBuffer(dAtA[:size])
+func (x *Reservation) String() string {
+ return protoimpl.X.MessageStringOf(x)
}
-func (m *Reservation) MarshalToSizedBuffer(dAtA []byte) (int, error) {
- i := len(dAtA)
- _ = i
- var l int
- _ = l
- if m.XXX_unrecognized != nil {
- i -= len(m.XXX_unrecognized)
- copy(dAtA[i:], m.XXX_unrecognized)
- }
- if m.Voucher != nil {
- i -= len(m.Voucher)
- copy(dAtA[i:], m.Voucher)
- i = encodeVarintCircuit(dAtA, i, uint64(len(m.Voucher)))
- i--
- dAtA[i] = 0x1a
- }
- if len(m.Addrs) > 0 {
- for iNdEx := len(m.Addrs) - 1; iNdEx >= 0; iNdEx-- {
- i -= len(m.Addrs[iNdEx])
- copy(dAtA[i:], m.Addrs[iNdEx])
- i = encodeVarintCircuit(dAtA, i, uint64(len(m.Addrs[iNdEx])))
- i--
- dAtA[i] = 0x12
- }
- }
- if m.Expire == nil {
- return 0, github_com_gogo_protobuf_proto.NewRequiredNotSetError("expire")
- } else {
- i = encodeVarintCircuit(dAtA, i, uint64(*m.Expire))
- i--
- dAtA[i] = 0x8
- }
- return len(dAtA) - i, nil
-}
+func (*Reservation) ProtoMessage() {}
-func (m *Limit) Marshal() (dAtA []byte, err error) {
- size := m.Size()
- dAtA = make([]byte, size)
- n, err := m.MarshalToSizedBuffer(dAtA[:size])
- if err != nil {
- return nil, err
+func (x *Reservation) ProtoReflect() protoreflect.Message {
+ mi := &file_pb_circuit_proto_msgTypes[3]
+ if protoimpl.UnsafeEnabled && x != nil {
+ ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
+ if ms.LoadMessageInfo() == nil {
+ ms.StoreMessageInfo(mi)
+ }
+ return ms
}
- return dAtA[:n], nil
+ return mi.MessageOf(x)
}
-func (m *Limit) MarshalTo(dAtA []byte) (int, error) {
- size := m.Size()
- return m.MarshalToSizedBuffer(dAtA[:size])
+// Deprecated: Use Reservation.ProtoReflect.Descriptor instead.
+func (*Reservation) Descriptor() ([]byte, []int) {
+ return file_pb_circuit_proto_rawDescGZIP(), []int{3}
}
-func (m *Limit) MarshalToSizedBuffer(dAtA []byte) (int, error) {
- i := len(dAtA)
- _ = i
- var l int
- _ = l
- if m.XXX_unrecognized != nil {
- i -= len(m.XXX_unrecognized)
- copy(dAtA[i:], m.XXX_unrecognized)
+func (x *Reservation) GetExpire() uint64 {
+ if x != nil && x.Expire != nil {
+ return *x.Expire
}
- if m.Data != nil {
- i = encodeVarintCircuit(dAtA, i, uint64(*m.Data))
- i--
- dAtA[i] = 0x10
- }
- if m.Duration != nil {
- i = encodeVarintCircuit(dAtA, i, uint64(*m.Duration))
- i--
- dAtA[i] = 0x8
- }
- return len(dAtA) - i, nil
+ return 0
}
-func encodeVarintCircuit(dAtA []byte, offset int, v uint64) int {
- offset -= sovCircuit(v)
- base := offset
- for v >= 1<<7 {
- dAtA[offset] = uint8(v&0x7f | 0x80)
- v >>= 7
- offset++
- }
- dAtA[offset] = uint8(v)
- return base
-}
-func (m *HopMessage) Size() (n int) {
- if m == nil {
- return 0
+func (x *Reservation) GetAddrs() [][]byte {
+ if x != nil {
+ return x.Addrs
}
- var l int
- _ = l
- if m.Type != nil {
- n += 1 + sovCircuit(uint64(*m.Type))
- }
- if m.Peer != nil {
- l = m.Peer.Size()
- n += 1 + l + sovCircuit(uint64(l))
- }
- if m.Reservation != nil {
- l = m.Reservation.Size()
- n += 1 + l + sovCircuit(uint64(l))
- }
- if m.Limit != nil {
- l = m.Limit.Size()
- n += 1 + l + sovCircuit(uint64(l))
- }
- if m.Status != nil {
- n += 1 + sovCircuit(uint64(*m.Status))
- }
- if m.XXX_unrecognized != nil {
- n += len(m.XXX_unrecognized)
- }
- return n
+ return nil
}
-func (m *StopMessage) Size() (n int) {
- if m == nil {
- return 0
- }
- var l int
- _ = l
- if m.Type != nil {
- n += 1 + sovCircuit(uint64(*m.Type))
- }
- if m.Peer != nil {
- l = m.Peer.Size()
- n += 1 + l + sovCircuit(uint64(l))
- }
- if m.Limit != nil {
- l = m.Limit.Size()
- n += 1 + l + sovCircuit(uint64(l))
+func (x *Reservation) GetVoucher() []byte {
+ if x != nil {
+ return x.Voucher
}
- if m.Status != nil {
- n += 1 + sovCircuit(uint64(*m.Status))
- }
- if m.XXX_unrecognized != nil {
- n += len(m.XXX_unrecognized)
- }
- return n
+ return nil
}
-func (m *Peer) Size() (n int) {
- if m == nil {
- return 0
- }
- var l int
- _ = l
- if m.Id != nil {
- l = len(m.Id)
- n += 1 + l + sovCircuit(uint64(l))
- }
- if len(m.Addrs) > 0 {
- for _, b := range m.Addrs {
- l = len(b)
- n += 1 + l + sovCircuit(uint64(l))
- }
- }
- if m.XXX_unrecognized != nil {
- n += len(m.XXX_unrecognized)
- }
- return n
-}
+type Limit struct {
+ state protoimpl.MessageState
+ sizeCache protoimpl.SizeCache
+ unknownFields protoimpl.UnknownFields
-func (m *Reservation) Size() (n int) {
- if m == nil {
- return 0
- }
- var l int
- _ = l
- if m.Expire != nil {
- n += 1 + sovCircuit(uint64(*m.Expire))
- }
- if len(m.Addrs) > 0 {
- for _, b := range m.Addrs {
- l = len(b)
- n += 1 + l + sovCircuit(uint64(l))
- }
- }
- if m.Voucher != nil {
- l = len(m.Voucher)
- n += 1 + l + sovCircuit(uint64(l))
- }
- if m.XXX_unrecognized != nil {
- n += len(m.XXX_unrecognized)
- }
- return n
+ Duration *uint32 `protobuf:"varint,1,opt,name=duration,proto3,oneof" json:"duration,omitempty"` // seconds
+ Data *uint64 `protobuf:"varint,2,opt,name=data,proto3,oneof" json:"data,omitempty"` // bytes
}
-func (m *Limit) Size() (n int) {
- if m == nil {
- return 0
+func (x *Limit) Reset() {
+ *x = Limit{}
+ if protoimpl.UnsafeEnabled {
+ mi := &file_pb_circuit_proto_msgTypes[4]
+ ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
+ ms.StoreMessageInfo(mi)
}
- var l int
- _ = l
- if m.Duration != nil {
- n += 1 + sovCircuit(uint64(*m.Duration))
- }
- if m.Data != nil {
- n += 1 + sovCircuit(uint64(*m.Data))
- }
- if m.XXX_unrecognized != nil {
- n += len(m.XXX_unrecognized)
- }
- return n
}
-func sovCircuit(x uint64) (n int) {
- return (math_bits.Len64(x|1) + 6) / 7
+func (x *Limit) String() string {
+ return protoimpl.X.MessageStringOf(x)
}
-func sozCircuit(x uint64) (n int) {
- return sovCircuit(uint64((x << 1) ^ uint64((int64(x) >> 63))))
-}
-func (m *HopMessage) Unmarshal(dAtA []byte) error {
- var hasFields [1]uint64
- l := len(dAtA)
- iNdEx := 0
- for iNdEx < l {
- preIndex := iNdEx
- var wire uint64
- for shift := uint(0); ; shift += 7 {
- if shift >= 64 {
- return ErrIntOverflowCircuit
- }
- if iNdEx >= l {
- return io.ErrUnexpectedEOF
- }
- b := dAtA[iNdEx]
- iNdEx++
- wire |= uint64(b&0x7F) << shift
- if b < 0x80 {
- break
- }
- }
- fieldNum := int32(wire >> 3)
- wireType := int(wire & 0x7)
- if wireType == 4 {
- return fmt.Errorf("proto: HopMessage: wiretype end group for non-group")
- }
- if fieldNum <= 0 {
- return fmt.Errorf("proto: HopMessage: illegal tag %d (wire type %d)", fieldNum, wire)
- }
- switch fieldNum {
- case 1:
- if wireType != 0 {
- return fmt.Errorf("proto: wrong wireType = %d for field Type", wireType)
- }
- var v HopMessage_Type
- for shift := uint(0); ; shift += 7 {
- if shift >= 64 {
- return ErrIntOverflowCircuit
- }
- if iNdEx >= l {
- return io.ErrUnexpectedEOF
- }
- b := dAtA[iNdEx]
- iNdEx++
- v |= HopMessage_Type(b&0x7F) << shift
- if b < 0x80 {
- break
- }
- }
- m.Type = &v
- hasFields[0] |= uint64(0x00000001)
- case 2:
- if wireType != 2 {
- return fmt.Errorf("proto: wrong wireType = %d for field Peer", wireType)
- }
- var msglen int
- for shift := uint(0); ; shift += 7 {
- if shift >= 64 {
- return ErrIntOverflowCircuit
- }
- if iNdEx >= l {
- return io.ErrUnexpectedEOF
- }
- b := dAtA[iNdEx]
- iNdEx++
- msglen |= int(b&0x7F) << shift
- if b < 0x80 {
- break
- }
- }
- if msglen < 0 {
- return ErrInvalidLengthCircuit
- }
- postIndex := iNdEx + msglen
- if postIndex < 0 {
- return ErrInvalidLengthCircuit
- }
- if postIndex > l {
- return io.ErrUnexpectedEOF
- }
- if m.Peer == nil {
- m.Peer = &Peer{}
- }
- if err := m.Peer.Unmarshal(dAtA[iNdEx:postIndex]); err != nil {
- return err
- }
- iNdEx = postIndex
- case 3:
- if wireType != 2 {
- return fmt.Errorf("proto: wrong wireType = %d for field Reservation", wireType)
- }
- var msglen int
- for shift := uint(0); ; shift += 7 {
- if shift >= 64 {
- return ErrIntOverflowCircuit
- }
- if iNdEx >= l {
- return io.ErrUnexpectedEOF
- }
- b := dAtA[iNdEx]
- iNdEx++
- msglen |= int(b&0x7F) << shift
- if b < 0x80 {
- break
- }
- }
- if msglen < 0 {
- return ErrInvalidLengthCircuit
- }
- postIndex := iNdEx + msglen
- if postIndex < 0 {
- return ErrInvalidLengthCircuit
- }
- if postIndex > l {
- return io.ErrUnexpectedEOF
- }
- if m.Reservation == nil {
- m.Reservation = &Reservation{}
- }
- if err := m.Reservation.Unmarshal(dAtA[iNdEx:postIndex]); err != nil {
- return err
- }
- iNdEx = postIndex
- case 4:
- if wireType != 2 {
- return fmt.Errorf("proto: wrong wireType = %d for field Limit", wireType)
- }
- var msglen int
- for shift := uint(0); ; shift += 7 {
- if shift >= 64 {
- return ErrIntOverflowCircuit
- }
- if iNdEx >= l {
- return io.ErrUnexpectedEOF
- }
- b := dAtA[iNdEx]
- iNdEx++
- msglen |= int(b&0x7F) << shift
- if b < 0x80 {
- break
- }
- }
- if msglen < 0 {
- return ErrInvalidLengthCircuit
- }
- postIndex := iNdEx + msglen
- if postIndex < 0 {
- return ErrInvalidLengthCircuit
- }
- if postIndex > l {
- return io.ErrUnexpectedEOF
- }
- if m.Limit == nil {
- m.Limit = &Limit{}
- }
- if err := m.Limit.Unmarshal(dAtA[iNdEx:postIndex]); err != nil {
- return err
- }
- iNdEx = postIndex
- case 5:
- if wireType != 0 {
- return fmt.Errorf("proto: wrong wireType = %d for field Status", wireType)
- }
- var v Status
- for shift := uint(0); ; shift += 7 {
- if shift >= 64 {
- return ErrIntOverflowCircuit
- }
- if iNdEx >= l {
- return io.ErrUnexpectedEOF
- }
- b := dAtA[iNdEx]
- iNdEx++
- v |= Status(b&0x7F) << shift
- if b < 0x80 {
- break
- }
- }
- m.Status = &v
- default:
- iNdEx = preIndex
- skippy, err := skipCircuit(dAtA[iNdEx:])
- if err != nil {
- return err
- }
- if (skippy < 0) || (iNdEx+skippy) < 0 {
- return ErrInvalidLengthCircuit
- }
- if (iNdEx + skippy) > l {
- return io.ErrUnexpectedEOF
- }
- m.XXX_unrecognized = append(m.XXX_unrecognized, dAtA[iNdEx:iNdEx+skippy]...)
- iNdEx += skippy
- }
- }
- if hasFields[0]&uint64(0x00000001) == 0 {
- return github_com_gogo_protobuf_proto.NewRequiredNotSetError("type")
- }
- if iNdEx > l {
- return io.ErrUnexpectedEOF
- }
- return nil
-}
-func (m *StopMessage) Unmarshal(dAtA []byte) error {
- var hasFields [1]uint64
- l := len(dAtA)
- iNdEx := 0
- for iNdEx < l {
- preIndex := iNdEx
- var wire uint64
- for shift := uint(0); ; shift += 7 {
- if shift >= 64 {
- return ErrIntOverflowCircuit
- }
- if iNdEx >= l {
- return io.ErrUnexpectedEOF
- }
- b := dAtA[iNdEx]
- iNdEx++
- wire |= uint64(b&0x7F) << shift
- if b < 0x80 {
- break
- }
- }
- fieldNum := int32(wire >> 3)
- wireType := int(wire & 0x7)
- if wireType == 4 {
- return fmt.Errorf("proto: StopMessage: wiretype end group for non-group")
- }
- if fieldNum <= 0 {
- return fmt.Errorf("proto: StopMessage: illegal tag %d (wire type %d)", fieldNum, wire)
- }
- switch fieldNum {
- case 1:
- if wireType != 0 {
- return fmt.Errorf("proto: wrong wireType = %d for field Type", wireType)
- }
- var v StopMessage_Type
- for shift := uint(0); ; shift += 7 {
- if shift >= 64 {
- return ErrIntOverflowCircuit
- }
- if iNdEx >= l {
- return io.ErrUnexpectedEOF
- }
- b := dAtA[iNdEx]
- iNdEx++
- v |= StopMessage_Type(b&0x7F) << shift
- if b < 0x80 {
- break
- }
- }
- m.Type = &v
- hasFields[0] |= uint64(0x00000001)
- case 2:
- if wireType != 2 {
- return fmt.Errorf("proto: wrong wireType = %d for field Peer", wireType)
- }
- var msglen int
- for shift := uint(0); ; shift += 7 {
- if shift >= 64 {
- return ErrIntOverflowCircuit
- }
- if iNdEx >= l {
- return io.ErrUnexpectedEOF
- }
- b := dAtA[iNdEx]
- iNdEx++
- msglen |= int(b&0x7F) << shift
- if b < 0x80 {
- break
- }
- }
- if msglen < 0 {
- return ErrInvalidLengthCircuit
- }
- postIndex := iNdEx + msglen
- if postIndex < 0 {
- return ErrInvalidLengthCircuit
- }
- if postIndex > l {
- return io.ErrUnexpectedEOF
- }
- if m.Peer == nil {
- m.Peer = &Peer{}
- }
- if err := m.Peer.Unmarshal(dAtA[iNdEx:postIndex]); err != nil {
- return err
- }
- iNdEx = postIndex
- case 3:
- if wireType != 2 {
- return fmt.Errorf("proto: wrong wireType = %d for field Limit", wireType)
- }
- var msglen int
- for shift := uint(0); ; shift += 7 {
- if shift >= 64 {
- return ErrIntOverflowCircuit
- }
- if iNdEx >= l {
- return io.ErrUnexpectedEOF
- }
- b := dAtA[iNdEx]
- iNdEx++
- msglen |= int(b&0x7F) << shift
- if b < 0x80 {
- break
- }
- }
- if msglen < 0 {
- return ErrInvalidLengthCircuit
- }
- postIndex := iNdEx + msglen
- if postIndex < 0 {
- return ErrInvalidLengthCircuit
- }
- if postIndex > l {
- return io.ErrUnexpectedEOF
- }
- if m.Limit == nil {
- m.Limit = &Limit{}
- }
- if err := m.Limit.Unmarshal(dAtA[iNdEx:postIndex]); err != nil {
- return err
- }
- iNdEx = postIndex
- case 4:
- if wireType != 0 {
- return fmt.Errorf("proto: wrong wireType = %d for field Status", wireType)
- }
- var v Status
- for shift := uint(0); ; shift += 7 {
- if shift >= 64 {
- return ErrIntOverflowCircuit
- }
- if iNdEx >= l {
- return io.ErrUnexpectedEOF
- }
- b := dAtA[iNdEx]
- iNdEx++
- v |= Status(b&0x7F) << shift
- if b < 0x80 {
- break
- }
- }
- m.Status = &v
- default:
- iNdEx = preIndex
- skippy, err := skipCircuit(dAtA[iNdEx:])
- if err != nil {
- return err
- }
- if (skippy < 0) || (iNdEx+skippy) < 0 {
- return ErrInvalidLengthCircuit
- }
- if (iNdEx + skippy) > l {
- return io.ErrUnexpectedEOF
- }
- m.XXX_unrecognized = append(m.XXX_unrecognized, dAtA[iNdEx:iNdEx+skippy]...)
- iNdEx += skippy
- }
- }
- if hasFields[0]&uint64(0x00000001) == 0 {
- return github_com_gogo_protobuf_proto.NewRequiredNotSetError("type")
- }
+func (*Limit) ProtoMessage() {}
- if iNdEx > l {
- return io.ErrUnexpectedEOF
- }
- return nil
-}
-func (m *Peer) Unmarshal(dAtA []byte) error {
- var hasFields [1]uint64
- l := len(dAtA)
- iNdEx := 0
- for iNdEx < l {
- preIndex := iNdEx
- var wire uint64
- for shift := uint(0); ; shift += 7 {
- if shift >= 64 {
- return ErrIntOverflowCircuit
- }
- if iNdEx >= l {
- return io.ErrUnexpectedEOF
- }
- b := dAtA[iNdEx]
- iNdEx++
- wire |= uint64(b&0x7F) << shift
- if b < 0x80 {
- break
- }
- }
- fieldNum := int32(wire >> 3)
- wireType := int(wire & 0x7)
- if wireType == 4 {
- return fmt.Errorf("proto: Peer: wiretype end group for non-group")
- }
- if fieldNum <= 0 {
- return fmt.Errorf("proto: Peer: illegal tag %d (wire type %d)", fieldNum, wire)
- }
- switch fieldNum {
- case 1:
- if wireType != 2 {
- return fmt.Errorf("proto: wrong wireType = %d for field Id", wireType)
- }
- var byteLen int
- for shift := uint(0); ; shift += 7 {
- if shift >= 64 {
- return ErrIntOverflowCircuit
- }
- if iNdEx >= l {
- return io.ErrUnexpectedEOF
- }
- b := dAtA[iNdEx]
- iNdEx++
- byteLen |= int(b&0x7F) << shift
- if b < 0x80 {
- break
- }
- }
- if byteLen < 0 {
- return ErrInvalidLengthCircuit
- }
- postIndex := iNdEx + byteLen
- if postIndex < 0 {
- return ErrInvalidLengthCircuit
- }
- if postIndex > l {
- return io.ErrUnexpectedEOF
- }
- m.Id = append(m.Id[:0], dAtA[iNdEx:postIndex]...)
- if m.Id == nil {
- m.Id = []byte{}
- }
- iNdEx = postIndex
- hasFields[0] |= uint64(0x00000001)
- case 2:
- if wireType != 2 {
- return fmt.Errorf("proto: wrong wireType = %d for field Addrs", wireType)
- }
- var byteLen int
- for shift := uint(0); ; shift += 7 {
- if shift >= 64 {
- return ErrIntOverflowCircuit
- }
- if iNdEx >= l {
- return io.ErrUnexpectedEOF
- }
- b := dAtA[iNdEx]
- iNdEx++
- byteLen |= int(b&0x7F) << shift
- if b < 0x80 {
- break
- }
- }
- if byteLen < 0 {
- return ErrInvalidLengthCircuit
- }
- postIndex := iNdEx + byteLen
- if postIndex < 0 {
- return ErrInvalidLengthCircuit
- }
- if postIndex > l {
- return io.ErrUnexpectedEOF
- }
- m.Addrs = append(m.Addrs, make([]byte, postIndex-iNdEx))
- copy(m.Addrs[len(m.Addrs)-1], dAtA[iNdEx:postIndex])
- iNdEx = postIndex
- default:
- iNdEx = preIndex
- skippy, err := skipCircuit(dAtA[iNdEx:])
- if err != nil {
- return err
- }
- if (skippy < 0) || (iNdEx+skippy) < 0 {
- return ErrInvalidLengthCircuit
- }
- if (iNdEx + skippy) > l {
- return io.ErrUnexpectedEOF
- }
- m.XXX_unrecognized = append(m.XXX_unrecognized, dAtA[iNdEx:iNdEx+skippy]...)
- iNdEx += skippy
+func (x *Limit) ProtoReflect() protoreflect.Message {
+ mi := &file_pb_circuit_proto_msgTypes[4]
+ if protoimpl.UnsafeEnabled && x != nil {
+ ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
+ if ms.LoadMessageInfo() == nil {
+ ms.StoreMessageInfo(mi)
}
+ return ms
}
- if hasFields[0]&uint64(0x00000001) == 0 {
- return github_com_gogo_protobuf_proto.NewRequiredNotSetError("id")
- }
+ return mi.MessageOf(x)
+}
- if iNdEx > l {
- return io.ErrUnexpectedEOF
- }
- return nil
+// Deprecated: Use Limit.ProtoReflect.Descriptor instead.
+func (*Limit) Descriptor() ([]byte, []int) {
+ return file_pb_circuit_proto_rawDescGZIP(), []int{4}
}
-func (m *Reservation) Unmarshal(dAtA []byte) error {
- var hasFields [1]uint64
- l := len(dAtA)
- iNdEx := 0
- for iNdEx < l {
- preIndex := iNdEx
- var wire uint64
- for shift := uint(0); ; shift += 7 {
- if shift >= 64 {
- return ErrIntOverflowCircuit
- }
- if iNdEx >= l {
- return io.ErrUnexpectedEOF
- }
- b := dAtA[iNdEx]
- iNdEx++
- wire |= uint64(b&0x7F) << shift
- if b < 0x80 {
- break
- }
- }
- fieldNum := int32(wire >> 3)
- wireType := int(wire & 0x7)
- if wireType == 4 {
- return fmt.Errorf("proto: Reservation: wiretype end group for non-group")
- }
- if fieldNum <= 0 {
- return fmt.Errorf("proto: Reservation: illegal tag %d (wire type %d)", fieldNum, wire)
- }
- switch fieldNum {
- case 1:
- if wireType != 0 {
- return fmt.Errorf("proto: wrong wireType = %d for field Expire", wireType)
- }
- var v uint64
- for shift := uint(0); ; shift += 7 {
- if shift >= 64 {
- return ErrIntOverflowCircuit
- }
- if iNdEx >= l {
- return io.ErrUnexpectedEOF
- }
- b := dAtA[iNdEx]
- iNdEx++
- v |= uint64(b&0x7F) << shift
- if b < 0x80 {
- break
- }
- }
- m.Expire = &v
- hasFields[0] |= uint64(0x00000001)
- case 2:
- if wireType != 2 {
- return fmt.Errorf("proto: wrong wireType = %d for field Addrs", wireType)
- }
- var byteLen int
- for shift := uint(0); ; shift += 7 {
- if shift >= 64 {
- return ErrIntOverflowCircuit
- }
- if iNdEx >= l {
- return io.ErrUnexpectedEOF
- }
- b := dAtA[iNdEx]
- iNdEx++
- byteLen |= int(b&0x7F) << shift
- if b < 0x80 {
- break
- }
- }
- if byteLen < 0 {
- return ErrInvalidLengthCircuit
- }
- postIndex := iNdEx + byteLen
- if postIndex < 0 {
- return ErrInvalidLengthCircuit
- }
- if postIndex > l {
- return io.ErrUnexpectedEOF
- }
- m.Addrs = append(m.Addrs, make([]byte, postIndex-iNdEx))
- copy(m.Addrs[len(m.Addrs)-1], dAtA[iNdEx:postIndex])
- iNdEx = postIndex
- case 3:
- if wireType != 2 {
- return fmt.Errorf("proto: wrong wireType = %d for field Voucher", wireType)
- }
- var byteLen int
- for shift := uint(0); ; shift += 7 {
- if shift >= 64 {
- return ErrIntOverflowCircuit
- }
- if iNdEx >= l {
- return io.ErrUnexpectedEOF
- }
- b := dAtA[iNdEx]
- iNdEx++
- byteLen |= int(b&0x7F) << shift
- if b < 0x80 {
- break
- }
- }
- if byteLen < 0 {
- return ErrInvalidLengthCircuit
- }
- postIndex := iNdEx + byteLen
- if postIndex < 0 {
- return ErrInvalidLengthCircuit
- }
- if postIndex > l {
- return io.ErrUnexpectedEOF
- }
- m.Voucher = append(m.Voucher[:0], dAtA[iNdEx:postIndex]...)
- if m.Voucher == nil {
- m.Voucher = []byte{}
- }
- iNdEx = postIndex
- default:
- iNdEx = preIndex
- skippy, err := skipCircuit(dAtA[iNdEx:])
- if err != nil {
- return err
- }
- if (skippy < 0) || (iNdEx+skippy) < 0 {
- return ErrInvalidLengthCircuit
- }
- if (iNdEx + skippy) > l {
- return io.ErrUnexpectedEOF
- }
- m.XXX_unrecognized = append(m.XXX_unrecognized, dAtA[iNdEx:iNdEx+skippy]...)
- iNdEx += skippy
- }
- }
- if hasFields[0]&uint64(0x00000001) == 0 {
- return github_com_gogo_protobuf_proto.NewRequiredNotSetError("expire")
- }
- if iNdEx > l {
- return io.ErrUnexpectedEOF
+func (x *Limit) GetDuration() uint32 {
+ if x != nil && x.Duration != nil {
+ return *x.Duration
}
- return nil
+ return 0
}
-func (m *Limit) Unmarshal(dAtA []byte) error {
- l := len(dAtA)
- iNdEx := 0
- for iNdEx < l {
- preIndex := iNdEx
- var wire uint64
- for shift := uint(0); ; shift += 7 {
- if shift >= 64 {
- return ErrIntOverflowCircuit
- }
- if iNdEx >= l {
- return io.ErrUnexpectedEOF
- }
- b := dAtA[iNdEx]
- iNdEx++
- wire |= uint64(b&0x7F) << shift
- if b < 0x80 {
- break
- }
- }
- fieldNum := int32(wire >> 3)
- wireType := int(wire & 0x7)
- if wireType == 4 {
- return fmt.Errorf("proto: Limit: wiretype end group for non-group")
- }
- if fieldNum <= 0 {
- return fmt.Errorf("proto: Limit: illegal tag %d (wire type %d)", fieldNum, wire)
- }
- switch fieldNum {
- case 1:
- if wireType != 0 {
- return fmt.Errorf("proto: wrong wireType = %d for field Duration", wireType)
- }
- var v uint32
- for shift := uint(0); ; shift += 7 {
- if shift >= 64 {
- return ErrIntOverflowCircuit
- }
- if iNdEx >= l {
- return io.ErrUnexpectedEOF
- }
- b := dAtA[iNdEx]
- iNdEx++
- v |= uint32(b&0x7F) << shift
- if b < 0x80 {
- break
- }
- }
- m.Duration = &v
- case 2:
- if wireType != 0 {
- return fmt.Errorf("proto: wrong wireType = %d for field Data", wireType)
- }
- var v uint64
- for shift := uint(0); ; shift += 7 {
- if shift >= 64 {
- return ErrIntOverflowCircuit
- }
- if iNdEx >= l {
- return io.ErrUnexpectedEOF
- }
- b := dAtA[iNdEx]
- iNdEx++
- v |= uint64(b&0x7F) << shift
- if b < 0x80 {
- break
- }
- }
- m.Data = &v
- default:
- iNdEx = preIndex
- skippy, err := skipCircuit(dAtA[iNdEx:])
- if err != nil {
- return err
- }
- if (skippy < 0) || (iNdEx+skippy) < 0 {
- return ErrInvalidLengthCircuit
- }
- if (iNdEx + skippy) > l {
- return io.ErrUnexpectedEOF
- }
- m.XXX_unrecognized = append(m.XXX_unrecognized, dAtA[iNdEx:iNdEx+skippy]...)
- iNdEx += skippy
- }
- }
- if iNdEx > l {
- return io.ErrUnexpectedEOF
+func (x *Limit) GetData() uint64 {
+ if x != nil && x.Data != nil {
+ return *x.Data
}
- return nil
+ return 0
}
-func skipCircuit(dAtA []byte) (n int, err error) {
- l := len(dAtA)
- iNdEx := 0
- depth := 0
- for iNdEx < l {
- var wire uint64
- for shift := uint(0); ; shift += 7 {
- if shift >= 64 {
- return 0, ErrIntOverflowCircuit
- }
- if iNdEx >= l {
- return 0, io.ErrUnexpectedEOF
- }
- b := dAtA[iNdEx]
- iNdEx++
- wire |= (uint64(b) & 0x7F) << shift
- if b < 0x80 {
- break
- }
- }
- wireType := int(wire & 0x7)
- switch wireType {
- case 0:
- for shift := uint(0); ; shift += 7 {
- if shift >= 64 {
- return 0, ErrIntOverflowCircuit
- }
- if iNdEx >= l {
- return 0, io.ErrUnexpectedEOF
- }
- iNdEx++
- if dAtA[iNdEx-1] < 0x80 {
- break
- }
- }
- case 1:
- iNdEx += 8
- case 2:
- var length int
- for shift := uint(0); ; shift += 7 {
- if shift >= 64 {
- return 0, ErrIntOverflowCircuit
- }
- if iNdEx >= l {
- return 0, io.ErrUnexpectedEOF
- }
- b := dAtA[iNdEx]
- iNdEx++
- length |= (int(b) & 0x7F) << shift
- if b < 0x80 {
- break
- }
- }
- if length < 0 {
- return 0, ErrInvalidLengthCircuit
- }
- iNdEx += length
- case 3:
- depth++
- case 4:
- if depth == 0 {
- return 0, ErrUnexpectedEndOfGroupCircuit
- }
- depth--
- case 5:
- iNdEx += 4
- default:
- return 0, fmt.Errorf("proto: illegal wireType %d", wireType)
- }
- if iNdEx < 0 {
- return 0, ErrInvalidLengthCircuit
- }
- if depth == 0 {
- return iNdEx, nil
- }
- }
- return 0, io.ErrUnexpectedEOF
+
+var File_pb_circuit_proto protoreflect.FileDescriptor
+
+var file_pb_circuit_proto_rawDesc = []byte{
+ 0x0a, 0x10, 0x70, 0x62, 0x2f, 0x63, 0x69, 0x72, 0x63, 0x75, 0x69, 0x74, 0x2e, 0x70, 0x72, 0x6f,
+ 0x74, 0x6f, 0x12, 0x0a, 0x63, 0x69, 0x72, 0x63, 0x75, 0x69, 0x74, 0x2e, 0x70, 0x62, 0x22, 0xf1,
+ 0x02, 0x0a, 0x0a, 0x48, 0x6f, 0x70, 0x4d, 0x65, 0x73, 0x73, 0x61, 0x67, 0x65, 0x12, 0x34, 0x0a,
+ 0x04, 0x74, 0x79, 0x70, 0x65, 0x18, 0x01, 0x20, 0x01, 0x28, 0x0e, 0x32, 0x1b, 0x2e, 0x63, 0x69,
+ 0x72, 0x63, 0x75, 0x69, 0x74, 0x2e, 0x70, 0x62, 0x2e, 0x48, 0x6f, 0x70, 0x4d, 0x65, 0x73, 0x73,
+ 0x61, 0x67, 0x65, 0x2e, 0x54, 0x79, 0x70, 0x65, 0x48, 0x00, 0x52, 0x04, 0x74, 0x79, 0x70, 0x65,
+ 0x88, 0x01, 0x01, 0x12, 0x29, 0x0a, 0x04, 0x70, 0x65, 0x65, 0x72, 0x18, 0x02, 0x20, 0x01, 0x28,
+ 0x0b, 0x32, 0x10, 0x2e, 0x63, 0x69, 0x72, 0x63, 0x75, 0x69, 0x74, 0x2e, 0x70, 0x62, 0x2e, 0x50,
+ 0x65, 0x65, 0x72, 0x48, 0x01, 0x52, 0x04, 0x70, 0x65, 0x65, 0x72, 0x88, 0x01, 0x01, 0x12, 0x3e,
+ 0x0a, 0x0b, 0x72, 0x65, 0x73, 0x65, 0x72, 0x76, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x18, 0x03, 0x20,
+ 0x01, 0x28, 0x0b, 0x32, 0x17, 0x2e, 0x63, 0x69, 0x72, 0x63, 0x75, 0x69, 0x74, 0x2e, 0x70, 0x62,
+ 0x2e, 0x52, 0x65, 0x73, 0x65, 0x72, 0x76, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x48, 0x02, 0x52, 0x0b,
+ 0x72, 0x65, 0x73, 0x65, 0x72, 0x76, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x88, 0x01, 0x01, 0x12, 0x2c,
+ 0x0a, 0x05, 0x6c, 0x69, 0x6d, 0x69, 0x74, 0x18, 0x04, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x11, 0x2e,
+ 0x63, 0x69, 0x72, 0x63, 0x75, 0x69, 0x74, 0x2e, 0x70, 0x62, 0x2e, 0x4c, 0x69, 0x6d, 0x69, 0x74,
+ 0x48, 0x03, 0x52, 0x05, 0x6c, 0x69, 0x6d, 0x69, 0x74, 0x88, 0x01, 0x01, 0x12, 0x2f, 0x0a, 0x06,
+ 0x73, 0x74, 0x61, 0x74, 0x75, 0x73, 0x18, 0x05, 0x20, 0x01, 0x28, 0x0e, 0x32, 0x12, 0x2e, 0x63,
+ 0x69, 0x72, 0x63, 0x75, 0x69, 0x74, 0x2e, 0x70, 0x62, 0x2e, 0x53, 0x74, 0x61, 0x74, 0x75, 0x73,
+ 0x48, 0x04, 0x52, 0x06, 0x73, 0x74, 0x61, 0x74, 0x75, 0x73, 0x88, 0x01, 0x01, 0x22, 0x2c, 0x0a,
+ 0x04, 0x54, 0x79, 0x70, 0x65, 0x12, 0x0b, 0x0a, 0x07, 0x52, 0x45, 0x53, 0x45, 0x52, 0x56, 0x45,
+ 0x10, 0x00, 0x12, 0x0b, 0x0a, 0x07, 0x43, 0x4f, 0x4e, 0x4e, 0x45, 0x43, 0x54, 0x10, 0x01, 0x12,
+ 0x0a, 0x0a, 0x06, 0x53, 0x54, 0x41, 0x54, 0x55, 0x53, 0x10, 0x02, 0x42, 0x07, 0x0a, 0x05, 0x5f,
+ 0x74, 0x79, 0x70, 0x65, 0x42, 0x07, 0x0a, 0x05, 0x5f, 0x70, 0x65, 0x65, 0x72, 0x42, 0x0e, 0x0a,
+ 0x0c, 0x5f, 0x72, 0x65, 0x73, 0x65, 0x72, 0x76, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x42, 0x08, 0x0a,
+ 0x06, 0x5f, 0x6c, 0x69, 0x6d, 0x69, 0x74, 0x42, 0x09, 0x0a, 0x07, 0x5f, 0x73, 0x74, 0x61, 0x74,
+ 0x75, 0x73, 0x22, 0x96, 0x02, 0x0a, 0x0b, 0x53, 0x74, 0x6f, 0x70, 0x4d, 0x65, 0x73, 0x73, 0x61,
+ 0x67, 0x65, 0x12, 0x35, 0x0a, 0x04, 0x74, 0x79, 0x70, 0x65, 0x18, 0x01, 0x20, 0x01, 0x28, 0x0e,
+ 0x32, 0x1c, 0x2e, 0x63, 0x69, 0x72, 0x63, 0x75, 0x69, 0x74, 0x2e, 0x70, 0x62, 0x2e, 0x53, 0x74,
+ 0x6f, 0x70, 0x4d, 0x65, 0x73, 0x73, 0x61, 0x67, 0x65, 0x2e, 0x54, 0x79, 0x70, 0x65, 0x48, 0x00,
+ 0x52, 0x04, 0x74, 0x79, 0x70, 0x65, 0x88, 0x01, 0x01, 0x12, 0x29, 0x0a, 0x04, 0x70, 0x65, 0x65,
+ 0x72, 0x18, 0x02, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x10, 0x2e, 0x63, 0x69, 0x72, 0x63, 0x75, 0x69,
+ 0x74, 0x2e, 0x70, 0x62, 0x2e, 0x50, 0x65, 0x65, 0x72, 0x48, 0x01, 0x52, 0x04, 0x70, 0x65, 0x65,
+ 0x72, 0x88, 0x01, 0x01, 0x12, 0x2c, 0x0a, 0x05, 0x6c, 0x69, 0x6d, 0x69, 0x74, 0x18, 0x03, 0x20,
+ 0x01, 0x28, 0x0b, 0x32, 0x11, 0x2e, 0x63, 0x69, 0x72, 0x63, 0x75, 0x69, 0x74, 0x2e, 0x70, 0x62,
+ 0x2e, 0x4c, 0x69, 0x6d, 0x69, 0x74, 0x48, 0x02, 0x52, 0x05, 0x6c, 0x69, 0x6d, 0x69, 0x74, 0x88,
+ 0x01, 0x01, 0x12, 0x2f, 0x0a, 0x06, 0x73, 0x74, 0x61, 0x74, 0x75, 0x73, 0x18, 0x04, 0x20, 0x01,
+ 0x28, 0x0e, 0x32, 0x12, 0x2e, 0x63, 0x69, 0x72, 0x63, 0x75, 0x69, 0x74, 0x2e, 0x70, 0x62, 0x2e,
+ 0x53, 0x74, 0x61, 0x74, 0x75, 0x73, 0x48, 0x03, 0x52, 0x06, 0x73, 0x74, 0x61, 0x74, 0x75, 0x73,
+ 0x88, 0x01, 0x01, 0x22, 0x1f, 0x0a, 0x04, 0x54, 0x79, 0x70, 0x65, 0x12, 0x0b, 0x0a, 0x07, 0x43,
+ 0x4f, 0x4e, 0x4e, 0x45, 0x43, 0x54, 0x10, 0x00, 0x12, 0x0a, 0x0a, 0x06, 0x53, 0x54, 0x41, 0x54,
+ 0x55, 0x53, 0x10, 0x01, 0x42, 0x07, 0x0a, 0x05, 0x5f, 0x74, 0x79, 0x70, 0x65, 0x42, 0x07, 0x0a,
+ 0x05, 0x5f, 0x70, 0x65, 0x65, 0x72, 0x42, 0x08, 0x0a, 0x06, 0x5f, 0x6c, 0x69, 0x6d, 0x69, 0x74,
+ 0x42, 0x09, 0x0a, 0x07, 0x5f, 0x73, 0x74, 0x61, 0x74, 0x75, 0x73, 0x22, 0x38, 0x0a, 0x04, 0x50,
+ 0x65, 0x65, 0x72, 0x12, 0x13, 0x0a, 0x02, 0x69, 0x64, 0x18, 0x01, 0x20, 0x01, 0x28, 0x0c, 0x48,
+ 0x00, 0x52, 0x02, 0x69, 0x64, 0x88, 0x01, 0x01, 0x12, 0x14, 0x0a, 0x05, 0x61, 0x64, 0x64, 0x72,
+ 0x73, 0x18, 0x02, 0x20, 0x03, 0x28, 0x0c, 0x52, 0x05, 0x61, 0x64, 0x64, 0x72, 0x73, 0x42, 0x05,
+ 0x0a, 0x03, 0x5f, 0x69, 0x64, 0x22, 0x76, 0x0a, 0x0b, 0x52, 0x65, 0x73, 0x65, 0x72, 0x76, 0x61,
+ 0x74, 0x69, 0x6f, 0x6e, 0x12, 0x1b, 0x0a, 0x06, 0x65, 0x78, 0x70, 0x69, 0x72, 0x65, 0x18, 0x01,
+ 0x20, 0x01, 0x28, 0x04, 0x48, 0x00, 0x52, 0x06, 0x65, 0x78, 0x70, 0x69, 0x72, 0x65, 0x88, 0x01,
+ 0x01, 0x12, 0x14, 0x0a, 0x05, 0x61, 0x64, 0x64, 0x72, 0x73, 0x18, 0x02, 0x20, 0x03, 0x28, 0x0c,
+ 0x52, 0x05, 0x61, 0x64, 0x64, 0x72, 0x73, 0x12, 0x1d, 0x0a, 0x07, 0x76, 0x6f, 0x75, 0x63, 0x68,
+ 0x65, 0x72, 0x18, 0x03, 0x20, 0x01, 0x28, 0x0c, 0x48, 0x01, 0x52, 0x07, 0x76, 0x6f, 0x75, 0x63,
+ 0x68, 0x65, 0x72, 0x88, 0x01, 0x01, 0x42, 0x09, 0x0a, 0x07, 0x5f, 0x65, 0x78, 0x70, 0x69, 0x72,
+ 0x65, 0x42, 0x0a, 0x0a, 0x08, 0x5f, 0x76, 0x6f, 0x75, 0x63, 0x68, 0x65, 0x72, 0x22, 0x57, 0x0a,
+ 0x05, 0x4c, 0x69, 0x6d, 0x69, 0x74, 0x12, 0x1f, 0x0a, 0x08, 0x64, 0x75, 0x72, 0x61, 0x74, 0x69,
+ 0x6f, 0x6e, 0x18, 0x01, 0x20, 0x01, 0x28, 0x0d, 0x48, 0x00, 0x52, 0x08, 0x64, 0x75, 0x72, 0x61,
+ 0x74, 0x69, 0x6f, 0x6e, 0x88, 0x01, 0x01, 0x12, 0x17, 0x0a, 0x04, 0x64, 0x61, 0x74, 0x61, 0x18,
+ 0x02, 0x20, 0x01, 0x28, 0x04, 0x48, 0x01, 0x52, 0x04, 0x64, 0x61, 0x74, 0x61, 0x88, 0x01, 0x01,
+ 0x42, 0x0b, 0x0a, 0x09, 0x5f, 0x64, 0x75, 0x72, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x42, 0x07, 0x0a,
+ 0x05, 0x5f, 0x64, 0x61, 0x74, 0x61, 0x2a, 0xca, 0x01, 0x0a, 0x06, 0x53, 0x74, 0x61, 0x74, 0x75,
+ 0x73, 0x12, 0x0a, 0x0a, 0x06, 0x55, 0x4e, 0x55, 0x53, 0x45, 0x44, 0x10, 0x00, 0x12, 0x06, 0x0a,
+ 0x02, 0x4f, 0x4b, 0x10, 0x64, 0x12, 0x18, 0x0a, 0x13, 0x52, 0x45, 0x53, 0x45, 0x52, 0x56, 0x41,
+ 0x54, 0x49, 0x4f, 0x4e, 0x5f, 0x52, 0x45, 0x46, 0x55, 0x53, 0x45, 0x44, 0x10, 0xc8, 0x01, 0x12,
+ 0x1c, 0x0a, 0x17, 0x52, 0x45, 0x53, 0x4f, 0x55, 0x52, 0x43, 0x45, 0x5f, 0x4c, 0x49, 0x4d, 0x49,
+ 0x54, 0x5f, 0x45, 0x58, 0x43, 0x45, 0x45, 0x44, 0x45, 0x44, 0x10, 0xc9, 0x01, 0x12, 0x16, 0x0a,
+ 0x11, 0x50, 0x45, 0x52, 0x4d, 0x49, 0x53, 0x53, 0x49, 0x4f, 0x4e, 0x5f, 0x44, 0x45, 0x4e, 0x49,
+ 0x45, 0x44, 0x10, 0xca, 0x01, 0x12, 0x16, 0x0a, 0x11, 0x43, 0x4f, 0x4e, 0x4e, 0x45, 0x43, 0x54,
+ 0x49, 0x4f, 0x4e, 0x5f, 0x46, 0x41, 0x49, 0x4c, 0x45, 0x44, 0x10, 0xcb, 0x01, 0x12, 0x13, 0x0a,
+ 0x0e, 0x4e, 0x4f, 0x5f, 0x52, 0x45, 0x53, 0x45, 0x52, 0x56, 0x41, 0x54, 0x49, 0x4f, 0x4e, 0x10,
+ 0xcc, 0x01, 0x12, 0x16, 0x0a, 0x11, 0x4d, 0x41, 0x4c, 0x46, 0x4f, 0x52, 0x4d, 0x45, 0x44, 0x5f,
+ 0x4d, 0x45, 0x53, 0x53, 0x41, 0x47, 0x45, 0x10, 0x90, 0x03, 0x12, 0x17, 0x0a, 0x12, 0x55, 0x4e,
+ 0x45, 0x58, 0x50, 0x45, 0x43, 0x54, 0x45, 0x44, 0x5f, 0x4d, 0x45, 0x53, 0x53, 0x41, 0x47, 0x45,
+ 0x10, 0x91, 0x03, 0x62, 0x06, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x33,
}
var (
- ErrInvalidLengthCircuit = fmt.Errorf("proto: negative length found during unmarshaling")
- ErrIntOverflowCircuit = fmt.Errorf("proto: integer overflow")
- ErrUnexpectedEndOfGroupCircuit = fmt.Errorf("proto: unexpected end of group")
+ file_pb_circuit_proto_rawDescOnce sync.Once
+ file_pb_circuit_proto_rawDescData = file_pb_circuit_proto_rawDesc
)
+
+func file_pb_circuit_proto_rawDescGZIP() []byte {
+ file_pb_circuit_proto_rawDescOnce.Do(func() {
+ file_pb_circuit_proto_rawDescData = protoimpl.X.CompressGZIP(file_pb_circuit_proto_rawDescData)
+ })
+ return file_pb_circuit_proto_rawDescData
+}
+
+var file_pb_circuit_proto_enumTypes = make([]protoimpl.EnumInfo, 3)
+var file_pb_circuit_proto_msgTypes = make([]protoimpl.MessageInfo, 5)
+var file_pb_circuit_proto_goTypes = []interface{}{
+ (Status)(0), // 0: circuit.pb.Status
+ (HopMessage_Type)(0), // 1: circuit.pb.HopMessage.Type
+ (StopMessage_Type)(0), // 2: circuit.pb.StopMessage.Type
+ (*HopMessage)(nil), // 3: circuit.pb.HopMessage
+ (*StopMessage)(nil), // 4: circuit.pb.StopMessage
+ (*Peer)(nil), // 5: circuit.pb.Peer
+ (*Reservation)(nil), // 6: circuit.pb.Reservation
+ (*Limit)(nil), // 7: circuit.pb.Limit
+}
+var file_pb_circuit_proto_depIdxs = []int32{
+ 1, // 0: circuit.pb.HopMessage.type:type_name -> circuit.pb.HopMessage.Type
+ 5, // 1: circuit.pb.HopMessage.peer:type_name -> circuit.pb.Peer
+ 6, // 2: circuit.pb.HopMessage.reservation:type_name -> circuit.pb.Reservation
+ 7, // 3: circuit.pb.HopMessage.limit:type_name -> circuit.pb.Limit
+ 0, // 4: circuit.pb.HopMessage.status:type_name -> circuit.pb.Status
+ 2, // 5: circuit.pb.StopMessage.type:type_name -> circuit.pb.StopMessage.Type
+ 5, // 6: circuit.pb.StopMessage.peer:type_name -> circuit.pb.Peer
+ 7, // 7: circuit.pb.StopMessage.limit:type_name -> circuit.pb.Limit
+ 0, // 8: circuit.pb.StopMessage.status:type_name -> circuit.pb.Status
+ 9, // [9:9] is the sub-list for method output_type
+ 9, // [9:9] is the sub-list for method input_type
+ 9, // [9:9] is the sub-list for extension type_name
+ 9, // [9:9] is the sub-list for extension extendee
+ 0, // [0:9] is the sub-list for field type_name
+}
+
+func init() { file_pb_circuit_proto_init() }
+func file_pb_circuit_proto_init() {
+ if File_pb_circuit_proto != nil {
+ return
+ }
+ if !protoimpl.UnsafeEnabled {
+ file_pb_circuit_proto_msgTypes[0].Exporter = func(v interface{}, i int) interface{} {
+ switch v := v.(*HopMessage); i {
+ case 0:
+ return &v.state
+ case 1:
+ return &v.sizeCache
+ case 2:
+ return &v.unknownFields
+ default:
+ return nil
+ }
+ }
+ file_pb_circuit_proto_msgTypes[1].Exporter = func(v interface{}, i int) interface{} {
+ switch v := v.(*StopMessage); i {
+ case 0:
+ return &v.state
+ case 1:
+ return &v.sizeCache
+ case 2:
+ return &v.unknownFields
+ default:
+ return nil
+ }
+ }
+ file_pb_circuit_proto_msgTypes[2].Exporter = func(v interface{}, i int) interface{} {
+ switch v := v.(*Peer); i {
+ case 0:
+ return &v.state
+ case 1:
+ return &v.sizeCache
+ case 2:
+ return &v.unknownFields
+ default:
+ return nil
+ }
+ }
+ file_pb_circuit_proto_msgTypes[3].Exporter = func(v interface{}, i int) interface{} {
+ switch v := v.(*Reservation); i {
+ case 0:
+ return &v.state
+ case 1:
+ return &v.sizeCache
+ case 2:
+ return &v.unknownFields
+ default:
+ return nil
+ }
+ }
+ file_pb_circuit_proto_msgTypes[4].Exporter = func(v interface{}, i int) interface{} {
+ switch v := v.(*Limit); i {
+ case 0:
+ return &v.state
+ case 1:
+ return &v.sizeCache
+ case 2:
+ return &v.unknownFields
+ default:
+ return nil
+ }
+ }
+ }
+ file_pb_circuit_proto_msgTypes[0].OneofWrappers = []interface{}{}
+ file_pb_circuit_proto_msgTypes[1].OneofWrappers = []interface{}{}
+ file_pb_circuit_proto_msgTypes[2].OneofWrappers = []interface{}{}
+ file_pb_circuit_proto_msgTypes[3].OneofWrappers = []interface{}{}
+ file_pb_circuit_proto_msgTypes[4].OneofWrappers = []interface{}{}
+ type x struct{}
+ out := protoimpl.TypeBuilder{
+ File: protoimpl.DescBuilder{
+ GoPackagePath: reflect.TypeOf(x{}).PkgPath(),
+ RawDescriptor: file_pb_circuit_proto_rawDesc,
+ NumEnums: 3,
+ NumMessages: 5,
+ NumExtensions: 0,
+ NumServices: 0,
+ },
+ GoTypes: file_pb_circuit_proto_goTypes,
+ DependencyIndexes: file_pb_circuit_proto_depIdxs,
+ EnumInfos: file_pb_circuit_proto_enumTypes,
+ MessageInfos: file_pb_circuit_proto_msgTypes,
+ }.Build()
+ File_pb_circuit_proto = out.File
+ file_pb_circuit_proto_rawDesc = nil
+ file_pb_circuit_proto_goTypes = nil
+ file_pb_circuit_proto_depIdxs = nil
+}
diff --git a/vendor/github.com/libp2p/go-libp2p/p2p/protocol/circuitv2/pb/circuit.proto b/vendor/github.com/libp2p/go-libp2p/p2p/protocol/circuitv2/pb/circuit.proto
index 370566f4b..b9b65fa05 100644
--- a/vendor/github.com/libp2p/go-libp2p/p2p/protocol/circuitv2/pb/circuit.proto
+++ b/vendor/github.com/libp2p/go-libp2p/p2p/protocol/circuitv2/pb/circuit.proto
@@ -1,4 +1,4 @@
-syntax = "proto2";
+syntax = "proto3";
package circuit.pb;
@@ -9,7 +9,9 @@ message HopMessage {
STATUS = 2;
}
- required Type type = 1;
+ // This field is marked optional for backwards compatibility with proto2.
+ // Users should make sure to always set this.
+ optional Type type = 1;
optional Peer peer = 2;
optional Reservation reservation = 3;
@@ -24,7 +26,9 @@ message StopMessage {
STATUS = 1;
}
- required Type type = 1;
+ // This field is marked optional for backwards compatibility with proto2.
+ // Users should make sure to always set this.
+ optional Type type = 1;
optional Peer peer = 2;
optional Limit limit = 3;
@@ -33,12 +37,16 @@ message StopMessage {
}
message Peer {
- required bytes id = 1;
+ // This field is marked optional for backwards compatibility with proto2.
+ // Users should make sure to always set this.
+ optional bytes id = 1;
repeated bytes addrs = 2;
}
message Reservation {
- required uint64 expire = 1; // Unix expiration time (UTC)
+ // This field is marked optional for backwards compatibility with proto2.
+ // Users should make sure to always set this.
+ optional uint64 expire = 1; // Unix expiration time (UTC)
repeated bytes addrs = 2; // relay addrs for reserving peer
optional bytes voucher = 3; // reservation voucher
}
@@ -49,6 +57,8 @@ message Limit {
}
enum Status {
+ // zero value field required for proto3 compatibility
+ UNUSED = 0;
OK = 100;
RESERVATION_REFUSED = 200;
RESOURCE_LIMIT_EXCEEDED = 201;
@@ -57,4 +67,4 @@ enum Status {
NO_RESERVATION = 204;
MALFORMED_MESSAGE = 400;
UNEXPECTED_MESSAGE = 401;
-}
+}
\ No newline at end of file
diff --git a/vendor/github.com/libp2p/go-libp2p/p2p/protocol/circuitv2/pb/voucher.pb.go b/vendor/github.com/libp2p/go-libp2p/p2p/protocol/circuitv2/pb/voucher.pb.go
index 6fed0082e..917b53702 100644
--- a/vendor/github.com/libp2p/go-libp2p/p2p/protocol/circuitv2/pb/voucher.pb.go
+++ b/vendor/github.com/libp2p/go-libp2p/p2p/protocol/circuitv2/pb/voucher.pb.go
@@ -1,438 +1,167 @@
-// Code generated by protoc-gen-gogo. DO NOT EDIT.
-// source: voucher.proto
+// Code generated by protoc-gen-go. DO NOT EDIT.
+// versions:
+// protoc-gen-go v1.30.0
+// protoc v3.21.12
+// source: pb/voucher.proto
-package circuit_pb
+package pb
import (
- fmt "fmt"
- github_com_gogo_protobuf_proto "github.com/gogo/protobuf/proto"
- proto "github.com/gogo/protobuf/proto"
- io "io"
- math "math"
- math_bits "math/bits"
+ protoreflect "google.golang.org/protobuf/reflect/protoreflect"
+ protoimpl "google.golang.org/protobuf/runtime/protoimpl"
+ reflect "reflect"
+ sync "sync"
)
-// Reference imports to suppress errors if they are not otherwise used.
-var _ = proto.Marshal
-var _ = fmt.Errorf
-var _ = math.Inf
-
-// This is a compile-time assertion to ensure that this generated file
-// is compatible with the proto package it is being compiled against.
-// A compilation error at this line likely means your copy of the
-// proto package needs to be updated.
-const _ = proto.GoGoProtoPackageIsVersion3 // please upgrade the proto package
+const (
+ // Verify that this generated code is sufficiently up-to-date.
+ _ = protoimpl.EnforceVersion(20 - protoimpl.MinVersion)
+ // Verify that runtime/protoimpl is sufficiently up-to-date.
+ _ = protoimpl.EnforceVersion(protoimpl.MaxVersion - 20)
+)
type ReservationVoucher struct {
- Relay []byte `protobuf:"bytes,1,req,name=relay" json:"relay,omitempty"`
- Peer []byte `protobuf:"bytes,2,req,name=peer" json:"peer,omitempty"`
- Expiration *uint64 `protobuf:"varint,3,req,name=expiration" json:"expiration,omitempty"`
- XXX_NoUnkeyedLiteral struct{} `json:"-"`
- XXX_unrecognized []byte `json:"-"`
- XXX_sizecache int32 `json:"-"`
+ state protoimpl.MessageState
+ sizeCache protoimpl.SizeCache
+ unknownFields protoimpl.UnknownFields
+
+ // These fields are marked optional for backwards compatibility with proto2.
+ // Users should make sure to always set these.
+ Relay []byte `protobuf:"bytes,1,opt,name=relay,proto3,oneof" json:"relay,omitempty"`
+ Peer []byte `protobuf:"bytes,2,opt,name=peer,proto3,oneof" json:"peer,omitempty"`
+ Expiration *uint64 `protobuf:"varint,3,opt,name=expiration,proto3,oneof" json:"expiration,omitempty"`
}
-func (m *ReservationVoucher) Reset() { *m = ReservationVoucher{} }
-func (m *ReservationVoucher) String() string { return proto.CompactTextString(m) }
-func (*ReservationVoucher) ProtoMessage() {}
-func (*ReservationVoucher) Descriptor() ([]byte, []int) {
- return fileDescriptor_a22a9b0d3335ba25, []int{0}
+func (x *ReservationVoucher) Reset() {
+ *x = ReservationVoucher{}
+ if protoimpl.UnsafeEnabled {
+ mi := &file_pb_voucher_proto_msgTypes[0]
+ ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
+ ms.StoreMessageInfo(mi)
+ }
}
-func (m *ReservationVoucher) XXX_Unmarshal(b []byte) error {
- return m.Unmarshal(b)
+
+func (x *ReservationVoucher) String() string {
+ return protoimpl.X.MessageStringOf(x)
}
-func (m *ReservationVoucher) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) {
- if deterministic {
- return xxx_messageInfo_ReservationVoucher.Marshal(b, m, deterministic)
- } else {
- b = b[:cap(b)]
- n, err := m.MarshalToSizedBuffer(b)
- if err != nil {
- return nil, err
+
+func (*ReservationVoucher) ProtoMessage() {}
+
+func (x *ReservationVoucher) ProtoReflect() protoreflect.Message {
+ mi := &file_pb_voucher_proto_msgTypes[0]
+ if protoimpl.UnsafeEnabled && x != nil {
+ ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
+ if ms.LoadMessageInfo() == nil {
+ ms.StoreMessageInfo(mi)
}
- return b[:n], nil
+ return ms
}
-}
-func (m *ReservationVoucher) XXX_Merge(src proto.Message) {
- xxx_messageInfo_ReservationVoucher.Merge(m, src)
-}
-func (m *ReservationVoucher) XXX_Size() int {
- return m.Size()
-}
-func (m *ReservationVoucher) XXX_DiscardUnknown() {
- xxx_messageInfo_ReservationVoucher.DiscardUnknown(m)
+ return mi.MessageOf(x)
}
-var xxx_messageInfo_ReservationVoucher proto.InternalMessageInfo
+// Deprecated: Use ReservationVoucher.ProtoReflect.Descriptor instead.
+func (*ReservationVoucher) Descriptor() ([]byte, []int) {
+ return file_pb_voucher_proto_rawDescGZIP(), []int{0}
+}
-func (m *ReservationVoucher) GetRelay() []byte {
- if m != nil {
- return m.Relay
+func (x *ReservationVoucher) GetRelay() []byte {
+ if x != nil {
+ return x.Relay
}
return nil
}
-func (m *ReservationVoucher) GetPeer() []byte {
- if m != nil {
- return m.Peer
+func (x *ReservationVoucher) GetPeer() []byte {
+ if x != nil {
+ return x.Peer
}
return nil
}
-func (m *ReservationVoucher) GetExpiration() uint64 {
- if m != nil && m.Expiration != nil {
- return *m.Expiration
+func (x *ReservationVoucher) GetExpiration() uint64 {
+ if x != nil && x.Expiration != nil {
+ return *x.Expiration
}
return 0
}
-func init() {
- proto.RegisterType((*ReservationVoucher)(nil), "circuit.pb.ReservationVoucher")
-}
-
-func init() { proto.RegisterFile("voucher.proto", fileDescriptor_a22a9b0d3335ba25) }
-
-var fileDescriptor_a22a9b0d3335ba25 = []byte{
- // 135 bytes of a gzipped FileDescriptorProto
- 0x1f, 0x8b, 0x08, 0x00, 0x00, 0x00, 0x00, 0x00, 0x02, 0xff, 0xe2, 0xe2, 0x2d, 0xcb, 0x2f, 0x4d,
- 0xce, 0x48, 0x2d, 0xd2, 0x2b, 0x28, 0xca, 0x2f, 0xc9, 0x17, 0xe2, 0x4a, 0xce, 0x2c, 0x4a, 0x2e,
- 0xcd, 0x2c, 0xd1, 0x2b, 0x48, 0x52, 0x8a, 0xe3, 0x12, 0x0a, 0x4a, 0x2d, 0x4e, 0x2d, 0x2a, 0x4b,
- 0x2c, 0xc9, 0xcc, 0xcf, 0x0b, 0x83, 0xa8, 0x13, 0x12, 0xe1, 0x62, 0x2d, 0x4a, 0xcd, 0x49, 0xac,
- 0x94, 0x60, 0x54, 0x60, 0xd2, 0xe0, 0x09, 0x82, 0x70, 0x84, 0x84, 0xb8, 0x58, 0x0a, 0x52, 0x53,
- 0x8b, 0x24, 0x98, 0xc0, 0x82, 0x60, 0xb6, 0x90, 0x1c, 0x17, 0x57, 0x6a, 0x45, 0x41, 0x66, 0x11,
- 0x58, 0xbb, 0x04, 0xb3, 0x02, 0x93, 0x06, 0x4b, 0x10, 0x92, 0x88, 0x13, 0xcf, 0x89, 0x47, 0x72,
- 0x8c, 0x17, 0x1e, 0xc9, 0x31, 0x3e, 0x78, 0x24, 0xc7, 0x08, 0x08, 0x00, 0x00, 0xff, 0xff, 0xc0,
- 0x81, 0x3a, 0xee, 0x89, 0x00, 0x00, 0x00,
-}
-
-func (m *ReservationVoucher) Marshal() (dAtA []byte, err error) {
- size := m.Size()
- dAtA = make([]byte, size)
- n, err := m.MarshalToSizedBuffer(dAtA[:size])
- if err != nil {
- return nil, err
- }
- return dAtA[:n], nil
-}
-
-func (m *ReservationVoucher) MarshalTo(dAtA []byte) (int, error) {
- size := m.Size()
- return m.MarshalToSizedBuffer(dAtA[:size])
-}
+var File_pb_voucher_proto protoreflect.FileDescriptor
-func (m *ReservationVoucher) MarshalToSizedBuffer(dAtA []byte) (int, error) {
- i := len(dAtA)
- _ = i
- var l int
- _ = l
- if m.XXX_unrecognized != nil {
- i -= len(m.XXX_unrecognized)
- copy(dAtA[i:], m.XXX_unrecognized)
- }
- if m.Expiration == nil {
- return 0, github_com_gogo_protobuf_proto.NewRequiredNotSetError("expiration")
- } else {
- i = encodeVarintVoucher(dAtA, i, uint64(*m.Expiration))
- i--
- dAtA[i] = 0x18
- }
- if m.Peer == nil {
- return 0, github_com_gogo_protobuf_proto.NewRequiredNotSetError("peer")
- } else {
- i -= len(m.Peer)
- copy(dAtA[i:], m.Peer)
- i = encodeVarintVoucher(dAtA, i, uint64(len(m.Peer)))
- i--
- dAtA[i] = 0x12
- }
- if m.Relay == nil {
- return 0, github_com_gogo_protobuf_proto.NewRequiredNotSetError("relay")
- } else {
- i -= len(m.Relay)
- copy(dAtA[i:], m.Relay)
- i = encodeVarintVoucher(dAtA, i, uint64(len(m.Relay)))
- i--
- dAtA[i] = 0xa
- }
- return len(dAtA) - i, nil
+var file_pb_voucher_proto_rawDesc = []byte{
+ 0x0a, 0x10, 0x70, 0x62, 0x2f, 0x76, 0x6f, 0x75, 0x63, 0x68, 0x65, 0x72, 0x2e, 0x70, 0x72, 0x6f,
+ 0x74, 0x6f, 0x12, 0x0a, 0x63, 0x69, 0x72, 0x63, 0x75, 0x69, 0x74, 0x2e, 0x70, 0x62, 0x22, 0x8f,
+ 0x01, 0x0a, 0x12, 0x52, 0x65, 0x73, 0x65, 0x72, 0x76, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x56, 0x6f,
+ 0x75, 0x63, 0x68, 0x65, 0x72, 0x12, 0x19, 0x0a, 0x05, 0x72, 0x65, 0x6c, 0x61, 0x79, 0x18, 0x01,
+ 0x20, 0x01, 0x28, 0x0c, 0x48, 0x00, 0x52, 0x05, 0x72, 0x65, 0x6c, 0x61, 0x79, 0x88, 0x01, 0x01,
+ 0x12, 0x17, 0x0a, 0x04, 0x70, 0x65, 0x65, 0x72, 0x18, 0x02, 0x20, 0x01, 0x28, 0x0c, 0x48, 0x01,
+ 0x52, 0x04, 0x70, 0x65, 0x65, 0x72, 0x88, 0x01, 0x01, 0x12, 0x23, 0x0a, 0x0a, 0x65, 0x78, 0x70,
+ 0x69, 0x72, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x18, 0x03, 0x20, 0x01, 0x28, 0x04, 0x48, 0x02, 0x52,
+ 0x0a, 0x65, 0x78, 0x70, 0x69, 0x72, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x88, 0x01, 0x01, 0x42, 0x08,
+ 0x0a, 0x06, 0x5f, 0x72, 0x65, 0x6c, 0x61, 0x79, 0x42, 0x07, 0x0a, 0x05, 0x5f, 0x70, 0x65, 0x65,
+ 0x72, 0x42, 0x0d, 0x0a, 0x0b, 0x5f, 0x65, 0x78, 0x70, 0x69, 0x72, 0x61, 0x74, 0x69, 0x6f, 0x6e,
+ 0x62, 0x06, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x33,
}
-func encodeVarintVoucher(dAtA []byte, offset int, v uint64) int {
- offset -= sovVoucher(v)
- base := offset
- for v >= 1<<7 {
- dAtA[offset] = uint8(v&0x7f | 0x80)
- v >>= 7
- offset++
- }
- dAtA[offset] = uint8(v)
- return base
-}
-func (m *ReservationVoucher) Size() (n int) {
- if m == nil {
- return 0
- }
- var l int
- _ = l
- if m.Relay != nil {
- l = len(m.Relay)
- n += 1 + l + sovVoucher(uint64(l))
- }
- if m.Peer != nil {
- l = len(m.Peer)
- n += 1 + l + sovVoucher(uint64(l))
- }
- if m.Expiration != nil {
- n += 1 + sovVoucher(uint64(*m.Expiration))
- }
- if m.XXX_unrecognized != nil {
- n += len(m.XXX_unrecognized)
- }
- return n
-}
-
-func sovVoucher(x uint64) (n int) {
- return (math_bits.Len64(x|1) + 6) / 7
-}
-func sozVoucher(x uint64) (n int) {
- return sovVoucher(uint64((x << 1) ^ uint64((int64(x) >> 63))))
-}
-func (m *ReservationVoucher) Unmarshal(dAtA []byte) error {
- var hasFields [1]uint64
- l := len(dAtA)
- iNdEx := 0
- for iNdEx < l {
- preIndex := iNdEx
- var wire uint64
- for shift := uint(0); ; shift += 7 {
- if shift >= 64 {
- return ErrIntOverflowVoucher
- }
- if iNdEx >= l {
- return io.ErrUnexpectedEOF
- }
- b := dAtA[iNdEx]
- iNdEx++
- wire |= uint64(b&0x7F) << shift
- if b < 0x80 {
- break
- }
- }
- fieldNum := int32(wire >> 3)
- wireType := int(wire & 0x7)
- if wireType == 4 {
- return fmt.Errorf("proto: ReservationVoucher: wiretype end group for non-group")
- }
- if fieldNum <= 0 {
- return fmt.Errorf("proto: ReservationVoucher: illegal tag %d (wire type %d)", fieldNum, wire)
- }
- switch fieldNum {
- case 1:
- if wireType != 2 {
- return fmt.Errorf("proto: wrong wireType = %d for field Relay", wireType)
- }
- var byteLen int
- for shift := uint(0); ; shift += 7 {
- if shift >= 64 {
- return ErrIntOverflowVoucher
- }
- if iNdEx >= l {
- return io.ErrUnexpectedEOF
- }
- b := dAtA[iNdEx]
- iNdEx++
- byteLen |= int(b&0x7F) << shift
- if b < 0x80 {
- break
- }
- }
- if byteLen < 0 {
- return ErrInvalidLengthVoucher
- }
- postIndex := iNdEx + byteLen
- if postIndex < 0 {
- return ErrInvalidLengthVoucher
- }
- if postIndex > l {
- return io.ErrUnexpectedEOF
- }
- m.Relay = append(m.Relay[:0], dAtA[iNdEx:postIndex]...)
- if m.Relay == nil {
- m.Relay = []byte{}
- }
- iNdEx = postIndex
- hasFields[0] |= uint64(0x00000001)
- case 2:
- if wireType != 2 {
- return fmt.Errorf("proto: wrong wireType = %d for field Peer", wireType)
- }
- var byteLen int
- for shift := uint(0); ; shift += 7 {
- if shift >= 64 {
- return ErrIntOverflowVoucher
- }
- if iNdEx >= l {
- return io.ErrUnexpectedEOF
- }
- b := dAtA[iNdEx]
- iNdEx++
- byteLen |= int(b&0x7F) << shift
- if b < 0x80 {
- break
- }
- }
- if byteLen < 0 {
- return ErrInvalidLengthVoucher
- }
- postIndex := iNdEx + byteLen
- if postIndex < 0 {
- return ErrInvalidLengthVoucher
- }
- if postIndex > l {
- return io.ErrUnexpectedEOF
- }
- m.Peer = append(m.Peer[:0], dAtA[iNdEx:postIndex]...)
- if m.Peer == nil {
- m.Peer = []byte{}
- }
- iNdEx = postIndex
- hasFields[0] |= uint64(0x00000002)
- case 3:
- if wireType != 0 {
- return fmt.Errorf("proto: wrong wireType = %d for field Expiration", wireType)
- }
- var v uint64
- for shift := uint(0); ; shift += 7 {
- if shift >= 64 {
- return ErrIntOverflowVoucher
- }
- if iNdEx >= l {
- return io.ErrUnexpectedEOF
- }
- b := dAtA[iNdEx]
- iNdEx++
- v |= uint64(b&0x7F) << shift
- if b < 0x80 {
- break
- }
- }
- m.Expiration = &v
- hasFields[0] |= uint64(0x00000004)
- default:
- iNdEx = preIndex
- skippy, err := skipVoucher(dAtA[iNdEx:])
- if err != nil {
- return err
- }
- if skippy < 0 {
- return ErrInvalidLengthVoucher
- }
- if (iNdEx + skippy) < 0 {
- return ErrInvalidLengthVoucher
- }
- if (iNdEx + skippy) > l {
- return io.ErrUnexpectedEOF
- }
- m.XXX_unrecognized = append(m.XXX_unrecognized, dAtA[iNdEx:iNdEx+skippy]...)
- iNdEx += skippy
- }
- }
- if hasFields[0]&uint64(0x00000001) == 0 {
- return github_com_gogo_protobuf_proto.NewRequiredNotSetError("relay")
- }
- if hasFields[0]&uint64(0x00000002) == 0 {
- return github_com_gogo_protobuf_proto.NewRequiredNotSetError("peer")
- }
- if hasFields[0]&uint64(0x00000004) == 0 {
- return github_com_gogo_protobuf_proto.NewRequiredNotSetError("expiration")
- }
+var (
+ file_pb_voucher_proto_rawDescOnce sync.Once
+ file_pb_voucher_proto_rawDescData = file_pb_voucher_proto_rawDesc
+)
- if iNdEx > l {
- return io.ErrUnexpectedEOF
- }
- return nil
-}
-func skipVoucher(dAtA []byte) (n int, err error) {
- l := len(dAtA)
- iNdEx := 0
- depth := 0
- for iNdEx < l {
- var wire uint64
- for shift := uint(0); ; shift += 7 {
- if shift >= 64 {
- return 0, ErrIntOverflowVoucher
- }
- if iNdEx >= l {
- return 0, io.ErrUnexpectedEOF
- }
- b := dAtA[iNdEx]
- iNdEx++
- wire |= (uint64(b) & 0x7F) << shift
- if b < 0x80 {
- break
+func file_pb_voucher_proto_rawDescGZIP() []byte {
+ file_pb_voucher_proto_rawDescOnce.Do(func() {
+ file_pb_voucher_proto_rawDescData = protoimpl.X.CompressGZIP(file_pb_voucher_proto_rawDescData)
+ })
+ return file_pb_voucher_proto_rawDescData
+}
+
+var file_pb_voucher_proto_msgTypes = make([]protoimpl.MessageInfo, 1)
+var file_pb_voucher_proto_goTypes = []interface{}{
+ (*ReservationVoucher)(nil), // 0: circuit.pb.ReservationVoucher
+}
+var file_pb_voucher_proto_depIdxs = []int32{
+ 0, // [0:0] is the sub-list for method output_type
+ 0, // [0:0] is the sub-list for method input_type
+ 0, // [0:0] is the sub-list for extension type_name
+ 0, // [0:0] is the sub-list for extension extendee
+ 0, // [0:0] is the sub-list for field type_name
+}
+
+func init() { file_pb_voucher_proto_init() }
+func file_pb_voucher_proto_init() {
+ if File_pb_voucher_proto != nil {
+ return
+ }
+ if !protoimpl.UnsafeEnabled {
+ file_pb_voucher_proto_msgTypes[0].Exporter = func(v interface{}, i int) interface{} {
+ switch v := v.(*ReservationVoucher); i {
+ case 0:
+ return &v.state
+ case 1:
+ return &v.sizeCache
+ case 2:
+ return &v.unknownFields
+ default:
+ return nil
}
}
- wireType := int(wire & 0x7)
- switch wireType {
- case 0:
- for shift := uint(0); ; shift += 7 {
- if shift >= 64 {
- return 0, ErrIntOverflowVoucher
- }
- if iNdEx >= l {
- return 0, io.ErrUnexpectedEOF
- }
- iNdEx++
- if dAtA[iNdEx-1] < 0x80 {
- break
- }
- }
- case 1:
- iNdEx += 8
- case 2:
- var length int
- for shift := uint(0); ; shift += 7 {
- if shift >= 64 {
- return 0, ErrIntOverflowVoucher
- }
- if iNdEx >= l {
- return 0, io.ErrUnexpectedEOF
- }
- b := dAtA[iNdEx]
- iNdEx++
- length |= (int(b) & 0x7F) << shift
- if b < 0x80 {
- break
- }
- }
- if length < 0 {
- return 0, ErrInvalidLengthVoucher
- }
- iNdEx += length
- case 3:
- depth++
- case 4:
- if depth == 0 {
- return 0, ErrUnexpectedEndOfGroupVoucher
- }
- depth--
- case 5:
- iNdEx += 4
- default:
- return 0, fmt.Errorf("proto: illegal wireType %d", wireType)
- }
- if iNdEx < 0 {
- return 0, ErrInvalidLengthVoucher
- }
- if depth == 0 {
- return iNdEx, nil
- }
}
- return 0, io.ErrUnexpectedEOF
+ file_pb_voucher_proto_msgTypes[0].OneofWrappers = []interface{}{}
+ type x struct{}
+ out := protoimpl.TypeBuilder{
+ File: protoimpl.DescBuilder{
+ GoPackagePath: reflect.TypeOf(x{}).PkgPath(),
+ RawDescriptor: file_pb_voucher_proto_rawDesc,
+ NumEnums: 0,
+ NumMessages: 1,
+ NumExtensions: 0,
+ NumServices: 0,
+ },
+ GoTypes: file_pb_voucher_proto_goTypes,
+ DependencyIndexes: file_pb_voucher_proto_depIdxs,
+ MessageInfos: file_pb_voucher_proto_msgTypes,
+ }.Build()
+ File_pb_voucher_proto = out.File
+ file_pb_voucher_proto_rawDesc = nil
+ file_pb_voucher_proto_goTypes = nil
+ file_pb_voucher_proto_depIdxs = nil
}
-
-var (
- ErrInvalidLengthVoucher = fmt.Errorf("proto: negative length found during unmarshaling")
- ErrIntOverflowVoucher = fmt.Errorf("proto: integer overflow")
- ErrUnexpectedEndOfGroupVoucher = fmt.Errorf("proto: unexpected end of group")
-)
diff --git a/vendor/github.com/libp2p/go-libp2p/p2p/protocol/circuitv2/pb/voucher.proto b/vendor/github.com/libp2p/go-libp2p/p2p/protocol/circuitv2/pb/voucher.proto
index 086440253..1e2e79631 100644
--- a/vendor/github.com/libp2p/go-libp2p/p2p/protocol/circuitv2/pb/voucher.proto
+++ b/vendor/github.com/libp2p/go-libp2p/p2p/protocol/circuitv2/pb/voucher.proto
@@ -1,9 +1,11 @@
-syntax = "proto2";
+syntax = "proto3";
package circuit.pb;
message ReservationVoucher {
- required bytes relay = 1;
- required bytes peer = 2;
- required uint64 expiration = 3;
-}
+ // These fields are marked optional for backwards compatibility with proto2.
+ // Users should make sure to always set these.
+ optional bytes relay = 1;
+ optional bytes peer = 2;
+ optional uint64 expiration = 3;
+}
\ No newline at end of file
diff --git a/vendor/github.com/libp2p/go-libp2p/p2p/protocol/circuitv2/proto/protocol.go b/vendor/github.com/libp2p/go-libp2p/p2p/protocol/circuitv2/proto/protocol.go
index d27fc5098..4b6d96b88 100644
--- a/vendor/github.com/libp2p/go-libp2p/p2p/protocol/circuitv2/proto/protocol.go
+++ b/vendor/github.com/libp2p/go-libp2p/p2p/protocol/circuitv2/proto/protocol.go
@@ -1,7 +1,6 @@
package proto
const (
- ProtoIDv1 = "/libp2p/circuit/relay/0.1.0"
ProtoIDv2Hop = "/libp2p/circuit/relay/0.2.0/hop"
ProtoIDv2Stop = "/libp2p/circuit/relay/0.2.0/stop"
)
diff --git a/vendor/github.com/libp2p/go-libp2p/p2p/protocol/circuitv2/proto/voucher.go b/vendor/github.com/libp2p/go-libp2p/p2p/protocol/circuitv2/proto/voucher.go
index fd50fccce..7114d81c6 100644
--- a/vendor/github.com/libp2p/go-libp2p/p2p/protocol/circuitv2/proto/voucher.go
+++ b/vendor/github.com/libp2p/go-libp2p/p2p/protocol/circuitv2/proto/voucher.go
@@ -6,6 +6,8 @@ import (
"github.com/libp2p/go-libp2p/core/peer"
"github.com/libp2p/go-libp2p/core/record"
pbv2 "github.com/libp2p/go-libp2p/p2p/protocol/circuitv2/pb"
+
+ "google.golang.org/protobuf/proto"
)
const RecordDomain = "libp2p-relay-rsvp"
@@ -37,21 +39,17 @@ func (rv *ReservationVoucher) Codec() []byte {
}
func (rv *ReservationVoucher) MarshalRecord() ([]byte, error) {
- relay := []byte(rv.Relay)
- peer := []byte(rv.Peer)
expiration := uint64(rv.Expiration.Unix())
- pbrv := &pbv2.ReservationVoucher{
- Relay: relay,
- Peer: peer,
+ return proto.Marshal(&pbv2.ReservationVoucher{
+ Relay: []byte(rv.Relay),
+ Peer: []byte(rv.Peer),
Expiration: &expiration,
- }
-
- return pbrv.Marshal()
+ })
}
func (rv *ReservationVoucher) UnmarshalRecord(blob []byte) error {
pbrv := pbv2.ReservationVoucher{}
- err := pbrv.Unmarshal(blob)
+ err := proto.Unmarshal(blob, &pbrv)
if err != nil {
return err
}
diff --git a/vendor/github.com/libp2p/go-libp2p/p2p/protocol/circuitv2/relay/metrics.go b/vendor/github.com/libp2p/go-libp2p/p2p/protocol/circuitv2/relay/metrics.go
new file mode 100644
index 000000000..778645913
--- /dev/null
+++ b/vendor/github.com/libp2p/go-libp2p/p2p/protocol/circuitv2/relay/metrics.go
@@ -0,0 +1,268 @@
+package relay
+
+import (
+ "time"
+
+ "github.com/libp2p/go-libp2p/p2p/metricshelper"
+ pbv2 "github.com/libp2p/go-libp2p/p2p/protocol/circuitv2/pb"
+ "github.com/prometheus/client_golang/prometheus"
+)
+
+const metricNamespace = "libp2p_relaysvc"
+
+var (
+ status = prometheus.NewGauge(
+ prometheus.GaugeOpts{
+ Namespace: metricNamespace,
+ Name: "status",
+ Help: "Relay Status",
+ },
+ )
+
+ reservationsTotal = prometheus.NewCounterVec(
+ prometheus.CounterOpts{
+ Namespace: metricNamespace,
+ Name: "reservations_total",
+ Help: "Relay Reservation Request",
+ },
+ []string{"type"},
+ )
+ reservationRequestResponseStatusTotal = prometheus.NewCounterVec(
+ prometheus.CounterOpts{
+ Namespace: metricNamespace,
+ Name: "reservation_request_response_status_total",
+ Help: "Relay Reservation Request Response Status",
+ },
+ []string{"status"},
+ )
+ reservationRejectionsTotal = prometheus.NewCounterVec(
+ prometheus.CounterOpts{
+ Namespace: metricNamespace,
+ Name: "reservation_rejections_total",
+ Help: "Relay Reservation Rejected Reason",
+ },
+ []string{"reason"},
+ )
+
+ connectionsTotal = prometheus.NewCounterVec(
+ prometheus.CounterOpts{
+ Namespace: metricNamespace,
+ Name: "connections_total",
+ Help: "Relay Connection Total",
+ },
+ []string{"type"},
+ )
+ connectionRequestResponseStatusTotal = prometheus.NewCounterVec(
+ prometheus.CounterOpts{
+ Namespace: metricNamespace,
+ Name: "connection_request_response_status_total",
+ Help: "Relay Connection Request Status",
+ },
+ []string{"status"},
+ )
+ connectionRejectionsTotal = prometheus.NewCounterVec(
+ prometheus.CounterOpts{
+ Namespace: metricNamespace,
+ Name: "connection_rejections_total",
+ Help: "Relay Connection Rejected Reason",
+ },
+ []string{"reason"},
+ )
+ connectionDurationSeconds = prometheus.NewHistogram(
+ prometheus.HistogramOpts{
+ Namespace: metricNamespace,
+ Name: "connection_duration_seconds",
+ Help: "Relay Connection Duration",
+ },
+ )
+
+ dataTransferredBytesTotal = prometheus.NewCounter(
+ prometheus.CounterOpts{
+ Namespace: metricNamespace,
+ Name: "data_transferred_bytes_total",
+ Help: "Bytes Transferred Total",
+ },
+ )
+
+ collectors = []prometheus.Collector{
+ status,
+ reservationsTotal,
+ reservationRequestResponseStatusTotal,
+ reservationRejectionsTotal,
+ connectionsTotal,
+ connectionRequestResponseStatusTotal,
+ connectionRejectionsTotal,
+ connectionDurationSeconds,
+ dataTransferredBytesTotal,
+ }
+)
+
+const (
+ requestStatusOK = "ok"
+ requestStatusRejected = "rejected"
+ requestStatusError = "error"
+)
+
+// MetricsTracer is the interface for tracking metrics for relay service
+type MetricsTracer interface {
+ // RelayStatus tracks whether the service is currently active
+ RelayStatus(enabled bool)
+
+ // ConnectionOpened tracks metrics on opening a relay connection
+ ConnectionOpened()
+ // ConnectionClosed tracks metrics on closing a relay connection
+ ConnectionClosed(d time.Duration)
+ // ConnectionRequestHandled tracks metrics on handling a relay connection request
+ ConnectionRequestHandled(status pbv2.Status)
+
+ // ReservationAllowed tracks metrics on opening or renewing a relay reservation
+ ReservationAllowed(isRenewal bool)
+ // ReservationRequestClosed tracks metrics on closing a relay reservation
+ ReservationClosed(cnt int)
+ // ReservationRequestHandled tracks metrics on handling a relay reservation request
+ ReservationRequestHandled(status pbv2.Status)
+
+ // BytesTransferred tracks the total bytes transferred by the relay service
+ BytesTransferred(cnt int)
+}
+
+type metricsTracer struct{}
+
+var _ MetricsTracer = &metricsTracer{}
+
+type metricsTracerSetting struct {
+ reg prometheus.Registerer
+}
+
+type MetricsTracerOption func(*metricsTracerSetting)
+
+func WithRegisterer(reg prometheus.Registerer) MetricsTracerOption {
+ return func(s *metricsTracerSetting) {
+ if reg != nil {
+ s.reg = reg
+ }
+ }
+}
+
+func NewMetricsTracer(opts ...MetricsTracerOption) MetricsTracer {
+ setting := &metricsTracerSetting{reg: prometheus.DefaultRegisterer}
+ for _, opt := range opts {
+ opt(setting)
+ }
+ metricshelper.RegisterCollectors(setting.reg, collectors...)
+ return &metricsTracer{}
+}
+
+func (mt *metricsTracer) RelayStatus(enabled bool) {
+ if enabled {
+ status.Set(1)
+ } else {
+ status.Set(0)
+ }
+}
+
+func (mt *metricsTracer) ConnectionOpened() {
+ tags := metricshelper.GetStringSlice()
+ defer metricshelper.PutStringSlice(tags)
+ *tags = append(*tags, "opened")
+
+ connectionsTotal.WithLabelValues(*tags...).Add(1)
+}
+
+func (mt *metricsTracer) ConnectionClosed(d time.Duration) {
+ tags := metricshelper.GetStringSlice()
+ defer metricshelper.PutStringSlice(tags)
+ *tags = append(*tags, "closed")
+
+ connectionsTotal.WithLabelValues(*tags...).Add(1)
+ connectionDurationSeconds.Observe(d.Seconds())
+}
+
+func (mt *metricsTracer) ConnectionRequestHandled(status pbv2.Status) {
+ tags := metricshelper.GetStringSlice()
+ defer metricshelper.PutStringSlice(tags)
+
+ respStatus := getResponseStatus(status)
+
+ *tags = append(*tags, respStatus)
+ connectionRequestResponseStatusTotal.WithLabelValues(*tags...).Add(1)
+ if respStatus == requestStatusRejected {
+ *tags = (*tags)[:0]
+ *tags = append(*tags, getRejectionReason(status))
+ connectionRejectionsTotal.WithLabelValues(*tags...).Add(1)
+ }
+}
+
+func (mt *metricsTracer) ReservationAllowed(isRenewal bool) {
+ tags := metricshelper.GetStringSlice()
+ defer metricshelper.PutStringSlice(tags)
+ if isRenewal {
+ *tags = append(*tags, "renewed")
+ } else {
+ *tags = append(*tags, "opened")
+ }
+
+ reservationsTotal.WithLabelValues(*tags...).Add(1)
+}
+
+func (mt *metricsTracer) ReservationClosed(cnt int) {
+ tags := metricshelper.GetStringSlice()
+ defer metricshelper.PutStringSlice(tags)
+ *tags = append(*tags, "closed")
+
+ reservationsTotal.WithLabelValues(*tags...).Add(float64(cnt))
+}
+
+func (mt *metricsTracer) ReservationRequestHandled(status pbv2.Status) {
+ tags := metricshelper.GetStringSlice()
+ defer metricshelper.PutStringSlice(tags)
+
+ respStatus := getResponseStatus(status)
+
+ *tags = append(*tags, respStatus)
+ reservationRequestResponseStatusTotal.WithLabelValues(*tags...).Add(1)
+ if respStatus == requestStatusRejected {
+ *tags = (*tags)[:0]
+ *tags = append(*tags, getRejectionReason(status))
+ reservationRejectionsTotal.WithLabelValues(*tags...).Add(1)
+ }
+}
+
+func (mt *metricsTracer) BytesTransferred(cnt int) {
+ dataTransferredBytesTotal.Add(float64(cnt))
+}
+
+func getResponseStatus(status pbv2.Status) string {
+ responseStatus := "unknown"
+ switch status {
+ case pbv2.Status_RESERVATION_REFUSED,
+ pbv2.Status_RESOURCE_LIMIT_EXCEEDED,
+ pbv2.Status_PERMISSION_DENIED,
+ pbv2.Status_NO_RESERVATION,
+ pbv2.Status_MALFORMED_MESSAGE:
+
+ responseStatus = requestStatusRejected
+ case pbv2.Status_UNEXPECTED_MESSAGE, pbv2.Status_CONNECTION_FAILED:
+ responseStatus = requestStatusError
+ case pbv2.Status_OK:
+ responseStatus = requestStatusOK
+ }
+ return responseStatus
+}
+
+func getRejectionReason(status pbv2.Status) string {
+ reason := "unknown"
+ switch status {
+ case pbv2.Status_RESERVATION_REFUSED:
+ reason = "ip constraint violation"
+ case pbv2.Status_RESOURCE_LIMIT_EXCEEDED:
+ reason = "resource limit exceeded"
+ case pbv2.Status_PERMISSION_DENIED:
+ reason = "permission denied"
+ case pbv2.Status_NO_RESERVATION:
+ reason = "no reservation"
+ case pbv2.Status_MALFORMED_MESSAGE:
+ reason = "malformed message"
+ }
+ return reason
+}
diff --git a/vendor/github.com/libp2p/go-libp2p/p2p/protocol/circuitv2/relay/options.go b/vendor/github.com/libp2p/go-libp2p/p2p/protocol/circuitv2/relay/options.go
index 346411522..3b50ec385 100644
--- a/vendor/github.com/libp2p/go-libp2p/p2p/protocol/circuitv2/relay/options.go
+++ b/vendor/github.com/libp2p/go-libp2p/p2p/protocol/circuitv2/relay/options.go
@@ -18,6 +18,14 @@ func WithLimit(limit *RelayLimit) Option {
}
}
+// WithInfiniteLimits is a Relay option that disables limits.
+func WithInfiniteLimits() Option {
+ return func(r *Relay) error {
+ r.rc.Limit = nil
+ return nil
+ }
+}
+
// WithACL is a Relay option that supplies an ACLFilter for access control.
func WithACL(acl ACLFilter) Option {
return func(r *Relay) error {
@@ -25,3 +33,11 @@ func WithACL(acl ACLFilter) Option {
return nil
}
}
+
+// WithMetricsTracer is a Relay option that supplies a MetricsTracer for metrics
+func WithMetricsTracer(mt MetricsTracer) Option {
+ return func(r *Relay) error {
+ r.metricsTracer = mt
+ return nil
+ }
+}
diff --git a/vendor/github.com/libp2p/go-libp2p/p2p/protocol/circuitv2/relay/relay.go b/vendor/github.com/libp2p/go-libp2p/p2p/protocol/circuitv2/relay/relay.go
index 5e2d9f183..e3c8f4758 100644
--- a/vendor/github.com/libp2p/go-libp2p/p2p/protocol/circuitv2/relay/relay.go
+++ b/vendor/github.com/libp2p/go-libp2p/p2p/protocol/circuitv2/relay/relay.go
@@ -2,6 +2,7 @@ package relay
import (
"context"
+ "errors"
"fmt"
"io"
"sync"
@@ -18,6 +19,7 @@ import (
logging "github.com/ipfs/go-log/v2"
pool "github.com/libp2p/go-buffer-pool"
+ asnutil "github.com/libp2p/go-libp2p-asn-util"
ma "github.com/multiformats/go-multiaddr"
manet "github.com/multiformats/go-multiaddr/net"
)
@@ -41,7 +43,6 @@ var log = logging.Logger("relay")
// Relay is the (limited) relay service object.
type Relay struct {
- closed uint32
ctx context.Context
cancel func()
@@ -50,12 +51,16 @@ type Relay struct {
acl ACLFilter
constraints *constraints
scope network.ResourceScopeSpan
+ notifiee network.Notifiee
- mx sync.Mutex
- rsvp map[peer.ID]time.Time
- conns map[peer.ID]int
+ mx sync.Mutex
+ rsvp map[peer.ID]time.Time
+ conns map[peer.ID]int
+ closed bool
selfAddr ma.Multiaddr
+
+ metricsTracer MetricsTracer
}
// New constructs a new limited relay that can provide relay services in the given host.
@@ -94,26 +99,34 @@ func New(h host.Host, opts ...Option) (*Relay, error) {
r.selfAddr = ma.StringCast(fmt.Sprintf("/p2p/%s", h.ID()))
h.SetStreamHandler(proto.ProtoIDv2Hop, r.handleStream)
- h.Network().Notify(
- &network.NotifyBundle{
- DisconnectedF: r.disconnected,
- })
+ r.notifiee = &network.NotifyBundle{DisconnectedF: r.disconnected}
+ h.Network().Notify(r.notifiee)
+
+ if r.metricsTracer != nil {
+ r.metricsTracer.RelayStatus(true)
+ }
go r.background()
return r, nil
}
func (r *Relay) Close() error {
- if atomic.CompareAndSwapUint32(&r.closed, 0, 1) {
+ r.mx.Lock()
+ if !r.closed {
+ r.closed = true
+ r.mx.Unlock()
+
r.host.RemoveStreamHandler(proto.ProtoIDv2Hop)
+ r.host.Network().StopNotify(r.notifiee)
r.scope.Done()
r.cancel()
- r.mx.Lock()
- for p := range r.rsvp {
- r.host.ConnManager().UntagPeer(p, "relay-reservation")
+ r.gc()
+ if r.metricsTracer != nil {
+ r.metricsTracer.RelayStatus(false)
}
- r.mx.Unlock()
+ return nil
}
+ r.mx.Unlock()
return nil
}
@@ -147,38 +160,48 @@ func (r *Relay) handleStream(s network.Stream) {
}
// reset stream deadline as message has been read
s.SetReadDeadline(time.Time{})
-
switch msg.GetType() {
case pbv2.HopMessage_RESERVE:
- r.handleReserve(s)
-
+ status := r.handleReserve(s)
+ if r.metricsTracer != nil {
+ r.metricsTracer.ReservationRequestHandled(status)
+ }
case pbv2.HopMessage_CONNECT:
- r.handleConnect(s, &msg)
-
+ status := r.handleConnect(s, &msg)
+ if r.metricsTracer != nil {
+ r.metricsTracer.ConnectionRequestHandled(status)
+ }
default:
r.handleError(s, pbv2.Status_MALFORMED_MESSAGE)
}
}
-func (r *Relay) handleReserve(s network.Stream) {
+func (r *Relay) handleReserve(s network.Stream) pbv2.Status {
defer s.Close()
-
p := s.Conn().RemotePeer()
a := s.Conn().RemoteMultiaddr()
if isRelayAddr(a) {
log.Debugf("refusing relay reservation for %s; reservation attempt over relay connection")
r.handleError(s, pbv2.Status_PERMISSION_DENIED)
- return
+ return pbv2.Status_PERMISSION_DENIED
}
if r.acl != nil && !r.acl.AllowReserve(p, a) {
log.Debugf("refusing relay reservation for %s; permission denied", p)
r.handleError(s, pbv2.Status_PERMISSION_DENIED)
- return
+ return pbv2.Status_PERMISSION_DENIED
}
r.mx.Lock()
+ // Check if relay is still active. Otherwise ConnManager.UnTagPeer will not be called if this block runs after
+ // Close() call
+ if r.closed {
+ r.mx.Unlock()
+ log.Debugf("refusing relay reservation for %s; relay closed", p)
+ r.handleError(s, pbv2.Status_PERMISSION_DENIED)
+ return pbv2.Status_PERMISSION_DENIED
+ }
now := time.Now()
_, exists := r.rsvp[p]
@@ -187,7 +210,7 @@ func (r *Relay) handleReserve(s network.Stream) {
r.mx.Unlock()
log.Debugf("refusing relay reservation for %s; IP constraint violation: %s", p, err)
r.handleError(s, pbv2.Status_RESERVATION_REFUSED)
- return
+ return pbv2.Status_RESERVATION_REFUSED
}
}
@@ -195,6 +218,9 @@ func (r *Relay) handleReserve(s network.Stream) {
r.rsvp[p] = expire
r.host.ConnManager().TagPeer(p, "relay-reservation", ReservationTagWeight)
r.mx.Unlock()
+ if r.metricsTracer != nil {
+ r.metricsTracer.ReservationAllowed(exists)
+ }
log.Debugf("reserving relay slot for %s", p)
@@ -204,10 +230,12 @@ func (r *Relay) handleReserve(s network.Stream) {
if err := r.writeResponse(s, pbv2.Status_OK, r.makeReservationMsg(p, expire), r.makeLimitMsg(p)); err != nil {
log.Debugf("error writing reservation response; retracting reservation for %s", p)
s.Reset()
+ return pbv2.Status_CONNECTION_FAILED
}
+ return pbv2.Status_OK
}
-func (r *Relay) handleConnect(s network.Stream, msg *pbv2.HopMessage) {
+func (r *Relay) handleConnect(s network.Stream, msg *pbv2.HopMessage) pbv2.Status {
src := s.Conn().RemotePeer()
a := s.Conn().RemoteMultiaddr()
@@ -215,7 +243,7 @@ func (r *Relay) handleConnect(s network.Stream, msg *pbv2.HopMessage) {
if err != nil {
log.Debugf("failed to begin relay transaction: %s", err)
r.handleError(s, pbv2.Status_RESOURCE_LIMIT_EXCEEDED)
- return
+ return pbv2.Status_RESOURCE_LIMIT_EXCEEDED
}
fail := func(status pbv2.Status) {
@@ -227,25 +255,25 @@ func (r *Relay) handleConnect(s network.Stream, msg *pbv2.HopMessage) {
if err := span.ReserveMemory(2*r.rc.BufferSize, network.ReservationPriorityHigh); err != nil {
log.Debugf("error reserving memory for relay: %s", err)
fail(pbv2.Status_RESOURCE_LIMIT_EXCEEDED)
- return
+ return pbv2.Status_RESOURCE_LIMIT_EXCEEDED
}
if isRelayAddr(a) {
log.Debugf("refusing connection from %s; connection attempt over relay connection")
fail(pbv2.Status_PERMISSION_DENIED)
- return
+ return pbv2.Status_PERMISSION_DENIED
}
dest, err := util.PeerToPeerInfoV2(msg.GetPeer())
if err != nil {
fail(pbv2.Status_MALFORMED_MESSAGE)
- return
+ return pbv2.Status_MALFORMED_MESSAGE
}
if r.acl != nil && !r.acl.AllowConnect(src, s.Conn().RemoteMultiaddr(), dest.ID) {
log.Debugf("refusing connection from %s to %s; permission denied", src, dest.ID)
fail(pbv2.Status_PERMISSION_DENIED)
- return
+ return pbv2.Status_PERMISSION_DENIED
}
r.mx.Lock()
@@ -254,7 +282,7 @@ func (r *Relay) handleConnect(s network.Stream, msg *pbv2.HopMessage) {
r.mx.Unlock()
log.Debugf("refusing connection from %s to %s; no reservation", src, dest.ID)
fail(pbv2.Status_NO_RESERVATION)
- return
+ return pbv2.Status_NO_RESERVATION
}
srcConns := r.conns[src]
@@ -262,7 +290,7 @@ func (r *Relay) handleConnect(s network.Stream, msg *pbv2.HopMessage) {
r.mx.Unlock()
log.Debugf("refusing connection from %s to %s; too many connections from %s", src, dest.ID, src)
fail(pbv2.Status_RESOURCE_LIMIT_EXCEEDED)
- return
+ return pbv2.Status_RESOURCE_LIMIT_EXCEEDED
}
destConns := r.conns[dest.ID]
@@ -270,19 +298,27 @@ func (r *Relay) handleConnect(s network.Stream, msg *pbv2.HopMessage) {
r.mx.Unlock()
log.Debugf("refusing connection from %s to %s; too many connecitons to %s", src, dest.ID, dest.ID)
fail(pbv2.Status_RESOURCE_LIMIT_EXCEEDED)
- return
+ return pbv2.Status_RESOURCE_LIMIT_EXCEEDED
}
r.addConn(src)
r.addConn(dest.ID)
r.mx.Unlock()
+ if r.metricsTracer != nil {
+ r.metricsTracer.ConnectionOpened()
+ }
+ connStTime := time.Now()
+
cleanup := func() {
span.Done()
r.mx.Lock()
r.rmConn(src)
r.rmConn(dest.ID)
r.mx.Unlock()
+ if r.metricsTracer != nil {
+ r.metricsTracer.ConnectionClosed(time.Since(connStTime))
+ }
}
ctx, cancel := context.WithTimeout(r.ctx, ConnectTimeout)
@@ -295,7 +331,7 @@ func (r *Relay) handleConnect(s network.Stream, msg *pbv2.HopMessage) {
log.Debugf("error opening relay stream to %s: %s", dest.ID, err)
cleanup()
r.handleError(s, pbv2.Status_CONNECTION_FAILED)
- return
+ return pbv2.Status_CONNECTION_FAILED
}
fail = func(status pbv2.Status) {
@@ -307,14 +343,14 @@ func (r *Relay) handleConnect(s network.Stream, msg *pbv2.HopMessage) {
if err := bs.Scope().SetService(ServiceName); err != nil {
log.Debugf("error attaching stream to relay service: %s", err)
fail(pbv2.Status_RESOURCE_LIMIT_EXCEEDED)
- return
+ return pbv2.Status_RESOURCE_LIMIT_EXCEEDED
}
// handshake
if err := bs.Scope().ReserveMemory(maxMessageSize, network.ReservationPriorityAlways); err != nil {
- log.Debugf("erro reserving memory for stream: %s", err)
+ log.Debugf("error reserving memory for stream: %s", err)
fail(pbv2.Status_RESOURCE_LIMIT_EXCEEDED)
- return
+ return pbv2.Status_RESOURCE_LIMIT_EXCEEDED
}
defer bs.Scope().ReleaseMemory(maxMessageSize)
@@ -333,7 +369,7 @@ func (r *Relay) handleConnect(s network.Stream, msg *pbv2.HopMessage) {
if err != nil {
log.Debugf("error writing stop handshake")
fail(pbv2.Status_CONNECTION_FAILED)
- return
+ return pbv2.Status_CONNECTION_FAILED
}
stopmsg.Reset()
@@ -342,19 +378,19 @@ func (r *Relay) handleConnect(s network.Stream, msg *pbv2.HopMessage) {
if err != nil {
log.Debugf("error reading stop response: %s", err.Error())
fail(pbv2.Status_CONNECTION_FAILED)
- return
+ return pbv2.Status_CONNECTION_FAILED
}
if t := stopmsg.GetType(); t != pbv2.StopMessage_STATUS {
log.Debugf("unexpected stop response; not a status message (%d)", t)
fail(pbv2.Status_CONNECTION_FAILED)
- return
+ return pbv2.Status_CONNECTION_FAILED
}
if status := stopmsg.GetStatus(); status != pbv2.Status_OK {
log.Debugf("relay stop failure: %d", status)
fail(pbv2.Status_CONNECTION_FAILED)
- return
+ return pbv2.Status_CONNECTION_FAILED
}
var response pbv2.HopMessage
@@ -369,7 +405,7 @@ func (r *Relay) handleConnect(s network.Stream, msg *pbv2.HopMessage) {
bs.Reset()
s.Reset()
cleanup()
- return
+ return pbv2.Status_CONNECTION_FAILED
}
// reset deadline
@@ -377,11 +413,11 @@ func (r *Relay) handleConnect(s network.Stream, msg *pbv2.HopMessage) {
log.Infof("relaying connection from %s to %s", src, dest.ID)
- goroutines := new(int32)
- *goroutines = 2
+ var goroutines atomic.Int32
+ goroutines.Store(2)
done := func() {
- if atomic.AddInt32(goroutines, -1) == 0 {
+ if goroutines.Add(-1) == 0 {
s.Close()
bs.Close()
cleanup()
@@ -398,6 +434,8 @@ func (r *Relay) handleConnect(s network.Stream, msg *pbv2.HopMessage) {
go r.relayUnlimited(s, bs, src, dest.ID, done)
go r.relayUnlimited(bs, s, dest.ID, src, done)
}
+
+ return pbv2.Status_OK
}
func (r *Relay) addConn(p peer.ID) {
@@ -428,7 +466,7 @@ func (r *Relay) relayLimited(src, dest network.Stream, srcID, destID peer.ID, li
limitedSrc := io.LimitReader(src, limit)
- count, err := io.CopyBuffer(dest, limitedSrc, buf)
+ count, err := r.copyWithBuffer(dest, limitedSrc, buf)
if err != nil {
log.Debugf("relay copy error: %s", err)
// Reset both.
@@ -452,7 +490,7 @@ func (r *Relay) relayUnlimited(src, dest network.Stream, srcID, destID peer.ID,
buf := pool.Get(r.rc.BufferSize)
defer pool.Put(buf)
- count, err := io.CopyBuffer(dest, src, buf)
+ count, err := r.copyWithBuffer(dest, src, buf)
if err != nil {
log.Debugf("relay copy error: %s", err)
// Reset both.
@@ -466,6 +504,47 @@ func (r *Relay) relayUnlimited(src, dest network.Stream, srcID, destID peer.ID,
log.Debugf("relayed %d bytes from %s to %s", count, srcID, destID)
}
+// errInvalidWrite means that a write returned an impossible count.
+// copied from io.errInvalidWrite
+var errInvalidWrite = errors.New("invalid write result")
+
+// copyWithBuffer copies from src to dst using the provided buf until either EOF is reached
+// on src or an error occurs. It reports the number of bytes transferred to metricsTracer.
+// The implementation is a modified form of io.CopyBuffer to support metrics tracking.
+func (r *Relay) copyWithBuffer(dst io.Writer, src io.Reader, buf []byte) (written int64, err error) {
+ for {
+ nr, er := src.Read(buf)
+ if nr > 0 {
+ nw, ew := dst.Write(buf[0:nr])
+ if nw < 0 || nr < nw {
+ nw = 0
+ if ew == nil {
+ ew = errInvalidWrite
+ }
+ }
+ written += int64(nw)
+ if ew != nil {
+ err = ew
+ break
+ }
+ if nr != nw {
+ err = io.ErrShortWrite
+ break
+ }
+ if r.metricsTracer != nil {
+ r.metricsTracer.BytesTransferred(nw)
+ }
+ }
+ if er != nil {
+ if er != io.EOF {
+ err = er
+ }
+ break
+ }
+ }
+ return written, err
+}
+
func (r *Relay) handleError(s network.Stream, status pbv2.Status) {
log.Debugf("relay error: %s (%d)", pbv2.Status_name[int32(status)], status)
err := r.writeResponse(s, status, nil, nil)
@@ -545,6 +624,8 @@ func (r *Relay) makeLimitMsg(p peer.ID) *pbv2.Limit {
}
func (r *Relay) background() {
+ asnutil.Store.Init()
+
ticker := time.NewTicker(time.Minute)
defer ticker.Stop()
@@ -563,13 +644,17 @@ func (r *Relay) gc() {
defer r.mx.Unlock()
now := time.Now()
-
+ cnt := 0
for p, expire := range r.rsvp {
- if expire.Before(now) {
+ if r.closed || expire.Before(now) {
delete(r.rsvp, p)
r.host.ConnManager().UntagPeer(p, "relay-reservation")
+ cnt++
}
}
+ if r.metricsTracer != nil {
+ r.metricsTracer.ReservationClosed(cnt)
+ }
for p, count := range r.conns {
if count == 0 {
@@ -585,9 +670,15 @@ func (r *Relay) disconnected(n network.Network, c network.Conn) {
}
r.mx.Lock()
- defer r.mx.Unlock()
+ _, ok := r.rsvp[p]
+ if ok {
+ delete(r.rsvp, p)
+ }
+ r.mx.Unlock()
- delete(r.rsvp, p)
+ if ok && r.metricsTracer != nil {
+ r.metricsTracer.ReservationClosed(1)
+ }
}
func isRelayAddr(a ma.Multiaddr) bool {
diff --git a/vendor/github.com/libp2p/go-libp2p/p2p/protocol/circuitv2/util/io.go b/vendor/github.com/libp2p/go-libp2p/p2p/protocol/circuitv2/util/io.go
index de314b18e..21e888d9f 100644
--- a/vendor/github.com/libp2p/go-libp2p/p2p/protocol/circuitv2/util/io.go
+++ b/vendor/github.com/libp2p/go-libp2p/p2p/protocol/circuitv2/util/io.go
@@ -5,10 +5,9 @@ import (
"io"
pool "github.com/libp2p/go-buffer-pool"
- "github.com/libp2p/go-msgio/protoio"
-
- "github.com/gogo/protobuf/proto"
+ "github.com/libp2p/go-msgio/pbio"
"github.com/multiformats/go-varint"
+ "google.golang.org/protobuf/proto"
)
type DelimitedReader struct {
@@ -62,6 +61,6 @@ func (d *DelimitedReader) ReadMsg(msg proto.Message) error {
return proto.Unmarshal(buf, msg)
}
-func NewDelimitedWriter(w io.Writer) protoio.WriteCloser {
- return protoio.NewDelimitedWriter(w)
+func NewDelimitedWriter(w io.Writer) pbio.WriteCloser {
+ return pbio.NewDelimitedWriter(w)
}
diff --git a/vendor/github.com/libp2p/go-libp2p/p2p/protocol/circuitv2/util/pbconv.go b/vendor/github.com/libp2p/go-libp2p/p2p/protocol/circuitv2/util/pbconv.go
index ae1b9b50b..f5b72bf05 100644
--- a/vendor/github.com/libp2p/go-libp2p/p2p/protocol/circuitv2/util/pbconv.go
+++ b/vendor/github.com/libp2p/go-libp2p/p2p/protocol/circuitv2/util/pbconv.go
@@ -4,54 +4,11 @@ import (
"errors"
"github.com/libp2p/go-libp2p/core/peer"
- pbv1 "github.com/libp2p/go-libp2p/p2p/protocol/circuitv1/pb"
pbv2 "github.com/libp2p/go-libp2p/p2p/protocol/circuitv2/pb"
ma "github.com/multiformats/go-multiaddr"
)
-func PeerToPeerInfoV1(p *pbv1.CircuitRelay_Peer) (peer.AddrInfo, error) {
- if p == nil {
- return peer.AddrInfo{}, errors.New("nil peer")
- }
-
- id, err := peer.IDFromBytes(p.Id)
- if err != nil {
- return peer.AddrInfo{}, err
- }
-
- var addrs []ma.Multiaddr
- if len(p.Addrs) > 0 {
- addrs = make([]ma.Multiaddr, 0, len(p.Addrs))
- }
-
- for _, addrBytes := range p.Addrs {
- a, err := ma.NewMultiaddrBytes(addrBytes)
- if err == nil {
- addrs = append(addrs, a)
- }
- }
-
- return peer.AddrInfo{ID: id, Addrs: addrs}, nil
-}
-
-func PeerInfoToPeerV1(pi peer.AddrInfo) *pbv1.CircuitRelay_Peer {
- var addrs [][]byte
- if len(pi.Addrs) > 0 {
- addrs = make([][]byte, 0, len(pi.Addrs))
- }
-
- for _, addr := range pi.Addrs {
- addrs = append(addrs, addr.Bytes())
- }
-
- p := new(pbv1.CircuitRelay_Peer)
- p.Id = []byte(pi.ID)
- p.Addrs = addrs
-
- return p
-}
-
func PeerToPeerInfoV2(p *pbv2.Peer) (peer.AddrInfo, error) {
if p == nil {
return peer.AddrInfo{}, errors.New("nil peer")
@@ -62,10 +19,7 @@ func PeerToPeerInfoV2(p *pbv2.Peer) (peer.AddrInfo, error) {
return peer.AddrInfo{}, err
}
- var addrs []ma.Multiaddr
- if len(p.Addrs) > 0 {
- addrs = make([]ma.Multiaddr, 0, len(p.Addrs))
- }
+ addrs := make([]ma.Multiaddr, 0, len(p.Addrs))
for _, addrBytes := range p.Addrs {
a, err := ma.NewMultiaddrBytes(addrBytes)
@@ -78,19 +32,13 @@ func PeerToPeerInfoV2(p *pbv2.Peer) (peer.AddrInfo, error) {
}
func PeerInfoToPeerV2(pi peer.AddrInfo) *pbv2.Peer {
- var addrs [][]byte
-
- if len(pi.Addrs) > 0 {
- addrs = make([][]byte, 0, len(pi.Addrs))
- }
-
+ addrs := make([][]byte, 0, len(pi.Addrs))
for _, addr := range pi.Addrs {
addrs = append(addrs, addr.Bytes())
}
- p := new(pbv2.Peer)
- p.Id = []byte(pi.ID)
- p.Addrs = addrs
-
- return p
+ return &pbv2.Peer{
+ Id: []byte(pi.ID),
+ Addrs: addrs,
+ }
}
diff --git a/vendor/github.com/libp2p/go-libp2p/p2p/protocol/holepunch/filter.go b/vendor/github.com/libp2p/go-libp2p/p2p/protocol/holepunch/filter.go
new file mode 100644
index 000000000..5c1a4f534
--- /dev/null
+++ b/vendor/github.com/libp2p/go-libp2p/p2p/protocol/holepunch/filter.go
@@ -0,0 +1,27 @@
+package holepunch
+
+import (
+ "github.com/libp2p/go-libp2p/core/peer"
+ ma "github.com/multiformats/go-multiaddr"
+)
+
+// WithAddrFilter is a Service option that enables multiaddress filtering.
+// It allows to only send a subset of observed addresses to the remote
+// peer. E.g., only announce TCP or QUIC multi addresses instead of both.
+// It also allows to only consider a subset of received multi addresses
+// that remote peers announced to us.
+// Theoretically, this API also allows to add multi addresses in both cases.
+func WithAddrFilter(f AddrFilter) Option {
+ return func(hps *Service) error {
+ hps.filter = f
+ return nil
+ }
+}
+
+// AddrFilter defines the interface for the multi address filtering.
+type AddrFilter interface {
+ // FilterLocal filters the multi addresses that are sent to the remote peer.
+ FilterLocal(remoteID peer.ID, maddrs []ma.Multiaddr) []ma.Multiaddr
+ // FilterRemote filters the multi addresses received from the remote peer.
+ FilterRemote(remoteID peer.ID, maddrs []ma.Multiaddr) []ma.Multiaddr
+}
diff --git a/vendor/github.com/libp2p/go-libp2p/p2p/protocol/holepunch/holepuncher.go b/vendor/github.com/libp2p/go-libp2p/p2p/protocol/holepunch/holepuncher.go
index 7f2bbbd12..b651bd782 100644
--- a/vendor/github.com/libp2p/go-libp2p/p2p/protocol/holepunch/holepuncher.go
+++ b/vendor/github.com/libp2p/go-libp2p/p2p/protocol/holepunch/holepuncher.go
@@ -10,15 +10,15 @@ import (
"github.com/libp2p/go-libp2p/core/host"
"github.com/libp2p/go-libp2p/core/network"
"github.com/libp2p/go-libp2p/core/peer"
- pb "github.com/libp2p/go-libp2p/p2p/protocol/holepunch/pb"
+ "github.com/libp2p/go-libp2p/p2p/protocol/holepunch/pb"
"github.com/libp2p/go-libp2p/p2p/protocol/identify"
-
- "github.com/libp2p/go-msgio/protoio"
-
+ "github.com/libp2p/go-msgio/pbio"
ma "github.com/multiformats/go-multiaddr"
manet "github.com/multiformats/go-multiaddr/net"
)
+//go:generate protoc --proto_path=$PWD:$PWD/../../.. --go_out=. --go_opt=Mpb/holepunch.proto=./pb pb/holepunch.proto
+
// ErrHolePunchActive is returned from DirectConnect when another hole punching attempt is currently running
var ErrHolePunchActive = errors.New("another hole punching attempt to this peer is active")
@@ -49,14 +49,16 @@ type holePuncher struct {
closed bool
tracer *tracer
+ filter AddrFilter
}
-func newHolePuncher(h host.Host, ids identify.IDService, tracer *tracer) *holePuncher {
+func newHolePuncher(h host.Host, ids identify.IDService, tracer *tracer, filter AddrFilter) *holePuncher {
hp := &holePuncher{
host: h,
ids: ids,
active: make(map[peer.ID]struct{}),
tracer: tracer,
+ filter: filter,
}
hp.ctx, hp.ctxCancel = context.WithCancel(context.Background())
h.Network().Notify((*netNotifiee)(hp))
@@ -99,10 +101,8 @@ func (hp *holePuncher) DirectConnect(p peer.ID) error {
func (hp *holePuncher) directConnect(rp peer.ID) error {
// short-circuit check to see if we already have a direct connection
- for _, c := range hp.host.Network().ConnsToPeer(rp) {
- if !isRelayAddress(c.RemoteMultiaddr()) {
- return nil
- }
+ if getDirectConnection(hp.host, rp) != nil {
+ return nil
}
// short-circuit hole punching if a direct dial works.
@@ -131,8 +131,8 @@ func (hp *holePuncher) directConnect(rp peer.ID) error {
log.Debugw("got inbound proxy conn", "peer", rp)
// hole punch
- for i := 0; i < maxRetries; i++ {
- addrs, rtt, err := hp.initiateHolePunch(rp)
+ for i := 1; i <= maxRetries; i++ {
+ addrs, obsAddrs, rtt, err := hp.initiateHolePunch(rp)
if err != nil {
log.Debugw("hole punching failed", "peer", rp, "error", err)
hp.tracer.ProtocolError(rp, err)
@@ -157,80 +157,97 @@ func (hp *holePuncher) directConnect(rp peer.ID) error {
hp.tracer.EndHolePunch(rp, dt, err)
if err == nil {
log.Debugw("hole punching with successful", "peer", rp, "time", dt)
+ hp.tracer.HolePunchFinished("initiator", i, addrs, obsAddrs, getDirectConnection(hp.host, rp))
return nil
}
case <-hp.ctx.Done():
timer.Stop()
return hp.ctx.Err()
}
+ if i == maxRetries {
+ hp.tracer.HolePunchFinished("initiator", maxRetries, addrs, obsAddrs, nil)
+ }
}
return fmt.Errorf("all retries for hole punch with peer %s failed", rp)
}
// initiateHolePunch opens a new hole punching coordination stream,
// exchanges the addresses and measures the RTT.
-func (hp *holePuncher) initiateHolePunch(rp peer.ID) ([]ma.Multiaddr, time.Duration, error) {
+func (hp *holePuncher) initiateHolePunch(rp peer.ID) ([]ma.Multiaddr, []ma.Multiaddr, time.Duration, error) {
hpCtx := network.WithUseTransient(hp.ctx, "hole-punch")
sCtx := network.WithNoDial(hpCtx, "hole-punch")
str, err := hp.host.NewStream(sCtx, rp, Protocol)
if err != nil {
- return nil, 0, fmt.Errorf("failed to open hole-punching stream: %w", err)
+ return nil, nil, 0, fmt.Errorf("failed to open hole-punching stream: %w", err)
}
defer str.Close()
- addr, rtt, err := hp.initiateHolePunchImpl(str)
+ addr, obsAddr, rtt, err := hp.initiateHolePunchImpl(str)
if err != nil {
log.Debugf("%s", err)
str.Reset()
- return addr, rtt, err
+ return addr, obsAddr, rtt, err
}
- return addr, rtt, err
+ return addr, obsAddr, rtt, err
}
-func (hp *holePuncher) initiateHolePunchImpl(str network.Stream) ([]ma.Multiaddr, time.Duration, error) {
+func (hp *holePuncher) initiateHolePunchImpl(str network.Stream) ([]ma.Multiaddr, []ma.Multiaddr, time.Duration, error) {
if err := str.Scope().SetService(ServiceName); err != nil {
- return nil, 0, fmt.Errorf("error attaching stream to holepunch service: %s", err)
+ return nil, nil, 0, fmt.Errorf("error attaching stream to holepunch service: %s", err)
}
if err := str.Scope().ReserveMemory(maxMsgSize, network.ReservationPriorityAlways); err != nil {
- return nil, 0, fmt.Errorf("error reserving memory for stream: %s", err)
+ return nil, nil, 0, fmt.Errorf("error reserving memory for stream: %s", err)
}
defer str.Scope().ReleaseMemory(maxMsgSize)
- w := protoio.NewDelimitedWriter(str)
- rd := protoio.NewDelimitedReader(str, maxMsgSize)
+ w := pbio.NewDelimitedWriter(str)
+ rd := pbio.NewDelimitedReader(str, maxMsgSize)
str.SetDeadline(time.Now().Add(StreamTimeout))
// send a CONNECT and start RTT measurement.
+ obsAddrs := removeRelayAddrs(hp.ids.OwnObservedAddrs())
+ if hp.filter != nil {
+ obsAddrs = hp.filter.FilterLocal(str.Conn().RemotePeer(), obsAddrs)
+ }
+ if len(obsAddrs) == 0 {
+ return nil, nil, 0, errors.New("aborting hole punch initiation as we have no public address")
+ }
+
start := time.Now()
if err := w.WriteMsg(&pb.HolePunch{
Type: pb.HolePunch_CONNECT.Enum(),
- ObsAddrs: addrsToBytes(removeRelayAddrs(hp.ids.OwnObservedAddrs())),
+ ObsAddrs: addrsToBytes(obsAddrs),
}); err != nil {
str.Reset()
- return nil, 0, err
+ return nil, nil, 0, err
}
// wait for a CONNECT message from the remote peer
var msg pb.HolePunch
if err := rd.ReadMsg(&msg); err != nil {
- return nil, 0, fmt.Errorf("failed to read CONNECT message from remote peer: %w", err)
+ return nil, nil, 0, fmt.Errorf("failed to read CONNECT message from remote peer: %w", err)
}
rtt := time.Since(start)
if t := msg.GetType(); t != pb.HolePunch_CONNECT {
- return nil, 0, fmt.Errorf("expect CONNECT message, got %s", t)
+ return nil, nil, 0, fmt.Errorf("expect CONNECT message, got %s", t)
}
+
addrs := removeRelayAddrs(addrsFromBytes(msg.ObsAddrs))
+ if hp.filter != nil {
+ addrs = hp.filter.FilterRemote(str.Conn().RemotePeer(), addrs)
+ }
+
if len(addrs) == 0 {
- return nil, 0, errors.New("didn't receive any public addresses in CONNECT")
+ return nil, nil, 0, errors.New("didn't receive any public addresses in CONNECT")
}
if err := w.WriteMsg(&pb.HolePunch{Type: pb.HolePunch_SYNC.Enum()}); err != nil {
- return nil, 0, fmt.Errorf("failed to send SYNC message for hole punching: %w", err)
+ return nil, nil, 0, fmt.Errorf("failed to send SYNC message for hole punching: %w", err)
}
- return addrs, rtt, nil
+ return addrs, obsAddrs, rtt, nil
}
func (hp *holePuncher) Close() error {
diff --git a/vendor/github.com/libp2p/go-libp2p/p2p/protocol/holepunch/metrics.go b/vendor/github.com/libp2p/go-libp2p/p2p/protocol/holepunch/metrics.go
new file mode 100644
index 000000000..92ed20b14
--- /dev/null
+++ b/vendor/github.com/libp2p/go-libp2p/p2p/protocol/holepunch/metrics.go
@@ -0,0 +1,187 @@
+package holepunch
+
+import (
+ "github.com/libp2p/go-libp2p/core/network"
+ "github.com/libp2p/go-libp2p/p2p/metricshelper"
+ ma "github.com/multiformats/go-multiaddr"
+ "github.com/prometheus/client_golang/prometheus"
+)
+
+const metricNamespace = "libp2p_holepunch"
+
+var (
+ directDialsTotal = prometheus.NewCounterVec(
+ prometheus.CounterOpts{
+ Namespace: metricNamespace,
+ Name: "direct_dials_total",
+ Help: "Direct Dials Total",
+ },
+ []string{"outcome"},
+ )
+ hpAddressOutcomesTotal = prometheus.NewCounterVec(
+ prometheus.CounterOpts{
+ Namespace: metricNamespace,
+ Name: "address_outcomes_total",
+ Help: "Hole Punch outcomes by Transport",
+ },
+ []string{"side", "num_attempts", "ipv", "transport", "outcome"},
+ )
+ hpOutcomesTotal = prometheus.NewCounterVec(
+ prometheus.CounterOpts{
+ Namespace: metricNamespace,
+ Name: "outcomes_total",
+ Help: "Hole Punch outcomes overall",
+ },
+ []string{"side", "num_attempts", "outcome"},
+ )
+
+ collectors = []prometheus.Collector{
+ directDialsTotal,
+ hpAddressOutcomesTotal,
+ hpOutcomesTotal,
+ }
+)
+
+type MetricsTracer interface {
+ HolePunchFinished(side string, attemptNum int, theirAddrs []ma.Multiaddr, ourAddr []ma.Multiaddr, directConn network.ConnMultiaddrs)
+ DirectDialFinished(success bool)
+}
+
+type metricsTracer struct{}
+
+var _ MetricsTracer = &metricsTracer{}
+
+type metricsTracerSetting struct {
+ reg prometheus.Registerer
+}
+
+type MetricsTracerOption func(*metricsTracerSetting)
+
+func WithRegisterer(reg prometheus.Registerer) MetricsTracerOption {
+ return func(s *metricsTracerSetting) {
+ if reg != nil {
+ s.reg = reg
+ }
+ }
+}
+
+func NewMetricsTracer(opts ...MetricsTracerOption) MetricsTracer {
+ setting := &metricsTracerSetting{reg: prometheus.DefaultRegisterer}
+ for _, opt := range opts {
+ opt(setting)
+ }
+ metricshelper.RegisterCollectors(setting.reg, collectors...)
+ // initialise metrics's labels so that the first data point is handled correctly
+ for _, side := range []string{"initiator", "receiver"} {
+ for _, numAttempts := range []string{"1", "2", "3", "4"} {
+ for _, outcome := range []string{"success", "failed", "cancelled", "no_suitable_address"} {
+ for _, ipv := range []string{"ip4", "ip6"} {
+ for _, transport := range []string{"quic", "quic-v1", "tcp", "webtransport"} {
+ hpAddressOutcomesTotal.WithLabelValues(side, numAttempts, ipv, transport, outcome)
+ }
+ }
+ if outcome == "cancelled" {
+ // not a valid outcome for the overall holepunch metric
+ continue
+ }
+ hpOutcomesTotal.WithLabelValues(side, numAttempts, outcome)
+ }
+ }
+ }
+ return &metricsTracer{}
+}
+
+// HolePunchFinished tracks metrics completion of a holepunch. Metrics are tracked on
+// a holepunch attempt level and on individual addresses involved in a holepunch.
+//
+// outcome for an address is computed as:
+//
+// - success:
+// A direct connection was established with the peer using this address
+// - cancelled:
+// A direct connection was established with the peer but not using this address
+// - failed:
+// No direct connection was made to the peer and the peer reported an address
+// with the same transport as this address
+// - no_suitable_address:
+// The peer reported no address with the same transport as this address
+func (mt *metricsTracer) HolePunchFinished(side string, numAttempts int,
+ remoteAddrs []ma.Multiaddr, localAddrs []ma.Multiaddr, directConn network.ConnMultiaddrs) {
+ tags := metricshelper.GetStringSlice()
+ defer metricshelper.PutStringSlice(tags)
+
+ *tags = append(*tags, side, getNumAttemptString(numAttempts))
+ var dipv, dtransport string
+ if directConn != nil {
+ dipv = metricshelper.GetIPVersion(directConn.LocalMultiaddr())
+ dtransport = metricshelper.GetTransport(directConn.LocalMultiaddr())
+ }
+
+ matchingAddressCount := 0
+ // calculate holepunch outcome for all the addresses involved
+ for _, la := range localAddrs {
+ lipv := metricshelper.GetIPVersion(la)
+ ltransport := metricshelper.GetTransport(la)
+
+ matchingAddress := false
+ for _, ra := range remoteAddrs {
+ ripv := metricshelper.GetIPVersion(ra)
+ rtransport := metricshelper.GetTransport(ra)
+ if ripv == lipv && rtransport == ltransport {
+ // the peer reported an address with the same transport
+ matchingAddress = true
+ matchingAddressCount++
+
+ *tags = append(*tags, ripv, rtransport)
+ if directConn != nil && dipv == ripv && dtransport == rtransport {
+ // the connection was made using this address
+ *tags = append(*tags, "success")
+ } else if directConn != nil {
+ // connection was made but not using this address
+ *tags = append(*tags, "cancelled")
+ } else {
+ // no connection was made
+ *tags = append(*tags, "failed")
+ }
+ hpAddressOutcomesTotal.WithLabelValues(*tags...).Inc()
+ *tags = (*tags)[:2] // 2 because we want to keep (side, numAttempts)
+ break
+ }
+ }
+ if !matchingAddress {
+ *tags = append(*tags, lipv, ltransport, "no_suitable_address")
+ hpAddressOutcomesTotal.WithLabelValues(*tags...).Inc()
+ *tags = (*tags)[:2] // 2 because we want to keep (side, numAttempts)
+ }
+ }
+
+ outcome := "failed"
+ if directConn != nil {
+ outcome = "success"
+ } else if matchingAddressCount == 0 {
+ // there were no matching addresses, this attempt was going to fail
+ outcome = "no_suitable_address"
+ }
+
+ *tags = append(*tags, outcome)
+ hpOutcomesTotal.WithLabelValues(*tags...).Inc()
+}
+
+func getNumAttemptString(numAttempt int) string {
+ var attemptStr = [...]string{"0", "1", "2", "3", "4", "5"}
+ if numAttempt > 5 {
+ return "> 5"
+ }
+ return attemptStr[numAttempt]
+}
+
+func (mt *metricsTracer) DirectDialFinished(success bool) {
+ tags := metricshelper.GetStringSlice()
+ defer metricshelper.PutStringSlice(tags)
+ if success {
+ *tags = append(*tags, "success")
+ } else {
+ *tags = append(*tags, "failed")
+ }
+ directDialsTotal.WithLabelValues(*tags...).Inc()
+}
diff --git a/vendor/github.com/libp2p/go-libp2p/p2p/protocol/holepunch/pb/Makefile b/vendor/github.com/libp2p/go-libp2p/p2p/protocol/holepunch/pb/Makefile
deleted file mode 100644
index eb14b5768..000000000
--- a/vendor/github.com/libp2p/go-libp2p/p2p/protocol/holepunch/pb/Makefile
+++ /dev/null
@@ -1,11 +0,0 @@
-PB = $(wildcard *.proto)
-GO = $(PB:.proto=.pb.go)
-
-all: $(GO)
-
-%.pb.go: %.proto
- protoc --proto_path=$(GOPATH)/src:. --gogofast_out=. $<
-
-clean:
- rm -f *.pb.go
- rm -f *.go
diff --git a/vendor/github.com/libp2p/go-libp2p/p2p/protocol/holepunch/pb/holepunch.pb.go b/vendor/github.com/libp2p/go-libp2p/p2p/protocol/holepunch/pb/holepunch.pb.go
index 3d7e21acf..ca568580c 100644
--- a/vendor/github.com/libp2p/go-libp2p/p2p/protocol/holepunch/pb/holepunch.pb.go
+++ b/vendor/github.com/libp2p/go-libp2p/p2p/protocol/holepunch/pb/holepunch.pb.go
@@ -1,27 +1,24 @@
-// Code generated by protoc-gen-gogo. DO NOT EDIT.
-// source: holepunch.proto
+// Code generated by protoc-gen-go. DO NOT EDIT.
+// versions:
+// protoc-gen-go v1.30.0
+// protoc v3.21.12
+// source: pb/holepunch.proto
-package holepunch_pb
+package pb
import (
- fmt "fmt"
- github_com_gogo_protobuf_proto "github.com/gogo/protobuf/proto"
- proto "github.com/gogo/protobuf/proto"
- io "io"
- math "math"
- math_bits "math/bits"
+ protoreflect "google.golang.org/protobuf/reflect/protoreflect"
+ protoimpl "google.golang.org/protobuf/runtime/protoimpl"
+ reflect "reflect"
+ sync "sync"
)
-// Reference imports to suppress errors if they are not otherwise used.
-var _ = proto.Marshal
-var _ = fmt.Errorf
-var _ = math.Inf
-
-// This is a compile-time assertion to ensure that this generated file
-// is compatible with the proto package it is being compiled against.
-// A compilation error at this line likely means your copy of the
-// proto package needs to be updated.
-const _ = proto.GoGoProtoPackageIsVersion3 // please upgrade the proto package
+const (
+ // Verify that this generated code is sufficiently up-to-date.
+ _ = protoimpl.EnforceVersion(20 - protoimpl.MinVersion)
+ // Verify that runtime/protoimpl is sufficiently up-to-date.
+ _ = protoimpl.EnforceVersion(protoimpl.MaxVersion - 20)
+)
type HolePunch_Type int32
@@ -30,15 +27,17 @@ const (
HolePunch_SYNC HolePunch_Type = 300
)
-var HolePunch_Type_name = map[int32]string{
- 100: "CONNECT",
- 300: "SYNC",
-}
-
-var HolePunch_Type_value = map[string]int32{
- "CONNECT": 100,
- "SYNC": 300,
-}
+// Enum value maps for HolePunch_Type.
+var (
+ HolePunch_Type_name = map[int32]string{
+ 100: "CONNECT",
+ 300: "SYNC",
+ }
+ HolePunch_Type_value = map[string]int32{
+ "CONNECT": 100,
+ "SYNC": 300,
+ }
+)
func (x HolePunch_Type) Enum() *HolePunch_Type {
p := new(HolePunch_Type)
@@ -47,369 +46,170 @@ func (x HolePunch_Type) Enum() *HolePunch_Type {
}
func (x HolePunch_Type) String() string {
- return proto.EnumName(HolePunch_Type_name, int32(x))
+ return protoimpl.X.EnumStringOf(x.Descriptor(), protoreflect.EnumNumber(x))
+}
+
+func (HolePunch_Type) Descriptor() protoreflect.EnumDescriptor {
+ return file_pb_holepunch_proto_enumTypes[0].Descriptor()
+}
+
+func (HolePunch_Type) Type() protoreflect.EnumType {
+ return &file_pb_holepunch_proto_enumTypes[0]
}
-func (x *HolePunch_Type) UnmarshalJSON(data []byte) error {
- value, err := proto.UnmarshalJSONEnum(HolePunch_Type_value, data, "HolePunch_Type")
+func (x HolePunch_Type) Number() protoreflect.EnumNumber {
+ return protoreflect.EnumNumber(x)
+}
+
+// Deprecated: Do not use.
+func (x *HolePunch_Type) UnmarshalJSON(b []byte) error {
+ num, err := protoimpl.X.UnmarshalJSONEnum(x.Descriptor(), b)
if err != nil {
return err
}
- *x = HolePunch_Type(value)
+ *x = HolePunch_Type(num)
return nil
}
+// Deprecated: Use HolePunch_Type.Descriptor instead.
func (HolePunch_Type) EnumDescriptor() ([]byte, []int) {
- return fileDescriptor_290ddea0f23ef64a, []int{0, 0}
+ return file_pb_holepunch_proto_rawDescGZIP(), []int{0, 0}
}
// spec: https://github.com/libp2p/specs/blob/master/relay/DCUtR.md
type HolePunch struct {
- Type *HolePunch_Type `protobuf:"varint,1,req,name=type,enum=holepunch.pb.HolePunch_Type" json:"type,omitempty"`
- ObsAddrs [][]byte `protobuf:"bytes,2,rep,name=ObsAddrs" json:"ObsAddrs,omitempty"`
- XXX_NoUnkeyedLiteral struct{} `json:"-"`
- XXX_unrecognized []byte `json:"-"`
- XXX_sizecache int32 `json:"-"`
-}
+ state protoimpl.MessageState
+ sizeCache protoimpl.SizeCache
+ unknownFields protoimpl.UnknownFields
-func (m *HolePunch) Reset() { *m = HolePunch{} }
-func (m *HolePunch) String() string { return proto.CompactTextString(m) }
-func (*HolePunch) ProtoMessage() {}
-func (*HolePunch) Descriptor() ([]byte, []int) {
- return fileDescriptor_290ddea0f23ef64a, []int{0}
-}
-func (m *HolePunch) XXX_Unmarshal(b []byte) error {
- return m.Unmarshal(b)
-}
-func (m *HolePunch) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) {
- if deterministic {
- return xxx_messageInfo_HolePunch.Marshal(b, m, deterministic)
- } else {
- b = b[:cap(b)]
- n, err := m.MarshalToSizedBuffer(b)
- if err != nil {
- return nil, err
- }
- return b[:n], nil
- }
-}
-func (m *HolePunch) XXX_Merge(src proto.Message) {
- xxx_messageInfo_HolePunch.Merge(m, src)
-}
-func (m *HolePunch) XXX_Size() int {
- return m.Size()
-}
-func (m *HolePunch) XXX_DiscardUnknown() {
- xxx_messageInfo_HolePunch.DiscardUnknown(m)
+ Type *HolePunch_Type `protobuf:"varint,1,req,name=type,enum=holepunch.pb.HolePunch_Type" json:"type,omitempty"`
+ ObsAddrs [][]byte `protobuf:"bytes,2,rep,name=ObsAddrs" json:"ObsAddrs,omitempty"`
}
-var xxx_messageInfo_HolePunch proto.InternalMessageInfo
-
-func (m *HolePunch) GetType() HolePunch_Type {
- if m != nil && m.Type != nil {
- return *m.Type
+func (x *HolePunch) Reset() {
+ *x = HolePunch{}
+ if protoimpl.UnsafeEnabled {
+ mi := &file_pb_holepunch_proto_msgTypes[0]
+ ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
+ ms.StoreMessageInfo(mi)
}
- return HolePunch_CONNECT
}
-func (m *HolePunch) GetObsAddrs() [][]byte {
- if m != nil {
- return m.ObsAddrs
- }
- return nil
-}
-
-func init() {
- proto.RegisterEnum("holepunch.pb.HolePunch_Type", HolePunch_Type_name, HolePunch_Type_value)
- proto.RegisterType((*HolePunch)(nil), "holepunch.pb.HolePunch")
+func (x *HolePunch) String() string {
+ return protoimpl.X.MessageStringOf(x)
}
-func init() { proto.RegisterFile("holepunch.proto", fileDescriptor_290ddea0f23ef64a) }
-
-var fileDescriptor_290ddea0f23ef64a = []byte{
- // 153 bytes of a gzipped FileDescriptorProto
- 0x1f, 0x8b, 0x08, 0x00, 0x00, 0x00, 0x00, 0x00, 0x02, 0xff, 0xe2, 0xe2, 0xcf, 0xc8, 0xcf, 0x49,
- 0x2d, 0x28, 0xcd, 0x4b, 0xce, 0xd0, 0x2b, 0x28, 0xca, 0x2f, 0xc9, 0x17, 0xe2, 0x41, 0x12, 0x48,
- 0x52, 0xaa, 0xe4, 0xe2, 0xf4, 0xc8, 0xcf, 0x49, 0x0d, 0x00, 0xf1, 0x85, 0x0c, 0xb8, 0x58, 0x4a,
- 0x2a, 0x0b, 0x52, 0x25, 0x18, 0x15, 0x98, 0x34, 0xf8, 0x8c, 0x64, 0xf4, 0x90, 0x55, 0xea, 0xc1,
- 0x95, 0xe9, 0x85, 0x54, 0x16, 0xa4, 0x06, 0x81, 0x55, 0x0a, 0x49, 0x71, 0x71, 0xf8, 0x27, 0x15,
- 0x3b, 0xa6, 0xa4, 0x14, 0x15, 0x4b, 0x30, 0x29, 0x30, 0x6b, 0xf0, 0x04, 0xc1, 0xf9, 0x4a, 0x72,
- 0x5c, 0x2c, 0x20, 0x95, 0x42, 0xdc, 0x5c, 0xec, 0xce, 0xfe, 0x7e, 0x7e, 0xae, 0xce, 0x21, 0x02,
- 0x29, 0x42, 0x9c, 0x5c, 0x2c, 0xc1, 0x91, 0x7e, 0xce, 0x02, 0x6b, 0x98, 0x9c, 0x78, 0x4e, 0x3c,
- 0x92, 0x63, 0xbc, 0xf0, 0x48, 0x8e, 0xf1, 0xc1, 0x23, 0x39, 0x46, 0x40, 0x00, 0x00, 0x00, 0xff,
- 0xff, 0x34, 0x8d, 0x41, 0x7d, 0xa8, 0x00, 0x00, 0x00,
-}
+func (*HolePunch) ProtoMessage() {}
-func (m *HolePunch) Marshal() (dAtA []byte, err error) {
- size := m.Size()
- dAtA = make([]byte, size)
- n, err := m.MarshalToSizedBuffer(dAtA[:size])
- if err != nil {
- return nil, err
+func (x *HolePunch) ProtoReflect() protoreflect.Message {
+ mi := &file_pb_holepunch_proto_msgTypes[0]
+ if protoimpl.UnsafeEnabled && x != nil {
+ ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
+ if ms.LoadMessageInfo() == nil {
+ ms.StoreMessageInfo(mi)
+ }
+ return ms
}
- return dAtA[:n], nil
+ return mi.MessageOf(x)
}
-func (m *HolePunch) MarshalTo(dAtA []byte) (int, error) {
- size := m.Size()
- return m.MarshalToSizedBuffer(dAtA[:size])
+// Deprecated: Use HolePunch.ProtoReflect.Descriptor instead.
+func (*HolePunch) Descriptor() ([]byte, []int) {
+ return file_pb_holepunch_proto_rawDescGZIP(), []int{0}
}
-func (m *HolePunch) MarshalToSizedBuffer(dAtA []byte) (int, error) {
- i := len(dAtA)
- _ = i
- var l int
- _ = l
- if m.XXX_unrecognized != nil {
- i -= len(m.XXX_unrecognized)
- copy(dAtA[i:], m.XXX_unrecognized)
- }
- if len(m.ObsAddrs) > 0 {
- for iNdEx := len(m.ObsAddrs) - 1; iNdEx >= 0; iNdEx-- {
- i -= len(m.ObsAddrs[iNdEx])
- copy(dAtA[i:], m.ObsAddrs[iNdEx])
- i = encodeVarintHolepunch(dAtA, i, uint64(len(m.ObsAddrs[iNdEx])))
- i--
- dAtA[i] = 0x12
- }
+func (x *HolePunch) GetType() HolePunch_Type {
+ if x != nil && x.Type != nil {
+ return *x.Type
}
- if m.Type == nil {
- return 0, github_com_gogo_protobuf_proto.NewRequiredNotSetError("type")
- } else {
- i = encodeVarintHolepunch(dAtA, i, uint64(*m.Type))
- i--
- dAtA[i] = 0x8
- }
- return len(dAtA) - i, nil
+ return HolePunch_CONNECT
}
-func encodeVarintHolepunch(dAtA []byte, offset int, v uint64) int {
- offset -= sovHolepunch(v)
- base := offset
- for v >= 1<<7 {
- dAtA[offset] = uint8(v&0x7f | 0x80)
- v >>= 7
- offset++
- }
- dAtA[offset] = uint8(v)
- return base
-}
-func (m *HolePunch) Size() (n int) {
- if m == nil {
- return 0
- }
- var l int
- _ = l
- if m.Type != nil {
- n += 1 + sovHolepunch(uint64(*m.Type))
+func (x *HolePunch) GetObsAddrs() [][]byte {
+ if x != nil {
+ return x.ObsAddrs
}
- if len(m.ObsAddrs) > 0 {
- for _, b := range m.ObsAddrs {
- l = len(b)
- n += 1 + l + sovHolepunch(uint64(l))
- }
- }
- if m.XXX_unrecognized != nil {
- n += len(m.XXX_unrecognized)
- }
- return n
+ return nil
}
-func sovHolepunch(x uint64) (n int) {
- return (math_bits.Len64(x|1) + 6) / 7
-}
-func sozHolepunch(x uint64) (n int) {
- return sovHolepunch(uint64((x << 1) ^ uint64((int64(x) >> 63))))
-}
-func (m *HolePunch) Unmarshal(dAtA []byte) error {
- var hasFields [1]uint64
- l := len(dAtA)
- iNdEx := 0
- for iNdEx < l {
- preIndex := iNdEx
- var wire uint64
- for shift := uint(0); ; shift += 7 {
- if shift >= 64 {
- return ErrIntOverflowHolepunch
- }
- if iNdEx >= l {
- return io.ErrUnexpectedEOF
- }
- b := dAtA[iNdEx]
- iNdEx++
- wire |= uint64(b&0x7F) << shift
- if b < 0x80 {
- break
- }
- }
- fieldNum := int32(wire >> 3)
- wireType := int(wire & 0x7)
- if wireType == 4 {
- return fmt.Errorf("proto: HolePunch: wiretype end group for non-group")
- }
- if fieldNum <= 0 {
- return fmt.Errorf("proto: HolePunch: illegal tag %d (wire type %d)", fieldNum, wire)
- }
- switch fieldNum {
- case 1:
- if wireType != 0 {
- return fmt.Errorf("proto: wrong wireType = %d for field Type", wireType)
- }
- var v HolePunch_Type
- for shift := uint(0); ; shift += 7 {
- if shift >= 64 {
- return ErrIntOverflowHolepunch
- }
- if iNdEx >= l {
- return io.ErrUnexpectedEOF
- }
- b := dAtA[iNdEx]
- iNdEx++
- v |= HolePunch_Type(b&0x7F) << shift
- if b < 0x80 {
- break
- }
- }
- m.Type = &v
- hasFields[0] |= uint64(0x00000001)
- case 2:
- if wireType != 2 {
- return fmt.Errorf("proto: wrong wireType = %d for field ObsAddrs", wireType)
- }
- var byteLen int
- for shift := uint(0); ; shift += 7 {
- if shift >= 64 {
- return ErrIntOverflowHolepunch
- }
- if iNdEx >= l {
- return io.ErrUnexpectedEOF
- }
- b := dAtA[iNdEx]
- iNdEx++
- byteLen |= int(b&0x7F) << shift
- if b < 0x80 {
- break
- }
- }
- if byteLen < 0 {
- return ErrInvalidLengthHolepunch
- }
- postIndex := iNdEx + byteLen
- if postIndex < 0 {
- return ErrInvalidLengthHolepunch
- }
- if postIndex > l {
- return io.ErrUnexpectedEOF
- }
- m.ObsAddrs = append(m.ObsAddrs, make([]byte, postIndex-iNdEx))
- copy(m.ObsAddrs[len(m.ObsAddrs)-1], dAtA[iNdEx:postIndex])
- iNdEx = postIndex
- default:
- iNdEx = preIndex
- skippy, err := skipHolepunch(dAtA[iNdEx:])
- if err != nil {
- return err
- }
- if (skippy < 0) || (iNdEx+skippy) < 0 {
- return ErrInvalidLengthHolepunch
- }
- if (iNdEx + skippy) > l {
- return io.ErrUnexpectedEOF
- }
- m.XXX_unrecognized = append(m.XXX_unrecognized, dAtA[iNdEx:iNdEx+skippy]...)
- iNdEx += skippy
- }
- }
- if hasFields[0]&uint64(0x00000001) == 0 {
- return github_com_gogo_protobuf_proto.NewRequiredNotSetError("type")
- }
+var File_pb_holepunch_proto protoreflect.FileDescriptor
- if iNdEx > l {
- return io.ErrUnexpectedEOF
- }
- return nil
+var file_pb_holepunch_proto_rawDesc = []byte{
+ 0x0a, 0x12, 0x70, 0x62, 0x2f, 0x68, 0x6f, 0x6c, 0x65, 0x70, 0x75, 0x6e, 0x63, 0x68, 0x2e, 0x70,
+ 0x72, 0x6f, 0x74, 0x6f, 0x12, 0x0c, 0x68, 0x6f, 0x6c, 0x65, 0x70, 0x75, 0x6e, 0x63, 0x68, 0x2e,
+ 0x70, 0x62, 0x22, 0x79, 0x0a, 0x09, 0x48, 0x6f, 0x6c, 0x65, 0x50, 0x75, 0x6e, 0x63, 0x68, 0x12,
+ 0x30, 0x0a, 0x04, 0x74, 0x79, 0x70, 0x65, 0x18, 0x01, 0x20, 0x02, 0x28, 0x0e, 0x32, 0x1c, 0x2e,
+ 0x68, 0x6f, 0x6c, 0x65, 0x70, 0x75, 0x6e, 0x63, 0x68, 0x2e, 0x70, 0x62, 0x2e, 0x48, 0x6f, 0x6c,
+ 0x65, 0x50, 0x75, 0x6e, 0x63, 0x68, 0x2e, 0x54, 0x79, 0x70, 0x65, 0x52, 0x04, 0x74, 0x79, 0x70,
+ 0x65, 0x12, 0x1a, 0x0a, 0x08, 0x4f, 0x62, 0x73, 0x41, 0x64, 0x64, 0x72, 0x73, 0x18, 0x02, 0x20,
+ 0x03, 0x28, 0x0c, 0x52, 0x08, 0x4f, 0x62, 0x73, 0x41, 0x64, 0x64, 0x72, 0x73, 0x22, 0x1e, 0x0a,
+ 0x04, 0x54, 0x79, 0x70, 0x65, 0x12, 0x0b, 0x0a, 0x07, 0x43, 0x4f, 0x4e, 0x4e, 0x45, 0x43, 0x54,
+ 0x10, 0x64, 0x12, 0x09, 0x0a, 0x04, 0x53, 0x59, 0x4e, 0x43, 0x10, 0xac, 0x02,
}
-func skipHolepunch(dAtA []byte) (n int, err error) {
- l := len(dAtA)
- iNdEx := 0
- depth := 0
- for iNdEx < l {
- var wire uint64
- for shift := uint(0); ; shift += 7 {
- if shift >= 64 {
- return 0, ErrIntOverflowHolepunch
- }
- if iNdEx >= l {
- return 0, io.ErrUnexpectedEOF
- }
- b := dAtA[iNdEx]
- iNdEx++
- wire |= (uint64(b) & 0x7F) << shift
- if b < 0x80 {
- break
- }
- }
- wireType := int(wire & 0x7)
- switch wireType {
- case 0:
- for shift := uint(0); ; shift += 7 {
- if shift >= 64 {
- return 0, ErrIntOverflowHolepunch
- }
- if iNdEx >= l {
- return 0, io.ErrUnexpectedEOF
- }
- iNdEx++
- if dAtA[iNdEx-1] < 0x80 {
- break
- }
- }
- case 1:
- iNdEx += 8
- case 2:
- var length int
- for shift := uint(0); ; shift += 7 {
- if shift >= 64 {
- return 0, ErrIntOverflowHolepunch
- }
- if iNdEx >= l {
- return 0, io.ErrUnexpectedEOF
- }
- b := dAtA[iNdEx]
- iNdEx++
- length |= (int(b) & 0x7F) << shift
- if b < 0x80 {
- break
- }
- }
- if length < 0 {
- return 0, ErrInvalidLengthHolepunch
- }
- iNdEx += length
- case 3:
- depth++
- case 4:
- if depth == 0 {
- return 0, ErrUnexpectedEndOfGroupHolepunch
+
+var (
+ file_pb_holepunch_proto_rawDescOnce sync.Once
+ file_pb_holepunch_proto_rawDescData = file_pb_holepunch_proto_rawDesc
+)
+
+func file_pb_holepunch_proto_rawDescGZIP() []byte {
+ file_pb_holepunch_proto_rawDescOnce.Do(func() {
+ file_pb_holepunch_proto_rawDescData = protoimpl.X.CompressGZIP(file_pb_holepunch_proto_rawDescData)
+ })
+ return file_pb_holepunch_proto_rawDescData
+}
+
+var file_pb_holepunch_proto_enumTypes = make([]protoimpl.EnumInfo, 1)
+var file_pb_holepunch_proto_msgTypes = make([]protoimpl.MessageInfo, 1)
+var file_pb_holepunch_proto_goTypes = []interface{}{
+ (HolePunch_Type)(0), // 0: holepunch.pb.HolePunch.Type
+ (*HolePunch)(nil), // 1: holepunch.pb.HolePunch
+}
+var file_pb_holepunch_proto_depIdxs = []int32{
+ 0, // 0: holepunch.pb.HolePunch.type:type_name -> holepunch.pb.HolePunch.Type
+ 1, // [1:1] is the sub-list for method output_type
+ 1, // [1:1] is the sub-list for method input_type
+ 1, // [1:1] is the sub-list for extension type_name
+ 1, // [1:1] is the sub-list for extension extendee
+ 0, // [0:1] is the sub-list for field type_name
+}
+
+func init() { file_pb_holepunch_proto_init() }
+func file_pb_holepunch_proto_init() {
+ if File_pb_holepunch_proto != nil {
+ return
+ }
+ if !protoimpl.UnsafeEnabled {
+ file_pb_holepunch_proto_msgTypes[0].Exporter = func(v interface{}, i int) interface{} {
+ switch v := v.(*HolePunch); i {
+ case 0:
+ return &v.state
+ case 1:
+ return &v.sizeCache
+ case 2:
+ return &v.unknownFields
+ default:
+ return nil
}
- depth--
- case 5:
- iNdEx += 4
- default:
- return 0, fmt.Errorf("proto: illegal wireType %d", wireType)
- }
- if iNdEx < 0 {
- return 0, ErrInvalidLengthHolepunch
- }
- if depth == 0 {
- return iNdEx, nil
}
}
- return 0, io.ErrUnexpectedEOF
+ type x struct{}
+ out := protoimpl.TypeBuilder{
+ File: protoimpl.DescBuilder{
+ GoPackagePath: reflect.TypeOf(x{}).PkgPath(),
+ RawDescriptor: file_pb_holepunch_proto_rawDesc,
+ NumEnums: 1,
+ NumMessages: 1,
+ NumExtensions: 0,
+ NumServices: 0,
+ },
+ GoTypes: file_pb_holepunch_proto_goTypes,
+ DependencyIndexes: file_pb_holepunch_proto_depIdxs,
+ EnumInfos: file_pb_holepunch_proto_enumTypes,
+ MessageInfos: file_pb_holepunch_proto_msgTypes,
+ }.Build()
+ File_pb_holepunch_proto = out.File
+ file_pb_holepunch_proto_rawDesc = nil
+ file_pb_holepunch_proto_goTypes = nil
+ file_pb_holepunch_proto_depIdxs = nil
}
-
-var (
- ErrInvalidLengthHolepunch = fmt.Errorf("proto: negative length found during unmarshaling")
- ErrIntOverflowHolepunch = fmt.Errorf("proto: integer overflow")
- ErrUnexpectedEndOfGroupHolepunch = fmt.Errorf("proto: unexpected end of group")
-)
diff --git a/vendor/github.com/libp2p/go-libp2p/p2p/protocol/holepunch/svc.go b/vendor/github.com/libp2p/go-libp2p/p2p/protocol/holepunch/svc.go
index 1df779fc3..47bf434fb 100644
--- a/vendor/github.com/libp2p/go-libp2p/p2p/protocol/holepunch/svc.go
+++ b/vendor/github.com/libp2p/go-libp2p/p2p/protocol/holepunch/svc.go
@@ -7,16 +7,17 @@ import (
"sync"
"time"
+ logging "github.com/ipfs/go-log/v2"
"github.com/libp2p/go-libp2p/core/event"
"github.com/libp2p/go-libp2p/core/host"
"github.com/libp2p/go-libp2p/core/network"
"github.com/libp2p/go-libp2p/core/peer"
"github.com/libp2p/go-libp2p/core/protocol"
- pb "github.com/libp2p/go-libp2p/p2p/protocol/holepunch/pb"
+ "github.com/libp2p/go-libp2p/p2p/host/eventbus"
+ "github.com/libp2p/go-libp2p/p2p/protocol/holepunch/pb"
"github.com/libp2p/go-libp2p/p2p/protocol/identify"
+ "github.com/libp2p/go-msgio/pbio"
- logging "github.com/ipfs/go-log/v2"
- "github.com/libp2p/go-msgio/protoio"
ma "github.com/multiformats/go-multiaddr"
)
@@ -53,6 +54,7 @@ type Service struct {
hasPublicAddrsChan chan struct{}
tracer *tracer
+ filter AddrFilter
refCount sync.WaitGroup
}
@@ -82,6 +84,7 @@ func NewService(h host.Host, ids identify.IDService, opts ...Option) (*Service,
return nil, err
}
}
+ s.tracer.Start()
s.refCount.Add(1)
go s.watchForPublicAddr()
@@ -122,7 +125,7 @@ func (s *Service) watchForPublicAddr() {
}
// Only start the holePuncher if we're behind a NAT / firewall.
- sub, err := s.host.EventBus().Subscribe(&event.EvtLocalReachabilityChanged{})
+ sub, err := s.host.EventBus().Subscribe(&event.EvtLocalReachabilityChanged{}, eventbus.Name("holepunch"))
if err != nil {
log.Debugf("failed to subscripe to Reachability event: %s", err)
return
@@ -140,7 +143,7 @@ func (s *Service) watchForPublicAddr() {
continue
}
s.holePuncherMx.Lock()
- s.holePuncher = newHolePuncher(s.host, s.ids, s.tracer)
+ s.holePuncher = newHolePuncher(s.host, s.ids, s.tracer, s.filter)
s.holePuncherMx.Unlock()
close(s.hasPublicAddrsChan)
return
@@ -163,25 +166,29 @@ func (s *Service) Close() error {
return err
}
-func (s *Service) incomingHolePunch(str network.Stream) (rtt time.Duration, addrs []ma.Multiaddr, err error) {
+func (s *Service) incomingHolePunch(str network.Stream) (rtt time.Duration, remoteAddrs []ma.Multiaddr, ownAddrs []ma.Multiaddr, err error) {
// sanity check: a hole punch request should only come from peers behind a relay
if !isRelayAddress(str.Conn().RemoteMultiaddr()) {
- return 0, nil, fmt.Errorf("received hole punch stream: %s", str.Conn().RemoteMultiaddr())
+ return 0, nil, nil, fmt.Errorf("received hole punch stream: %s", str.Conn().RemoteMultiaddr())
}
- ownAddrs := removeRelayAddrs(s.ids.OwnObservedAddrs())
+ ownAddrs = removeRelayAddrs(s.ids.OwnObservedAddrs())
+ if s.filter != nil {
+ ownAddrs = s.filter.FilterLocal(str.Conn().RemotePeer(), ownAddrs)
+ }
+
// If we can't tell the peer where to dial us, there's no point in starting the hole punching.
if len(ownAddrs) == 0 {
- return 0, nil, errors.New("rejecting hole punch request, as we don't have any public addresses")
+ return 0, nil, nil, errors.New("rejecting hole punch request, as we don't have any public addresses")
}
if err := str.Scope().ReserveMemory(maxMsgSize, network.ReservationPriorityAlways); err != nil {
log.Debugf("error reserving memory for stream: %s, err")
- return 0, nil, err
+ return 0, nil, nil, err
}
defer str.Scope().ReleaseMemory(maxMsgSize)
- wr := protoio.NewDelimitedWriter(str)
- rd := protoio.NewDelimitedReader(str, maxMsgSize)
+ wr := pbio.NewDelimitedWriter(str)
+ rd := pbio.NewDelimitedReader(str, maxMsgSize)
// Read Connect message
msg := new(pb.HolePunch)
@@ -189,15 +196,20 @@ func (s *Service) incomingHolePunch(str network.Stream) (rtt time.Duration, addr
str.SetDeadline(time.Now().Add(StreamTimeout))
if err := rd.ReadMsg(msg); err != nil {
- return 0, nil, fmt.Errorf("failed to read message from initator: %w", err)
+ return 0, nil, nil, fmt.Errorf("failed to read message from initator: %w", err)
}
if t := msg.GetType(); t != pb.HolePunch_CONNECT {
- return 0, nil, fmt.Errorf("expected CONNECT message from initiator but got %d", t)
+ return 0, nil, nil, fmt.Errorf("expected CONNECT message from initiator but got %d", t)
}
+
obsDial := removeRelayAddrs(addrsFromBytes(msg.ObsAddrs))
+ if s.filter != nil {
+ obsDial = s.filter.FilterRemote(str.Conn().RemotePeer(), obsDial)
+ }
+
log.Debugw("received hole punch request", "peer", str.Conn().RemotePeer(), "addrs", obsDial)
if len(obsDial) == 0 {
- return 0, nil, errors.New("expected CONNECT message to contain at least one address")
+ return 0, nil, nil, errors.New("expected CONNECT message to contain at least one address")
}
// Write CONNECT message
@@ -206,18 +218,18 @@ func (s *Service) incomingHolePunch(str network.Stream) (rtt time.Duration, addr
msg.ObsAddrs = addrsToBytes(ownAddrs)
tstart := time.Now()
if err := wr.WriteMsg(msg); err != nil {
- return 0, nil, fmt.Errorf("failed to write CONNECT message to initator: %w", err)
+ return 0, nil, nil, fmt.Errorf("failed to write CONNECT message to initator: %w", err)
}
// Read SYNC message
msg.Reset()
if err := rd.ReadMsg(msg); err != nil {
- return 0, nil, fmt.Errorf("failed to read message from initator: %w", err)
+ return 0, nil, nil, fmt.Errorf("failed to read message from initator: %w", err)
}
if t := msg.GetType(); t != pb.HolePunch_SYNC {
- return 0, nil, fmt.Errorf("expected SYNC message from initiator but got %d", t)
+ return 0, nil, nil, fmt.Errorf("expected SYNC message from initiator but got %d", t)
}
- return time.Since(tstart), obsDial, nil
+ return time.Since(tstart), obsDial, ownAddrs, nil
}
func (s *Service) handleNewStream(str network.Stream) {
@@ -238,7 +250,7 @@ func (s *Service) handleNewStream(str network.Stream) {
}
rp := str.Conn().RemotePeer()
- rtt, addrs, err := s.incomingHolePunch(str)
+ rtt, addrs, ownAddrs, err := s.incomingHolePunch(str)
if err != nil {
s.tracer.ProtocolError(rp, err)
log.Debugw("error handling holepunching stream from", "peer", rp, "error", err)
@@ -259,6 +271,7 @@ func (s *Service) handleNewStream(str network.Stream) {
err = holePunchConnect(s.ctx, s.host, pi, false)
dt := time.Since(start)
s.tracer.EndHolePunch(rp, dt, err)
+ s.tracer.HolePunchFinished("receiver", 1, addrs, ownAddrs, getDirectConnection(s.host, rp))
}
// DirectConnect is only exposed for testing purposes.
diff --git a/vendor/github.com/libp2p/go-libp2p/p2p/protocol/holepunch/tracer.go b/vendor/github.com/libp2p/go-libp2p/p2p/protocol/holepunch/tracer.go
index 87aa61367..82e0ebfc0 100644
--- a/vendor/github.com/libp2p/go-libp2p/p2p/protocol/holepunch/tracer.go
+++ b/vendor/github.com/libp2p/go-libp2p/p2p/protocol/holepunch/tracer.go
@@ -2,10 +2,10 @@ package holepunch
import (
"context"
- "fmt"
"sync"
"time"
+ "github.com/libp2p/go-libp2p/core/network"
"github.com/libp2p/go-libp2p/core/peer"
ma "github.com/multiformats/go-multiaddr"
@@ -16,27 +16,57 @@ const (
tracerCacheDuration = 5 * time.Minute
)
-// WithTracer is a Service option that enables hole punching tracing
-func WithTracer(tr EventTracer) Option {
+// WithTracer enables holepunch tracing with EventTracer et
+func WithTracer(et EventTracer) Option {
return func(hps *Service) error {
- t := &tracer{
- tr: tr,
+ hps.tracer = &tracer{
+ et: et,
+ mt: nil,
+ self: hps.host.ID(),
+ peers: make(map[peer.ID]struct {
+ counter int
+ last time.Time
+ }),
+ }
+ return nil
+ }
+}
+
+// WithMetricsTracer enables holepunch Tracing with MetricsTracer mt
+func WithMetricsTracer(mt MetricsTracer) Option {
+ return func(hps *Service) error {
+ hps.tracer = &tracer{
+ et: nil,
+ mt: mt,
+ self: hps.host.ID(),
+ peers: make(map[peer.ID]struct {
+ counter int
+ last time.Time
+ }),
+ }
+ return nil
+ }
+}
+
+// WithMetricsAndEventTracer enables holepunch tracking with MetricsTracer and EventTracer
+func WithMetricsAndEventTracer(mt MetricsTracer, et EventTracer) Option {
+ return func(hps *Service) error {
+ hps.tracer = &tracer{
+ et: et,
+ mt: mt,
self: hps.host.ID(),
peers: make(map[peer.ID]struct {
counter int
last time.Time
}),
}
- t.refCount.Add(1)
- t.ctx, t.ctxCancel = context.WithCancel(context.Background())
- go t.gc()
- hps.tracer = t
return nil
}
}
type tracer struct {
- tr EventTracer
+ et EventTracer
+ mt MetricsTracer
self peer.ID
refCount sync.WaitGroup
@@ -103,16 +133,22 @@ func (t *tracer) DirectDialSuccessful(p peer.ID, dt time.Duration) {
return
}
- t.tr.Trace(&Event{
- Timestamp: time.Now().UnixNano(),
- Peer: t.self,
- Remote: p,
- Type: DirectDialEvtT,
- Evt: &DirectDialEvt{
- Success: true,
- EllapsedTime: dt,
- },
- })
+ if t.et != nil {
+ t.et.Trace(&Event{
+ Timestamp: time.Now().UnixNano(),
+ Peer: t.self,
+ Remote: p,
+ Type: DirectDialEvtT,
+ Evt: &DirectDialEvt{
+ Success: true,
+ EllapsedTime: dt,
+ },
+ })
+ }
+
+ if t.mt != nil {
+ t.mt.DirectDialFinished(true)
+ }
}
func (t *tracer) DirectDialFailed(p peer.ID, dt time.Duration, err error) {
@@ -120,114 +156,117 @@ func (t *tracer) DirectDialFailed(p peer.ID, dt time.Duration, err error) {
return
}
- t.tr.Trace(&Event{
- Timestamp: time.Now().UnixNano(),
- Peer: t.self,
- Remote: p,
- Type: DirectDialEvtT,
- Evt: &DirectDialEvt{
- Success: false,
- EllapsedTime: dt,
- Error: err.Error(),
- },
- })
+ if t.et != nil {
+ t.et.Trace(&Event{
+ Timestamp: time.Now().UnixNano(),
+ Peer: t.self,
+ Remote: p,
+ Type: DirectDialEvtT,
+ Evt: &DirectDialEvt{
+ Success: false,
+ EllapsedTime: dt,
+ Error: err.Error(),
+ },
+ })
+ }
+
+ if t.mt != nil {
+ t.mt.DirectDialFinished(false)
+ }
}
func (t *tracer) ProtocolError(p peer.ID, err error) {
- if t == nil {
- return
+ if t != nil && t.et != nil {
+ t.et.Trace(&Event{
+ Timestamp: time.Now().UnixNano(),
+ Peer: t.self,
+ Remote: p,
+ Type: ProtocolErrorEvtT,
+ Evt: &ProtocolErrorEvt{
+ Error: err.Error(),
+ },
+ })
}
-
- t.tr.Trace(&Event{
- Timestamp: time.Now().UnixNano(),
- Peer: t.self,
- Remote: p,
- Type: ProtocolErrorEvtT,
- Evt: &ProtocolErrorEvt{
- Error: err.Error(),
- },
- })
}
func (t *tracer) StartHolePunch(p peer.ID, obsAddrs []ma.Multiaddr, rtt time.Duration) {
- if t == nil {
- return
- }
+ if t != nil && t.et != nil {
+ addrs := make([]string, 0, len(obsAddrs))
+ for _, a := range obsAddrs {
+ addrs = append(addrs, a.String())
+ }
- addrs := make([]string, 0, len(obsAddrs))
- for _, a := range obsAddrs {
- addrs = append(addrs, a.String())
+ t.et.Trace(&Event{
+ Timestamp: time.Now().UnixNano(),
+ Peer: t.self,
+ Remote: p,
+ Type: StartHolePunchEvtT,
+ Evt: &StartHolePunchEvt{
+ RemoteAddrs: addrs,
+ RTT: rtt,
+ },
+ })
}
-
- t.tr.Trace(&Event{
- Timestamp: time.Now().UnixNano(),
- Peer: t.self,
- Remote: p,
- Type: StartHolePunchEvtT,
- Evt: &StartHolePunchEvt{
- RemoteAddrs: addrs,
- RTT: rtt,
- },
- })
}
func (t *tracer) EndHolePunch(p peer.ID, dt time.Duration, err error) {
- if t == nil {
- return
- }
+ if t != nil && t.et != nil {
+ evt := &EndHolePunchEvt{
+ Success: err == nil,
+ EllapsedTime: dt,
+ }
+ if err != nil {
+ evt.Error = err.Error()
+ }
- evt := &EndHolePunchEvt{
- Success: err == nil,
- EllapsedTime: dt,
- }
- if err != nil {
- evt.Error = err.Error()
+ t.et.Trace(&Event{
+ Timestamp: time.Now().UnixNano(),
+ Peer: t.self,
+ Remote: p,
+ Type: EndHolePunchEvtT,
+ Evt: evt,
+ })
}
+}
- t.tr.Trace(&Event{
- Timestamp: time.Now().UnixNano(),
- Peer: t.self,
- Remote: p,
- Type: EndHolePunchEvtT,
- Evt: evt,
- })
+func (t *tracer) HolePunchFinished(side string, numAttempts int, theirAddrs []ma.Multiaddr, ourAddrs []ma.Multiaddr, directConn network.Conn) {
+ if t != nil && t.mt != nil {
+ t.mt.HolePunchFinished(side, numAttempts, theirAddrs, ourAddrs, directConn)
+ }
}
func (t *tracer) HolePunchAttempt(p peer.ID) {
- if t == nil {
- return
+ if t != nil && t.et != nil {
+ now := time.Now()
+ t.mutex.Lock()
+ attempt := t.peers[p]
+ attempt.counter++
+ counter := attempt.counter
+ attempt.last = now
+ t.peers[p] = attempt
+ t.mutex.Unlock()
+
+ t.et.Trace(&Event{
+ Timestamp: now.UnixNano(),
+ Peer: t.self,
+ Remote: p,
+ Type: HolePunchAttemptEvtT,
+ Evt: &HolePunchAttemptEvt{Attempt: counter},
+ })
}
-
- now := time.Now()
- t.mutex.Lock()
- attempt := t.peers[p]
- attempt.counter++
- counter := attempt.counter
- attempt.last = now
- t.peers[p] = attempt
- t.mutex.Unlock()
-
- t.tr.Trace(&Event{
- Timestamp: now.UnixNano(),
- Peer: t.self,
- Remote: p,
- Type: HolePunchAttemptEvtT,
- Evt: &HolePunchAttemptEvt{Attempt: counter},
- })
}
+// gc cleans up the peers map. This is only run when tracer is initialised with a non nil
+// EventTracer
func (t *tracer) gc() {
- defer func() {
- fmt.Println("done")
- t.refCount.Done()
- }()
-
+ defer t.refCount.Done()
timer := time.NewTicker(tracerGCInterval)
defer timer.Stop()
for {
select {
- case now := <-timer.C:
+ case <-timer.C:
+ now := time.Now()
t.mutex.Lock()
for id, entry := range t.peers {
if entry.last.Before(now.Add(-tracerCacheDuration)) {
@@ -241,12 +280,18 @@ func (t *tracer) gc() {
}
}
-func (t *tracer) Close() error {
- if t == nil {
- return nil
+func (t *tracer) Start() {
+ if t != nil && t.et != nil {
+ t.ctx, t.ctxCancel = context.WithCancel(context.Background())
+ t.refCount.Add(1)
+ go t.gc()
}
+}
- t.ctxCancel()
- t.refCount.Wait()
+func (t *tracer) Close() error {
+ if t != nil && t.et != nil {
+ t.ctxCancel()
+ t.refCount.Wait()
+ }
return nil
}
diff --git a/vendor/github.com/libp2p/go-libp2p/p2p/protocol/holepunch/util.go b/vendor/github.com/libp2p/go-libp2p/p2p/protocol/holepunch/util.go
index 825f855ee..13013568f 100644
--- a/vendor/github.com/libp2p/go-libp2p/p2p/protocol/holepunch/util.go
+++ b/vendor/github.com/libp2p/go-libp2p/p2p/protocol/holepunch/util.go
@@ -55,6 +55,15 @@ func addrsFromBytes(bzs [][]byte) []ma.Multiaddr {
return addrs
}
+func getDirectConnection(h host.Host, p peer.ID) network.Conn {
+ for _, c := range h.Network().ConnsToPeer(p) {
+ if !isRelayAddress(c.RemoteMultiaddr()) {
+ return c
+ }
+ }
+ return nil
+}
+
func holePunchConnect(ctx context.Context, host host.Host, pi peer.AddrInfo, isClient bool) error {
holePunchCtx := network.WithSimultaneousConnect(ctx, isClient, "hole-punching")
forceDirectConnCtx := network.WithForceDirectDial(holePunchCtx, "hole-punching")
diff --git a/vendor/github.com/libp2p/go-libp2p/p2p/protocol/identify/id.go b/vendor/github.com/libp2p/go-libp2p/p2p/protocol/identify/id.go
index 4794619d9..e997060ab 100644
--- a/vendor/github.com/libp2p/go-libp2p/p2p/protocol/identify/id.go
+++ b/vendor/github.com/libp2p/go-libp2p/p2p/protocol/identify/id.go
@@ -1,65 +1,92 @@
package identify
import (
+ "bytes"
"context"
+ "errors"
"fmt"
"io"
+ "sort"
"sync"
"time"
+ "golang.org/x/exp/slices"
+
"github.com/libp2p/go-libp2p/core/crypto"
"github.com/libp2p/go-libp2p/core/event"
"github.com/libp2p/go-libp2p/core/host"
"github.com/libp2p/go-libp2p/core/network"
"github.com/libp2p/go-libp2p/core/peer"
"github.com/libp2p/go-libp2p/core/peerstore"
+ "github.com/libp2p/go-libp2p/core/protocol"
"github.com/libp2p/go-libp2p/core/record"
-
"github.com/libp2p/go-libp2p/p2p/host/eventbus"
- pb "github.com/libp2p/go-libp2p/p2p/protocol/identify/pb"
-
- "github.com/libp2p/go-msgio/protoio"
+ "github.com/libp2p/go-libp2p/p2p/protocol/identify/pb"
- "github.com/gogo/protobuf/proto"
logging "github.com/ipfs/go-log/v2"
+ "github.com/libp2p/go-msgio/pbio"
ma "github.com/multiformats/go-multiaddr"
manet "github.com/multiformats/go-multiaddr/net"
msmux "github.com/multiformats/go-multistream"
+ "google.golang.org/protobuf/proto"
)
-var log = logging.Logger("net/identify")
+//go:generate protoc --proto_path=$PWD:$PWD/../../.. --go_out=. --go_opt=Mpb/identify.proto=./pb pb/identify.proto
-// ID is the protocol.ID of version 1.0.0 of the identify
-// service.
-const ID = "/ipfs/id/1.0.0"
+var log = logging.Logger("net/identify")
-// LibP2PVersion holds the current protocol version for a client running this code
-// TODO(jbenet): fix the versioning mess.
-// XXX: Don't change this till 2020. You'll break all go-ipfs versions prior to
-// 0.4.17 which asserted an exact version match.
-const LibP2PVersion = "ipfs/0.1.0"
+const (
+ // ID is the protocol.ID of version 1.0.0 of the identify service.
+ ID = "/ipfs/id/1.0.0"
+ // IDPush is the protocol.ID of the Identify push protocol.
+ // It sends full identify messages containing the current state of the peer.
+ IDPush = "/ipfs/id/push/1.0.0"
+)
const ServiceName = "libp2p.identify"
const maxPushConcurrency = 32
-// StreamReadTimeout is the read timeout on all incoming Identify family streams.
-var StreamReadTimeout = 60 * time.Second
+var Timeout = 60 * time.Second // timeout on all incoming Identify interactions
-var (
- legacyIDSize = 2 * 1024 // 2k Bytes
- signedIDSize = 8 * 1024 // 8K
- maxMessages = 10
- defaultUserAgent = "github.com/libp2p/go-libp2p"
+const (
+ legacyIDSize = 2 * 1024 // 2k Bytes
+ signedIDSize = 8 * 1024 // 8K
+ maxMessages = 10
)
-type addPeerHandlerReq struct {
- rp peer.ID
- resp chan *peerHandler
+var defaultUserAgent = "github.com/libp2p/go-libp2p"
+
+type identifySnapshot struct {
+ seq uint64
+ protocols []protocol.ID
+ addrs []ma.Multiaddr
+ record *record.Envelope
}
-type rmPeerHandlerReq struct {
- p peer.ID
+// Equal says if two snapshots are identical.
+// It does NOT compare the sequence number.
+func (s identifySnapshot) Equal(other *identifySnapshot) bool {
+ hasRecord := s.record != nil
+ otherHasRecord := other.record != nil
+ if hasRecord != otherHasRecord {
+ return false
+ }
+ if hasRecord && !s.record.Equal(other.record) {
+ return false
+ }
+ if !slices.Equal(s.protocols, other.protocols) {
+ return false
+ }
+ if len(s.addrs) != len(other.addrs) {
+ return false
+ }
+ for i, a := range s.addrs {
+ if !a.Equal(other.addrs[i]) {
+ return false
+ }
+ }
+ return true
}
type IDService interface {
@@ -77,31 +104,58 @@ type IDService interface {
// ObservedAddrsFor returns the addresses peers have reported we've dialed from,
// for a specific local address.
ObservedAddrsFor(local ma.Multiaddr) []ma.Multiaddr
+ Start()
io.Closer
}
+type identifyPushSupport uint8
+
+const (
+ identifyPushSupportUnknown identifyPushSupport = iota
+ identifyPushSupported
+ identifyPushUnsupported
+)
+
+type entry struct {
+ // The IdentifyWaitChan is created when IdentifyWait is called for the first time.
+ // IdentifyWait closes this channel when the Identify request completes, or when it fails.
+ IdentifyWaitChan chan struct{}
+
+ // PushSupport saves our knowledge about the peer's support of the Identify Push protocol.
+ // Before the identify request returns, we don't know yet if the peer supports Identify Push.
+ PushSupport identifyPushSupport
+ // Sequence is the sequence number of the last snapshot we sent to this peer.
+ Sequence uint64
+}
+
// idService is a structure that implements ProtocolIdentify.
// It is a trivial service that gives the other peer some
// useful information about the local peer. A sort of hello.
//
// The idService sends:
-// - Our IPFS Protocol Version
-// - Our IPFS Agent Version
+// - Our libp2p Protocol Version
+// - Our libp2p Agent Version
// - Our public Listen Addresses
type idService struct {
- Host host.Host
- UserAgent string
+ Host host.Host
+ UserAgent string
+ ProtocolVersion string
+
+ metricsTracer MetricsTracer
- ctx context.Context
- ctxCancel context.CancelFunc
+ setupCompleted chan struct{} // is closed when Start has finished setting up
+ ctx context.Context
+ ctxCancel context.CancelFunc
// track resources that need to be shut down before we shut down
refCount sync.WaitGroup
disableSignedPeerRecord bool
- // Identified connections (finished and in progress).
connsMu sync.RWMutex
- conns map[network.Conn]chan struct{}
+ // The conns map contains all connections we're currently handling.
+ // Connections are inserted as soon as they're available in the swarm
+ // Connections are removed from the map when the connection disconnects.
+ conns map[network.Conn]entry
addrMu sync.Mutex
@@ -114,12 +168,10 @@ type idService struct {
evtPeerIdentificationFailed event.Emitter
}
- addPeerHandlerCh chan addPeerHandlerReq
- rmPeerHandlerCh chan rmPeerHandlerReq
-
- // pushSemaphore limits the push/delta concurrency to avoid storms
- // that clog the transient scope.
- pushSemaphore chan struct{}
+ currentSnapshot struct {
+ sync.Mutex
+ snapshot identifySnapshot
+ }
}
// NewIDService constructs a new *idService and activates it by
@@ -135,23 +187,18 @@ func NewIDService(h host.Host, opts ...Option) (*idService, error) {
userAgent = cfg.userAgent
}
+ ctx, cancel := context.WithCancel(context.Background())
s := &idService{
- Host: h,
- UserAgent: userAgent,
-
- conns: make(map[network.Conn]chan struct{}),
-
+ Host: h,
+ UserAgent: userAgent,
+ ProtocolVersion: cfg.protocolVersion,
+ ctx: ctx,
+ ctxCancel: cancel,
+ conns: make(map[network.Conn]entry),
disableSignedPeerRecord: cfg.disableSignedPeerRecord,
-
- addPeerHandlerCh: make(chan addPeerHandlerReq),
- rmPeerHandlerCh: make(chan rmPeerHandlerReq),
-
- pushSemaphore: make(chan struct{}, maxPushConcurrency),
+ setupCompleted: make(chan struct{}),
+ metricsTracer: cfg.metricsTracer,
}
- s.ctx, s.ctxCancel = context.WithCancel(context.Background())
-
- // handle local protocol handler updates, and push deltas to peers.
- var err error
observedAddrs, err := NewObservedAddrManager(h)
if err != nil {
@@ -159,9 +206,6 @@ func NewIDService(h host.Host, opts ...Option) (*idService, error) {
}
s.observedAddrs = observedAddrs
- s.refCount.Add(1)
- go s.loop()
-
s.emitters.evtPeerProtocolsUpdated, err = h.EventBus().Emitter(&event.EvtPeerProtocolsUpdated{})
if err != nil {
log.Warnf("identify service not emitting peer protocol updates; err: %s", err)
@@ -174,113 +218,125 @@ func NewIDService(h host.Host, opts ...Option) (*idService, error) {
if err != nil {
log.Warnf("identify service not emitting identification failed events; err: %s", err)
}
+ return s, nil
+}
- // register protocols that do not depend on peer records.
- h.SetStreamHandler(IDDelta, s.deltaHandler)
- h.SetStreamHandler(ID, s.sendIdentifyResp)
- h.SetStreamHandler(IDPush, s.pushHandler)
+func (ids *idService) Start() {
+ ids.Host.Network().Notify((*netNotifiee)(ids))
+ ids.Host.SetStreamHandler(ID, ids.handleIdentifyRequest)
+ ids.Host.SetStreamHandler(IDPush, ids.handlePush)
+ ids.updateSnapshot()
+ close(ids.setupCompleted)
- h.Network().Notify((*netNotifiee)(s))
- return s, nil
+ ids.refCount.Add(1)
+ go ids.loop(ids.ctx)
}
-func (ids *idService) loop() {
+func (ids *idService) loop(ctx context.Context) {
defer ids.refCount.Done()
- phs := make(map[peer.ID]*peerHandler)
- sub, err := ids.Host.EventBus().Subscribe([]interface{}{&event.EvtLocalProtocolsUpdated{},
- &event.EvtLocalAddressesUpdated{}}, eventbus.BufSize(256))
+ sub, err := ids.Host.EventBus().Subscribe(
+ []any{&event.EvtLocalProtocolsUpdated{}, &event.EvtLocalAddressesUpdated{}},
+ eventbus.BufSize(256),
+ eventbus.Name("identify (loop)"),
+ )
if err != nil {
log.Errorf("failed to subscribe to events on the bus, err=%s", err)
return
}
+ defer sub.Close()
- phClosedCh := make(chan peer.ID)
+ // Send pushes from a separate Go routine.
+ // That way, we can end up with
+ // * this Go routine busy looping over all peers in sendPushes
+ // * another push being queued in the triggerPush channel
+ triggerPush := make(chan struct{}, 1)
+ ids.refCount.Add(1)
+ go func() {
+ defer ids.refCount.Done()
- defer func() {
- sub.Close()
- // The context will cancel the workers. Now, wait for them to
- // exit.
- for range phs {
- <-phClosedCh
+ for {
+ select {
+ case <-ctx.Done():
+ return
+ case <-triggerPush:
+ ids.sendPushes(ctx)
+ }
}
}()
- // Use a fresh context for the handlers. Otherwise, they'll get canceled
- // before we're ready to shutdown and they'll have "stopped" without us
- // _calling_ stop.
- handlerCtx, cancel := context.WithCancel(context.Background())
- defer cancel()
-
for {
select {
- case addReq := <-ids.addPeerHandlerCh:
- rp := addReq.rp
- ph, ok := phs[rp]
- if !ok && ids.Host.Network().Connectedness(rp) == network.Connected {
- ph = newPeerHandler(rp, ids)
- ph.start(handlerCtx, func() { phClosedCh <- rp })
- phs[rp] = ph
+ case e, ok := <-sub.Out():
+ if !ok {
+ return
}
- addReq.resp <- ph
- case rmReq := <-ids.rmPeerHandlerCh:
- rp := rmReq.p
- if ids.Host.Network().Connectedness(rp) != network.Connected {
- // before we remove the peerhandler, we should ensure that it will not send any
- // more messages. Otherwise, we might create a new handler and the Identify response
- // synchronized with the new handler might be overwritten by a message sent by this "old" handler.
- ph, ok := phs[rp]
- if !ok {
- // move on, move on, there's nothing to see here.
- continue
- }
- // This is idempotent if already stopped.
- ph.stop()
+ if updated := ids.updateSnapshot(); !updated {
+ continue
}
-
- case rp := <-phClosedCh:
- ph := phs[rp]
-
- // If we are connected to the peer, it means that we got a connection from the peer
- // before we could finish removing it's handler on the previous disconnection.
- // If we delete the handler, we wont be able to push updates to it
- // till we see a new connection. So, we should restart the handler.
- // The fact that we got the handler on this channel means that it's context and handler
- // have completed because we write the handler to this chanel only after it closed.
- if ids.Host.Network().Connectedness(rp) == network.Connected {
- ph.start(handlerCtx, func() { phClosedCh <- rp })
- } else {
- delete(phs, rp)
+ if ids.metricsTracer != nil {
+ ids.metricsTracer.TriggeredPushes(e)
+ }
+ select {
+ case triggerPush <- struct{}{}:
+ default: // we already have one more push queued, no need to queue another one
}
+ case <-ctx.Done():
+ return
+ }
+ }
+}
- case e, more := <-sub.Out():
- if !more {
+func (ids *idService) sendPushes(ctx context.Context) {
+ ids.connsMu.RLock()
+ conns := make([]network.Conn, 0, len(ids.conns))
+ for c, e := range ids.conns {
+ // Push even if we don't know if push is supported.
+ // This will be only the case while the IdentifyWaitChan call is in flight.
+ if e.PushSupport == identifyPushSupported || e.PushSupport == identifyPushSupportUnknown {
+ conns = append(conns, c)
+ }
+ }
+ ids.connsMu.RUnlock()
+
+ sem := make(chan struct{}, maxPushConcurrency)
+ var wg sync.WaitGroup
+ for _, c := range conns {
+ // check if the connection is still alive
+ ids.connsMu.RLock()
+ e, ok := ids.conns[c]
+ ids.connsMu.RUnlock()
+ if !ok {
+ continue
+ }
+ // check if we already sent the current snapshot to this peer
+ ids.currentSnapshot.Lock()
+ snapshot := ids.currentSnapshot.snapshot
+ ids.currentSnapshot.Unlock()
+ if e.Sequence >= snapshot.seq {
+ log.Debugw("already sent this snapshot to peer", "peer", c.RemotePeer(), "seq", snapshot.seq)
+ continue
+ }
+ // we haven't, send it now
+ sem <- struct{}{}
+ wg.Add(1)
+ go func(c network.Conn) {
+ defer wg.Done()
+ defer func() { <-sem }()
+ ctx, cancel := context.WithTimeout(ctx, 5*time.Second)
+ defer cancel()
+ str, err := ids.Host.NewStream(ctx, c.RemotePeer(), IDPush)
+ if err != nil { // connection might have been closed recently
return
}
- switch e.(type) {
- case event.EvtLocalAddressesUpdated:
- for pid := range phs {
- select {
- case phs[pid].pushCh <- struct{}{}:
- default:
- log.Debugf("dropping addr updated message for %s as buffer full", pid.Pretty())
- }
- }
-
- case event.EvtLocalProtocolsUpdated:
- for pid := range phs {
- select {
- case phs[pid].deltaCh <- struct{}{}:
- default:
- log.Debugf("dropping protocol updated message for %s as buffer full", pid.Pretty())
- }
- }
+ // TODO: find out if the peer supports push if we didn't have any information about push support
+ if err := ids.sendIdentifyResp(str, true); err != nil {
+ log.Debugw("failed to send identify push", "peer", c.RemotePeer(), "error", err)
+ return
}
-
- case <-ids.ctx.Done():
- return
- }
+ }(c)
}
+ wg.Wait()
}
// Close shuts down the idService
@@ -299,60 +355,68 @@ func (ids *idService) ObservedAddrsFor(local ma.Multiaddr) []ma.Multiaddr {
return ids.observedAddrs.AddrsFor(local)
}
+// IdentifyConn runs the Identify protocol on a connection.
+// It returns when we've received the peer's Identify message (or the request fails).
+// If successful, the peer store will contain the peer's addresses and supported protocols.
func (ids *idService) IdentifyConn(c network.Conn) {
<-ids.IdentifyWait(c)
}
+// IdentifyWait runs the Identify protocol on a connection.
+// It doesn't block and returns a channel that is closed when we receive
+// the peer's Identify message (or the request fails).
+// If successful, the peer store will contain the peer's addresses and supported protocols.
func (ids *idService) IdentifyWait(c network.Conn) <-chan struct{} {
- ids.connsMu.RLock()
- wait, found := ids.conns[c]
- ids.connsMu.RUnlock()
-
- if found {
- return wait
- }
-
ids.connsMu.Lock()
defer ids.connsMu.Unlock()
- wait, found = ids.conns[c]
+ e, found := ids.conns[c]
if !found {
- wait = make(chan struct{})
- ids.conns[c] = wait
-
- // Spawn an identify. The connection may actually be closed
- // already, but that doesn't really matter. We'll fail to open a
- // stream then forget the connection.
- go func() {
- defer close(wait)
- if err := ids.identifyConn(c); err != nil {
- log.Warnf("failed to identify %s: %s", c.RemotePeer(), err)
- ids.emitters.evtPeerIdentificationFailed.Emit(event.EvtPeerIdentificationFailed{Peer: c.RemotePeer(), Reason: err})
- return
- }
- ids.emitters.evtPeerIdentificationCompleted.Emit(event.EvtPeerIdentificationCompleted{Peer: c.RemotePeer()})
- }()
+ // No entry found. We may have gotten an out of order notification. Check it we should have this conn (because we're still connected)
+ // We hold the ids.connsMu lock so this is safe since a disconnect event will be processed later if we are connected.
+ if c.IsClosed() {
+ log.Debugw("connection not found in identify service", "peer", c.RemotePeer())
+ ch := make(chan struct{})
+ close(ch)
+ return ch
+ } else {
+ ids.addConnWithLock(c)
+ }
}
- return wait
-}
+ if e.IdentifyWaitChan != nil {
+ return e.IdentifyWaitChan
+ }
+ // First call to IdentifyWait for this connection. Create the channel.
+ e.IdentifyWaitChan = make(chan struct{})
+ ids.conns[c] = e
-func (ids *idService) removeConn(c network.Conn) {
- ids.connsMu.Lock()
- delete(ids.conns, c)
- ids.connsMu.Unlock()
+ // Spawn an identify. The connection may actually be closed
+ // already, but that doesn't really matter. We'll fail to open a
+ // stream then forget the connection.
+ go func() {
+ defer close(e.IdentifyWaitChan)
+ if err := ids.identifyConn(c); err != nil {
+ log.Warnf("failed to identify %s: %s", c.RemotePeer(), err)
+ ids.emitters.evtPeerIdentificationFailed.Emit(event.EvtPeerIdentificationFailed{Peer: c.RemotePeer(), Reason: err})
+ return
+ }
+
+ ids.emitters.evtPeerIdentificationCompleted.Emit(event.EvtPeerIdentificationCompleted{Peer: c.RemotePeer()})
+ }()
+
+ return e.IdentifyWaitChan
}
func (ids *idService) identifyConn(c network.Conn) error {
- s, err := c.NewStream(network.WithUseTransient(context.TODO(), "identify"))
+ ctx, cancel := context.WithTimeout(context.Background(), Timeout)
+ defer cancel()
+ s, err := c.NewStream(network.WithUseTransient(ctx, "identify"))
if err != nil {
- log.Debugw("error opening identify stream", "error", err)
-
- // We usually do this on disconnect, but we may have already
- // processed the disconnect event.
- ids.removeConn(c)
+ log.Debugw("error opening identify stream", "peer", c.RemotePeer(), "error", err)
return err
}
+ s.SetDeadline(time.Now().Add(Timeout))
if err := s.SetProtocol(ID); err != nil {
log.Warnf("error setting identify protocol for stream: %s", err)
@@ -366,48 +430,60 @@ func (ids *idService) identifyConn(c network.Conn) error {
return err
}
- return ids.handleIdentifyResponse(s)
+ return ids.handleIdentifyResponse(s, false)
}
-func (ids *idService) sendIdentifyResp(s network.Stream) {
+// handlePush handles incoming identify push streams
+func (ids *idService) handlePush(s network.Stream) {
+ s.SetDeadline(time.Now().Add(Timeout))
+ ids.handleIdentifyResponse(s, true)
+}
+
+func (ids *idService) handleIdentifyRequest(s network.Stream) {
+ _ = ids.sendIdentifyResp(s, false)
+}
+
+func (ids *idService) sendIdentifyResp(s network.Stream, isPush bool) error {
if err := s.Scope().SetService(ServiceName); err != nil {
- log.Warnf("error attaching stream to identify service: %s", err)
s.Reset()
- return
+ return fmt.Errorf("failed to attaching stream to identify service: %w", err)
}
-
defer s.Close()
- c := s.Conn()
+ ids.currentSnapshot.Lock()
+ snapshot := ids.currentSnapshot.snapshot
+ ids.currentSnapshot.Unlock()
- phCh := make(chan *peerHandler, 1)
- select {
- case ids.addPeerHandlerCh <- addPeerHandlerReq{c.RemotePeer(), phCh}:
- case <-ids.ctx.Done():
- return
- }
+ log.Debugw("sending snapshot", "seq", snapshot.seq, "protocols", snapshot.protocols, "addrs", snapshot.addrs)
- var ph *peerHandler
- select {
- case ph = <-phCh:
- case <-ids.ctx.Done():
- return
+ mes := ids.createBaseIdentifyResponse(s.Conn(), &snapshot)
+ mes.SignedPeerRecord = ids.getSignedRecord(&snapshot)
+
+ log.Debugf("%s sending message to %s %s", ID, s.Conn().RemotePeer(), s.Conn().RemoteMultiaddr())
+ if err := ids.writeChunkedIdentifyMsg(s, mes); err != nil {
+ return err
}
- if ph == nil {
- // Peer disconnected, abort.
- s.Reset()
- return
+ if ids.metricsTracer != nil {
+ ids.metricsTracer.IdentifySent(isPush, len(mes.Protocols), len(mes.ListenAddrs))
}
- ph.snapshotMu.RLock()
- snapshot := ph.snapshot
- ph.snapshotMu.RUnlock()
- ids.writeChunkedIdentifyMsg(c, snapshot, s)
- log.Debugf("%s sent message to %s %s", ID, c.RemotePeer(), c.RemoteMultiaddr())
+ ids.connsMu.Lock()
+ defer ids.connsMu.Unlock()
+ e, ok := ids.conns[s.Conn()]
+ // The connection might already have been closed.
+ // We *should* receive the Connected notification from the swarm before we're able to accept the peer's
+ // Identify stream, but if that for some reason doesn't work, we also wouldn't have a map entry here.
+ // The only consequence would be that we send a spurious Push to that peer later.
+ if !ok {
+ return nil
+ }
+ e.Sequence = snapshot.seq
+ ids.conns[s.Conn()] = e
+ return nil
}
-func (ids *idService) handleIdentifyResponse(s network.Stream) error {
+func (ids *idService) handleIdentifyResponse(s network.Stream, isPush bool) error {
if err := s.Scope().SetService(ServiceName); err != nil {
log.Warnf("error attaching stream to identify service: %s", err)
s.Reset()
@@ -421,11 +497,9 @@ func (ids *idService) handleIdentifyResponse(s network.Stream) error {
}
defer s.Scope().ReleaseMemory(signedIDSize)
- _ = s.SetReadDeadline(time.Now().Add(StreamReadTimeout))
-
c := s.Conn()
- r := protoio.NewDelimitedReader(s, signedIDSize)
+ r := pbio.NewDelimitedReader(s, signedIDSize)
mes := &pb.Identify{}
if err := readAllIDMessages(r, mes); err != nil {
@@ -438,12 +512,34 @@ func (ids *idService) handleIdentifyResponse(s network.Stream) error {
log.Debugf("%s received message from %s %s", s.Protocol(), c.RemotePeer(), c.RemoteMultiaddr())
- ids.consumeMessage(mes, c)
+ ids.consumeMessage(mes, c, isPush)
+ if ids.metricsTracer != nil {
+ ids.metricsTracer.IdentifyReceived(isPush, len(mes.Protocols), len(mes.ListenAddrs))
+ }
+
+ ids.connsMu.Lock()
+ defer ids.connsMu.Unlock()
+ e, ok := ids.conns[c]
+ if !ok { // might already have disconnected
+ return nil
+ }
+ sup, err := ids.Host.Peerstore().SupportsProtocols(c.RemotePeer(), IDPush)
+ if supportsIdentifyPush := err == nil && len(sup) > 0; supportsIdentifyPush {
+ e.PushSupport = identifyPushSupported
+ } else {
+ e.PushSupport = identifyPushUnsupported
+ }
+
+ if ids.metricsTracer != nil {
+ ids.metricsTracer.ConnPushSupport(e.PushSupport)
+ }
+
+ ids.conns[c] = e
return nil
}
-func readAllIDMessages(r protoio.Reader, finalMsg proto.Message) error {
+func readAllIDMessages(r pbio.Reader, finalMsg proto.Message) error {
mes := &pb.Identify{}
for i := 0; i < maxMessages; i++ {
switch err := r.ReadMsg(mes); err {
@@ -459,50 +555,60 @@ func readAllIDMessages(r protoio.Reader, finalMsg proto.Message) error {
return fmt.Errorf("too many parts")
}
-func (ids *idService) getSnapshot() *identifySnapshot {
- snapshot := new(identifySnapshot)
+func (ids *idService) updateSnapshot() (updated bool) {
+ addrs := ids.Host.Addrs()
+ sort.Slice(addrs, func(i, j int) bool { return bytes.Compare(addrs[i].Bytes(), addrs[j].Bytes()) == -1 })
+ protos := ids.Host.Mux().Protocols()
+ sort.Slice(protos, func(i, j int) bool { return protos[i] < protos[j] })
+ snapshot := identifySnapshot{
+ addrs: addrs,
+ protocols: protos,
+ }
+
if !ids.disableSignedPeerRecord {
if cab, ok := peerstore.GetCertifiedAddrBook(ids.Host.Peerstore()); ok {
snapshot.record = cab.GetPeerRecord(ids.Host.ID())
}
}
- snapshot.addrs = ids.Host.Addrs()
- snapshot.protocols = ids.Host.Mux().Protocols()
- return snapshot
+
+ ids.currentSnapshot.Lock()
+ defer ids.currentSnapshot.Unlock()
+
+ if ids.currentSnapshot.snapshot.Equal(&snapshot) {
+ return false
+ }
+
+ snapshot.seq = ids.currentSnapshot.snapshot.seq + 1
+ ids.currentSnapshot.snapshot = snapshot
+
+ log.Debugw("updating snapshot", "seq", snapshot.seq, "addrs", snapshot.addrs)
+ return true
}
-func (ids *idService) writeChunkedIdentifyMsg(c network.Conn, snapshot *identifySnapshot, s network.Stream) error {
- mes := ids.createBaseIdentifyResponse(c, snapshot)
- sr := ids.getSignedRecord(snapshot)
- mes.SignedPeerRecord = sr
- writer := protoio.NewDelimitedWriter(s)
+func (ids *idService) writeChunkedIdentifyMsg(s network.Stream, mes *pb.Identify) error {
+ writer := pbio.NewDelimitedWriter(s)
- if sr == nil || proto.Size(mes) <= legacyIDSize {
+ if mes.SignedPeerRecord == nil || proto.Size(mes) <= legacyIDSize {
return writer.WriteMsg(mes)
}
+
+ sr := mes.SignedPeerRecord
mes.SignedPeerRecord = nil
if err := writer.WriteMsg(mes); err != nil {
return err
}
-
// then write just the signed record
- m := &pb.Identify{SignedPeerRecord: sr}
- err := writer.WriteMsg(m)
- return err
-
+ return writer.WriteMsg(&pb.Identify{SignedPeerRecord: sr})
}
-func (ids *idService) createBaseIdentifyResponse(
- conn network.Conn,
- snapshot *identifySnapshot,
-) *pb.Identify {
+func (ids *idService) createBaseIdentifyResponse(conn network.Conn, snapshot *identifySnapshot) *pb.Identify {
mes := &pb.Identify{}
remoteAddr := conn.RemoteMultiaddr()
localAddr := conn.LocalMultiaddr()
// set protocols this node is currently handling
- mes.Protocols = snapshot.protocols
+ mes.Protocols = protocol.ConvertToStrings(snapshot.protocols)
// observed address so other side is informed of their
// "public" address, at least in relation to us.
@@ -541,10 +647,8 @@ func (ids *idService) createBaseIdentifyResponse(
}
// set protocol versions
- pv := LibP2PVersion
- av := ids.UserAgent
- mes.ProtocolVersion = &pv
- mes.AgentVersion = &av
+ mes.ProtocolVersion = &ids.ProtocolVersion
+ mes.AgentVersion = &ids.UserAgent
return mes
}
@@ -563,11 +667,50 @@ func (ids *idService) getSignedRecord(snapshot *identifySnapshot) []byte {
return recBytes
}
-func (ids *idService) consumeMessage(mes *pb.Identify, c network.Conn) {
+// diff takes two slices of protocol IDs (a and b) and computes which elements were added and removed in b
+func diff(a, b []protocol.ID) (added, removed []protocol.ID) {
+ // This is O(n^2), but it's fine because the slices are small.
+ for _, x := range b {
+ var found bool
+ for _, y := range a {
+ if x == y {
+ found = true
+ break
+ }
+ }
+ if !found {
+ added = append(added, x)
+ }
+ }
+ for _, x := range a {
+ var found bool
+ for _, y := range b {
+ if x == y {
+ found = true
+ break
+ }
+ }
+ if !found {
+ removed = append(removed, x)
+ }
+ }
+ return
+}
+
+func (ids *idService) consumeMessage(mes *pb.Identify, c network.Conn, isPush bool) {
p := c.RemotePeer()
- // mes.Protocols
- ids.Host.Peerstore().SetProtocols(p, mes.Protocols...)
+ supported, _ := ids.Host.Peerstore().GetProtocols(p)
+ mesProtocols := protocol.ConvertFromStrings(mes.Protocols)
+ added, removed := diff(supported, mesProtocols)
+ ids.Host.Peerstore().SetProtocols(p, mesProtocols...)
+ if isPush {
+ ids.emitters.evtPeerProtocolsUpdated.Emit(event.EvtPeerProtocolsUpdated{
+ Peer: p,
+ Added: added,
+ Removed: removed,
+ })
+ }
// mes.ObservedAddr
ids.consumeObservedAddress(mes.GetObservedAddr(), c)
@@ -595,7 +738,6 @@ func (ids *idService) consumeMessage(mes *pb.Identify, c network.Conn) {
// add certified addresses for the peer, if they sent us a signed peer record
// otherwise use the unsigned addresses.
- var signedPeerRecord *record.Envelope
signedPeerRecord, err := signedPeerRecordFromMessage(mes)
if err != nil {
log.Errorf("error getting peer record from Identify message: %v", err)
@@ -617,16 +759,18 @@ func (ids *idService) consumeMessage(mes *pb.Identify, c network.Conn) {
ids.Host.Peerstore().UpdateAddrs(p, ttl, peerstore.TempAddrTTL)
}
- // add signed addrs if we have them and the peerstore supports them
- cab, ok := peerstore.GetCertifiedAddrBook(ids.Host.Peerstore())
- if ok && signedPeerRecord != nil {
- _, addErr := cab.ConsumePeerRecord(signedPeerRecord, ttl)
- if addErr != nil {
- log.Debugf("error adding signed addrs to peerstore: %v", addErr)
+ var addrs []ma.Multiaddr
+ if signedPeerRecord != nil {
+ signedAddrs, err := ids.consumeSignedPeerRecord(c.RemotePeer(), signedPeerRecord)
+ if err != nil {
+ log.Debugf("failed to consume signed peer record: %s", err)
+ } else {
+ addrs = signedAddrs
}
} else {
- ids.Host.Peerstore().AddAddrs(p, lmaddrs, ttl)
+ addrs = lmaddrs
}
+ ids.Host.Peerstore().AddAddrs(p, filterAddrs(addrs, c.RemoteMultiaddr()), ttl)
// Finally, expire all temporary addrs.
ids.Host.Peerstore().UpdateAddrs(p, peerstore.TempAddrTTL, 0)
@@ -645,6 +789,34 @@ func (ids *idService) consumeMessage(mes *pb.Identify, c network.Conn) {
ids.consumeReceivedPubKey(c, mes.PublicKey)
}
+func (ids *idService) consumeSignedPeerRecord(p peer.ID, signedPeerRecord *record.Envelope) ([]ma.Multiaddr, error) {
+ if signedPeerRecord.PublicKey == nil {
+ return nil, errors.New("missing pubkey")
+ }
+ id, err := peer.IDFromPublicKey(signedPeerRecord.PublicKey)
+ if err != nil {
+ return nil, fmt.Errorf("failed to derive peer ID: %s", err)
+ }
+ if id != p {
+ return nil, fmt.Errorf("received signed peer record envelope for unexpected peer ID. expected %s, got %s", p, id)
+ }
+ r, err := signedPeerRecord.Record()
+ if err != nil {
+ return nil, fmt.Errorf("failed to obtain record: %w", err)
+ }
+ rec, ok := r.(*peer.PeerRecord)
+ if !ok {
+ return nil, errors.New("not a peer record")
+ }
+ if rec.PeerID != p {
+ return nil, fmt.Errorf("received signed peer record for unexpected peer ID. expected %s, got %s", p, rec.PeerID)
+ }
+ // Don't put the signed peer record into the peer store.
+ // They're not used anywhere.
+ // All we care about are the addresses.
+ return rec.Addrs, nil
+}
+
func (ids *idService) consumeReceivedPubKey(c network.Conn, kb []byte) {
lp := c.LocalPeer()
rp := c.RemotePeer()
@@ -762,6 +934,15 @@ func (ids *idService) consumeObservedAddress(observed []byte, c network.Conn) {
ids.observedAddrs.Record(c, maddr)
}
+// addConnWithLock assumes caller holds the connsMu lock
+func (ids *idService) addConnWithLock(c network.Conn) {
+ _, found := ids.conns[c]
+ if !found {
+ <-ids.setupCompleted
+ ids.conns[c] = entry{}
+ }
+}
+
func signedPeerRecordFromMessage(msg *pb.Identify) (*record.Envelope, error) {
if msg.SignedPeerRecord == nil || len(msg.SignedPeerRecord) == 0 {
return nil, nil
@@ -770,40 +951,53 @@ func signedPeerRecordFromMessage(msg *pb.Identify) (*record.Envelope, error) {
return env, err
}
-// netNotifiee defines methods to be used with the IpfsDHT
+// netNotifiee defines methods to be used with the swarm
type netNotifiee idService
func (nn *netNotifiee) IDService() *idService {
return (*idService)(nn)
}
-func (nn *netNotifiee) Connected(n network.Network, v network.Conn) {
- nn.IDService().IdentifyWait(v)
+func (nn *netNotifiee) Connected(_ network.Network, c network.Conn) {
+ ids := nn.IDService()
+
+ ids.connsMu.Lock()
+ ids.addConnWithLock(c)
+ ids.connsMu.Unlock()
+
+ nn.IDService().IdentifyWait(c)
}
-func (nn *netNotifiee) Disconnected(n network.Network, v network.Conn) {
+func (nn *netNotifiee) Disconnected(_ network.Network, c network.Conn) {
ids := nn.IDService()
// Stop tracking the connection.
- ids.removeConn(v)
-
- // undo the setting of addresses to peer.ConnectedAddrTTL we did
- ids.addrMu.Lock()
- defer ids.addrMu.Unlock()
-
- if ids.Host.Network().Connectedness(v.RemotePeer()) != network.Connected {
- // consider removing the peer handler for this
- select {
- case ids.rmPeerHandlerCh <- rmPeerHandlerReq{v.RemotePeer()}:
- case <-ids.ctx.Done():
- return
- }
+ ids.connsMu.Lock()
+ delete(ids.conns, c)
+ ids.connsMu.Unlock()
+ if ids.Host.Network().Connectedness(c.RemotePeer()) != network.Connected {
// Last disconnect.
- ps := ids.Host.Peerstore()
- ps.UpdateAddrs(v.RemotePeer(), peerstore.ConnectedAddrTTL, peerstore.RecentlyConnectedAddrTTL)
+ // Undo the setting of addresses to peer.ConnectedAddrTTL we did
+ ids.addrMu.Lock()
+ defer ids.addrMu.Unlock()
+ ids.Host.Peerstore().UpdateAddrs(c.RemotePeer(), peerstore.ConnectedAddrTTL, peerstore.RecentlyConnectedAddrTTL)
}
}
func (nn *netNotifiee) Listen(n network.Network, a ma.Multiaddr) {}
func (nn *netNotifiee) ListenClose(n network.Network, a ma.Multiaddr) {}
+
+// filterAddrs filters the address slice based on the remote multiaddr:
+// * if it's a localhost address, no filtering is applied
+// * if it's a local network address, all localhost addresses are filtered out
+// * if it's a public address, all localhost and local network addresses are filtered out
+func filterAddrs(addrs []ma.Multiaddr, remote ma.Multiaddr) []ma.Multiaddr {
+ if manet.IsIPLoopback(remote) {
+ return addrs
+ }
+ if manet.IsPrivateAddr(remote) {
+ return ma.FilterAddrs(addrs, func(a ma.Multiaddr) bool { return !manet.IsIPLoopback(a) })
+ }
+ return ma.FilterAddrs(addrs, manet.IsPublicAddr)
+}
diff --git a/vendor/github.com/libp2p/go-libp2p/p2p/protocol/identify/id_delta.go b/vendor/github.com/libp2p/go-libp2p/p2p/protocol/identify/id_delta.go
deleted file mode 100644
index 7f7c75f12..000000000
--- a/vendor/github.com/libp2p/go-libp2p/p2p/protocol/identify/id_delta.go
+++ /dev/null
@@ -1,82 +0,0 @@
-package identify
-
-import (
- "time"
-
- "github.com/libp2p/go-libp2p/core/event"
- "github.com/libp2p/go-libp2p/core/network"
- "github.com/libp2p/go-libp2p/core/peer"
- "github.com/libp2p/go-libp2p/core/protocol"
- pb "github.com/libp2p/go-libp2p/p2p/protocol/identify/pb"
-
- "github.com/libp2p/go-msgio/protoio"
-)
-
-const IDDelta = "/p2p/id/delta/1.0.0"
-
-const deltaMsgSize = 2048
-
-// deltaHandler handles incoming delta updates from peers.
-func (ids *idService) deltaHandler(s network.Stream) {
- if err := s.Scope().SetService(ServiceName); err != nil {
- log.Warnf("error attaching stream to identify service: %s", err)
- s.Reset()
- return
- }
-
- if err := s.Scope().ReserveMemory(deltaMsgSize, network.ReservationPriorityAlways); err != nil {
- log.Warnf("error reserving memory for identify stream: %s", err)
- s.Reset()
- return
- }
- defer s.Scope().ReleaseMemory(deltaMsgSize)
-
- _ = s.SetReadDeadline(time.Now().Add(StreamReadTimeout))
-
- c := s.Conn()
-
- r := protoio.NewDelimitedReader(s, deltaMsgSize)
- mes := pb.Identify{}
- if err := r.ReadMsg(&mes); err != nil {
- log.Warn("error reading identify message: ", err)
- _ = s.Reset()
- return
- }
-
- defer s.Close()
-
- log.Debugf("%s received message from %s %s", s.Protocol(), c.RemotePeer(), c.RemoteMultiaddr())
-
- delta := mes.GetDelta()
- if delta == nil {
- return
- }
-
- p := s.Conn().RemotePeer()
- if err := ids.consumeDelta(p, delta); err != nil {
- _ = s.Reset()
- log.Warnf("delta update from peer %s failed: %s", p, err)
- }
-}
-
-// consumeDelta processes an incoming delta from a peer, updating the peerstore
-// and emitting the appropriate events.
-func (ids *idService) consumeDelta(id peer.ID, delta *pb.Delta) error {
- err := ids.Host.Peerstore().AddProtocols(id, delta.GetAddedProtocols()...)
- if err != nil {
- return err
- }
-
- err = ids.Host.Peerstore().RemoveProtocols(id, delta.GetRmProtocols()...)
- if err != nil {
- return err
- }
-
- evt := event.EvtPeerProtocolsUpdated{
- Peer: id,
- Added: protocol.ConvertFromStrings(delta.GetAddedProtocols()),
- Removed: protocol.ConvertFromStrings(delta.GetRmProtocols()),
- }
- ids.emitters.evtPeerProtocolsUpdated.Emit(evt)
- return nil
-}
diff --git a/vendor/github.com/libp2p/go-libp2p/p2p/protocol/identify/id_go117.go b/vendor/github.com/libp2p/go-libp2p/p2p/protocol/identify/id_go117.go
deleted file mode 100644
index 22be28a5e..000000000
--- a/vendor/github.com/libp2p/go-libp2p/p2p/protocol/identify/id_go117.go
+++ /dev/null
@@ -1,22 +0,0 @@
-//go:build !go1.18
-
-package identify
-
-import (
- "fmt"
- "runtime/debug"
-)
-
-func init() {
- bi, ok := debug.ReadBuildInfo()
- // ok will only be true if this is built as a dependency of another module
- if !ok {
- return
- }
- version := bi.Main.Version
- if version == "(devel)" {
- defaultUserAgent = bi.Main.Path
- } else {
- defaultUserAgent = fmt.Sprintf("%s@%s", bi.Main.Path, bi.Main.Version)
- }
-}
diff --git a/vendor/github.com/libp2p/go-libp2p/p2p/protocol/identify/id_push.go b/vendor/github.com/libp2p/go-libp2p/p2p/protocol/identify/id_push.go
deleted file mode 100644
index cbb47a9fa..000000000
--- a/vendor/github.com/libp2p/go-libp2p/p2p/protocol/identify/id_push.go
+++ /dev/null
@@ -1,17 +0,0 @@
-package identify
-
-import (
- "github.com/libp2p/go-libp2p/core/network"
-)
-
-// IDPush is the protocol.ID of the Identify push protocol. It sends full identify messages containing
-// the current state of the peer.
-//
-// It is in the process of being replaced by identify delta, which sends only diffs for better
-// resource utilisation.
-const IDPush = "/ipfs/id/push/1.0.0"
-
-// pushHandler handles incoming identify push streams. The behaviour is identical to the ordinary identify protocol.
-func (ids *idService) pushHandler(s network.Stream) {
- ids.handleIdentifyResponse(s)
-}
diff --git a/vendor/github.com/libp2p/go-libp2p/p2p/protocol/identify/metrics.go b/vendor/github.com/libp2p/go-libp2p/p2p/protocol/identify/metrics.go
new file mode 100644
index 000000000..28598fa33
--- /dev/null
+++ b/vendor/github.com/libp2p/go-libp2p/p2p/protocol/identify/metrics.go
@@ -0,0 +1,206 @@
+package identify
+
+import (
+ "github.com/libp2p/go-libp2p/core/event"
+ "github.com/libp2p/go-libp2p/core/network"
+ "github.com/libp2p/go-libp2p/p2p/metricshelper"
+
+ "github.com/prometheus/client_golang/prometheus"
+)
+
+const metricNamespace = "libp2p_identify"
+
+var (
+ pushesTriggered = prometheus.NewCounterVec(
+ prometheus.CounterOpts{
+ Namespace: metricNamespace,
+ Name: "identify_pushes_triggered_total",
+ Help: "Pushes Triggered",
+ },
+ []string{"trigger"},
+ )
+ identify = prometheus.NewCounterVec(
+ prometheus.CounterOpts{
+ Namespace: metricNamespace,
+ Name: "identify_total",
+ Help: "Identify",
+ },
+ []string{"dir"},
+ )
+ identifyPush = prometheus.NewCounterVec(
+ prometheus.CounterOpts{
+ Namespace: metricNamespace,
+ Name: "identify_push_total",
+ Help: "Identify Push",
+ },
+ []string{"dir"},
+ )
+ connPushSupportTotal = prometheus.NewCounterVec(
+ prometheus.CounterOpts{
+ Namespace: metricNamespace,
+ Name: "conn_push_support_total",
+ Help: "Identify Connection Push Support",
+ },
+ []string{"support"},
+ )
+ protocolsCount = prometheus.NewGauge(
+ prometheus.GaugeOpts{
+ Namespace: metricNamespace,
+ Name: "protocols_count",
+ Help: "Protocols Count",
+ },
+ )
+ addrsCount = prometheus.NewGauge(
+ prometheus.GaugeOpts{
+ Namespace: metricNamespace,
+ Name: "addrs_count",
+ Help: "Address Count",
+ },
+ )
+ numProtocolsReceived = prometheus.NewHistogram(
+ prometheus.HistogramOpts{
+ Namespace: metricNamespace,
+ Name: "protocols_received",
+ Help: "Number of Protocols received",
+ Buckets: buckets,
+ },
+ )
+ numAddrsReceived = prometheus.NewHistogram(
+ prometheus.HistogramOpts{
+ Namespace: metricNamespace,
+ Name: "addrs_received",
+ Help: "Number of addrs received",
+ Buckets: buckets,
+ },
+ )
+ collectors = []prometheus.Collector{
+ pushesTriggered,
+ identify,
+ identifyPush,
+ connPushSupportTotal,
+ protocolsCount,
+ addrsCount,
+ numProtocolsReceived,
+ numAddrsReceived,
+ }
+ // 1 to 20 and then up to 100 in steps of 5
+ buckets = append(
+ prometheus.LinearBuckets(1, 1, 20),
+ prometheus.LinearBuckets(25, 5, 16)...,
+ )
+)
+
+type MetricsTracer interface {
+ // TriggeredPushes counts IdentifyPushes triggered by event
+ TriggeredPushes(event any)
+
+ // ConnPushSupport counts peers by Push Support
+ ConnPushSupport(identifyPushSupport)
+
+ // IdentifyReceived tracks metrics on receiving an identify response
+ IdentifyReceived(isPush bool, numProtocols int, numAddrs int)
+
+ // IdentifySent tracks metrics on sending an identify response
+ IdentifySent(isPush bool, numProtocols int, numAddrs int)
+}
+
+type metricsTracer struct{}
+
+var _ MetricsTracer = &metricsTracer{}
+
+type metricsTracerSetting struct {
+ reg prometheus.Registerer
+}
+
+type MetricsTracerOption func(*metricsTracerSetting)
+
+func WithRegisterer(reg prometheus.Registerer) MetricsTracerOption {
+ return func(s *metricsTracerSetting) {
+ if reg != nil {
+ s.reg = reg
+ }
+ }
+}
+
+func NewMetricsTracer(opts ...MetricsTracerOption) MetricsTracer {
+ setting := &metricsTracerSetting{reg: prometheus.DefaultRegisterer}
+ for _, opt := range opts {
+ opt(setting)
+ }
+ metricshelper.RegisterCollectors(setting.reg, collectors...)
+ return &metricsTracer{}
+}
+
+func (t *metricsTracer) TriggeredPushes(ev any) {
+ tags := metricshelper.GetStringSlice()
+ defer metricshelper.PutStringSlice(tags)
+
+ typ := "unknown"
+ switch ev.(type) {
+ case event.EvtLocalProtocolsUpdated:
+ typ = "protocols_updated"
+ case event.EvtLocalAddressesUpdated:
+ typ = "addresses_updated"
+ }
+ *tags = append(*tags, typ)
+ pushesTriggered.WithLabelValues(*tags...).Inc()
+}
+
+func (t *metricsTracer) IncrementPushSupport(s identifyPushSupport) {
+ tags := metricshelper.GetStringSlice()
+ defer metricshelper.PutStringSlice(tags)
+
+ *tags = append(*tags, getPushSupport(s))
+ connPushSupportTotal.WithLabelValues(*tags...).Inc()
+}
+
+func (t *metricsTracer) IdentifySent(isPush bool, numProtocols int, numAddrs int) {
+ tags := metricshelper.GetStringSlice()
+ defer metricshelper.PutStringSlice(tags)
+
+ if isPush {
+ *tags = append(*tags, metricshelper.GetDirection(network.DirOutbound))
+ identifyPush.WithLabelValues(*tags...).Inc()
+ } else {
+ *tags = append(*tags, metricshelper.GetDirection(network.DirInbound))
+ identify.WithLabelValues(*tags...).Inc()
+ }
+
+ protocolsCount.Set(float64(numProtocols))
+ addrsCount.Set(float64(numAddrs))
+}
+
+func (t *metricsTracer) IdentifyReceived(isPush bool, numProtocols int, numAddrs int) {
+ tags := metricshelper.GetStringSlice()
+ defer metricshelper.PutStringSlice(tags)
+
+ if isPush {
+ *tags = append(*tags, metricshelper.GetDirection(network.DirInbound))
+ identifyPush.WithLabelValues(*tags...).Inc()
+ } else {
+ *tags = append(*tags, metricshelper.GetDirection(network.DirOutbound))
+ identify.WithLabelValues(*tags...).Inc()
+ }
+
+ numProtocolsReceived.Observe(float64(numProtocols))
+ numAddrsReceived.Observe(float64(numAddrs))
+}
+
+func (t *metricsTracer) ConnPushSupport(support identifyPushSupport) {
+ tags := metricshelper.GetStringSlice()
+ defer metricshelper.PutStringSlice(tags)
+
+ *tags = append(*tags, getPushSupport(support))
+ connPushSupportTotal.WithLabelValues(*tags...).Inc()
+}
+
+func getPushSupport(s identifyPushSupport) string {
+ switch s {
+ case identifyPushSupported:
+ return "supported"
+ case identifyPushUnsupported:
+ return "not supported"
+ default:
+ return "unknown"
+ }
+}
diff --git a/vendor/github.com/libp2p/go-libp2p/p2p/protocol/identify/obsaddr.go b/vendor/github.com/libp2p/go-libp2p/p2p/protocol/identify/obsaddr.go
index bd72175dc..451af096d 100644
--- a/vendor/github.com/libp2p/go-libp2p/p2p/protocol/identify/obsaddr.go
+++ b/vendor/github.com/libp2p/go-libp2p/p2p/protocol/identify/obsaddr.go
@@ -141,7 +141,7 @@ func NewObservedAddrManager(host host.Host) (*ObservedAddrManager, error) {
}
oas.ctx, oas.ctxCancel = context.WithCancel(context.Background())
- reachabilitySub, err := host.EventBus().Subscribe(new(event.EvtLocalReachabilityChanged))
+ reachabilitySub, err := host.EventBus().Subscribe(new(event.EvtLocalReachabilityChanged), eventbus.Name("identify (obsaddr)"))
if err != nil {
return nil, fmt.Errorf("failed to subscribe to reachability event: %s", err)
}
@@ -356,54 +356,99 @@ func (oas *ObservedAddrManager) removeConn(conn network.Conn) {
oas.activeConnsMu.Unlock()
}
-func (oas *ObservedAddrManager) maybeRecordObservation(conn network.Conn, observed ma.Multiaddr) {
+type normalizeMultiaddrer interface {
+ NormalizeMultiaddr(addr ma.Multiaddr) ma.Multiaddr
+}
+
+type addrsProvider interface {
+ Addrs() []ma.Multiaddr
+}
+
+type listenAddrsProvider interface {
+ ListenAddresses() []ma.Multiaddr
+ InterfaceListenAddresses() ([]ma.Multiaddr, error)
+}
+
+func shouldRecordObservation(host addrsProvider, network listenAddrsProvider, conn network.ConnMultiaddrs, observed ma.Multiaddr) bool {
// First, determine if this observation is even worth keeping...
// Ignore observations from loopback nodes. We already know our loopback
// addresses.
if manet.IsIPLoopback(observed) {
- return
+ return false
}
// we should only use ObservedAddr when our connection's LocalAddr is one
// of our ListenAddrs. If we Dial out using an ephemeral addr, knowing that
// address's external mapping is not very useful because the port will not be
// the same as the listen addr.
- ifaceaddrs, err := oas.host.Network().InterfaceListenAddresses()
+ ifaceaddrs, err := network.InterfaceListenAddresses()
if err != nil {
log.Infof("failed to get interface listen addrs", err)
- return
+ return false
+ }
+
+ normalizer, canNormalize := host.(normalizeMultiaddrer)
+
+ if canNormalize {
+ for i, a := range ifaceaddrs {
+ ifaceaddrs[i] = normalizer.NormalizeMultiaddr(a)
+ }
}
local := conn.LocalMultiaddr()
- if !ma.Contains(ifaceaddrs, local) && !ma.Contains(oas.host.Network().ListenAddresses(), local) {
+ if canNormalize {
+ local = normalizer.NormalizeMultiaddr(local)
+ }
+
+ listenAddrs := network.ListenAddresses()
+ if canNormalize {
+ for i, a := range listenAddrs {
+ listenAddrs[i] = normalizer.NormalizeMultiaddr(a)
+ }
+ }
+
+ if !ma.Contains(ifaceaddrs, local) && !ma.Contains(listenAddrs, local) {
// not in our list
- return
+ return false
+ }
+
+ hostAddrs := host.Addrs()
+ if canNormalize {
+ for i, a := range hostAddrs {
+ hostAddrs[i] = normalizer.NormalizeMultiaddr(a)
+ }
}
// We should reject the connection if the observation doesn't match the
// transports of one of our advertised addresses.
- if !HasConsistentTransport(observed, oas.host.Addrs()) &&
- !HasConsistentTransport(observed, oas.host.Network().ListenAddresses()) {
+ if !HasConsistentTransport(observed, hostAddrs) &&
+ !HasConsistentTransport(observed, listenAddrs) {
log.Debugw(
"observed multiaddr doesn't match the transports of any announced addresses",
"from", conn.RemoteMultiaddr(),
"observed", observed,
)
- return
+ return false
}
- // Ok, the observation is good, record it.
- log.Debugw("added own observed listen addr", "observed", observed)
+ return true
+}
- defer oas.addConn(conn, observed)
+func (oas *ObservedAddrManager) maybeRecordObservation(conn network.Conn, observed ma.Multiaddr) {
+ shouldRecord := shouldRecordObservation(oas.host, oas.host.Network(), conn, observed)
+ if shouldRecord {
+ // Ok, the observation is good, record it.
+ log.Debugw("added own observed listen addr", "observed", observed)
+ defer oas.addConn(conn, observed)
- oas.mu.Lock()
- defer oas.mu.Unlock()
- oas.recordObservationUnlocked(conn, observed)
+ oas.mu.Lock()
+ defer oas.mu.Unlock()
+ oas.recordObservationUnlocked(conn, observed)
- if oas.reachability == network.ReachabilityPrivate {
- oas.emitAllNATTypes()
+ if oas.reachability == network.ReachabilityPrivate {
+ oas.emitAllNATTypes()
+ }
}
}
diff --git a/vendor/github.com/libp2p/go-libp2p/p2p/protocol/identify/opts.go b/vendor/github.com/libp2p/go-libp2p/p2p/protocol/identify/opts.go
index 0eb1d9664..f18866568 100644
--- a/vendor/github.com/libp2p/go-libp2p/p2p/protocol/identify/opts.go
+++ b/vendor/github.com/libp2p/go-libp2p/p2p/protocol/identify/opts.go
@@ -1,13 +1,23 @@
package identify
type config struct {
+ protocolVersion string
userAgent string
disableSignedPeerRecord bool
+ metricsTracer MetricsTracer
}
// Option is an option function for identify.
type Option func(*config)
+// ProtocolVersion sets the protocol version string that will be used to
+// identify the family of protocols used by the peer.
+func ProtocolVersion(s string) Option {
+ return func(cfg *config) {
+ cfg.protocolVersion = s
+ }
+}
+
// UserAgent sets the user agent this node will identify itself with to peers.
func UserAgent(ua string) Option {
return func(cfg *config) {
@@ -22,3 +32,9 @@ func DisableSignedPeerRecord() Option {
cfg.disableSignedPeerRecord = true
}
}
+
+func WithMetricsTracer(tr MetricsTracer) Option {
+ return func(cfg *config) {
+ cfg.metricsTracer = tr
+ }
+}
diff --git a/vendor/github.com/libp2p/go-libp2p/p2p/protocol/identify/pb/Makefile b/vendor/github.com/libp2p/go-libp2p/p2p/protocol/identify/pb/Makefile
deleted file mode 100644
index eb14b5768..000000000
--- a/vendor/github.com/libp2p/go-libp2p/p2p/protocol/identify/pb/Makefile
+++ /dev/null
@@ -1,11 +0,0 @@
-PB = $(wildcard *.proto)
-GO = $(PB:.proto=.pb.go)
-
-all: $(GO)
-
-%.pb.go: %.proto
- protoc --proto_path=$(GOPATH)/src:. --gogofast_out=. $<
-
-clean:
- rm -f *.pb.go
- rm -f *.go
diff --git a/vendor/github.com/libp2p/go-libp2p/p2p/protocol/identify/pb/identify.pb.go b/vendor/github.com/libp2p/go-libp2p/p2p/protocol/identify/pb/identify.pb.go
index 3cfed8270..1c93815d4 100644
--- a/vendor/github.com/libp2p/go-libp2p/p2p/protocol/identify/pb/identify.pb.go
+++ b/vendor/github.com/libp2p/go-libp2p/p2p/protocol/identify/pb/identify.pb.go
@@ -1,91 +1,35 @@
-// Code generated by protoc-gen-gogo. DO NOT EDIT.
-// source: identify.proto
+// Code generated by protoc-gen-go. DO NOT EDIT.
+// versions:
+// protoc-gen-go v1.30.0
+// protoc v3.21.12
+// source: pb/identify.proto
-package identify_pb
+package pb
import (
- fmt "fmt"
- io "io"
- math "math"
- math_bits "math/bits"
-
- proto "github.com/gogo/protobuf/proto"
+ protoreflect "google.golang.org/protobuf/reflect/protoreflect"
+ protoimpl "google.golang.org/protobuf/runtime/protoimpl"
+ reflect "reflect"
+ sync "sync"
)
-// Reference imports to suppress errors if they are not otherwise used.
-var _ = proto.Marshal
-var _ = fmt.Errorf
-var _ = math.Inf
-
-// This is a compile-time assertion to ensure that this generated file
-// is compatible with the proto package it is being compiled against.
-// A compilation error at this line likely means your copy of the
-// proto package needs to be updated.
-const _ = proto.GoGoProtoPackageIsVersion3 // please upgrade the proto package
-
-type Delta struct {
- // new protocols now serviced by the peer.
- AddedProtocols []string `protobuf:"bytes,1,rep,name=added_protocols,json=addedProtocols" json:"added_protocols,omitempty"`
- // protocols dropped by the peer.
- RmProtocols []string `protobuf:"bytes,2,rep,name=rm_protocols,json=rmProtocols" json:"rm_protocols,omitempty"`
- XXX_NoUnkeyedLiteral struct{} `json:"-"`
- XXX_unrecognized []byte `json:"-"`
- XXX_sizecache int32 `json:"-"`
-}
-
-func (m *Delta) Reset() { *m = Delta{} }
-func (m *Delta) String() string { return proto.CompactTextString(m) }
-func (*Delta) ProtoMessage() {}
-func (*Delta) Descriptor() ([]byte, []int) {
- return fileDescriptor_83f1e7e6b485409f, []int{0}
-}
-func (m *Delta) XXX_Unmarshal(b []byte) error {
- return m.Unmarshal(b)
-}
-func (m *Delta) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) {
- if deterministic {
- return xxx_messageInfo_Delta.Marshal(b, m, deterministic)
- } else {
- b = b[:cap(b)]
- n, err := m.MarshalToSizedBuffer(b)
- if err != nil {
- return nil, err
- }
- return b[:n], nil
- }
-}
-func (m *Delta) XXX_Merge(src proto.Message) {
- xxx_messageInfo_Delta.Merge(m, src)
-}
-func (m *Delta) XXX_Size() int {
- return m.Size()
-}
-func (m *Delta) XXX_DiscardUnknown() {
- xxx_messageInfo_Delta.DiscardUnknown(m)
-}
-
-var xxx_messageInfo_Delta proto.InternalMessageInfo
-
-func (m *Delta) GetAddedProtocols() []string {
- if m != nil {
- return m.AddedProtocols
- }
- return nil
-}
-
-func (m *Delta) GetRmProtocols() []string {
- if m != nil {
- return m.RmProtocols
- }
- return nil
-}
+const (
+ // Verify that this generated code is sufficiently up-to-date.
+ _ = protoimpl.EnforceVersion(20 - protoimpl.MinVersion)
+ // Verify that runtime/protoimpl is sufficiently up-to-date.
+ _ = protoimpl.EnforceVersion(protoimpl.MaxVersion - 20)
+)
type Identify struct {
+ state protoimpl.MessageState
+ sizeCache protoimpl.SizeCache
+ unknownFields protoimpl.UnknownFields
+
// protocolVersion determines compatibility between peers
- ProtocolVersion *string `protobuf:"bytes,5,opt,name=protocolVersion" json:"protocolVersion,omitempty"`
+ ProtocolVersion *string `protobuf:"bytes,5,opt,name=protocolVersion" json:"protocolVersion,omitempty"` // e.g. ipfs/1.0.0
// agentVersion is like a UserAgent string in browsers, or client version in bittorrent
// includes the client name and client.
- AgentVersion *string `protobuf:"bytes,6,opt,name=agentVersion" json:"agentVersion,omitempty"`
+ AgentVersion *string `protobuf:"bytes,6,opt,name=agentVersion" json:"agentVersion,omitempty"` // e.g. go-ipfs/0.1.0
// publicKey is this node's public key (which also gives its node.ID)
// - may not need to be sent, as secure channel implies it has been sent.
// - then again, if we change / disable secure channel, may still want it.
@@ -98,877 +42,178 @@ type Identify struct {
ObservedAddr []byte `protobuf:"bytes,4,opt,name=observedAddr" json:"observedAddr,omitempty"`
// protocols are the services this node is running
Protocols []string `protobuf:"bytes,3,rep,name=protocols" json:"protocols,omitempty"`
- // a delta update is incompatible with everything else. If this field is included, none of the others can appear.
- Delta *Delta `protobuf:"bytes,7,opt,name=delta" json:"delta,omitempty"`
// signedPeerRecord contains a serialized SignedEnvelope containing a PeerRecord,
// signed by the sending node. It contains the same addresses as the listenAddrs field, but
// in a form that lets us share authenticated addrs with other peers.
// see github.com/libp2p/go-libp2p/core/record/pb/envelope.proto and
// github.com/libp2p/go-libp2p/core/peer/pb/peer_record.proto for message definitions.
- SignedPeerRecord []byte `protobuf:"bytes,8,opt,name=signedPeerRecord" json:"signedPeerRecord,omitempty"`
- XXX_NoUnkeyedLiteral struct{} `json:"-"`
- XXX_unrecognized []byte `json:"-"`
- XXX_sizecache int32 `json:"-"`
+ SignedPeerRecord []byte `protobuf:"bytes,8,opt,name=signedPeerRecord" json:"signedPeerRecord,omitempty"`
}
-func (m *Identify) Reset() { *m = Identify{} }
-func (m *Identify) String() string { return proto.CompactTextString(m) }
-func (*Identify) ProtoMessage() {}
-func (*Identify) Descriptor() ([]byte, []int) {
- return fileDescriptor_83f1e7e6b485409f, []int{1}
-}
-func (m *Identify) XXX_Unmarshal(b []byte) error {
- return m.Unmarshal(b)
-}
-func (m *Identify) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) {
- if deterministic {
- return xxx_messageInfo_Identify.Marshal(b, m, deterministic)
- } else {
- b = b[:cap(b)]
- n, err := m.MarshalToSizedBuffer(b)
- if err != nil {
- return nil, err
- }
- return b[:n], nil
+func (x *Identify) Reset() {
+ *x = Identify{}
+ if protoimpl.UnsafeEnabled {
+ mi := &file_pb_identify_proto_msgTypes[0]
+ ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
+ ms.StoreMessageInfo(mi)
}
}
-func (m *Identify) XXX_Merge(src proto.Message) {
- xxx_messageInfo_Identify.Merge(m, src)
-}
-func (m *Identify) XXX_Size() int {
- return m.Size()
-}
-func (m *Identify) XXX_DiscardUnknown() {
- xxx_messageInfo_Identify.DiscardUnknown(m)
+
+func (x *Identify) String() string {
+ return protoimpl.X.MessageStringOf(x)
}
-var xxx_messageInfo_Identify proto.InternalMessageInfo
+func (*Identify) ProtoMessage() {}
-func (m *Identify) GetProtocolVersion() string {
- if m != nil && m.ProtocolVersion != nil {
- return *m.ProtocolVersion
+func (x *Identify) ProtoReflect() protoreflect.Message {
+ mi := &file_pb_identify_proto_msgTypes[0]
+ if protoimpl.UnsafeEnabled && x != nil {
+ ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
+ if ms.LoadMessageInfo() == nil {
+ ms.StoreMessageInfo(mi)
+ }
+ return ms
}
- return ""
+ return mi.MessageOf(x)
}
-func (m *Identify) GetAgentVersion() string {
- if m != nil && m.AgentVersion != nil {
- return *m.AgentVersion
- }
- return ""
+// Deprecated: Use Identify.ProtoReflect.Descriptor instead.
+func (*Identify) Descriptor() ([]byte, []int) {
+ return file_pb_identify_proto_rawDescGZIP(), []int{0}
}
-func (m *Identify) GetPublicKey() []byte {
- if m != nil {
- return m.PublicKey
+func (x *Identify) GetProtocolVersion() string {
+ if x != nil && x.ProtocolVersion != nil {
+ return *x.ProtocolVersion
}
- return nil
+ return ""
}
-func (m *Identify) GetListenAddrs() [][]byte {
- if m != nil {
- return m.ListenAddrs
+func (x *Identify) GetAgentVersion() string {
+ if x != nil && x.AgentVersion != nil {
+ return *x.AgentVersion
}
- return nil
+ return ""
}
-func (m *Identify) GetObservedAddr() []byte {
- if m != nil {
- return m.ObservedAddr
+func (x *Identify) GetPublicKey() []byte {
+ if x != nil {
+ return x.PublicKey
}
return nil
}
-func (m *Identify) GetProtocols() []string {
- if m != nil {
- return m.Protocols
+func (x *Identify) GetListenAddrs() [][]byte {
+ if x != nil {
+ return x.ListenAddrs
}
return nil
}
-func (m *Identify) GetDelta() *Delta {
- if m != nil {
- return m.Delta
+func (x *Identify) GetObservedAddr() []byte {
+ if x != nil {
+ return x.ObservedAddr
}
return nil
}
-func (m *Identify) GetSignedPeerRecord() []byte {
- if m != nil {
- return m.SignedPeerRecord
+func (x *Identify) GetProtocols() []string {
+ if x != nil {
+ return x.Protocols
}
return nil
}
-func init() {
- proto.RegisterType((*Delta)(nil), "identify.pb.Delta")
- proto.RegisterType((*Identify)(nil), "identify.pb.Identify")
-}
-
-func init() { proto.RegisterFile("identify.proto", fileDescriptor_83f1e7e6b485409f) }
-
-var fileDescriptor_83f1e7e6b485409f = []byte{
- // 272 bytes of a gzipped FileDescriptorProto
- 0x1f, 0x8b, 0x08, 0x00, 0x00, 0x00, 0x00, 0x00, 0x02, 0xff, 0x64, 0x90, 0x41, 0x4a, 0xc3, 0x40,
- 0x14, 0x86, 0x99, 0xd6, 0x6a, 0xf3, 0x12, 0x5a, 0x99, 0xd5, 0x2c, 0x24, 0xc4, 0x6c, 0x1c, 0x5c,
- 0x64, 0xe1, 0x0d, 0x14, 0x37, 0xe2, 0xa6, 0x8c, 0xe0, 0x56, 0x92, 0xbc, 0x67, 0x19, 0x48, 0x33,
- 0x65, 0x32, 0x0a, 0xbd, 0x95, 0xc7, 0x70, 0xe9, 0x11, 0x24, 0x27, 0x91, 0x4c, 0x4d, 0x93, 0xea,
- 0x72, 0x3e, 0x3e, 0xe6, 0x7f, 0xff, 0x0f, 0x0b, 0x8d, 0x54, 0x3b, 0xfd, 0xba, 0xcb, 0xb6, 0xd6,
- 0x38, 0xc3, 0xc3, 0xe1, 0x5d, 0xa4, 0x4f, 0x30, 0xbb, 0xa7, 0xca, 0xe5, 0xfc, 0x0a, 0x96, 0x39,
- 0x22, 0xe1, 0x8b, 0x97, 0x4a, 0x53, 0x35, 0x82, 0x25, 0x53, 0x19, 0xa8, 0x85, 0xc7, 0xab, 0x9e,
- 0xf2, 0x4b, 0x88, 0xec, 0x66, 0x64, 0x4d, 0xbc, 0x15, 0xda, 0xcd, 0x41, 0x49, 0x3f, 0x26, 0x30,
- 0x7f, 0xf8, 0x0d, 0xe1, 0x12, 0x96, 0xbd, 0xfc, 0x4c, 0xb6, 0xd1, 0xa6, 0x16, 0xb3, 0x84, 0xc9,
- 0x40, 0xfd, 0xc5, 0x3c, 0x85, 0x28, 0x5f, 0x53, 0xed, 0x7a, 0xed, 0xd4, 0x6b, 0x47, 0x8c, 0x5f,
- 0x40, 0xb0, 0x7d, 0x2b, 0x2a, 0x5d, 0x3e, 0xd2, 0x4e, 0xb0, 0x84, 0xc9, 0x48, 0x0d, 0x80, 0x27,
- 0x10, 0x56, 0xba, 0x71, 0x54, 0xdf, 0x22, 0xda, 0xfd, 0x69, 0x91, 0x1a, 0xa3, 0x2e, 0xc3, 0x14,
- 0x0d, 0xd9, 0x77, 0xc2, 0x0e, 0x88, 0x13, 0xff, 0xc5, 0x11, 0xf3, 0x19, 0x87, 0x7a, 0x53, 0x5f,
- 0x6f, 0x00, 0x5c, 0xc2, 0x0c, 0xbb, 0xc5, 0xc4, 0x59, 0xc2, 0x64, 0x78, 0xc3, 0xb3, 0xd1, 0x9c,
- 0x99, 0xdf, 0x52, 0xed, 0x05, 0x7e, 0x0d, 0xe7, 0x8d, 0x5e, 0xd7, 0x84, 0x2b, 0x22, 0xab, 0xa8,
- 0x34, 0x16, 0xc5, 0xdc, 0xe7, 0xfd, 0xe3, 0x77, 0xd1, 0x67, 0x1b, 0xb3, 0xaf, 0x36, 0x66, 0xdf,
- 0x6d, 0xcc, 0x7e, 0x02, 0x00, 0x00, 0xff, 0xff, 0xc0, 0x03, 0xc8, 0x41, 0xb3, 0x01, 0x00, 0x00,
-}
-
-func (m *Delta) Marshal() (dAtA []byte, err error) {
- size := m.Size()
- dAtA = make([]byte, size)
- n, err := m.MarshalToSizedBuffer(dAtA[:size])
- if err != nil {
- return nil, err
- }
- return dAtA[:n], nil
-}
-
-func (m *Delta) MarshalTo(dAtA []byte) (int, error) {
- size := m.Size()
- return m.MarshalToSizedBuffer(dAtA[:size])
-}
-
-func (m *Delta) MarshalToSizedBuffer(dAtA []byte) (int, error) {
- i := len(dAtA)
- _ = i
- var l int
- _ = l
- if m.XXX_unrecognized != nil {
- i -= len(m.XXX_unrecognized)
- copy(dAtA[i:], m.XXX_unrecognized)
- }
- if len(m.RmProtocols) > 0 {
- for iNdEx := len(m.RmProtocols) - 1; iNdEx >= 0; iNdEx-- {
- i -= len(m.RmProtocols[iNdEx])
- copy(dAtA[i:], m.RmProtocols[iNdEx])
- i = encodeVarintIdentify(dAtA, i, uint64(len(m.RmProtocols[iNdEx])))
- i--
- dAtA[i] = 0x12
- }
- }
- if len(m.AddedProtocols) > 0 {
- for iNdEx := len(m.AddedProtocols) - 1; iNdEx >= 0; iNdEx-- {
- i -= len(m.AddedProtocols[iNdEx])
- copy(dAtA[i:], m.AddedProtocols[iNdEx])
- i = encodeVarintIdentify(dAtA, i, uint64(len(m.AddedProtocols[iNdEx])))
- i--
- dAtA[i] = 0xa
- }
- }
- return len(dAtA) - i, nil
-}
-
-func (m *Identify) Marshal() (dAtA []byte, err error) {
- size := m.Size()
- dAtA = make([]byte, size)
- n, err := m.MarshalToSizedBuffer(dAtA[:size])
- if err != nil {
- return nil, err
- }
- return dAtA[:n], nil
-}
-
-func (m *Identify) MarshalTo(dAtA []byte) (int, error) {
- size := m.Size()
- return m.MarshalToSizedBuffer(dAtA[:size])
-}
-
-func (m *Identify) MarshalToSizedBuffer(dAtA []byte) (int, error) {
- i := len(dAtA)
- _ = i
- var l int
- _ = l
- if m.XXX_unrecognized != nil {
- i -= len(m.XXX_unrecognized)
- copy(dAtA[i:], m.XXX_unrecognized)
- }
- if m.SignedPeerRecord != nil {
- i -= len(m.SignedPeerRecord)
- copy(dAtA[i:], m.SignedPeerRecord)
- i = encodeVarintIdentify(dAtA, i, uint64(len(m.SignedPeerRecord)))
- i--
- dAtA[i] = 0x42
- }
- if m.Delta != nil {
- {
- size, err := m.Delta.MarshalToSizedBuffer(dAtA[:i])
- if err != nil {
- return 0, err
- }
- i -= size
- i = encodeVarintIdentify(dAtA, i, uint64(size))
- }
- i--
- dAtA[i] = 0x3a
- }
- if m.AgentVersion != nil {
- i -= len(*m.AgentVersion)
- copy(dAtA[i:], *m.AgentVersion)
- i = encodeVarintIdentify(dAtA, i, uint64(len(*m.AgentVersion)))
- i--
- dAtA[i] = 0x32
- }
- if m.ProtocolVersion != nil {
- i -= len(*m.ProtocolVersion)
- copy(dAtA[i:], *m.ProtocolVersion)
- i = encodeVarintIdentify(dAtA, i, uint64(len(*m.ProtocolVersion)))
- i--
- dAtA[i] = 0x2a
- }
- if m.ObservedAddr != nil {
- i -= len(m.ObservedAddr)
- copy(dAtA[i:], m.ObservedAddr)
- i = encodeVarintIdentify(dAtA, i, uint64(len(m.ObservedAddr)))
- i--
- dAtA[i] = 0x22
- }
- if len(m.Protocols) > 0 {
- for iNdEx := len(m.Protocols) - 1; iNdEx >= 0; iNdEx-- {
- i -= len(m.Protocols[iNdEx])
- copy(dAtA[i:], m.Protocols[iNdEx])
- i = encodeVarintIdentify(dAtA, i, uint64(len(m.Protocols[iNdEx])))
- i--
- dAtA[i] = 0x1a
- }
- }
- if len(m.ListenAddrs) > 0 {
- for iNdEx := len(m.ListenAddrs) - 1; iNdEx >= 0; iNdEx-- {
- i -= len(m.ListenAddrs[iNdEx])
- copy(dAtA[i:], m.ListenAddrs[iNdEx])
- i = encodeVarintIdentify(dAtA, i, uint64(len(m.ListenAddrs[iNdEx])))
- i--
- dAtA[i] = 0x12
- }
- }
- if m.PublicKey != nil {
- i -= len(m.PublicKey)
- copy(dAtA[i:], m.PublicKey)
- i = encodeVarintIdentify(dAtA, i, uint64(len(m.PublicKey)))
- i--
- dAtA[i] = 0xa
- }
- return len(dAtA) - i, nil
-}
-
-func encodeVarintIdentify(dAtA []byte, offset int, v uint64) int {
- offset -= sovIdentify(v)
- base := offset
- for v >= 1<<7 {
- dAtA[offset] = uint8(v&0x7f | 0x80)
- v >>= 7
- offset++
- }
- dAtA[offset] = uint8(v)
- return base
-}
-func (m *Delta) Size() (n int) {
- if m == nil {
- return 0
- }
- var l int
- _ = l
- if len(m.AddedProtocols) > 0 {
- for _, s := range m.AddedProtocols {
- l = len(s)
- n += 1 + l + sovIdentify(uint64(l))
- }
- }
- if len(m.RmProtocols) > 0 {
- for _, s := range m.RmProtocols {
- l = len(s)
- n += 1 + l + sovIdentify(uint64(l))
- }
- }
- if m.XXX_unrecognized != nil {
- n += len(m.XXX_unrecognized)
- }
- return n
-}
-
-func (m *Identify) Size() (n int) {
- if m == nil {
- return 0
- }
- var l int
- _ = l
- if m.PublicKey != nil {
- l = len(m.PublicKey)
- n += 1 + l + sovIdentify(uint64(l))
- }
- if len(m.ListenAddrs) > 0 {
- for _, b := range m.ListenAddrs {
- l = len(b)
- n += 1 + l + sovIdentify(uint64(l))
- }
- }
- if len(m.Protocols) > 0 {
- for _, s := range m.Protocols {
- l = len(s)
- n += 1 + l + sovIdentify(uint64(l))
- }
- }
- if m.ObservedAddr != nil {
- l = len(m.ObservedAddr)
- n += 1 + l + sovIdentify(uint64(l))
- }
- if m.ProtocolVersion != nil {
- l = len(*m.ProtocolVersion)
- n += 1 + l + sovIdentify(uint64(l))
- }
- if m.AgentVersion != nil {
- l = len(*m.AgentVersion)
- n += 1 + l + sovIdentify(uint64(l))
- }
- if m.Delta != nil {
- l = m.Delta.Size()
- n += 1 + l + sovIdentify(uint64(l))
- }
- if m.SignedPeerRecord != nil {
- l = len(m.SignedPeerRecord)
- n += 1 + l + sovIdentify(uint64(l))
- }
- if m.XXX_unrecognized != nil {
- n += len(m.XXX_unrecognized)
- }
- return n
-}
-
-func sovIdentify(x uint64) (n int) {
- return (math_bits.Len64(x|1) + 6) / 7
-}
-func sozIdentify(x uint64) (n int) {
- return sovIdentify(uint64((x << 1) ^ uint64((int64(x) >> 63))))
-}
-func (m *Delta) Unmarshal(dAtA []byte) error {
- l := len(dAtA)
- iNdEx := 0
- for iNdEx < l {
- preIndex := iNdEx
- var wire uint64
- for shift := uint(0); ; shift += 7 {
- if shift >= 64 {
- return ErrIntOverflowIdentify
- }
- if iNdEx >= l {
- return io.ErrUnexpectedEOF
- }
- b := dAtA[iNdEx]
- iNdEx++
- wire |= uint64(b&0x7F) << shift
- if b < 0x80 {
- break
- }
- }
- fieldNum := int32(wire >> 3)
- wireType := int(wire & 0x7)
- if wireType == 4 {
- return fmt.Errorf("proto: Delta: wiretype end group for non-group")
- }
- if fieldNum <= 0 {
- return fmt.Errorf("proto: Delta: illegal tag %d (wire type %d)", fieldNum, wire)
- }
- switch fieldNum {
- case 1:
- if wireType != 2 {
- return fmt.Errorf("proto: wrong wireType = %d for field AddedProtocols", wireType)
- }
- var stringLen uint64
- for shift := uint(0); ; shift += 7 {
- if shift >= 64 {
- return ErrIntOverflowIdentify
- }
- if iNdEx >= l {
- return io.ErrUnexpectedEOF
- }
- b := dAtA[iNdEx]
- iNdEx++
- stringLen |= uint64(b&0x7F) << shift
- if b < 0x80 {
- break
- }
- }
- intStringLen := int(stringLen)
- if intStringLen < 0 {
- return ErrInvalidLengthIdentify
- }
- postIndex := iNdEx + intStringLen
- if postIndex < 0 {
- return ErrInvalidLengthIdentify
- }
- if postIndex > l {
- return io.ErrUnexpectedEOF
- }
- m.AddedProtocols = append(m.AddedProtocols, string(dAtA[iNdEx:postIndex]))
- iNdEx = postIndex
- case 2:
- if wireType != 2 {
- return fmt.Errorf("proto: wrong wireType = %d for field RmProtocols", wireType)
- }
- var stringLen uint64
- for shift := uint(0); ; shift += 7 {
- if shift >= 64 {
- return ErrIntOverflowIdentify
- }
- if iNdEx >= l {
- return io.ErrUnexpectedEOF
- }
- b := dAtA[iNdEx]
- iNdEx++
- stringLen |= uint64(b&0x7F) << shift
- if b < 0x80 {
- break
- }
- }
- intStringLen := int(stringLen)
- if intStringLen < 0 {
- return ErrInvalidLengthIdentify
- }
- postIndex := iNdEx + intStringLen
- if postIndex < 0 {
- return ErrInvalidLengthIdentify
- }
- if postIndex > l {
- return io.ErrUnexpectedEOF
- }
- m.RmProtocols = append(m.RmProtocols, string(dAtA[iNdEx:postIndex]))
- iNdEx = postIndex
- default:
- iNdEx = preIndex
- skippy, err := skipIdentify(dAtA[iNdEx:])
- if err != nil {
- return err
- }
- if (skippy < 0) || (iNdEx+skippy) < 0 {
- return ErrInvalidLengthIdentify
- }
- if (iNdEx + skippy) > l {
- return io.ErrUnexpectedEOF
- }
- m.XXX_unrecognized = append(m.XXX_unrecognized, dAtA[iNdEx:iNdEx+skippy]...)
- iNdEx += skippy
- }
- }
-
- if iNdEx > l {
- return io.ErrUnexpectedEOF
+func (x *Identify) GetSignedPeerRecord() []byte {
+ if x != nil {
+ return x.SignedPeerRecord
}
return nil
}
-func (m *Identify) Unmarshal(dAtA []byte) error {
- l := len(dAtA)
- iNdEx := 0
- for iNdEx < l {
- preIndex := iNdEx
- var wire uint64
- for shift := uint(0); ; shift += 7 {
- if shift >= 64 {
- return ErrIntOverflowIdentify
- }
- if iNdEx >= l {
- return io.ErrUnexpectedEOF
- }
- b := dAtA[iNdEx]
- iNdEx++
- wire |= uint64(b&0x7F) << shift
- if b < 0x80 {
- break
- }
- }
- fieldNum := int32(wire >> 3)
- wireType := int(wire & 0x7)
- if wireType == 4 {
- return fmt.Errorf("proto: Identify: wiretype end group for non-group")
- }
- if fieldNum <= 0 {
- return fmt.Errorf("proto: Identify: illegal tag %d (wire type %d)", fieldNum, wire)
- }
- switch fieldNum {
- case 1:
- if wireType != 2 {
- return fmt.Errorf("proto: wrong wireType = %d for field PublicKey", wireType)
- }
- var byteLen int
- for shift := uint(0); ; shift += 7 {
- if shift >= 64 {
- return ErrIntOverflowIdentify
- }
- if iNdEx >= l {
- return io.ErrUnexpectedEOF
- }
- b := dAtA[iNdEx]
- iNdEx++
- byteLen |= int(b&0x7F) << shift
- if b < 0x80 {
- break
- }
- }
- if byteLen < 0 {
- return ErrInvalidLengthIdentify
- }
- postIndex := iNdEx + byteLen
- if postIndex < 0 {
- return ErrInvalidLengthIdentify
- }
- if postIndex > l {
- return io.ErrUnexpectedEOF
- }
- m.PublicKey = append(m.PublicKey[:0], dAtA[iNdEx:postIndex]...)
- if m.PublicKey == nil {
- m.PublicKey = []byte{}
- }
- iNdEx = postIndex
- case 2:
- if wireType != 2 {
- return fmt.Errorf("proto: wrong wireType = %d for field ListenAddrs", wireType)
- }
- var byteLen int
- for shift := uint(0); ; shift += 7 {
- if shift >= 64 {
- return ErrIntOverflowIdentify
- }
- if iNdEx >= l {
- return io.ErrUnexpectedEOF
- }
- b := dAtA[iNdEx]
- iNdEx++
- byteLen |= int(b&0x7F) << shift
- if b < 0x80 {
- break
- }
- }
- if byteLen < 0 {
- return ErrInvalidLengthIdentify
- }
- postIndex := iNdEx + byteLen
- if postIndex < 0 {
- return ErrInvalidLengthIdentify
- }
- if postIndex > l {
- return io.ErrUnexpectedEOF
- }
- m.ListenAddrs = append(m.ListenAddrs, make([]byte, postIndex-iNdEx))
- copy(m.ListenAddrs[len(m.ListenAddrs)-1], dAtA[iNdEx:postIndex])
- iNdEx = postIndex
- case 3:
- if wireType != 2 {
- return fmt.Errorf("proto: wrong wireType = %d for field Protocols", wireType)
- }
- var stringLen uint64
- for shift := uint(0); ; shift += 7 {
- if shift >= 64 {
- return ErrIntOverflowIdentify
- }
- if iNdEx >= l {
- return io.ErrUnexpectedEOF
- }
- b := dAtA[iNdEx]
- iNdEx++
- stringLen |= uint64(b&0x7F) << shift
- if b < 0x80 {
- break
- }
- }
- intStringLen := int(stringLen)
- if intStringLen < 0 {
- return ErrInvalidLengthIdentify
- }
- postIndex := iNdEx + intStringLen
- if postIndex < 0 {
- return ErrInvalidLengthIdentify
- }
- if postIndex > l {
- return io.ErrUnexpectedEOF
- }
- m.Protocols = append(m.Protocols, string(dAtA[iNdEx:postIndex]))
- iNdEx = postIndex
- case 4:
- if wireType != 2 {
- return fmt.Errorf("proto: wrong wireType = %d for field ObservedAddr", wireType)
- }
- var byteLen int
- for shift := uint(0); ; shift += 7 {
- if shift >= 64 {
- return ErrIntOverflowIdentify
- }
- if iNdEx >= l {
- return io.ErrUnexpectedEOF
- }
- b := dAtA[iNdEx]
- iNdEx++
- byteLen |= int(b&0x7F) << shift
- if b < 0x80 {
- break
- }
- }
- if byteLen < 0 {
- return ErrInvalidLengthIdentify
- }
- postIndex := iNdEx + byteLen
- if postIndex < 0 {
- return ErrInvalidLengthIdentify
- }
- if postIndex > l {
- return io.ErrUnexpectedEOF
- }
- m.ObservedAddr = append(m.ObservedAddr[:0], dAtA[iNdEx:postIndex]...)
- if m.ObservedAddr == nil {
- m.ObservedAddr = []byte{}
- }
- iNdEx = postIndex
- case 5:
- if wireType != 2 {
- return fmt.Errorf("proto: wrong wireType = %d for field ProtocolVersion", wireType)
- }
- var stringLen uint64
- for shift := uint(0); ; shift += 7 {
- if shift >= 64 {
- return ErrIntOverflowIdentify
- }
- if iNdEx >= l {
- return io.ErrUnexpectedEOF
- }
- b := dAtA[iNdEx]
- iNdEx++
- stringLen |= uint64(b&0x7F) << shift
- if b < 0x80 {
- break
- }
- }
- intStringLen := int(stringLen)
- if intStringLen < 0 {
- return ErrInvalidLengthIdentify
- }
- postIndex := iNdEx + intStringLen
- if postIndex < 0 {
- return ErrInvalidLengthIdentify
- }
- if postIndex > l {
- return io.ErrUnexpectedEOF
- }
- s := string(dAtA[iNdEx:postIndex])
- m.ProtocolVersion = &s
- iNdEx = postIndex
- case 6:
- if wireType != 2 {
- return fmt.Errorf("proto: wrong wireType = %d for field AgentVersion", wireType)
- }
- var stringLen uint64
- for shift := uint(0); ; shift += 7 {
- if shift >= 64 {
- return ErrIntOverflowIdentify
- }
- if iNdEx >= l {
- return io.ErrUnexpectedEOF
- }
- b := dAtA[iNdEx]
- iNdEx++
- stringLen |= uint64(b&0x7F) << shift
- if b < 0x80 {
- break
- }
- }
- intStringLen := int(stringLen)
- if intStringLen < 0 {
- return ErrInvalidLengthIdentify
- }
- postIndex := iNdEx + intStringLen
- if postIndex < 0 {
- return ErrInvalidLengthIdentify
- }
- if postIndex > l {
- return io.ErrUnexpectedEOF
- }
- s := string(dAtA[iNdEx:postIndex])
- m.AgentVersion = &s
- iNdEx = postIndex
- case 7:
- if wireType != 2 {
- return fmt.Errorf("proto: wrong wireType = %d for field Delta", wireType)
- }
- var msglen int
- for shift := uint(0); ; shift += 7 {
- if shift >= 64 {
- return ErrIntOverflowIdentify
- }
- if iNdEx >= l {
- return io.ErrUnexpectedEOF
- }
- b := dAtA[iNdEx]
- iNdEx++
- msglen |= int(b&0x7F) << shift
- if b < 0x80 {
- break
- }
- }
- if msglen < 0 {
- return ErrInvalidLengthIdentify
- }
- postIndex := iNdEx + msglen
- if postIndex < 0 {
- return ErrInvalidLengthIdentify
- }
- if postIndex > l {
- return io.ErrUnexpectedEOF
- }
- if m.Delta == nil {
- m.Delta = &Delta{}
- }
- if err := m.Delta.Unmarshal(dAtA[iNdEx:postIndex]); err != nil {
- return err
- }
- iNdEx = postIndex
- case 8:
- if wireType != 2 {
- return fmt.Errorf("proto: wrong wireType = %d for field SignedPeerRecord", wireType)
- }
- var byteLen int
- for shift := uint(0); ; shift += 7 {
- if shift >= 64 {
- return ErrIntOverflowIdentify
- }
- if iNdEx >= l {
- return io.ErrUnexpectedEOF
- }
- b := dAtA[iNdEx]
- iNdEx++
- byteLen |= int(b&0x7F) << shift
- if b < 0x80 {
- break
- }
- }
- if byteLen < 0 {
- return ErrInvalidLengthIdentify
- }
- postIndex := iNdEx + byteLen
- if postIndex < 0 {
- return ErrInvalidLengthIdentify
- }
- if postIndex > l {
- return io.ErrUnexpectedEOF
- }
- m.SignedPeerRecord = append(m.SignedPeerRecord[:0], dAtA[iNdEx:postIndex]...)
- if m.SignedPeerRecord == nil {
- m.SignedPeerRecord = []byte{}
- }
- iNdEx = postIndex
- default:
- iNdEx = preIndex
- skippy, err := skipIdentify(dAtA[iNdEx:])
- if err != nil {
- return err
- }
- if (skippy < 0) || (iNdEx+skippy) < 0 {
- return ErrInvalidLengthIdentify
- }
- if (iNdEx + skippy) > l {
- return io.ErrUnexpectedEOF
- }
- m.XXX_unrecognized = append(m.XXX_unrecognized, dAtA[iNdEx:iNdEx+skippy]...)
- iNdEx += skippy
- }
- }
- if iNdEx > l {
- return io.ErrUnexpectedEOF
- }
- return nil
-}
-func skipIdentify(dAtA []byte) (n int, err error) {
- l := len(dAtA)
- iNdEx := 0
- depth := 0
- for iNdEx < l {
- var wire uint64
- for shift := uint(0); ; shift += 7 {
- if shift >= 64 {
- return 0, ErrIntOverflowIdentify
- }
- if iNdEx >= l {
- return 0, io.ErrUnexpectedEOF
- }
- b := dAtA[iNdEx]
- iNdEx++
- wire |= (uint64(b) & 0x7F) << shift
- if b < 0x80 {
- break
- }
- }
- wireType := int(wire & 0x7)
- switch wireType {
- case 0:
- for shift := uint(0); ; shift += 7 {
- if shift >= 64 {
- return 0, ErrIntOverflowIdentify
- }
- if iNdEx >= l {
- return 0, io.ErrUnexpectedEOF
- }
- iNdEx++
- if dAtA[iNdEx-1] < 0x80 {
- break
- }
- }
- case 1:
- iNdEx += 8
- case 2:
- var length int
- for shift := uint(0); ; shift += 7 {
- if shift >= 64 {
- return 0, ErrIntOverflowIdentify
- }
- if iNdEx >= l {
- return 0, io.ErrUnexpectedEOF
- }
- b := dAtA[iNdEx]
- iNdEx++
- length |= (int(b) & 0x7F) << shift
- if b < 0x80 {
- break
- }
- }
- if length < 0 {
- return 0, ErrInvalidLengthIdentify
- }
- iNdEx += length
- case 3:
- depth++
- case 4:
- if depth == 0 {
- return 0, ErrUnexpectedEndOfGroupIdentify
- }
- depth--
- case 5:
- iNdEx += 4
- default:
- return 0, fmt.Errorf("proto: illegal wireType %d", wireType)
- }
- if iNdEx < 0 {
- return 0, ErrInvalidLengthIdentify
- }
- if depth == 0 {
- return iNdEx, nil
- }
- }
- return 0, io.ErrUnexpectedEOF
+var File_pb_identify_proto protoreflect.FileDescriptor
+
+var file_pb_identify_proto_rawDesc = []byte{
+ 0x0a, 0x11, 0x70, 0x62, 0x2f, 0x69, 0x64, 0x65, 0x6e, 0x74, 0x69, 0x66, 0x79, 0x2e, 0x70, 0x72,
+ 0x6f, 0x74, 0x6f, 0x12, 0x0b, 0x69, 0x64, 0x65, 0x6e, 0x74, 0x69, 0x66, 0x79, 0x2e, 0x70, 0x62,
+ 0x22, 0x86, 0x02, 0x0a, 0x08, 0x49, 0x64, 0x65, 0x6e, 0x74, 0x69, 0x66, 0x79, 0x12, 0x28, 0x0a,
+ 0x0f, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x63, 0x6f, 0x6c, 0x56, 0x65, 0x72, 0x73, 0x69, 0x6f, 0x6e,
+ 0x18, 0x05, 0x20, 0x01, 0x28, 0x09, 0x52, 0x0f, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x63, 0x6f, 0x6c,
+ 0x56, 0x65, 0x72, 0x73, 0x69, 0x6f, 0x6e, 0x12, 0x22, 0x0a, 0x0c, 0x61, 0x67, 0x65, 0x6e, 0x74,
+ 0x56, 0x65, 0x72, 0x73, 0x69, 0x6f, 0x6e, 0x18, 0x06, 0x20, 0x01, 0x28, 0x09, 0x52, 0x0c, 0x61,
+ 0x67, 0x65, 0x6e, 0x74, 0x56, 0x65, 0x72, 0x73, 0x69, 0x6f, 0x6e, 0x12, 0x1c, 0x0a, 0x09, 0x70,
+ 0x75, 0x62, 0x6c, 0x69, 0x63, 0x4b, 0x65, 0x79, 0x18, 0x01, 0x20, 0x01, 0x28, 0x0c, 0x52, 0x09,
+ 0x70, 0x75, 0x62, 0x6c, 0x69, 0x63, 0x4b, 0x65, 0x79, 0x12, 0x20, 0x0a, 0x0b, 0x6c, 0x69, 0x73,
+ 0x74, 0x65, 0x6e, 0x41, 0x64, 0x64, 0x72, 0x73, 0x18, 0x02, 0x20, 0x03, 0x28, 0x0c, 0x52, 0x0b,
+ 0x6c, 0x69, 0x73, 0x74, 0x65, 0x6e, 0x41, 0x64, 0x64, 0x72, 0x73, 0x12, 0x22, 0x0a, 0x0c, 0x6f,
+ 0x62, 0x73, 0x65, 0x72, 0x76, 0x65, 0x64, 0x41, 0x64, 0x64, 0x72, 0x18, 0x04, 0x20, 0x01, 0x28,
+ 0x0c, 0x52, 0x0c, 0x6f, 0x62, 0x73, 0x65, 0x72, 0x76, 0x65, 0x64, 0x41, 0x64, 0x64, 0x72, 0x12,
+ 0x1c, 0x0a, 0x09, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x63, 0x6f, 0x6c, 0x73, 0x18, 0x03, 0x20, 0x03,
+ 0x28, 0x09, 0x52, 0x09, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x63, 0x6f, 0x6c, 0x73, 0x12, 0x2a, 0x0a,
+ 0x10, 0x73, 0x69, 0x67, 0x6e, 0x65, 0x64, 0x50, 0x65, 0x65, 0x72, 0x52, 0x65, 0x63, 0x6f, 0x72,
+ 0x64, 0x18, 0x08, 0x20, 0x01, 0x28, 0x0c, 0x52, 0x10, 0x73, 0x69, 0x67, 0x6e, 0x65, 0x64, 0x50,
+ 0x65, 0x65, 0x72, 0x52, 0x65, 0x63, 0x6f, 0x72, 0x64,
}
var (
- ErrInvalidLengthIdentify = fmt.Errorf("proto: negative length found during unmarshaling")
- ErrIntOverflowIdentify = fmt.Errorf("proto: integer overflow")
- ErrUnexpectedEndOfGroupIdentify = fmt.Errorf("proto: unexpected end of group")
+ file_pb_identify_proto_rawDescOnce sync.Once
+ file_pb_identify_proto_rawDescData = file_pb_identify_proto_rawDesc
)
+
+func file_pb_identify_proto_rawDescGZIP() []byte {
+ file_pb_identify_proto_rawDescOnce.Do(func() {
+ file_pb_identify_proto_rawDescData = protoimpl.X.CompressGZIP(file_pb_identify_proto_rawDescData)
+ })
+ return file_pb_identify_proto_rawDescData
+}
+
+var file_pb_identify_proto_msgTypes = make([]protoimpl.MessageInfo, 1)
+var file_pb_identify_proto_goTypes = []interface{}{
+ (*Identify)(nil), // 0: identify.pb.Identify
+}
+var file_pb_identify_proto_depIdxs = []int32{
+ 0, // [0:0] is the sub-list for method output_type
+ 0, // [0:0] is the sub-list for method input_type
+ 0, // [0:0] is the sub-list for extension type_name
+ 0, // [0:0] is the sub-list for extension extendee
+ 0, // [0:0] is the sub-list for field type_name
+}
+
+func init() { file_pb_identify_proto_init() }
+func file_pb_identify_proto_init() {
+ if File_pb_identify_proto != nil {
+ return
+ }
+ if !protoimpl.UnsafeEnabled {
+ file_pb_identify_proto_msgTypes[0].Exporter = func(v interface{}, i int) interface{} {
+ switch v := v.(*Identify); i {
+ case 0:
+ return &v.state
+ case 1:
+ return &v.sizeCache
+ case 2:
+ return &v.unknownFields
+ default:
+ return nil
+ }
+ }
+ }
+ type x struct{}
+ out := protoimpl.TypeBuilder{
+ File: protoimpl.DescBuilder{
+ GoPackagePath: reflect.TypeOf(x{}).PkgPath(),
+ RawDescriptor: file_pb_identify_proto_rawDesc,
+ NumEnums: 0,
+ NumMessages: 1,
+ NumExtensions: 0,
+ NumServices: 0,
+ },
+ GoTypes: file_pb_identify_proto_goTypes,
+ DependencyIndexes: file_pb_identify_proto_depIdxs,
+ MessageInfos: file_pb_identify_proto_msgTypes,
+ }.Build()
+ File_pb_identify_proto = out.File
+ file_pb_identify_proto_rawDesc = nil
+ file_pb_identify_proto_goTypes = nil
+ file_pb_identify_proto_depIdxs = nil
+}
diff --git a/vendor/github.com/libp2p/go-libp2p/p2p/protocol/identify/pb/identify.proto b/vendor/github.com/libp2p/go-libp2p/p2p/protocol/identify/pb/identify.proto
index bdb283305..cda102d41 100644
--- a/vendor/github.com/libp2p/go-libp2p/p2p/protocol/identify/pb/identify.proto
+++ b/vendor/github.com/libp2p/go-libp2p/p2p/protocol/identify/pb/identify.proto
@@ -2,13 +2,6 @@ syntax = "proto2";
package identify.pb;
-message Delta {
- // new protocols now serviced by the peer.
- repeated string added_protocols = 1;
- // protocols dropped by the peer.
- repeated string rm_protocols = 2;
-}
-
message Identify {
// protocolVersion determines compatibility between peers
@@ -34,9 +27,6 @@ message Identify {
// protocols are the services this node is running
repeated string protocols = 3;
- // a delta update is incompatible with everything else. If this field is included, none of the others can appear.
- optional Delta delta = 7;
-
// signedPeerRecord contains a serialized SignedEnvelope containing a PeerRecord,
// signed by the sending node. It contains the same addresses as the listenAddrs field, but
// in a form that lets us share authenticated addrs with other peers.
diff --git a/vendor/github.com/libp2p/go-libp2p/p2p/protocol/identify/peer_loop.go b/vendor/github.com/libp2p/go-libp2p/p2p/protocol/identify/peer_loop.go
deleted file mode 100644
index af8549339..000000000
--- a/vendor/github.com/libp2p/go-libp2p/p2p/protocol/identify/peer_loop.go
+++ /dev/null
@@ -1,264 +0,0 @@
-package identify
-
-import (
- "context"
- "errors"
- "fmt"
- "sync"
- "time"
-
- "github.com/libp2p/go-libp2p/core/network"
- "github.com/libp2p/go-libp2p/core/peer"
- "github.com/libp2p/go-libp2p/core/protocol"
- "github.com/libp2p/go-libp2p/core/record"
- pb "github.com/libp2p/go-libp2p/p2p/protocol/identify/pb"
-
- "github.com/libp2p/go-msgio/protoio"
- ma "github.com/multiformats/go-multiaddr"
-)
-
-var errProtocolNotSupported = errors.New("protocol not supported")
-
-type identifySnapshot struct {
- protocols []string
- addrs []ma.Multiaddr
- record *record.Envelope
-}
-
-type peerHandler struct {
- ids *idService
-
- cancel context.CancelFunc
-
- pid peer.ID
-
- snapshotMu sync.RWMutex
- snapshot *identifySnapshot
-
- pushCh chan struct{}
- deltaCh chan struct{}
-}
-
-func newPeerHandler(pid peer.ID, ids *idService) *peerHandler {
- ph := &peerHandler{
- ids: ids,
- pid: pid,
-
- snapshot: ids.getSnapshot(),
-
- pushCh: make(chan struct{}, 1),
- deltaCh: make(chan struct{}, 1),
- }
-
- return ph
-}
-
-// start starts a handler. This may only be called on a stopped handler, and must
-// not be called concurrently with start/stop.
-//
-// This may _not_ be called on a _canceled_ handler. I.e., a handler where the
-// passed in context expired.
-func (ph *peerHandler) start(ctx context.Context, onExit func()) {
- if ph.cancel != nil {
- // If this happens, we have a bug. It means we tried to start
- // before we stopped.
- panic("peer handler already running")
- }
-
- ctx, cancel := context.WithCancel(ctx)
- ph.cancel = cancel
-
- go ph.loop(ctx, onExit)
-}
-
-// stop stops a handler. This may not be called concurrently with any
-// other calls to stop/start.
-func (ph *peerHandler) stop() error {
- if ph.cancel != nil {
- ph.cancel()
- ph.cancel = nil
- }
- return nil
-}
-
-// per peer loop for pushing updates
-func (ph *peerHandler) loop(ctx context.Context, onExit func()) {
- defer onExit()
-
- for {
- select {
- // our listen addresses have changed, send an IDPush.
- case <-ph.pushCh:
- if err := ph.sendPush(ctx); err != nil {
- log.Warnw("failed to send Identify Push", "peer", ph.pid, "error", err)
- }
-
- case <-ph.deltaCh:
- if err := ph.sendDelta(ctx); err != nil {
- log.Warnw("failed to send Identify Delta", "peer", ph.pid, "error", err)
- }
-
- case <-ctx.Done():
- return
- }
- }
-}
-
-func (ph *peerHandler) sendDelta(ctx context.Context) error {
- // send a push if the peer does not support the Delta protocol.
- if !ph.peerSupportsProtos(ctx, []string{IDDelta}) {
- log.Debugw("will send push as peer does not support delta", "peer", ph.pid)
- if err := ph.sendPush(ctx); err != nil {
- return fmt.Errorf("failed to send push on delta message: %w", err)
- }
- return nil
- }
-
- // extract a delta message, updating the last state.
- mes := ph.nextDelta()
- if mes == nil || (len(mes.AddedProtocols) == 0 && len(mes.RmProtocols) == 0) {
- return nil
- }
-
- ds, err := ph.openStream(ctx, []string{IDDelta})
- if err != nil {
- return fmt.Errorf("failed to open delta stream: %w", err)
- }
-
- defer ds.Close()
-
- c := ds.Conn()
- if err := protoio.NewDelimitedWriter(ds).WriteMsg(&pb.Identify{Delta: mes}); err != nil {
- _ = ds.Reset()
- return fmt.Errorf("failed to send delta message, %w", err)
- }
- log.Debugw("sent identify update", "protocol", ds.Protocol(), "peer", c.RemotePeer(),
- "peer address", c.RemoteMultiaddr())
-
- return nil
-}
-
-func (ph *peerHandler) sendPush(ctx context.Context) error {
- dp, err := ph.openStream(ctx, []string{IDPush})
- if err == errProtocolNotSupported {
- log.Debugw("not sending push as peer does not support protocol", "peer", ph.pid)
- return nil
- }
- if err != nil {
- return fmt.Errorf("failed to open push stream: %w", err)
- }
- defer dp.Close()
-
- snapshot := ph.ids.getSnapshot()
- ph.snapshotMu.Lock()
- ph.snapshot = snapshot
- ph.snapshotMu.Unlock()
- if err := ph.ids.writeChunkedIdentifyMsg(dp.Conn(), snapshot, dp); err != nil {
- _ = dp.Reset()
- return fmt.Errorf("failed to send push message: %w", err)
- }
-
- return nil
-}
-
-func (ph *peerHandler) openStream(ctx context.Context, protos []string) (network.Stream, error) {
- // wait for the other peer to send us an Identify response on "all" connections we have with it
- // so we can look at it's supported protocols and avoid a multistream-select roundtrip to negotiate the protocol
- // if we know for a fact that it dosen't support the protocol.
- conns := ph.ids.Host.Network().ConnsToPeer(ph.pid)
- for _, c := range conns {
- select {
- case <-ph.ids.IdentifyWait(c):
- case <-ctx.Done():
- return nil, ctx.Err()
- }
- }
-
- if !ph.peerSupportsProtos(ctx, protos) {
- return nil, errProtocolNotSupported
- }
-
- ph.ids.pushSemaphore <- struct{}{}
- defer func() {
- <-ph.ids.pushSemaphore
- }()
-
- // negotiate a stream without opening a new connection as we "should" already have a connection.
- ctx, cancel := context.WithTimeout(ctx, 30*time.Second)
- defer cancel()
- ctx = network.WithNoDial(ctx, "should already have connection")
-
- // newstream will open a stream on the first protocol the remote peer supports from the among
- // the list of protocols passed to it.
- s, err := ph.ids.Host.NewStream(ctx, ph.pid, protocol.ConvertFromStrings(protos)...)
- if err != nil {
- return nil, err
- }
-
- return s, err
-}
-
-// returns true if the peer supports atleast one of the given protocols
-func (ph *peerHandler) peerSupportsProtos(ctx context.Context, protos []string) bool {
- conns := ph.ids.Host.Network().ConnsToPeer(ph.pid)
- for _, c := range conns {
- select {
- case <-ph.ids.IdentifyWait(c):
- case <-ctx.Done():
- return false
- }
- }
-
- pstore := ph.ids.Host.Peerstore()
-
- if sup, err := pstore.SupportsProtocols(ph.pid, protos...); err == nil && len(sup) == 0 {
- return false
- }
- return true
-}
-
-func (ph *peerHandler) nextDelta() *pb.Delta {
- curr := ph.ids.Host.Mux().Protocols()
-
- // Extract the old protocol list and replace the old snapshot with an
- // updated one.
- ph.snapshotMu.Lock()
- snapshot := *ph.snapshot
- old := snapshot.protocols
- snapshot.protocols = curr
- ph.snapshot = &snapshot
- ph.snapshotMu.Unlock()
-
- oldProtos := make(map[string]struct{}, len(old))
- currProtos := make(map[string]struct{}, len(curr))
-
- for _, proto := range old {
- oldProtos[proto] = struct{}{}
- }
-
- for _, proto := range curr {
- currProtos[proto] = struct{}{}
- }
-
- var added []string
- var removed []string
-
- // has it been added ?
- for p := range currProtos {
- if _, ok := oldProtos[p]; !ok {
- added = append(added, p)
- }
- }
-
- // has it been removed ?
- for p := range oldProtos {
- if _, ok := currProtos[p]; !ok {
- removed = append(removed, p)
- }
- }
-
- return &pb.Delta{
- AddedProtocols: added,
- RmProtocols: removed,
- }
-}
diff --git a/vendor/github.com/libp2p/go-libp2p/p2p/protocol/identify/id_go118.go b/vendor/github.com/libp2p/go-libp2p/p2p/protocol/identify/user_agent.go
similarity index 97%
rename from vendor/github.com/libp2p/go-libp2p/p2p/protocol/identify/id_go118.go
rename to vendor/github.com/libp2p/go-libp2p/p2p/protocol/identify/user_agent.go
index f934dc66e..016f941f3 100644
--- a/vendor/github.com/libp2p/go-libp2p/p2p/protocol/identify/id_go118.go
+++ b/vendor/github.com/libp2p/go-libp2p/p2p/protocol/identify/user_agent.go
@@ -1,5 +1,3 @@
-//go:build go1.18
-
package identify
import (
diff --git a/vendor/github.com/libp2p/go-libp2p/p2p/protocol/ping/ping.go b/vendor/github.com/libp2p/go-libp2p/p2p/protocol/ping/ping.go
index 583eea5c3..6ff5c3fb6 100644
--- a/vendor/github.com/libp2p/go-libp2p/p2p/protocol/ping/ping.go
+++ b/vendor/github.com/libp2p/go-libp2p/p2p/protocol/ping/ping.go
@@ -71,7 +71,7 @@ func (p *PingService) PingHandler(s network.Stream) {
log.Error("ping loop failed without error")
}
}
- s.Reset()
+ s.Close()
}()
for {
diff --git a/vendor/github.com/libp2p/go-libp2p/p2p/security/noise/handshake.go b/vendor/github.com/libp2p/go-libp2p/p2p/security/noise/handshake.go
index b71d7ef89..e1a18e9b6 100644
--- a/vendor/github.com/libp2p/go-libp2p/p2p/security/noise/handshake.go
+++ b/vendor/github.com/libp2p/go-libp2p/p2p/security/noise/handshake.go
@@ -10,19 +10,18 @@ import (
"runtime/debug"
"time"
- "github.com/minio/sha256-simd"
- "golang.org/x/crypto/chacha20poly1305"
-
- "github.com/libp2p/go-libp2p/p2p/security/noise/pb"
-
"github.com/libp2p/go-libp2p/core/crypto"
"github.com/libp2p/go-libp2p/core/peer"
+ "github.com/libp2p/go-libp2p/p2p/security/noise/pb"
"github.com/flynn/noise"
- "github.com/gogo/protobuf/proto"
pool "github.com/libp2p/go-buffer-pool"
+ "github.com/minio/sha256-simd"
+ "google.golang.org/protobuf/proto"
)
+//go:generate protoc --go_out=. --go_opt=Mpb/payload.proto=./pb pb/payload.proto
+
// payloadSigPrefix is prepended to our Noise static key before signing with
// our libp2p identity key.
const payloadSigPrefix = "noise-libp2p-static-key:"
@@ -65,11 +64,6 @@ func (s *secureSession) runHandshake(ctx context.Context) (err error) {
return fmt.Errorf("error initializing handshake state: %w", err)
}
- payload, err := s.generateHandshakePayload(kp)
- if err != nil {
- return err
- }
-
// set a deadline to complete the handshake, if one has been supplied.
// clear it after we're done.
if deadline, ok := ctx.Deadline(); ok {
@@ -79,20 +73,14 @@ func (s *secureSession) runHandshake(ctx context.Context) (err error) {
}
}
- // We can re-use this buffer for all handshake messages as its size
- // will be the size of the maximum handshake message for the Noise XX pattern.
- // Also, since we prefix every noise handshake message with its length, we need to account for
- // it when we fetch the buffer from the pool
- maxMsgSize := 2*noise.DH25519.DHLen() + len(payload) + 2*chacha20poly1305.Overhead
- hbuf := pool.Get(maxMsgSize + LengthPrefixLength)
+ // We can re-use this buffer for all handshake messages.
+ hbuf := pool.Get(2 << 10)
defer pool.Put(hbuf)
if s.initiator {
// stage 0 //
- // do not send the payload just yet, as it would be plaintext; not secret.
// Handshake Msg Len = len(DH ephemeral key)
- err = s.sendHandshakeMessage(hs, nil, hbuf)
- if err != nil {
+ if err := s.sendHandshakeMessage(hs, nil, hbuf); err != nil {
return fmt.Errorf("error sending handshake message: %w", err)
}
@@ -101,20 +89,32 @@ func (s *secureSession) runHandshake(ctx context.Context) (err error) {
if err != nil {
return fmt.Errorf("error reading handshake message: %w", err)
}
- err = s.handleRemoteHandshakePayload(plaintext, hs.PeerStatic())
+ rcvdEd, err := s.handleRemoteHandshakePayload(plaintext, hs.PeerStatic())
if err != nil {
return err
}
+ if s.initiatorEarlyDataHandler != nil {
+ if err := s.initiatorEarlyDataHandler.Received(ctx, s.insecureConn, rcvdEd); err != nil {
+ return err
+ }
+ }
// stage 2 //
// Handshake Msg Len = len(DHT static key) + MAC(static key is encrypted) + len(Payload) + MAC(payload is encrypted)
- err = s.sendHandshakeMessage(hs, payload, hbuf)
+ var ed *pb.NoiseExtensions
+ if s.initiatorEarlyDataHandler != nil {
+ ed = s.initiatorEarlyDataHandler.Send(ctx, s.insecureConn, s.remoteID)
+ }
+ payload, err := s.generateHandshakePayload(kp, ed)
if err != nil {
+ return err
+ }
+ if err := s.sendHandshakeMessage(hs, payload, hbuf); err != nil {
return fmt.Errorf("error sending handshake message: %w", err)
}
+ return nil
} else {
// stage 0 //
- // We don't expect any payload on the first message.
if _, err := s.readHandshakeMessage(hs); err != nil {
return fmt.Errorf("error reading handshake message: %w", err)
}
@@ -122,8 +122,15 @@ func (s *secureSession) runHandshake(ctx context.Context) (err error) {
// stage 1 //
// Handshake Msg Len = len(DH ephemeral key) + len(DHT static key) + MAC(static key is encrypted) + len(Payload) +
// MAC(payload is encrypted)
- err = s.sendHandshakeMessage(hs, payload, hbuf)
+ var ed *pb.NoiseExtensions
+ if s.responderEarlyDataHandler != nil {
+ ed = s.responderEarlyDataHandler.Send(ctx, s.insecureConn, s.remoteID)
+ }
+ payload, err := s.generateHandshakePayload(kp, ed)
if err != nil {
+ return err
+ }
+ if err := s.sendHandshakeMessage(hs, payload, hbuf); err != nil {
return fmt.Errorf("error sending handshake message: %w", err)
}
@@ -132,13 +139,17 @@ func (s *secureSession) runHandshake(ctx context.Context) (err error) {
if err != nil {
return fmt.Errorf("error reading handshake message: %w", err)
}
- err = s.handleRemoteHandshakePayload(plaintext, hs.PeerStatic())
+ rcvdEd, err := s.handleRemoteHandshakePayload(plaintext, hs.PeerStatic())
if err != nil {
return err
}
+ if s.responderEarlyDataHandler != nil {
+ if err := s.responderEarlyDataHandler.Received(ctx, s.insecureConn, rcvdEd); err != nil {
+ return err
+ }
+ }
+ return nil
}
-
- return nil
}
// setCipherStates sets the initial cipher states that will be used to protect
@@ -215,8 +226,8 @@ func (s *secureSession) readHandshakeMessage(hs *noise.HandshakeState) ([]byte,
// generateHandshakePayload creates a libp2p handshake payload with a
// signature of our static noise key.
-func (s *secureSession) generateHandshakePayload(localStatic noise.DHKey) ([]byte, error) {
- // obtain the public key from the handshake session so we can sign it with
+func (s *secureSession) generateHandshakePayload(localStatic noise.DHKey, ext *pb.NoiseExtensions) ([]byte, error) {
+ // obtain the public key from the handshake session, so we can sign it with
// our libp2p secret key.
localKeyRaw, err := crypto.MarshalPublicKey(s.LocalPublicKey())
if err != nil {
@@ -231,10 +242,11 @@ func (s *secureSession) generateHandshakePayload(localStatic noise.DHKey) ([]byt
}
// create payload
- payload := new(pb.NoiseHandshakePayload)
- payload.IdentityKey = localKeyRaw
- payload.IdentitySig = signedPayload
- payloadEnc, err := proto.Marshal(payload)
+ payloadEnc, err := proto.Marshal(&pb.NoiseHandshakePayload{
+ IdentityKey: localKeyRaw,
+ IdentitySig: signedPayload,
+ Extensions: ext,
+ })
if err != nil {
return nil, fmt.Errorf("error marshaling handshake payload: %w", err)
}
@@ -243,30 +255,28 @@ func (s *secureSession) generateHandshakePayload(localStatic noise.DHKey) ([]byt
// handleRemoteHandshakePayload unmarshals the handshake payload object sent
// by the remote peer and validates the signature against the peer's static Noise key.
-func (s *secureSession) handleRemoteHandshakePayload(payload []byte, remoteStatic []byte) error {
+// It returns the data attached to the payload.
+func (s *secureSession) handleRemoteHandshakePayload(payload []byte, remoteStatic []byte) (*pb.NoiseExtensions, error) {
// unmarshal payload
nhp := new(pb.NoiseHandshakePayload)
err := proto.Unmarshal(payload, nhp)
if err != nil {
- return fmt.Errorf("error unmarshaling remote handshake payload: %w", err)
+ return nil, fmt.Errorf("error unmarshaling remote handshake payload: %w", err)
}
// unpack remote peer's public libp2p key
remotePubKey, err := crypto.UnmarshalPublicKey(nhp.GetIdentityKey())
if err != nil {
- return err
+ return nil, err
}
id, err := peer.IDFromPublicKey(remotePubKey)
if err != nil {
- return err
+ return nil, err
}
- // check the peer ID for:
- // * all outbound connection
- // * inbound connections, if we know which peer we want to connect to (SecureInbound called with a peer ID)
- if (s.initiator && s.remoteID != id) || (!s.initiator && s.remoteID != "" && s.remoteID != id) {
- // use Pretty() as it produces the full b58-encoded string, rather than abbreviated forms.
- return fmt.Errorf("peer id mismatch: expected %s, but remote key matches %s", s.remoteID.Pretty(), id.Pretty())
+ // check the peer ID if enabled
+ if s.checkPeerID && s.remoteID != id {
+ return nil, fmt.Errorf("peer id mismatch: expected %s, but remote key matches %s", s.remoteID.Pretty(), id.Pretty())
}
// verify payload is signed by asserted remote libp2p key.
@@ -274,13 +284,13 @@ func (s *secureSession) handleRemoteHandshakePayload(payload []byte, remoteStati
msg := append([]byte(payloadSigPrefix), remoteStatic...)
ok, err := remotePubKey.Verify(msg, sig)
if err != nil {
- return fmt.Errorf("error verifying signature: %w", err)
+ return nil, fmt.Errorf("error verifying signature: %w", err)
} else if !ok {
- return fmt.Errorf("handshake signature invalid")
+ return nil, fmt.Errorf("handshake signature invalid")
}
// set remote peer key and id
s.remoteID = id
s.remoteKey = remotePubKey
- return nil
+ return nhp.Extensions, nil
}
diff --git a/vendor/github.com/libp2p/go-libp2p/p2p/security/noise/pb/Makefile b/vendor/github.com/libp2p/go-libp2p/p2p/security/noise/pb/Makefile
deleted file mode 100644
index 7cf8222f8..000000000
--- a/vendor/github.com/libp2p/go-libp2p/p2p/security/noise/pb/Makefile
+++ /dev/null
@@ -1,11 +0,0 @@
-PB = $(wildcard *.proto)
-GO = $(PB:.proto=.pb.go)
-
-all: $(GO)
-
-%.pb.go: %.proto
- protoc --proto_path=$(PWD):$(PWD)/../.. --gogofaster_out=. $<
-
-clean:
- rm -f *.pb.go
- rm -f *.go
diff --git a/vendor/github.com/libp2p/go-libp2p/p2p/security/noise/pb/payload.pb.go b/vendor/github.com/libp2p/go-libp2p/p2p/security/noise/pb/payload.pb.go
index ffb29c3c8..8e3a805a5 100644
--- a/vendor/github.com/libp2p/go-libp2p/p2p/security/noise/pb/payload.pb.go
+++ b/vendor/github.com/libp2p/go-libp2p/p2p/security/noise/pb/payload.pb.go
@@ -1,422 +1,239 @@
-// Code generated by protoc-gen-gogo. DO NOT EDIT.
-// source: payload.proto
+// Code generated by protoc-gen-go. DO NOT EDIT.
+// versions:
+// protoc-gen-go v1.30.0
+// protoc v3.21.12
+// source: pb/payload.proto
package pb
import (
- fmt "fmt"
- proto "github.com/gogo/protobuf/proto"
- io "io"
- math "math"
- math_bits "math/bits"
+ protoreflect "google.golang.org/protobuf/reflect/protoreflect"
+ protoimpl "google.golang.org/protobuf/runtime/protoimpl"
+ reflect "reflect"
+ sync "sync"
)
-// Reference imports to suppress errors if they are not otherwise used.
-var _ = proto.Marshal
-var _ = fmt.Errorf
-var _ = math.Inf
+const (
+ // Verify that this generated code is sufficiently up-to-date.
+ _ = protoimpl.EnforceVersion(20 - protoimpl.MinVersion)
+ // Verify that runtime/protoimpl is sufficiently up-to-date.
+ _ = protoimpl.EnforceVersion(protoimpl.MaxVersion - 20)
+)
-// This is a compile-time assertion to ensure that this generated file
-// is compatible with the proto package it is being compiled against.
-// A compilation error at this line likely means your copy of the
-// proto package needs to be updated.
-const _ = proto.GoGoProtoPackageIsVersion3 // please upgrade the proto package
+type NoiseExtensions struct {
+ state protoimpl.MessageState
+ sizeCache protoimpl.SizeCache
+ unknownFields protoimpl.UnknownFields
-type NoiseHandshakePayload struct {
- IdentityKey []byte `protobuf:"bytes,1,opt,name=identity_key,json=identityKey,proto3" json:"identity_key,omitempty"`
- IdentitySig []byte `protobuf:"bytes,2,opt,name=identity_sig,json=identitySig,proto3" json:"identity_sig,omitempty"`
- Data []byte `protobuf:"bytes,3,opt,name=data,proto3" json:"data,omitempty"`
+ WebtransportCerthashes [][]byte `protobuf:"bytes,1,rep,name=webtransport_certhashes,json=webtransportCerthashes" json:"webtransport_certhashes,omitempty"`
+ StreamMuxers []string `protobuf:"bytes,2,rep,name=stream_muxers,json=streamMuxers" json:"stream_muxers,omitempty"`
}
-func (m *NoiseHandshakePayload) Reset() { *m = NoiseHandshakePayload{} }
-func (m *NoiseHandshakePayload) String() string { return proto.CompactTextString(m) }
-func (*NoiseHandshakePayload) ProtoMessage() {}
-func (*NoiseHandshakePayload) Descriptor() ([]byte, []int) {
- return fileDescriptor_678c914f1bee6d56, []int{0}
-}
-func (m *NoiseHandshakePayload) XXX_Unmarshal(b []byte) error {
- return m.Unmarshal(b)
-}
-func (m *NoiseHandshakePayload) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) {
- if deterministic {
- return xxx_messageInfo_NoiseHandshakePayload.Marshal(b, m, deterministic)
- } else {
- b = b[:cap(b)]
- n, err := m.MarshalToSizedBuffer(b)
- if err != nil {
- return nil, err
- }
- return b[:n], nil
+func (x *NoiseExtensions) Reset() {
+ *x = NoiseExtensions{}
+ if protoimpl.UnsafeEnabled {
+ mi := &file_pb_payload_proto_msgTypes[0]
+ ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
+ ms.StoreMessageInfo(mi)
}
}
-func (m *NoiseHandshakePayload) XXX_Merge(src proto.Message) {
- xxx_messageInfo_NoiseHandshakePayload.Merge(m, src)
-}
-func (m *NoiseHandshakePayload) XXX_Size() int {
- return m.Size()
-}
-func (m *NoiseHandshakePayload) XXX_DiscardUnknown() {
- xxx_messageInfo_NoiseHandshakePayload.DiscardUnknown(m)
+
+func (x *NoiseExtensions) String() string {
+ return protoimpl.X.MessageStringOf(x)
}
-var xxx_messageInfo_NoiseHandshakePayload proto.InternalMessageInfo
+func (*NoiseExtensions) ProtoMessage() {}
-func (m *NoiseHandshakePayload) GetIdentityKey() []byte {
- if m != nil {
- return m.IdentityKey
+func (x *NoiseExtensions) ProtoReflect() protoreflect.Message {
+ mi := &file_pb_payload_proto_msgTypes[0]
+ if protoimpl.UnsafeEnabled && x != nil {
+ ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
+ if ms.LoadMessageInfo() == nil {
+ ms.StoreMessageInfo(mi)
+ }
+ return ms
}
- return nil
+ return mi.MessageOf(x)
}
-func (m *NoiseHandshakePayload) GetIdentitySig() []byte {
- if m != nil {
- return m.IdentitySig
- }
- return nil
+// Deprecated: Use NoiseExtensions.ProtoReflect.Descriptor instead.
+func (*NoiseExtensions) Descriptor() ([]byte, []int) {
+ return file_pb_payload_proto_rawDescGZIP(), []int{0}
}
-func (m *NoiseHandshakePayload) GetData() []byte {
- if m != nil {
- return m.Data
+func (x *NoiseExtensions) GetWebtransportCerthashes() [][]byte {
+ if x != nil {
+ return x.WebtransportCerthashes
}
return nil
}
-func init() {
- proto.RegisterType((*NoiseHandshakePayload)(nil), "pb.NoiseHandshakePayload")
+func (x *NoiseExtensions) GetStreamMuxers() []string {
+ if x != nil {
+ return x.StreamMuxers
+ }
+ return nil
}
-func init() { proto.RegisterFile("payload.proto", fileDescriptor_678c914f1bee6d56) }
+type NoiseHandshakePayload struct {
+ state protoimpl.MessageState
+ sizeCache protoimpl.SizeCache
+ unknownFields protoimpl.UnknownFields
-var fileDescriptor_678c914f1bee6d56 = []byte{
- // 152 bytes of a gzipped FileDescriptorProto
- 0x1f, 0x8b, 0x08, 0x00, 0x00, 0x00, 0x00, 0x00, 0x02, 0xff, 0xe2, 0xe2, 0x2d, 0x48, 0xac, 0xcc,
- 0xc9, 0x4f, 0x4c, 0xd1, 0x2b, 0x28, 0xca, 0x2f, 0xc9, 0x17, 0x62, 0x2a, 0x48, 0x52, 0x2a, 0xe4,
- 0x12, 0xf5, 0xcb, 0xcf, 0x2c, 0x4e, 0xf5, 0x48, 0xcc, 0x4b, 0x29, 0xce, 0x48, 0xcc, 0x4e, 0x0d,
- 0x80, 0x28, 0x11, 0x52, 0xe4, 0xe2, 0xc9, 0x4c, 0x49, 0xcd, 0x2b, 0xc9, 0x2c, 0xa9, 0x8c, 0xcf,
- 0x4e, 0xad, 0x94, 0x60, 0x54, 0x60, 0xd4, 0xe0, 0x09, 0xe2, 0x86, 0x89, 0x79, 0xa7, 0x56, 0xa2,
- 0x28, 0x29, 0xce, 0x4c, 0x97, 0x60, 0x42, 0x55, 0x12, 0x9c, 0x99, 0x2e, 0x24, 0xc4, 0xc5, 0x92,
- 0x92, 0x58, 0x92, 0x28, 0xc1, 0x0c, 0x96, 0x02, 0xb3, 0x9d, 0x24, 0x4e, 0x3c, 0x92, 0x63, 0xbc,
- 0xf0, 0x48, 0x8e, 0xf1, 0xc1, 0x23, 0x39, 0xc6, 0x09, 0x8f, 0xe5, 0x18, 0x2e, 0x3c, 0x96, 0x63,
- 0xb8, 0xf1, 0x58, 0x8e, 0x21, 0x89, 0x0d, 0xec, 0x2e, 0x63, 0x40, 0x00, 0x00, 0x00, 0xff, 0xff,
- 0x51, 0x37, 0xd7, 0x40, 0xa8, 0x00, 0x00, 0x00,
+ IdentityKey []byte `protobuf:"bytes,1,opt,name=identity_key,json=identityKey" json:"identity_key,omitempty"`
+ IdentitySig []byte `protobuf:"bytes,2,opt,name=identity_sig,json=identitySig" json:"identity_sig,omitempty"`
+ Extensions *NoiseExtensions `protobuf:"bytes,4,opt,name=extensions" json:"extensions,omitempty"`
}
-func (m *NoiseHandshakePayload) Marshal() (dAtA []byte, err error) {
- size := m.Size()
- dAtA = make([]byte, size)
- n, err := m.MarshalToSizedBuffer(dAtA[:size])
- if err != nil {
- return nil, err
+func (x *NoiseHandshakePayload) Reset() {
+ *x = NoiseHandshakePayload{}
+ if protoimpl.UnsafeEnabled {
+ mi := &file_pb_payload_proto_msgTypes[1]
+ ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
+ ms.StoreMessageInfo(mi)
}
- return dAtA[:n], nil
}
-func (m *NoiseHandshakePayload) MarshalTo(dAtA []byte) (int, error) {
- size := m.Size()
- return m.MarshalToSizedBuffer(dAtA[:size])
+func (x *NoiseHandshakePayload) String() string {
+ return protoimpl.X.MessageStringOf(x)
}
-func (m *NoiseHandshakePayload) MarshalToSizedBuffer(dAtA []byte) (int, error) {
- i := len(dAtA)
- _ = i
- var l int
- _ = l
- if len(m.Data) > 0 {
- i -= len(m.Data)
- copy(dAtA[i:], m.Data)
- i = encodeVarintPayload(dAtA, i, uint64(len(m.Data)))
- i--
- dAtA[i] = 0x1a
- }
- if len(m.IdentitySig) > 0 {
- i -= len(m.IdentitySig)
- copy(dAtA[i:], m.IdentitySig)
- i = encodeVarintPayload(dAtA, i, uint64(len(m.IdentitySig)))
- i--
- dAtA[i] = 0x12
- }
- if len(m.IdentityKey) > 0 {
- i -= len(m.IdentityKey)
- copy(dAtA[i:], m.IdentityKey)
- i = encodeVarintPayload(dAtA, i, uint64(len(m.IdentityKey)))
- i--
- dAtA[i] = 0xa
+func (*NoiseHandshakePayload) ProtoMessage() {}
+
+func (x *NoiseHandshakePayload) ProtoReflect() protoreflect.Message {
+ mi := &file_pb_payload_proto_msgTypes[1]
+ if protoimpl.UnsafeEnabled && x != nil {
+ ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
+ if ms.LoadMessageInfo() == nil {
+ ms.StoreMessageInfo(mi)
+ }
+ return ms
}
- return len(dAtA) - i, nil
+ return mi.MessageOf(x)
}
-func encodeVarintPayload(dAtA []byte, offset int, v uint64) int {
- offset -= sovPayload(v)
- base := offset
- for v >= 1<<7 {
- dAtA[offset] = uint8(v&0x7f | 0x80)
- v >>= 7
- offset++
- }
- dAtA[offset] = uint8(v)
- return base
+// Deprecated: Use NoiseHandshakePayload.ProtoReflect.Descriptor instead.
+func (*NoiseHandshakePayload) Descriptor() ([]byte, []int) {
+ return file_pb_payload_proto_rawDescGZIP(), []int{1}
}
-func (m *NoiseHandshakePayload) Size() (n int) {
- if m == nil {
- return 0
- }
- var l int
- _ = l
- l = len(m.IdentityKey)
- if l > 0 {
- n += 1 + l + sovPayload(uint64(l))
- }
- l = len(m.IdentitySig)
- if l > 0 {
- n += 1 + l + sovPayload(uint64(l))
- }
- l = len(m.Data)
- if l > 0 {
- n += 1 + l + sovPayload(uint64(l))
+
+func (x *NoiseHandshakePayload) GetIdentityKey() []byte {
+ if x != nil {
+ return x.IdentityKey
}
- return n
+ return nil
}
-func sovPayload(x uint64) (n int) {
- return (math_bits.Len64(x|1) + 6) / 7
-}
-func sozPayload(x uint64) (n int) {
- return sovPayload(uint64((x << 1) ^ uint64((int64(x) >> 63))))
-}
-func (m *NoiseHandshakePayload) Unmarshal(dAtA []byte) error {
- l := len(dAtA)
- iNdEx := 0
- for iNdEx < l {
- preIndex := iNdEx
- var wire uint64
- for shift := uint(0); ; shift += 7 {
- if shift >= 64 {
- return ErrIntOverflowPayload
- }
- if iNdEx >= l {
- return io.ErrUnexpectedEOF
- }
- b := dAtA[iNdEx]
- iNdEx++
- wire |= uint64(b&0x7F) << shift
- if b < 0x80 {
- break
- }
- }
- fieldNum := int32(wire >> 3)
- wireType := int(wire & 0x7)
- if wireType == 4 {
- return fmt.Errorf("proto: NoiseHandshakePayload: wiretype end group for non-group")
- }
- if fieldNum <= 0 {
- return fmt.Errorf("proto: NoiseHandshakePayload: illegal tag %d (wire type %d)", fieldNum, wire)
- }
- switch fieldNum {
- case 1:
- if wireType != 2 {
- return fmt.Errorf("proto: wrong wireType = %d for field IdentityKey", wireType)
- }
- var byteLen int
- for shift := uint(0); ; shift += 7 {
- if shift >= 64 {
- return ErrIntOverflowPayload
- }
- if iNdEx >= l {
- return io.ErrUnexpectedEOF
- }
- b := dAtA[iNdEx]
- iNdEx++
- byteLen |= int(b&0x7F) << shift
- if b < 0x80 {
- break
- }
- }
- if byteLen < 0 {
- return ErrInvalidLengthPayload
- }
- postIndex := iNdEx + byteLen
- if postIndex < 0 {
- return ErrInvalidLengthPayload
- }
- if postIndex > l {
- return io.ErrUnexpectedEOF
- }
- m.IdentityKey = append(m.IdentityKey[:0], dAtA[iNdEx:postIndex]...)
- if m.IdentityKey == nil {
- m.IdentityKey = []byte{}
- }
- iNdEx = postIndex
- case 2:
- if wireType != 2 {
- return fmt.Errorf("proto: wrong wireType = %d for field IdentitySig", wireType)
- }
- var byteLen int
- for shift := uint(0); ; shift += 7 {
- if shift >= 64 {
- return ErrIntOverflowPayload
- }
- if iNdEx >= l {
- return io.ErrUnexpectedEOF
- }
- b := dAtA[iNdEx]
- iNdEx++
- byteLen |= int(b&0x7F) << shift
- if b < 0x80 {
- break
- }
- }
- if byteLen < 0 {
- return ErrInvalidLengthPayload
- }
- postIndex := iNdEx + byteLen
- if postIndex < 0 {
- return ErrInvalidLengthPayload
- }
- if postIndex > l {
- return io.ErrUnexpectedEOF
- }
- m.IdentitySig = append(m.IdentitySig[:0], dAtA[iNdEx:postIndex]...)
- if m.IdentitySig == nil {
- m.IdentitySig = []byte{}
- }
- iNdEx = postIndex
- case 3:
- if wireType != 2 {
- return fmt.Errorf("proto: wrong wireType = %d for field Data", wireType)
- }
- var byteLen int
- for shift := uint(0); ; shift += 7 {
- if shift >= 64 {
- return ErrIntOverflowPayload
- }
- if iNdEx >= l {
- return io.ErrUnexpectedEOF
- }
- b := dAtA[iNdEx]
- iNdEx++
- byteLen |= int(b&0x7F) << shift
- if b < 0x80 {
- break
- }
- }
- if byteLen < 0 {
- return ErrInvalidLengthPayload
- }
- postIndex := iNdEx + byteLen
- if postIndex < 0 {
- return ErrInvalidLengthPayload
- }
- if postIndex > l {
- return io.ErrUnexpectedEOF
- }
- m.Data = append(m.Data[:0], dAtA[iNdEx:postIndex]...)
- if m.Data == nil {
- m.Data = []byte{}
- }
- iNdEx = postIndex
- default:
- iNdEx = preIndex
- skippy, err := skipPayload(dAtA[iNdEx:])
- if err != nil {
- return err
- }
- if (skippy < 0) || (iNdEx+skippy) < 0 {
- return ErrInvalidLengthPayload
- }
- if (iNdEx + skippy) > l {
- return io.ErrUnexpectedEOF
- }
- iNdEx += skippy
- }
+func (x *NoiseHandshakePayload) GetIdentitySig() []byte {
+ if x != nil {
+ return x.IdentitySig
}
+ return nil
+}
- if iNdEx > l {
- return io.ErrUnexpectedEOF
+func (x *NoiseHandshakePayload) GetExtensions() *NoiseExtensions {
+ if x != nil {
+ return x.Extensions
}
return nil
}
-func skipPayload(dAtA []byte) (n int, err error) {
- l := len(dAtA)
- iNdEx := 0
- depth := 0
- for iNdEx < l {
- var wire uint64
- for shift := uint(0); ; shift += 7 {
- if shift >= 64 {
- return 0, ErrIntOverflowPayload
- }
- if iNdEx >= l {
- return 0, io.ErrUnexpectedEOF
- }
- b := dAtA[iNdEx]
- iNdEx++
- wire |= (uint64(b) & 0x7F) << shift
- if b < 0x80 {
- break
+
+var File_pb_payload_proto protoreflect.FileDescriptor
+
+var file_pb_payload_proto_rawDesc = []byte{
+ 0x0a, 0x10, 0x70, 0x62, 0x2f, 0x70, 0x61, 0x79, 0x6c, 0x6f, 0x61, 0x64, 0x2e, 0x70, 0x72, 0x6f,
+ 0x74, 0x6f, 0x12, 0x02, 0x70, 0x62, 0x22, 0x6f, 0x0a, 0x0f, 0x4e, 0x6f, 0x69, 0x73, 0x65, 0x45,
+ 0x78, 0x74, 0x65, 0x6e, 0x73, 0x69, 0x6f, 0x6e, 0x73, 0x12, 0x37, 0x0a, 0x17, 0x77, 0x65, 0x62,
+ 0x74, 0x72, 0x61, 0x6e, 0x73, 0x70, 0x6f, 0x72, 0x74, 0x5f, 0x63, 0x65, 0x72, 0x74, 0x68, 0x61,
+ 0x73, 0x68, 0x65, 0x73, 0x18, 0x01, 0x20, 0x03, 0x28, 0x0c, 0x52, 0x16, 0x77, 0x65, 0x62, 0x74,
+ 0x72, 0x61, 0x6e, 0x73, 0x70, 0x6f, 0x72, 0x74, 0x43, 0x65, 0x72, 0x74, 0x68, 0x61, 0x73, 0x68,
+ 0x65, 0x73, 0x12, 0x23, 0x0a, 0x0d, 0x73, 0x74, 0x72, 0x65, 0x61, 0x6d, 0x5f, 0x6d, 0x75, 0x78,
+ 0x65, 0x72, 0x73, 0x18, 0x02, 0x20, 0x03, 0x28, 0x09, 0x52, 0x0c, 0x73, 0x74, 0x72, 0x65, 0x61,
+ 0x6d, 0x4d, 0x75, 0x78, 0x65, 0x72, 0x73, 0x22, 0x92, 0x01, 0x0a, 0x15, 0x4e, 0x6f, 0x69, 0x73,
+ 0x65, 0x48, 0x61, 0x6e, 0x64, 0x73, 0x68, 0x61, 0x6b, 0x65, 0x50, 0x61, 0x79, 0x6c, 0x6f, 0x61,
+ 0x64, 0x12, 0x21, 0x0a, 0x0c, 0x69, 0x64, 0x65, 0x6e, 0x74, 0x69, 0x74, 0x79, 0x5f, 0x6b, 0x65,
+ 0x79, 0x18, 0x01, 0x20, 0x01, 0x28, 0x0c, 0x52, 0x0b, 0x69, 0x64, 0x65, 0x6e, 0x74, 0x69, 0x74,
+ 0x79, 0x4b, 0x65, 0x79, 0x12, 0x21, 0x0a, 0x0c, 0x69, 0x64, 0x65, 0x6e, 0x74, 0x69, 0x74, 0x79,
+ 0x5f, 0x73, 0x69, 0x67, 0x18, 0x02, 0x20, 0x01, 0x28, 0x0c, 0x52, 0x0b, 0x69, 0x64, 0x65, 0x6e,
+ 0x74, 0x69, 0x74, 0x79, 0x53, 0x69, 0x67, 0x12, 0x33, 0x0a, 0x0a, 0x65, 0x78, 0x74, 0x65, 0x6e,
+ 0x73, 0x69, 0x6f, 0x6e, 0x73, 0x18, 0x04, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x13, 0x2e, 0x70, 0x62,
+ 0x2e, 0x4e, 0x6f, 0x69, 0x73, 0x65, 0x45, 0x78, 0x74, 0x65, 0x6e, 0x73, 0x69, 0x6f, 0x6e, 0x73,
+ 0x52, 0x0a, 0x65, 0x78, 0x74, 0x65, 0x6e, 0x73, 0x69, 0x6f, 0x6e, 0x73,
+}
+
+var (
+ file_pb_payload_proto_rawDescOnce sync.Once
+ file_pb_payload_proto_rawDescData = file_pb_payload_proto_rawDesc
+)
+
+func file_pb_payload_proto_rawDescGZIP() []byte {
+ file_pb_payload_proto_rawDescOnce.Do(func() {
+ file_pb_payload_proto_rawDescData = protoimpl.X.CompressGZIP(file_pb_payload_proto_rawDescData)
+ })
+ return file_pb_payload_proto_rawDescData
+}
+
+var file_pb_payload_proto_msgTypes = make([]protoimpl.MessageInfo, 2)
+var file_pb_payload_proto_goTypes = []interface{}{
+ (*NoiseExtensions)(nil), // 0: pb.NoiseExtensions
+ (*NoiseHandshakePayload)(nil), // 1: pb.NoiseHandshakePayload
+}
+var file_pb_payload_proto_depIdxs = []int32{
+ 0, // 0: pb.NoiseHandshakePayload.extensions:type_name -> pb.NoiseExtensions
+ 1, // [1:1] is the sub-list for method output_type
+ 1, // [1:1] is the sub-list for method input_type
+ 1, // [1:1] is the sub-list for extension type_name
+ 1, // [1:1] is the sub-list for extension extendee
+ 0, // [0:1] is the sub-list for field type_name
+}
+
+func init() { file_pb_payload_proto_init() }
+func file_pb_payload_proto_init() {
+ if File_pb_payload_proto != nil {
+ return
+ }
+ if !protoimpl.UnsafeEnabled {
+ file_pb_payload_proto_msgTypes[0].Exporter = func(v interface{}, i int) interface{} {
+ switch v := v.(*NoiseExtensions); i {
+ case 0:
+ return &v.state
+ case 1:
+ return &v.sizeCache
+ case 2:
+ return &v.unknownFields
+ default:
+ return nil
}
}
- wireType := int(wire & 0x7)
- switch wireType {
- case 0:
- for shift := uint(0); ; shift += 7 {
- if shift >= 64 {
- return 0, ErrIntOverflowPayload
- }
- if iNdEx >= l {
- return 0, io.ErrUnexpectedEOF
- }
- iNdEx++
- if dAtA[iNdEx-1] < 0x80 {
- break
- }
- }
- case 1:
- iNdEx += 8
- case 2:
- var length int
- for shift := uint(0); ; shift += 7 {
- if shift >= 64 {
- return 0, ErrIntOverflowPayload
- }
- if iNdEx >= l {
- return 0, io.ErrUnexpectedEOF
- }
- b := dAtA[iNdEx]
- iNdEx++
- length |= (int(b) & 0x7F) << shift
- if b < 0x80 {
- break
- }
- }
- if length < 0 {
- return 0, ErrInvalidLengthPayload
+ file_pb_payload_proto_msgTypes[1].Exporter = func(v interface{}, i int) interface{} {
+ switch v := v.(*NoiseHandshakePayload); i {
+ case 0:
+ return &v.state
+ case 1:
+ return &v.sizeCache
+ case 2:
+ return &v.unknownFields
+ default:
+ return nil
}
- iNdEx += length
- case 3:
- depth++
- case 4:
- if depth == 0 {
- return 0, ErrUnexpectedEndOfGroupPayload
- }
- depth--
- case 5:
- iNdEx += 4
- default:
- return 0, fmt.Errorf("proto: illegal wireType %d", wireType)
- }
- if iNdEx < 0 {
- return 0, ErrInvalidLengthPayload
- }
- if depth == 0 {
- return iNdEx, nil
}
}
- return 0, io.ErrUnexpectedEOF
+ type x struct{}
+ out := protoimpl.TypeBuilder{
+ File: protoimpl.DescBuilder{
+ GoPackagePath: reflect.TypeOf(x{}).PkgPath(),
+ RawDescriptor: file_pb_payload_proto_rawDesc,
+ NumEnums: 0,
+ NumMessages: 2,
+ NumExtensions: 0,
+ NumServices: 0,
+ },
+ GoTypes: file_pb_payload_proto_goTypes,
+ DependencyIndexes: file_pb_payload_proto_depIdxs,
+ MessageInfos: file_pb_payload_proto_msgTypes,
+ }.Build()
+ File_pb_payload_proto = out.File
+ file_pb_payload_proto_rawDesc = nil
+ file_pb_payload_proto_goTypes = nil
+ file_pb_payload_proto_depIdxs = nil
}
-
-var (
- ErrInvalidLengthPayload = fmt.Errorf("proto: negative length found during unmarshaling")
- ErrIntOverflowPayload = fmt.Errorf("proto: integer overflow")
- ErrUnexpectedEndOfGroupPayload = fmt.Errorf("proto: unexpected end of group")
-)
diff --git a/vendor/github.com/libp2p/go-libp2p/p2p/security/noise/pb/payload.proto b/vendor/github.com/libp2p/go-libp2p/p2p/security/noise/pb/payload.proto
index 05a78c6f3..ff303b0da 100644
--- a/vendor/github.com/libp2p/go-libp2p/p2p/security/noise/pb/payload.proto
+++ b/vendor/github.com/libp2p/go-libp2p/p2p/security/noise/pb/payload.proto
@@ -1,8 +1,13 @@
-syntax = "proto3";
+syntax = "proto2";
package pb;
+message NoiseExtensions {
+ repeated bytes webtransport_certhashes = 1;
+ repeated string stream_muxers = 2;
+}
+
message NoiseHandshakePayload {
- bytes identity_key = 1;
- bytes identity_sig = 2;
- bytes data = 3;
+ optional bytes identity_key = 1;
+ optional bytes identity_sig = 2;
+ optional NoiseExtensions extensions = 4;
}
diff --git a/vendor/github.com/libp2p/go-libp2p/p2p/security/noise/session.go b/vendor/github.com/libp2p/go-libp2p/p2p/security/noise/session.go
index a563e58b5..fa32ab8fa 100644
--- a/vendor/github.com/libp2p/go-libp2p/p2p/security/noise/session.go
+++ b/vendor/github.com/libp2p/go-libp2p/p2p/security/noise/session.go
@@ -10,11 +10,14 @@ import (
"github.com/flynn/noise"
"github.com/libp2p/go-libp2p/core/crypto"
+ "github.com/libp2p/go-libp2p/core/network"
"github.com/libp2p/go-libp2p/core/peer"
+ "github.com/libp2p/go-libp2p/core/protocol"
)
type secureSession struct {
- initiator bool
+ initiator bool
+ checkPeerID bool
localID peer.ID
localKey crypto.PrivKey
@@ -37,19 +40,27 @@ type secureSession struct {
// noise prologue
prologue []byte
+
+ initiatorEarlyDataHandler, responderEarlyDataHandler EarlyDataHandler
+
+ // ConnectionState holds state information releated to the secureSession entity.
+ connectionState network.ConnectionState
}
// newSecureSession creates a Noise session over the given insecureConn Conn, using
// the libp2p identity keypair from the given Transport.
-func newSecureSession(tpt *Transport, ctx context.Context, insecure net.Conn, remote peer.ID, prologue []byte, initiator bool) (*secureSession, error) {
+func newSecureSession(tpt *Transport, ctx context.Context, insecure net.Conn, remote peer.ID, prologue []byte, initiatorEDH, responderEDH EarlyDataHandler, initiator, checkPeerID bool) (*secureSession, error) {
s := &secureSession{
- insecureConn: insecure,
- insecureReader: bufio.NewReader(insecure),
- initiator: initiator,
- localID: tpt.localID,
- localKey: tpt.privateKey,
- remoteID: remote,
- prologue: prologue,
+ insecureConn: insecure,
+ insecureReader: bufio.NewReader(insecure),
+ initiator: initiator,
+ localID: tpt.localID,
+ localKey: tpt.privateKey,
+ remoteID: remote,
+ prologue: prologue,
+ initiatorEarlyDataHandler: initiatorEDH,
+ responderEarlyDataHandler: responderEDH,
+ checkPeerID: checkPeerID,
}
// the go-routine we create to run the handshake will
@@ -84,10 +95,6 @@ func (s *secureSession) LocalPeer() peer.ID {
return s.localID
}
-func (s *secureSession) LocalPrivateKey() crypto.PrivKey {
- return s.localKey
-}
-
func (s *secureSession) LocalPublicKey() crypto.PubKey {
return s.localKey.GetPublic()
}
@@ -104,6 +111,10 @@ func (s *secureSession) RemotePublicKey() crypto.PubKey {
return s.remoteKey
}
+func (s *secureSession) ConnState() network.ConnectionState {
+ return s.connectionState
+}
+
func (s *secureSession) SetDeadline(t time.Time) error {
return s.insecureConn.SetDeadline(t)
}
@@ -119,3 +130,11 @@ func (s *secureSession) SetWriteDeadline(t time.Time) error {
func (s *secureSession) Close() error {
return s.insecureConn.Close()
}
+
+func SessionWithConnState(s *secureSession, muxer protocol.ID) *secureSession {
+ if s != nil {
+ s.connectionState.StreamMultiplexer = muxer
+ s.connectionState.UsedEarlyMuxerNegotiation = muxer != ""
+ }
+ return s
+}
diff --git a/vendor/github.com/libp2p/go-libp2p/p2p/security/noise/session_transport.go b/vendor/github.com/libp2p/go-libp2p/p2p/security/noise/session_transport.go
index b4414136e..0f26f3fa8 100644
--- a/vendor/github.com/libp2p/go-libp2p/p2p/security/noise/session_transport.go
+++ b/vendor/github.com/libp2p/go-libp2p/p2p/security/noise/session_transport.go
@@ -6,12 +6,62 @@ import (
"github.com/libp2p/go-libp2p/core/canonicallog"
"github.com/libp2p/go-libp2p/core/peer"
+ "github.com/libp2p/go-libp2p/core/protocol"
"github.com/libp2p/go-libp2p/core/sec"
+ "github.com/libp2p/go-libp2p/p2p/security/noise/pb"
+
manet "github.com/multiformats/go-multiaddr/net"
)
type SessionOption = func(*SessionTransport) error
+// Prologue sets a prologue for the Noise session.
+// The handshake will only complete successfully if both parties set the same prologue.
+// See https://noiseprotocol.org/noise.html#prologue for details.
+func Prologue(prologue []byte) SessionOption {
+ return func(s *SessionTransport) error {
+ s.prologue = prologue
+ return nil
+ }
+}
+
+// EarlyDataHandler defines what the application payload is for either the second
+// (if responder) or third (if initiator) handshake message, and defines the
+// logic for handling the other side's early data. Note the early data in the
+// second handshake message is encrypted, but the peer is not authenticated at that point.
+type EarlyDataHandler interface {
+ // Send for the initiator is called for the client before sending the third
+ // handshake message. Defines the application payload for the third message.
+ // Send for the responder is called before sending the second handshake message.
+ Send(context.Context, net.Conn, peer.ID) *pb.NoiseExtensions
+ // Received for the initiator is called when the second handshake message
+ // from the responder is received.
+ // Received for the responder is called when the third handshake message
+ // from the initiator is received.
+ Received(context.Context, net.Conn, *pb.NoiseExtensions) error
+}
+
+// EarlyData sets the `EarlyDataHandler` for the initiator and responder roles.
+// See `EarlyDataHandler` for more details.
+func EarlyData(initiator, responder EarlyDataHandler) SessionOption {
+ return func(s *SessionTransport) error {
+ s.initiatorEarlyDataHandler = initiator
+ s.responderEarlyDataHandler = responder
+ return nil
+ }
+}
+
+// DisablePeerIDCheck disables checking the remote peer ID for a noise connection.
+// For outbound connections, this is the equivalent of calling `SecureInbound` with an empty
+// peer ID. This is susceptible to MITM attacks since we do not verify the identity of the remote
+// peer.
+func DisablePeerIDCheck() SessionOption {
+ return func(s *SessionTransport) error {
+ s.disablePeerIDCheck = true
+ return nil
+ }
+}
+
var _ sec.SecureTransport = &SessionTransport{}
// SessionTransport can be used
@@ -19,13 +69,19 @@ var _ sec.SecureTransport = &SessionTransport{}
type SessionTransport struct {
t *Transport
// options
- prologue []byte
+ prologue []byte
+ disablePeerIDCheck bool
+
+ protocolID protocol.ID
+
+ initiatorEarlyDataHandler, responderEarlyDataHandler EarlyDataHandler
}
// SecureInbound runs the Noise handshake as the responder.
// If p is empty, connections from any peer are accepted.
func (i *SessionTransport) SecureInbound(ctx context.Context, insecure net.Conn, p peer.ID) (sec.SecureConn, error) {
- c, err := newSecureSession(i.t, ctx, insecure, p, i.prologue, false)
+ checkPeerID := !i.disablePeerIDCheck && p != ""
+ c, err := newSecureSession(i.t, ctx, insecure, p, i.prologue, i.initiatorEarlyDataHandler, i.responderEarlyDataHandler, false, checkPeerID)
if err != nil {
addr, maErr := manet.FromNetAddr(insecure.RemoteAddr())
if maErr == nil {
@@ -37,22 +93,9 @@ func (i *SessionTransport) SecureInbound(ctx context.Context, insecure net.Conn,
// SecureOutbound runs the Noise handshake as the initiator.
func (i *SessionTransport) SecureOutbound(ctx context.Context, insecure net.Conn, p peer.ID) (sec.SecureConn, error) {
- return newSecureSession(i.t, ctx, insecure, p, i.prologue, true)
+ return newSecureSession(i.t, ctx, insecure, p, i.prologue, i.initiatorEarlyDataHandler, i.responderEarlyDataHandler, true, !i.disablePeerIDCheck)
}
-func (t *Transport) WithSessionOptions(opts ...SessionOption) (sec.SecureTransport, error) {
- st := &SessionTransport{t: t}
- for _, opt := range opts {
- if err := opt(st); err != nil {
- return nil, err
- }
- }
- return st, nil
-}
-
-func Prologue(prologue []byte) SessionOption {
- return func(s *SessionTransport) error {
- s.prologue = prologue
- return nil
- }
+func (i *SessionTransport) ID() protocol.ID {
+ return i.protocolID
}
diff --git a/vendor/github.com/libp2p/go-libp2p/p2p/security/noise/transport.go b/vendor/github.com/libp2p/go-libp2p/p2p/security/noise/transport.go
index f935b82a7..e42cea1bf 100644
--- a/vendor/github.com/libp2p/go-libp2p/p2p/security/noise/transport.go
+++ b/vendor/github.com/libp2p/go-libp2p/p2p/security/noise/transport.go
@@ -7,51 +7,125 @@ import (
"github.com/libp2p/go-libp2p/core/canonicallog"
"github.com/libp2p/go-libp2p/core/crypto"
"github.com/libp2p/go-libp2p/core/peer"
+ "github.com/libp2p/go-libp2p/core/protocol"
"github.com/libp2p/go-libp2p/core/sec"
+ tptu "github.com/libp2p/go-libp2p/p2p/net/upgrader"
+ "github.com/libp2p/go-libp2p/p2p/security/noise/pb"
manet "github.com/multiformats/go-multiaddr/net"
)
// ID is the protocol ID for noise
const ID = "/noise"
+const maxProtoNum = 100
-var _ sec.SecureTransport = &Transport{}
-
-// Transport implements the interface sec.SecureTransport
-// https://godoc.org/github.com/libp2p/go-libp2p/core/sec#SecureConn
type Transport struct {
+ protocolID protocol.ID
localID peer.ID
privateKey crypto.PrivKey
+ muxers []protocol.ID
}
+var _ sec.SecureTransport = &Transport{}
+
// New creates a new Noise transport using the given private key as its
// libp2p identity key.
-func New(privkey crypto.PrivKey) (*Transport, error) {
+func New(id protocol.ID, privkey crypto.PrivKey, muxers []tptu.StreamMuxer) (*Transport, error) {
localID, err := peer.IDFromPrivateKey(privkey)
if err != nil {
return nil, err
}
+ muxerIDs := make([]protocol.ID, 0, len(muxers))
+ for _, m := range muxers {
+ muxerIDs = append(muxerIDs, m.ID)
+ }
+
return &Transport{
+ protocolID: id,
localID: localID,
privateKey: privkey,
+ muxers: muxerIDs,
}, nil
}
// SecureInbound runs the Noise handshake as the responder.
// If p is empty, connections from any peer are accepted.
func (t *Transport) SecureInbound(ctx context.Context, insecure net.Conn, p peer.ID) (sec.SecureConn, error) {
- c, err := newSecureSession(t, ctx, insecure, p, nil, false)
+ responderEDH := newTransportEDH(t)
+ c, err := newSecureSession(t, ctx, insecure, p, nil, nil, responderEDH, false, p != "")
if err != nil {
addr, maErr := manet.FromNetAddr(insecure.RemoteAddr())
if maErr == nil {
canonicallog.LogPeerStatus(100, p, addr, "handshake_failure", "noise", "err", err.Error())
}
}
- return c, err
+ return SessionWithConnState(c, responderEDH.MatchMuxers(false)), err
}
// SecureOutbound runs the Noise handshake as the initiator.
func (t *Transport) SecureOutbound(ctx context.Context, insecure net.Conn, p peer.ID) (sec.SecureConn, error) {
- return newSecureSession(t, ctx, insecure, p, nil, true)
+ initiatorEDH := newTransportEDH(t)
+ c, err := newSecureSession(t, ctx, insecure, p, nil, initiatorEDH, nil, true, true)
+ if err != nil {
+ return c, err
+ }
+ return SessionWithConnState(c, initiatorEDH.MatchMuxers(true)), err
+}
+
+func (t *Transport) WithSessionOptions(opts ...SessionOption) (*SessionTransport, error) {
+ st := &SessionTransport{t: t, protocolID: t.protocolID}
+ for _, opt := range opts {
+ if err := opt(st); err != nil {
+ return nil, err
+ }
+ }
+ return st, nil
+}
+
+func (t *Transport) ID() protocol.ID {
+ return t.protocolID
+}
+
+func matchMuxers(initiatorMuxers, responderMuxers []protocol.ID) protocol.ID {
+ for _, initMuxer := range initiatorMuxers {
+ for _, respMuxer := range responderMuxers {
+ if initMuxer == respMuxer {
+ return initMuxer
+ }
+ }
+ }
+ return ""
+}
+
+type transportEarlyDataHandler struct {
+ transport *Transport
+ receivedMuxers []protocol.ID
+}
+
+var _ EarlyDataHandler = &transportEarlyDataHandler{}
+
+func newTransportEDH(t *Transport) *transportEarlyDataHandler {
+ return &transportEarlyDataHandler{transport: t}
+}
+
+func (i *transportEarlyDataHandler) Send(context.Context, net.Conn, peer.ID) *pb.NoiseExtensions {
+ return &pb.NoiseExtensions{
+ StreamMuxers: protocol.ConvertToStrings(i.transport.muxers),
+ }
+}
+
+func (i *transportEarlyDataHandler) Received(_ context.Context, _ net.Conn, extension *pb.NoiseExtensions) error {
+	// For security, ignore the advertised muxer list if the number of protocols exceeds maxProtoNum.
+ if extension != nil && len(extension.StreamMuxers) <= maxProtoNum {
+ i.receivedMuxers = protocol.ConvertFromStrings(extension.GetStreamMuxers())
+ }
+ return nil
+}
+
+func (i *transportEarlyDataHandler) MatchMuxers(isInitiator bool) protocol.ID {
+ if isInitiator {
+ return matchMuxers(i.transport.muxers, i.receivedMuxers)
+ }
+ return matchMuxers(i.receivedMuxers, i.transport.muxers)
}
diff --git a/vendor/github.com/libp2p/go-libp2p/p2p/security/tls/conn.go b/vendor/github.com/libp2p/go-libp2p/p2p/security/tls/conn.go
index 6353eac80..143da3921 100644
--- a/vendor/github.com/libp2p/go-libp2p/p2p/security/tls/conn.go
+++ b/vendor/github.com/libp2p/go-libp2p/p2p/security/tls/conn.go
@@ -4,6 +4,7 @@ import (
"crypto/tls"
ci "github.com/libp2p/go-libp2p/core/crypto"
+ "github.com/libp2p/go-libp2p/core/network"
"github.com/libp2p/go-libp2p/core/peer"
"github.com/libp2p/go-libp2p/core/sec"
)
@@ -11,11 +12,10 @@ import (
type conn struct {
*tls.Conn
- localPeer peer.ID
- privKey ci.PrivKey
-
- remotePeer peer.ID
- remotePubKey ci.PubKey
+ localPeer peer.ID
+ remotePeer peer.ID
+ remotePubKey ci.PubKey
+ connectionState network.ConnectionState
}
var _ sec.SecureConn = &conn{}
@@ -24,10 +24,6 @@ func (c *conn) LocalPeer() peer.ID {
return c.localPeer
}
-func (c *conn) LocalPrivateKey() ci.PrivKey {
- return c.privKey
-}
-
func (c *conn) RemotePeer() peer.ID {
return c.remotePeer
}
@@ -35,3 +31,7 @@ func (c *conn) RemotePeer() peer.ID {
func (c *conn) RemotePublicKey() ci.PubKey {
return c.remotePubKey
}
+
+func (c *conn) ConnState() network.ConnectionState {
+ return c.connectionState
+}
diff --git a/vendor/github.com/libp2p/go-libp2p/p2p/security/tls/crypto.go b/vendor/github.com/libp2p/go-libp2p/p2p/security/tls/crypto.go
index aa16b334f..b8f23f39e 100644
--- a/vendor/github.com/libp2p/go-libp2p/p2p/security/tls/crypto.go
+++ b/vendor/github.com/libp2p/go-libp2p/p2p/security/tls/crypto.go
@@ -16,8 +16,6 @@ import (
"runtime/debug"
"time"
- "golang.org/x/sys/cpu"
-
ic "github.com/libp2p/go-libp2p/core/crypto"
"github.com/libp2p/go-libp2p/core/peer"
)
@@ -75,11 +73,10 @@ func NewIdentity(privKey ic.PrivKey, opts ...IdentityOption) (*Identity, error)
}
return &Identity{
config: tls.Config{
- MinVersion: tls.VersionTLS13,
- PreferServerCipherSuites: preferServerCipherSuites(),
- InsecureSkipVerify: true, // This is not insecure here. We will verify the cert chain ourselves.
- ClientAuth: tls.RequireAnyClientCert,
- Certificates: []tls.Certificate{*cert},
+ MinVersion: tls.VersionTLS13,
+ InsecureSkipVerify: true, // This is not insecure here. We will verify the cert chain ourselves.
+ ClientAuth: tls.RequireAnyClientCert,
+ Certificates: []tls.Certificate{*cert},
VerifyPeerCertificate: func(_ [][]byte, _ [][]*x509.Certificate) error {
panic("tls config not specialized for peer")
},
@@ -271,25 +268,3 @@ func certTemplate() (*x509.Certificate, error) {
Subject: pkix.Name{SerialNumber: subjectSN.String()},
}, nil
}
-
-// We want nodes without AES hardware (e.g. ARM) support to always use ChaCha.
-// Only if both nodes have AES hardware support (e.g. x86), AES should be used.
-// x86->x86: AES, ARM->x86: ChaCha, x86->ARM: ChaCha and ARM->ARM: Chacha
-// This function returns true if we don't have AES hardware support, and false otherwise.
-// Thus, ARM servers will always use their own cipher suite preferences (ChaCha first),
-// and x86 servers will always use the client's cipher suite preferences.
-func preferServerCipherSuites() bool {
- // Copied from the Go TLS implementation.
-
- // Check the cpu flags for each platform that has optimized GCM implementations.
- // Worst case, these variables will just all be false.
- var (
- hasGCMAsmAMD64 = cpu.X86.HasAES && cpu.X86.HasPCLMULQDQ
- hasGCMAsmARM64 = cpu.ARM64.HasAES && cpu.ARM64.HasPMULL
- // Keep in sync with crypto/aes/cipher_s390x.go.
- hasGCMAsmS390X = cpu.S390X.HasAES && cpu.S390X.HasAESCBC && cpu.S390X.HasAESCTR && (cpu.S390X.HasGHASH || cpu.S390X.HasAESGCM)
-
- hasGCMAsm = hasGCMAsmAMD64 || hasGCMAsmARM64 || hasGCMAsmS390X
- )
- return !hasGCMAsm
-}
diff --git a/vendor/github.com/libp2p/go-libp2p/p2p/security/tls/transport.go b/vendor/github.com/libp2p/go-libp2p/p2p/security/tls/transport.go
index f6aa64f6a..7c28efe37 100644
--- a/vendor/github.com/libp2p/go-libp2p/p2p/security/tls/transport.go
+++ b/vendor/github.com/libp2p/go-libp2p/p2p/security/tls/transport.go
@@ -11,8 +11,11 @@ import (
"github.com/libp2p/go-libp2p/core/canonicallog"
ci "github.com/libp2p/go-libp2p/core/crypto"
+ "github.com/libp2p/go-libp2p/core/network"
"github.com/libp2p/go-libp2p/core/peer"
+ "github.com/libp2p/go-libp2p/core/protocol"
"github.com/libp2p/go-libp2p/core/sec"
+ tptu "github.com/libp2p/go-libp2p/p2p/net/upgrader"
manet "github.com/multiformats/go-multiaddr/net"
)
@@ -24,19 +27,29 @@ const ID = "/tls/1.0.0"
type Transport struct {
identity *Identity
- localPeer peer.ID
- privKey ci.PrivKey
+ localPeer peer.ID
+ privKey ci.PrivKey
+ muxers []protocol.ID
+ protocolID protocol.ID
}
+var _ sec.SecureTransport = &Transport{}
+
// New creates a TLS encrypted transport
-func New(key ci.PrivKey) (*Transport, error) {
- id, err := peer.IDFromPrivateKey(key)
+func New(id protocol.ID, key ci.PrivKey, muxers []tptu.StreamMuxer) (*Transport, error) {
+ localPeer, err := peer.IDFromPrivateKey(key)
if err != nil {
return nil, err
}
+ muxerIDs := make([]protocol.ID, 0, len(muxers))
+ for _, m := range muxers {
+ muxerIDs = append(muxerIDs, m.ID)
+ }
t := &Transport{
- localPeer: id,
- privKey: key,
+ protocolID: id,
+ localPeer: localPeer,
+ privKey: key,
+ muxers: muxerIDs,
}
identity, err := NewIdentity(key)
@@ -47,12 +60,35 @@ func New(key ci.PrivKey) (*Transport, error) {
return t, nil
}
-var _ sec.SecureTransport = &Transport{}
-
// SecureInbound runs the TLS handshake as a server.
// If p is empty, connections from any peer are accepted.
func (t *Transport) SecureInbound(ctx context.Context, insecure net.Conn, p peer.ID) (sec.SecureConn, error) {
config, keyCh := t.identity.ConfigForPeer(p)
+ muxers := make([]string, 0, len(t.muxers))
+ for _, muxer := range t.muxers {
+ muxers = append(muxers, string(muxer))
+ }
+ // TLS' ALPN selection lets the server select the protocol, preferring the server's preferences.
+ // We want to prefer the client's preference though.
+ getConfigForClient := config.GetConfigForClient
+ config.GetConfigForClient = func(info *tls.ClientHelloInfo) (*tls.Config, error) {
+ alpnLoop:
+ for _, proto := range info.SupportedProtos {
+ for _, m := range muxers {
+ if m == proto {
+ // Match found. Select this muxer, as it's the client's preference.
+ // There's no need to add the "libp2p" entry here.
+ config.NextProtos = []string{proto}
+ break alpnLoop
+ }
+ }
+ }
+ if getConfigForClient != nil {
+ return getConfigForClient(info)
+ }
+ return config, nil
+ }
+ config.NextProtos = append(muxers, config.NextProtos...)
cs, err := t.handshake(ctx, tls.Server(insecure, config), keyCh)
if err != nil {
addr, maErr := manet.FromNetAddr(insecure.RemoteAddr())
@@ -73,6 +109,12 @@ func (t *Transport) SecureInbound(ctx context.Context, insecure net.Conn, p peer
// notice this after 1 RTT when calling Read.
func (t *Transport) SecureOutbound(ctx context.Context, insecure net.Conn, p peer.ID) (sec.SecureConn, error) {
config, keyCh := t.identity.ConfigForPeer(p)
+ muxers := make([]string, 0, len(t.muxers))
+ for _, muxer := range t.muxers {
+ muxers = append(muxers, (string)(muxer))
+ }
+	// Prepend the preferred muxers list to TLS config.
+ config.NextProtos = append(muxers, config.NextProtos...)
cs, err := t.handshake(ctx, tls.Client(insecure, config), keyCh)
if err != nil {
insecure.Close()
@@ -89,6 +131,7 @@ func (t *Transport) handshake(ctx context.Context, tlsConn *tls.Conn, keyCh <-ch
}
}()
+	// Run the TLS handshake; this blocks until it completes or ctx is done.
if err := tlsConn.HandshakeContext(ctx); err != nil {
return nil, err
}
@@ -111,11 +154,29 @@ func (t *Transport) setupConn(tlsConn *tls.Conn, remotePubKey ci.PubKey) (sec.Se
if err != nil {
return nil, err
}
+
+ nextProto := tlsConn.ConnectionState().NegotiatedProtocol
+ // The special ALPN extension value "libp2p" is used by libp2p versions
+	// that don't support early muxer negotiation. If we see this special
+ // value selected, that means we are handshaking with a version that does
+ // not support early muxer negotiation. In this case return empty nextProto
+ // to indicate no muxer is selected.
+ if nextProto == "libp2p" {
+ nextProto = ""
+ }
+
return &conn{
Conn: tlsConn,
localPeer: t.localPeer,
- privKey: t.privKey,
remotePeer: remotePeerID,
remotePubKey: remotePubKey,
+ connectionState: network.ConnectionState{
+ StreamMultiplexer: protocol.ID(nextProto),
+ UsedEarlyMuxerNegotiation: nextProto != "",
+ },
}, nil
}
+
+func (t *Transport) ID() protocol.ID {
+ return t.protocolID
+}
diff --git a/vendor/github.com/libp2p/go-libp2p/p2p/transport/quic/conn.go b/vendor/github.com/libp2p/go-libp2p/p2p/transport/quic/conn.go
index 58537b6b1..a2da81eb3 100644
--- a/vendor/github.com/libp2p/go-libp2p/p2p/transport/quic/conn.go
+++ b/vendor/github.com/libp2p/go-libp2p/p2p/transport/quic/conn.go
@@ -8,18 +8,16 @@ import (
"github.com/libp2p/go-libp2p/core/peer"
tpt "github.com/libp2p/go-libp2p/core/transport"
- "github.com/lucas-clemente/quic-go"
ma "github.com/multiformats/go-multiaddr"
+ "github.com/quic-go/quic-go"
)
type conn struct {
quicConn quic.Connection
- pconn *reuseConn
transport *transport
scope network.ConnManagementScope
localPeer peer.ID
- privKey ic.PrivKey
localMultiaddr ma.Multiaddr
remotePeerID peer.ID
@@ -33,9 +31,12 @@ var _ tpt.CapableConn = &conn{}
// It must be called even if the peer closed the connection in order for
// garbage collection to properly work in this package.
func (c *conn) Close() error {
+ return c.closeWithError(0, "")
+}
+
+func (c *conn) closeWithError(errCode quic.ApplicationErrorCode, errString string) error {
c.transport.removeConn(c.quicConn)
- err := c.quicConn.CloseWithError(0, "")
- c.pconn.DecreaseCount()
+ err := c.quicConn.CloseWithError(errCode, errString)
c.scope.Done()
return err
}
@@ -62,39 +63,29 @@ func (c *conn) AcceptStream() (network.MuxedStream, error) {
}
// LocalPeer returns our peer ID
-func (c *conn) LocalPeer() peer.ID {
- return c.localPeer
-}
-
-// LocalPrivateKey returns our private key
-func (c *conn) LocalPrivateKey() ic.PrivKey {
- return c.privKey
-}
+func (c *conn) LocalPeer() peer.ID { return c.localPeer }
// RemotePeer returns the peer ID of the remote peer.
-func (c *conn) RemotePeer() peer.ID {
- return c.remotePeerID
-}
+func (c *conn) RemotePeer() peer.ID { return c.remotePeerID }
// RemotePublicKey returns the public key of the remote peer.
-func (c *conn) RemotePublicKey() ic.PubKey {
- return c.remotePubKey
-}
+func (c *conn) RemotePublicKey() ic.PubKey { return c.remotePubKey }
// LocalMultiaddr returns the local Multiaddr associated
-func (c *conn) LocalMultiaddr() ma.Multiaddr {
- return c.localMultiaddr
-}
+func (c *conn) LocalMultiaddr() ma.Multiaddr { return c.localMultiaddr }
// RemoteMultiaddr returns the remote Multiaddr associated
-func (c *conn) RemoteMultiaddr() ma.Multiaddr {
- return c.remoteMultiaddr
-}
+func (c *conn) RemoteMultiaddr() ma.Multiaddr { return c.remoteMultiaddr }
-func (c *conn) Transport() tpt.Transport {
- return c.transport
-}
+func (c *conn) Transport() tpt.Transport { return c.transport }
+
+func (c *conn) Scope() network.ConnScope { return c.scope }
-func (c *conn) Scope() network.ConnScope {
- return c.scope
+// ConnState is the state of security connection.
+func (c *conn) ConnState() network.ConnectionState {
+ t := "quic-v1"
+ if _, err := c.LocalMultiaddr().ValueForProtocol(ma.P_QUIC); err == nil {
+ t = "quic"
+ }
+ return network.ConnectionState{Transport: t}
}
diff --git a/vendor/github.com/libp2p/go-libp2p/p2p/transport/quic/listener.go b/vendor/github.com/libp2p/go-libp2p/p2p/transport/quic/listener.go
index ddf18441c..73bb5026b 100644
--- a/vendor/github.com/libp2p/go-libp2p/p2p/transport/quic/listener.go
+++ b/vendor/github.com/libp2p/go-libp2p/p2p/transport/quic/listener.go
@@ -2,7 +2,7 @@ package libp2pquic
import (
"context"
- "crypto/tls"
+ "errors"
"net"
ic "github.com/libp2p/go-libp2p/core/crypto"
@@ -10,73 +10,59 @@ import (
"github.com/libp2p/go-libp2p/core/peer"
tpt "github.com/libp2p/go-libp2p/core/transport"
p2ptls "github.com/libp2p/go-libp2p/p2p/security/tls"
+ "github.com/libp2p/go-libp2p/p2p/transport/quicreuse"
- "github.com/lucas-clemente/quic-go"
ma "github.com/multiformats/go-multiaddr"
+ "github.com/quic-go/quic-go"
)
-var quicListen = quic.Listen // so we can mock it in tests
-
// A listener listens for QUIC connections.
type listener struct {
- quicListener quic.Listener
- conn *reuseConn
- transport *transport
- rcmgr network.ResourceManager
- privKey ic.PrivKey
- localPeer peer.ID
- localMultiaddr ma.Multiaddr
+ reuseListener quicreuse.Listener
+ transport *transport
+ rcmgr network.ResourceManager
+ privKey ic.PrivKey
+ localPeer peer.ID
+ localMultiaddrs map[quic.VersionNumber]ma.Multiaddr
}
-var _ tpt.Listener = &listener{}
-
-func newListener(rconn *reuseConn, t *transport, localPeer peer.ID, key ic.PrivKey, identity *p2ptls.Identity, rcmgr network.ResourceManager) (tpt.Listener, error) {
- var tlsConf tls.Config
- tlsConf.GetConfigForClient = func(_ *tls.ClientHelloInfo) (*tls.Config, error) {
- // return a tls.Config that verifies the peer's certificate chain.
- // Note that since we have no way of associating an incoming QUIC connection with
- // the peer ID calculated here, we don't actually receive the peer's public key
- // from the key chan.
- conf, _ := identity.ConfigForPeer("")
- return conf, nil
- }
- ln, err := quicListen(rconn, &tlsConf, t.serverConfig)
- if err != nil {
- return nil, err
- }
- localMultiaddr, err := toQuicMultiaddr(ln.Addr())
- if err != nil {
- return nil, err
+func newListener(ln quicreuse.Listener, t *transport, localPeer peer.ID, key ic.PrivKey, rcmgr network.ResourceManager) (listener, error) {
+ localMultiaddrs := make(map[quic.VersionNumber]ma.Multiaddr)
+ for _, addr := range ln.Multiaddrs() {
+ if _, err := addr.ValueForProtocol(ma.P_QUIC); err == nil {
+ localMultiaddrs[quic.VersionDraft29] = addr
+ }
+ if _, err := addr.ValueForProtocol(ma.P_QUIC_V1); err == nil {
+ localMultiaddrs[quic.Version1] = addr
+ }
}
- return &listener{
- conn: rconn,
- quicListener: ln,
- transport: t,
- rcmgr: rcmgr,
- privKey: key,
- localPeer: localPeer,
- localMultiaddr: localMultiaddr,
+
+ return listener{
+ reuseListener: ln,
+ transport: t,
+ rcmgr: rcmgr,
+ privKey: key,
+ localPeer: localPeer,
+ localMultiaddrs: localMultiaddrs,
}, nil
}
// Accept accepts new connections.
func (l *listener) Accept() (tpt.CapableConn, error) {
for {
- qconn, err := l.quicListener.Accept(context.Background())
+ qconn, err := l.reuseListener.Accept(context.Background())
if err != nil {
return nil, err
}
c, err := l.setupConn(qconn)
if err != nil {
- qconn.CloseWithError(0, err.Error())
continue
}
+ l.transport.addConn(qconn, c)
if l.transport.gater != nil && !(l.transport.gater.InterceptAccept(c) && l.transport.gater.InterceptSecured(network.DirInbound, c.remotePeerID, c)) {
- c.scope.Done()
- qconn.CloseWithError(errorCodeConnectionGating, "connection gated")
+ c.closeWithError(errorCodeConnectionGating, "connection gated")
continue
}
- l.transport.addConn(qconn, c)
// return through active hole punching if any
key := holePunchKey{addr: qconn.RemoteAddr().String(), peer: c.remotePeerID}
@@ -97,7 +83,7 @@ func (l *listener) Accept() (tpt.CapableConn, error) {
}
func (l *listener) setupConn(qconn quic.Connection) (*conn, error) {
- remoteMultiaddr, err := toQuicMultiaddr(qconn.RemoteAddr())
+ remoteMultiaddr, err := quicreuse.ToQuicMultiaddr(qconn.RemoteAddr(), qconn.ConnectionState().Version)
if err != nil {
return nil, err
}
@@ -107,35 +93,46 @@ func (l *listener) setupConn(qconn quic.Connection) (*conn, error) {
log.Debugw("resource manager blocked incoming connection", "addr", qconn.RemoteAddr(), "error", err)
return nil, err
}
+ c, err := l.setupConnWithScope(qconn, connScope, remoteMultiaddr)
+ if err != nil {
+ connScope.Done()
+ qconn.CloseWithError(1, "")
+ return nil, err
+ }
+
+ return c, nil
+}
+
+func (l *listener) setupConnWithScope(qconn quic.Connection, connScope network.ConnManagementScope, remoteMultiaddr ma.Multiaddr) (*conn, error) {
+
// The tls.Config used to establish this connection already verified the certificate chain.
// Since we don't have any way of knowing which tls.Config was used though,
// we have to re-determine the peer's identity here.
// Therefore, this is expected to never fail.
remotePubKey, err := p2ptls.PubKeyFromCertChain(qconn.ConnectionState().TLS.PeerCertificates)
if err != nil {
- connScope.Done()
return nil, err
}
remotePeerID, err := peer.IDFromPublicKey(remotePubKey)
if err != nil {
- connScope.Done()
return nil, err
}
if err := connScope.SetPeer(remotePeerID); err != nil {
log.Debugw("resource manager blocked incoming connection for peer", "peer", remotePeerID, "addr", qconn.RemoteAddr(), "error", err)
- connScope.Done()
return nil, err
}
- l.conn.IncreaseCount()
+ localMultiaddr, found := l.localMultiaddrs[qconn.ConnectionState().Version]
+ if !found {
+ return nil, errors.New("unknown QUIC version:" + qconn.ConnectionState().Version.String())
+ }
+
return &conn{
quicConn: qconn,
- pconn: l.conn,
transport: l.transport,
scope: connScope,
localPeer: l.localPeer,
- localMultiaddr: l.localMultiaddr,
- privKey: l.privKey,
+ localMultiaddr: localMultiaddr,
remoteMultiaddr: remoteMultiaddr,
remotePeerID: remotePeerID,
remotePubKey: remotePubKey,
@@ -144,16 +141,10 @@ func (l *listener) setupConn(qconn quic.Connection) (*conn, error) {
// Close closes the listener.
func (l *listener) Close() error {
- defer l.conn.DecreaseCount()
- return l.quicListener.Close()
+ return l.reuseListener.Close()
}
// Addr returns the address of this listener.
func (l *listener) Addr() net.Addr {
- return l.quicListener.Addr()
-}
-
-// Multiaddr returns the multiaddress of this listener.
-func (l *listener) Multiaddr() ma.Multiaddr {
- return l.localMultiaddr
+ return l.reuseListener.Addr()
}
diff --git a/vendor/github.com/libp2p/go-libp2p/p2p/transport/quic/quic_multiaddr.go b/vendor/github.com/libp2p/go-libp2p/p2p/transport/quic/quic_multiaddr.go
deleted file mode 100644
index 81b66af8a..000000000
--- a/vendor/github.com/libp2p/go-libp2p/p2p/transport/quic/quic_multiaddr.go
+++ /dev/null
@@ -1,30 +0,0 @@
-package libp2pquic
-
-import (
- "net"
-
- ma "github.com/multiformats/go-multiaddr"
- manet "github.com/multiformats/go-multiaddr/net"
-)
-
-var quicMA ma.Multiaddr
-
-func init() {
- var err error
- quicMA, err = ma.NewMultiaddr("/quic")
- if err != nil {
- panic(err)
- }
-}
-
-func toQuicMultiaddr(na net.Addr) (ma.Multiaddr, error) {
- udpMA, err := manet.FromNetAddr(na)
- if err != nil {
- return nil, err
- }
- return udpMA.Encapsulate(quicMA), nil
-}
-
-func fromQuicMultiaddr(addr ma.Multiaddr) (net.Addr, error) {
- return manet.ToNetAddr(addr.Decapsulate(quicMA))
-}
diff --git a/vendor/github.com/libp2p/go-libp2p/p2p/transport/quic/stream.go b/vendor/github.com/libp2p/go-libp2p/p2p/transport/quic/stream.go
index 5d276dab8..56f12dade 100644
--- a/vendor/github.com/libp2p/go-libp2p/p2p/transport/quic/stream.go
+++ b/vendor/github.com/libp2p/go-libp2p/p2p/transport/quic/stream.go
@@ -5,7 +5,7 @@ import (
"github.com/libp2p/go-libp2p/core/network"
- "github.com/lucas-clemente/quic-go"
+ "github.com/quic-go/quic-go"
)
const (
diff --git a/vendor/github.com/libp2p/go-libp2p/p2p/transport/quic/transport.go b/vendor/github.com/libp2p/go-libp2p/p2p/transport/quic/transport.go
index 3702c5055..f279aed75 100644
--- a/vendor/github.com/libp2p/go-libp2p/p2p/transport/quic/transport.go
+++ b/vendor/github.com/libp2p/go-libp2p/p2p/transport/quic/transport.go
@@ -2,16 +2,14 @@ package libp2pquic
import (
"context"
+ "crypto/tls"
"errors"
"fmt"
- "io"
"math/rand"
"net"
"sync"
"time"
- "golang.org/x/crypto/hkdf"
-
"github.com/libp2p/go-libp2p/core/connmgr"
ic "github.com/libp2p/go-libp2p/core/crypto"
"github.com/libp2p/go-libp2p/core/network"
@@ -19,105 +17,44 @@ import (
"github.com/libp2p/go-libp2p/core/pnet"
tpt "github.com/libp2p/go-libp2p/core/transport"
p2ptls "github.com/libp2p/go-libp2p/p2p/security/tls"
+ "github.com/libp2p/go-libp2p/p2p/transport/quicreuse"
+ logging "github.com/ipfs/go-log/v2"
ma "github.com/multiformats/go-multiaddr"
mafmt "github.com/multiformats/go-multiaddr-fmt"
manet "github.com/multiformats/go-multiaddr/net"
-
- logging "github.com/ipfs/go-log/v2"
- "github.com/lucas-clemente/quic-go"
- "github.com/minio/sha256-simd"
+ "github.com/quic-go/quic-go"
)
var log = logging.Logger("quic-transport")
var ErrHolePunching = errors.New("hole punching attempted; no active dial")
-var quicDialContext = quic.DialContext // so we can mock it in tests
-
var HolePunchTimeout = 5 * time.Second
-var quicConfig = &quic.Config{
- MaxIncomingStreams: 256,
- MaxIncomingUniStreams: -1, // disable unidirectional streams
- MaxStreamReceiveWindow: 10 * (1 << 20), // 10 MB
- MaxConnectionReceiveWindow: 15 * (1 << 20), // 15 MB
- AcceptToken: func(clientAddr net.Addr, _ *quic.Token) bool {
- // TODO(#6): require source address validation when under load
- return true
- },
- KeepAlivePeriod: 15 * time.Second,
- Versions: []quic.VersionNumber{quic.VersionDraft29, quic.Version1},
-}
-
-const statelessResetKeyInfo = "libp2p quic stateless reset key"
const errorCodeConnectionGating = 0x47415445 // GATE in ASCII
-type connManager struct {
- reuseUDP4 *reuse
- reuseUDP6 *reuse
-}
-
-func newConnManager() (*connManager, error) {
- reuseUDP4 := newReuse()
- reuseUDP6 := newReuse()
-
- return &connManager{
- reuseUDP4: reuseUDP4,
- reuseUDP6: reuseUDP6,
- }, nil
-}
-
-func (c *connManager) getReuse(network string) (*reuse, error) {
- switch network {
- case "udp4":
- return c.reuseUDP4, nil
- case "udp6":
- return c.reuseUDP6, nil
- default:
- return nil, errors.New("invalid network: must be either udp4 or udp6")
- }
-}
-
-func (c *connManager) Listen(network string, laddr *net.UDPAddr) (*reuseConn, error) {
- reuse, err := c.getReuse(network)
- if err != nil {
- return nil, err
- }
- return reuse.Listen(network, laddr)
-}
-
-func (c *connManager) Dial(network string, raddr *net.UDPAddr) (*reuseConn, error) {
- reuse, err := c.getReuse(network)
- if err != nil {
- return nil, err
- }
- return reuse.Dial(network, raddr)
-}
-
-func (c *connManager) Close() error {
- if err := c.reuseUDP6.Close(); err != nil {
- return err
- }
- return c.reuseUDP4.Close()
-}
-
// The Transport implements the tpt.Transport interface for QUIC connections.
type transport struct {
- privKey ic.PrivKey
- localPeer peer.ID
- identity *p2ptls.Identity
- connManager *connManager
- serverConfig *quic.Config
- clientConfig *quic.Config
- gater connmgr.ConnectionGater
- rcmgr network.ResourceManager
+ privKey ic.PrivKey
+ localPeer peer.ID
+ identity *p2ptls.Identity
+ connManager *quicreuse.ConnManager
+ gater connmgr.ConnectionGater
+ rcmgr network.ResourceManager
holePunchingMx sync.Mutex
holePunching map[holePunchKey]*activeHolePunch
+ rndMx sync.Mutex
+ rnd rand.Rand
+
connMx sync.Mutex
conns map[quic.Connection]*conn
+
+ listenersMu sync.Mutex
+ // map of UDPAddr as string to a virtualListeners
+ listeners map[string][]*virtualListener
}
var _ tpt.Transport = &transport{}
@@ -133,7 +70,7 @@ type activeHolePunch struct {
}
// NewTransport creates a new QUIC transport
-func NewTransport(key ic.PrivKey, psk pnet.PSK, gater connmgr.ConnectionGater, rcmgr network.ResourceManager) (tpt.Transport, error) {
+func NewTransport(key ic.PrivKey, connManager *quicreuse.ConnManager, psk pnet.PSK, gater connmgr.ConnectionGater, rcmgr network.ResourceManager) (tpt.Transport, error) {
if len(psk) > 0 {
log.Error("QUIC doesn't support private networks yet.")
return nil, errors.New("QUIC doesn't support private networks yet")
@@ -146,26 +83,12 @@ func NewTransport(key ic.PrivKey, psk pnet.PSK, gater connmgr.ConnectionGater, r
if err != nil {
return nil, err
}
- connManager, err := newConnManager()
- if err != nil {
- return nil, err
- }
+
if rcmgr == nil {
- rcmgr = network.NullResourceManager
- }
- config := quicConfig.Clone()
- keyBytes, err := key.Raw()
- if err != nil {
- return nil, err
- }
- keyReader := hkdf.New(sha256.New, keyBytes, nil, []byte(statelessResetKeyInfo))
- config.StatelessResetKey = make([]byte, 32)
- if _, err := io.ReadFull(keyReader, config.StatelessResetKey); err != nil {
- return nil, err
+ rcmgr = &network.NullResourceManager{}
}
- config.Tracer = tracer
- tr := &transport{
+ return &transport{
privKey: key,
localPeer: localPeer,
identity: identity,
@@ -174,30 +97,16 @@ func NewTransport(key ic.PrivKey, psk pnet.PSK, gater connmgr.ConnectionGater, r
rcmgr: rcmgr,
conns: make(map[quic.Connection]*conn),
holePunching: make(map[holePunchKey]*activeHolePunch),
- }
- config.AllowConnectionWindowIncrease = tr.allowWindowIncrease
- tr.serverConfig = config
- tr.clientConfig = config.Clone()
- return tr, nil
+ rnd: *rand.New(rand.NewSource(time.Now().UnixNano())),
+
+ listeners: make(map[string][]*virtualListener),
+ }, nil
}
// Dial dials a new QUIC connection
-func (t *transport) Dial(ctx context.Context, raddr ma.Multiaddr, p peer.ID) (tpt.CapableConn, error) {
- netw, host, err := manet.DialArgs(raddr)
- if err != nil {
- return nil, err
- }
- addr, err := net.ResolveUDPAddr(netw, host)
- if err != nil {
- return nil, err
- }
- remoteMultiaddr, err := toQuicMultiaddr(addr)
- if err != nil {
- return nil, err
- }
- tlsConf, keyCh := t.identity.ConfigForPeer(p)
+func (t *transport) Dial(ctx context.Context, raddr ma.Multiaddr, p peer.ID) (_c tpt.CapableConn, _err error) {
if ok, isClient, _ := network.GetSimultaneousConnect(ctx); ok && !isClient {
- return t.holePunch(ctx, netw, addr, p)
+ return t.holePunch(ctx, raddr, p)
}
scope, err := t.rcmgr.OpenConnection(network.DirOutbound, false, raddr)
@@ -205,21 +114,27 @@ func (t *transport) Dial(ctx context.Context, raddr ma.Multiaddr, p peer.ID) (tp
log.Debugw("resource manager blocked outgoing connection", "peer", p, "addr", raddr, "error", err)
return nil, err
}
- if err := scope.SetPeer(p); err != nil {
- log.Debugw("resource manager blocked outgoing connection for peer", "peer", p, "addr", raddr, "error", err)
+
+ c, err := t.dialWithScope(ctx, raddr, p, scope)
+ if err != nil {
scope.Done()
return nil, err
}
- pconn, err := t.connManager.Dial(netw, addr)
- if err != nil {
+ return c, nil
+}
+
+func (t *transport) dialWithScope(ctx context.Context, raddr ma.Multiaddr, p peer.ID, scope network.ConnManagementScope) (tpt.CapableConn, error) {
+ if err := scope.SetPeer(p); err != nil {
+ log.Debugw("resource manager blocked outgoing connection for peer", "peer", p, "addr", raddr, "error", err)
return nil, err
}
- qconn, err := quicDialContext(ctx, pconn, addr, host, tlsConf, t.clientConfig)
+
+ tlsConf, keyCh := t.identity.ConfigForPeer(p)
+ pconn, err := t.connManager.DialQUIC(ctx, raddr, tlsConf, t.allowWindowIncrease)
if err != nil {
- scope.Done()
- pconn.DecreaseCount()
return nil, err
}
+
// Should be ready by this point, don't block.
var remotePubKey ic.PubKey
select {
@@ -227,33 +142,30 @@ func (t *transport) Dial(ctx context.Context, raddr ma.Multiaddr, p peer.ID) (tp
default:
}
if remotePubKey == nil {
- pconn.DecreaseCount()
- scope.Done()
+ pconn.CloseWithError(1, "")
return nil, errors.New("p2p/transport/quic BUG: expected remote pub key to be set")
}
- localMultiaddr, err := toQuicMultiaddr(pconn.LocalAddr())
+ localMultiaddr, err := quicreuse.ToQuicMultiaddr(pconn.LocalAddr(), pconn.ConnectionState().Version)
if err != nil {
- qconn.CloseWithError(0, "")
+ pconn.CloseWithError(1, "")
return nil, err
}
c := &conn{
- quicConn: qconn,
- pconn: pconn,
+ quicConn: pconn,
transport: t,
scope: scope,
- privKey: t.privKey,
localPeer: t.localPeer,
localMultiaddr: localMultiaddr,
remotePubKey: remotePubKey,
remotePeerID: p,
- remoteMultiaddr: remoteMultiaddr,
+ remoteMultiaddr: raddr,
}
if t.gater != nil && !t.gater.InterceptSecured(network.DirOutbound, p, c) {
- qconn.CloseWithError(errorCodeConnectionGating, "connection gated")
+ pconn.CloseWithError(errorCodeConnectionGating, "connection gated")
return nil, fmt.Errorf("secured connection gated")
}
- t.addConn(qconn, c)
+ t.addConn(pconn, c)
return c, nil
}
@@ -269,7 +181,15 @@ func (t *transport) removeConn(conn quic.Connection) {
t.connMx.Unlock()
}
-func (t *transport) holePunch(ctx context.Context, network string, addr *net.UDPAddr, p peer.ID) (tpt.CapableConn, error) {
+func (t *transport) holePunch(ctx context.Context, raddr ma.Multiaddr, p peer.ID) (tpt.CapableConn, error) {
+ network, saddr, err := manet.DialArgs(raddr)
+ if err != nil {
+ return nil, err
+ }
+ addr, err := net.ResolveUDPAddr(network, saddr)
+ if err != nil {
+ return nil, err
+ }
pconn, err := t.connManager.Dial(network, addr)
if err != nil {
return nil, err
@@ -300,11 +220,14 @@ func (t *transport) holePunch(ctx context.Context, network string, addr *net.UDP
var punchErr error
loop:
for i := 0; ; i++ {
- if _, err := rand.Read(payload); err != nil {
+ t.rndMx.Lock()
+ _, err := t.rnd.Read(payload)
+ t.rndMx.Unlock()
+ if err != nil {
punchErr = err
break
}
- if _, err := pconn.UDPConn.WriteToUDP(payload, addr); err != nil {
+ if _, err := pconn.WriteTo(payload, addr); err != nil {
punchErr = err
break
}
@@ -346,7 +269,7 @@ loop:
}
// Don't use mafmt.QUIC as we don't want to dial DNS addresses. Just /ip{4,6}/udp/quic
-var dialMatcher = mafmt.And(mafmt.IP, mafmt.Base(ma.P_UDP), mafmt.Base(ma.P_QUIC))
+var dialMatcher = mafmt.And(mafmt.IP, mafmt.Base(ma.P_UDP), mafmt.Or(mafmt.Base(ma.P_QUIC), mafmt.Base(ma.P_QUIC_V1)))
// CanDial determines if we can dial to an address
func (t *transport) CanDial(addr ma.Multiaddr) bool {
@@ -355,24 +278,65 @@ func (t *transport) CanDial(addr ma.Multiaddr) bool {
// Listen listens for new QUIC connections on the passed multiaddr.
func (t *transport) Listen(addr ma.Multiaddr) (tpt.Listener, error) {
- lnet, host, err := manet.DialArgs(addr)
+ var tlsConf tls.Config
+ tlsConf.GetConfigForClient = func(_ *tls.ClientHelloInfo) (*tls.Config, error) {
+ // return a tls.Config that verifies the peer's certificate chain.
+ // Note that since we have no way of associating an incoming QUIC connection with
+ // the peer ID calculated here, we don't actually receive the peer's public key
+ // from the key chan.
+ conf, _ := t.identity.ConfigForPeer("")
+ return conf, nil
+ }
+ tlsConf.NextProtos = []string{"libp2p"}
+ udpAddr, version, err := quicreuse.FromQuicMultiaddr(addr)
if err != nil {
return nil, err
}
- laddr, err := net.ResolveUDPAddr(lnet, host)
- if err != nil {
- return nil, err
- }
- conn, err := t.connManager.Listen(lnet, laddr)
- if err != nil {
- return nil, err
+
+ t.listenersMu.Lock()
+ defer t.listenersMu.Unlock()
+ listeners := t.listeners[udpAddr.String()]
+ var underlyingListener *listener
+ var acceptRunner *acceptLoopRunner
+ if len(listeners) != 0 {
+ // We already have an underlying listener, let's use it
+ underlyingListener = listeners[0].listener
+ acceptRunner = listeners[0].acceptRunnner
+ // Make sure our underlying listener is listening on the specified QUIC version
+ if _, ok := underlyingListener.localMultiaddrs[version]; !ok {
+ return nil, fmt.Errorf("can't listen on quic version %v, underlying listener doesn't support it", version)
+ }
+ } else {
+ ln, err := t.connManager.ListenQUIC(addr, &tlsConf, t.allowWindowIncrease)
+ if err != nil {
+ return nil, err
+ }
+ l, err := newListener(ln, t, t.localPeer, t.privKey, t.rcmgr)
+ if err != nil {
+ _ = ln.Close()
+ return nil, err
+ }
+ underlyingListener = &l
+
+ acceptRunner = &acceptLoopRunner{
+ acceptSem: make(chan struct{}, 1),
+ muxer: make(map[quic.VersionNumber]chan acceptVal),
+ }
}
- ln, err := newListener(conn, t, t.localPeer, t.privKey, t.identity, t.rcmgr)
- if err != nil {
- conn.DecreaseCount()
- return nil, err
+
+ l := &virtualListener{
+ listener: underlyingListener,
+ version: version,
+ udpAddr: udpAddr.String(),
+ t: t,
+ acceptRunnner: acceptRunner,
+ acceptChan: acceptRunner.AcceptForVersion(version),
}
- return ln, nil
+
+ listeners = append(listeners, l)
+ t.listeners[udpAddr.String()] = listeners
+
+ return l, nil
}
func (t *transport) allowWindowIncrease(conn quic.Connection, size uint64) bool {
@@ -396,7 +360,7 @@ func (t *transport) Proxy() bool {
// Protocols returns the set of protocols handled by this transport.
func (t *transport) Protocols() []int {
- return []int{ma.P_QUIC}
+ return t.connManager.Protocols()
}
func (t *transport) String() string {
@@ -404,5 +368,32 @@ func (t *transport) String() string {
}
func (t *transport) Close() error {
- return t.connManager.Close()
+ return nil
+}
+
+func (t *transport) CloseVirtualListener(l *virtualListener) error {
+ t.listenersMu.Lock()
+ defer t.listenersMu.Unlock()
+
+ var err error
+ listeners := t.listeners[l.udpAddr]
+ if len(listeners) == 1 {
+ // This is the last virtual listener here, so we can close the underlying listener
+ err = l.listener.Close()
+ delete(t.listeners, l.udpAddr)
+ return err
+ }
+
+ for i := 0; i < len(listeners); i++ {
+ // Swap remove
+ if l == listeners[i] {
+ listeners[i] = listeners[len(listeners)-1]
+ listeners = listeners[:len(listeners)-1]
+ t.listeners[l.udpAddr] = listeners
+ break
+ }
+ }
+
+ return nil
+
}
diff --git a/vendor/github.com/libp2p/go-libp2p/p2p/transport/quic/virtuallistener.go b/vendor/github.com/libp2p/go-libp2p/p2p/transport/quic/virtuallistener.go
new file mode 100644
index 000000000..8aa2a0c1e
--- /dev/null
+++ b/vendor/github.com/libp2p/go-libp2p/p2p/transport/quic/virtuallistener.go
@@ -0,0 +1,175 @@
+package libp2pquic
+
+import (
+ "sync"
+
+ tpt "github.com/libp2p/go-libp2p/core/transport"
+ "github.com/libp2p/go-libp2p/p2p/transport/quicreuse"
+
+ ma "github.com/multiformats/go-multiaddr"
+ "github.com/quic-go/quic-go"
+)
+
+const acceptBufferPerVersion = 4
+
+// virtualListener is a listener that exposes a single multiaddr but uses another listener under the hood
+type virtualListener struct {
+ *listener
+ udpAddr string
+ version quic.VersionNumber
+ t *transport
+ acceptRunnner *acceptLoopRunner
+ acceptChan chan acceptVal
+}
+
+var _ tpt.Listener = &virtualListener{}
+
+func (l *virtualListener) Multiaddr() ma.Multiaddr {
+ return l.listener.localMultiaddrs[l.version]
+}
+
+func (l *virtualListener) Close() error {
+ l.acceptRunnner.RmAcceptForVersion(l.version, tpt.ErrListenerClosed)
+ return l.t.CloseVirtualListener(l)
+}
+
+func (l *virtualListener) Accept() (tpt.CapableConn, error) {
+ return l.acceptRunnner.Accept(l.listener, l.version, l.acceptChan)
+}
+
+type acceptVal struct {
+ conn tpt.CapableConn
+ err error
+}
+
+type acceptLoopRunner struct {
+ acceptSem chan struct{}
+
+ muxerMu sync.Mutex
+ muxer map[quic.VersionNumber]chan acceptVal
+ muxerClosed bool
+}
+
+func (r *acceptLoopRunner) AcceptForVersion(v quic.VersionNumber) chan acceptVal {
+ r.muxerMu.Lock()
+ defer r.muxerMu.Unlock()
+
+ ch := make(chan acceptVal, acceptBufferPerVersion)
+
+ if _, ok := r.muxer[v]; ok {
+ panic("unexpected chan already found in accept muxer")
+ }
+
+ r.muxer[v] = ch
+ return ch
+}
+
+func (r *acceptLoopRunner) RmAcceptForVersion(v quic.VersionNumber, err error) {
+ r.muxerMu.Lock()
+ defer r.muxerMu.Unlock()
+
+ if r.muxerClosed {
+ // Already closed, all versions are removed
+ return
+ }
+
+ ch, ok := r.muxer[v]
+ if !ok {
+ panic("expected chan in accept muxer")
+ }
+ ch <- acceptVal{err: err}
+ delete(r.muxer, v)
+}
+
+func (r *acceptLoopRunner) sendErrAndClose(err error) {
+ r.muxerMu.Lock()
+ defer r.muxerMu.Unlock()
+ r.muxerClosed = true
+ for k, ch := range r.muxer {
+ select {
+ case ch <- acceptVal{err: err}:
+ default:
+ }
+ delete(r.muxer, k)
+ close(ch)
+ }
+}
+
+// innerAccept is the inner logic of the Accept loop. Assume caller holds the
+// acceptSemaphore. May return both a nil conn and nil error if it didn't find a
+// conn with the expected version
+func (r *acceptLoopRunner) innerAccept(l *listener, expectedVersion quic.VersionNumber, bufferedConnChan chan acceptVal) (tpt.CapableConn, error) {
+ select {
+ // Check if we have a buffered connection first from an earlier Accept call
+ case v, ok := <-bufferedConnChan:
+ if !ok {
+ return nil, tpt.ErrListenerClosed
+ }
+ return v.conn, v.err
+ default:
+ }
+
+ conn, err := l.Accept()
+
+ if err != nil {
+ r.sendErrAndClose(err)
+ return nil, err
+ }
+
+ _, version, err := quicreuse.FromQuicMultiaddr(conn.RemoteMultiaddr())
+ if err != nil {
+ r.sendErrAndClose(err)
+ return nil, err
+ }
+
+ if version == expectedVersion {
+ return conn, nil
+ }
+
+ // This wasn't the version we were expecting, let's queue it up for a
+ // future Accept call with a different version
+ r.muxerMu.Lock()
+ ch, ok := r.muxer[version]
+ r.muxerMu.Unlock()
+
+ if !ok {
+ // Nothing to handle this connection version. Close it
+ conn.Close()
+ return nil, nil
+ }
+
+ // Non blocking
+ select {
+ case ch <- acceptVal{conn: conn}:
+ default:
+ // accept queue filled up, drop the connection
+ conn.Close()
+ log.Warn("Accept queue filled. Dropping connection.")
+ }
+
+ return nil, nil
+}
+
+func (r *acceptLoopRunner) Accept(l *listener, expectedVersion quic.VersionNumber, bufferedConnChan chan acceptVal) (tpt.CapableConn, error) {
+ for {
+ var conn tpt.CapableConn
+ var err error
+ select {
+ case r.acceptSem <- struct{}{}:
+ conn, err = r.innerAccept(l, expectedVersion, bufferedConnChan)
+ <-r.acceptSem
+
+ if conn == nil && err == nil {
+ // Didn't find a conn for the expected version and there was no error, lets try again
+ continue
+ }
+ case v, ok := <-bufferedConnChan:
+ if !ok {
+ return nil, tpt.ErrListenerClosed
+ }
+ conn = v.conn
+ err = v.err
+ }
+ return conn, err
+ }
+}
diff --git a/vendor/github.com/libp2p/go-libp2p/p2p/transport/quicreuse/config.go b/vendor/github.com/libp2p/go-libp2p/p2p/transport/quicreuse/config.go
new file mode 100644
index 000000000..76a2c8cc4
--- /dev/null
+++ b/vendor/github.com/libp2p/go-libp2p/p2p/transport/quicreuse/config.go
@@ -0,0 +1,25 @@
+package quicreuse
+
+import (
+ "net"
+ "time"
+
+ "github.com/quic-go/quic-go"
+)
+
+var quicConfig = &quic.Config{
+ MaxIncomingStreams: 256,
+ MaxIncomingUniStreams: 5, // allow some unidirectional streams, in case we speak WebTransport
+ MaxStreamReceiveWindow: 10 * (1 << 20), // 10 MB
+ MaxConnectionReceiveWindow: 15 * (1 << 20), // 15 MB
+ RequireAddressValidation: func(net.Addr) bool {
+ // TODO(#1535): require source address validation when under load
+ return false
+ },
+ KeepAlivePeriod: 15 * time.Second,
+ Versions: []quic.VersionNumber{quic.VersionDraft29, quic.Version1},
+ // We don't use datagrams (yet), but this is necessary for WebTransport
+ EnableDatagrams: true,
+ // The multiaddress encodes the QUIC version, thus there's no need to send Version Negotiation packets.
+ DisableVersionNegotiationPackets: true,
+}
diff --git a/vendor/github.com/libp2p/go-libp2p/p2p/transport/quicreuse/connmgr.go b/vendor/github.com/libp2p/go-libp2p/p2p/transport/quicreuse/connmgr.go
new file mode 100644
index 000000000..0e2793eee
--- /dev/null
+++ b/vendor/github.com/libp2p/go-libp2p/p2p/transport/quicreuse/connmgr.go
@@ -0,0 +1,234 @@
+package quicreuse
+
+import (
+ "context"
+ "crypto/tls"
+ "errors"
+ "net"
+ "sync"
+
+ ma "github.com/multiformats/go-multiaddr"
+ manet "github.com/multiformats/go-multiaddr/net"
+ "github.com/quic-go/quic-go"
+ quiclogging "github.com/quic-go/quic-go/logging"
+)
+
+var quicDialContext = quic.DialContext // so we can mock it in tests
+
+type ConnManager struct {
+ reuseUDP4 *reuse
+ reuseUDP6 *reuse
+ enableDraft29 bool
+ enableReuseport bool
+ enableMetrics bool
+
+ serverConfig *quic.Config
+ clientConfig *quic.Config
+
+ connsMu sync.Mutex
+ conns map[string]connListenerEntry
+}
+
+type connListenerEntry struct {
+ refCount int
+ ln *connListener
+}
+
+func NewConnManager(statelessResetKey quic.StatelessResetKey, opts ...Option) (*ConnManager, error) {
+ cm := &ConnManager{
+ enableReuseport: true,
+ enableDraft29: true,
+ conns: make(map[string]connListenerEntry),
+ }
+ for _, o := range opts {
+ if err := o(cm); err != nil {
+ return nil, err
+ }
+ }
+
+ quicConf := quicConfig.Clone()
+ quicConf.StatelessResetKey = &statelessResetKey
+
+ var tracers []quiclogging.Tracer
+ if qlogTracer != nil {
+ tracers = append(tracers, qlogTracer)
+ }
+ if cm.enableMetrics {
+ tracers = append(tracers, newMetricsTracer())
+ }
+ if len(tracers) > 0 {
+ quicConf.Tracer = quiclogging.NewMultiplexedTracer(tracers...)
+ }
+ serverConfig := quicConf.Clone()
+ if !cm.enableDraft29 {
+ serverConfig.Versions = []quic.VersionNumber{quic.Version1}
+ }
+
+ cm.clientConfig = quicConf
+ cm.serverConfig = serverConfig
+ if cm.enableReuseport {
+ cm.reuseUDP4 = newReuse()
+ cm.reuseUDP6 = newReuse()
+ }
+ return cm, nil
+}
+
+func (c *ConnManager) getReuse(network string) (*reuse, error) {
+ switch network {
+ case "udp4":
+ return c.reuseUDP4, nil
+ case "udp6":
+ return c.reuseUDP6, nil
+ default:
+ return nil, errors.New("invalid network: must be either udp4 or udp6")
+ }
+}
+
+func (c *ConnManager) ListenQUIC(addr ma.Multiaddr, tlsConf *tls.Config, allowWindowIncrease func(conn quic.Connection, delta uint64) bool) (Listener, error) {
+ if !c.enableDraft29 {
+ if _, err := addr.ValueForProtocol(ma.P_QUIC); err == nil {
+ return nil, errors.New("can't listen on `/quic` multiaddr (QUIC draft 29 version) when draft 29 support is disabled")
+ }
+ }
+
+ netw, host, err := manet.DialArgs(addr)
+ if err != nil {
+ return nil, err
+ }
+ laddr, err := net.ResolveUDPAddr(netw, host)
+ if err != nil {
+ return nil, err
+ }
+
+ c.connsMu.Lock()
+ defer c.connsMu.Unlock()
+
+ key := laddr.String()
+ entry, ok := c.conns[key]
+ if !ok {
+ conn, err := c.listen(netw, laddr)
+ if err != nil {
+ return nil, err
+ }
+ ln, err := newConnListener(conn, c.serverConfig, c.enableDraft29)
+ if err != nil {
+ return nil, err
+ }
+ key = conn.LocalAddr().String()
+ entry = connListenerEntry{ln: ln}
+ }
+ l, err := entry.ln.Add(tlsConf, allowWindowIncrease, func() { c.onListenerClosed(key) })
+ if err != nil {
+ if entry.refCount <= 0 {
+ entry.ln.Close()
+ }
+ return nil, err
+ }
+ entry.refCount++
+ c.conns[key] = entry
+ return l, nil
+}
+
+func (c *ConnManager) onListenerClosed(key string) {
+ c.connsMu.Lock()
+ defer c.connsMu.Unlock()
+
+ entry := c.conns[key]
+ entry.refCount = entry.refCount - 1
+ if entry.refCount <= 0 {
+ delete(c.conns, key)
+ entry.ln.Close()
+ } else {
+ c.conns[key] = entry
+ }
+}
+
+func (c *ConnManager) listen(network string, laddr *net.UDPAddr) (pConn, error) {
+ if c.enableReuseport {
+ reuse, err := c.getReuse(network)
+ if err != nil {
+ return nil, err
+ }
+ return reuse.Listen(network, laddr)
+ }
+
+ conn, err := net.ListenUDP(network, laddr)
+ if err != nil {
+ return nil, err
+ }
+ return &noreuseConn{conn}, nil
+}
+
+func (c *ConnManager) DialQUIC(ctx context.Context, raddr ma.Multiaddr, tlsConf *tls.Config, allowWindowIncrease func(conn quic.Connection, delta uint64) bool) (quic.Connection, error) {
+ naddr, v, err := FromQuicMultiaddr(raddr)
+ if err != nil {
+ return nil, err
+ }
+ netw, host, err := manet.DialArgs(raddr)
+ if err != nil {
+ return nil, err
+ }
+
+ quicConf := c.clientConfig.Clone()
+ quicConf.AllowConnectionWindowIncrease = allowWindowIncrease
+
+ if v == quic.Version1 {
+ // The endpoint has explicit support for QUIC v1, so we'll only use that version.
+ quicConf.Versions = []quic.VersionNumber{quic.Version1}
+ } else if v == quic.VersionDraft29 {
+ quicConf.Versions = []quic.VersionNumber{quic.VersionDraft29}
+ } else {
+ return nil, errors.New("unknown QUIC version")
+ }
+
+ pconn, err := c.Dial(netw, naddr)
+ if err != nil {
+ return nil, err
+ }
+ conn, err := quicDialContext(ctx, pconn, naddr, host, tlsConf, quicConf)
+ if err != nil {
+ pconn.DecreaseCount()
+ return nil, err
+ }
+ return conn, nil
+}
+
+func (c *ConnManager) Dial(network string, raddr *net.UDPAddr) (pConn, error) {
+ if c.enableReuseport {
+ reuse, err := c.getReuse(network)
+ if err != nil {
+ return nil, err
+ }
+ return reuse.Dial(network, raddr)
+ }
+
+ var laddr *net.UDPAddr
+ switch network {
+ case "udp4":
+ laddr = &net.UDPAddr{IP: net.IPv4zero, Port: 0}
+ case "udp6":
+ laddr = &net.UDPAddr{IP: net.IPv6zero, Port: 0}
+ }
+ conn, err := net.ListenUDP(network, laddr)
+ if err != nil {
+ return nil, err
+ }
+ return &noreuseConn{conn}, nil
+}
+
+func (c *ConnManager) Protocols() []int {
+ if c.enableDraft29 {
+ return []int{ma.P_QUIC, ma.P_QUIC_V1}
+ }
+ return []int{ma.P_QUIC_V1}
+}
+
+func (c *ConnManager) Close() error {
+ if !c.enableReuseport {
+ return nil
+ }
+ if err := c.reuseUDP6.Close(); err != nil {
+ return err
+ }
+ return c.reuseUDP4.Close()
+}
diff --git a/vendor/github.com/libp2p/go-libp2p/p2p/transport/quicreuse/listener.go b/vendor/github.com/libp2p/go-libp2p/p2p/transport/quicreuse/listener.go
new file mode 100644
index 000000000..e7c010171
--- /dev/null
+++ b/vendor/github.com/libp2p/go-libp2p/p2p/transport/quicreuse/listener.go
@@ -0,0 +1,227 @@
+package quicreuse
+
+import (
+ "context"
+ "crypto/tls"
+ "errors"
+ "fmt"
+ "io"
+ "net"
+ "strings"
+ "sync"
+
+ "github.com/libp2p/go-libp2p/core/transport"
+ ma "github.com/multiformats/go-multiaddr"
+ "github.com/quic-go/quic-go"
+)
+
+var quicListen = quic.Listen // so we can mock it in tests
+
+type Listener interface {
+ Accept(context.Context) (quic.Connection, error)
+ Addr() net.Addr
+ Multiaddrs() []ma.Multiaddr
+ io.Closer
+}
+
+type protoConf struct {
+ ln *listener
+ tlsConf *tls.Config
+ allowWindowIncrease func(conn quic.Connection, delta uint64) bool
+}
+
+type connListener struct {
+ l quic.Listener
+ conn pConn
+ running chan struct{}
+ addrs []ma.Multiaddr
+
+ protocolsMu sync.Mutex
+ protocols map[string]protoConf
+}
+
+func newConnListener(c pConn, quicConfig *quic.Config, enableDraft29 bool) (*connListener, error) {
+ localMultiaddrs := make([]ma.Multiaddr, 0, 2)
+ a, err := ToQuicMultiaddr(c.LocalAddr(), quic.Version1)
+ if err != nil {
+ return nil, err
+ }
+ localMultiaddrs = append(localMultiaddrs, a)
+ if enableDraft29 {
+ a, err := ToQuicMultiaddr(c.LocalAddr(), quic.VersionDraft29)
+ if err != nil {
+ return nil, err
+ }
+ localMultiaddrs = append(localMultiaddrs, a)
+ }
+ cl := &connListener{
+ protocols: map[string]protoConf{},
+ running: make(chan struct{}),
+ conn: c,
+ addrs: localMultiaddrs,
+ }
+ tlsConf := &tls.Config{
+ GetConfigForClient: func(info *tls.ClientHelloInfo) (*tls.Config, error) {
+ cl.protocolsMu.Lock()
+ defer cl.protocolsMu.Unlock()
+ for _, proto := range info.SupportedProtos {
+ if entry, ok := cl.protocols[proto]; ok {
+ conf := entry.tlsConf
+ if conf.GetConfigForClient != nil {
+ return conf.GetConfigForClient(info)
+ }
+ return conf, nil
+ }
+ }
+ return nil, fmt.Errorf("no supported protocol found. offered: %+v", info.SupportedProtos)
+ },
+ }
+ quicConf := quicConfig.Clone()
+ quicConf.AllowConnectionWindowIncrease = cl.allowWindowIncrease
+ ln, err := quicListen(c, tlsConf, quicConf)
+ if err != nil {
+ return nil, err
+ }
+ cl.l = ln
+ go cl.Run() // This goroutine shuts down once the underlying quic.Listener is closed (or returns an error).
+ return cl, nil
+}
+
+func (l *connListener) allowWindowIncrease(conn quic.Connection, delta uint64) bool {
+ l.protocolsMu.Lock()
+ defer l.protocolsMu.Unlock()
+
+ conf, ok := l.protocols[conn.ConnectionState().TLS.ConnectionState.NegotiatedProtocol]
+ if !ok {
+ return false
+ }
+ return conf.allowWindowIncrease(conn, delta)
+}
+
+func (l *connListener) Add(tlsConf *tls.Config, allowWindowIncrease func(conn quic.Connection, delta uint64) bool, onRemove func()) (Listener, error) {
+ l.protocolsMu.Lock()
+ defer l.protocolsMu.Unlock()
+
+ if len(tlsConf.NextProtos) == 0 {
+ return nil, errors.New("no ALPN found in tls.Config")
+ }
+
+ for _, proto := range tlsConf.NextProtos {
+ if _, ok := l.protocols[proto]; ok {
+ return nil, fmt.Errorf("already listening for protocol %s", proto)
+ }
+ }
+
+ ln := newSingleListener(l.l.Addr(), l.addrs, func() {
+ l.protocolsMu.Lock()
+ for _, proto := range tlsConf.NextProtos {
+ delete(l.protocols, proto)
+ }
+ l.protocolsMu.Unlock()
+ onRemove()
+ }, l.running)
+ for _, proto := range tlsConf.NextProtos {
+ l.protocols[proto] = protoConf{
+ ln: ln,
+ tlsConf: tlsConf,
+ allowWindowIncrease: allowWindowIncrease,
+ }
+ }
+ return ln, nil
+}
+
+func (l *connListener) Run() error {
+ defer close(l.running)
+ defer l.conn.DecreaseCount()
+ for {
+ conn, err := l.l.Accept(context.Background())
+ if err != nil {
+ if errors.Is(err, quic.ErrServerClosed) || strings.Contains(err.Error(), "use of closed network connection") {
+ return transport.ErrListenerClosed
+ }
+ return err
+ }
+ proto := conn.ConnectionState().TLS.NegotiatedProtocol
+
+ l.protocolsMu.Lock()
+ ln, ok := l.protocols[proto]
+ if !ok {
+ l.protocolsMu.Unlock()
+ return fmt.Errorf("negotiated unknown protocol: %s", proto)
+ }
+ ln.ln.add(conn)
+ l.protocolsMu.Unlock()
+ }
+}
+
+func (l *connListener) Close() error {
+ err := l.l.Close()
+ <-l.running // wait for Run to return
+ return err
+}
+
+const queueLen = 16
+
+// A listener for a single ALPN protocol (set).
+type listener struct {
+ queue chan quic.Connection
+ acceptLoopRunning chan struct{}
+ addr net.Addr
+ addrs []ma.Multiaddr
+ remove func()
+ closeOnce sync.Once
+}
+
+var _ Listener = &listener{}
+
+func newSingleListener(addr net.Addr, addrs []ma.Multiaddr, remove func(), running chan struct{}) *listener {
+ return &listener{
+ queue: make(chan quic.Connection, queueLen),
+ acceptLoopRunning: running,
+ remove: remove,
+ addr: addr,
+ addrs: addrs,
+ }
+}
+
+func (l *listener) add(c quic.Connection) {
+ select {
+ case l.queue <- c:
+ default:
+ c.CloseWithError(1, "queue full")
+ }
+}
+
+func (l *listener) Accept(ctx context.Context) (quic.Connection, error) {
+ select {
+ case <-ctx.Done():
+ return nil, ctx.Err()
+ case <-l.acceptLoopRunning:
+ return nil, transport.ErrListenerClosed
+ case c, ok := <-l.queue:
+ if !ok {
+ return nil, transport.ErrListenerClosed
+ }
+ return c, nil
+ }
+}
+
+func (l *listener) Addr() net.Addr {
+ return l.addr
+}
+
+func (l *listener) Multiaddrs() []ma.Multiaddr {
+ return l.addrs
+}
+
+func (l *listener) Close() error {
+ l.closeOnce.Do(func() {
+ l.remove()
+ close(l.queue)
+ // drain the queue
+ for conn := range l.queue {
+ conn.CloseWithError(1, "closing")
+ }
+ })
+ return nil
+}
diff --git a/vendor/github.com/libp2p/go-libp2p/p2p/transport/quicreuse/options.go b/vendor/github.com/libp2p/go-libp2p/p2p/transport/quicreuse/options.go
new file mode 100644
index 000000000..a700a0544
--- /dev/null
+++ b/vendor/github.com/libp2p/go-libp2p/p2p/transport/quicreuse/options.go
@@ -0,0 +1,28 @@
+package quicreuse
+
+type Option func(*ConnManager) error
+
+func DisableReuseport() Option {
+ return func(m *ConnManager) error {
+ m.enableReuseport = false
+ return nil
+ }
+}
+
+// DisableDraft29 disables support for QUIC draft-29.
+// This option should be set, unless support for this legacy QUIC version is needed for backwards compatibility.
+// Support for QUIC draft-29 is already deprecated and will be removed in the future, see https://github.com/libp2p/go-libp2p/issues/1841.
+func DisableDraft29() Option {
+ return func(m *ConnManager) error {
+ m.enableDraft29 = false
+ return nil
+ }
+}
+
+// EnableMetrics enables Prometheus metrics collection.
+func EnableMetrics() Option {
+ return func(m *ConnManager) error {
+ m.enableMetrics = true
+ return nil
+ }
+}
diff --git a/vendor/github.com/libp2p/go-libp2p/p2p/transport/quicreuse/quic_multiaddr.go b/vendor/github.com/libp2p/go-libp2p/p2p/transport/quicreuse/quic_multiaddr.go
new file mode 100644
index 000000000..12eb7d8ab
--- /dev/null
+++ b/vendor/github.com/libp2p/go-libp2p/p2p/transport/quicreuse/quic_multiaddr.go
@@ -0,0 +1,64 @@
+package quicreuse
+
+import (
+ "errors"
+ "net"
+
+ ma "github.com/multiformats/go-multiaddr"
+ manet "github.com/multiformats/go-multiaddr/net"
+ "github.com/quic-go/quic-go"
+)
+
+var (
+ quicV1MA = ma.StringCast("/quic-v1")
+ quicDraft29MA = ma.StringCast("/quic")
+)
+
+func ToQuicMultiaddr(na net.Addr, version quic.VersionNumber) (ma.Multiaddr, error) {
+ udpMA, err := manet.FromNetAddr(na)
+ if err != nil {
+ return nil, err
+ }
+ switch version {
+ case quic.VersionDraft29:
+ return udpMA.Encapsulate(quicDraft29MA), nil
+ case quic.Version1:
+ return udpMA.Encapsulate(quicV1MA), nil
+ default:
+ return nil, errors.New("unknown QUIC version")
+ }
+}
+
+func FromQuicMultiaddr(addr ma.Multiaddr) (*net.UDPAddr, quic.VersionNumber, error) {
+ var version quic.VersionNumber
+ var partsBeforeQUIC []ma.Multiaddr
+ ma.ForEach(addr, func(c ma.Component) bool {
+ switch c.Protocol().Code {
+ case ma.P_QUIC:
+ version = quic.VersionDraft29
+ return false
+ case ma.P_QUIC_V1:
+ version = quic.Version1
+ return false
+ default:
+ partsBeforeQUIC = append(partsBeforeQUIC, &c)
+ return true
+ }
+ })
+ if len(partsBeforeQUIC) == 0 {
+ return nil, version, errors.New("no addr before QUIC component")
+ }
+ if version == 0 {
+ // Not found
+ return nil, version, errors.New("unknown QUIC version")
+ }
+ netAddr, err := manet.ToNetAddr(ma.Join(partsBeforeQUIC...))
+ if err != nil {
+ return nil, version, err
+ }
+ udpAddr, ok := netAddr.(*net.UDPAddr)
+ if !ok {
+ return nil, 0, errors.New("not a *net.UDPAddr")
+ }
+ return udpAddr, version, nil
+}
diff --git a/vendor/github.com/libp2p/go-libp2p/p2p/transport/quic/reuse.go b/vendor/github.com/libp2p/go-libp2p/p2p/transport/quicreuse/reuse.go
similarity index 63%
rename from vendor/github.com/libp2p/go-libp2p/p2p/transport/quic/reuse.go
rename to vendor/github.com/libp2p/go-libp2p/p2p/transport/quicreuse/reuse.go
index 43eb2cd36..cc90038ef 100644
--- a/vendor/github.com/libp2p/go-libp2p/p2p/transport/quic/reuse.go
+++ b/vendor/github.com/libp2p/go-libp2p/p2p/transport/quicreuse/reuse.go
@@ -1,4 +1,4 @@
-package libp2pquic
+package quicreuse
import (
"net"
@@ -9,6 +9,23 @@ import (
"github.com/libp2p/go-netroute"
)
+type pConn interface {
+ net.PacketConn
+
+ // count conn reference
+ DecreaseCount()
+ IncreaseCount()
+}
+
+type noreuseConn struct {
+ *net.UDPConn
+}
+
+func (c *noreuseConn) IncreaseCount() {}
+func (c *noreuseConn) DecreaseCount() {
+ c.UDPConn.Close()
+}
+
// Constant. Defined as variables to simplify testing.
var (
garbageCollectInterval = 30 * time.Second
@@ -57,16 +74,21 @@ type reuse struct {
routes routing.Router
unicast map[string] /* IP.String() */ map[int] /* port */ *reuseConn
- // global contains connections that are listening on 0.0.0.0 / ::
- global map[int]*reuseConn
+ // globalListeners contains connections that are listening on 0.0.0.0 / ::
+ globalListeners map[int]*reuseConn
+ // globalDialers contains connections that we've dialed out from. These connections are listening on 0.0.0.0 / ::
+ // On Dial, connections are reused from this map if no connection is available in the globalListeners
+ // On Listen, connections are reused from this map if the requested port is 0, and then moved to globalListeners
+ globalDialers map[int]*reuseConn
}
func newReuse() *reuse {
r := &reuse{
- unicast: make(map[string]map[int]*reuseConn),
- global: make(map[int]*reuseConn),
- closeChan: make(chan struct{}),
- gcStopChan: make(chan struct{}),
+ unicast: make(map[string]map[int]*reuseConn),
+ globalListeners: make(map[int]*reuseConn),
+ globalDialers: make(map[int]*reuseConn),
+ closeChan: make(chan struct{}),
+ gcStopChan: make(chan struct{}),
}
go r.gc()
return r
@@ -75,7 +97,10 @@ func newReuse() *reuse {
func (r *reuse) gc() {
defer func() {
r.mutex.Lock()
- for _, conn := range r.global {
+ for _, conn := range r.globalListeners {
+ conn.Close()
+ }
+ for _, conn := range r.globalDialers {
conn.Close()
}
for _, conns := range r.unicast {
@@ -93,12 +118,19 @@ func (r *reuse) gc() {
select {
case <-r.closeChan:
return
- case now := <-ticker.C:
+ case <-ticker.C:
+ now := time.Now()
r.mutex.Lock()
- for key, conn := range r.global {
+ for key, conn := range r.globalListeners {
+ if conn.ShouldGarbageCollect(now) {
+ conn.Close()
+ delete(r.globalListeners, key)
+ }
+ }
+ for key, conn := range r.globalDialers {
if conn.ShouldGarbageCollect(now) {
conn.Close()
- delete(r.global, key)
+ delete(r.globalDialers, key)
}
}
for ukey, conns := range r.unicast {
@@ -167,7 +199,12 @@ func (r *reuse) dialLocked(network string, source *net.IP) (*reuseConn, error) {
// Use a connection listening on 0.0.0.0 (or ::).
// Again, we don't care about the port number.
- for _, conn := range r.global {
+ for _, conn := range r.globalListeners {
+ return conn, nil
+ }
+
+ // Use a connection we've previously dialed from
+ for _, conn := range r.globalDialers {
return conn, nil
}
@@ -185,29 +222,59 @@ func (r *reuse) dialLocked(network string, source *net.IP) (*reuseConn, error) {
return nil, err
}
rconn := newReuseConn(conn)
- r.global[conn.LocalAddr().(*net.UDPAddr).Port] = rconn
+ r.globalDialers[conn.LocalAddr().(*net.UDPAddr).Port] = rconn
return rconn, nil
}
func (r *reuse) Listen(network string, laddr *net.UDPAddr) (*reuseConn, error) {
+ r.mutex.Lock()
+ defer r.mutex.Unlock()
+
+ // Check if we can reuse a connection we have already dialed out from.
+ // We reuse a connection from globalDialers when the requested port is 0 or the requested
+ // port is already in the globalDialers.
+ // If we are reusing a connection from globalDialers, we move the globalDialers entry to
+ // globalListeners
+ if laddr.IP.IsUnspecified() {
+ var rconn *reuseConn
+ var localAddr *net.UDPAddr
+
+ if laddr.Port == 0 {
+ // the requested port is 0, we can reuse any connection
+ for _, conn := range r.globalDialers {
+ rconn = conn
+ localAddr = rconn.UDPConn.LocalAddr().(*net.UDPAddr)
+ delete(r.globalDialers, localAddr.Port)
+ break
+ }
+ } else if _, ok := r.globalDialers[laddr.Port]; ok {
+ rconn = r.globalDialers[laddr.Port]
+ localAddr = rconn.UDPConn.LocalAddr().(*net.UDPAddr)
+ delete(r.globalDialers, localAddr.Port)
+ }
+ // found a match
+ if rconn != nil {
+ rconn.IncreaseCount()
+ r.globalListeners[localAddr.Port] = rconn
+ return rconn, nil
+ }
+ }
+
conn, err := net.ListenUDP(network, laddr)
if err != nil {
return nil, err
}
localAddr := conn.LocalAddr().(*net.UDPAddr)
-
rconn := newReuseConn(conn)
- rconn.IncreaseCount()
- r.mutex.Lock()
- defer r.mutex.Unlock()
+ rconn.IncreaseCount()
// Deal with listen on a global address
if localAddr.IP.IsUnspecified() {
// The kernel already checked that the laddr is not already listen
// so we need not check here (when we create ListenUDP).
- r.global[localAddr.Port] = rconn
- return rconn, err
+ r.globalListeners[localAddr.Port] = rconn
+ return rconn, nil
}
// Deal with listen on a unicast address
@@ -221,7 +288,7 @@ func (r *reuse) Listen(network string, laddr *net.UDPAddr) (*reuseConn, error) {
// The kernel already checked that the laddr is not already listen
// so we need not check here (when we create ListenUDP).
r.unicast[localAddr.IP.String()][localAddr.Port] = rconn
- return rconn, err
+ return rconn, nil
}
func (r *reuse) Close() error {
diff --git a/vendor/github.com/libp2p/go-libp2p/p2p/transport/quic/tracer.go b/vendor/github.com/libp2p/go-libp2p/p2p/transport/quicreuse/tracer.go
similarity index 78%
rename from vendor/github.com/libp2p/go-libp2p/p2p/transport/quic/tracer.go
rename to vendor/github.com/libp2p/go-libp2p/p2p/transport/quicreuse/tracer.go
index c5bbf7149..46a683cbc 100644
--- a/vendor/github.com/libp2p/go-libp2p/p2p/transport/quic/tracer.go
+++ b/vendor/github.com/libp2p/go-libp2p/p2p/transport/quicreuse/tracer.go
@@ -1,4 +1,4 @@
-package libp2pquic
+package quicreuse
import (
"bufio"
@@ -7,22 +7,22 @@ import (
"os"
"time"
+ golog "github.com/ipfs/go-log/v2"
"github.com/klauspost/compress/zstd"
-
- "github.com/lucas-clemente/quic-go/logging"
- "github.com/lucas-clemente/quic-go/qlog"
+ "github.com/quic-go/quic-go/logging"
+ "github.com/quic-go/quic-go/qlog"
)
-var tracer logging.Tracer
+var log = golog.Logger("quic-utils")
+
+// QLOGTracer holds a qlog tracer, if qlogging is enabled (enabled using the QLOGDIR environment variable).
+// Otherwise it is nil.
+var qlogTracer logging.Tracer
func init() {
- tracers := []logging.Tracer{&metricsTracer{}}
if qlogDir := os.Getenv("QLOGDIR"); len(qlogDir) > 0 {
- if qlogger := initQlogger(qlogDir); qlogger != nil {
- tracers = append(tracers, qlogger)
- }
+ qlogTracer = initQlogger(qlogDir)
}
- tracer = logging.NewMultiplexedTracer(tracers...)
}
func initQlogger(qlogDir string) logging.Tracer {
@@ -62,7 +62,9 @@ func newQlogger(qlogDir string, role logging.Perspective, connID []byte) io.Writ
return &qlogger{
f: f,
filename: finalFilename,
- Writer: bufio.NewWriter(f),
+ // The size of a qlog file for a raw file download is ~2/3 of the amount of data transferred.
+ // bufio.NewWriter creates a buffer with a buffer of only 4 kB, leading to a large number of syscalls.
+ Writer: bufio.NewWriterSize(f, 128<<10),
}
}
@@ -80,7 +82,7 @@ func (l *qlogger) Close() error {
return err
}
defer f.Close()
- buf := bufio.NewWriter(f)
+ buf := bufio.NewWriterSize(f, 128<<10)
c, err := zstd.NewWriter(buf, zstd.WithEncoderLevel(zstd.SpeedFastest), zstd.WithWindowSize(32*1024))
if err != nil {
return err
diff --git a/vendor/github.com/libp2p/go-libp2p/p2p/transport/quic/tracer_metrics.go b/vendor/github.com/libp2p/go-libp2p/p2p/transport/quicreuse/tracer_metrics.go
similarity index 84%
rename from vendor/github.com/libp2p/go-libp2p/p2p/transport/quic/tracer_metrics.go
rename to vendor/github.com/libp2p/go-libp2p/p2p/transport/quicreuse/tracer_metrics.go
index cbf2701fe..03e73fd25 100644
--- a/vendor/github.com/libp2p/go-libp2p/p2p/transport/quic/tracer_metrics.go
+++ b/vendor/github.com/libp2p/go-libp2p/p2p/transport/quicreuse/tracer_metrics.go
@@ -1,4 +1,4 @@
-package libp2pquic
+package quicreuse
import (
"context"
@@ -9,9 +9,8 @@ import (
"time"
"github.com/prometheus/client_golang/prometheus"
-
- "github.com/lucas-clemente/quic-go"
- "github.com/lucas-clemente/quic-go/logging"
+ "github.com/quic-go/quic-go"
+ "github.com/quic-go/quic-go/logging"
)
var (
@@ -85,7 +84,9 @@ func (c *aggregatingCollector) RemoveConn(id string) {
var collector *aggregatingCollector
-func init() {
+var initMetricsOnce sync.Once
+
+func initMetrics() {
const (
direction = "direction"
encLevel = "encryption_level"
@@ -167,10 +168,17 @@ func init() {
prometheus.MustRegister(collector)
}
-type metricsTracer struct{}
+type metricsTracer struct {
+ logging.NullTracer
+}
var _ logging.Tracer = &metricsTracer{}
+func newMetricsTracer() *metricsTracer {
+ initMetricsOnce.Do(func() { initMetrics() })
+ return &metricsTracer{}
+}
+
func (m *metricsTracer) TracerForConnection(_ context.Context, p logging.Perspective, connID logging.ConnectionID) logging.ConnectionTracer {
return &metricsConnTracer{perspective: p, connID: connID}
}
@@ -179,10 +187,9 @@ func (m *metricsTracer) SentPacket(_ net.Addr, _ *logging.Header, size logging.B
bytesTransferred.WithLabelValues("sent").Add(float64(size))
}
-func (m *metricsTracer) DroppedPacket(addr net.Addr, packetType logging.PacketType, count logging.ByteCount, reason logging.PacketDropReason) {
-}
-
type metricsConnTracer struct {
+ logging.NullConnectionTracer
+
perspective logging.Perspective
startTime time.Time
connID logging.ConnectionID
@@ -224,9 +231,6 @@ func (m *metricsConnTracer) StartedConnection(net.Addr, net.Addr, logging.Connec
collector.AddConn(m.connID.String(), m)
}
-func (m *metricsConnTracer) NegotiatedVersion(chosen quic.VersionNumber, clientVersions []quic.VersionNumber, serverVersions []quic.VersionNumber) {
-}
-
func (m *metricsConnTracer) ClosedConnection(e error) {
var (
applicationErr *quic.ApplicationError
@@ -264,16 +268,13 @@ func (m *metricsConnTracer) ClosedConnection(e error) {
}
connErrors.WithLabelValues(side, desc).Inc()
}
-func (m *metricsConnTracer) SentTransportParameters(parameters *logging.TransportParameters) {}
-func (m *metricsConnTracer) ReceivedTransportParameters(parameters *logging.TransportParameters) {}
-func (m *metricsConnTracer) RestoredTransportParameters(parameters *logging.TransportParameters) {}
func (m *metricsConnTracer) SentPacket(hdr *logging.ExtendedHeader, size logging.ByteCount, _ *logging.AckFrame, _ []logging.Frame) {
bytesTransferred.WithLabelValues("sent").Add(float64(size))
sentPackets.WithLabelValues(m.getEncLevel(logging.PacketTypeFromHeader(&hdr.Header))).Inc()
}
-func (m *metricsConnTracer) ReceivedVersionNegotiationPacket(hdr *logging.Header, v []logging.VersionNumber) {
- bytesTransferred.WithLabelValues("rcvd").Add(float64(hdr.ParsedLen() + logging.ByteCount(4*len(v))))
+func (m *metricsConnTracer) ReceivedVersionNegotiationPacket(dst, src logging.ArbitraryLenConnectionID, v []logging.VersionNumber) {
+ bytesTransferred.WithLabelValues("rcvd").Add(1 /* header form byte */ + 4 /* version number */ + 2 /* src and dest conn id length fields */ + float64(dst.Len()+src.Len()) + float64(4*len(v)))
rcvdPackets.WithLabelValues("Version Negotiation").Inc()
}
@@ -286,7 +287,7 @@ func (m *metricsConnTracer) ReceivedPacket(hdr *logging.ExtendedHeader, size log
rcvdPackets.WithLabelValues(m.getEncLevel(logging.PacketTypeFromHeader(&hdr.Header))).Inc()
}
-func (m *metricsConnTracer) BufferedPacket(packetType logging.PacketType) {
+func (m *metricsConnTracer) BufferedPacket(packetType logging.PacketType, _ logging.ByteCount) {
bufferedPackets.WithLabelValues(m.getEncLevel(packetType)).Inc()
}
@@ -329,8 +330,6 @@ func (m *metricsConnTracer) UpdatedMetrics(rttStats *logging.RTTStats, cwnd, byt
m.mutex.Unlock()
}
-func (m *metricsConnTracer) AcknowledgedPacket(logging.EncryptionLevel, logging.PacketNumber) {}
-
func (m *metricsConnTracer) LostPacket(level logging.EncryptionLevel, _ logging.PacketNumber, r logging.PacketLossReason) {
var reason string
switch r {
@@ -344,23 +343,11 @@ func (m *metricsConnTracer) LostPacket(level logging.EncryptionLevel, _ logging.
lostPackets.WithLabelValues(level.String(), reason).Inc()
}
-func (m *metricsConnTracer) UpdatedCongestionState(state logging.CongestionState) {}
-func (m *metricsConnTracer) UpdatedPTOCount(value uint32) {}
-func (m *metricsConnTracer) UpdatedKeyFromTLS(level logging.EncryptionLevel, perspective logging.Perspective) {
-}
-func (m *metricsConnTracer) UpdatedKey(generation logging.KeyPhase, remote bool) {}
func (m *metricsConnTracer) DroppedEncryptionLevel(level logging.EncryptionLevel) {
if level == logging.EncryptionHandshake {
m.handleHandshakeComplete()
}
}
-func (m *metricsConnTracer) DroppedKey(generation logging.KeyPhase) {}
-func (m *metricsConnTracer) SetLossTimer(timerType logging.TimerType, level logging.EncryptionLevel, time time.Time) {
-}
-
-func (m *metricsConnTracer) LossTimerExpired(timerType logging.TimerType, level logging.EncryptionLevel) {
-}
-func (m *metricsConnTracer) LossTimerCanceled() {}
func (m *metricsConnTracer) Close() {
if m.handshakeComplete {
@@ -371,8 +358,6 @@ func (m *metricsConnTracer) Close() {
collector.RemoveConn(m.connID.String())
}
-func (m *metricsConnTracer) Debug(name, msg string) {}
-
func (m *metricsConnTracer) handleHandshakeComplete() {
m.handshakeComplete = true
newConns.WithLabelValues(m.getDirection(), "true").Inc()
diff --git a/vendor/github.com/libp2p/go-libp2p/p2p/transport/tcp/metrics.go b/vendor/github.com/libp2p/go-libp2p/p2p/transport/tcp/metrics.go
index 0ae24801a..fc2add49b 100644
--- a/vendor/github.com/libp2p/go-libp2p/p2p/transport/tcp/metrics.go
+++ b/vendor/github.com/libp2p/go-libp2p/p2p/transport/tcp/metrics.go
@@ -26,7 +26,9 @@ const collectFrequency = 10 * time.Second
var collector *aggregatingCollector
-func init() {
+var initMetricsOnce sync.Once
+
+func initMetrics() {
segsSentDesc = prometheus.NewDesc("tcp_sent_segments_total", "TCP segments sent", nil, nil)
segsRcvdDesc = prometheus.NewDesc("tcp_rcvd_segments_total", "TCP segments received", nil, nil)
bytesSentDesc = prometheus.NewDesc("tcp_sent_bytes", "TCP bytes sent", nil, nil)
@@ -210,6 +212,7 @@ type tracingConn struct {
}
func newTracingConn(c manet.Conn, isClient bool) (*tracingConn, error) {
+ initMetricsOnce.Do(func() { initMetrics() })
conn, err := tcp.NewConn(c)
if err != nil {
return nil, err
diff --git a/vendor/github.com/libp2p/go-libp2p/p2p/transport/tcp/tcp.go b/vendor/github.com/libp2p/go-libp2p/p2p/transport/tcp/tcp.go
index 9e6f53862..f277b3f8f 100644
--- a/vendor/github.com/libp2p/go-libp2p/p2p/transport/tcp/tcp.go
+++ b/vendor/github.com/libp2p/go-libp2p/p2p/transport/tcp/tcp.go
@@ -106,14 +106,21 @@ func WithConnectionTimeout(d time.Duration) Option {
}
}
+func WithMetrics() Option {
+ return func(tr *TcpTransport) error {
+ tr.enableMetrics = true
+ return nil
+ }
+}
+
// TcpTransport is the TCP transport.
type TcpTransport struct {
// Connection upgrader for upgrading insecure stream connections to
// secure multiplex connections.
upgrader transport.Upgrader
- // Explicitly disable reuseport.
- disableReuseport bool
+ disableReuseport bool // Explicitly disable reuseport.
+ enableMetrics bool
// TCP connect timeout
connectTimeout time.Duration
@@ -129,7 +136,7 @@ var _ transport.Transport = &TcpTransport{}
// created. It represents an entire TCP stack (though it might not necessarily be).
func NewTCPTransport(upgrader transport.Upgrader, rcmgr network.ResourceManager, opts ...Option) (*TcpTransport, error) {
if rcmgr == nil {
- rcmgr = network.NullResourceManager
+ rcmgr = &network.NullResourceManager{}
}
tr := &TcpTransport{
upgrader: upgrader,
@@ -174,14 +181,22 @@ func (t *TcpTransport) Dial(ctx context.Context, raddr ma.Multiaddr, p peer.ID)
log.Debugw("resource manager blocked outgoing connection", "peer", p, "addr", raddr, "error", err)
return nil, err
}
+
+ c, err := t.dialWithScope(ctx, raddr, p, connScope)
+ if err != nil {
+ connScope.Done()
+ return nil, err
+ }
+ return c, nil
+}
+
+func (t *TcpTransport) dialWithScope(ctx context.Context, raddr ma.Multiaddr, p peer.ID, connScope network.ConnManagementScope) (transport.CapableConn, error) {
if err := connScope.SetPeer(p); err != nil {
log.Debugw("resource manager blocked outgoing connection for peer", "peer", p, "addr", raddr, "error", err)
- connScope.Done()
return nil, err
}
conn, err := t.maDial(ctx, raddr)
if err != nil {
- connScope.Done()
return nil, err
}
// Set linger to 0 so we never get stuck in the TIME-WAIT state. When
@@ -189,10 +204,13 @@ func (t *TcpTransport) Dial(ctx context.Context, raddr ma.Multiaddr, p peer.ID)
// This means we can immediately reuse the 5-tuple and reconnect.
tryLinger(conn, 0)
tryKeepAlive(conn, true)
- c, err := newTracingConn(conn, true)
- if err != nil {
- connScope.Done()
- return nil, err
+ c := conn
+ if t.enableMetrics {
+ var err error
+ c, err = newTracingConn(conn, true)
+ if err != nil {
+ return nil, err
+ }
}
direction := network.DirOutbound
if ok, isClient, _ := network.GetSimultaneousConnect(ctx); ok && !isClient {
@@ -219,7 +237,9 @@ func (t *TcpTransport) Listen(laddr ma.Multiaddr) (transport.Listener, error) {
if err != nil {
return nil, err
}
- list = newTracingListener(&tcpListener{list, 0})
+ if t.enableMetrics {
+ list = newTracingListener(&tcpListener{list, 0})
+ }
return t.upgrader.UpgradeListener(t, list), nil
}
diff --git a/vendor/github.com/libp2p/go-libp2p/p2p/transport/websocket/addrs.go b/vendor/github.com/libp2p/go-libp2p/p2p/transport/websocket/addrs.go
index 608eb2d0d..5fea8567b 100644
--- a/vendor/github.com/libp2p/go-libp2p/p2p/transport/websocket/addrs.go
+++ b/vendor/github.com/libp2p/go-libp2p/p2p/transport/websocket/addrs.go
@@ -105,23 +105,17 @@ func ParseWebsocketNetAddr(a net.Addr) (ma.Multiaddr, error) {
}
func parseMultiaddr(maddr ma.Multiaddr) (*url.URL, error) {
- // Only look at the _last_ component.
- maddr, wscomponent := ma.SplitLast(maddr)
- if maddr == nil || wscomponent == nil {
- return nil, fmt.Errorf("websocket addrs need at least two components")
+ parsed, err := parseWebsocketMultiaddr(maddr)
+ if err != nil {
+ return nil, err
}
- var scheme string
- switch wscomponent.Protocol().Code {
- case ma.P_WS:
- scheme = "ws"
- case ma.P_WSS:
+ scheme := "ws"
+ if parsed.isWSS {
scheme = "wss"
- default:
- return nil, fmt.Errorf("not a websocket multiaddr")
}
- network, host, err := manet.DialArgs(maddr)
+ network, host, err := manet.DialArgs(parsed.restMultiaddr)
if err != nil {
return nil, err
}
@@ -135,3 +129,47 @@ func parseMultiaddr(maddr ma.Multiaddr) (*url.URL, error) {
Host: host,
}, nil
}
+
+type parsedWebsocketMultiaddr struct {
+ isWSS bool
+ // sni is the SNI value for the TLS handshake, and for setting HTTP Host header
+ sni *ma.Component
+ // the rest of the multiaddr before the /tls/sni/example.com/ws or /ws or /wss
+ restMultiaddr ma.Multiaddr
+}
+
+func parseWebsocketMultiaddr(a ma.Multiaddr) (parsedWebsocketMultiaddr, error) {
+ out := parsedWebsocketMultiaddr{}
+ // First check if we have a WSS component. If so we'll canonicalize it into a /tls/ws
+ withoutWss := a.Decapsulate(wssComponent)
+ if !withoutWss.Equal(a) {
+ a = withoutWss.Encapsulate(tlsWsComponent)
+ }
+
+ // Remove the ws component
+ withoutWs := a.Decapsulate(wsComponent)
+ if withoutWs.Equal(a) {
+ return out, fmt.Errorf("not a websocket multiaddr")
+ }
+
+ rest := withoutWs
+ // If this is not a wss then withoutWs is the rest of the multiaddr
+ out.restMultiaddr = withoutWs
+ for {
+ var head *ma.Component
+ rest, head = ma.SplitLast(rest)
+ if head == nil || rest == nil {
+ break
+ }
+
+ if head.Protocol().Code == ma.P_SNI {
+ out.sni = head
+ } else if head.Protocol().Code == ma.P_TLS {
+ out.isWSS = true
+ out.restMultiaddr = rest
+ break
+ }
+ }
+
+ return out, nil
+}
diff --git a/vendor/github.com/libp2p/go-libp2p/p2p/transport/websocket/conn.go b/vendor/github.com/libp2p/go-libp2p/p2p/transport/websocket/conn.go
index 6f2e0a766..30b70055d 100644
--- a/vendor/github.com/libp2p/go-libp2p/p2p/transport/websocket/conn.go
+++ b/vendor/github.com/libp2p/go-libp2p/p2p/transport/websocket/conn.go
@@ -6,6 +6,9 @@ import (
"sync"
"time"
+ "github.com/libp2p/go-libp2p/core/network"
+ "github.com/libp2p/go-libp2p/core/transport"
+
ws "github.com/gorilla/websocket"
)
@@ -149,3 +152,13 @@ func (c *Conn) SetWriteDeadline(t time.Time) error {
return c.Conn.SetWriteDeadline(t)
}
+
+type capableConn struct {
+ transport.CapableConn
+}
+
+func (c *capableConn) ConnState() network.ConnectionState {
+ cs := c.CapableConn.ConnState()
+ cs.Transport = "websocket"
+ return cs
+}
diff --git a/vendor/github.com/libp2p/go-libp2p/p2p/transport/websocket/listener.go b/vendor/github.com/libp2p/go-libp2p/p2p/transport/websocket/listener.go
index b94bed798..d7a1b885b 100644
--- a/vendor/github.com/libp2p/go-libp2p/p2p/transport/websocket/listener.go
+++ b/vendor/github.com/libp2p/go-libp2p/p2p/transport/websocket/listener.go
@@ -5,19 +5,20 @@ import (
"fmt"
"net"
"net/http"
+ "strings"
+
+ "github.com/libp2p/go-libp2p/core/transport"
ma "github.com/multiformats/go-multiaddr"
manet "github.com/multiformats/go-multiaddr/net"
)
-var (
- wsma = ma.StringCast("/ws")
- wssma = ma.StringCast("/wss")
-)
-
type listener struct {
nl net.Listener
server http.Server
+ // The Go standard library sets the http.Server.TLSConfig no matter if this is a WS or WSS,
+ // so we can't rely on checking if server.TLSConfig is set.
+ isWss bool
laddr ma.Multiaddr
@@ -25,16 +26,31 @@ type listener struct {
incoming chan *Conn
}
+func (pwma *parsedWebsocketMultiaddr) toMultiaddr() ma.Multiaddr {
+ if !pwma.isWSS {
+ return pwma.restMultiaddr.Encapsulate(wsComponent)
+ }
+
+ if pwma.sni == nil {
+ return pwma.restMultiaddr.Encapsulate(tlsComponent).Encapsulate(wsComponent)
+ }
+
+ return pwma.restMultiaddr.Encapsulate(tlsComponent).Encapsulate(pwma.sni).Encapsulate(wsComponent)
+}
+
// newListener creates a new listener from a raw net.Listener.
// tlsConf may be nil (for unencrypted websockets).
func newListener(a ma.Multiaddr, tlsConf *tls.Config) (*listener, error) {
- // Only look at the _last_ component.
- maddr, wscomponent := ma.SplitLast(a)
- isWSS := wscomponent.Equal(wssma)
- if isWSS && tlsConf == nil {
+ parsed, err := parseWebsocketMultiaddr(a)
+ if err != nil {
+ return nil, err
+ }
+
+ if parsed.isWSS && tlsConf == nil {
return nil, fmt.Errorf("cannot listen on wss address %s without a tls.Config", a)
}
- lnet, lnaddr, err := manet.DialArgs(maddr)
+
+ lnet, lnaddr, err := manet.DialArgs(parsed.restMultiaddr)
if err != nil {
return nil, err
}
@@ -54,15 +70,17 @@ func newListener(a ma.Multiaddr, tlsConf *tls.Config) (*listener, error) {
_, last := ma.SplitFirst(laddr)
laddr = first.Encapsulate(last)
}
+ parsed.restMultiaddr = laddr
ln := &listener{
nl: nl,
- laddr: laddr.Encapsulate(wscomponent),
+ laddr: parsed.toMultiaddr(),
incoming: make(chan *Conn),
closed: make(chan struct{}),
}
ln.server = http.Server{Handler: ln}
- if isWSS {
+ if parsed.isWSS {
+ ln.isWss = true
ln.server.TLSConfig = tlsConf
}
return ln, nil
@@ -70,7 +88,7 @@ func newListener(a ma.Multiaddr, tlsConf *tls.Config) (*listener, error) {
func (l *listener) serve() {
defer close(l.closed)
- if l.server.TLSConfig == nil {
+ if !l.isWss {
l.server.Serve(l.nl)
} else {
l.server.ServeTLS(l.nl, "", "")
@@ -85,7 +103,7 @@ func (l *listener) ServeHTTP(w http.ResponseWriter, r *http.Request) {
}
select {
- case l.incoming <- NewConn(c, false):
+ case l.incoming <- NewConn(c, l.isWss):
case <-l.closed:
c.Close()
}
@@ -96,7 +114,7 @@ func (l *listener) Accept() (manet.Conn, error) {
select {
case c, ok := <-l.incoming:
if !ok {
- return nil, fmt.Errorf("listener is closed")
+ return nil, transport.ErrListenerClosed
}
mnc, err := manet.WrapNetConn(c)
@@ -107,7 +125,7 @@ func (l *listener) Accept() (manet.Conn, error) {
return mnc, nil
case <-l.closed:
- return nil, fmt.Errorf("listener is closed")
+ return nil, transport.ErrListenerClosed
}
}
@@ -119,9 +137,24 @@ func (l *listener) Close() error {
l.server.Close()
err := l.nl.Close()
<-l.closed
+ if strings.Contains(err.Error(), "use of closed network connection") {
+ return transport.ErrListenerClosed
+ }
return err
}
func (l *listener) Multiaddr() ma.Multiaddr {
return l.laddr
}
+
+type transportListener struct {
+ transport.Listener
+}
+
+func (l *transportListener) Accept() (transport.CapableConn, error) {
+ conn, err := l.Listener.Accept()
+ if err != nil {
+ return nil, err
+ }
+ return &capableConn{CapableConn: conn}, nil
+}
diff --git a/vendor/github.com/libp2p/go-libp2p/p2p/transport/websocket/websocket.go b/vendor/github.com/libp2p/go-libp2p/p2p/transport/websocket/websocket.go
index 42c4c618f..e1965123d 100644
--- a/vendor/github.com/libp2p/go-libp2p/p2p/transport/websocket/websocket.go
+++ b/vendor/github.com/libp2p/go-libp2p/p2p/transport/websocket/websocket.go
@@ -4,6 +4,7 @@ package websocket
import (
"context"
"crypto/tls"
+ "net"
"net/http"
"time"
@@ -21,9 +22,27 @@ import (
// WsFmt is multiaddr formatter for WsProtocol
var WsFmt = mafmt.And(mafmt.TCP, mafmt.Base(ma.P_WS))
-// This is _not_ WsFmt because we want the transport to stick to dialing fully
-// resolved addresses.
-var dialMatcher = mafmt.And(mafmt.Or(mafmt.IP, mafmt.DNS), mafmt.Base(ma.P_TCP), mafmt.Or(mafmt.Base(ma.P_WS), mafmt.Base(ma.P_WSS)))
+var dialMatcher = mafmt.And(
+ mafmt.Or(mafmt.IP, mafmt.DNS),
+ mafmt.Base(ma.P_TCP),
+ mafmt.Or(
+ mafmt.Base(ma.P_WS),
+ mafmt.And(
+ mafmt.Or(
+ mafmt.And(
+ mafmt.Base(ma.P_TLS),
+ mafmt.Base(ma.P_SNI)),
+ mafmt.Base(ma.P_TLS),
+ ),
+ mafmt.Base(ma.P_WS)),
+ mafmt.Base(ma.P_WSS)))
+
+var (
+ wssComponent = ma.StringCast("/wss")
+ tlsWsComponent = ma.StringCast("/tls/ws")
+ tlsComponent = ma.StringCast("/tls")
+ wsComponent = ma.StringCast("/ws")
+)
func init() {
manet.RegisterFromNetAddr(ParseWebsocketNetAddr, "websocket")
@@ -74,11 +93,12 @@ var _ transport.Transport = (*WebsocketTransport)(nil)
func New(u transport.Upgrader, rcmgr network.ResourceManager, opts ...Option) (*WebsocketTransport, error) {
if rcmgr == nil {
- rcmgr = network.NullResourceManager
+ rcmgr = &network.NullResourceManager{}
}
t := &WebsocketTransport{
- upgrader: u,
- rcmgr: rcmgr,
+ upgrader: u,
+ rcmgr: rcmgr,
+ tlsClientConf: &tls.Config{},
}
for _, opt := range opts {
if err := opt(t); err != nil {
@@ -100,17 +120,65 @@ func (t *WebsocketTransport) Proxy() bool {
return false
}
+func (t *WebsocketTransport) Resolve(ctx context.Context, maddr ma.Multiaddr) ([]ma.Multiaddr, error) {
+ parsed, err := parseWebsocketMultiaddr(maddr)
+ if err != nil {
+ return nil, err
+ }
+
+ if !parsed.isWSS {
+ // No /tls/ws component, this isn't a secure websocket multiaddr. We can just return it here
+ return []ma.Multiaddr{maddr}, nil
+ }
+
+ if parsed.sni == nil {
+ var err error
+ // We don't have an sni component, we'll use dns/dnsaddr
+ ma.ForEach(parsed.restMultiaddr, func(c ma.Component) bool {
+ switch c.Protocol().Code {
+ case ma.P_DNS, ma.P_DNS4, ma.P_DNS6, ma.P_DNSADDR:
+ // err shouldn't happen since this means we couldn't parse a dns hostname for an sni value.
+ parsed.sni, err = ma.NewComponent("sni", c.Value())
+ return false
+ }
+ return true
+ })
+ if err != nil {
+ return nil, err
+ }
+ }
+
+ if parsed.sni == nil {
+ // we didn't find anything to set the sni with. So we just return the given multiaddr
+ return []ma.Multiaddr{maddr}, nil
+ }
+
+ return []ma.Multiaddr{parsed.toMultiaddr()}, nil
+}
+
func (t *WebsocketTransport) Dial(ctx context.Context, raddr ma.Multiaddr, p peer.ID) (transport.CapableConn, error) {
connScope, err := t.rcmgr.OpenConnection(network.DirOutbound, true, raddr)
if err != nil {
return nil, err
}
- macon, err := t.maDial(ctx, raddr)
+ c, err := t.dialWithScope(ctx, raddr, p, connScope)
if err != nil {
connScope.Done()
return nil, err
}
- return t.upgrader.Upgrade(ctx, t, macon, network.DirOutbound, p, connScope)
+ return c, nil
+}
+
+func (t *WebsocketTransport) dialWithScope(ctx context.Context, raddr ma.Multiaddr, p peer.ID, connScope network.ConnManagementScope) (transport.CapableConn, error) {
+ macon, err := t.maDial(ctx, raddr)
+ if err != nil {
+ return nil, err
+ }
+ conn, err := t.upgrader.Upgrade(ctx, t, macon, network.DirOutbound, p, connScope)
+ if err != nil {
+ return nil, err
+ }
+ return &capableConn{CapableConn: conn}, nil
}
func (t *WebsocketTransport) maDial(ctx context.Context, raddr ma.Multiaddr) (manet.Conn, error) {
@@ -121,9 +189,32 @@ func (t *WebsocketTransport) maDial(ctx context.Context, raddr ma.Multiaddr) (ma
isWss := wsurl.Scheme == "wss"
dialer := ws.Dialer{HandshakeTimeout: 30 * time.Second}
if isWss {
- dialer.TLSClientConfig = t.tlsClientConf
+ sni := ""
+ sni, err = raddr.ValueForProtocol(ma.P_SNI)
+ if err != nil {
+ sni = ""
+ }
+ if sni != "" {
+ copytlsClientConf := t.tlsClientConf.Clone()
+ copytlsClientConf.ServerName = sni
+ dialer.TLSClientConfig = copytlsClientConf
+ ipAddr := wsurl.Host
+ // Setting the NetDial because we already have the resolved IP address, so we don't want to do another resolution.
+ // We set the `.Host` to the sni field so that the host header gets properly set.
+ dialer.NetDial = func(network, address string) (net.Conn, error) {
+ tcpAddr, err := net.ResolveTCPAddr(network, ipAddr)
+ if err != nil {
+ return nil, err
+ }
+ return net.DialTCP("tcp", nil, tcpAddr)
+ }
+ wsurl.Host = sni + ":" + wsurl.Port()
+ } else {
+ dialer.TLSClientConfig = t.tlsClientConf
+ }
}
+
wscon, _, err := dialer.DialContext(ctx, wsurl.String(), nil)
if err != nil {
return nil, err
@@ -151,5 +242,5 @@ func (t *WebsocketTransport) Listen(a ma.Multiaddr) (transport.Listener, error)
if err != nil {
return nil, err
}
- return t.upgrader.UpgradeListener(t, malist), nil
+ return &transportListener{Listener: t.upgrader.UpgradeListener(t, malist)}, nil
}
diff --git a/vendor/github.com/libp2p/go-libp2p/p2p/transport/webtransport/cert_manager.go b/vendor/github.com/libp2p/go-libp2p/p2p/transport/webtransport/cert_manager.go
new file mode 100644
index 000000000..d48a0aa53
--- /dev/null
+++ b/vendor/github.com/libp2p/go-libp2p/p2p/transport/webtransport/cert_manager.go
@@ -0,0 +1,213 @@
+package libp2pwebtransport
+
+import (
+ "context"
+ "crypto/sha256"
+ "crypto/tls"
+ "encoding/binary"
+ "fmt"
+ "sync"
+ "time"
+
+ "github.com/benbjohnson/clock"
+ ic "github.com/libp2p/go-libp2p/core/crypto"
+ ma "github.com/multiformats/go-multiaddr"
+ "github.com/multiformats/go-multihash"
+)
+
+// Allow for a bit of clock skew.
+// When we generate a certificate, the NotBefore time is set to clockSkewAllowance before the current time.
+// Similarly, we stop using a certificate one clockSkewAllowance before its expiry time.
+const clockSkewAllowance = time.Hour
+const validityMinusTwoSkew = certValidity - (2 * clockSkewAllowance)
+
+type certConfig struct {
+ tlsConf *tls.Config
+ sha256 [32]byte // cached from the tlsConf
+}
+
+func (c *certConfig) Start() time.Time { return c.tlsConf.Certificates[0].Leaf.NotBefore }
+func (c *certConfig) End() time.Time { return c.tlsConf.Certificates[0].Leaf.NotAfter }
+
+func newCertConfig(key ic.PrivKey, start, end time.Time) (*certConfig, error) {
+ conf, err := getTLSConf(key, start, end)
+ if err != nil {
+ return nil, err
+ }
+ return &certConfig{
+ tlsConf: conf,
+ sha256: sha256.Sum256(conf.Certificates[0].Leaf.Raw),
+ }, nil
+}
+
+// Certificate renewal logic:
+// 1. On startup, we generate one cert that is valid from now (-1h, to allow for clock skew), and another
+// cert that is valid from the expiry date of the first certificate (again, with allowance for clock skew).
+// 2. Once we reach 1h before expiry of the first certificate, we switch over to the second certificate.
+// At the same time, we stop advertising the certhash of the first cert and generate the next cert.
+type certManager struct {
+ clock clock.Clock
+ ctx context.Context
+ ctxCancel context.CancelFunc
+ refCount sync.WaitGroup
+
+ mx sync.RWMutex
+ lastConfig *certConfig // initially nil
+ currentConfig *certConfig
+ nextConfig *certConfig // nil until we have passed half the certValidity of the current config
+ addrComp ma.Multiaddr
+
+ serializedCertHashes [][]byte
+}
+
+func newCertManager(hostKey ic.PrivKey, clock clock.Clock) (*certManager, error) {
+ m := &certManager{clock: clock}
+ m.ctx, m.ctxCancel = context.WithCancel(context.Background())
+ if err := m.init(hostKey); err != nil {
+ return nil, err
+ }
+
+ m.background(hostKey)
+ return m, nil
+}
+
+// getCurrentTimeBucket returns the canonical start time of the given time as
+// bucketed by ranges of certValidity since unix epoch (plus an offset). This
+// lets you get the same time ranges across reboots without having to persist
+// state.
+// ```
+// ... v--- epoch + offset
+// ... |--------| |--------| ...
+// ... |--------| |--------| ...
+// ```
+func getCurrentBucketStartTime(now time.Time, offset time.Duration) time.Time {
+ currentBucket := (now.UnixMilli() - offset.Milliseconds()) / validityMinusTwoSkew.Milliseconds()
+ return time.UnixMilli(offset.Milliseconds() + currentBucket*validityMinusTwoSkew.Milliseconds())
+}
+
+func (m *certManager) init(hostKey ic.PrivKey) error {
+ start := m.clock.Now()
+ pubkeyBytes, err := hostKey.GetPublic().Raw()
+ if err != nil {
+ return err
+ }
+
+ // We want to add a random offset to each start time so that not all certs
+ // rotate at the same time across the network. The offset represents moving
+ // the bucket start time some `offset` earlier.
+ offset := (time.Duration(binary.LittleEndian.Uint16(pubkeyBytes)) * time.Minute) % certValidity
+
+ // We want the certificate to have been valid for at least one clockSkewAllowance
+ start = start.Add(-clockSkewAllowance)
+ startTime := getCurrentBucketStartTime(start, offset)
+ m.nextConfig, err = newCertConfig(hostKey, startTime, startTime.Add(certValidity))
+ if err != nil {
+ return err
+ }
+ return m.rollConfig(hostKey)
+}
+
+func (m *certManager) rollConfig(hostKey ic.PrivKey) error {
+ // We stop using the current certificate clockSkewAllowance before its expiry time.
+ // At this point, the next certificate needs to be valid for one clockSkewAllowance.
+ nextStart := m.nextConfig.End().Add(-2 * clockSkewAllowance)
+ c, err := newCertConfig(hostKey, nextStart, nextStart.Add(certValidity))
+ if err != nil {
+ return err
+ }
+ m.lastConfig = m.currentConfig
+ m.currentConfig = m.nextConfig
+ m.nextConfig = c
+ if err := m.cacheSerializedCertHashes(); err != nil {
+ return err
+ }
+ return m.cacheAddrComponent()
+}
+
+func (m *certManager) background(hostKey ic.PrivKey) {
+ d := m.currentConfig.End().Add(-clockSkewAllowance).Sub(m.clock.Now())
+ log.Debugw("setting timer", "duration", d.String())
+ t := m.clock.Timer(d)
+ m.refCount.Add(1)
+
+ go func() {
+ defer m.refCount.Done()
+ defer t.Stop()
+
+ for {
+ select {
+ case <-m.ctx.Done():
+ return
+ case <-t.C:
+ now := m.clock.Now()
+ m.mx.Lock()
+ if err := m.rollConfig(hostKey); err != nil {
+ log.Errorw("rolling config failed", "error", err)
+ }
+ d := m.currentConfig.End().Add(-clockSkewAllowance).Sub(now)
+ log.Debugw("rolling certificates", "next", d.String())
+ t.Reset(d)
+ m.mx.Unlock()
+ }
+ }
+ }()
+}
+
+func (m *certManager) GetConfig() *tls.Config {
+ m.mx.RLock()
+ defer m.mx.RUnlock()
+ return m.currentConfig.tlsConf
+}
+
+func (m *certManager) AddrComponent() ma.Multiaddr {
+ m.mx.RLock()
+ defer m.mx.RUnlock()
+ return m.addrComp
+}
+
+func (m *certManager) SerializedCertHashes() [][]byte {
+ return m.serializedCertHashes
+}
+
+func (m *certManager) cacheSerializedCertHashes() error {
+ hashes := make([][32]byte, 0, 3)
+ if m.lastConfig != nil {
+ hashes = append(hashes, m.lastConfig.sha256)
+ }
+ hashes = append(hashes, m.currentConfig.sha256)
+ if m.nextConfig != nil {
+ hashes = append(hashes, m.nextConfig.sha256)
+ }
+
+ m.serializedCertHashes = m.serializedCertHashes[:0]
+ for _, certHash := range hashes {
+ h, err := multihash.Encode(certHash[:], multihash.SHA2_256)
+ if err != nil {
+ return fmt.Errorf("failed to encode certificate hash: %w", err)
+ }
+ m.serializedCertHashes = append(m.serializedCertHashes, h)
+ }
+ return nil
+}
+
+func (m *certManager) cacheAddrComponent() error {
+ addr, err := addrComponentForCert(m.currentConfig.sha256[:])
+ if err != nil {
+ return err
+ }
+ if m.nextConfig != nil {
+ comp, err := addrComponentForCert(m.nextConfig.sha256[:])
+ if err != nil {
+ return err
+ }
+ addr = addr.Encapsulate(comp)
+ }
+ m.addrComp = addr
+ return nil
+}
+
+func (m *certManager) Close() error {
+ m.ctxCancel()
+ m.refCount.Wait()
+ return nil
+}
diff --git a/vendor/github.com/libp2p/go-libp2p/p2p/transport/webtransport/conn.go b/vendor/github.com/libp2p/go-libp2p/p2p/transport/webtransport/conn.go
new file mode 100644
index 000000000..0e83b1d16
--- /dev/null
+++ b/vendor/github.com/libp2p/go-libp2p/p2p/transport/webtransport/conn.go
@@ -0,0 +1,82 @@
+package libp2pwebtransport
+
+import (
+ "context"
+
+ "github.com/libp2p/go-libp2p/core/network"
+ tpt "github.com/libp2p/go-libp2p/core/transport"
+
+ ma "github.com/multiformats/go-multiaddr"
+ "github.com/quic-go/webtransport-go"
+)
+
+type connSecurityMultiaddrs struct {
+ network.ConnSecurity
+ network.ConnMultiaddrs
+}
+
+type connMultiaddrs struct {
+ local, remote ma.Multiaddr
+}
+
+var _ network.ConnMultiaddrs = &connMultiaddrs{}
+
+func (c *connMultiaddrs) LocalMultiaddr() ma.Multiaddr { return c.local }
+func (c *connMultiaddrs) RemoteMultiaddr() ma.Multiaddr { return c.remote }
+
+type conn struct {
+ *connSecurityMultiaddrs
+
+ transport *transport
+ session *webtransport.Session
+
+ scope network.ConnManagementScope
+}
+
+var _ tpt.CapableConn = &conn{}
+
+func newConn(tr *transport, sess *webtransport.Session, sconn *connSecurityMultiaddrs, scope network.ConnManagementScope) *conn {
+ return &conn{
+ connSecurityMultiaddrs: sconn,
+ transport: tr,
+ session: sess,
+ scope: scope,
+ }
+}
+
+func (c *conn) OpenStream(ctx context.Context) (network.MuxedStream, error) {
+ str, err := c.session.OpenStreamSync(ctx)
+ if err != nil {
+ return nil, err
+ }
+ return &stream{str}, nil
+}
+
+func (c *conn) AcceptStream() (network.MuxedStream, error) {
+ str, err := c.session.AcceptStream(context.Background())
+ if err != nil {
+ return nil, err
+ }
+ return &stream{str}, nil
+}
+
+func (c *conn) allowWindowIncrease(size uint64) bool {
+ return c.scope.ReserveMemory(int(size), network.ReservationPriorityMedium) == nil
+}
+
+// Close closes the connection.
+// It must be called even if the peer closed the connection in order for
+// garbage collection to properly work in this package.
+func (c *conn) Close() error {
+ c.scope.Done()
+ c.transport.removeConn(c.session)
+ return c.session.CloseWithError(0, "")
+}
+
+func (c *conn) IsClosed() bool { return c.session.Context().Err() != nil }
+func (c *conn) Scope() network.ConnScope { return c.scope }
+func (c *conn) Transport() tpt.Transport { return c.transport }
+
+func (c *conn) ConnState() network.ConnectionState {
+ return network.ConnectionState{Transport: "webtransport"}
+}
diff --git a/vendor/github.com/libp2p/go-libp2p/p2p/transport/webtransport/crypto.go b/vendor/github.com/libp2p/go-libp2p/p2p/transport/webtransport/crypto.go
new file mode 100644
index 000000000..bdc121c52
--- /dev/null
+++ b/vendor/github.com/libp2p/go-libp2p/p2p/transport/webtransport/crypto.go
@@ -0,0 +1,155 @@
+package libp2pwebtransport
+
+import (
+ "bytes"
+ "crypto/ecdsa"
+ "crypto/elliptic"
+ "crypto/sha256"
+ "crypto/tls"
+ "crypto/x509"
+ "crypto/x509/pkix"
+ "encoding/binary"
+ "errors"
+ "fmt"
+ "io"
+ "math/big"
+ "time"
+
+ "golang.org/x/crypto/hkdf"
+
+ ic "github.com/libp2p/go-libp2p/core/crypto"
+
+ "github.com/multiformats/go-multihash"
+ "github.com/quic-go/quic-go/http3"
+)
+
+const deterministicCertInfo = "determinisitic cert"
+
+func getTLSConf(key ic.PrivKey, start, end time.Time) (*tls.Config, error) {
+ cert, priv, err := generateCert(key, start, end)
+ if err != nil {
+ return nil, err
+ }
+ return &tls.Config{
+ Certificates: []tls.Certificate{{
+ Certificate: [][]byte{cert.Raw},
+ PrivateKey: priv,
+ Leaf: cert,
+ }},
+ NextProtos: []string{http3.NextProtoH3},
+ }, nil
+}
+
+// generateCert generates certs deterministically based on the `key` and start
+// time passed in. Uses `golang.org/x/crypto/hkdf`.
+func generateCert(key ic.PrivKey, start, end time.Time) (*x509.Certificate, *ecdsa.PrivateKey, error) {
+ keyBytes, err := key.Raw()
+ if err != nil {
+ return nil, nil, err
+ }
+
+ startTimeSalt := make([]byte, 8)
+ binary.LittleEndian.PutUint64(startTimeSalt, uint64(start.UnixNano()))
+ deterministicHKDFReader := newDeterministicReader(keyBytes, startTimeSalt, deterministicCertInfo)
+
+ b := make([]byte, 8)
+ if _, err := deterministicHKDFReader.Read(b); err != nil {
+ return nil, nil, err
+ }
+ serial := int64(binary.BigEndian.Uint64(b))
+ if serial < 0 {
+ serial = -serial
+ }
+ certTempl := &x509.Certificate{
+ SerialNumber: big.NewInt(serial),
+ Subject: pkix.Name{},
+ NotBefore: start,
+ NotAfter: end,
+ IsCA: true,
+ ExtKeyUsage: []x509.ExtKeyUsage{x509.ExtKeyUsageClientAuth, x509.ExtKeyUsageServerAuth},
+ KeyUsage: x509.KeyUsageDigitalSignature | x509.KeyUsageCertSign,
+ BasicConstraintsValid: true,
+ }
+
+ caPrivateKey, err := ecdsa.GenerateKey(elliptic.P256(), deterministicHKDFReader)
+ if err != nil {
+ return nil, nil, err
+ }
+ caBytes, err := x509.CreateCertificate(deterministicHKDFReader, certTempl, certTempl, caPrivateKey.Public(), caPrivateKey)
+ if err != nil {
+ return nil, nil, err
+ }
+ ca, err := x509.ParseCertificate(caBytes)
+ if err != nil {
+ return nil, nil, err
+ }
+ return ca, caPrivateKey, nil
+}
+
+func verifyRawCerts(rawCerts [][]byte, certHashes []multihash.DecodedMultihash) error {
+ if len(rawCerts) < 1 {
+ return errors.New("no cert")
+ }
+ leaf := rawCerts[len(rawCerts)-1]
+ // The W3C WebTransport specification currently only allows SHA-256 certificates for serverCertificateHashes.
+ hash := sha256.Sum256(leaf)
+ var verified bool
+ for _, h := range certHashes {
+ if h.Code == multihash.SHA2_256 && bytes.Equal(h.Digest, hash[:]) {
+ verified = true
+ break
+ }
+ }
+ if !verified {
+ digests := make([][]byte, 0, len(certHashes))
+ for _, h := range certHashes {
+ digests = append(digests, h.Digest)
+ }
+ return fmt.Errorf("cert hash not found: %#x (expected: %#x)", hash, digests)
+ }
+
+ cert, err := x509.ParseCertificate(leaf)
+ if err != nil {
+ return err
+ }
+ // TODO: is this the best (and complete?) way to identify RSA certificates?
+ switch cert.SignatureAlgorithm {
+ case x509.SHA1WithRSA, x509.SHA256WithRSA, x509.SHA384WithRSA, x509.SHA512WithRSA, x509.MD2WithRSA, x509.MD5WithRSA:
+ return errors.New("cert uses RSA")
+ }
+ if l := cert.NotAfter.Sub(cert.NotBefore); l > 14*24*time.Hour {
+ return fmt.Errorf("cert must not be valid for longer than 14 days (NotBefore: %s, NotAfter: %s, Length: %s)", cert.NotBefore, cert.NotAfter, l)
+ }
+ now := time.Now()
+ if now.Before(cert.NotBefore) || now.After(cert.NotAfter) {
+ return fmt.Errorf("cert not valid (NotBefore: %s, NotAfter: %s)", cert.NotBefore, cert.NotAfter)
+ }
+ return nil
+}
+
+// deterministicReader is a hack. It counteracts the Go library's attempt at
+// making ECDSA signatures non-deterministic. Go adds non-determinism by
+// randomly dropping a single byte from the reader stream. It counteracts
+// that by detecting when a read is for a single byte and using a different
+// reader instead.
+type deterministicReader struct {
+ reader io.Reader
+ singleByteReader io.Reader
+}
+
+func newDeterministicReader(seed []byte, salt []byte, info string) io.Reader {
+ reader := hkdf.New(sha256.New, seed, salt, []byte(info))
+ singleByteReader := hkdf.New(sha256.New, seed, salt, []byte(info+" single byte"))
+
+ return &deterministicReader{
+ reader: reader,
+ singleByteReader: singleByteReader,
+ }
+}
+
+func (r *deterministicReader) Read(p []byte) (n int, err error) {
+ if len(p) == 1 {
+ return r.singleByteReader.Read(p)
+ }
+ return r.reader.Read(p)
+}
diff --git a/vendor/github.com/libp2p/go-libp2p/p2p/transport/webtransport/listener.go b/vendor/github.com/libp2p/go-libp2p/p2p/transport/webtransport/listener.go
new file mode 100644
index 000000000..337239fa8
--- /dev/null
+++ b/vendor/github.com/libp2p/go-libp2p/p2p/transport/webtransport/listener.go
@@ -0,0 +1,216 @@
+package libp2pwebtransport
+
+import (
+ "context"
+ "errors"
+ "fmt"
+ "net"
+ "net/http"
+ "time"
+
+ "github.com/libp2p/go-libp2p/core/network"
+ tpt "github.com/libp2p/go-libp2p/core/transport"
+ "github.com/libp2p/go-libp2p/p2p/security/noise"
+ "github.com/libp2p/go-libp2p/p2p/security/noise/pb"
+ "github.com/libp2p/go-libp2p/p2p/transport/quicreuse"
+
+ ma "github.com/multiformats/go-multiaddr"
+ "github.com/quic-go/webtransport-go"
+)
+
+const queueLen = 16
+const handshakeTimeout = 10 * time.Second
+
+type listener struct {
+ transport *transport
+ isStaticTLSConf bool
+ reuseListener quicreuse.Listener
+
+ server webtransport.Server
+
+ ctx context.Context
+ ctxCancel context.CancelFunc
+
+ serverClosed chan struct{} // is closed when server.Serve returns
+
+ addr net.Addr
+ multiaddr ma.Multiaddr
+
+ queue chan tpt.CapableConn
+}
+
+var _ tpt.Listener = &listener{}
+
+func newListener(reuseListener quicreuse.Listener, t *transport, isStaticTLSConf bool) (tpt.Listener, error) {
+ localMultiaddr, err := toWebtransportMultiaddr(reuseListener.Addr())
+ if err != nil {
+ return nil, err
+ }
+
+ ln := &listener{
+ reuseListener: reuseListener,
+ transport: t,
+ isStaticTLSConf: isStaticTLSConf,
+ queue: make(chan tpt.CapableConn, queueLen),
+ serverClosed: make(chan struct{}),
+ addr: reuseListener.Addr(),
+ multiaddr: localMultiaddr,
+ server: webtransport.Server{
+ CheckOrigin: func(r *http.Request) bool { return true },
+ },
+ }
+ ln.ctx, ln.ctxCancel = context.WithCancel(context.Background())
+ mux := http.NewServeMux()
+ mux.HandleFunc(webtransportHTTPEndpoint, ln.httpHandler)
+ ln.server.H3.Handler = mux
+ go func() {
+ defer close(ln.serverClosed)
+ for {
+ conn, err := ln.reuseListener.Accept(context.Background())
+ if err != nil {
+ log.Debugw("serving failed", "addr", ln.Addr(), "error", err)
+ return
+ }
+ go ln.server.ServeQUICConn(conn)
+ }
+ }()
+ return ln, nil
+}
+
+func (l *listener) httpHandler(w http.ResponseWriter, r *http.Request) {
+ typ, ok := r.URL.Query()["type"]
+ if !ok || len(typ) != 1 || typ[0] != "noise" {
+ w.WriteHeader(http.StatusBadRequest)
+ return
+ }
+ remoteMultiaddr, err := stringToWebtransportMultiaddr(r.RemoteAddr)
+ if err != nil {
+ // This should never happen.
+ log.Errorw("converting remote address failed", "remote", r.RemoteAddr, "error", err)
+ w.WriteHeader(http.StatusBadRequest)
+ return
+ }
+ if l.transport.gater != nil && !l.transport.gater.InterceptAccept(&connMultiaddrs{local: l.multiaddr, remote: remoteMultiaddr}) {
+ w.WriteHeader(http.StatusForbidden)
+ return
+ }
+
+ connScope, err := l.transport.rcmgr.OpenConnection(network.DirInbound, false, remoteMultiaddr)
+ if err != nil {
+ log.Debugw("resource manager blocked incoming connection", "addr", r.RemoteAddr, "error", err)
+ w.WriteHeader(http.StatusServiceUnavailable)
+ return
+ }
+ err = l.httpHandlerWithConnScope(w, r, connScope)
+ if err != nil {
+ connScope.Done()
+ }
+}
+
+func (l *listener) httpHandlerWithConnScope(w http.ResponseWriter, r *http.Request, connScope network.ConnManagementScope) error {
+ sess, err := l.server.Upgrade(w, r)
+ if err != nil {
+ log.Debugw("upgrade failed", "error", err)
+ // TODO: think about the status code to use here
+ w.WriteHeader(500)
+ return err
+ }
+ ctx, cancel := context.WithTimeout(l.ctx, handshakeTimeout)
+ sconn, err := l.handshake(ctx, sess)
+ if err != nil {
+ cancel()
+ log.Debugw("handshake failed", "error", err)
+ sess.CloseWithError(1, "")
+ return err
+ }
+ cancel()
+
+ if l.transport.gater != nil && !l.transport.gater.InterceptSecured(network.DirInbound, sconn.RemotePeer(), sconn) {
+ // TODO: can we close with a specific error here?
+ sess.CloseWithError(errorCodeConnectionGating, "")
+ return errors.New("gater blocked connection")
+ }
+
+ if err := connScope.SetPeer(sconn.RemotePeer()); err != nil {
+ log.Debugw("resource manager blocked incoming connection for peer", "peer", sconn.RemotePeer(), "addr", r.RemoteAddr, "error", err)
+ sess.CloseWithError(1, "")
+ return err
+ }
+
+ conn := newConn(l.transport, sess, sconn, connScope)
+ l.transport.addConn(sess, conn)
+ select {
+ case l.queue <- conn:
+ default:
+ log.Debugw("accept queue full, dropping incoming connection", "peer", sconn.RemotePeer(), "addr", r.RemoteAddr, "error", err)
+ sess.CloseWithError(1, "")
+ return errors.New("accept queue full")
+ }
+
+ return nil
+}
+
+func (l *listener) Accept() (tpt.CapableConn, error) {
+ select {
+ case <-l.ctx.Done():
+ return nil, tpt.ErrListenerClosed
+ case c := <-l.queue:
+ return c, nil
+ }
+}
+
+func (l *listener) handshake(ctx context.Context, sess *webtransport.Session) (*connSecurityMultiaddrs, error) {
+ local, err := toWebtransportMultiaddr(sess.LocalAddr())
+ if err != nil {
+ return nil, fmt.Errorf("error determiniting local addr: %w", err)
+ }
+ remote, err := toWebtransportMultiaddr(sess.RemoteAddr())
+ if err != nil {
+ return nil, fmt.Errorf("error determiniting remote addr: %w", err)
+ }
+
+ str, err := sess.AcceptStream(ctx)
+ if err != nil {
+ return nil, err
+ }
+ var earlyData [][]byte
+ if !l.isStaticTLSConf {
+ earlyData = l.transport.certManager.SerializedCertHashes()
+ }
+
+ n, err := l.transport.noise.WithSessionOptions(noise.EarlyData(
+ nil,
+ newEarlyDataSender(&pb.NoiseExtensions{WebtransportCerthashes: earlyData}),
+ ))
+ if err != nil {
+ return nil, fmt.Errorf("failed to initialize Noise session: %w", err)
+ }
+ c, err := n.SecureInbound(ctx, &webtransportStream{Stream: str, wsess: sess}, "")
+ if err != nil {
+ return nil, err
+ }
+
+ return &connSecurityMultiaddrs{
+ ConnSecurity: c,
+ ConnMultiaddrs: &connMultiaddrs{local: local, remote: remote},
+ }, nil
+}
+
+func (l *listener) Addr() net.Addr {
+ return l.addr
+}
+
+func (l *listener) Multiaddr() ma.Multiaddr {
+ if l.transport.certManager == nil {
+ return l.multiaddr
+ }
+ return l.multiaddr.Encapsulate(l.transport.certManager.AddrComponent())
+}
+
+func (l *listener) Close() error {
+ l.ctxCancel()
+ l.reuseListener.Close()
+ err := l.server.Close()
+ <-l.serverClosed
+ return err
+}
diff --git a/vendor/github.com/libp2p/go-libp2p/p2p/transport/webtransport/multiaddr.go b/vendor/github.com/libp2p/go-libp2p/p2p/transport/webtransport/multiaddr.go
new file mode 100644
index 000000000..d6930af36
--- /dev/null
+++ b/vendor/github.com/libp2p/go-libp2p/p2p/transport/webtransport/multiaddr.go
@@ -0,0 +1,107 @@
+package libp2pwebtransport
+
+import (
+ "errors"
+ "fmt"
+ "net"
+ "strconv"
+
+ ma "github.com/multiformats/go-multiaddr"
+ manet "github.com/multiformats/go-multiaddr/net"
+ "github.com/multiformats/go-multibase"
+ "github.com/multiformats/go-multihash"
+)
+
+var webtransportMA = ma.StringCast("/quic-v1/webtransport")
+
+func toWebtransportMultiaddr(na net.Addr) (ma.Multiaddr, error) {
+ addr, err := manet.FromNetAddr(na)
+ if err != nil {
+ return nil, err
+ }
+ if _, err := addr.ValueForProtocol(ma.P_UDP); err != nil {
+ return nil, errors.New("not a UDP address")
+ }
+ return addr.Encapsulate(webtransportMA), nil
+}
+
+func stringToWebtransportMultiaddr(str string) (ma.Multiaddr, error) {
+ host, portStr, err := net.SplitHostPort(str)
+ if err != nil {
+ return nil, err
+ }
+ port, err := strconv.ParseInt(portStr, 10, 32)
+ if err != nil {
+ return nil, err
+ }
+ ip := net.ParseIP(host)
+ if ip == nil {
+ return nil, errors.New("failed to parse IP")
+ }
+ return toWebtransportMultiaddr(&net.UDPAddr{IP: ip, Port: int(port)})
+}
+
+func extractCertHashes(addr ma.Multiaddr) ([]multihash.DecodedMultihash, error) {
+ certHashesStr := make([]string, 0, 2)
+ ma.ForEach(addr, func(c ma.Component) bool {
+ if c.Protocol().Code == ma.P_CERTHASH {
+ certHashesStr = append(certHashesStr, c.Value())
+ }
+ return true
+ })
+ certHashes := make([]multihash.DecodedMultihash, 0, len(certHashesStr))
+ for _, s := range certHashesStr {
+ _, ch, err := multibase.Decode(s)
+ if err != nil {
+ return nil, fmt.Errorf("failed to multibase-decode certificate hash: %w", err)
+ }
+ dh, err := multihash.Decode(ch)
+ if err != nil {
+ return nil, fmt.Errorf("failed to multihash-decode certificate hash: %w", err)
+ }
+ certHashes = append(certHashes, *dh)
+ }
+ return certHashes, nil
+}
+
+func addrComponentForCert(hash []byte) (ma.Multiaddr, error) {
+ mh, err := multihash.Encode(hash, multihash.SHA2_256)
+ if err != nil {
+ return nil, err
+ }
+ certStr, err := multibase.Encode(multibase.Base58BTC, mh)
+ if err != nil {
+ return nil, err
+ }
+ return ma.NewComponent(ma.ProtocolWithCode(ma.P_CERTHASH).Name, certStr)
+}
+
+// IsWebtransportMultiaddr returns true if the given multiaddr is a well formed
+// webtransport multiaddr. Returns the number of certhashes found.
+func IsWebtransportMultiaddr(multiaddr ma.Multiaddr) (bool, int) {
+ const (
+ init = iota
+ foundUDP
+ foundQuicV1
+ foundWebTransport
+ )
+ state := init
+ certhashCount := 0
+
+ ma.ForEach(multiaddr, func(c ma.Component) bool {
+ if c.Protocol().Code == ma.P_QUIC_V1 && state == init {
+ state = foundUDP
+ }
+ if c.Protocol().Code == ma.P_QUIC_V1 && state == foundUDP {
+ state = foundQuicV1
+ }
+ if c.Protocol().Code == ma.P_WEBTRANSPORT && state == foundQuicV1 {
+ state = foundWebTransport
+ }
+ if c.Protocol().Code == ma.P_CERTHASH && state == foundWebTransport {
+ certhashCount++
+ }
+ return true
+ })
+ return state == foundWebTransport, certhashCount
+}
diff --git a/vendor/github.com/libp2p/go-libp2p/p2p/transport/webtransport/noise_early_data.go b/vendor/github.com/libp2p/go-libp2p/p2p/transport/webtransport/noise_early_data.go
new file mode 100644
index 000000000..6ca8d9ddb
--- /dev/null
+++ b/vendor/github.com/libp2p/go-libp2p/p2p/transport/webtransport/noise_early_data.go
@@ -0,0 +1,36 @@
+package libp2pwebtransport
+
+import (
+ "context"
+ "net"
+
+ "github.com/libp2p/go-libp2p/core/peer"
+ "github.com/libp2p/go-libp2p/p2p/security/noise"
+ "github.com/libp2p/go-libp2p/p2p/security/noise/pb"
+)
+
+type earlyDataHandler struct {
+ earlyData *pb.NoiseExtensions
+ receive func(extensions *pb.NoiseExtensions) error
+}
+
+var _ noise.EarlyDataHandler = &earlyDataHandler{}
+
+func newEarlyDataSender(earlyData *pb.NoiseExtensions) noise.EarlyDataHandler {
+ return &earlyDataHandler{earlyData: earlyData}
+}
+
+func newEarlyDataReceiver(receive func(*pb.NoiseExtensions) error) noise.EarlyDataHandler {
+ return &earlyDataHandler{receive: receive}
+}
+
+func (e *earlyDataHandler) Send(context.Context, net.Conn, peer.ID) *pb.NoiseExtensions {
+ return e.earlyData
+}
+
+func (e *earlyDataHandler) Received(_ context.Context, _ net.Conn, ext *pb.NoiseExtensions) error {
+ if e.receive == nil {
+ return nil
+ }
+ return e.receive(ext)
+}
diff --git a/vendor/github.com/libp2p/go-libp2p/p2p/transport/webtransport/stream.go b/vendor/github.com/libp2p/go-libp2p/p2p/transport/webtransport/stream.go
new file mode 100644
index 000000000..0849fc9f3
--- /dev/null
+++ b/vendor/github.com/libp2p/go-libp2p/p2p/transport/webtransport/stream.go
@@ -0,0 +1,71 @@
+package libp2pwebtransport
+
+import (
+ "errors"
+ "net"
+
+ "github.com/libp2p/go-libp2p/core/network"
+
+ "github.com/quic-go/webtransport-go"
+)
+
+const (
+ reset webtransport.StreamErrorCode = 0
+)
+
+type webtransportStream struct {
+ webtransport.Stream
+ wsess *webtransport.Session
+}
+
+var _ net.Conn = &webtransportStream{}
+
+func (s *webtransportStream) LocalAddr() net.Addr {
+ return s.wsess.LocalAddr()
+}
+
+func (s *webtransportStream) RemoteAddr() net.Addr {
+ return s.wsess.RemoteAddr()
+}
+
+type stream struct {
+ webtransport.Stream
+}
+
+var _ network.MuxedStream = &stream{}
+
+func (s *stream) Read(b []byte) (n int, err error) {
+ n, err = s.Stream.Read(b)
+ if err != nil && errors.Is(err, &webtransport.StreamError{}) {
+ err = network.ErrReset
+ }
+ return n, err
+}
+
+func (s *stream) Write(b []byte) (n int, err error) {
+ n, err = s.Stream.Write(b)
+ if err != nil && errors.Is(err, &webtransport.StreamError{}) {
+ err = network.ErrReset
+ }
+ return n, err
+}
+
+func (s *stream) Reset() error {
+ s.Stream.CancelRead(reset)
+ s.Stream.CancelWrite(reset)
+ return nil
+}
+
+func (s *stream) Close() error {
+ s.Stream.CancelRead(reset)
+ return s.Stream.Close()
+}
+
+func (s *stream) CloseRead() error {
+ s.Stream.CancelRead(reset)
+ return nil
+}
+
+func (s *stream) CloseWrite() error {
+ return s.Stream.Close()
+}
diff --git a/vendor/github.com/libp2p/go-libp2p/p2p/transport/webtransport/transport.go b/vendor/github.com/libp2p/go-libp2p/p2p/transport/webtransport/transport.go
new file mode 100644
index 000000000..f9c68ddf3
--- /dev/null
+++ b/vendor/github.com/libp2p/go-libp2p/p2p/transport/webtransport/transport.go
@@ -0,0 +1,414 @@
+package libp2pwebtransport
+
+import (
+ "bytes"
+ "context"
+ "crypto/tls"
+ "crypto/x509"
+ "errors"
+ "fmt"
+ "io"
+ "sync"
+ "sync/atomic"
+ "time"
+
+ "github.com/libp2p/go-libp2p/core/connmgr"
+ ic "github.com/libp2p/go-libp2p/core/crypto"
+ "github.com/libp2p/go-libp2p/core/network"
+ "github.com/libp2p/go-libp2p/core/peer"
+ "github.com/libp2p/go-libp2p/core/pnet"
+ tpt "github.com/libp2p/go-libp2p/core/transport"
+ "github.com/libp2p/go-libp2p/p2p/security/noise"
+ "github.com/libp2p/go-libp2p/p2p/security/noise/pb"
+ "github.com/libp2p/go-libp2p/p2p/transport/quicreuse"
+
+ "github.com/benbjohnson/clock"
+ logging "github.com/ipfs/go-log/v2"
+ ma "github.com/multiformats/go-multiaddr"
+ manet "github.com/multiformats/go-multiaddr/net"
+ "github.com/multiformats/go-multihash"
+ "github.com/quic-go/quic-go"
+ "github.com/quic-go/quic-go/http3"
+ "github.com/quic-go/webtransport-go"
+)
+
+var log = logging.Logger("webtransport")
+
+const webtransportHTTPEndpoint = "/.well-known/libp2p-webtransport"
+
+const errorCodeConnectionGating = 0x47415445 // GATE in ASCII
+
+const certValidity = 14 * 24 * time.Hour
+
+type Option func(*transport) error
+
+func WithClock(cl clock.Clock) Option {
+ return func(t *transport) error {
+ t.clock = cl
+ return nil
+ }
+}
+
+// WithTLSClientConfig sets a custom tls.Config used for dialing.
+// This option is most useful for setting a custom tls.Config.RootCAs certificate pool.
+// When dialing a multiaddr that contains a /certhash component, this library will set InsecureSkipVerify and
+// overwrite the VerifyPeerCertificate callback.
+func WithTLSClientConfig(c *tls.Config) Option {
+ return func(t *transport) error {
+ t.tlsClientConf = c
+ return nil
+ }
+}
+
+type transport struct {
+ privKey ic.PrivKey
+ pid peer.ID
+ clock clock.Clock
+
+ connManager *quicreuse.ConnManager
+ rcmgr network.ResourceManager
+ gater connmgr.ConnectionGater
+
+ listenOnce sync.Once
+ listenOnceErr error
+ certManager *certManager
+ hasCertManager atomic.Bool // set to true once the certManager is initialized
+ staticTLSConf *tls.Config
+ tlsClientConf *tls.Config
+
+ noise *noise.Transport
+
+ connMx sync.Mutex
+ conns map[uint64]*conn // using quic-go's ConnectionTracingKey as map key
+}
+
+var _ tpt.Transport = &transport{}
+var _ tpt.Resolver = &transport{}
+var _ io.Closer = &transport{}
+
+func New(key ic.PrivKey, psk pnet.PSK, connManager *quicreuse.ConnManager, gater connmgr.ConnectionGater, rcmgr network.ResourceManager, opts ...Option) (tpt.Transport, error) {
+ if len(psk) > 0 {
+ log.Error("WebTransport doesn't support private networks yet.")
+ return nil, errors.New("WebTransport doesn't support private networks yet")
+ }
+ if rcmgr == nil {
+ rcmgr = &network.NullResourceManager{}
+ }
+ id, err := peer.IDFromPrivateKey(key)
+ if err != nil {
+ return nil, err
+ }
+ t := &transport{
+ pid: id,
+ privKey: key,
+ rcmgr: rcmgr,
+ gater: gater,
+ clock: clock.New(),
+ connManager: connManager,
+ conns: map[uint64]*conn{},
+ }
+ for _, opt := range opts {
+ if err := opt(t); err != nil {
+ return nil, err
+ }
+ }
+ n, err := noise.New(noise.ID, key, nil)
+ if err != nil {
+ return nil, err
+ }
+ t.noise = n
+ return t, nil
+}
+
+func (t *transport) Dial(ctx context.Context, raddr ma.Multiaddr, p peer.ID) (tpt.CapableConn, error) {
+ scope, err := t.rcmgr.OpenConnection(network.DirOutbound, false, raddr)
+ if err != nil {
+ log.Debugw("resource manager blocked outgoing connection", "peer", p, "addr", raddr, "error", err)
+ return nil, err
+ }
+
+ c, err := t.dialWithScope(ctx, raddr, p, scope)
+ if err != nil {
+ scope.Done()
+ return nil, err
+ }
+
+ return c, nil
+}
+
+func (t *transport) dialWithScope(ctx context.Context, raddr ma.Multiaddr, p peer.ID, scope network.ConnManagementScope) (tpt.CapableConn, error) {
+ _, addr, err := manet.DialArgs(raddr)
+ if err != nil {
+ return nil, err
+ }
+ url := fmt.Sprintf("https://%s%s?type=noise", addr, webtransportHTTPEndpoint)
+ certHashes, err := extractCertHashes(raddr)
+ if err != nil {
+ return nil, err
+ }
+
+ if len(certHashes) == 0 {
+ return nil, errors.New("can't dial webtransport without certhashes")
+ }
+
+ sni, _ := extractSNI(raddr)
+
+ if err := scope.SetPeer(p); err != nil {
+ log.Debugw("resource manager blocked outgoing connection for peer", "peer", p, "addr", raddr, "error", err)
+ return nil, err
+ }
+
+ maddr, _ := ma.SplitFunc(raddr, func(c ma.Component) bool { return c.Protocol().Code == ma.P_WEBTRANSPORT })
+ sess, err := t.dial(ctx, maddr, url, sni, certHashes)
+ if err != nil {
+ return nil, err
+ }
+ sconn, err := t.upgrade(ctx, sess, p, certHashes)
+ if err != nil {
+ sess.CloseWithError(1, "")
+ return nil, err
+ }
+ if t.gater != nil && !t.gater.InterceptSecured(network.DirOutbound, p, sconn) {
+ sess.CloseWithError(errorCodeConnectionGating, "")
+ return nil, fmt.Errorf("secured connection gated")
+ }
+ conn := newConn(t, sess, sconn, scope)
+ t.addConn(sess, conn)
+ return conn, nil
+}
+
+func (t *transport) dial(ctx context.Context, addr ma.Multiaddr, url, sni string, certHashes []multihash.DecodedMultihash) (*webtransport.Session, error) {
+ var tlsConf *tls.Config
+ if t.tlsClientConf != nil {
+ tlsConf = t.tlsClientConf.Clone()
+ } else {
+ tlsConf = &tls.Config{}
+ }
+ tlsConf.NextProtos = append(tlsConf.NextProtos, http3.NextProtoH3)
+
+ if sni != "" {
+ tlsConf.ServerName = sni
+ }
+
+ if len(certHashes) > 0 {
+ // This is not insecure. We verify the certificate ourselves.
+ // See https://www.w3.org/TR/webtransport/#certificate-hashes.
+ tlsConf.InsecureSkipVerify = true
+ tlsConf.VerifyPeerCertificate = func(rawCerts [][]byte, _ [][]*x509.Certificate) error {
+ return verifyRawCerts(rawCerts, certHashes)
+ }
+ }
+ conn, err := t.connManager.DialQUIC(ctx, addr, tlsConf, t.allowWindowIncrease)
+ if err != nil {
+ return nil, err
+ }
+ dialer := webtransport.Dialer{
+ RoundTripper: &http3.RoundTripper{
+ Dial: func(ctx context.Context, addr string, tlsCfg *tls.Config, cfg *quic.Config) (quic.EarlyConnection, error) {
+ return conn.(quic.EarlyConnection), nil
+ },
+ },
+ }
+ rsp, sess, err := dialer.Dial(ctx, url, nil)
+ if err != nil {
+ return nil, err
+ }
+ if rsp.StatusCode < 200 || rsp.StatusCode > 299 {
+ return nil, fmt.Errorf("invalid response status code: %d", rsp.StatusCode)
+ }
+ return sess, err
+}
+
+func (t *transport) upgrade(ctx context.Context, sess *webtransport.Session, p peer.ID, certHashes []multihash.DecodedMultihash) (*connSecurityMultiaddrs, error) {
+ local, err := toWebtransportMultiaddr(sess.LocalAddr())
+ if err != nil {
+ return nil, fmt.Errorf("error determining local addr: %w", err)
+ }
+ remote, err := toWebtransportMultiaddr(sess.RemoteAddr())
+ if err != nil {
+ return nil, fmt.Errorf("error determining remote addr: %w", err)
+ }
+
+ str, err := sess.OpenStreamSync(ctx)
+ if err != nil {
+ return nil, err
+ }
+
+ // Now run a Noise handshake (using early data) and get all the certificate hashes from the server.
+ // We will verify that the certhashes we used to dial is a subset of the certhashes we received from the server.
+ var verified bool
+ n, err := t.noise.WithSessionOptions(noise.EarlyData(newEarlyDataReceiver(func(b *pb.NoiseExtensions) error {
+ decodedCertHashes, err := decodeCertHashesFromProtobuf(b.WebtransportCerthashes)
+ if err != nil {
+ return err
+ }
+ for _, sent := range certHashes {
+ var found bool
+ for _, rcvd := range decodedCertHashes {
+ if sent.Code == rcvd.Code && bytes.Equal(sent.Digest, rcvd.Digest) {
+ found = true
+ break
+ }
+ }
+ if !found {
+ return fmt.Errorf("missing cert hash: %v", sent)
+ }
+ }
+ verified = true
+ return nil
+ }), nil))
+ if err != nil {
+ return nil, fmt.Errorf("failed to create Noise transport: %w", err)
+ }
+ c, err := n.SecureOutbound(ctx, &webtransportStream{Stream: str, wsess: sess}, p)
+ if err != nil {
+ return nil, err
+ }
+ // The Noise handshake _should_ guarantee that our verification callback is called.
+ // Double-check just in case.
+ if !verified {
+ return nil, errors.New("didn't verify")
+ }
+ return &connSecurityMultiaddrs{
+ ConnSecurity: c,
+ ConnMultiaddrs: &connMultiaddrs{local: local, remote: remote},
+ }, nil
+}
+
+func decodeCertHashesFromProtobuf(b [][]byte) ([]multihash.DecodedMultihash, error) {
+ hashes := make([]multihash.DecodedMultihash, 0, len(b))
+ for _, h := range b {
+ dh, err := multihash.Decode(h)
+ if err != nil {
+ return nil, fmt.Errorf("failed to decode hash: %w", err)
+ }
+ hashes = append(hashes, *dh)
+ }
+ return hashes, nil
+}
+
+func (t *transport) CanDial(addr ma.Multiaddr) bool {
+ ok, _ := IsWebtransportMultiaddr(addr)
+ return ok
+}
+
+func (t *transport) Listen(laddr ma.Multiaddr) (tpt.Listener, error) {
+ isWebTransport, _ := IsWebtransportMultiaddr(laddr)
+ if !isWebTransport {
+ return nil, fmt.Errorf("cannot listen on non-WebTransport addr: %s", laddr)
+ }
+ if t.staticTLSConf == nil {
+ t.listenOnce.Do(func() {
+ t.certManager, t.listenOnceErr = newCertManager(t.privKey, t.clock)
+ t.hasCertManager.Store(true)
+ })
+ if t.listenOnceErr != nil {
+ return nil, t.listenOnceErr
+ }
+ } else {
+ return nil, errors.New("static TLS config not supported on WebTransport")
+ }
+ tlsConf := t.staticTLSConf.Clone()
+ if tlsConf == nil {
+ tlsConf = &tls.Config{GetConfigForClient: func(*tls.ClientHelloInfo) (*tls.Config, error) {
+ return t.certManager.GetConfig(), nil
+ }}
+ }
+ tlsConf.NextProtos = append(tlsConf.NextProtos, http3.NextProtoH3)
+
+ ln, err := t.connManager.ListenQUIC(laddr, tlsConf, t.allowWindowIncrease)
+ if err != nil {
+ return nil, err
+ }
+ return newListener(ln, t, t.staticTLSConf != nil)
+}
+
+func (t *transport) Protocols() []int {
+ return []int{ma.P_WEBTRANSPORT}
+}
+
+func (t *transport) Proxy() bool {
+ return false
+}
+
+func (t *transport) Close() error {
+ t.listenOnce.Do(func() {})
+ if t.certManager != nil {
+ return t.certManager.Close()
+ }
+ return nil
+}
+
+func (t *transport) allowWindowIncrease(conn quic.Connection, size uint64) bool {
+ t.connMx.Lock()
+ defer t.connMx.Unlock()
+
+ c, ok := t.conns[conn.Context().Value(quic.ConnectionTracingKey).(uint64)]
+ if !ok {
+ return false
+ }
+ return c.allowWindowIncrease(size)
+}
+
+func (t *transport) addConn(sess *webtransport.Session, c *conn) {
+ t.connMx.Lock()
+ t.conns[sess.Context().Value(quic.ConnectionTracingKey).(uint64)] = c
+ t.connMx.Unlock()
+}
+
+func (t *transport) removeConn(sess *webtransport.Session) {
+ t.connMx.Lock()
+ delete(t.conns, sess.Context().Value(quic.ConnectionTracingKey).(uint64))
+ t.connMx.Unlock()
+}
+
+// extractSNI returns what the SNI should be for the given maddr. If there is an
+// SNI component in the multiaddr, then it will be returned and
+// foundSniComponent will be true. If there's no SNI component, but there is a
+// DNS-like component, then that will be returned for the sni and
+// foundSniComponent will be false (since we didn't find an actual sni component).
+func extractSNI(maddr ma.Multiaddr) (sni string, foundSniComponent bool) {
+ ma.ForEach(maddr, func(c ma.Component) bool {
+ switch c.Protocol().Code {
+ case ma.P_SNI:
+ sni = c.Value()
+ foundSniComponent = true
+ return false
+ case ma.P_DNS, ma.P_DNS4, ma.P_DNS6, ma.P_DNSADDR:
+ sni = c.Value()
+ // Keep going in case we find an `sni` component
+ return true
+ }
+ return true
+ })
+ return sni, foundSniComponent
+}
+
+// Resolve implements transport.Resolver
+func (t *transport) Resolve(_ context.Context, maddr ma.Multiaddr) ([]ma.Multiaddr, error) {
+ sni, foundSniComponent := extractSNI(maddr)
+
+ if foundSniComponent || sni == "" {
+ // The multiaddr already had an sni field, we can keep using it. Or we don't have any sni like thing
+ return []ma.Multiaddr{maddr}, nil
+ }
+
+ beforeQuicMA, afterIncludingQuicMA := ma.SplitFunc(maddr, func(c ma.Component) bool {
+ return c.Protocol().Code == ma.P_QUIC_V1
+ })
+ quicComponent, afterQuicMA := ma.SplitFirst(afterIncludingQuicMA)
+ sniComponent, err := ma.NewComponent(ma.ProtocolWithCode(ma.P_SNI).Name, sni)
+ if err != nil {
+ return nil, err
+ }
+ return []ma.Multiaddr{beforeQuicMA.Encapsulate(quicComponent).Encapsulate(sniComponent).Encapsulate(afterQuicMA)}, nil
+}
+
+// AddCertHashes adds the current certificate hashes to a multiaddress.
+// If called before Listen, it's a no-op.
+func (t *transport) AddCertHashes(m ma.Multiaddr) (ma.Multiaddr, bool) {
+ if !t.hasCertManager.Load() {
+ return m, false
+ }
+ return m.Encapsulate(t.certManager.AddrComponent()), true
+}
diff --git a/vendor/github.com/libp2p/go-libp2p/tools.go b/vendor/github.com/libp2p/go-libp2p/tools.go
new file mode 100644
index 000000000..46a8037df
--- /dev/null
+++ b/vendor/github.com/libp2p/go-libp2p/tools.go
@@ -0,0 +1,9 @@
+//go:build tools
+
+package libp2p
+
+import (
+ _ "github.com/golang/mock/mockgen"
+ _ "golang.org/x/tools/cmd/goimports"
+ _ "google.golang.org/protobuf/cmd/protoc-gen-go"
+)
diff --git a/vendor/github.com/libp2p/go-libp2p/version.json b/vendor/github.com/libp2p/go-libp2p/version.json
index 6578f1967..dffa0bb73 100644
--- a/vendor/github.com/libp2p/go-libp2p/version.json
+++ b/vendor/github.com/libp2p/go-libp2p/version.json
@@ -1,3 +1,3 @@
{
- "version": "v0.22.0"
+ "version": "v0.28.2"
}
diff --git a/vendor/github.com/libp2p/go-msgio/fuzz.go b/vendor/github.com/libp2p/go-msgio/fuzz.go
index 3cf23ff6d..317f55508 100644
--- a/vendor/github.com/libp2p/go-msgio/fuzz.go
+++ b/vendor/github.com/libp2p/go-msgio/fuzz.go
@@ -1,5 +1,4 @@
//go:build gofuzz
-// +build gofuzz
package msgio
diff --git a/vendor/github.com/libp2p/go-msgio/msgio.go b/vendor/github.com/libp2p/go-msgio/msgio.go
index 5e61142c5..7543ea052 100644
--- a/vendor/github.com/libp2p/go-msgio/msgio.go
+++ b/vendor/github.com/libp2p/go-msgio/msgio.go
@@ -8,7 +8,7 @@ import (
pool "github.com/libp2p/go-buffer-pool"
)
-// ErrMsgTooLarge is returned when the message length is exessive
+// ErrMsgTooLarge is returned when the message length is exessive
var ErrMsgTooLarge = errors.New("message too large")
const (
diff --git a/vendor/github.com/libp2p/go-msgio/num.go b/vendor/github.com/libp2p/go-msgio/num.go
index 513c1994f..8ece5ec8a 100644
--- a/vendor/github.com/libp2p/go-msgio/num.go
+++ b/vendor/github.com/libp2p/go-msgio/num.go
@@ -16,8 +16,9 @@ func WriteLen(w io.Writer, l int) error {
// ReadLen reads a length from the given reader.
// if buf is non-nil, it reuses the buffer. Ex:
-// l, err := ReadLen(r, nil)
-// _, err := ReadLen(r, buf)
+//
+// l, err := ReadLen(r, nil)
+// _, err := ReadLen(r, buf)
func ReadLen(r io.Reader, buf []byte) (int, error) {
if len(buf) < 4 {
buf = make([]byte, 4)
diff --git a/vendor/github.com/libp2p/go-msgio/pbio/interfaces.go b/vendor/github.com/libp2p/go-msgio/pbio/interfaces.go
new file mode 100644
index 000000000..e3dffb7bc
--- /dev/null
+++ b/vendor/github.com/libp2p/go-msgio/pbio/interfaces.go
@@ -0,0 +1,40 @@
+// Package pbio reads and writes varint-prefix protobufs, using Google's Protobuf package.
+package pbio
+
+import (
+ "io"
+
+ "google.golang.org/protobuf/proto"
+)
+
+type Writer interface {
+ WriteMsg(proto.Message) error
+}
+
+type WriteCloser interface {
+ Writer
+ io.Closer
+}
+
+type Reader interface {
+ ReadMsg(msg proto.Message) error
+}
+
+type ReadCloser interface {
+ Reader
+ io.Closer
+}
+
+func getSize(v interface{}) (int, bool) {
+ if sz, ok := v.(interface {
+ Size() (n int)
+ }); ok {
+ return sz.Size(), true
+ } else if sz, ok := v.(interface {
+ ProtoSize() (n int)
+ }); ok {
+ return sz.ProtoSize(), true
+ } else {
+ return 0, false
+ }
+}
diff --git a/vendor/github.com/libp2p/go-msgio/pbio/uvarint_reader.go b/vendor/github.com/libp2p/go-msgio/pbio/uvarint_reader.go
new file mode 100644
index 000000000..415214763
--- /dev/null
+++ b/vendor/github.com/libp2p/go-msgio/pbio/uvarint_reader.go
@@ -0,0 +1,93 @@
+// Adapted from gogo/protobuf to use multiformats/go-varint for
+// efficient, interoperable length-prefixing.
+//
+// # Protocol Buffers for Go with Gadgets
+//
+// Copyright (c) 2013, The GoGo Authors. All rights reserved.
+// http://github.com/gogo/protobuf
+//
+// Redistribution and use in source and binary forms, with or without
+// modification, are permitted provided that the following conditions are
+// met:
+//
+// - Redistributions of source code must retain the above copyright
+//
+// notice, this list of conditions and the following disclaimer.
+// - Redistributions in binary form must reproduce the above
+//
+// copyright notice, this list of conditions and the following disclaimer
+// in the documentation and/or other materials provided with the
+// distribution.
+//
+// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+package pbio
+
+import (
+ "bufio"
+ "fmt"
+ "io"
+ "os"
+ "runtime/debug"
+
+ "google.golang.org/protobuf/proto"
+
+ "github.com/multiformats/go-varint"
+)
+
+type uvarintReader struct {
+ r *bufio.Reader
+ buf []byte
+ maxSize int
+ closer io.Closer
+}
+
+func NewDelimitedReader(r io.Reader, maxSize int) ReadCloser {
+ var closer io.Closer
+ if c, ok := r.(io.Closer); ok {
+ closer = c
+ }
+ return &uvarintReader{bufio.NewReader(r), nil, maxSize, closer}
+}
+
+func (ur *uvarintReader) ReadMsg(msg proto.Message) (err error) {
+ defer func() {
+ if rerr := recover(); rerr != nil {
+ fmt.Fprintf(os.Stderr, "caught panic: %s\n%s\n", rerr, debug.Stack())
+ err = fmt.Errorf("panic reading message: %s", rerr)
+ }
+ }()
+
+ length64, err := varint.ReadUvarint(ur.r)
+ if err != nil {
+ return err
+ }
+ length := int(length64)
+ if length < 0 || length > ur.maxSize {
+ return io.ErrShortBuffer
+ }
+ if len(ur.buf) < length {
+ ur.buf = make([]byte, length)
+ }
+ buf := ur.buf[:length]
+ if _, err := io.ReadFull(ur.r, buf); err != nil {
+ return err
+ }
+ return proto.Unmarshal(buf, msg)
+}
+
+func (ur *uvarintReader) Close() error {
+ if ur.closer != nil {
+ return ur.closer.Close()
+ }
+ return nil
+}
diff --git a/vendor/github.com/libp2p/go-msgio/pbio/uvarint_writer.go b/vendor/github.com/libp2p/go-msgio/pbio/uvarint_writer.go
new file mode 100644
index 000000000..2f18059fb
--- /dev/null
+++ b/vendor/github.com/libp2p/go-msgio/pbio/uvarint_writer.go
@@ -0,0 +1,103 @@
+// Adapted from gogo/protobuf to use multiformats/go-varint for
+// efficient, interoperable length-prefixing.
+//
+// # Protocol Buffers for Go with Gadgets
+//
+// Copyright (c) 2013, The GoGo Authors. All rights reserved.
+// http://github.com/gogo/protobuf
+//
+// Redistribution and use in source and binary forms, with or without
+// modification, are permitted provided that the following conditions are
+// met:
+//
+// - Redistributions of source code must retain the above copyright
+//
+// notice, this list of conditions and the following disclaimer.
+// - Redistributions in binary form must reproduce the above
+//
+// copyright notice, this list of conditions and the following disclaimer
+// in the documentation and/or other materials provided with the
+// distribution.
+//
+// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+package pbio
+
+import (
+ "fmt"
+ "io"
+ "os"
+ "runtime/debug"
+
+ "google.golang.org/protobuf/proto"
+
+ "github.com/multiformats/go-varint"
+)
+
+type uvarintWriter struct {
+ w io.Writer
+ lenBuf []byte
+ buffer []byte
+}
+
+func NewDelimitedWriter(w io.Writer) WriteCloser {
+ return &uvarintWriter{w, make([]byte, varint.MaxLenUvarint63), nil}
+}
+
+func (uw *uvarintWriter) WriteMsg(msg proto.Message) (err error) {
+ defer func() {
+ if rerr := recover(); rerr != nil {
+ fmt.Fprintf(os.Stderr, "caught panic: %s\n%s\n", rerr, debug.Stack())
+ err = fmt.Errorf("panic reading message: %s", rerr)
+ }
+ }()
+
+ var data []byte
+ if m, ok := msg.(interface {
+ MarshalTo(data []byte) (n int, err error)
+ }); ok {
+ n, ok := getSize(m)
+ if ok {
+ if n+varint.MaxLenUvarint63 >= len(uw.buffer) {
+ uw.buffer = make([]byte, n+varint.MaxLenUvarint63)
+ }
+ lenOff := varint.PutUvarint(uw.buffer, uint64(n))
+ _, err = m.MarshalTo(uw.buffer[lenOff:])
+ if err != nil {
+ return err
+ }
+ _, err = uw.w.Write(uw.buffer[:lenOff+n])
+ return err
+ }
+ }
+
+ // fallback
+ data, err = proto.Marshal(msg)
+ if err != nil {
+ return err
+ }
+ length := uint64(len(data))
+ n := varint.PutUvarint(uw.lenBuf, length)
+ _, err = uw.w.Write(uw.lenBuf[:n])
+ if err != nil {
+ return err
+ }
+ _, err = uw.w.Write(data)
+ return err
+}
+
+func (uw *uvarintWriter) Close() error {
+ if closer, ok := uw.w.(io.Closer); ok {
+ return closer.Close()
+ }
+ return nil
+}
diff --git a/vendor/github.com/libp2p/go-msgio/protoio/interfaces.go b/vendor/github.com/libp2p/go-msgio/protoio/interfaces.go
index 1a42713e4..aaa605566 100644
--- a/vendor/github.com/libp2p/go-msgio/protoio/interfaces.go
+++ b/vendor/github.com/libp2p/go-msgio/protoio/interfaces.go
@@ -1,8 +1,7 @@
-//
// Adapted from gogo/protobuf to use multiformats/go-varint for
// efficient, interoperable length-prefixing.
//
-// Protocol Buffers for Go with Gadgets
+// # Protocol Buffers for Go with Gadgets
//
// Copyright (c) 2013, The GoGo Authors. All rights reserved.
// http://github.com/gogo/protobuf
@@ -11,9 +10,11 @@
// modification, are permitted provided that the following conditions are
// met:
//
-// * Redistributions of source code must retain the above copyright
+// - Redistributions of source code must retain the above copyright
+//
// notice, this list of conditions and the following disclaimer.
-// * Redistributions in binary form must reproduce the above
+// - Redistributions in binary form must reproduce the above
+//
// copyright notice, this list of conditions and the following disclaimer
// in the documentation and/or other materials provided with the
// distribution.
@@ -30,6 +31,7 @@
// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
//
+// Deprecated: GoGo Protobuf is deprecated and unmaintained.
package protoio
import (
diff --git a/vendor/github.com/libp2p/go-msgio/protoio/uvarint_reader.go b/vendor/github.com/libp2p/go-msgio/protoio/uvarint_reader.go
index 117346715..6722cd3dd 100644
--- a/vendor/github.com/libp2p/go-msgio/protoio/uvarint_reader.go
+++ b/vendor/github.com/libp2p/go-msgio/protoio/uvarint_reader.go
@@ -1,8 +1,7 @@
-//
// Adapted from gogo/protobuf to use multiformats/go-varint for
// efficient, interoperable length-prefixing.
//
-// Protocol Buffers for Go with Gadgets
+// # Protocol Buffers for Go with Gadgets
//
// Copyright (c) 2013, The GoGo Authors. All rights reserved.
// http://github.com/gogo/protobuf
@@ -11,9 +10,11 @@
// modification, are permitted provided that the following conditions are
// met:
//
-// * Redistributions of source code must retain the above copyright
+// - Redistributions of source code must retain the above copyright
+//
// notice, this list of conditions and the following disclaimer.
-// * Redistributions in binary form must reproduce the above
+// - Redistributions in binary form must reproduce the above
+//
// copyright notice, this list of conditions and the following disclaimer
// in the documentation and/or other materials provided with the
// distribution.
@@ -29,7 +30,6 @@
// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
-//
package protoio
import (
diff --git a/vendor/github.com/libp2p/go-msgio/protoio/uvarint_writer.go b/vendor/github.com/libp2p/go-msgio/protoio/uvarint_writer.go
index ace982e11..e3110752c 100644
--- a/vendor/github.com/libp2p/go-msgio/protoio/uvarint_writer.go
+++ b/vendor/github.com/libp2p/go-msgio/protoio/uvarint_writer.go
@@ -1,8 +1,7 @@
-//
// Adapted from gogo/protobuf to use multiformats/go-varint for
// efficient, interoperable length-prefixing.
//
-// Protocol Buffers for Go with Gadgets
+// # Protocol Buffers for Go with Gadgets
//
// Copyright (c) 2013, The GoGo Authors. All rights reserved.
// http://github.com/gogo/protobuf
@@ -11,9 +10,11 @@
// modification, are permitted provided that the following conditions are
// met:
//
-// * Redistributions of source code must retain the above copyright
+// - Redistributions of source code must retain the above copyright
+//
// notice, this list of conditions and the following disclaimer.
-// * Redistributions in binary form must reproduce the above
+// - Redistributions in binary form must reproduce the above
+//
// copyright notice, this list of conditions and the following disclaimer
// in the documentation and/or other materials provided with the
// distribution.
@@ -29,7 +30,6 @@
// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
-//
package protoio
import (
diff --git a/vendor/github.com/libp2p/go-msgio/version.json b/vendor/github.com/libp2p/go-msgio/version.json
index 1437d5b73..a654d65ab 100644
--- a/vendor/github.com/libp2p/go-msgio/version.json
+++ b/vendor/github.com/libp2p/go-msgio/version.json
@@ -1,3 +1,3 @@
{
- "version": "v0.2.0"
+ "version": "v0.3.0"
}
diff --git a/vendor/github.com/libp2p/go-nat/nat.go b/vendor/github.com/libp2p/go-nat/nat.go
index 6d295e66f..6b3e19c44 100644
--- a/vendor/github.com/libp2p/go-nat/nat.go
+++ b/vendor/github.com/libp2p/go-nat/nat.go
@@ -29,10 +29,10 @@ type NAT interface {
GetInternalAddress() (addr net.IP, err error)
// AddPortMapping maps a port on the local host to an external port.
- AddPortMapping(protocol string, internalPort int, description string, timeout time.Duration) (mappedExternalPort int, err error)
+ AddPortMapping(ctx context.Context, protocol string, internalPort int, description string, timeout time.Duration) (mappedExternalPort int, err error)
// DeletePortMapping removes a port mapping.
- DeletePortMapping(protocol string, internalPort int) (err error)
+ DeletePortMapping(ctx context.Context, protocol string, internalPort int) (err error)
}
// DiscoverNATs returns all NATs discovered in the network.
@@ -118,7 +118,8 @@ func DiscoverGateway(ctx context.Context) (NAT, error) {
return bestNAT, nil
}
+var random = rand.New(rand.NewSource(time.Now().UnixNano()))
+
func randomPort() int {
- rand.Seed(time.Now().UnixNano())
- return rand.Intn(math.MaxUint16-10000) + 10000
+ return random.Intn(math.MaxUint16-10000) + 10000
}
diff --git a/vendor/github.com/libp2p/go-nat/natpmp.go b/vendor/github.com/libp2p/go-nat/natpmp.go
index 495d42b49..2378d8d7e 100644
--- a/vendor/github.com/libp2p/go-nat/natpmp.go
+++ b/vendor/github.com/libp2p/go-nat/natpmp.go
@@ -5,7 +5,7 @@ import (
"net"
"time"
- "github.com/jackpal/go-nat-pmp"
+ natpmp "github.com/jackpal/go-nat-pmp"
)
var (
@@ -95,7 +95,7 @@ func (n *natpmpNAT) GetExternalAddress() (addr net.IP, err error) {
return net.IPv4(d[0], d[1], d[2], d[3]), nil
}
-func (n *natpmpNAT) AddPortMapping(protocol string, internalPort int, description string, timeout time.Duration) (int, error) {
+func (n *natpmpNAT) AddPortMapping(_ context.Context, protocol string, internalPort int, _ string, timeout time.Duration) (int, error) {
var (
err error
)
@@ -122,7 +122,7 @@ func (n *natpmpNAT) AddPortMapping(protocol string, internalPort int, descriptio
return 0, err
}
-func (n *natpmpNAT) DeletePortMapping(protocol string, internalPort int) (err error) {
+func (n *natpmpNAT) DeletePortMapping(_ context.Context, _ string, internalPort int) (err error) {
delete(n.ports, internalPort)
return nil
}
diff --git a/vendor/github.com/libp2p/go-nat/upnp.go b/vendor/github.com/libp2p/go-nat/upnp.go
index ccfeb14a2..c50b952bb 100644
--- a/vendor/github.com/libp2p/go-nat/upnp.go
+++ b/vendor/github.com/libp2p/go-nat/upnp.go
@@ -14,9 +14,7 @@ import (
"github.com/koron/go-ssdp"
)
-var (
- _ NAT = (*upnp_NAT)(nil)
-)
+var _ NAT = (*upnp_NAT)(nil)
func discoverUPNP_IG1(ctx context.Context) <-chan NAT {
res := make(chan NAT)
@@ -24,7 +22,7 @@ func discoverUPNP_IG1(ctx context.Context) <-chan NAT {
defer close(res)
// find devices
- devs, err := goupnp.DiscoverDevices(internetgateway1.URN_WANConnectionDevice_1)
+ devs, err := goupnp.DiscoverDevicesCtx(ctx, internetgateway1.URN_WANConnectionDevice_1)
if err != nil {
return
}
@@ -45,7 +43,7 @@ func discoverUPNP_IG1(ctx context.Context) <-chan NAT {
RootDevice: dev.Root,
Service: srv,
}}
- _, isNat, err := client.GetNATRSIPStatus()
+ _, isNat, err := client.GetNATRSIPStatusCtx(ctx)
if err == nil && isNat {
select {
case res <- &upnp_NAT{client, make(map[int]int), "UPNP (IG1-IP1)", dev.Root}:
@@ -59,7 +57,7 @@ func discoverUPNP_IG1(ctx context.Context) <-chan NAT {
RootDevice: dev.Root,
Service: srv,
}}
- _, isNat, err := client.GetNATRSIPStatus()
+ _, isNat, err := client.GetNATRSIPStatusCtx(ctx)
if err == nil && isNat {
select {
case res <- &upnp_NAT{client, make(map[int]int), "UPNP (IG1-PPP1)", dev.Root}:
@@ -81,7 +79,7 @@ func discoverUPNP_IG2(ctx context.Context) <-chan NAT {
defer close(res)
// find devices
- devs, err := goupnp.DiscoverDevices(internetgateway2.URN_WANConnectionDevice_2)
+ devs, err := goupnp.DiscoverDevicesCtx(ctx, internetgateway2.URN_WANConnectionDevice_2)
if err != nil {
return
}
@@ -102,7 +100,7 @@ func discoverUPNP_IG2(ctx context.Context) <-chan NAT {
RootDevice: dev.Root,
Service: srv,
}}
- _, isNat, err := client.GetNATRSIPStatus()
+ _, isNat, err := client.GetNATRSIPStatusCtx(ctx)
if err == nil && isNat {
select {
case res <- &upnp_NAT{client, make(map[int]int), "UPNP (IG2-IP1)", dev.Root}:
@@ -116,7 +114,7 @@ func discoverUPNP_IG2(ctx context.Context) <-chan NAT {
RootDevice: dev.Root,
Service: srv,
}}
- _, isNat, err := client.GetNATRSIPStatus()
+ _, isNat, err := client.GetNATRSIPStatusCtx(ctx)
if err == nil && isNat {
select {
case res <- &upnp_NAT{client, make(map[int]int), "UPNP (IG2-IP2)", dev.Root}:
@@ -130,7 +128,7 @@ func discoverUPNP_IG2(ctx context.Context) <-chan NAT {
RootDevice: dev.Root,
Service: srv,
}}
- _, isNat, err := client.GetNATRSIPStatus()
+ _, isNat, err := client.GetNATRSIPStatusCtx(ctx)
if err == nil && isNat {
select {
case res <- &upnp_NAT{client, make(map[int]int), "UPNP (IG2-PPP1)", dev.Root}:
@@ -167,7 +165,7 @@ func discoverUPNP_GenIGDev(ctx context.Context) <-chan NAT {
if err != nil {
return
}
- RootDevice, err := goupnp.DeviceByURL(DeviceURL)
+ RootDevice, err := goupnp.DeviceByURLCtx(ctx, DeviceURL)
if err != nil {
return
}
@@ -183,7 +181,7 @@ func discoverUPNP_GenIGDev(ctx context.Context) <-chan NAT {
RootDevice: RootDevice,
Service: srv,
}}
- _, isNat, err := client.GetNATRSIPStatus()
+ _, isNat, err := client.GetNATRSIPStatusCtx(ctx)
if err == nil && isNat {
select {
case res <- &upnp_NAT{client, make(map[int]int), "UPNP (IG1-IP1)", RootDevice}:
@@ -197,7 +195,7 @@ func discoverUPNP_GenIGDev(ctx context.Context) <-chan NAT {
RootDevice: RootDevice,
Service: srv,
}}
- _, isNat, err := client.GetNATRSIPStatus()
+ _, isNat, err := client.GetNATRSIPStatusCtx(ctx)
if err == nil && isNat {
select {
case res <- &upnp_NAT{client, make(map[int]int), "UPNP (IG1-PPP1)", RootDevice}:
@@ -213,8 +211,8 @@ func discoverUPNP_GenIGDev(ctx context.Context) <-chan NAT {
type upnp_NAT_Client interface {
GetExternalIPAddress() (string, error)
- AddPortMapping(string, uint16, string, uint16, string, bool, string, uint32) error
- DeletePortMapping(string, uint16, string) error
+ AddPortMappingCtx(context.Context, string, uint16, string, uint16, string, bool, string, uint32) error
+ DeletePortMappingCtx(context.Context, string, uint16, string) error
}
type upnp_NAT struct {
@@ -249,7 +247,7 @@ func mapProtocol(s string) string {
}
}
-func (u *upnp_NAT) AddPortMapping(protocol string, internalPort int, description string, timeout time.Duration) (int, error) {
+func (u *upnp_NAT) AddPortMapping(ctx context.Context, protocol string, internalPort int, description string, timeout time.Duration) (int, error) {
ip, err := u.GetInternalAddress()
if err != nil {
return 0, nil
@@ -258,7 +256,7 @@ func (u *upnp_NAT) AddPortMapping(protocol string, internalPort int, description
timeoutInSeconds := uint32(timeout / time.Second)
if externalPort := u.ports[internalPort]; externalPort > 0 {
- err = u.c.AddPortMapping("", uint16(externalPort), mapProtocol(protocol), uint16(internalPort), ip.String(), true, description, timeoutInSeconds)
+ err = u.c.AddPortMappingCtx(ctx, "", uint16(externalPort), mapProtocol(protocol), uint16(internalPort), ip.String(), true, description, timeoutInSeconds)
if err == nil {
return externalPort, nil
}
@@ -266,7 +264,7 @@ func (u *upnp_NAT) AddPortMapping(protocol string, internalPort int, description
for i := 0; i < 3; i++ {
externalPort := randomPort()
- err = u.c.AddPortMapping("", uint16(externalPort), mapProtocol(protocol), uint16(internalPort), ip.String(), true, description, timeoutInSeconds)
+ err = u.c.AddPortMappingCtx(ctx, "", uint16(externalPort), mapProtocol(protocol), uint16(internalPort), ip.String(), true, description, timeoutInSeconds)
if err == nil {
u.ports[internalPort] = externalPort
return externalPort, nil
@@ -276,10 +274,10 @@ func (u *upnp_NAT) AddPortMapping(protocol string, internalPort int, description
return 0, err
}
-func (u *upnp_NAT) DeletePortMapping(protocol string, internalPort int) error {
+func (u *upnp_NAT) DeletePortMapping(ctx context.Context, protocol string, internalPort int) error {
if externalPort := u.ports[internalPort]; externalPort > 0 {
delete(u.ports, internalPort)
- return u.c.DeletePortMapping("", uint16(externalPort), mapProtocol(protocol))
+ return u.c.DeletePortMappingCtx(ctx, "", uint16(externalPort), mapProtocol(protocol))
}
return nil
diff --git a/vendor/github.com/libp2p/go-nat/version.json b/vendor/github.com/libp2p/go-nat/version.json
new file mode 100644
index 000000000..1437d5b73
--- /dev/null
+++ b/vendor/github.com/libp2p/go-nat/version.json
@@ -0,0 +1,3 @@
+{
+ "version": "v0.2.0"
+}
diff --git a/vendor/github.com/libp2p/go-netroute/common.go b/vendor/github.com/libp2p/go-netroute/common.go
index 7836e5e59..5007397b3 100644
--- a/vendor/github.com/libp2p/go-netroute/common.go
+++ b/vendor/github.com/libp2p/go-netroute/common.go
@@ -6,8 +6,8 @@
// Originally found in
// https://github.com/google/gopacket/blob/master/routing/routing.go
-// * Route selection modified to choose most selective route
-// to break ties when route priority is insufficient.
+// - Route selection modified to choose most selective route
+// to break ties when route priority is insufficient.
package netroute
import (
@@ -27,6 +27,34 @@ type rtInfo struct {
Priority uint32
}
+func (rt rtInfo) IsMoreSpecThan(mostSpecificRt *rtInfo) bool {
+ if mostSpecificRt == nil {
+ return true
+ }
+
+ var candSpec, curSpec int
+ if rt.Dst != nil {
+ candSpec, _ = rt.Dst.Mask.Size()
+ }
+ if mostSpecificRt.Dst != nil {
+ curSpec, _ = mostSpecificRt.Dst.Mask.Size()
+ }
+
+ if candSpec > curSpec {
+ return true
+ } else if candSpec < curSpec {
+ return false
+ }
+
+ // Windows and MacOS hasn't metric/priority on rule entry,
+ // But the interface device has the priority property.
+ //
+ // Before we find more correctly way on different OS platform,
+ // we keep the same rule selecting logical as before which
+ // is more later more special
+ return mostSpecificRt.Priority >= rt.Priority
+}
+
// routeSlice implements sort.Interface to sort routes by Priority.
type routeSlice []*rtInfo
@@ -121,19 +149,9 @@ func (r *router) route(routes routeSlice, input net.HardwareAddr, src, dst net.I
if rt.Dst != nil && !rt.Dst.Contains(dst) {
continue
}
- if mostSpecificRt != nil {
- var candSpec, curSpec int
- if rt.Dst != nil {
- candSpec, _ = rt.Dst.Mask.Size()
- }
- if mostSpecificRt.Dst != nil {
- curSpec, _ = mostSpecificRt.Dst.Mask.Size()
- }
- if candSpec < curSpec {
- continue
- }
+ if rt.IsMoreSpecThan(mostSpecificRt) {
+ mostSpecificRt = rt
}
- mostSpecificRt = rt
}
if mostSpecificRt != nil {
return int(mostSpecificRt.OutputIface), mostSpecificRt.Gateway, mostSpecificRt.PrefSrc, nil
diff --git a/vendor/github.com/libp2p/go-netroute/netroute_bsd.go b/vendor/github.com/libp2p/go-netroute/netroute_bsd.go
index 85304d975..cad94a983 100644
--- a/vendor/github.com/libp2p/go-netroute/netroute_bsd.go
+++ b/vendor/github.com/libp2p/go-netroute/netroute_bsd.go
@@ -5,7 +5,6 @@
// tree.
//go:build darwin || dragonfly || freebsd || netbsd || openbsd
-// +build darwin dragonfly freebsd netbsd openbsd
// This is a BSD import for the routing structure initially found in
// https://github.com/google/gopacket/blob/master/routing/routing.go
diff --git a/vendor/github.com/libp2p/go-netroute/netroute_linux.go b/vendor/github.com/libp2p/go-netroute/netroute_linux.go
index 5994d4616..1f039db28 100644
--- a/vendor/github.com/libp2p/go-netroute/netroute_linux.go
+++ b/vendor/github.com/libp2p/go-netroute/netroute_linux.go
@@ -5,7 +5,6 @@
// tree.
//go:build linux
-// +build linux
// Generate a local routing table structure following the code at
// https://github.com/google/gopacket/blob/master/routing/routing.go
diff --git a/vendor/github.com/libp2p/go-netroute/netroute_stub.go b/vendor/github.com/libp2p/go-netroute/netroute_stub.go
index 68fbd4ae5..1235b456e 100644
--- a/vendor/github.com/libp2p/go-netroute/netroute_stub.go
+++ b/vendor/github.com/libp2p/go-netroute/netroute_stub.go
@@ -1,7 +1,6 @@
// A stub routing table conformant interface for js/wasm environments.
//go:build js && wasm
-// +build js,wasm
package netroute
diff --git a/vendor/github.com/libp2p/go-netroute/netroute_windows.go b/vendor/github.com/libp2p/go-netroute/netroute_windows.go
index d938316cb..67c501455 100644
--- a/vendor/github.com/libp2p/go-netroute/netroute_windows.go
+++ b/vendor/github.com/libp2p/go-netroute/netroute_windows.go
@@ -1,5 +1,4 @@
//go:build windows
-// +build windows
package netroute
@@ -194,12 +193,18 @@ func getIface(index uint32) *net.Interface {
return nil
}
+ physAddrLen := int(ifRow.PhysAddrLen)
+ if len(ifRow.PhysAddr) < physAddrLen && physAddrLen >= 0 {
+ physAddrLen = len(ifRow.PhysAddr)
+ }
+ physAddr := ifRow.PhysAddr[:physAddrLen]
+
ifaces, err := net.Interfaces()
if err != nil {
return nil
}
for _, iface := range ifaces {
- if bytes.Equal(iface.HardwareAddr, ifRow.PhysAddr[:]) {
+ if bytes.Equal(iface.HardwareAddr, physAddr) {
return &iface
}
}
diff --git a/vendor/github.com/libp2p/go-netroute/sockaddr_windows.go b/vendor/github.com/libp2p/go-netroute/sockaddr_windows.go
index 8d8d7b6e9..88f45d05b 100644
--- a/vendor/github.com/libp2p/go-netroute/sockaddr_windows.go
+++ b/vendor/github.com/libp2p/go-netroute/sockaddr_windows.go
@@ -1,5 +1,4 @@
//go:build windows
-// +build windows
package netroute
diff --git a/vendor/github.com/libp2p/go-netroute/version.json b/vendor/github.com/libp2p/go-netroute/version.json
index 1437d5b73..002fae3b8 100644
--- a/vendor/github.com/libp2p/go-netroute/version.json
+++ b/vendor/github.com/libp2p/go-netroute/version.json
@@ -1,3 +1,3 @@
{
- "version": "v0.2.0"
+ "version": "v0.2.1"
}
diff --git a/vendor/github.com/libp2p/go-openssl/.gitignore b/vendor/github.com/libp2p/go-openssl/.gitignore
deleted file mode 100644
index 805d350b7..000000000
--- a/vendor/github.com/libp2p/go-openssl/.gitignore
+++ /dev/null
@@ -1 +0,0 @@
-openssl.test
diff --git a/vendor/github.com/libp2p/go-openssl/AUTHORS b/vendor/github.com/libp2p/go-openssl/AUTHORS
deleted file mode 100644
index a048c1ea1..000000000
--- a/vendor/github.com/libp2p/go-openssl/AUTHORS
+++ /dev/null
@@ -1,24 +0,0 @@
-Andrew Brampton
-Anton Baklanov
-Carlos Martín Nieto
-Charles Strahan
-Christopher Dudley
-Christopher Fredericks
-Colin Misare
-dequis
-Gabriel Russell
-Giulio
-Jakob Unterwurzacher
-Juuso Haavisto
-kujenga
-Phus Lu
-Russ Egan
-Ryan Hileman
-Scott J. Goldman
-Scott Kidder
-Space Monkey, Inc
-Stephen Gallagher
-Viacheslav Biriukov
-Zack Owens
-Ramesh Rayaprolu
-Paras Shah
diff --git a/vendor/github.com/libp2p/go-openssl/LICENSE b/vendor/github.com/libp2p/go-openssl/LICENSE
deleted file mode 100644
index 37ec93a14..000000000
--- a/vendor/github.com/libp2p/go-openssl/LICENSE
+++ /dev/null
@@ -1,191 +0,0 @@
-Apache License
-Version 2.0, January 2004
-http://www.apache.org/licenses/
-
-TERMS AND CONDITIONS FOR USE, REPRODUCTION, AND DISTRIBUTION
-
-1. Definitions.
-
-"License" shall mean the terms and conditions for use, reproduction, and
-distribution as defined by Sections 1 through 9 of this document.
-
-"Licensor" shall mean the copyright owner or entity authorized by the copyright
-owner that is granting the License.
-
-"Legal Entity" shall mean the union of the acting entity and all other entities
-that control, are controlled by, or are under common control with that entity.
-For the purposes of this definition, "control" means (i) the power, direct or
-indirect, to cause the direction or management of such entity, whether by
-contract or otherwise, or (ii) ownership of fifty percent (50%) or more of the
-outstanding shares, or (iii) beneficial ownership of such entity.
-
-"You" (or "Your") shall mean an individual or Legal Entity exercising
-permissions granted by this License.
-
-"Source" form shall mean the preferred form for making modifications, including
-but not limited to software source code, documentation source, and configuration
-files.
-
-"Object" form shall mean any form resulting from mechanical transformation or
-translation of a Source form, including but not limited to compiled object code,
-generated documentation, and conversions to other media types.
-
-"Work" shall mean the work of authorship, whether in Source or Object form, made
-available under the License, as indicated by a copyright notice that is included
-in or attached to the work (an example is provided in the Appendix below).
-
-"Derivative Works" shall mean any work, whether in Source or Object form, that
-is based on (or derived from) the Work and for which the editorial revisions,
-annotations, elaborations, or other modifications represent, as a whole, an
-original work of authorship. For the purposes of this License, Derivative Works
-shall not include works that remain separable from, or merely link (or bind by
-name) to the interfaces of, the Work and Derivative Works thereof.
-
-"Contribution" shall mean any work of authorship, including the original version
-of the Work and any modifications or additions to that Work or Derivative Works
-thereof, that is intentionally submitted to Licensor for inclusion in the Work
-by the copyright owner or by an individual or Legal Entity authorized to submit
-on behalf of the copyright owner. For the purposes of this definition,
-"submitted" means any form of electronic, verbal, or written communication sent
-to the Licensor or its representatives, including but not limited to
-communication on electronic mailing lists, source code control systems, and
-issue tracking systems that are managed by, or on behalf of, the Licensor for
-the purpose of discussing and improving the Work, but excluding communication
-that is conspicuously marked or otherwise designated in writing by the copyright
-owner as "Not a Contribution."
-
-"Contributor" shall mean Licensor and any individual or Legal Entity on behalf
-of whom a Contribution has been received by Licensor and subsequently
-incorporated within the Work.
-
-2. Grant of Copyright License.
-
-Subject to the terms and conditions of this License, each Contributor hereby
-grants to You a perpetual, worldwide, non-exclusive, no-charge, royalty-free,
-irrevocable copyright license to reproduce, prepare Derivative Works of,
-publicly display, publicly perform, sublicense, and distribute the Work and such
-Derivative Works in Source or Object form.
-
-3. Grant of Patent License.
-
-Subject to the terms and conditions of this License, each Contributor hereby
-grants to You a perpetual, worldwide, non-exclusive, no-charge, royalty-free,
-irrevocable (except as stated in this section) patent license to make, have
-made, use, offer to sell, sell, import, and otherwise transfer the Work, where
-such license applies only to those patent claims licensable by such Contributor
-that are necessarily infringed by their Contribution(s) alone or by combination
-of their Contribution(s) with the Work to which such Contribution(s) was
-submitted. If You institute patent litigation against any entity (including a
-cross-claim or counterclaim in a lawsuit) alleging that the Work or a
-Contribution incorporated within the Work constitutes direct or contributory
-patent infringement, then any patent licenses granted to You under this License
-for that Work shall terminate as of the date such litigation is filed.
-
-4. Redistribution.
-
-You may reproduce and distribute copies of the Work or Derivative Works thereof
-in any medium, with or without modifications, and in Source or Object form,
-provided that You meet the following conditions:
-
-You must give any other recipients of the Work or Derivative Works a copy of
-this License; and
-You must cause any modified files to carry prominent notices stating that You
-changed the files; and
-You must retain, in the Source form of any Derivative Works that You distribute,
-all copyright, patent, trademark, and attribution notices from the Source form
-of the Work, excluding those notices that do not pertain to any part of the
-Derivative Works; and
-If the Work includes a "NOTICE" text file as part of its distribution, then any
-Derivative Works that You distribute must include a readable copy of the
-attribution notices contained within such NOTICE file, excluding those notices
-that do not pertain to any part of the Derivative Works, in at least one of the
-following places: within a NOTICE text file distributed as part of the
-Derivative Works; within the Source form or documentation, if provided along
-with the Derivative Works; or, within a display generated by the Derivative
-Works, if and wherever such third-party notices normally appear. The contents of
-the NOTICE file are for informational purposes only and do not modify the
-License. You may add Your own attribution notices within Derivative Works that
-You distribute, alongside or as an addendum to the NOTICE text from the Work,
-provided that such additional attribution notices cannot be construed as
-modifying the License.
-You may add Your own copyright statement to Your modifications and may provide
-additional or different license terms and conditions for use, reproduction, or
-distribution of Your modifications, or for any such Derivative Works as a whole,
-provided Your use, reproduction, and distribution of the Work otherwise complies
-with the conditions stated in this License.
-
-5. Submission of Contributions.
-
-Unless You explicitly state otherwise, any Contribution intentionally submitted
-for inclusion in the Work by You to the Licensor shall be under the terms and
-conditions of this License, without any additional terms or conditions.
-Notwithstanding the above, nothing herein shall supersede or modify the terms of
-any separate license agreement you may have executed with Licensor regarding
-such Contributions.
-
-6. Trademarks.
-
-This License does not grant permission to use the trade names, trademarks,
-service marks, or product names of the Licensor, except as required for
-reasonable and customary use in describing the origin of the Work and
-reproducing the content of the NOTICE file.
-
-7. Disclaimer of Warranty.
-
-Unless required by applicable law or agreed to in writing, Licensor provides the
-Work (and each Contributor provides its Contributions) on an "AS IS" BASIS,
-WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied,
-including, without limitation, any warranties or conditions of TITLE,
-NON-INFRINGEMENT, MERCHANTABILITY, or FITNESS FOR A PARTICULAR PURPOSE. You are
-solely responsible for determining the appropriateness of using or
-redistributing the Work and assume any risks associated with Your exercise of
-permissions under this License.
-
-8. Limitation of Liability.
-
-In no event and under no legal theory, whether in tort (including negligence),
-contract, or otherwise, unless required by applicable law (such as deliberate
-and grossly negligent acts) or agreed to in writing, shall any Contributor be
-liable to You for damages, including any direct, indirect, special, incidental,
-or consequential damages of any character arising as a result of this License or
-out of the use or inability to use the Work (including but not limited to
-damages for loss of goodwill, work stoppage, computer failure or malfunction, or
-any and all other commercial damages or losses), even if such Contributor has
-been advised of the possibility of such damages.
-
-9. Accepting Warranty or Additional Liability.
-
-While redistributing the Work or Derivative Works thereof, You may choose to
-offer, and charge a fee for, acceptance of support, warranty, indemnity, or
-other liability obligations and/or rights consistent with this License. However,
-in accepting such obligations, You may act only on Your own behalf and on Your
-sole responsibility, not on behalf of any other Contributor, and only if You
-agree to indemnify, defend, and hold each Contributor harmless for any liability
-incurred by, or claims asserted against, such Contributor by reason of your
-accepting any such warranty or additional liability.
-
-END OF TERMS AND CONDITIONS
-
-APPENDIX: How to apply the Apache License to your work
-
-To apply the Apache License to your work, attach the following boilerplate
-notice, with the fields enclosed by brackets "[]" replaced with your own
-identifying information. (Don't include the brackets!) The text should be
-enclosed in the appropriate comment syntax for the file format. We also
-recommend that a file or class name and description of purpose be included on
-the same "printed page" as the copyright notice for easier identification within
-third-party archives.
-
- Copyright [yyyy] [name of copyright owner]
-
- Licensed under the Apache License, Version 2.0 (the "License");
- you may not use this file except in compliance with the License.
- You may obtain a copy of the License at
-
- http://www.apache.org/licenses/LICENSE-2.0
-
- Unless required by applicable law or agreed to in writing, software
- distributed under the License is distributed on an "AS IS" BASIS,
- WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- See the License for the specific language governing permissions and
- limitations under the License.
diff --git a/vendor/github.com/libp2p/go-openssl/README.md b/vendor/github.com/libp2p/go-openssl/README.md
deleted file mode 100644
index 62ac7dcd6..000000000
--- a/vendor/github.com/libp2p/go-openssl/README.md
+++ /dev/null
@@ -1,40 +0,0 @@
-# OpenSSL bindings for Go
-
-Forked from https://github.com/spacemonkeygo/openssl (unmaintained) to add:
-
-1. FreeBSD support.
-2. Key equality checking.
-3. A function to get the size of signatures produced by a key.
-
----
-
-Please see http://godoc.org/github.com/libp2p/go-openssl for more info
-
----
-
-### License
-
-Copyright (C) 2017. See AUTHORS.
-
-Licensed under the Apache License, Version 2.0 (the "License");
-you may not use this file except in compliance with the License.
-You may obtain a copy of the License at
-
- http://www.apache.org/licenses/LICENSE-2.0
-
-Unless required by applicable law or agreed to in writing, software
-distributed under the License is distributed on an "AS IS" BASIS,
-WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-See the License for the specific language governing permissions and
-limitations under the License.
-
-### Using on macOS
-1. Install [homebrew](http://brew.sh/)
-2. `$ brew install openssl` or `$ brew install openssl@1.1`
-
-### Using on Windows
-1. Install [mingw-w64](http://mingw-w64.sourceforge.net/)
-2. Install [pkg-config-lite](http://sourceforge.net/projects/pkgconfiglite)
-3. Build (or install precompiled) openssl for mingw32-w64
-4. Set __PKG\_CONFIG\_PATH__ to the directory containing openssl.pc
- (i.e. c:\mingw64\mingw64\lib\pkgconfig)
diff --git a/vendor/github.com/libp2p/go-openssl/alloc.go b/vendor/github.com/libp2p/go-openssl/alloc.go
deleted file mode 100644
index 25d064a2f..000000000
--- a/vendor/github.com/libp2p/go-openssl/alloc.go
+++ /dev/null
@@ -1,19 +0,0 @@
-package openssl
-
-// #include "shim.h"
-import "C"
-
-import (
- "unsafe"
-
- "github.com/mattn/go-pointer"
-)
-
-//export go_ssl_crypto_ex_free
-func go_ssl_crypto_ex_free(
- parent *C.void, ptr unsafe.Pointer,
- cryptoData *C.CRYPTO_EX_DATA, idx C.int,
- argl C.long, argp *C.void,
-) {
- pointer.Unref(ptr)
-}
diff --git a/vendor/github.com/libp2p/go-openssl/bio.go b/vendor/github.com/libp2p/go-openssl/bio.go
deleted file mode 100644
index caf2b37a7..000000000
--- a/vendor/github.com/libp2p/go-openssl/bio.go
+++ /dev/null
@@ -1,305 +0,0 @@
-// Copyright (C) 2017. See AUTHORS.
-//
-// Licensed under the Apache License, Version 2.0 (the "License");
-// you may not use this file except in compliance with the License.
-// You may obtain a copy of the License at
-//
-// http://www.apache.org/licenses/LICENSE-2.0
-//
-// Unless required by applicable law or agreed to in writing, software
-// distributed under the License is distributed on an "AS IS" BASIS,
-// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-// See the License for the specific language governing permissions and
-// limitations under the License.
-
-package openssl
-
-// #include "shim.h"
-import "C"
-
-import (
- "errors"
- "io"
- "reflect"
- "sync"
- "unsafe"
-)
-
-const (
- SSLRecordSize = 16 * 1024
-)
-
-func nonCopyGoBytes(ptr uintptr, length int) []byte {
- var slice []byte
- header := (*reflect.SliceHeader)(unsafe.Pointer(&slice))
- header.Cap = length
- header.Len = length
- header.Data = ptr
- return slice
-}
-
-func nonCopyCString(data *C.char, size C.int) []byte {
- return nonCopyGoBytes(uintptr(unsafe.Pointer(data)), int(size))
-}
-
-var writeBioMapping = newMapping()
-
-type writeBio struct {
- data_mtx sync.Mutex
- op_mtx sync.Mutex
- buf []byte
- release_buffers bool
-}
-
-func loadWritePtr(b *C.BIO) *writeBio {
- t := token(C.X_BIO_get_data(b))
- return (*writeBio)(writeBioMapping.Get(t))
-}
-
-func bioClearRetryFlags(b *C.BIO) {
- C.X_BIO_clear_flags(b, C.BIO_FLAGS_RWS|C.BIO_FLAGS_SHOULD_RETRY)
-}
-
-func bioSetRetryRead(b *C.BIO) {
- C.X_BIO_set_flags(b, C.BIO_FLAGS_READ|C.BIO_FLAGS_SHOULD_RETRY)
-}
-
-//export go_write_bio_write
-func go_write_bio_write(b *C.BIO, data *C.char, size C.int) (rc C.int) {
- defer func() {
- if err := recover(); err != nil {
- logger.Critf("openssl: writeBioWrite panic'd: %v", err)
- rc = -1
- }
- }()
- ptr := loadWritePtr(b)
- if ptr == nil || data == nil || size < 0 {
- return -1
- }
- ptr.data_mtx.Lock()
- defer ptr.data_mtx.Unlock()
- bioClearRetryFlags(b)
- ptr.buf = append(ptr.buf, nonCopyCString(data, size)...)
- return size
-}
-
-//export go_write_bio_ctrl
-func go_write_bio_ctrl(b *C.BIO, cmd C.int, arg1 C.long, arg2 unsafe.Pointer) (
- rc C.long) {
- defer func() {
- if err := recover(); err != nil {
- logger.Critf("openssl: writeBioCtrl panic'd: %v", err)
- rc = -1
- }
- }()
- switch cmd {
- case C.BIO_CTRL_WPENDING:
- return writeBioPending(b)
- case C.BIO_CTRL_DUP, C.BIO_CTRL_FLUSH:
- return 1
- default:
- return 0
- }
-}
-
-func writeBioPending(b *C.BIO) C.long {
- ptr := loadWritePtr(b)
- if ptr == nil {
- return 0
- }
- ptr.data_mtx.Lock()
- defer ptr.data_mtx.Unlock()
- return C.long(len(ptr.buf))
-}
-
-func (wb *writeBio) WriteTo(w io.Writer) (rv int64, err error) {
- wb.op_mtx.Lock()
- defer wb.op_mtx.Unlock()
-
- // write whatever data we currently have
- wb.data_mtx.Lock()
- data := wb.buf
- wb.data_mtx.Unlock()
-
- if len(data) == 0 {
- return 0, nil
- }
- n, err := w.Write(data)
-
- // subtract however much data we wrote from the buffer
- wb.data_mtx.Lock()
- wb.buf = wb.buf[:copy(wb.buf, wb.buf[n:])]
- if wb.release_buffers && len(wb.buf) == 0 {
- wb.buf = nil
- }
- wb.data_mtx.Unlock()
-
- return int64(n), err
-}
-
-func (wb *writeBio) Disconnect(b *C.BIO) {
- if loadWritePtr(b) == wb {
- writeBioMapping.Del(token(C.X_BIO_get_data(b)))
- C.X_BIO_set_data(b, nil)
- }
-}
-
-func (wb *writeBio) MakeCBIO() *C.BIO {
- rv := C.X_BIO_new_write_bio()
- token := writeBioMapping.Add(unsafe.Pointer(wb))
- C.X_BIO_set_data(rv, unsafe.Pointer(token))
- return rv
-}
-
-var readBioMapping = newMapping()
-
-type readBio struct {
- data_mtx sync.Mutex
- op_mtx sync.Mutex
- buf []byte
- eof bool
- release_buffers bool
-}
-
-func loadReadPtr(b *C.BIO) *readBio {
- return (*readBio)(readBioMapping.Get(token(C.X_BIO_get_data(b))))
-}
-
-//export go_read_bio_read
-func go_read_bio_read(b *C.BIO, data *C.char, size C.int) (rc C.int) {
- defer func() {
- if err := recover(); err != nil {
- logger.Critf("openssl: go_read_bio_read panic'd: %v", err)
- rc = -1
- }
- }()
- ptr := loadReadPtr(b)
- if ptr == nil || size < 0 {
- return -1
- }
- ptr.data_mtx.Lock()
- defer ptr.data_mtx.Unlock()
- bioClearRetryFlags(b)
- if len(ptr.buf) == 0 {
- if ptr.eof {
- return 0
- }
- bioSetRetryRead(b)
- return -1
- }
- if size == 0 || data == nil {
- return C.int(len(ptr.buf))
- }
- n := copy(nonCopyCString(data, size), ptr.buf)
- ptr.buf = ptr.buf[:copy(ptr.buf, ptr.buf[n:])]
- if ptr.release_buffers && len(ptr.buf) == 0 {
- ptr.buf = nil
- }
- return C.int(n)
-}
-
-//export go_read_bio_ctrl
-func go_read_bio_ctrl(b *C.BIO, cmd C.int, arg1 C.long, arg2 unsafe.Pointer) (
- rc C.long) {
-
- defer func() {
- if err := recover(); err != nil {
- logger.Critf("openssl: readBioCtrl panic'd: %v", err)
- rc = -1
- }
- }()
- switch cmd {
- case C.BIO_CTRL_PENDING:
- return readBioPending(b)
- case C.BIO_CTRL_DUP, C.BIO_CTRL_FLUSH:
- return 1
- default:
- return 0
- }
-}
-
-func readBioPending(b *C.BIO) C.long {
- ptr := loadReadPtr(b)
- if ptr == nil {
- return 0
- }
- ptr.data_mtx.Lock()
- defer ptr.data_mtx.Unlock()
- return C.long(len(ptr.buf))
-}
-
-func (rb *readBio) ReadFromOnce(r io.Reader) (n int, err error) {
- rb.op_mtx.Lock()
- defer rb.op_mtx.Unlock()
-
- // make sure we have a destination that fits at least one SSL record
- rb.data_mtx.Lock()
- if cap(rb.buf) < len(rb.buf)+SSLRecordSize {
- new_buf := make([]byte, len(rb.buf), len(rb.buf)+SSLRecordSize)
- copy(new_buf, rb.buf)
- rb.buf = new_buf
- }
- dst := rb.buf[len(rb.buf):cap(rb.buf)]
- dst_slice := rb.buf
- rb.data_mtx.Unlock()
-
- n, err = r.Read(dst)
- rb.data_mtx.Lock()
- defer rb.data_mtx.Unlock()
- if n > 0 {
- if len(dst_slice) != len(rb.buf) {
- // someone shrunk the buffer, so we read in too far ahead and we
- // need to slide backwards
- copy(rb.buf[len(rb.buf):len(rb.buf)+n], dst)
- }
- rb.buf = rb.buf[:len(rb.buf)+n]
- }
- return n, err
-}
-
-func (rb *readBio) MakeCBIO() *C.BIO {
- rv := C.X_BIO_new_read_bio()
- token := readBioMapping.Add(unsafe.Pointer(rb))
- C.X_BIO_set_data(rv, unsafe.Pointer(token))
- return rv
-}
-
-func (rb *readBio) Disconnect(b *C.BIO) {
- if loadReadPtr(b) == rb {
- readBioMapping.Del(token(C.X_BIO_get_data(b)))
- C.X_BIO_set_data(b, nil)
- }
-}
-
-func (rb *readBio) MarkEOF() {
- rb.data_mtx.Lock()
- defer rb.data_mtx.Unlock()
- rb.eof = true
-}
-
-type anyBio C.BIO
-
-func asAnyBio(b *C.BIO) *anyBio { return (*anyBio)(b) }
-
-func (b *anyBio) Read(buf []byte) (n int, err error) {
- if len(buf) == 0 {
- return 0, nil
- }
- n = int(C.X_BIO_read((*C.BIO)(b), unsafe.Pointer(&buf[0]), C.int(len(buf))))
- if n <= 0 {
- return 0, io.EOF
- }
- return n, nil
-}
-
-func (b *anyBio) Write(buf []byte) (written int, err error) {
- if len(buf) == 0 {
- return 0, nil
- }
- n := int(C.X_BIO_write((*C.BIO)(b), unsafe.Pointer(&buf[0]),
- C.int(len(buf))))
- if n != len(buf) {
- return n, errors.New("BIO write failed")
- }
- return n, nil
-}
diff --git a/vendor/github.com/libp2p/go-openssl/build.go b/vendor/github.com/libp2p/go-openssl/build.go
deleted file mode 100644
index 990fbb4b2..000000000
--- a/vendor/github.com/libp2p/go-openssl/build.go
+++ /dev/null
@@ -1,25 +0,0 @@
-// Copyright (C) 2017. See AUTHORS.
-//
-// Licensed under the Apache License, Version 2.0 (the "License");
-// you may not use this file except in compliance with the License.
-// You may obtain a copy of the License at
-//
-// http://www.apache.org/licenses/LICENSE-2.0
-//
-// Unless required by applicable law or agreed to in writing, software
-// distributed under the License is distributed on an "AS IS" BASIS,
-// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-// See the License for the specific language governing permissions and
-// limitations under the License.
-
-//go:build !openssl_static
-// +build !openssl_static
-
-package openssl
-
-// #cgo linux windows freebsd openbsd solaris pkg-config: libssl libcrypto
-// #cgo linux freebsd openbsd solaris CFLAGS: -Wno-deprecated-declarations
-// #cgo darwin CFLAGS: -I/usr/local/opt/openssl@1.1/include -I/usr/local/opt/openssl/include -Wno-deprecated-declarations
-// #cgo darwin LDFLAGS: -L/usr/local/opt/openssl@1.1/lib -L/usr/local/opt/openssl/lib -lssl -lcrypto
-// #cgo windows CFLAGS: -DWIN32_LEAN_AND_MEAN
-import "C"
diff --git a/vendor/github.com/libp2p/go-openssl/build_static.go b/vendor/github.com/libp2p/go-openssl/build_static.go
deleted file mode 100644
index dde544618..000000000
--- a/vendor/github.com/libp2p/go-openssl/build_static.go
+++ /dev/null
@@ -1,25 +0,0 @@
-// Copyright (C) 2017. See AUTHORS.
-//
-// Licensed under the Apache License, Version 2.0 (the "License");
-// you may not use this file except in compliance with the License.
-// You may obtain a copy of the License at
-//
-// http://www.apache.org/licenses/LICENSE-2.0
-//
-// Unless required by applicable law or agreed to in writing, software
-// distributed under the License is distributed on an "AS IS" BASIS,
-// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-// See the License for the specific language governing permissions and
-// limitations under the License.
-
-//go:build openssl_static
-// +build openssl_static
-
-package openssl
-
-// #cgo linux windows freebsd openbsd solaris pkg-config: --static libssl libcrypto
-// #cgo linux freebsd openbsd solaris CFLAGS: -Wno-deprecated-declarations
-// #cgo darwin CFLAGS: -I/usr/local/opt/openssl@1.1/include -I/usr/local/opt/openssl/include -Wno-deprecated-declarations
-// #cgo darwin LDFLAGS: -L/usr/local/opt/openssl@1.1/lib -L/usr/local/opt/openssl/lib -lssl -lcrypto
-// #cgo windows CFLAGS: -DWIN32_LEAN_AND_MEAN
-import "C"
diff --git a/vendor/github.com/libp2p/go-openssl/cert.go b/vendor/github.com/libp2p/go-openssl/cert.go
deleted file mode 100644
index 97c788f7c..000000000
--- a/vendor/github.com/libp2p/go-openssl/cert.go
+++ /dev/null
@@ -1,432 +0,0 @@
-// Copyright (C) 2017. See AUTHORS.
-//
-// Licensed under the Apache License, Version 2.0 (the "License");
-// you may not use this file except in compliance with the License.
-// You may obtain a copy of the License at
-//
-// http://www.apache.org/licenses/LICENSE-2.0
-//
-// Unless required by applicable law or agreed to in writing, software
-// distributed under the License is distributed on an "AS IS" BASIS,
-// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-// See the License for the specific language governing permissions and
-// limitations under the License.
-
-package openssl
-
-// #include "shim.h"
-import "C"
-
-import (
- "errors"
- "io/ioutil"
- "math/big"
- "runtime"
- "time"
- "unsafe"
-)
-
-type EVP_MD int
-
-const (
- EVP_NULL EVP_MD = iota
- EVP_MD5 EVP_MD = iota
- EVP_MD4 EVP_MD = iota
- EVP_SHA EVP_MD = iota
- EVP_SHA1 EVP_MD = iota
- EVP_DSS EVP_MD = iota
- EVP_DSS1 EVP_MD = iota
- EVP_MDC2 EVP_MD = iota
- EVP_RIPEMD160 EVP_MD = iota
- EVP_SHA224 EVP_MD = iota
- EVP_SHA256 EVP_MD = iota
- EVP_SHA384 EVP_MD = iota
- EVP_SHA512 EVP_MD = iota
-)
-
-// X509_Version represents a version on an x509 certificate.
-type X509_Version int
-
-// Specify constants for x509 versions because the standard states that they
-// are represented internally as one lower than the common version name.
-const (
- X509_V1 X509_Version = 0
- X509_V3 X509_Version = 2
-)
-
-type Certificate struct {
- x *C.X509
- Issuer *Certificate
- ref interface{}
- pubKey PublicKey
-}
-
-type CertificateInfo struct {
- Serial *big.Int
- Issued time.Duration
- Expires time.Duration
- Country string
- Organization string
- CommonName string
-}
-
-type Name struct {
- name *C.X509_NAME
-}
-
-// Allocate and return a new Name object.
-func NewName() (*Name, error) {
- n := C.X509_NAME_new()
- if n == nil {
- return nil, errors.New("could not create x509 name")
- }
- name := &Name{name: n}
- runtime.SetFinalizer(name, func(n *Name) {
- C.X509_NAME_free(n.name)
- })
- return name, nil
-}
-
-// AddTextEntry appends a text entry to an X509 NAME.
-func (n *Name) AddTextEntry(field, value string) error {
- cfield := C.CString(field)
- defer C.free(unsafe.Pointer(cfield))
- cvalue := (*C.uchar)(unsafe.Pointer(C.CString(value)))
- defer C.free(unsafe.Pointer(cvalue))
- ret := C.X509_NAME_add_entry_by_txt(
- n.name, cfield, C.MBSTRING_ASC, cvalue, -1, -1, 0)
- if ret != 1 {
- return errors.New("failed to add x509 name text entry")
- }
- return nil
-}
-
-// AddTextEntries allows adding multiple entries to a name in one call.
-func (n *Name) AddTextEntries(entries map[string]string) error {
- for f, v := range entries {
- if err := n.AddTextEntry(f, v); err != nil {
- return err
- }
- }
- return nil
-}
-
-// GetEntry returns a name entry based on NID. If no entry, then ("", false) is
-// returned.
-func (n *Name) GetEntry(nid NID) (entry string, ok bool) {
- entrylen := C.X509_NAME_get_text_by_NID(n.name, C.int(nid), nil, 0)
- if entrylen == -1 {
- return "", false
- }
- buf := (*C.char)(C.malloc(C.size_t(entrylen + 1)))
- defer C.free(unsafe.Pointer(buf))
- C.X509_NAME_get_text_by_NID(n.name, C.int(nid), buf, entrylen+1)
- return C.GoStringN(buf, entrylen), true
-}
-
-// NewCertificate generates a basic certificate based
-// on the provided CertificateInfo struct
-func NewCertificate(info *CertificateInfo, key PublicKey) (*Certificate, error) {
- c := &Certificate{x: C.X509_new()}
- runtime.SetFinalizer(c, func(c *Certificate) {
- C.X509_free(c.x)
- })
-
- name, err := c.GetSubjectName()
- if err != nil {
- return nil, err
- }
- err = name.AddTextEntries(map[string]string{
- "C": info.Country,
- "O": info.Organization,
- "CN": info.CommonName,
- })
- if err != nil {
- return nil, err
- }
- // self-issue for now
- if err := c.SetIssuerName(name); err != nil {
- return nil, err
- }
- if err := c.SetSerial(info.Serial); err != nil {
- return nil, err
- }
- if err := c.SetIssueDate(info.Issued); err != nil {
- return nil, err
- }
- if err := c.SetExpireDate(info.Expires); err != nil {
- return nil, err
- }
- if err := c.SetPubKey(key); err != nil {
- return nil, err
- }
- return c, nil
-}
-
-func (c *Certificate) GetSubjectName() (*Name, error) {
- n := C.X509_get_subject_name(c.x)
- if n == nil {
- return nil, errors.New("failed to get subject name")
- }
- return &Name{name: n}, nil
-}
-
-func (c *Certificate) GetIssuerName() (*Name, error) {
- n := C.X509_get_issuer_name(c.x)
- if n == nil {
- return nil, errors.New("failed to get issuer name")
- }
- return &Name{name: n}, nil
-}
-
-func (c *Certificate) SetSubjectName(name *Name) error {
- if C.X509_set_subject_name(c.x, name.name) != 1 {
- return errors.New("failed to set subject name")
- }
- return nil
-}
-
-// SetIssuer updates the stored Issuer cert
-// and the internal x509 Issuer Name of a certificate.
-// The stored Issuer reference is used when adding extensions.
-func (c *Certificate) SetIssuer(issuer *Certificate) error {
- name, err := issuer.GetSubjectName()
- if err != nil {
- return err
- }
- if err = c.SetIssuerName(name); err != nil {
- return err
- }
- c.Issuer = issuer
- return nil
-}
-
-// SetIssuerName populates the issuer name of a certificate.
-// Use SetIssuer instead, if possible.
-func (c *Certificate) SetIssuerName(name *Name) error {
- if C.X509_set_issuer_name(c.x, name.name) != 1 {
- return errors.New("failed to set subject name")
- }
- return nil
-}
-
-// SetSerial sets the serial of a certificate.
-func (c *Certificate) SetSerial(serial *big.Int) error {
- sno := C.ASN1_INTEGER_new()
- defer C.ASN1_INTEGER_free(sno)
- bn := C.BN_new()
- defer C.BN_free(bn)
-
- serialBytes := serial.Bytes()
- if bn = C.BN_bin2bn((*C.uchar)(unsafe.Pointer(&serialBytes[0])), C.int(len(serialBytes)), bn); bn == nil {
- return errors.New("failed to set serial")
- }
- if sno = C.BN_to_ASN1_INTEGER(bn, sno); sno == nil {
- return errors.New("failed to set serial")
- }
- if C.X509_set_serialNumber(c.x, sno) != 1 {
- return errors.New("failed to set serial")
- }
- return nil
-}
-
-// SetIssueDate sets the certificate issue date relative to the current time.
-func (c *Certificate) SetIssueDate(when time.Duration) error {
- offset := C.long(when / time.Second)
- result := C.X509_gmtime_adj(C.X_X509_get0_notBefore(c.x), offset)
- if result == nil {
- return errors.New("failed to set issue date")
- }
- return nil
-}
-
-// SetExpireDate sets the certificate issue date relative to the current time.
-func (c *Certificate) SetExpireDate(when time.Duration) error {
- offset := C.long(when / time.Second)
- result := C.X509_gmtime_adj(C.X_X509_get0_notAfter(c.x), offset)
- if result == nil {
- return errors.New("failed to set expire date")
- }
- return nil
-}
-
-// SetPubKey assigns a new public key to a certificate.
-func (c *Certificate) SetPubKey(pubKey PublicKey) error {
- c.pubKey = pubKey
- if C.X509_set_pubkey(c.x, pubKey.evpPKey()) != 1 {
- return errors.New("failed to set public key")
- }
- return nil
-}
-
-// Sign a certificate using a private key and a digest name.
-// Accepted digest names are 'sha256', 'sha384', and 'sha512'.
-func (c *Certificate) Sign(privKey PrivateKey, digest EVP_MD) error {
- switch digest {
- case EVP_SHA256:
- case EVP_SHA384:
- case EVP_SHA512:
- default:
- return errors.New("unsupported digest; " +
- "you're probably looking for 'EVP_SHA256' or 'EVP_SHA512'")
- }
- return c.insecureSign(privKey, digest)
-}
-
-func (c *Certificate) insecureSign(privKey PrivateKey, digest EVP_MD) error {
- var md *C.EVP_MD = getDigestFunction(digest)
- if C.X509_sign(c.x, privKey.evpPKey(), md) <= 0 {
- return errors.New("failed to sign certificate")
- }
- return nil
-}
-
-func getDigestFunction(digest EVP_MD) (md *C.EVP_MD) {
- switch digest {
- // please don't use these digest functions
- case EVP_NULL:
- md = C.X_EVP_md_null()
- case EVP_MD5:
- md = C.X_EVP_md5()
- case EVP_SHA:
- md = C.X_EVP_sha()
- case EVP_SHA1:
- md = C.X_EVP_sha1()
- case EVP_DSS:
- md = C.X_EVP_dss()
- case EVP_DSS1:
- md = C.X_EVP_dss1()
- case EVP_RIPEMD160:
- md = C.X_EVP_ripemd160()
- case EVP_SHA224:
- md = C.X_EVP_sha224()
- // you actually want one of these
- case EVP_SHA256:
- md = C.X_EVP_sha256()
- case EVP_SHA384:
- md = C.X_EVP_sha384()
- case EVP_SHA512:
- md = C.X_EVP_sha512()
- }
- return md
-}
-
-// Add an extension to a certificate.
-// Extension constants are NID_* as found in openssl.
-func (c *Certificate) AddExtension(nid NID, value string) error {
- issuer := c
- if c.Issuer != nil {
- issuer = c.Issuer
- }
- var ctx C.X509V3_CTX
- C.X509V3_set_ctx(&ctx, c.x, issuer.x, nil, nil, 0)
- ex := C.X509V3_EXT_conf_nid(nil, &ctx, C.int(nid), C.CString(value))
- if ex == nil {
- return errors.New("failed to create x509v3 extension")
- }
- defer C.X509_EXTENSION_free(ex)
- if C.X509_add_ext(c.x, ex, -1) <= 0 {
- return errors.New("failed to add x509v3 extension")
- }
- return nil
-}
-
-// AddCustomExtension add custom extenstion to the certificate.
-func (c *Certificate) AddCustomExtension(nid NID, value []byte) error {
- val := (*C.char)(C.CBytes(value))
- defer C.free(unsafe.Pointer(val))
- if int(C.add_custom_ext(c.x, C.int(nid), val, C.int(len(value)))) == 0 {
- return errors.New("unable to add extension")
- }
- return nil
-}
-
-// Wraps AddExtension using a map of NID to text extension.
-// Will return without finishing if it encounters an error.
-func (c *Certificate) AddExtensions(extensions map[NID]string) error {
- for nid, value := range extensions {
- if err := c.AddExtension(nid, value); err != nil {
- return err
- }
- }
- return nil
-}
-
-// LoadCertificateFromPEM loads an X509 certificate from a PEM-encoded block.
-func LoadCertificateFromPEM(pem_block []byte) (*Certificate, error) {
- if len(pem_block) == 0 {
- return nil, errors.New("empty pem block")
- }
- runtime.LockOSThread()
- defer runtime.UnlockOSThread()
- bio := C.BIO_new_mem_buf(unsafe.Pointer(&pem_block[0]),
- C.int(len(pem_block)))
- cert := C.PEM_read_bio_X509(bio, nil, nil, nil)
- C.BIO_free(bio)
- if cert == nil {
- return nil, errorFromErrorQueue()
- }
- x := &Certificate{x: cert}
- runtime.SetFinalizer(x, func(x *Certificate) {
- C.X509_free(x.x)
- })
- return x, nil
-}
-
-// MarshalPEM converts the X509 certificate to PEM-encoded format
-func (c *Certificate) MarshalPEM() (pem_block []byte, err error) {
- bio := C.BIO_new(C.BIO_s_mem())
- if bio == nil {
- return nil, errors.New("failed to allocate memory BIO")
- }
- defer C.BIO_free(bio)
- if int(C.PEM_write_bio_X509(bio, c.x)) != 1 {
- return nil, errors.New("failed dumping certificate")
- }
- return ioutil.ReadAll(asAnyBio(bio))
-}
-
-// PublicKey returns the public key embedded in the X509 certificate.
-func (c *Certificate) PublicKey() (PublicKey, error) {
- pkey := C.X509_get_pubkey(c.x)
- if pkey == nil {
- return nil, errors.New("no public key found")
- }
- key := &pKey{key: pkey}
- runtime.SetFinalizer(key, func(key *pKey) {
- C.EVP_PKEY_free(key.key)
- })
- return key, nil
-}
-
-// GetSerialNumberHex returns the certificate's serial number in hex format
-func (c *Certificate) GetSerialNumberHex() (serial string) {
- asn1_i := C.X509_get_serialNumber(c.x)
- bignum := C.ASN1_INTEGER_to_BN(asn1_i, nil)
- hex := C.BN_bn2hex(bignum)
- serial = C.GoString(hex)
- C.BN_free(bignum)
- C.X_OPENSSL_free(unsafe.Pointer(hex))
- return
-}
-
-// GetVersion returns the X509 version of the certificate.
-func (c *Certificate) GetVersion() X509_Version {
- return X509_Version(C.X_X509_get_version(c.x))
-}
-
-// SetVersion sets the X509 version of the certificate.
-func (c *Certificate) SetVersion(version X509_Version) error {
- cvers := C.long(version)
- if C.X_X509_set_version(c.x, cvers) != 1 {
- return errors.New("failed to set certificate version")
- }
- return nil
-}
-
-// GetExtensionValue returns the value of the given NID's extension.
-func (c *Certificate) GetExtensionValue(nid NID) []byte {
- dataLength := C.int(0)
- val := C.get_extention(c.x, C.int(nid), &dataLength)
- return C.GoBytes(unsafe.Pointer(val), dataLength)
-}
diff --git a/vendor/github.com/libp2p/go-openssl/ciphers.go b/vendor/github.com/libp2p/go-openssl/ciphers.go
deleted file mode 100644
index a3a597c45..000000000
--- a/vendor/github.com/libp2p/go-openssl/ciphers.go
+++ /dev/null
@@ -1,335 +0,0 @@
-// Copyright (C) 2017. See AUTHORS.
-//
-// Licensed under the Apache License, Version 2.0 (the "License");
-// you may not use this file except in compliance with the License.
-// You may obtain a copy of the License at
-//
-// http://www.apache.org/licenses/LICENSE-2.0
-//
-// Unless required by applicable law or agreed to in writing, software
-// distributed under the License is distributed on an "AS IS" BASIS,
-// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-// See the License for the specific language governing permissions and
-// limitations under the License.
-
-package openssl
-
-// #include "shim.h"
-import "C"
-
-import (
- "errors"
- "fmt"
- "runtime"
- "unsafe"
-)
-
-const (
- GCM_TAG_MAXLEN = 16
-)
-
-type CipherCtx interface {
- Cipher() *Cipher
- BlockSize() int
- KeySize() int
- IVSize() int
-}
-
-type Cipher struct {
- ptr *C.EVP_CIPHER
-}
-
-func (c *Cipher) Nid() NID {
- return NID(C.X_EVP_CIPHER_nid(c.ptr))
-}
-
-func (c *Cipher) ShortName() (string, error) {
- return Nid2ShortName(c.Nid())
-}
-
-func (c *Cipher) BlockSize() int {
- return int(C.X_EVP_CIPHER_block_size(c.ptr))
-}
-
-func (c *Cipher) KeySize() int {
- return int(C.X_EVP_CIPHER_key_length(c.ptr))
-}
-
-func (c *Cipher) IVSize() int {
- return int(C.X_EVP_CIPHER_iv_length(c.ptr))
-}
-
-func Nid2ShortName(nid NID) (string, error) {
- sn := C.OBJ_nid2sn(C.int(nid))
- if sn == nil {
- return "", fmt.Errorf("NID %d not found", nid)
- }
- return C.GoString(sn), nil
-}
-
-func GetCipherByName(name string) (*Cipher, error) {
- cname := C.CString(name)
- defer C.free(unsafe.Pointer(cname))
- p := C.EVP_get_cipherbyname(cname)
- if p == nil {
- return nil, fmt.Errorf("Cipher %v not found", name)
- }
- // we can consider ciphers to use static mem; don't need to free
- return &Cipher{ptr: p}, nil
-}
-
-func GetCipherByNid(nid NID) (*Cipher, error) {
- sn, err := Nid2ShortName(nid)
- if err != nil {
- return nil, err
- }
- return GetCipherByName(sn)
-}
-
-type cipherCtx struct {
- ctx *C.EVP_CIPHER_CTX
-}
-
-func newCipherCtx() (*cipherCtx, error) {
- cctx := C.EVP_CIPHER_CTX_new()
- if cctx == nil {
- return nil, errors.New("failed to allocate cipher context")
- }
- ctx := &cipherCtx{cctx}
- runtime.SetFinalizer(ctx, func(ctx *cipherCtx) {
- C.EVP_CIPHER_CTX_free(ctx.ctx)
- })
- return ctx, nil
-}
-
-func (ctx *cipherCtx) applyKeyAndIV(key, iv []byte) error {
- var kptr, iptr *C.uchar
- if key != nil {
- if len(key) != ctx.KeySize() {
- return fmt.Errorf("bad key size (%d bytes instead of %d)",
- len(key), ctx.KeySize())
- }
- kptr = (*C.uchar)(&key[0])
- }
- if iv != nil {
- if len(iv) != ctx.IVSize() {
- return fmt.Errorf("bad IV size (%d bytes instead of %d)",
- len(iv), ctx.IVSize())
- }
- iptr = (*C.uchar)(&iv[0])
- }
- if kptr != nil || iptr != nil {
- var res C.int
- if C.X_EVP_CIPHER_CTX_encrypting(ctx.ctx) != 0 {
- res = C.EVP_EncryptInit_ex(ctx.ctx, nil, nil, kptr, iptr)
- } else {
- res = C.EVP_DecryptInit_ex(ctx.ctx, nil, nil, kptr, iptr)
- }
- if res != 1 {
- return errors.New("failed to apply key/IV")
- }
- }
- return nil
-}
-
-func (ctx *cipherCtx) Cipher() *Cipher {
- return &Cipher{ptr: C.X_EVP_CIPHER_CTX_cipher(ctx.ctx)}
-}
-
-func (ctx *cipherCtx) BlockSize() int {
- return int(C.X_EVP_CIPHER_CTX_block_size(ctx.ctx))
-}
-
-func (ctx *cipherCtx) KeySize() int {
- return int(C.X_EVP_CIPHER_CTX_key_length(ctx.ctx))
-}
-
-func (ctx *cipherCtx) IVSize() int {
- return int(C.X_EVP_CIPHER_CTX_iv_length(ctx.ctx))
-}
-
-func (ctx *cipherCtx) SetPadding(pad bool) {
- if pad {
- C.X_EVP_CIPHER_CTX_set_padding(ctx.ctx, 1)
- } else {
- C.X_EVP_CIPHER_CTX_set_padding(ctx.ctx, 0)
- }
-}
-
-func (ctx *cipherCtx) setCtrl(code, arg int) error {
- res := C.EVP_CIPHER_CTX_ctrl(ctx.ctx, C.int(code), C.int(arg), nil)
- if res != 1 {
- return fmt.Errorf("failed to set code %d to %d [result %d]",
- code, arg, res)
- }
- return nil
-}
-
-func (ctx *cipherCtx) setCtrlBytes(code, arg int, value []byte) error {
- res := C.EVP_CIPHER_CTX_ctrl(ctx.ctx, C.int(code), C.int(arg),
- unsafe.Pointer(&value[0]))
- if res != 1 {
- return fmt.Errorf("failed to set code %d with arg %d to %x [result %d]",
- code, arg, value, res)
- }
- return nil
-}
-
-func (ctx *cipherCtx) getCtrlInt(code, arg int) (int, error) {
- var returnVal C.int
- res := C.EVP_CIPHER_CTX_ctrl(ctx.ctx, C.int(code), C.int(arg),
- unsafe.Pointer(&returnVal))
- if res != 1 {
- return 0, fmt.Errorf("failed to get code %d with arg %d [result %d]",
- code, arg, res)
- }
- return int(returnVal), nil
-}
-
-func (ctx *cipherCtx) getCtrlBytes(code, arg, expectsize int) ([]byte, error) {
- returnVal := make([]byte, expectsize)
- res := C.EVP_CIPHER_CTX_ctrl(ctx.ctx, C.int(code), C.int(arg),
- unsafe.Pointer(&returnVal[0]))
- if res != 1 {
- return nil, fmt.Errorf("failed to get code %d with arg %d [result %d]",
- code, arg, res)
- }
- return returnVal, nil
-}
-
-type EncryptionCipherCtx interface {
- CipherCtx
-
- // pass in plaintext, get back ciphertext. can be called
- // multiple times as needed
- EncryptUpdate(input []byte) ([]byte, error)
-
- // call after all plaintext has been passed in; may return
- // additional ciphertext if needed to finish off a block
- // or extra padding information
- EncryptFinal() ([]byte, error)
-}
-
-type DecryptionCipherCtx interface {
- CipherCtx
-
- // pass in ciphertext, get back plaintext. can be called
- // multiple times as needed
- DecryptUpdate(input []byte) ([]byte, error)
-
- // call after all ciphertext has been passed in; may return
- // additional plaintext if needed to finish off a block
- DecryptFinal() ([]byte, error)
-}
-
-type encryptionCipherCtx struct {
- *cipherCtx
-}
-
-type decryptionCipherCtx struct {
- *cipherCtx
-}
-
-func newEncryptionCipherCtx(c *Cipher, e *Engine, key, iv []byte) (
- *encryptionCipherCtx, error) {
- if c == nil {
- return nil, errors.New("null cipher not allowed")
- }
- ctx, err := newCipherCtx()
- if err != nil {
- return nil, err
- }
- var eptr *C.ENGINE
- if e != nil {
- eptr = e.e
- }
- if C.EVP_EncryptInit_ex(ctx.ctx, c.ptr, eptr, nil, nil) != 1 {
- return nil, errors.New("failed to initialize cipher context")
- }
- err = ctx.applyKeyAndIV(key, iv)
- if err != nil {
- return nil, err
- }
- return &encryptionCipherCtx{cipherCtx: ctx}, nil
-}
-
-func newDecryptionCipherCtx(c *Cipher, e *Engine, key, iv []byte) (
- *decryptionCipherCtx, error) {
- if c == nil {
- return nil, errors.New("null cipher not allowed")
- }
- ctx, err := newCipherCtx()
- if err != nil {
- return nil, err
- }
- var eptr *C.ENGINE
- if e != nil {
- eptr = e.e
- }
- if C.EVP_DecryptInit_ex(ctx.ctx, c.ptr, eptr, nil, nil) != 1 {
- return nil, errors.New("failed to initialize cipher context")
- }
- err = ctx.applyKeyAndIV(key, iv)
- if err != nil {
- return nil, err
- }
- return &decryptionCipherCtx{cipherCtx: ctx}, nil
-}
-
-func NewEncryptionCipherCtx(c *Cipher, e *Engine, key, iv []byte) (
- EncryptionCipherCtx, error) {
- return newEncryptionCipherCtx(c, e, key, iv)
-}
-
-func NewDecryptionCipherCtx(c *Cipher, e *Engine, key, iv []byte) (
- DecryptionCipherCtx, error) {
- return newDecryptionCipherCtx(c, e, key, iv)
-}
-
-func (ctx *encryptionCipherCtx) EncryptUpdate(input []byte) ([]byte, error) {
- if len(input) == 0 {
- return nil, nil
- }
- outbuf := make([]byte, len(input)+ctx.BlockSize())
- outlen := C.int(len(outbuf))
- res := C.EVP_EncryptUpdate(ctx.ctx, (*C.uchar)(&outbuf[0]), &outlen,
- (*C.uchar)(&input[0]), C.int(len(input)))
- if res != 1 {
- return nil, fmt.Errorf("failed to encrypt [result %d]", res)
- }
- return outbuf[:outlen], nil
-}
-
-func (ctx *decryptionCipherCtx) DecryptUpdate(input []byte) ([]byte, error) {
- if len(input) == 0 {
- return nil, nil
- }
- outbuf := make([]byte, len(input)+ctx.BlockSize())
- outlen := C.int(len(outbuf))
- res := C.EVP_DecryptUpdate(ctx.ctx, (*C.uchar)(&outbuf[0]), &outlen,
- (*C.uchar)(&input[0]), C.int(len(input)))
- if res != 1 {
- return nil, fmt.Errorf("failed to decrypt [result %d]", res)
- }
- return outbuf[:outlen], nil
-}
-
-func (ctx *encryptionCipherCtx) EncryptFinal() ([]byte, error) {
- outbuf := make([]byte, ctx.BlockSize())
- var outlen C.int
- if C.EVP_EncryptFinal_ex(ctx.ctx, (*C.uchar)(&outbuf[0]), &outlen) != 1 {
- return nil, errors.New("encryption failed")
- }
- return outbuf[:outlen], nil
-}
-
-func (ctx *decryptionCipherCtx) DecryptFinal() ([]byte, error) {
- outbuf := make([]byte, ctx.BlockSize())
- var outlen C.int
- if C.EVP_DecryptFinal_ex(ctx.ctx, (*C.uchar)(&outbuf[0]), &outlen) != 1 {
- // this may mean the tag failed to verify- all previous plaintext
- // returned must be considered faked and invalid
- return nil, errors.New("decryption failed")
- }
- return outbuf[:outlen], nil
-}
diff --git a/vendor/github.com/libp2p/go-openssl/ciphers_gcm.go b/vendor/github.com/libp2p/go-openssl/ciphers_gcm.go
deleted file mode 100644
index 06ba0fedb..000000000
--- a/vendor/github.com/libp2p/go-openssl/ciphers_gcm.go
+++ /dev/null
@@ -1,152 +0,0 @@
-// Copyright (C) 2017. See AUTHORS.
-//
-// Licensed under the Apache License, Version 2.0 (the "License");
-// you may not use this file except in compliance with the License.
-// You may obtain a copy of the License at
-//
-// http://www.apache.org/licenses/LICENSE-2.0
-//
-// Unless required by applicable law or agreed to in writing, software
-// distributed under the License is distributed on an "AS IS" BASIS,
-// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-// See the License for the specific language governing permissions and
-// limitations under the License.
-
-package openssl
-
-// #include
-import "C"
-
-import (
- "errors"
- "fmt"
-)
-
-type AuthenticatedEncryptionCipherCtx interface {
- EncryptionCipherCtx
-
- // data passed in to ExtraData() is part of the final output; it is
- // not encrypted itself, but is part of the authenticated data. when
- // decrypting or authenticating, pass back with the decryption
- // context's ExtraData()
- ExtraData([]byte) error
-
- // use after finalizing encryption to get the authenticating tag
- GetTag() ([]byte, error)
-}
-
-type AuthenticatedDecryptionCipherCtx interface {
- DecryptionCipherCtx
-
- // pass in any extra data that was added during encryption with the
- // encryption context's ExtraData()
- ExtraData([]byte) error
-
- // use before finalizing decryption to tell the library what the
- // tag is expected to be
- SetTag([]byte) error
-}
-
-type authEncryptionCipherCtx struct {
- *encryptionCipherCtx
-}
-
-type authDecryptionCipherCtx struct {
- *decryptionCipherCtx
-}
-
-func getGCMCipher(blocksize int) (*Cipher, error) {
- var cipherptr *C.EVP_CIPHER
- switch blocksize {
- case 256:
- cipherptr = C.EVP_aes_256_gcm()
- case 192:
- cipherptr = C.EVP_aes_192_gcm()
- case 128:
- cipherptr = C.EVP_aes_128_gcm()
- default:
- return nil, fmt.Errorf("unknown block size %d", blocksize)
- }
- return &Cipher{ptr: cipherptr}, nil
-}
-
-func NewGCMEncryptionCipherCtx(blocksize int, e *Engine, key, iv []byte) (
- AuthenticatedEncryptionCipherCtx, error) {
- cipher, err := getGCMCipher(blocksize)
- if err != nil {
- return nil, err
- }
- ctx, err := newEncryptionCipherCtx(cipher, e, key, nil)
- if err != nil {
- return nil, err
- }
- if len(iv) > 0 {
- err := ctx.setCtrl(C.EVP_CTRL_GCM_SET_IVLEN, len(iv))
- if err != nil {
- return nil, fmt.Errorf("could not set IV len to %d: %s",
- len(iv), err)
- }
- if C.EVP_EncryptInit_ex(ctx.ctx, nil, nil, nil,
- (*C.uchar)(&iv[0])) != 1 {
- return nil, errors.New("failed to apply IV")
- }
- }
- return &authEncryptionCipherCtx{encryptionCipherCtx: ctx}, nil
-}
-
-func NewGCMDecryptionCipherCtx(blocksize int, e *Engine, key, iv []byte) (
- AuthenticatedDecryptionCipherCtx, error) {
- cipher, err := getGCMCipher(blocksize)
- if err != nil {
- return nil, err
- }
- ctx, err := newDecryptionCipherCtx(cipher, e, key, nil)
- if err != nil {
- return nil, err
- }
- if len(iv) > 0 {
- err := ctx.setCtrl(C.EVP_CTRL_GCM_SET_IVLEN, len(iv))
- if err != nil {
- return nil, fmt.Errorf("could not set IV len to %d: %s",
- len(iv), err)
- }
- if C.EVP_DecryptInit_ex(ctx.ctx, nil, nil, nil,
- (*C.uchar)(&iv[0])) != 1 {
- return nil, errors.New("failed to apply IV")
- }
- }
- return &authDecryptionCipherCtx{decryptionCipherCtx: ctx}, nil
-}
-
-func (ctx *authEncryptionCipherCtx) ExtraData(aad []byte) error {
- if aad == nil {
- return nil
- }
- var outlen C.int
- if C.EVP_EncryptUpdate(ctx.ctx, nil, &outlen, (*C.uchar)(&aad[0]),
- C.int(len(aad))) != 1 {
- return errors.New("failed to add additional authenticated data")
- }
- return nil
-}
-
-func (ctx *authDecryptionCipherCtx) ExtraData(aad []byte) error {
- if aad == nil {
- return nil
- }
- var outlen C.int
- if C.EVP_DecryptUpdate(ctx.ctx, nil, &outlen, (*C.uchar)(&aad[0]),
- C.int(len(aad))) != 1 {
- return errors.New("failed to add additional authenticated data")
- }
- return nil
-}
-
-func (ctx *authEncryptionCipherCtx) GetTag() ([]byte, error) {
- return ctx.getCtrlBytes(C.EVP_CTRL_GCM_GET_TAG, GCM_TAG_MAXLEN,
- GCM_TAG_MAXLEN)
-}
-
-func (ctx *authDecryptionCipherCtx) SetTag(tag []byte) error {
- return ctx.setCtrlBytes(C.EVP_CTRL_GCM_SET_TAG, len(tag), tag)
-}
diff --git a/vendor/github.com/libp2p/go-openssl/conn.go b/vendor/github.com/libp2p/go-openssl/conn.go
deleted file mode 100644
index fc9421ffc..000000000
--- a/vendor/github.com/libp2p/go-openssl/conn.go
+++ /dev/null
@@ -1,621 +0,0 @@
-// Copyright (C) 2017. See AUTHORS.
-//
-// Licensed under the Apache License, Version 2.0 (the "License");
-// you may not use this file except in compliance with the License.
-// You may obtain a copy of the License at
-//
-// http://www.apache.org/licenses/LICENSE-2.0
-//
-// Unless required by applicable law or agreed to in writing, software
-// distributed under the License is distributed on an "AS IS" BASIS,
-// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-// See the License for the specific language governing permissions and
-// limitations under the License.
-
-package openssl
-
-// #include "shim.h"
-import "C"
-
-import (
- "errors"
- "fmt"
- "io"
- "net"
- "runtime"
- "sync"
- "time"
- "unsafe"
-
- "github.com/libp2p/go-openssl/utils"
- "github.com/mattn/go-pointer"
-)
-
-var (
- errZeroReturn = errors.New("zero return")
- errWantRead = errors.New("want read")
- errWantWrite = errors.New("want write")
- errTryAgain = errors.New("try again")
-)
-
-type Conn struct {
- *SSL
-
- conn net.Conn
- ctx *Ctx // for gc
- into_ssl *readBio
- from_ssl *writeBio
- is_shutdown bool
- mtx sync.Mutex
- want_read_future *utils.Future
-}
-
-type VerifyResult int
-
-const (
- Ok VerifyResult = C.X509_V_OK
- UnableToGetIssuerCert VerifyResult = C.X509_V_ERR_UNABLE_TO_GET_ISSUER_CERT
- UnableToGetCrl VerifyResult = C.X509_V_ERR_UNABLE_TO_GET_CRL
- UnableToDecryptCertSignature VerifyResult = C.X509_V_ERR_UNABLE_TO_DECRYPT_CERT_SIGNATURE
- UnableToDecryptCrlSignature VerifyResult = C.X509_V_ERR_UNABLE_TO_DECRYPT_CRL_SIGNATURE
- UnableToDecodeIssuerPublicKey VerifyResult = C.X509_V_ERR_UNABLE_TO_DECODE_ISSUER_PUBLIC_KEY
- CertSignatureFailure VerifyResult = C.X509_V_ERR_CERT_SIGNATURE_FAILURE
- CrlSignatureFailure VerifyResult = C.X509_V_ERR_CRL_SIGNATURE_FAILURE
- CertNotYetValid VerifyResult = C.X509_V_ERR_CERT_NOT_YET_VALID
- CertHasExpired VerifyResult = C.X509_V_ERR_CERT_HAS_EXPIRED
- CrlNotYetValid VerifyResult = C.X509_V_ERR_CRL_NOT_YET_VALID
- CrlHasExpired VerifyResult = C.X509_V_ERR_CRL_HAS_EXPIRED
- ErrorInCertNotBeforeField VerifyResult = C.X509_V_ERR_ERROR_IN_CERT_NOT_BEFORE_FIELD
- ErrorInCertNotAfterField VerifyResult = C.X509_V_ERR_ERROR_IN_CERT_NOT_AFTER_FIELD
- ErrorInCrlLastUpdateField VerifyResult = C.X509_V_ERR_ERROR_IN_CRL_LAST_UPDATE_FIELD
- ErrorInCrlNextUpdateField VerifyResult = C.X509_V_ERR_ERROR_IN_CRL_NEXT_UPDATE_FIELD
- OutOfMem VerifyResult = C.X509_V_ERR_OUT_OF_MEM
- DepthZeroSelfSignedCert VerifyResult = C.X509_V_ERR_DEPTH_ZERO_SELF_SIGNED_CERT
- SelfSignedCertInChain VerifyResult = C.X509_V_ERR_SELF_SIGNED_CERT_IN_CHAIN
- UnableToGetIssuerCertLocally VerifyResult = C.X509_V_ERR_UNABLE_TO_GET_ISSUER_CERT_LOCALLY
- UnableToVerifyLeafSignature VerifyResult = C.X509_V_ERR_UNABLE_TO_VERIFY_LEAF_SIGNATURE
- CertChainTooLong VerifyResult = C.X509_V_ERR_CERT_CHAIN_TOO_LONG
- CertRevoked VerifyResult = C.X509_V_ERR_CERT_REVOKED
- InvalidCa VerifyResult = C.X509_V_ERR_INVALID_CA
- PathLengthExceeded VerifyResult = C.X509_V_ERR_PATH_LENGTH_EXCEEDED
- InvalidPurpose VerifyResult = C.X509_V_ERR_INVALID_PURPOSE
- CertUntrusted VerifyResult = C.X509_V_ERR_CERT_UNTRUSTED
- CertRejected VerifyResult = C.X509_V_ERR_CERT_REJECTED
- SubjectIssuerMismatch VerifyResult = C.X509_V_ERR_SUBJECT_ISSUER_MISMATCH
- AkidSkidMismatch VerifyResult = C.X509_V_ERR_AKID_SKID_MISMATCH
- AkidIssuerSerialMismatch VerifyResult = C.X509_V_ERR_AKID_ISSUER_SERIAL_MISMATCH
- KeyusageNoCertsign VerifyResult = C.X509_V_ERR_KEYUSAGE_NO_CERTSIGN
- UnableToGetCrlIssuer VerifyResult = C.X509_V_ERR_UNABLE_TO_GET_CRL_ISSUER
- UnhandledCriticalExtension VerifyResult = C.X509_V_ERR_UNHANDLED_CRITICAL_EXTENSION
- KeyusageNoCrlSign VerifyResult = C.X509_V_ERR_KEYUSAGE_NO_CRL_SIGN
- UnhandledCriticalCrlExtension VerifyResult = C.X509_V_ERR_UNHANDLED_CRITICAL_CRL_EXTENSION
- InvalidNonCa VerifyResult = C.X509_V_ERR_INVALID_NON_CA
- ProxyPathLengthExceeded VerifyResult = C.X509_V_ERR_PROXY_PATH_LENGTH_EXCEEDED
- KeyusageNoDigitalSignature VerifyResult = C.X509_V_ERR_KEYUSAGE_NO_DIGITAL_SIGNATURE
- ProxyCertificatesNotAllowed VerifyResult = C.X509_V_ERR_PROXY_CERTIFICATES_NOT_ALLOWED
- InvalidExtension VerifyResult = C.X509_V_ERR_INVALID_EXTENSION
- InvalidPolicyExtension VerifyResult = C.X509_V_ERR_INVALID_POLICY_EXTENSION
- NoExplicitPolicy VerifyResult = C.X509_V_ERR_NO_EXPLICIT_POLICY
- UnnestedResource VerifyResult = C.X509_V_ERR_UNNESTED_RESOURCE
- ApplicationVerification VerifyResult = C.X509_V_ERR_APPLICATION_VERIFICATION
-)
-
-func newSSL(ctx *C.SSL_CTX) (*C.SSL, error) {
- runtime.LockOSThread()
- defer runtime.UnlockOSThread()
- ssl := C.SSL_new(ctx)
- if ssl == nil {
- return nil, errorFromErrorQueue()
- }
- return ssl, nil
-}
-
-func newConn(conn net.Conn, ctx *Ctx) (*Conn, error) {
- ssl, err := newSSL(ctx.ctx)
- if err != nil {
- return nil, err
- }
-
- into_ssl := &readBio{}
- from_ssl := &writeBio{}
-
- if ctx.GetMode()&ReleaseBuffers > 0 {
- into_ssl.release_buffers = true
- from_ssl.release_buffers = true
- }
-
- into_ssl_cbio := into_ssl.MakeCBIO()
- from_ssl_cbio := from_ssl.MakeCBIO()
- if into_ssl_cbio == nil || from_ssl_cbio == nil {
- // these frees are null safe
- C.BIO_free(into_ssl_cbio)
- C.BIO_free(from_ssl_cbio)
- C.SSL_free(ssl)
- return nil, errors.New("failed to allocate memory BIO")
- }
-
- // the ssl object takes ownership of these objects now
- C.SSL_set_bio(ssl, into_ssl_cbio, from_ssl_cbio)
-
- s := &SSL{ssl: ssl}
- C.SSL_set_ex_data(s.ssl, get_ssl_idx(), pointer.Save(s))
-
- c := &Conn{
- SSL: s,
-
- conn: conn,
- ctx: ctx,
- into_ssl: into_ssl,
- from_ssl: from_ssl}
- runtime.SetFinalizer(c, func(c *Conn) {
- c.into_ssl.Disconnect(into_ssl_cbio)
- c.from_ssl.Disconnect(from_ssl_cbio)
- C.SSL_free(c.ssl)
- })
- return c, nil
-}
-
-// Client wraps an existing stream connection and puts it in the connect state
-// for any subsequent handshakes.
-//
-// IMPORTANT NOTE: if you use this method instead of Dial to construct an SSL
-// connection, you are responsible for verifying the peer's hostname.
-// Otherwise, you are vulnerable to MITM attacks.
-//
-// Client also does not set up SNI for you like Dial does.
-//
-// Client connections probably won't work for you unless you set a verify
-// location or add some certs to the certificate store of the client context
-// you're using. This library is not nice enough to use the system certificate
-// store by default for you yet.
-func Client(conn net.Conn, ctx *Ctx) (*Conn, error) {
- c, err := newConn(conn, ctx)
- if err != nil {
- return nil, err
- }
- C.SSL_set_connect_state(c.ssl)
- return c, nil
-}
-
-// Server wraps an existing stream connection and puts it in the accept state
-// for any subsequent handshakes.
-func Server(conn net.Conn, ctx *Ctx) (*Conn, error) {
- c, err := newConn(conn, ctx)
- if err != nil {
- return nil, err
- }
- C.SSL_set_accept_state(c.ssl)
- return c, nil
-}
-
-func (c *Conn) GetCtx() *Ctx { return c.ctx }
-
-func (c *Conn) CurrentCipher() (string, error) {
- p := C.X_SSL_get_cipher_name(c.ssl)
- if p == nil {
- return "", errors.New("session not established")
- }
-
- return C.GoString(p), nil
-}
-
-func (c *Conn) fillInputBuffer() error {
- for {
- n, err := c.into_ssl.ReadFromOnce(c.conn)
- if n == 0 && err == nil {
- continue
- }
- if err == io.EOF {
- c.into_ssl.MarkEOF()
- return c.Close()
- }
- return err
- }
-}
-
-func (c *Conn) flushOutputBuffer() error {
- _, err := c.from_ssl.WriteTo(c.conn)
- return err
-}
-
-func (c *Conn) getErrorHandler(rv C.int, errno error) func() error {
- errcode := C.SSL_get_error(c.ssl, rv)
- switch errcode {
- case C.SSL_ERROR_ZERO_RETURN:
- return func() error {
- c.Close()
- return io.ErrUnexpectedEOF
- }
- case C.SSL_ERROR_WANT_READ:
- go c.flushOutputBuffer()
- if c.want_read_future != nil {
- want_read_future := c.want_read_future
- return func() error {
- _, err := want_read_future.Get()
- return err
- }
- }
- c.want_read_future = utils.NewFuture()
- want_read_future := c.want_read_future
- return func() (err error) {
- defer func() {
- c.mtx.Lock()
- c.want_read_future = nil
- c.mtx.Unlock()
- want_read_future.Set(nil, err)
- }()
- err = c.fillInputBuffer()
- if err != nil {
- return err
- }
- return errTryAgain
- }
- case C.SSL_ERROR_WANT_WRITE:
- return func() error {
- err := c.flushOutputBuffer()
- if err != nil {
- return err
- }
- return errTryAgain
- }
- case C.SSL_ERROR_SYSCALL:
- var err error
- if C.ERR_peek_error() == 0 {
- switch rv {
- case 0:
- err = errors.New("protocol-violating EOF")
- case -1:
- err = errno
- default:
- err = errorFromErrorQueue()
- }
- } else {
- err = errorFromErrorQueue()
- }
- return func() error { return err }
- default:
- err := errorFromErrorQueue()
- return func() error { return err }
- }
-}
-
-func (c *Conn) handleError(errcb func() error) error {
- if errcb != nil {
- return errcb()
- }
- return nil
-}
-
-func (c *Conn) handshake() func() error {
- c.mtx.Lock()
- defer c.mtx.Unlock()
- if c.is_shutdown {
- return func() error { return io.ErrUnexpectedEOF }
- }
- runtime.LockOSThread()
- defer runtime.UnlockOSThread()
- rv, errno := C.SSL_do_handshake(c.ssl)
- if rv > 0 {
- return nil
- }
- return c.getErrorHandler(rv, errno)
-}
-
-// Handshake performs an SSL handshake. If a handshake is not manually
-// triggered, it will run before the first I/O on the encrypted stream.
-func (c *Conn) Handshake() error {
- err := errTryAgain
- for err == errTryAgain {
- err = c.handleError(c.handshake())
- }
- go c.flushOutputBuffer()
- return err
-}
-
-// PeerCertificate returns the Certificate of the peer with which you're
-// communicating. Only valid after a handshake.
-func (c *Conn) PeerCertificate() (*Certificate, error) {
- c.mtx.Lock()
- defer c.mtx.Unlock()
- if c.is_shutdown {
- return nil, errors.New("connection closed")
- }
- x := C.SSL_get_peer_certificate(c.ssl)
- if x == nil {
- return nil, errors.New("no peer certificate found")
- }
- cert := &Certificate{x: x}
- runtime.SetFinalizer(cert, func(cert *Certificate) {
- C.X509_free(cert.x)
- })
- return cert, nil
-}
-
-// loadCertificateStack loads up a stack of x509 certificates and returns them,
-// handling memory ownership.
-func (c *Conn) loadCertificateStack(sk *C.struct_stack_st_X509) (
- rv []*Certificate) {
-
- sk_num := int(C.X_sk_X509_num(sk))
- rv = make([]*Certificate, 0, sk_num)
- for i := 0; i < sk_num; i++ {
- x := C.X_sk_X509_value(sk, C.int(i))
- // ref holds on to the underlying connection memory so we don't need to
- // worry about incrementing refcounts manually or freeing the X509
- rv = append(rv, &Certificate{x: x, ref: c})
- }
- return rv
-}
-
-// PeerCertificateChain returns the certificate chain of the peer. If called on
-// the client side, the stack also contains the peer's certificate; if called
-// on the server side, the peer's certificate must be obtained separately using
-// PeerCertificate.
-func (c *Conn) PeerCertificateChain() (rv []*Certificate, err error) {
- c.mtx.Lock()
- defer c.mtx.Unlock()
- if c.is_shutdown {
- return nil, errors.New("connection closed")
- }
- sk := C.SSL_get_peer_cert_chain(c.ssl)
- if sk == nil {
- return nil, errors.New("no peer certificates found")
- }
- return c.loadCertificateStack(sk), nil
-}
-
-type ConnectionState struct {
- Certificate *Certificate
- CertificateError error
- CertificateChain []*Certificate
- CertificateChainError error
- SessionReused bool
-}
-
-func (c *Conn) ConnectionState() (rv ConnectionState) {
- rv.Certificate, rv.CertificateError = c.PeerCertificate()
- rv.CertificateChain, rv.CertificateChainError = c.PeerCertificateChain()
- rv.SessionReused = c.SessionReused()
- return
-}
-
-func (c *Conn) shutdown() func() error {
- c.mtx.Lock()
- defer c.mtx.Unlock()
- runtime.LockOSThread()
- defer runtime.UnlockOSThread()
- rv, errno := C.SSL_shutdown(c.ssl)
- if rv > 0 {
- return nil
- }
- if rv == 0 {
- // The OpenSSL docs say that in this case, the shutdown is not
- // finished, and we should call SSL_shutdown() a second time, if a
- // bidirectional shutdown is going to be performed. Further, the
- // output of SSL_get_error may be misleading, as an erroneous
- // SSL_ERROR_SYSCALL may be flagged even though no error occurred.
- // So, TODO: revisit bidrectional shutdown, possibly trying again.
- // Note: some broken clients won't engage in bidirectional shutdown
- // without tickling them to close by sending a TCP_FIN packet, or
- // shutting down the write-side of the connection.
- return nil
- } else {
- return c.getErrorHandler(rv, errno)
- }
-}
-
-func (c *Conn) shutdownLoop() error {
- err := errTryAgain
- shutdown_tries := 0
- for err == errTryAgain {
- shutdown_tries = shutdown_tries + 1
- err = c.handleError(c.shutdown())
- if err == nil {
- return c.flushOutputBuffer()
- }
- if err == errTryAgain && shutdown_tries >= 2 {
- return errors.New("shutdown requested a third time?")
- }
- }
- if err == io.ErrUnexpectedEOF {
- err = nil
- }
- return err
-}
-
-// Close shuts down the SSL connection and closes the underlying wrapped
-// connection.
-func (c *Conn) Close() error {
- c.mtx.Lock()
- if c.is_shutdown {
- c.mtx.Unlock()
- return nil
- }
- c.is_shutdown = true
- c.mtx.Unlock()
- var errs utils.ErrorGroup
- errs.Add(c.shutdownLoop())
- errs.Add(c.conn.Close())
- return errs.Finalize()
-}
-
-func (c *Conn) read(b []byte) (int, func() error) {
- if len(b) == 0 {
- return 0, nil
- }
- c.mtx.Lock()
- defer c.mtx.Unlock()
- if c.is_shutdown {
- return 0, func() error { return io.EOF }
- }
- runtime.LockOSThread()
- defer runtime.UnlockOSThread()
- rv, errno := C.SSL_read(c.ssl, unsafe.Pointer(&b[0]), C.int(len(b)))
- if rv > 0 {
- return int(rv), nil
- }
- return 0, c.getErrorHandler(rv, errno)
-}
-
-// Read reads up to len(b) bytes into b. It returns the number of bytes read
-// and an error if applicable. io.EOF is returned when the caller can expect
-// to see no more data.
-func (c *Conn) Read(b []byte) (n int, err error) {
- if len(b) == 0 {
- return 0, nil
- }
- err = errTryAgain
- for err == errTryAgain {
- n, errcb := c.read(b)
- err = c.handleError(errcb)
- if err == nil {
- go c.flushOutputBuffer()
- return n, nil
- }
- if err == io.ErrUnexpectedEOF {
- err = io.EOF
- }
- }
- return 0, err
-}
-
-func (c *Conn) write(b []byte) (int, func() error) {
- if len(b) == 0 {
- return 0, nil
- }
- c.mtx.Lock()
- defer c.mtx.Unlock()
- if c.is_shutdown {
- err := errors.New("connection closed")
- return 0, func() error { return err }
- }
- runtime.LockOSThread()
- defer runtime.UnlockOSThread()
- rv, errno := C.SSL_write(c.ssl, unsafe.Pointer(&b[0]), C.int(len(b)))
- if rv > 0 {
- return int(rv), nil
- }
- return 0, c.getErrorHandler(rv, errno)
-}
-
-// Write will encrypt the contents of b and write it to the underlying stream.
-// Performance will be vastly improved if the size of b is a multiple of
-// SSLRecordSize.
-func (c *Conn) Write(b []byte) (written int, err error) {
- if len(b) == 0 {
- return 0, nil
- }
- err = errTryAgain
- for err == errTryAgain {
- n, errcb := c.write(b)
- err = c.handleError(errcb)
- if err == nil {
- return n, c.flushOutputBuffer()
- }
- }
- return 0, err
-}
-
-// VerifyHostname pulls the PeerCertificate and calls VerifyHostname on the
-// certificate.
-func (c *Conn) VerifyHostname(host string) error {
- cert, err := c.PeerCertificate()
- if err != nil {
- return err
- }
- return cert.VerifyHostname(host)
-}
-
-// LocalAddr returns the underlying connection's local address
-func (c *Conn) LocalAddr() net.Addr {
- return c.conn.LocalAddr()
-}
-
-// RemoteAddr returns the underlying connection's remote address
-func (c *Conn) RemoteAddr() net.Addr {
- return c.conn.RemoteAddr()
-}
-
-// SetDeadline calls SetDeadline on the underlying connection.
-func (c *Conn) SetDeadline(t time.Time) error {
- return c.conn.SetDeadline(t)
-}
-
-// SetReadDeadline calls SetReadDeadline on the underlying connection.
-func (c *Conn) SetReadDeadline(t time.Time) error {
- return c.conn.SetReadDeadline(t)
-}
-
-// SetWriteDeadline calls SetWriteDeadline on the underlying connection.
-func (c *Conn) SetWriteDeadline(t time.Time) error {
- return c.conn.SetWriteDeadline(t)
-}
-
-func (c *Conn) UnderlyingConn() net.Conn {
- return c.conn
-}
-
-func (c *Conn) SetTlsExtHostName(name string) error {
- cname := C.CString(name)
- defer C.free(unsafe.Pointer(cname))
- runtime.LockOSThread()
- defer runtime.UnlockOSThread()
- if C.X_SSL_set_tlsext_host_name(c.ssl, cname) == 0 {
- return errorFromErrorQueue()
- }
- return nil
-}
-
-func (c *Conn) VerifyResult() VerifyResult {
- return VerifyResult(C.SSL_get_verify_result(c.ssl))
-}
-
-func (c *Conn) SessionReused() bool {
- return C.X_SSL_session_reused(c.ssl) == 1
-}
-
-func (c *Conn) GetSession() ([]byte, error) {
- runtime.LockOSThread()
- defer runtime.UnlockOSThread()
-
- // get1 increases the refcount of the session, so we have to free it.
- session := (*C.SSL_SESSION)(C.SSL_get1_session(c.ssl))
- if session == nil {
- return nil, errors.New("failed to get session")
- }
- defer C.SSL_SESSION_free(session)
-
- // get the size of the encoding
- slen := C.i2d_SSL_SESSION(session, nil)
-
- buf := (*C.uchar)(C.malloc(C.size_t(slen)))
- defer C.free(unsafe.Pointer(buf))
-
- // this modifies the value of buf (seriously), so we have to pass in a temp
- // var so that we can actually read the bytes from buf.
- tmp := buf
- slen2 := C.i2d_SSL_SESSION(session, &tmp)
- if slen != slen2 {
- return nil, errors.New("session had different lengths")
- }
-
- return C.GoBytes(unsafe.Pointer(buf), slen), nil
-}
-
-func (c *Conn) setSession(session []byte) error {
- runtime.LockOSThread()
- defer runtime.UnlockOSThread()
-
- ptr := (*C.uchar)(&session[0])
- s := C.d2i_SSL_SESSION(nil, &ptr, C.long(len(session)))
- if s == nil {
- return fmt.Errorf("unable to load session: %s", errorFromErrorQueue())
- }
- defer C.SSL_SESSION_free(s)
-
- ret := C.SSL_set_session(c.ssl, s)
- if ret != 1 {
- return fmt.Errorf("unable to set session: %s", errorFromErrorQueue())
- }
- return nil
-}
diff --git a/vendor/github.com/libp2p/go-openssl/ctx.go b/vendor/github.com/libp2p/go-openssl/ctx.go
deleted file mode 100644
index 3bebf0d5c..000000000
--- a/vendor/github.com/libp2p/go-openssl/ctx.go
+++ /dev/null
@@ -1,618 +0,0 @@
-// Copyright (C) 2017. See AUTHORS.
-//
-// Licensed under the Apache License, Version 2.0 (the "License");
-// you may not use this file except in compliance with the License.
-// You may obtain a copy of the License at
-//
-// http://www.apache.org/licenses/LICENSE-2.0
-//
-// Unless required by applicable law or agreed to in writing, software
-// distributed under the License is distributed on an "AS IS" BASIS,
-// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-// See the License for the specific language governing permissions and
-// limitations under the License.
-
-package openssl
-
-// #include "shim.h"
-import "C"
-
-import (
- "errors"
- "fmt"
- "io/ioutil"
- "os"
- "runtime"
- "sync"
- "time"
- "unsafe"
-
- "github.com/mattn/go-pointer"
- "github.com/spacemonkeygo/spacelog"
-)
-
-var (
- ssl_ctx_idx = C.X_SSL_CTX_new_index()
-
- logger = spacelog.GetLogger()
-)
-
-type Ctx struct {
- ctx *C.SSL_CTX
- cert *Certificate
- chain []*Certificate
- key PrivateKey
- verify_cb VerifyCallback
- sni_cb TLSExtServernameCallback
-
- ticket_store_mu sync.Mutex
- ticket_store *TicketStore
-}
-
-//export get_ssl_ctx_idx
-func get_ssl_ctx_idx() C.int {
- return ssl_ctx_idx
-}
-
-func newCtx(method *C.SSL_METHOD) (*Ctx, error) {
- runtime.LockOSThread()
- defer runtime.UnlockOSThread()
- ctx := C.SSL_CTX_new(method)
- if ctx == nil {
- return nil, errorFromErrorQueue()
- }
- c := &Ctx{ctx: ctx}
- C.SSL_CTX_set_ex_data(ctx, get_ssl_ctx_idx(), pointer.Save(c))
- runtime.SetFinalizer(c, func(c *Ctx) {
- C.SSL_CTX_free(c.ctx)
- })
- return c, nil
-}
-
-type SSLVersion int
-
-const (
- SSLv3 SSLVersion = 0x02 // Vulnerable to "POODLE" attack.
- TLSv1 SSLVersion = 0x03
- TLSv1_1 SSLVersion = 0x04
- TLSv1_2 SSLVersion = 0x05
-
- // Make sure to disable SSLv2 and SSLv3 if you use this. SSLv3 is vulnerable
- // to the "POODLE" attack, and SSLv2 is what, just don't even.
- AnyVersion SSLVersion = 0x06
-)
-
-// NewCtxWithVersion creates an SSL context that is specific to the provided
-// SSL version. See http://www.openssl.org/docs/ssl/SSL_CTX_new.html for more.
-func NewCtxWithVersion(version SSLVersion) (*Ctx, error) {
- var method *C.SSL_METHOD
- switch version {
- case SSLv3:
- method = C.X_SSLv3_method()
- case TLSv1:
- method = C.X_TLSv1_method()
- case TLSv1_1:
- method = C.X_TLSv1_1_method()
- case TLSv1_2:
- method = C.X_TLSv1_2_method()
- case AnyVersion:
- method = C.X_SSLv23_method()
- }
- if method == nil {
- return nil, errors.New("unknown ssl/tls version")
- }
- return newCtx(method)
-}
-
-// NewCtx creates a context that supports any TLS version 1.0 and newer.
-func NewCtx() (*Ctx, error) {
- c, err := NewCtxWithVersion(AnyVersion)
- if err == nil {
- c.SetOptions(NoSSLv2 | NoSSLv3)
- }
- return c, err
-}
-
-// NewCtxFromFiles calls NewCtx, loads the provided files, and configures the
-// context to use them.
-func NewCtxFromFiles(cert_file string, key_file string) (*Ctx, error) {
- ctx, err := NewCtx()
- if err != nil {
- return nil, err
- }
-
- cert_bytes, err := ioutil.ReadFile(cert_file)
- if err != nil {
- return nil, err
- }
-
- certs := SplitPEM(cert_bytes)
- if len(certs) == 0 {
- return nil, fmt.Errorf("no PEM certificate found in '%s'", cert_file)
- }
- first, certs := certs[0], certs[1:]
- cert, err := LoadCertificateFromPEM(first)
- if err != nil {
- return nil, err
- }
-
- err = ctx.UseCertificate(cert)
- if err != nil {
- return nil, err
- }
-
- for _, pem := range certs {
- cert, err := LoadCertificateFromPEM(pem)
- if err != nil {
- return nil, err
- }
- err = ctx.AddChainCertificate(cert)
- if err != nil {
- return nil, err
- }
- }
-
- key_bytes, err := ioutil.ReadFile(key_file)
- if err != nil {
- return nil, err
- }
-
- key, err := LoadPrivateKeyFromPEM(key_bytes)
- if err != nil {
- return nil, err
- }
-
- err = ctx.UsePrivateKey(key)
- if err != nil {
- return nil, err
- }
-
- return ctx, nil
-}
-
-// EllipticCurve repesents the ASN.1 OID of an elliptic curve.
-// see https://www.openssl.org/docs/apps/ecparam.html for a list of implemented curves.
-type EllipticCurve int
-
-const (
- // P-256: X9.62/SECG curve over a 256 bit prime field
- Prime256v1 EllipticCurve = C.NID_X9_62_prime256v1
- // P-384: NIST/SECG curve over a 384 bit prime field
- Secp384r1 EllipticCurve = C.NID_secp384r1
- // P-521: NIST/SECG curve over a 521 bit prime field
- Secp521r1 EllipticCurve = C.NID_secp521r1
-)
-
-// SetEllipticCurve sets the elliptic curve used by the SSL context to
-// enable an ECDH cipher suite to be selected during the handshake.
-func (c *Ctx) SetEllipticCurve(curve EllipticCurve) error {
- runtime.LockOSThread()
- defer runtime.UnlockOSThread()
-
- k := C.EC_KEY_new_by_curve_name(C.int(curve))
- if k == nil {
- return errors.New("unknown curve")
- }
- defer C.EC_KEY_free(k)
-
- if int(C.X_SSL_CTX_set_tmp_ecdh(c.ctx, k)) != 1 {
- return errorFromErrorQueue()
- }
-
- return nil
-}
-
-// UseCertificate configures the context to present the given certificate to
-// peers.
-func (c *Ctx) UseCertificate(cert *Certificate) error {
- runtime.LockOSThread()
- defer runtime.UnlockOSThread()
- c.cert = cert
- if int(C.SSL_CTX_use_certificate(c.ctx, cert.x)) != 1 {
- return errorFromErrorQueue()
- }
- return nil
-}
-
-// AddChainCertificate adds a certificate to the chain presented in the
-// handshake.
-func (c *Ctx) AddChainCertificate(cert *Certificate) error {
- runtime.LockOSThread()
- defer runtime.UnlockOSThread()
- c.chain = append(c.chain, cert)
- if int(C.X_SSL_CTX_add_extra_chain_cert(c.ctx, cert.x)) != 1 {
- return errorFromErrorQueue()
- }
- // OpenSSL takes ownership via SSL_CTX_add_extra_chain_cert
- runtime.SetFinalizer(cert, nil)
- return nil
-}
-
-// UsePrivateKey configures the context to use the given private key for SSL
-// handshakes.
-func (c *Ctx) UsePrivateKey(key PrivateKey) error {
- runtime.LockOSThread()
- defer runtime.UnlockOSThread()
- c.key = key
- if int(C.SSL_CTX_use_PrivateKey(c.ctx, key.evpPKey())) != 1 {
- return errorFromErrorQueue()
- }
- return nil
-}
-
-type CertificateStore struct {
- store *C.X509_STORE
- // for GC
- ctx *Ctx
- certs []*Certificate
-}
-
-// Allocate a new, empty CertificateStore
-func NewCertificateStore() (*CertificateStore, error) {
- s := C.X509_STORE_new()
- if s == nil {
- return nil, errors.New("failed to allocate X509_STORE")
- }
- store := &CertificateStore{store: s}
- runtime.SetFinalizer(store, func(s *CertificateStore) {
- C.X509_STORE_free(s.store)
- })
- return store, nil
-}
-
-// Parse a chained PEM file, loading all certificates into the Store.
-func (s *CertificateStore) LoadCertificatesFromPEM(data []byte) error {
- pems := SplitPEM(data)
- for _, pem := range pems {
- cert, err := LoadCertificateFromPEM(pem)
- if err != nil {
- return err
- }
- err = s.AddCertificate(cert)
- if err != nil {
- return err
- }
- }
- return nil
-}
-
-// GetCertificateStore returns the context's certificate store that will be
-// used for peer validation.
-func (c *Ctx) GetCertificateStore() *CertificateStore {
- // we don't need to dealloc the cert store pointer here, because it points
- // to a ctx internal. so we do need to keep the ctx around
- return &CertificateStore{
- store: C.SSL_CTX_get_cert_store(c.ctx),
- ctx: c}
-}
-
-// AddCertificate marks the provided Certificate as a trusted certificate in
-// the given CertificateStore.
-func (s *CertificateStore) AddCertificate(cert *Certificate) error {
- runtime.LockOSThread()
- defer runtime.UnlockOSThread()
- s.certs = append(s.certs, cert)
- if int(C.X509_STORE_add_cert(s.store, cert.x)) != 1 {
- return errorFromErrorQueue()
- }
- return nil
-}
-
-type CertificateStoreCtx struct {
- ctx *C.X509_STORE_CTX
- ssl_ctx *Ctx
-}
-
-func (csc *CertificateStoreCtx) VerifyResult() VerifyResult {
- return VerifyResult(C.X509_STORE_CTX_get_error(csc.ctx))
-}
-
-func (csc *CertificateStoreCtx) Err() error {
- code := C.X509_STORE_CTX_get_error(csc.ctx)
- if code == C.X509_V_OK {
- return nil
- }
- return fmt.Errorf("openssl: %s",
- C.GoString(C.X509_verify_cert_error_string(C.long(code))))
-}
-
-func (csc *CertificateStoreCtx) Depth() int {
- return int(C.X509_STORE_CTX_get_error_depth(csc.ctx))
-}
-
-// the certificate returned is only valid for the lifetime of the underlying
-// X509_STORE_CTX
-func (csc *CertificateStoreCtx) GetCurrentCert() *Certificate {
- x509 := C.X509_STORE_CTX_get_current_cert(csc.ctx)
- if x509 == nil {
- return nil
- }
- // add a ref
- if C.X_X509_add_ref(x509) != 1 {
- return nil
- }
- cert := &Certificate{
- x: x509,
- }
- runtime.SetFinalizer(cert, func(cert *Certificate) {
- C.X509_free(cert.x)
- })
- return cert
-}
-
-// LoadVerifyLocations tells the context to trust all certificate authorities
-// provided in either the ca_file or the ca_path.
-// See http://www.openssl.org/docs/ssl/SSL_CTX_load_verify_locations.html for
-// more.
-func (c *Ctx) LoadVerifyLocations(ca_file string, ca_path string) error {
- runtime.LockOSThread()
- defer runtime.UnlockOSThread()
- var c_ca_file, c_ca_path *C.char
- if ca_file != "" {
- c_ca_file = C.CString(ca_file)
- defer C.free(unsafe.Pointer(c_ca_file))
- }
- if ca_path != "" {
- c_ca_path = C.CString(ca_path)
- defer C.free(unsafe.Pointer(c_ca_path))
- }
- if C.SSL_CTX_load_verify_locations(c.ctx, c_ca_file, c_ca_path) != 1 {
- return errorFromErrorQueue()
- }
- return nil
-}
-
-type Version int
-
-const (
- SSL3_VERSION Version = C.SSL3_VERSION
- TLS1_VERSION Version = C.TLS1_VERSION
- TLS1_1_VERSION Version = C.TLS1_1_VERSION
- TLS1_2_VERSION Version = C.TLS1_2_VERSION
- TLS1_3_VERSION Version = C.TLS1_3_VERSION
- DTLS1_VERSION Version = C.DTLS1_VERSION
- DTLS1_2_VERSION Version = C.DTLS1_2_VERSION
-)
-
-// SetMinProtoVersion sets the minimum supported protocol version for the Ctx.
-// http://www.openssl.org/docs/ssl/SSL_CTX_set_min_proto_version.html
-func (c *Ctx) SetMinProtoVersion(version Version) bool {
- return C.X_SSL_CTX_set_min_proto_version(
- c.ctx, C.int(version)) == 1
-}
-
-// SetMaxProtoVersion sets the maximum supported protocol version for the Ctx.
-// http://www.openssl.org/docs/ssl/SSL_CTX_set_max_proto_version.html
-func (c *Ctx) SetMaxProtoVersion(version Version) bool {
- return C.X_SSL_CTX_set_max_proto_version(
- c.ctx, C.int(version)) == 1
-}
-
-type Options int
-
-const (
- // NoCompression is only valid if you are using OpenSSL 1.0.1 or newer
- NoCompression Options = C.SSL_OP_NO_COMPRESSION
- NoSSLv2 Options = C.SSL_OP_NO_SSLv2
- NoSSLv3 Options = C.SSL_OP_NO_SSLv3
- NoTLSv1 Options = C.SSL_OP_NO_TLSv1
- CipherServerPreference Options = C.SSL_OP_CIPHER_SERVER_PREFERENCE
- NoSessionResumptionOrRenegotiation Options = C.SSL_OP_NO_SESSION_RESUMPTION_ON_RENEGOTIATION
- NoTicket Options = C.SSL_OP_NO_TICKET
-)
-
-// SetOptions sets context options. See
-// http://www.openssl.org/docs/ssl/SSL_CTX_set_options.html
-func (c *Ctx) SetOptions(options Options) Options {
- return Options(C.X_SSL_CTX_set_options(
- c.ctx, C.long(options)))
-}
-
-func (c *Ctx) ClearOptions(options Options) Options {
- return Options(C.X_SSL_CTX_clear_options(
- c.ctx, C.long(options)))
-}
-
-// GetOptions returns context options. See
-// https://www.openssl.org/docs/ssl/SSL_CTX_set_options.html
-func (c *Ctx) GetOptions() Options {
- return Options(C.X_SSL_CTX_get_options(c.ctx))
-}
-
-type Modes int
-
-const (
- // ReleaseBuffers is only valid if you are using OpenSSL 1.0.1 or newer
- ReleaseBuffers Modes = C.SSL_MODE_RELEASE_BUFFERS
-)
-
-// SetMode sets context modes. See
-// http://www.openssl.org/docs/ssl/SSL_CTX_set_mode.html
-func (c *Ctx) SetMode(modes Modes) Modes {
- return Modes(C.X_SSL_CTX_set_mode(c.ctx, C.long(modes)))
-}
-
-// GetMode returns context modes. See
-// http://www.openssl.org/docs/ssl/SSL_CTX_set_mode.html
-func (c *Ctx) GetMode() Modes {
- return Modes(C.X_SSL_CTX_get_mode(c.ctx))
-}
-
-type VerifyOptions int
-
-const (
- VerifyNone VerifyOptions = C.SSL_VERIFY_NONE
- VerifyPeer VerifyOptions = C.SSL_VERIFY_PEER
- VerifyFailIfNoPeerCert VerifyOptions = C.SSL_VERIFY_FAIL_IF_NO_PEER_CERT
- VerifyClientOnce VerifyOptions = C.SSL_VERIFY_CLIENT_ONCE
-)
-
-type VerifyCallback func(ok bool, store *CertificateStoreCtx) bool
-
-//export go_ssl_ctx_verify_cb_thunk
-func go_ssl_ctx_verify_cb_thunk(p unsafe.Pointer, ok C.int, ctx *C.X509_STORE_CTX) C.int {
- defer func() {
- if err := recover(); err != nil {
- logger.Critf("openssl: verify callback panic'd: %v", err)
- os.Exit(1)
- }
- }()
- verify_cb := pointer.Restore(p).(*Ctx).verify_cb
- // set up defaults just in case verify_cb is nil
- if verify_cb != nil {
- store := &CertificateStoreCtx{ctx: ctx}
- if verify_cb(ok == 1, store) {
- ok = 1
- } else {
- ok = 0
- }
- }
- return ok
-}
-
-// SetVerify controls peer verification settings. See
-// http://www.openssl.org/docs/ssl/SSL_CTX_set_verify.html
-func (c *Ctx) SetVerify(options VerifyOptions, verify_cb VerifyCallback) {
- c.verify_cb = verify_cb
- if verify_cb != nil {
- C.SSL_CTX_set_verify(c.ctx, C.int(options), (*[0]byte)(C.X_SSL_CTX_verify_cb))
- } else {
- C.SSL_CTX_set_verify(c.ctx, C.int(options), nil)
- }
-}
-
-func (c *Ctx) SetVerifyMode(options VerifyOptions) {
- c.SetVerify(options, c.verify_cb)
-}
-
-func (c *Ctx) SetVerifyCallback(verify_cb VerifyCallback) {
- c.SetVerify(c.VerifyMode(), verify_cb)
-}
-
-func (c *Ctx) GetVerifyCallback() VerifyCallback {
- return c.verify_cb
-}
-
-func (c *Ctx) VerifyMode() VerifyOptions {
- return VerifyOptions(C.SSL_CTX_get_verify_mode(c.ctx))
-}
-
-// SetVerifyDepth controls how many certificates deep the certificate
-// verification logic is willing to follow a certificate chain. See
-// https://www.openssl.org/docs/ssl/SSL_CTX_set_verify.html
-func (c *Ctx) SetVerifyDepth(depth int) {
- C.SSL_CTX_set_verify_depth(c.ctx, C.int(depth))
-}
-
-// GetVerifyDepth controls how many certificates deep the certificate
-// verification logic is willing to follow a certificate chain. See
-// https://www.openssl.org/docs/ssl/SSL_CTX_set_verify.html
-func (c *Ctx) GetVerifyDepth() int {
- return int(C.SSL_CTX_get_verify_depth(c.ctx))
-}
-
-type TLSExtServernameCallback func(ssl *SSL) SSLTLSExtErr
-
-// SetTLSExtServernameCallback sets callback function for Server Name Indication
-// (SNI) rfc6066 (http://tools.ietf.org/html/rfc6066). See
-// http://stackoverflow.com/questions/22373332/serving-multiple-domains-in-one-box-with-sni
-func (c *Ctx) SetTLSExtServernameCallback(sni_cb TLSExtServernameCallback) {
- c.sni_cb = sni_cb
- C.X_SSL_CTX_set_tlsext_servername_callback(c.ctx, (*[0]byte)(C.sni_cb))
-}
-
-func (c *Ctx) SetSessionId(session_id []byte) error {
- runtime.LockOSThread()
- defer runtime.UnlockOSThread()
- var ptr *C.uchar
- if len(session_id) > 0 {
- ptr = (*C.uchar)(unsafe.Pointer(&session_id[0]))
- }
- if int(C.SSL_CTX_set_session_id_context(c.ctx, ptr,
- C.uint(len(session_id)))) == 0 {
- return errorFromErrorQueue()
- }
- return nil
-}
-
-// SetCipherList sets the list of available ciphers. The format of the list is
-// described at http://www.openssl.org/docs/apps/ciphers.html, but see
-// http://www.openssl.org/docs/ssl/SSL_CTX_set_cipher_list.html for more.
-func (c *Ctx) SetCipherList(list string) error {
- runtime.LockOSThread()
- defer runtime.UnlockOSThread()
- clist := C.CString(list)
- defer C.free(unsafe.Pointer(clist))
- if int(C.SSL_CTX_set_cipher_list(c.ctx, clist)) == 0 {
- return errorFromErrorQueue()
- }
- return nil
-}
-
-// SetNextProtos sets Negotiation protocol to the ctx.
-func (c *Ctx) SetNextProtos(protos []string) error {
- if len(protos) == 0 {
- return nil
- }
- vector := make([]byte, 0)
- for _, proto := range protos {
- if len(proto) > 255 {
- return fmt.Errorf(
- "proto length can't be more than 255. But got a proto %s with length %d",
- proto, len(proto))
- }
- vector = append(vector, byte(uint8(len(proto))))
- vector = append(vector, []byte(proto)...)
- }
- ret := int(C.SSL_CTX_set_alpn_protos(c.ctx, (*C.uchar)(unsafe.Pointer(&vector[0])),
- C.uint(len(vector))))
- if ret != 0 {
- return errors.New("error while setting protos to ctx")
- }
- return nil
-}
-
-type SessionCacheModes int
-
-const (
- SessionCacheOff SessionCacheModes = C.SSL_SESS_CACHE_OFF
- SessionCacheClient SessionCacheModes = C.SSL_SESS_CACHE_CLIENT
- SessionCacheServer SessionCacheModes = C.SSL_SESS_CACHE_SERVER
- SessionCacheBoth SessionCacheModes = C.SSL_SESS_CACHE_BOTH
- NoAutoClear SessionCacheModes = C.SSL_SESS_CACHE_NO_AUTO_CLEAR
- NoInternalLookup SessionCacheModes = C.SSL_SESS_CACHE_NO_INTERNAL_LOOKUP
- NoInternalStore SessionCacheModes = C.SSL_SESS_CACHE_NO_INTERNAL_STORE
- NoInternal SessionCacheModes = C.SSL_SESS_CACHE_NO_INTERNAL
-)
-
-// SetSessionCacheMode enables or disables session caching. See
-// http://www.openssl.org/docs/ssl/SSL_CTX_set_session_cache_mode.html
-func (c *Ctx) SetSessionCacheMode(modes SessionCacheModes) SessionCacheModes {
- return SessionCacheModes(
- C.X_SSL_CTX_set_session_cache_mode(c.ctx, C.long(modes)))
-}
-
-// Set session cache timeout. Returns previously set value.
-// See https://www.openssl.org/docs/ssl/SSL_CTX_set_timeout.html
-func (c *Ctx) SetTimeout(t time.Duration) time.Duration {
- prev := C.X_SSL_CTX_set_timeout(c.ctx, C.long(t/time.Second))
- return time.Duration(prev) * time.Second
-}
-
-// Get session cache timeout.
-// See https://www.openssl.org/docs/ssl/SSL_CTX_set_timeout.html
-func (c *Ctx) GetTimeout() time.Duration {
- return time.Duration(C.X_SSL_CTX_get_timeout(c.ctx)) * time.Second
-}
-
-// Set session cache size. Returns previously set value.
-// https://www.openssl.org/docs/ssl/SSL_CTX_sess_set_cache_size.html
-func (c *Ctx) SessSetCacheSize(t int) int {
- return int(C.X_SSL_CTX_sess_set_cache_size(c.ctx, C.long(t)))
-}
-
-// Get session cache size.
-// https://www.openssl.org/docs/ssl/SSL_CTX_sess_set_cache_size.html
-func (c *Ctx) SessGetCacheSize() int {
- return int(C.X_SSL_CTX_sess_get_cache_size(c.ctx))
-}
diff --git a/vendor/github.com/libp2p/go-openssl/dh.go b/vendor/github.com/libp2p/go-openssl/dh.go
deleted file mode 100644
index 75ac5ad42..000000000
--- a/vendor/github.com/libp2p/go-openssl/dh.go
+++ /dev/null
@@ -1,66 +0,0 @@
-// Copyright (C) 2017. See AUTHORS.
-//
-// Licensed under the Apache License, Version 2.0 (the "License");
-// you may not use this file except in compliance with the License.
-// You may obtain a copy of the License at
-//
-// http://www.apache.org/licenses/LICENSE-2.0
-//
-// Unless required by applicable law or agreed to in writing, software
-// distributed under the License is distributed on an "AS IS" BASIS,
-// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-// See the License for the specific language governing permissions and
-// limitations under the License.
-
-package openssl
-
-// #include "shim.h"
-import "C"
-import (
- "errors"
- "unsafe"
-)
-
-// DeriveSharedSecret derives a shared secret using a private key and a peer's
-// public key.
-// The specific algorithm that is used depends on the types of the
-// keys, but it is most commonly a variant of Diffie-Hellman.
-func DeriveSharedSecret(private PrivateKey, public PublicKey) ([]byte, error) {
- // Create context for the shared secret derivation
- dhCtx := C.EVP_PKEY_CTX_new(private.evpPKey(), nil)
- if dhCtx == nil {
- return nil, errors.New("failed creating shared secret derivation context")
- }
- defer C.EVP_PKEY_CTX_free(dhCtx)
-
- // Initialize the context
- if int(C.EVP_PKEY_derive_init(dhCtx)) != 1 {
- return nil, errors.New("failed initializing shared secret derivation context")
- }
-
- // Provide the peer's public key
- if int(C.EVP_PKEY_derive_set_peer(dhCtx, public.evpPKey())) != 1 {
- return nil, errors.New("failed adding peer public key to context")
- }
-
- // Determine how large of a buffer we need for the shared secret
- var buffLen C.size_t
- if int(C.EVP_PKEY_derive(dhCtx, nil, &buffLen)) != 1 {
- return nil, errors.New("failed determining shared secret length")
- }
-
- // Allocate a buffer
- buffer := C.X_OPENSSL_malloc(buffLen)
- if buffer == nil {
- return nil, errors.New("failed allocating buffer for shared secret")
- }
- defer C.X_OPENSSL_free(buffer)
-
- // Derive the shared secret
- if int(C.EVP_PKEY_derive(dhCtx, (*C.uchar)(buffer), &buffLen)) != 1 {
- return nil, errors.New("failed deriving the shared secret")
- }
-
- secret := C.GoBytes(unsafe.Pointer(buffer), C.int(buffLen))
- return secret, nil
-}
diff --git a/vendor/github.com/libp2p/go-openssl/dhparam.go b/vendor/github.com/libp2p/go-openssl/dhparam.go
deleted file mode 100644
index 294d0645c..000000000
--- a/vendor/github.com/libp2p/go-openssl/dhparam.go
+++ /dev/null
@@ -1,64 +0,0 @@
-// Copyright (C) 2017. See AUTHORS.
-//
-// Licensed under the Apache License, Version 2.0 (the "License");
-// you may not use this file except in compliance with the License.
-// You may obtain a copy of the License at
-//
-// http://www.apache.org/licenses/LICENSE-2.0
-//
-// Unless required by applicable law or agreed to in writing, software
-// distributed under the License is distributed on an "AS IS" BASIS,
-// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-// See the License for the specific language governing permissions and
-// limitations under the License.
-
-package openssl
-
-// #include "shim.h"
-import "C"
-
-import (
- "errors"
- "runtime"
- "unsafe"
-)
-
-type DH struct {
- dh *C.struct_dh_st
-}
-
-// LoadDHParametersFromPEM loads the Diffie-Hellman parameters from
-// a PEM-encoded block.
-func LoadDHParametersFromPEM(pem_block []byte) (*DH, error) {
- if len(pem_block) == 0 {
- return nil, errors.New("empty pem block")
- }
- bio := C.BIO_new_mem_buf(unsafe.Pointer(&pem_block[0]),
- C.int(len(pem_block)))
- if bio == nil {
- return nil, errors.New("failed creating bio")
- }
- defer C.BIO_free(bio)
-
- params := C.PEM_read_bio_DHparams(bio, nil, nil, nil)
- if params == nil {
- return nil, errors.New("failed reading dh parameters")
- }
- dhparams := &DH{dh: params}
- runtime.SetFinalizer(dhparams, func(dhparams *DH) {
- C.DH_free(dhparams.dh)
- })
- return dhparams, nil
-}
-
-// SetDHParameters sets the DH group (DH parameters) used to
-// negotiate an emphemeral DH key during handshaking.
-func (c *Ctx) SetDHParameters(dh *DH) error {
- runtime.LockOSThread()
- defer runtime.UnlockOSThread()
-
- if int(C.X_SSL_CTX_set_tmp_dh(c.ctx, dh.dh)) != 1 {
- return errorFromErrorQueue()
- }
- return nil
-}
diff --git a/vendor/github.com/libp2p/go-openssl/digest.go b/vendor/github.com/libp2p/go-openssl/digest.go
deleted file mode 100644
index 6d8d2635a..000000000
--- a/vendor/github.com/libp2p/go-openssl/digest.go
+++ /dev/null
@@ -1,51 +0,0 @@
-// Copyright (C) 2017. See AUTHORS.
-//
-// Licensed under the Apache License, Version 2.0 (the "License");
-// you may not use this file except in compliance with the License.
-// You may obtain a copy of the License at
-//
-// http://www.apache.org/licenses/LICENSE-2.0
-//
-// Unless required by applicable law or agreed to in writing, software
-// distributed under the License is distributed on an "AS IS" BASIS,
-// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-// See the License for the specific language governing permissions and
-// limitations under the License.
-
-package openssl
-
-// #include "shim.h"
-import "C"
-
-import (
- "fmt"
- "unsafe"
-)
-
-// Digest represents and openssl message digest.
-type Digest struct {
- ptr *C.EVP_MD
-}
-
-// GetDigestByName returns the Digest with the name or nil and an error if the
-// digest was not found.
-func GetDigestByName(name string) (*Digest, error) {
- cname := C.CString(name)
- defer C.free(unsafe.Pointer(cname))
- p := C.X_EVP_get_digestbyname(cname)
- if p == nil {
- return nil, fmt.Errorf("Digest %v not found", name)
- }
- // we can consider digests to use static mem; don't need to free
- return &Digest{ptr: p}, nil
-}
-
-// GetDigestByName returns the Digest with the NID or nil and an error if the
-// digest was not found.
-func GetDigestByNid(nid NID) (*Digest, error) {
- sn, err := Nid2ShortName(nid)
- if err != nil {
- return nil, err
- }
- return GetDigestByName(sn)
-}
diff --git a/vendor/github.com/libp2p/go-openssl/engine.go b/vendor/github.com/libp2p/go-openssl/engine.go
deleted file mode 100644
index 78aef956f..000000000
--- a/vendor/github.com/libp2p/go-openssl/engine.go
+++ /dev/null
@@ -1,50 +0,0 @@
-// Copyright (C) 2017. See AUTHORS.
-//
-// Licensed under the Apache License, Version 2.0 (the "License");
-// you may not use this file except in compliance with the License.
-// You may obtain a copy of the License at
-//
-// http://www.apache.org/licenses/LICENSE-2.0
-//
-// Unless required by applicable law or agreed to in writing, software
-// distributed under the License is distributed on an "AS IS" BASIS,
-// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-// See the License for the specific language governing permissions and
-// limitations under the License.
-
-package openssl
-
-/*
-#include "openssl/engine.h"
-*/
-import "C"
-
-import (
- "fmt"
- "runtime"
- "unsafe"
-)
-
-type Engine struct {
- e *C.ENGINE
-}
-
-func EngineById(name string) (*Engine, error) {
- cname := C.CString(name)
- defer C.free(unsafe.Pointer(cname))
- e := &Engine{
- e: C.ENGINE_by_id(cname),
- }
- if e.e == nil {
- return nil, fmt.Errorf("engine %s missing", name)
- }
- if C.ENGINE_init(e.e) == 0 {
- C.ENGINE_free(e.e)
- return nil, fmt.Errorf("engine %s not initialized", name)
- }
- runtime.SetFinalizer(e, func(e *Engine) {
- C.ENGINE_finish(e.e)
- C.ENGINE_free(e.e)
- })
- return e, nil
-}
diff --git a/vendor/github.com/libp2p/go-openssl/extension.c b/vendor/github.com/libp2p/go-openssl/extension.c
deleted file mode 100644
index 99f1ca3d9..000000000
--- a/vendor/github.com/libp2p/go-openssl/extension.c
+++ /dev/null
@@ -1,40 +0,0 @@
-
-
-#include
-#include
-
-const unsigned char * get_extention(X509 *x, int NID, int *data_len){
- int loc;
- ASN1_OCTET_STRING *octet_str;
- long xlen;
- int tag, xclass;
-
- loc = X509_get_ext_by_NID( x, NID, -1);
- X509_EXTENSION *ex = X509_get_ext(x, loc);
- octet_str = X509_EXTENSION_get_data(ex);
- *data_len = octet_str->length;
- return octet_str->data;
-}
-
-// Copied from https://github.com/libtor/openssl/blob/master/demos/x509/mkcert.c#L153
-int add_custom_ext(X509 *cert, int nid,unsigned char *value, int len)
-{
- X509_EXTENSION *ex;
- ASN1_OCTET_STRING *os = ASN1_OCTET_STRING_new();
- ASN1_OCTET_STRING_set(os,value,len);
- X509V3_CTX ctx;
- /* This sets the 'context' of the extensions. */
- /* No configuration database */
- X509V3_set_ctx_nodb(&ctx);
- /* Issuer and subject certs: both the target since it is self signed,
- * no request and no CRL
- */
- X509V3_set_ctx(&ctx, cert, cert, NULL, NULL, 0);
- // ref http://openssl.6102.n7.nabble.com/Adding-a-custom-extension-to-a-CSR-td47446.html
- ex = X509_EXTENSION_create_by_NID( NULL, nid, 0, os);
- if (!X509_add_ext(cert,ex,-1))
- return 0;
-
- X509_EXTENSION_free(ex);
- return 1;
-}
\ No newline at end of file
diff --git a/vendor/github.com/libp2p/go-openssl/fips.go b/vendor/github.com/libp2p/go-openssl/fips.go
deleted file mode 100644
index b15b9bf06..000000000
--- a/vendor/github.com/libp2p/go-openssl/fips.go
+++ /dev/null
@@ -1,52 +0,0 @@
-// Copyright (C) 2017. See AUTHORS.
-//
-// Licensed under the Apache License, Version 2.0 (the "License");
-// you may not use this file except in compliance with the License.
-// You may obtain a copy of the License at
-//
-// http://www.apache.org/licenses/LICENSE-2.0
-//
-// Unless required by applicable law or agreed to in writing, software
-// distributed under the License is distributed on an "AS IS" BASIS,
-// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-// See the License for the specific language governing permissions and
-// limitations under the License.
-
-package openssl
-
-/*
-#include
-
-#if OPENSSL_VERSION_NUMBER >= 0x30000000L
- int FIPS_mode_set(int ONOFF) {
- return 0;
- }
-#endif
-
-*/
-import "C"
-import "errors"
-import "runtime"
-
-// FIPSModeSet enables a FIPS 140-2 validated mode of operation.
-// https://wiki.openssl.org/index.php/FIPS_mode_set()
-// This call has been deleted from OpenSSL 3.0.
-func FIPSModeSet(mode bool) error {
- runtime.LockOSThread()
- defer runtime.UnlockOSThread()
-
- if C.OPENSSL_VERSION_NUMBER >= 0x30000000 {
- return errors.New("FIPS_mode_set() has been deleted from OpenSSL 3.0")
- }
-
- var r C.int
- if mode {
- r = C.FIPS_mode_set(1)
- } else {
- r = C.FIPS_mode_set(0)
- }
- if r != 1 {
- return errorFromErrorQueue()
- }
- return nil
-}
diff --git a/vendor/github.com/libp2p/go-openssl/hmac.go b/vendor/github.com/libp2p/go-openssl/hmac.go
deleted file mode 100644
index 77e8dc58c..000000000
--- a/vendor/github.com/libp2p/go-openssl/hmac.go
+++ /dev/null
@@ -1,91 +0,0 @@
-// Copyright (C) 2017. See AUTHORS.
-//
-// Licensed under the Apache License, Version 2.0 (the "License");
-// you may not use this file except in compliance with the License.
-// You may obtain a copy of the License at
-//
-// http://www.apache.org/licenses/LICENSE-2.0
-//
-// Unless required by applicable law or agreed to in writing, software
-// distributed under the License is distributed on an "AS IS" BASIS,
-// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-// See the License for the specific language governing permissions and
-// limitations under the License.
-
-package openssl
-
-// #include "shim.h"
-import "C"
-
-import (
- "errors"
- "runtime"
- "unsafe"
-)
-
-type HMAC struct {
- ctx *C.HMAC_CTX
- engine *Engine
- md *C.EVP_MD
-}
-
-func NewHMAC(key []byte, digestAlgorithm EVP_MD) (*HMAC, error) {
- return NewHMACWithEngine(key, digestAlgorithm, nil)
-}
-
-func NewHMACWithEngine(key []byte, digestAlgorithm EVP_MD, e *Engine) (*HMAC, error) {
- var md *C.EVP_MD = getDigestFunction(digestAlgorithm)
- h := &HMAC{engine: e, md: md}
- h.ctx = C.X_HMAC_CTX_new()
- if h.ctx == nil {
- return nil, errors.New("unable to allocate HMAC_CTX")
- }
-
- var c_e *C.ENGINE
- if e != nil {
- c_e = e.e
- }
- if rc := C.X_HMAC_Init_ex(h.ctx,
- unsafe.Pointer(&key[0]),
- C.int(len(key)),
- md,
- c_e); rc != 1 {
- C.X_HMAC_CTX_free(h.ctx)
- return nil, errors.New("failed to initialize HMAC_CTX")
- }
-
- runtime.SetFinalizer(h, func(h *HMAC) { h.Close() })
- return h, nil
-}
-
-func (h *HMAC) Close() {
- C.X_HMAC_CTX_free(h.ctx)
-}
-
-func (h *HMAC) Write(data []byte) (n int, err error) {
- if len(data) == 0 {
- return 0, nil
- }
- if rc := C.X_HMAC_Update(h.ctx, (*C.uchar)(unsafe.Pointer(&data[0])),
- C.size_t(len(data))); rc != 1 {
- return 0, errors.New("failed to update HMAC")
- }
- return len(data), nil
-}
-
-func (h *HMAC) Reset() error {
- if C.X_HMAC_Init_ex(h.ctx, nil, 0, nil, nil) != 1 {
- return errors.New("failed to reset HMAC_CTX")
- }
- return nil
-}
-
-func (h *HMAC) Final() (result []byte, err error) {
- mdLength := C.X_EVP_MD_size(h.md)
- result = make([]byte, mdLength)
- if rc := C.X_HMAC_Final(h.ctx, (*C.uchar)(unsafe.Pointer(&result[0])),
- (*C.uint)(unsafe.Pointer(&mdLength))); rc != 1 {
- return nil, errors.New("failed to finalized HMAC")
- }
- return result, h.Reset()
-}
diff --git a/vendor/github.com/libp2p/go-openssl/hostname.c b/vendor/github.com/libp2p/go-openssl/hostname.c
deleted file mode 100644
index 0bffecad6..000000000
--- a/vendor/github.com/libp2p/go-openssl/hostname.c
+++ /dev/null
@@ -1,373 +0,0 @@
-/*
- * Go-OpenSSL notice:
- * This file is required for all OpenSSL versions prior to 1.1.0. This simply
- * provides the new 1.1.0 X509_check_* methods for hostname validation if they
- * don't already exist.
- */
-
-#include
-
-#ifndef X509_CHECK_FLAG_ALWAYS_CHECK_SUBJECT
-
-/* portions from x509v3.h and v3_utl.c */
-/* Written by Dr Stephen N Henson (steve@openssl.org) for the OpenSSL
- * project.
- */
-/* ====================================================================
- * Copyright (c) 1999-2003 The OpenSSL Project. All rights reserved.
- *
- * Redistribution and use in source and binary forms, with or without
- * modification, are permitted provided that the following conditions
- * are met:
- *
- * 1. Redistributions of source code must retain the above copyright
- * notice, this list of conditions and the following disclaimer.
- *
- * 2. Redistributions in binary form must reproduce the above copyright
- * notice, this list of conditions and the following disclaimer in
- * the documentation and/or other materials provided with the
- * distribution.
- *
- * 3. All advertising materials mentioning features or use of this
- * software must display the following acknowledgment:
- * "This product includes software developed by the OpenSSL Project
- * for use in the OpenSSL Toolkit. (http://www.OpenSSL.org/)"
- *
- * 4. The names "OpenSSL Toolkit" and "OpenSSL Project" must not be used to
- * endorse or promote products derived from this software without
- * prior written permission. For written permission, please contact
- * licensing@OpenSSL.org.
- *
- * 5. Products derived from this software may not be called "OpenSSL"
- * nor may "OpenSSL" appear in their names without prior written
- * permission of the OpenSSL Project.
- *
- * 6. Redistributions of any form whatsoever must retain the following
- * acknowledgment:
- * "This product includes software developed by the OpenSSL Project
- * for use in the OpenSSL Toolkit (http://www.OpenSSL.org/)"
- *
- * THIS SOFTWARE IS PROVIDED BY THE OpenSSL PROJECT ``AS IS'' AND ANY
- * EXPRESSED OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
- * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
- * PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE OpenSSL PROJECT OR
- * ITS CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
- * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT
- * NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
- * LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
- * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT,
- * STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
- * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED
- * OF THE POSSIBILITY OF SUCH DAMAGE.
- * ====================================================================
- *
- * This product includes cryptographic software written by Eric Young
- * (eay@cryptsoft.com). This product includes software written by Tim
- * Hudson (tjh@cryptsoft.com).
- *
- */
-/* X509 v3 extension utilities */
-
-#include
-#include
-#include
-#include
-#include
-
-#define X509_CHECK_FLAG_ALWAYS_CHECK_SUBJECT 0x1
-#define X509_CHECK_FLAG_NO_WILDCARDS 0x2
-
-typedef int (*equal_fn)(const unsigned char *pattern, size_t pattern_len,
- const unsigned char *subject, size_t subject_len);
-
-/* Compare while ASCII ignoring case. */
-static int equal_nocase(const unsigned char *pattern, size_t pattern_len,
- const unsigned char *subject, size_t subject_len)
- {
- if (pattern_len != subject_len)
- return 0;
- while (pattern_len)
- {
- unsigned char l = *pattern;
- unsigned char r = *subject;
- /* The pattern must not contain NUL characters. */
- if (l == 0)
- return 0;
- if (l != r)
- {
- if ('A' <= l && l <= 'Z')
- l = (l - 'A') + 'a';
- if ('A' <= r && r <= 'Z')
- r = (r - 'A') + 'a';
- if (l != r)
- return 0;
- }
- ++pattern;
- ++subject;
- --pattern_len;
- }
- return 1;
- }
-
-/* Compare using memcmp. */
-static int equal_case(const unsigned char *pattern, size_t pattern_len,
- const unsigned char *subject, size_t subject_len)
-{
- /* The pattern must not contain NUL characters. */
- if (memchr(pattern, '\0', pattern_len) != NULL)
- return 0;
- if (pattern_len != subject_len)
- return 0;
- return !memcmp(pattern, subject, pattern_len);
-}
-
-/* RFC 5280, section 7.5, requires that only the domain is compared in
- a case-insensitive manner. */
-static int equal_email(const unsigned char *a, size_t a_len,
- const unsigned char *b, size_t b_len)
- {
- size_t i = a_len;
- if (a_len != b_len)
- return 0;
- /* We search backwards for the '@' character, so that we do
- not have to deal with quoted local-parts. The domain part
- is compared in a case-insensitive manner. */
- while (i > 0)
- {
- --i;
- if (a[i] == '@' || b[i] == '@')
- {
- if (!equal_nocase(a + i, a_len - i,
- b + i, a_len - i))
- return 0;
- break;
- }
- }
- if (i == 0)
- i = a_len;
- return equal_case(a, i, b, i);
- }
-
-/* Compare the prefix and suffix with the subject, and check that the
- characters in-between are valid. */
-static int wildcard_match(const unsigned char *prefix, size_t prefix_len,
- const unsigned char *suffix, size_t suffix_len,
- const unsigned char *subject, size_t subject_len)
- {
- const unsigned char *wildcard_start;
- const unsigned char *wildcard_end;
- const unsigned char *p;
- if (subject_len < prefix_len + suffix_len)
- return 0;
- if (!equal_nocase(prefix, prefix_len, subject, prefix_len))
- return 0;
- wildcard_start = subject + prefix_len;
- wildcard_end = subject + (subject_len - suffix_len);
- if (!equal_nocase(wildcard_end, suffix_len, suffix, suffix_len))
- return 0;
- /* The wildcard must match at least one character. */
- if (wildcard_start == wildcard_end)
- return 0;
- /* Check that the part matched by the wildcard contains only
- permitted characters and only matches a single label. */
- for (p = wildcard_start; p != wildcard_end; ++p)
- if (!(('0' <= *p && *p <= '9') ||
- ('A' <= *p && *p <= 'Z') ||
- ('a' <= *p && *p <= 'z') ||
- *p == '-'))
- return 0;
- return 1;
- }
-
-/* Checks if the memory region consistens of [0-9A-Za-z.-]. */
-static int valid_domain_characters(const unsigned char *p, size_t len)
- {
- while (len)
- {
- if (!(('0' <= *p && *p <= '9') ||
- ('A' <= *p && *p <= 'Z') ||
- ('a' <= *p && *p <= 'z') ||
- *p == '-' || *p == '.'))
- return 0;
- ++p;
- --len;
- }
- return 1;
- }
-
-/* Find the '*' in a wildcard pattern. If no such character is found
- or the pattern is otherwise invalid, returns NULL. */
-static const unsigned char *wildcard_find_star(const unsigned char *pattern,
- size_t pattern_len)
- {
- const unsigned char *star = memchr(pattern, '*', pattern_len);
- size_t dot_count = 0;
- const unsigned char *suffix_start;
- size_t suffix_length;
- if (star == NULL)
- return NULL;
- suffix_start = star + 1;
- suffix_length = (pattern + pattern_len) - (star + 1);
- if (!(valid_domain_characters(pattern, star - pattern) &&
- valid_domain_characters(suffix_start, suffix_length)))
- return NULL;
- /* Check that the suffix matches at least two labels. */
- while (suffix_length)
- {
- if (*suffix_start == '.')
- ++dot_count;
- ++suffix_start;
- --suffix_length;
- }
- if (dot_count < 2)
- return NULL;
- return star;
- }
-
-/* Compare using wildcards. */
-static int equal_wildcard(const unsigned char *pattern, size_t pattern_len,
- const unsigned char *subject, size_t subject_len)
- {
- const unsigned char *star = wildcard_find_star(pattern, pattern_len);
- if (star == NULL)
- return equal_nocase(pattern, pattern_len,
- subject, subject_len);
- return wildcard_match(pattern, star - pattern,
- star + 1, (pattern + pattern_len) - star - 1,
- subject, subject_len);
- }
-
-/* Compare an ASN1_STRING to a supplied string. If they match
- * return 1. If cmp_type > 0 only compare if string matches the
- * type, otherwise convert it to UTF8.
- */
-
-static int do_check_string(ASN1_STRING *a, int cmp_type, equal_fn equal,
- const unsigned char *b, size_t blen)
- {
- if (!a->data || !a->length)
- return 0;
- if (cmp_type > 0)
- {
- if (cmp_type != a->type)
- return 0;
- if (cmp_type == V_ASN1_IA5STRING)
- return equal(a->data, a->length, b, blen);
- if (a->length == (int)blen && !memcmp(a->data, b, blen))
- return 1;
- else
- return 0;
- }
- else
- {
- int astrlen, rv;
- unsigned char *astr;
- astrlen = ASN1_STRING_to_UTF8(&astr, a);
- if (astrlen < 0)
- return -1;
- rv = equal(astr, astrlen, b, blen);
- OPENSSL_free(astr);
- return rv;
- }
- }
-
-static int do_x509_check(X509 *x, const unsigned char *chk, size_t chklen,
- unsigned int flags, int check_type)
- {
- STACK_OF(GENERAL_NAME) *gens = NULL;
- X509_NAME *name = NULL;
- int i;
- int cnid;
- int alt_type;
- equal_fn equal;
- if (check_type == GEN_EMAIL)
- {
- cnid = NID_pkcs9_emailAddress;
- alt_type = V_ASN1_IA5STRING;
- equal = equal_email;
- }
- else if (check_type == GEN_DNS)
- {
- cnid = NID_commonName;
- alt_type = V_ASN1_IA5STRING;
- if (flags & X509_CHECK_FLAG_NO_WILDCARDS)
- equal = equal_nocase;
- else
- equal = equal_wildcard;
- }
- else
- {
- cnid = 0;
- alt_type = V_ASN1_OCTET_STRING;
- equal = equal_case;
- }
-
- if (chklen == 0)
- chklen = strlen((const char *)chk);
-
- gens = X509_get_ext_d2i(x, NID_subject_alt_name, NULL, NULL);
- if (gens)
- {
- int rv = 0;
- for (i = 0; i < sk_GENERAL_NAME_num(gens); i++)
- {
- GENERAL_NAME *gen;
- ASN1_STRING *cstr;
- gen = sk_GENERAL_NAME_value(gens, i);
- if(gen->type != check_type)
- continue;
- if (check_type == GEN_EMAIL)
- cstr = gen->d.rfc822Name;
- else if (check_type == GEN_DNS)
- cstr = gen->d.dNSName;
- else
- cstr = gen->d.iPAddress;
- if (do_check_string(cstr, alt_type, equal, chk, chklen))
- {
- rv = 1;
- break;
- }
- }
- GENERAL_NAMES_free(gens);
- if (rv)
- return 1;
- if (!(flags & X509_CHECK_FLAG_ALWAYS_CHECK_SUBJECT) || !cnid)
- return 0;
- }
- i = -1;
- name = X509_get_subject_name(x);
- while((i = X509_NAME_get_index_by_NID(name, cnid, i)) >= 0)
- {
- X509_NAME_ENTRY *ne;
- ASN1_STRING *str;
- ne = X509_NAME_get_entry(name, i);
- str = X509_NAME_ENTRY_get_data(ne);
- if (do_check_string(str, -1, equal, chk, chklen))
- return 1;
- }
- return 0;
- }
-
-#if OPENSSL_VERSION_NUMBER < 0x1000200fL
-
-int X509_check_host(X509 *x, const unsigned char *chk, size_t chklen,
- unsigned int flags, char **peername)
- {
- return do_x509_check(x, chk, chklen, flags, GEN_DNS);
- }
-
-int X509_check_email(X509 *x, const unsigned char *chk, size_t chklen,
- unsigned int flags)
- {
- return do_x509_check(x, chk, chklen, flags, GEN_EMAIL);
- }
-
-int X509_check_ip(X509 *x, const unsigned char *chk, size_t chklen,
- unsigned int flags)
- {
- return do_x509_check(x, chk, chklen, flags, GEN_IPADD);
- }
-
-#endif /* OPENSSL_VERSION_NUMBER < 0x1000200fL */
-
-#endif
diff --git a/vendor/github.com/libp2p/go-openssl/hostname.go b/vendor/github.com/libp2p/go-openssl/hostname.go
deleted file mode 100644
index 9ef4ba293..000000000
--- a/vendor/github.com/libp2p/go-openssl/hostname.go
+++ /dev/null
@@ -1,140 +0,0 @@
-// Copyright (C) 2017. See AUTHORS.
-//
-// Licensed under the Apache License, Version 2.0 (the "License");
-// you may not use this file except in compliance with the License.
-// You may obtain a copy of the License at
-//
-// http://www.apache.org/licenses/LICENSE-2.0
-//
-// Unless required by applicable law or agreed to in writing, software
-// distributed under the License is distributed on an "AS IS" BASIS,
-// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-// See the License for the specific language governing permissions and
-// limitations under the License.
-
-package openssl
-
-/*
-#include
-#include
-#if OPENSSL_VERSION_NUMBER >= 0x30000000L
- #include
- typedef const char x509char;
-#else
- #include
-
- #ifndef X509_CHECK_FLAG_ALWAYS_CHECK_SUBJECT
- #define X509_CHECK_FLAG_ALWAYS_CHECK_SUBJECT 0x1
- #define X509_CHECK_FLAG_NO_WILDCARDS 0x2
-
- extern int X509_check_host(X509 *x, const unsigned char *chk, size_t chklen,
- unsigned int flags, char **peername);
- extern int X509_check_email(X509 *x, const unsigned char *chk, size_t chklen,
- unsigned int flags);
- extern int X509_check_ip(X509 *x, const unsigned char *chk, size_t chklen,
- unsigned int flags);
- typedef const unsigned char x509char;
- #else
- typedef const char x509char;
- #endif
-#endif
-*/
-import "C"
-
-import (
- "errors"
- "net"
- "unsafe"
-)
-
-var (
- ValidationError = errors.New("host validation error") //lint:ignore ST1012 rename may cause breaking changes; research before renaming.
-)
-
-type CheckFlags int
-
-const (
- AlwaysCheckSubject CheckFlags = C.X509_CHECK_FLAG_ALWAYS_CHECK_SUBJECT
- NoWildcards CheckFlags = C.X509_CHECK_FLAG_NO_WILDCARDS
-)
-
-// CheckHost checks that the X509 certificate is signed for the provided
-// host name. See http://www.openssl.org/docs/crypto/X509_check_host.html for
-// more. Note that CheckHost does not check the IP field. See VerifyHostname.
-// Specifically returns ValidationError if the Certificate didn't match but
-// there was no internal error.
-func (c *Certificate) CheckHost(host string, flags CheckFlags) error {
- chost := unsafe.Pointer(C.CString(host))
- defer C.free(chost)
-
- rv := C.X509_check_host(c.x, (*C.x509char)(chost), C.size_t(len(host)),
- C.uint(flags), nil)
- if rv > 0 {
- return nil
- }
- if rv == 0 {
- return ValidationError
- }
- return errors.New("hostname validation had an internal failure")
-}
-
-// CheckEmail checks that the X509 certificate is signed for the provided
-// email address. See http://www.openssl.org/docs/crypto/X509_check_host.html
-// for more.
-// Specifically returns ValidationError if the Certificate didn't match but
-// there was no internal error.
-func (c *Certificate) CheckEmail(email string, flags CheckFlags) error {
- cemail := unsafe.Pointer(C.CString(email))
- defer C.free(cemail)
- rv := C.X509_check_email(c.x, (*C.x509char)(cemail), C.size_t(len(email)),
- C.uint(flags))
- if rv > 0 {
- return nil
- }
- if rv == 0 {
- return ValidationError
- }
- return errors.New("email validation had an internal failure")
-}
-
-// CheckIP checks that the X509 certificate is signed for the provided
-// IP address. See http://www.openssl.org/docs/crypto/X509_check_host.html
-// for more.
-// Specifically returns ValidationError if the Certificate didn't match but
-// there was no internal error.
-func (c *Certificate) CheckIP(ip net.IP, flags CheckFlags) error {
- // X509_check_ip will fail to validate the 16-byte representation of an IPv4
- // address, so convert to the 4-byte representation.
- if ip4 := ip.To4(); ip4 != nil {
- ip = ip4
- }
-
- cip := unsafe.Pointer(&ip[0])
- rv := C.X509_check_ip(c.x, (*C.uchar)(cip), C.size_t(len(ip)),
- C.uint(flags))
- if rv > 0 {
- return nil
- }
- if rv == 0 {
- return ValidationError
- }
- return errors.New("ip validation had an internal failure")
-}
-
-// VerifyHostname is a combination of CheckHost and CheckIP. If the provided
-// hostname looks like an IP address, it will be checked as an IP address,
-// otherwise it will be checked as a hostname.
-// Specifically returns ValidationError if the Certificate didn't match but
-// there was no internal error.
-func (c *Certificate) VerifyHostname(host string) error {
- var ip net.IP
- if len(host) >= 3 && host[0] == '[' && host[len(host)-1] == ']' {
- ip = net.ParseIP(host[1 : len(host)-1])
- } else {
- ip = net.ParseIP(host)
- }
- if ip != nil {
- return c.CheckIP(ip, 0)
- }
- return c.CheckHost(host, 0)
-}
diff --git a/vendor/github.com/libp2p/go-openssl/http.go b/vendor/github.com/libp2p/go-openssl/http.go
deleted file mode 100644
index 39bd5a28b..000000000
--- a/vendor/github.com/libp2p/go-openssl/http.go
+++ /dev/null
@@ -1,61 +0,0 @@
-// Copyright (C) 2017. See AUTHORS.
-//
-// Licensed under the Apache License, Version 2.0 (the "License");
-// you may not use this file except in compliance with the License.
-// You may obtain a copy of the License at
-//
-// http://www.apache.org/licenses/LICENSE-2.0
-//
-// Unless required by applicable law or agreed to in writing, software
-// distributed under the License is distributed on an "AS IS" BASIS,
-// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-// See the License for the specific language governing permissions and
-// limitations under the License.
-
-package openssl
-
-import (
- "net/http"
-)
-
-// ListenAndServeTLS will take an http.Handler and serve it using OpenSSL over
-// the given tcp address, configured to use the provided cert and key files.
-func ListenAndServeTLS(addr string, cert_file string, key_file string,
- handler http.Handler) error {
- return ServerListenAndServeTLS(
- &http.Server{Addr: addr, Handler: handler}, cert_file, key_file)
-}
-
-// ServerListenAndServeTLS will take an http.Server and serve it using OpenSSL
-// configured to use the provided cert and key files.
-func ServerListenAndServeTLS(srv *http.Server,
- cert_file, key_file string) error {
- addr := srv.Addr
- if addr == "" {
- addr = ":https"
- }
-
- ctx, err := NewCtxFromFiles(cert_file, key_file)
- if err != nil {
- return err
- }
-
- l, err := Listen("tcp", addr, ctx)
- if err != nil {
- return err
- }
-
- return srv.Serve(l)
-}
-
-// TODO: http client integration
-// holy crap, getting this integrated nicely with the Go stdlib HTTP client
-// stack so that it does proxying, connection pooling, and most importantly
-// hostname verification is really hard. So much stuff is hardcoded to just use
-// the built-in TLS lib. I think to get this to work either some crazy
-// hacktackery beyond me, an almost straight up fork of the HTTP client, or
-// serious stdlib internal refactoring is necessary.
-// even more so, good luck getting openssl to use the operating system default
-// root certificates if the user doesn't provide any. sadlol
-// NOTE: if you're going to try and write your own round tripper, at least use
-// openssl.Dial, or equivalent logic
diff --git a/vendor/github.com/libp2p/go-openssl/init.go b/vendor/github.com/libp2p/go-openssl/init.go
deleted file mode 100644
index 107adee15..000000000
--- a/vendor/github.com/libp2p/go-openssl/init.go
+++ /dev/null
@@ -1,116 +0,0 @@
-// Copyright (C) 2017. See AUTHORS.
-//
-// Licensed under the Apache License, Version 2.0 (the "License");
-// you may not use this file except in compliance with the License.
-// You may obtain a copy of the License at
-//
-// http://www.apache.org/licenses/LICENSE-2.0
-//
-// Unless required by applicable law or agreed to in writing, software
-// distributed under the License is distributed on an "AS IS" BASIS,
-// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-// See the License for the specific language governing permissions and
-// limitations under the License.
-
-/*
-Package openssl is a light wrapper around OpenSSL for Go.
-
-It strives to provide a near-drop-in replacement for the Go standard library
-tls package, while allowing for:
-
-Performance
-
-OpenSSL is battle-tested and optimized C. While Go's built-in library shows
-great promise, it is still young and in some places, inefficient. This simple
-OpenSSL wrapper can often do at least 2x with the same cipher and protocol.
-
-On my lappytop, I get the following benchmarking speeds:
- BenchmarkSHA1Large_openssl 1000 2611282 ns/op 401.56 MB/s
- BenchmarkSHA1Large_stdlib 500 3963983 ns/op 264.53 MB/s
- BenchmarkSHA1Small_openssl 1000000 3476 ns/op 0.29 MB/s
- BenchmarkSHA1Small_stdlib 5000000 550 ns/op 1.82 MB/s
- BenchmarkSHA256Large_openssl 200 8085314 ns/op 129.69 MB/s
- BenchmarkSHA256Large_stdlib 100 18948189 ns/op 55.34 MB/s
- BenchmarkSHA256Small_openssl 1000000 4262 ns/op 0.23 MB/s
- BenchmarkSHA256Small_stdlib 1000000 1444 ns/op 0.69 MB/s
- BenchmarkOpenSSLThroughput 100000 21634 ns/op 47.33 MB/s
- BenchmarkStdlibThroughput 50000 58974 ns/op 17.36 MB/s
-
-Interoperability
-
-Many systems support OpenSSL with a variety of plugins and modules for things,
-such as hardware acceleration in embedded devices.
-
-Greater flexibility and configuration
-
-OpenSSL allows for far greater configuration of corner cases and backwards
-compatibility (such as support of SSLv2). You shouldn't be using SSLv2 if you
-can help but, but sometimes you can't help it.
-
-Security
-
-Yeah yeah, Heartbleed. But according to the author of the standard library's
-TLS implementation, Go's TLS library is vulnerable to timing attacks. And
-whether or not OpenSSL received the appropriate amount of scrutiny
-pre-Heartbleed, it sure is receiving it now.
-
-Usage
-
-Starting an HTTP server that uses OpenSSL is very easy. It's as simple as:
- log.Fatal(openssl.ListenAndServeTLS(
- ":8443", "my_server.crt", "my_server.key", myHandler))
-
-Getting a net.Listener that uses OpenSSL is also easy:
- ctx, err := openssl.NewCtxFromFiles("my_server.crt", "my_server.key")
- if err != nil {
- log.Fatal(err)
- }
- l, err := openssl.Listen("tcp", ":7777", ctx)
-
-Making a client connection is straightforward too:
- ctx, err := NewCtx()
- if err != nil {
- log.Fatal(err)
- }
- err = ctx.LoadVerifyLocations("/etc/ssl/certs/ca-certificates.crt", "")
- if err != nil {
- log.Fatal(err)
- }
- conn, err := openssl.Dial("tcp", "localhost:7777", ctx, 0)
-
-Help wanted: To get this library to work with net/http's client, we
-had to fork net/http. It would be nice if an alternate http client library
-supported the generality needed to use OpenSSL instead of crypto/tls.
-*/
-package openssl
-
-// #include "shim.h"
-import "C"
-
-import (
- "fmt"
- "strings"
-)
-
-func init() {
- if rc := C.X_shim_init(); rc != 0 {
- panic(fmt.Errorf("x_shim_init failed with %d", rc))
- }
-}
-
-// errorFromErrorQueue needs to run in the same OS thread as the operation
-// that caused the possible error
-func errorFromErrorQueue() error {
- var errs []string
- for {
- err := C.ERR_get_error()
- if err == 0 {
- break
- }
- errs = append(errs, fmt.Sprintf("%s:%s:%s",
- C.GoString(C.ERR_lib_error_string(err)),
- C.GoString(C.ERR_func_error_string(err)),
- C.GoString(C.ERR_reason_error_string(err))))
- }
- return fmt.Errorf("SSL errors: %s", strings.Join(errs, "\n"))
-}
diff --git a/vendor/github.com/libp2p/go-openssl/init_posix.go b/vendor/github.com/libp2p/go-openssl/init_posix.go
deleted file mode 100644
index f518d2f83..000000000
--- a/vendor/github.com/libp2p/go-openssl/init_posix.go
+++ /dev/null
@@ -1,69 +0,0 @@
-// Copyright (C) 2017. See AUTHORS.
-//
-// Licensed under the Apache License, Version 2.0 (the "License");
-// you may not use this file except in compliance with the License.
-// You may obtain a copy of the License at
-//
-// http://www.apache.org/licenses/LICENSE-2.0
-//
-// Unless required by applicable law or agreed to in writing, software
-// distributed under the License is distributed on an "AS IS" BASIS,
-// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-// See the License for the specific language governing permissions and
-// limitations under the License.
-
-//go:build (linux || darwin || solaris || freebsd || openbsd) && !windows
-// +build linux darwin solaris freebsd openbsd
-// +build !windows
-
-package openssl
-
-/*
-#include
-#include
-#include
-
-pthread_mutex_t* goopenssl_locks;
-
-int go_init_locks() {
- int rc = 0;
- int nlock;
- int i;
- int locks_needed = CRYPTO_num_locks();
-
- goopenssl_locks = (pthread_mutex_t*)malloc(
- sizeof(pthread_mutex_t) * locks_needed);
- if (!goopenssl_locks) {
- return ENOMEM;
- }
- for (nlock = 0; nlock < locks_needed; ++nlock) {
- rc = pthread_mutex_init(&goopenssl_locks[nlock], NULL);
- if (rc != 0) {
- break;
- }
- }
-
- if (rc != 0) {
- for (i = nlock - 1; i >= 0; --i) {
- pthread_mutex_destroy(&goopenssl_locks[i]);
- }
- free(goopenssl_locks);
- goopenssl_locks = NULL;
- }
- return rc;
-}
-
-void go_thread_locking_callback(int mode, int n, const char *file,
- int line) {
- if (mode & CRYPTO_LOCK) {
- pthread_mutex_lock(&goopenssl_locks[n]);
- } else {
- pthread_mutex_unlock(&goopenssl_locks[n]);
- }
-}
-
-unsigned long go_thread_id_callback(void) {
- return (unsigned long)pthread_self();
-}
-*/
-import "C"
diff --git a/vendor/github.com/libp2p/go-openssl/init_windows.go b/vendor/github.com/libp2p/go-openssl/init_windows.go
deleted file mode 100644
index 7356b6e2f..000000000
--- a/vendor/github.com/libp2p/go-openssl/init_windows.go
+++ /dev/null
@@ -1,58 +0,0 @@
-// Copyright (C) 2017. See AUTHORS.
-//
-// Licensed under the Apache License, Version 2.0 (the "License");
-// you may not use this file except in compliance with the License.
-// You may obtain a copy of the License at
-//
-// http://www.apache.org/licenses/LICENSE-2.0
-//
-// Unless required by applicable law or agreed to in writing, software
-// distributed under the License is distributed on an "AS IS" BASIS,
-// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-// See the License for the specific language governing permissions and
-// limitations under the License.
-
-//go:build windows
-// +build windows
-
-package openssl
-
-/*
-#include
-#include
-#include
-
-CRITICAL_SECTION* goopenssl_locks;
-
-int go_init_locks() {
- int rc = 0;
- int nlock;
- int i;
- int locks_needed = CRYPTO_num_locks();
-
- goopenssl_locks = (CRITICAL_SECTION*)malloc(
- sizeof(*goopenssl_locks) * locks_needed);
- if (!goopenssl_locks) {
- return ENOMEM;
- }
- for (nlock = 0; nlock < locks_needed; ++nlock) {
- InitializeCriticalSection(&goopenssl_locks[nlock]);
- }
-
- return 0;
-}
-
-void go_thread_locking_callback(int mode, int n, const char *file,
- int line) {
- if (mode & CRYPTO_LOCK) {
- EnterCriticalSection(&goopenssl_locks[n]);
- } else {
- LeaveCriticalSection(&goopenssl_locks[n]);
- }
-}
-
-unsigned long go_thread_id_callback(void) {
- return (unsigned long)GetCurrentThreadId();
-}
-*/
-import "C"
diff --git a/vendor/github.com/libp2p/go-openssl/key.go b/vendor/github.com/libp2p/go-openssl/key.go
deleted file mode 100644
index 25be635b1..000000000
--- a/vendor/github.com/libp2p/go-openssl/key.go
+++ /dev/null
@@ -1,522 +0,0 @@
-// Copyright (C) 2017. See AUTHORS.
-//
-// Licensed under the Apache License, Version 2.0 (the "License");
-// you may not use this file except in compliance with the License.
-// You may obtain a copy of the License at
-//
-// http://www.apache.org/licenses/LICENSE-2.0
-//
-// Unless required by applicable law or agreed to in writing, software
-// distributed under the License is distributed on an "AS IS" BASIS,
-// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-// See the License for the specific language governing permissions and
-// limitations under the License.
-
-package openssl
-
-// #include "shim.h"
-import "C"
-
-import (
- "errors"
- "io/ioutil"
- "runtime"
- "unsafe"
-)
-
-var ( // some (effectively) constants for tests to refer to
- ed25519_support = C.X_ED25519_SUPPORT != 0
-)
-
-type Method *C.EVP_MD
-
-var (
- SHA1_Method Method = C.X_EVP_sha1()
- SHA256_Method Method = C.X_EVP_sha256()
- SHA512_Method Method = C.X_EVP_sha512()
-)
-
-// Constants for the various key types.
-// Mapping of name -> NID taken from openssl/evp.h
-const (
- KeyTypeNone = NID_undef
- KeyTypeRSA = NID_rsaEncryption
- KeyTypeRSA2 = NID_rsa
- KeyTypeDSA = NID_dsa
- KeyTypeDSA1 = NID_dsa_2
- KeyTypeDSA2 = NID_dsaWithSHA
- KeyTypeDSA3 = NID_dsaWithSHA1
- KeyTypeDSA4 = NID_dsaWithSHA1_2
- KeyTypeDH = NID_dhKeyAgreement
- KeyTypeDHX = NID_dhpublicnumber
- KeyTypeEC = NID_X9_62_id_ecPublicKey
- KeyTypeHMAC = NID_hmac
- KeyTypeCMAC = NID_cmac
- KeyTypeTLS1PRF = NID_tls1_prf
- KeyTypeHKDF = NID_hkdf
- KeyTypeX25519 = NID_X25519
- KeyTypeX448 = NID_X448
- KeyTypeED25519 = NID_ED25519
- KeyTypeED448 = NID_ED448
-)
-
-type PublicKey interface {
- // Verifies the data signature using PKCS1.15
- VerifyPKCS1v15(method Method, data, sig []byte) error
-
- // MarshalPKIXPublicKeyPEM converts the public key to PEM-encoded PKIX
- // format
- MarshalPKIXPublicKeyPEM() (pem_block []byte, err error)
-
- // MarshalPKIXPublicKeyDER converts the public key to DER-encoded PKIX
- // format
- MarshalPKIXPublicKeyDER() (der_block []byte, err error)
-
- // KeyType returns an identifier for what kind of key is represented by this
- // object.
- KeyType() NID
-
- // BaseType returns an identifier for what kind of key is represented
- // by this object.
- // Keys that share same algorithm but use different legacy formats
- // will have the same BaseType.
- //
- // For example, a key with a `KeyType() == KeyTypeRSA` and a key with a
- // `KeyType() == KeyTypeRSA2` would both have `BaseType() == KeyTypeRSA`.
- BaseType() NID
-
- // Equal compares the key with the passed in key.
- Equal(key PublicKey) bool
-
- // Size returns the size (in bytes) of signatures created with this key.
- Size() int
-
- evpPKey() *C.EVP_PKEY
-}
-
-type PrivateKey interface {
- PublicKey
-
- // Signs the data using PKCS1.15
- SignPKCS1v15(Method, []byte) ([]byte, error)
-
- // MarshalPKCS1PrivateKeyPEM converts the private key to PEM-encoded PKCS1
- // format
- MarshalPKCS1PrivateKeyPEM() (pem_block []byte, err error)
-
- // MarshalPKCS1PrivateKeyDER converts the private key to DER-encoded PKCS1
- // format
- MarshalPKCS1PrivateKeyDER() (der_block []byte, err error)
-}
-
-type pKey struct {
- key *C.EVP_PKEY
-}
-
-func (key *pKey) evpPKey() *C.EVP_PKEY { return key.key }
-
-func (key *pKey) Equal(other PublicKey) bool {
- return C.EVP_PKEY_cmp(key.key, other.evpPKey()) == 1
-}
-
-func (key *pKey) KeyType() NID {
- return NID(C.EVP_PKEY_id(key.key))
-}
-
-func (key *pKey) Size() int {
- return int(C.EVP_PKEY_size(key.key))
-}
-
-func (key *pKey) BaseType() NID {
- return NID(C.EVP_PKEY_base_id(key.key))
-}
-
-func (key *pKey) SignPKCS1v15(method Method, data []byte) ([]byte, error) {
-
- ctx := C.X_EVP_MD_CTX_new()
- defer C.X_EVP_MD_CTX_free(ctx)
-
- if key.KeyType() == KeyTypeED25519 {
- // do ED specific one-shot sign
-
- if method != nil || len(data) == 0 {
- return nil, errors.New("signpkcs1v15: 0-length data or non-null digest")
- }
-
- if C.X_EVP_DigestSignInit(ctx, nil, nil, nil, key.key) != 1 {
- return nil, errors.New("signpkcs1v15: failed to init signature")
- }
-
- // evp signatures are 64 bytes
- sig := make([]byte, 64)
- var sigblen C.size_t = 64
- if C.X_EVP_DigestSign(ctx,
- (*C.uchar)(unsafe.Pointer(&sig[0])),
- &sigblen,
- (*C.uchar)(unsafe.Pointer(&data[0])),
- C.size_t(len(data))) != 1 {
- return nil, errors.New("signpkcs1v15: failed to do one-shot signature")
- }
-
- return sig[:sigblen], nil
- } else {
- if C.X_EVP_SignInit(ctx, method) != 1 {
- return nil, errors.New("signpkcs1v15: failed to init signature")
- }
- if len(data) > 0 {
- if C.X_EVP_SignUpdate(
- ctx, unsafe.Pointer(&data[0]), C.uint(len(data))) != 1 {
- return nil, errors.New("signpkcs1v15: failed to update signature")
- }
- }
- sig := make([]byte, C.X_EVP_PKEY_size(key.key))
- var sigblen C.uint
- if C.X_EVP_SignFinal(ctx,
- (*C.uchar)(unsafe.Pointer(&sig[0])), &sigblen, key.key) != 1 {
- return nil, errors.New("signpkcs1v15: failed to finalize signature")
- }
- return sig[:sigblen], nil
- }
-}
-
-func (key *pKey) VerifyPKCS1v15(method Method, data, sig []byte) error {
- ctx := C.X_EVP_MD_CTX_new()
- defer C.X_EVP_MD_CTX_free(ctx)
-
- if len(sig) == 0 {
- return errors.New("verifypkcs1v15: 0-length sig")
- }
-
- if key.KeyType() == KeyTypeED25519 {
- // do ED specific one-shot sign
-
- if method != nil || len(data) == 0 {
- return errors.New("verifypkcs1v15: 0-length data or non-null digest")
- }
-
- if C.X_EVP_DigestVerifyInit(ctx, nil, nil, nil, key.key) != 1 {
- return errors.New("verifypkcs1v15: failed to init verify")
- }
-
- if C.X_EVP_DigestVerify(ctx,
- (*C.uchar)(unsafe.Pointer(&sig[0])),
- C.size_t(len(sig)),
- (*C.uchar)(unsafe.Pointer(&data[0])),
- C.size_t(len(data))) != 1 {
- return errors.New("verifypkcs1v15: failed to do one-shot verify")
- }
-
- return nil
-
- } else {
- if C.X_EVP_VerifyInit(ctx, method) != 1 {
- return errors.New("verifypkcs1v15: failed to init verify")
- }
- if len(data) > 0 {
- if C.X_EVP_VerifyUpdate(
- ctx, unsafe.Pointer(&data[0]), C.uint(len(data))) != 1 {
- return errors.New("verifypkcs1v15: failed to update verify")
- }
- }
- if C.X_EVP_VerifyFinal(ctx,
- (*C.uchar)(unsafe.Pointer(&sig[0])), C.uint(len(sig)), key.key) != 1 {
- return errors.New("verifypkcs1v15: failed to finalize verify")
- }
- return nil
- }
-}
-
-func (key *pKey) MarshalPKCS1PrivateKeyPEM() (pem_block []byte,
- err error) {
- bio := C.BIO_new(C.BIO_s_mem())
- if bio == nil {
- return nil, errors.New("failed to allocate memory BIO")
- }
- defer C.BIO_free(bio)
-
- // PEM_write_bio_PrivateKey_traditional will use the key-specific PKCS1
- // format if one is available for that key type, otherwise it will encode
- // to a PKCS8 key.
- if int(C.X_PEM_write_bio_PrivateKey_traditional(bio, key.key, nil, nil,
- C.int(0), nil, nil)) != 1 {
- return nil, errors.New("failed dumping private key")
- }
-
- return ioutil.ReadAll(asAnyBio(bio))
-}
-
-func (key *pKey) MarshalPKCS1PrivateKeyDER() (der_block []byte,
- err error) {
- bio := C.BIO_new(C.BIO_s_mem())
- if bio == nil {
- return nil, errors.New("failed to allocate memory BIO")
- }
- defer C.BIO_free(bio)
-
- if int(C.i2d_PrivateKey_bio(bio, key.key)) != 1 {
- return nil, errors.New("failed dumping private key der")
- }
-
- return ioutil.ReadAll(asAnyBio(bio))
-}
-
-func (key *pKey) MarshalPKIXPublicKeyPEM() (pem_block []byte,
- err error) {
- bio := C.BIO_new(C.BIO_s_mem())
- if bio == nil {
- return nil, errors.New("failed to allocate memory BIO")
- }
- defer C.BIO_free(bio)
-
- if int(C.PEM_write_bio_PUBKEY(bio, key.key)) != 1 {
- return nil, errors.New("failed dumping public key pem")
- }
-
- return ioutil.ReadAll(asAnyBio(bio))
-}
-
-func (key *pKey) MarshalPKIXPublicKeyDER() (der_block []byte,
- err error) {
- bio := C.BIO_new(C.BIO_s_mem())
- if bio == nil {
- return nil, errors.New("failed to allocate memory BIO")
- }
- defer C.BIO_free(bio)
-
- if int(C.i2d_PUBKEY_bio(bio, key.key)) != 1 {
- return nil, errors.New("failed dumping public key der")
- }
-
- return ioutil.ReadAll(asAnyBio(bio))
-}
-
-// LoadPrivateKeyFromPEM loads a private key from a PEM-encoded block.
-func LoadPrivateKeyFromPEM(pem_block []byte) (PrivateKey, error) {
- if len(pem_block) == 0 {
- return nil, errors.New("empty pem block")
- }
- bio := C.BIO_new_mem_buf(unsafe.Pointer(&pem_block[0]),
- C.int(len(pem_block)))
- if bio == nil {
- return nil, errors.New("failed creating bio")
- }
- defer C.BIO_free(bio)
-
- key := C.PEM_read_bio_PrivateKey(bio, nil, nil, nil)
- if key == nil {
- return nil, errors.New("failed reading private key")
- }
-
- p := &pKey{key: key}
- runtime.SetFinalizer(p, func(p *pKey) {
- C.X_EVP_PKEY_free(p.key)
- })
- return p, nil
-}
-
-// LoadPrivateKeyFromPEMWithPassword loads a private key from a PEM-encoded block.
-func LoadPrivateKeyFromPEMWithPassword(pem_block []byte, password string) (
- PrivateKey, error) {
- if len(pem_block) == 0 {
- return nil, errors.New("empty pem block")
- }
- bio := C.BIO_new_mem_buf(unsafe.Pointer(&pem_block[0]),
- C.int(len(pem_block)))
- if bio == nil {
- return nil, errors.New("failed creating bio")
- }
- defer C.BIO_free(bio)
- cs := C.CString(password)
- defer C.free(unsafe.Pointer(cs))
- key := C.PEM_read_bio_PrivateKey(bio, nil, nil, unsafe.Pointer(cs))
- if key == nil {
- return nil, errors.New("failed reading private key")
- }
-
- p := &pKey{key: key}
- runtime.SetFinalizer(p, func(p *pKey) {
- C.X_EVP_PKEY_free(p.key)
- })
- return p, nil
-}
-
-// LoadPrivateKeyFromDER loads a private key from a DER-encoded block.
-func LoadPrivateKeyFromDER(der_block []byte) (PrivateKey, error) {
- if len(der_block) == 0 {
- return nil, errors.New("empty der block")
- }
- bio := C.BIO_new_mem_buf(unsafe.Pointer(&der_block[0]),
- C.int(len(der_block)))
- if bio == nil {
- return nil, errors.New("failed creating bio")
- }
- defer C.BIO_free(bio)
-
- key := C.d2i_PrivateKey_bio(bio, nil)
- if key == nil {
- return nil, errors.New("failed reading private key der")
- }
-
- p := &pKey{key: key}
- runtime.SetFinalizer(p, func(p *pKey) {
- C.X_EVP_PKEY_free(p.key)
- })
- return p, nil
-}
-
-// LoadPrivateKeyFromPEMWidthPassword loads a private key from a PEM-encoded block.
-// Backwards-compatible with typo
-func LoadPrivateKeyFromPEMWidthPassword(pem_block []byte, password string) (
- PrivateKey, error) {
- return LoadPrivateKeyFromPEMWithPassword(pem_block, password)
-}
-
-// LoadPublicKeyFromPEM loads a public key from a PEM-encoded block.
-func LoadPublicKeyFromPEM(pem_block []byte) (PublicKey, error) {
- if len(pem_block) == 0 {
- return nil, errors.New("empty pem block")
- }
- bio := C.BIO_new_mem_buf(unsafe.Pointer(&pem_block[0]),
- C.int(len(pem_block)))
- if bio == nil {
- return nil, errors.New("failed creating bio")
- }
- defer C.BIO_free(bio)
-
- key := C.PEM_read_bio_PUBKEY(bio, nil, nil, nil)
- if key == nil {
- return nil, errors.New("failed reading public key der")
- }
-
- p := &pKey{key: key}
- runtime.SetFinalizer(p, func(p *pKey) {
- C.X_EVP_PKEY_free(p.key)
- })
- return p, nil
-}
-
-// LoadPublicKeyFromDER loads a public key from a DER-encoded block.
-func LoadPublicKeyFromDER(der_block []byte) (PublicKey, error) {
- if len(der_block) == 0 {
- return nil, errors.New("empty der block")
- }
- bio := C.BIO_new_mem_buf(unsafe.Pointer(&der_block[0]),
- C.int(len(der_block)))
- if bio == nil {
- return nil, errors.New("failed creating bio")
- }
- defer C.BIO_free(bio)
-
- key := C.d2i_PUBKEY_bio(bio, nil)
- if key == nil {
- return nil, errors.New("failed reading public key der")
- }
-
- p := &pKey{key: key}
- runtime.SetFinalizer(p, func(p *pKey) {
- C.X_EVP_PKEY_free(p.key)
- })
- return p, nil
-}
-
-// GenerateRSAKey generates a new RSA private key with an exponent of 3.
-func GenerateRSAKey(bits int) (PrivateKey, error) {
- return GenerateRSAKeyWithExponent(bits, 3)
-}
-
-// GenerateRSAKeyWithExponent generates a new RSA private key.
-func GenerateRSAKeyWithExponent(bits int, exponent int) (PrivateKey, error) {
- rsa := C.RSA_generate_key(C.int(bits), C.ulong(exponent), nil, nil)
- if rsa == nil {
- return nil, errors.New("failed to generate RSA key")
- }
- key := C.X_EVP_PKEY_new()
- if key == nil {
- return nil, errors.New("failed to allocate EVP_PKEY")
- }
- if C.X_EVP_PKEY_assign_charp(key, C.EVP_PKEY_RSA, (*C.char)(unsafe.Pointer(rsa))) != 1 {
- C.X_EVP_PKEY_free(key)
- return nil, errors.New("failed to assign RSA key")
- }
- p := &pKey{key: key}
- runtime.SetFinalizer(p, func(p *pKey) {
- C.X_EVP_PKEY_free(p.key)
- })
- return p, nil
-}
-
-// GenerateECKey generates a new elliptic curve private key on the speicified
-// curve.
-func GenerateECKey(curve EllipticCurve) (PrivateKey, error) {
-
- // Create context for parameter generation
- paramCtx := C.EVP_PKEY_CTX_new_id(C.EVP_PKEY_EC, nil)
- if paramCtx == nil {
- return nil, errors.New("failed creating EC parameter generation context")
- }
- defer C.EVP_PKEY_CTX_free(paramCtx)
-
- // Intialize the parameter generation
- if int(C.EVP_PKEY_paramgen_init(paramCtx)) != 1 {
- return nil, errors.New("failed initializing EC parameter generation context")
- }
-
- // Set curve in EC parameter generation context
- if int(C.X_EVP_PKEY_CTX_set_ec_paramgen_curve_nid(paramCtx, C.int(curve))) != 1 {
- return nil, errors.New("failed setting curve in EC parameter generation context")
- }
-
- // Create parameter object
- var params *C.EVP_PKEY
- if int(C.EVP_PKEY_paramgen(paramCtx, ¶ms)) != 1 {
- return nil, errors.New("failed creating EC key generation parameters")
- }
- defer C.EVP_PKEY_free(params)
-
- // Create context for the key generation
- keyCtx := C.EVP_PKEY_CTX_new(params, nil)
- if keyCtx == nil {
- return nil, errors.New("failed creating EC key generation context")
- }
- defer C.EVP_PKEY_CTX_free(keyCtx)
-
- // Generate the key
- var privKey *C.EVP_PKEY
- if int(C.EVP_PKEY_keygen_init(keyCtx)) != 1 {
- return nil, errors.New("failed initializing EC key generation context")
- }
- if int(C.EVP_PKEY_keygen(keyCtx, &privKey)) != 1 {
- return nil, errors.New("failed generating EC private key")
- }
-
- p := &pKey{key: privKey}
- runtime.SetFinalizer(p, func(p *pKey) {
- C.X_EVP_PKEY_free(p.key)
- })
- return p, nil
-}
-
-// GenerateED25519Key generates a Ed25519 key
-func GenerateED25519Key() (PrivateKey, error) {
- // Key context
- keyCtx := C.EVP_PKEY_CTX_new_id(C.X_EVP_PKEY_ED25519, nil)
- if keyCtx == nil {
- return nil, errors.New("failed creating EC parameter generation context")
- }
- defer C.EVP_PKEY_CTX_free(keyCtx)
-
- // Generate the key
- var privKey *C.EVP_PKEY
- if int(C.EVP_PKEY_keygen_init(keyCtx)) != 1 {
- return nil, errors.New("failed initializing ED25519 key generation context")
- }
- if int(C.EVP_PKEY_keygen(keyCtx, &privKey)) != 1 {
- return nil, errors.New("failed generating ED25519 private key")
- }
-
- p := &pKey{key: privKey}
- runtime.SetFinalizer(p, func(p *pKey) {
- C.X_EVP_PKEY_free(p.key)
- })
- return p, nil
-}
diff --git a/vendor/github.com/libp2p/go-openssl/mapping.go b/vendor/github.com/libp2p/go-openssl/mapping.go
deleted file mode 100644
index d78cc7034..000000000
--- a/vendor/github.com/libp2p/go-openssl/mapping.go
+++ /dev/null
@@ -1,62 +0,0 @@
-// Copyright (C) 2017. See AUTHORS.
-//
-// Licensed under the Apache License, Version 2.0 (the "License");
-// you may not use this file except in compliance with the License.
-// You may obtain a copy of the License at
-//
-// http://www.apache.org/licenses/LICENSE-2.0
-//
-// Unless required by applicable law or agreed to in writing, software
-// distributed under the License is distributed on an "AS IS" BASIS,
-// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-// See the License for the specific language governing permissions and
-// limitations under the License.
-
-package openssl
-
-import (
- "sync"
- "unsafe"
-)
-
-// #include
-import "C"
-
-type mapping struct {
- lock sync.Mutex
- values map[token]unsafe.Pointer
-}
-
-func newMapping() *mapping {
- return &mapping{
- values: make(map[token]unsafe.Pointer),
- }
-}
-
-type token unsafe.Pointer
-
-func (m *mapping) Add(x unsafe.Pointer) token {
- res := token(C.malloc(1))
-
- m.lock.Lock()
- m.values[res] = x
- m.lock.Unlock()
-
- return res
-}
-
-func (m *mapping) Get(x token) unsafe.Pointer {
- m.lock.Lock()
- res := m.values[x]
- m.lock.Unlock()
-
- return res
-}
-
-func (m *mapping) Del(x token) {
- m.lock.Lock()
- delete(m.values, x)
- m.lock.Unlock()
-
- C.free(unsafe.Pointer(x))
-}
diff --git a/vendor/github.com/libp2p/go-openssl/md4.go b/vendor/github.com/libp2p/go-openssl/md4.go
deleted file mode 100644
index 95d9d2d22..000000000
--- a/vendor/github.com/libp2p/go-openssl/md4.go
+++ /dev/null
@@ -1,92 +0,0 @@
-// Copyright (C) 2017. See AUTHORS.
-//
-// Licensed under the Apache License, Version 2.0 (the "License");
-// you may not use this file except in compliance with the License.
-// You may obtain a copy of the License at
-//
-// http://www.apache.org/licenses/LICENSE-2.0
-//
-// Unless required by applicable law or agreed to in writing, software
-// distributed under the License is distributed on an "AS IS" BASIS,
-// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-// See the License for the specific language governing permissions and
-// limitations under the License.
-
-package openssl
-
-// #include "shim.h"
-import "C"
-
-import (
- "errors"
- "runtime"
- "unsafe"
-)
-
-type MD4Hash struct {
- ctx *C.EVP_MD_CTX
- engine *Engine
-}
-
-func NewMD4Hash() (*MD4Hash, error) { return NewMD4HashWithEngine(nil) }
-
-func NewMD4HashWithEngine(e *Engine) (*MD4Hash, error) {
- hash := &MD4Hash{engine: e}
- hash.ctx = C.X_EVP_MD_CTX_new()
- if hash.ctx == nil {
- return nil, errors.New("openssl: md4: unable to allocate ctx")
- }
- runtime.SetFinalizer(hash, func(hash *MD4Hash) { hash.Close() })
- if err := hash.Reset(); err != nil {
- return nil, err
- }
- return hash, nil
-}
-
-func (s *MD4Hash) Close() {
- if s.ctx != nil {
- C.X_EVP_MD_CTX_free(s.ctx)
- s.ctx = nil
- }
-}
-
-func (s *MD4Hash) Reset() error {
- runtime.LockOSThread()
- defer runtime.UnlockOSThread()
- if C.X_EVP_DigestInit_ex(s.ctx, C.X_EVP_md4(), engineRef(s.engine)) != 1 {
- return errors.New("openssl: md4: cannot init digest ctx: " +
- errorFromErrorQueue().Error())
- }
- return nil
-}
-
-func (s *MD4Hash) Write(p []byte) (n int, err error) {
- if len(p) == 0 {
- return 0, nil
- }
- if C.X_EVP_DigestUpdate(s.ctx, unsafe.Pointer(&p[0]),
- C.size_t(len(p))) != 1 {
- return 0, errors.New("openssl: md4: cannot update digest")
- }
- return len(p), nil
-}
-
-func (s *MD4Hash) Sum() (result [16]byte, err error) {
- if C.X_EVP_DigestFinal_ex(s.ctx,
- (*C.uchar)(unsafe.Pointer(&result[0])), nil) != 1 {
- return result, errors.New("openssl: md4: cannot finalize ctx")
- }
- return result, s.Reset()
-}
-
-func MD4(data []byte) (result [16]byte, err error) {
- hash, err := NewMD4Hash()
- if err != nil {
- return result, err
- }
- defer hash.Close()
- if _, err := hash.Write(data); err != nil {
- return result, err
- }
- return hash.Sum()
-}
diff --git a/vendor/github.com/libp2p/go-openssl/md5.go b/vendor/github.com/libp2p/go-openssl/md5.go
deleted file mode 100644
index d7e771ee6..000000000
--- a/vendor/github.com/libp2p/go-openssl/md5.go
+++ /dev/null
@@ -1,89 +0,0 @@
-// Copyright (C) 2017. See AUTHORS.
-//
-// Licensed under the Apache License, Version 2.0 (the "License");
-// you may not use this file except in compliance with the License.
-// You may obtain a copy of the License at
-//
-// http://www.apache.org/licenses/LICENSE-2.0
-//
-// Unless required by applicable law or agreed to in writing, software
-// distributed under the License is distributed on an "AS IS" BASIS,
-// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-// See the License for the specific language governing permissions and
-// limitations under the License.
-
-package openssl
-
-// #include "shim.h"
-import "C"
-
-import (
- "errors"
- "runtime"
- "unsafe"
-)
-
-type MD5Hash struct {
- ctx *C.EVP_MD_CTX
- engine *Engine
-}
-
-func NewMD5Hash() (*MD5Hash, error) { return NewMD5HashWithEngine(nil) }
-
-func NewMD5HashWithEngine(e *Engine) (*MD5Hash, error) {
- hash := &MD5Hash{engine: e}
- hash.ctx = C.X_EVP_MD_CTX_new()
- if hash.ctx == nil {
- return nil, errors.New("openssl: md5: unable to allocate ctx")
- }
- runtime.SetFinalizer(hash, func(hash *MD5Hash) { hash.Close() })
- if err := hash.Reset(); err != nil {
- return nil, err
- }
- return hash, nil
-}
-
-func (s *MD5Hash) Close() {
- if s.ctx != nil {
- C.X_EVP_MD_CTX_free(s.ctx)
- s.ctx = nil
- }
-}
-
-func (s *MD5Hash) Reset() error {
- if C.X_EVP_DigestInit_ex(s.ctx, C.X_EVP_md5(), engineRef(s.engine)) != 1 {
- return errors.New("openssl: md5: cannot init digest ctx")
- }
- return nil
-}
-
-func (s *MD5Hash) Write(p []byte) (n int, err error) {
- if len(p) == 0 {
- return 0, nil
- }
- if C.X_EVP_DigestUpdate(s.ctx, unsafe.Pointer(&p[0]),
- C.size_t(len(p))) != 1 {
- return 0, errors.New("openssl: md5: cannot update digest")
- }
- return len(p), nil
-}
-
-func (s *MD5Hash) Sum() (result [16]byte, err error) {
- if C.X_EVP_DigestFinal_ex(s.ctx,
- (*C.uchar)(unsafe.Pointer(&result[0])), nil) != 1 {
- return result, errors.New("openssl: md5: cannot finalize ctx")
- }
- return result, s.Reset()
-}
-
-func MD5(data []byte) (result [16]byte, err error) {
- hash, err := NewMD5Hash()
- if err != nil {
- return result, err
- }
- defer hash.Close()
- if _, err := hash.Write(data); err != nil {
- return result, err
- }
- return hash.Sum()
-}
diff --git a/vendor/github.com/libp2p/go-openssl/net.go b/vendor/github.com/libp2p/go-openssl/net.go
deleted file mode 100644
index b2293c7c9..000000000
--- a/vendor/github.com/libp2p/go-openssl/net.go
+++ /dev/null
@@ -1,166 +0,0 @@
-// Copyright (C) 2017. See AUTHORS.
-//
-// Licensed under the Apache License, Version 2.0 (the "License");
-// you may not use this file except in compliance with the License.
-// You may obtain a copy of the License at
-//
-// http://www.apache.org/licenses/LICENSE-2.0
-//
-// Unless required by applicable law or agreed to in writing, software
-// distributed under the License is distributed on an "AS IS" BASIS,
-// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-// See the License for the specific language governing permissions and
-// limitations under the License.
-
-package openssl
-
-import (
- "errors"
- "net"
- "time"
-)
-
-type listener struct {
- net.Listener
- ctx *Ctx
-}
-
-func (l *listener) Accept() (c net.Conn, err error) {
- c, err = l.Listener.Accept()
- if err != nil {
- return nil, err
- }
- ssl_c, err := Server(c, l.ctx)
- if err != nil {
- c.Close()
- return nil, err
- }
- return ssl_c, nil
-}
-
-// NewListener wraps an existing net.Listener such that all accepted
-// connections are wrapped as OpenSSL server connections using the provided
-// context ctx.
-func NewListener(inner net.Listener, ctx *Ctx) net.Listener {
- return &listener{
- Listener: inner,
- ctx: ctx}
-}
-
-// Listen is a wrapper around net.Listen that wraps incoming connections with
-// an OpenSSL server connection using the provided context ctx.
-func Listen(network, laddr string, ctx *Ctx) (net.Listener, error) {
- if ctx == nil {
- return nil, errors.New("no ssl context provided")
- }
- l, err := net.Listen(network, laddr)
- if err != nil {
- return nil, err
- }
- return NewListener(l, ctx), nil
-}
-
-type DialFlags int
-
-const (
- InsecureSkipHostVerification DialFlags = 1 << iota
- DisableSNI
-)
-
-// Dial will connect to network/address and then wrap the corresponding
-// underlying connection with an OpenSSL client connection using context ctx.
-// If flags includes InsecureSkipHostVerification, the server certificate's
-// hostname will not be checked to match the hostname in addr. Otherwise, flags
-// should be 0.
-//
-// Dial probably won't work for you unless you set a verify location or add
-// some certs to the certificate store of the client context you're using.
-// This library is not nice enough to use the system certificate store by
-// default for you yet.
-func Dial(network, addr string, ctx *Ctx, flags DialFlags) (*Conn, error) {
- return DialSession(network, addr, ctx, flags, nil)
-}
-
-// DialTimeout acts like Dial but takes a timeout for network dial.
-//
-// The timeout includes only network dial. It does not include OpenSSL calls.
-//
-// See func Dial for a description of the network, addr, ctx and flags
-// parameters.
-func DialTimeout(network, addr string, timeout time.Duration, ctx *Ctx,
- flags DialFlags) (*Conn, error) {
- d := net.Dialer{Timeout: timeout}
- return dialSession(d, network, addr, ctx, flags, nil)
-}
-
-// DialSession will connect to network/address and then wrap the corresponding
-// underlying connection with an OpenSSL client connection using context ctx.
-// If flags includes InsecureSkipHostVerification, the server certificate's
-// hostname will not be checked to match the hostname in addr. Otherwise, flags
-// should be 0.
-//
-// Dial probably won't work for you unless you set a verify location or add
-// some certs to the certificate store of the client context you're using.
-// This library is not nice enough to use the system certificate store by
-// default for you yet.
-//
-// If session is not nil it will be used to resume the tls state. The session
-// can be retrieved from the GetSession method on the Conn.
-func DialSession(network, addr string, ctx *Ctx, flags DialFlags,
- session []byte) (*Conn, error) {
- var d net.Dialer
- return dialSession(d, network, addr, ctx, flags, session)
-}
-
-func dialSession(d net.Dialer, network, addr string, ctx *Ctx, flags DialFlags,
- session []byte) (*Conn, error) {
- host, _, err := net.SplitHostPort(addr)
- if err != nil {
- return nil, err
- }
- if ctx == nil {
- var err error
- ctx, err = NewCtx()
- if err != nil {
- return nil, err
- }
- // TODO: use operating system default certificate chain?
- }
-
- c, err := d.Dial(network, addr)
- if err != nil {
- return nil, err
- }
- conn, err := Client(c, ctx)
- if err != nil {
- c.Close()
- return nil, err
- }
- if session != nil {
- err := conn.setSession(session)
- if err != nil {
- c.Close()
- return nil, err
- }
- }
- if flags&DisableSNI == 0 {
- err = conn.SetTlsExtHostName(host)
- if err != nil {
- conn.Close()
- return nil, err
- }
- }
- err = conn.Handshake()
- if err != nil {
- conn.Close()
- return nil, err
- }
- if flags&InsecureSkipHostVerification == 0 {
- err = conn.VerifyHostname(host)
- if err != nil {
- conn.Close()
- return nil, err
- }
- }
- return conn, nil
-}
diff --git a/vendor/github.com/libp2p/go-openssl/nid.go b/vendor/github.com/libp2p/go-openssl/nid.go
deleted file mode 100644
index 936a52e77..000000000
--- a/vendor/github.com/libp2p/go-openssl/nid.go
+++ /dev/null
@@ -1,210 +0,0 @@
-// Copyright (C) 2017. See AUTHORS.
-//
-// Licensed under the Apache License, Version 2.0 (the "License");
-// you may not use this file except in compliance with the License.
-// You may obtain a copy of the License at
-//
-// http://www.apache.org/licenses/LICENSE-2.0
-//
-// Unless required by applicable law or agreed to in writing, software
-// distributed under the License is distributed on an "AS IS" BASIS,
-// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-// See the License for the specific language governing permissions and
-// limitations under the License.
-
-package openssl
-
-type NID int
-
-const (
- NID_undef NID = 0
- NID_rsadsi NID = 1
- NID_pkcs NID = 2
- NID_md2 NID = 3
- NID_md5 NID = 4
- NID_rc4 NID = 5
- NID_rsaEncryption NID = 6
- NID_md2WithRSAEncryption NID = 7
- NID_md5WithRSAEncryption NID = 8
- NID_pbeWithMD2AndDES_CBC NID = 9
- NID_pbeWithMD5AndDES_CBC NID = 10
- NID_X500 NID = 11
- NID_X509 NID = 12
- NID_commonName NID = 13
- NID_countryName NID = 14
- NID_localityName NID = 15
- NID_stateOrProvinceName NID = 16
- NID_organizationName NID = 17
- NID_organizationalUnitName NID = 18
- NID_rsa NID = 19
- NID_pkcs7 NID = 20
- NID_pkcs7_data NID = 21
- NID_pkcs7_signed NID = 22
- NID_pkcs7_enveloped NID = 23
- NID_pkcs7_signedAndEnveloped NID = 24
- NID_pkcs7_digest NID = 25
- NID_pkcs7_encrypted NID = 26
- NID_pkcs3 NID = 27
- NID_dhKeyAgreement NID = 28
- NID_des_ecb NID = 29
- NID_des_cfb64 NID = 30
- NID_des_cbc NID = 31
- NID_des_ede NID = 32
- NID_des_ede3 NID = 33
- NID_idea_cbc NID = 34
- NID_idea_cfb64 NID = 35
- NID_idea_ecb NID = 36
- NID_rc2_cbc NID = 37
- NID_rc2_ecb NID = 38
- NID_rc2_cfb64 NID = 39
- NID_rc2_ofb64 NID = 40
- NID_sha NID = 41
- NID_shaWithRSAEncryption NID = 42
- NID_des_ede_cbc NID = 43
- NID_des_ede3_cbc NID = 44
- NID_des_ofb64 NID = 45
- NID_idea_ofb64 NID = 46
- NID_pkcs9 NID = 47
- NID_pkcs9_emailAddress NID = 48
- NID_pkcs9_unstructuredName NID = 49
- NID_pkcs9_contentType NID = 50
- NID_pkcs9_messageDigest NID = 51
- NID_pkcs9_signingTime NID = 52
- NID_pkcs9_countersignature NID = 53
- NID_pkcs9_challengePassword NID = 54
- NID_pkcs9_unstructuredAddress NID = 55
- NID_pkcs9_extCertAttributes NID = 56
- NID_netscape NID = 57
- NID_netscape_cert_extension NID = 58
- NID_netscape_data_type NID = 59
- NID_des_ede_cfb64 NID = 60
- NID_des_ede3_cfb64 NID = 61
- NID_des_ede_ofb64 NID = 62
- NID_des_ede3_ofb64 NID = 63
- NID_sha1 NID = 64
- NID_sha1WithRSAEncryption NID = 65
- NID_dsaWithSHA NID = 66
- NID_dsa_2 NID = 67
- NID_pbeWithSHA1AndRC2_CBC NID = 68
- NID_id_pbkdf2 NID = 69
- NID_dsaWithSHA1_2 NID = 70
- NID_netscape_cert_type NID = 71
- NID_netscape_base_url NID = 72
- NID_netscape_revocation_url NID = 73
- NID_netscape_ca_revocation_url NID = 74
- NID_netscape_renewal_url NID = 75
- NID_netscape_ca_policy_url NID = 76
- NID_netscape_ssl_server_name NID = 77
- NID_netscape_comment NID = 78
- NID_netscape_cert_sequence NID = 79
- NID_desx_cbc NID = 80
- NID_id_ce NID = 81
- NID_subject_key_identifier NID = 82
- NID_key_usage NID = 83
- NID_private_key_usage_period NID = 84
- NID_subject_alt_name NID = 85
- NID_issuer_alt_name NID = 86
- NID_basic_constraints NID = 87
- NID_crl_number NID = 88
- NID_certificate_policies NID = 89
- NID_authority_key_identifier NID = 90
- NID_bf_cbc NID = 91
- NID_bf_ecb NID = 92
- NID_bf_cfb64 NID = 93
- NID_bf_ofb64 NID = 94
- NID_mdc2 NID = 95
- NID_mdc2WithRSA NID = 96
- NID_rc4_40 NID = 97
- NID_rc2_40_cbc NID = 98
- NID_givenName NID = 99
- NID_surname NID = 100
- NID_initials NID = 101
- NID_uniqueIdentifier NID = 102
- NID_crl_distribution_points NID = 103
- NID_md5WithRSA NID = 104
- NID_serialNumber NID = 105
- NID_title NID = 106
- NID_description NID = 107
- NID_cast5_cbc NID = 108
- NID_cast5_ecb NID = 109
- NID_cast5_cfb64 NID = 110
- NID_cast5_ofb64 NID = 111
- NID_pbeWithMD5AndCast5_CBC NID = 112
- NID_dsaWithSHA1 NID = 113
- NID_md5_sha1 NID = 114
- NID_sha1WithRSA NID = 115
- NID_dsa NID = 116
- NID_ripemd160 NID = 117
- NID_ripemd160WithRSA NID = 119
- NID_rc5_cbc NID = 120
- NID_rc5_ecb NID = 121
- NID_rc5_cfb64 NID = 122
- NID_rc5_ofb64 NID = 123
- NID_rle_compression NID = 124
- NID_zlib_compression NID = 125
- NID_ext_key_usage NID = 126
- NID_id_pkix NID = 127
- NID_id_kp NID = 128
- NID_server_auth NID = 129
- NID_client_auth NID = 130
- NID_code_sign NID = 131
- NID_email_protect NID = 132
- NID_time_stamp NID = 133
- NID_ms_code_ind NID = 134
- NID_ms_code_com NID = 135
- NID_ms_ctl_sign NID = 136
- NID_ms_sgc NID = 137
- NID_ms_efs NID = 138
- NID_ns_sgc NID = 139
- NID_delta_crl NID = 140
- NID_crl_reason NID = 141
- NID_invalidity_date NID = 142
- NID_sxnet NID = 143
- NID_pbe_WithSHA1And128BitRC4 NID = 144
- NID_pbe_WithSHA1And40BitRC4 NID = 145
- NID_pbe_WithSHA1And3_Key_TripleDES_CBC NID = 146
- NID_pbe_WithSHA1And2_Key_TripleDES_CBC NID = 147
- NID_pbe_WithSHA1And128BitRC2_CBC NID = 148
- NID_pbe_WithSHA1And40BitRC2_CBC NID = 149
- NID_keyBag NID = 150
- NID_pkcs8ShroudedKeyBag NID = 151
- NID_certBag NID = 152
- NID_crlBag NID = 153
- NID_secretBag NID = 154
- NID_safeContentsBag NID = 155
- NID_friendlyName NID = 156
- NID_localKeyID NID = 157
- NID_x509Certificate NID = 158
- NID_sdsiCertificate NID = 159
- NID_x509Crl NID = 160
- NID_pbes2 NID = 161
- NID_pbmac1 NID = 162
- NID_hmacWithSHA1 NID = 163
- NID_id_qt_cps NID = 164
- NID_id_qt_unotice NID = 165
- NID_rc2_64_cbc NID = 166
- NID_SMIMECapabilities NID = 167
- NID_pbeWithMD2AndRC2_CBC NID = 168
- NID_pbeWithMD5AndRC2_CBC NID = 169
- NID_pbeWithSHA1AndDES_CBC NID = 170
- NID_ms_ext_req NID = 171
- NID_ext_req NID = 172
- NID_name NID = 173
- NID_dnQualifier NID = 174
- NID_id_pe NID = 175
- NID_id_ad NID = 176
- NID_info_access NID = 177
- NID_ad_OCSP NID = 178
- NID_ad_ca_issuers NID = 179
- NID_OCSP_sign NID = 180
- NID_X9_62_id_ecPublicKey NID = 408
- NID_hmac NID = 855
- NID_cmac NID = 894
- NID_dhpublicnumber NID = 920
- NID_tls1_prf NID = 1021
- NID_hkdf NID = 1036
- NID_X25519 NID = 1034
- NID_X448 NID = 1035
- NID_ED25519 NID = 1087
- NID_ED448 NID = 1088
-)
diff --git a/vendor/github.com/libp2p/go-openssl/object.go b/vendor/github.com/libp2p/go-openssl/object.go
deleted file mode 100644
index 4d908e6c8..000000000
--- a/vendor/github.com/libp2p/go-openssl/object.go
+++ /dev/null
@@ -1,24 +0,0 @@
-// Copyright (C) 2020. See AUTHORS.
-//
-// Licensed under the Apache License, Version 2.0 (the "License");
-// you may not use this file except in compliance with the License.
-// You may obtain a copy of the License at
-//
-// http://www.apache.org/licenses/LICENSE-2.0
-//
-// Unless required by applicable law or agreed to in writing, software
-// distributed under the License is distributed on an "AS IS" BASIS,
-// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-// See the License for the specific language governing permissions and
-// limitations under the License.
-
-package openssl
-
-// #include "shim.h"
-import "C"
-
-// CreateObjectIdentifier creates ObjectIdentifier and returns NID for the created
-// ObjectIdentifier
-func CreateObjectIdentifier(oid string, shortName string, longName string) NID {
- return NID(C.OBJ_create(C.CString(oid), C.CString(shortName), C.CString(longName)))
-}
diff --git a/vendor/github.com/libp2p/go-openssl/pem.go b/vendor/github.com/libp2p/go-openssl/pem.go
deleted file mode 100644
index 6127cf07c..000000000
--- a/vendor/github.com/libp2p/go-openssl/pem.go
+++ /dev/null
@@ -1,28 +0,0 @@
-// Copyright (C) 2017. See AUTHORS.
-//
-// Licensed under the Apache License, Version 2.0 (the "License");
-// you may not use this file except in compliance with the License.
-// You may obtain a copy of the License at
-//
-// http://www.apache.org/licenses/LICENSE-2.0
-//
-// Unless required by applicable law or agreed to in writing, software
-// distributed under the License is distributed on an "AS IS" BASIS,
-// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-// See the License for the specific language governing permissions and
-// limitations under the License.
-
-package openssl
-
-import (
- "regexp"
-)
-
-var pemSplit *regexp.Regexp = regexp.MustCompile(`(?sm)` +
- `(^-----[\s-]*?BEGIN.*?-----[\s-]*?$` +
- `.*?` +
- `^-----[\s-]*?END.*?-----[\s-]*?$)`)
-
-func SplitPEM(data []byte) [][]byte {
- return pemSplit.FindAll(data, -1)
-}
diff --git a/vendor/github.com/libp2p/go-openssl/sha1.go b/vendor/github.com/libp2p/go-openssl/sha1.go
deleted file mode 100644
index ab4ad87f1..000000000
--- a/vendor/github.com/libp2p/go-openssl/sha1.go
+++ /dev/null
@@ -1,96 +0,0 @@
-// Copyright (C) 2017. See AUTHORS.
-//
-// Licensed under the Apache License, Version 2.0 (the "License");
-// you may not use this file except in compliance with the License.
-// You may obtain a copy of the License at
-//
-// http://www.apache.org/licenses/LICENSE-2.0
-//
-// Unless required by applicable law or agreed to in writing, software
-// distributed under the License is distributed on an "AS IS" BASIS,
-// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-// See the License for the specific language governing permissions and
-// limitations under the License.
-
-package openssl
-
-// #include "shim.h"
-import "C"
-
-import (
- "errors"
- "runtime"
- "unsafe"
-)
-
-type SHA1Hash struct {
- ctx *C.EVP_MD_CTX
- engine *Engine
-}
-
-func NewSHA1Hash() (*SHA1Hash, error) { return NewSHA1HashWithEngine(nil) }
-
-func NewSHA1HashWithEngine(e *Engine) (*SHA1Hash, error) {
- hash := &SHA1Hash{engine: e}
- hash.ctx = C.X_EVP_MD_CTX_new()
- if hash.ctx == nil {
- return nil, errors.New("openssl: sha1: unable to allocate ctx")
- }
- runtime.SetFinalizer(hash, func(hash *SHA1Hash) { hash.Close() })
- if err := hash.Reset(); err != nil {
- return nil, err
- }
- return hash, nil
-}
-
-func (s *SHA1Hash) Close() {
- if s.ctx != nil {
- C.X_EVP_MD_CTX_free(s.ctx)
- s.ctx = nil
- }
-}
-
-func engineRef(e *Engine) *C.ENGINE {
- if e == nil {
- return nil
- }
- return e.e
-}
-
-func (s *SHA1Hash) Reset() error {
- if C.X_EVP_DigestInit_ex(s.ctx, C.X_EVP_sha1(), engineRef(s.engine)) != 1 {
- return errors.New("openssl: sha1: cannot init digest ctx")
- }
- return nil
-}
-
-func (s *SHA1Hash) Write(p []byte) (n int, err error) {
- if len(p) == 0 {
- return 0, nil
- }
- if C.X_EVP_DigestUpdate(s.ctx, unsafe.Pointer(&p[0]),
- C.size_t(len(p))) != 1 {
- return 0, errors.New("openssl: sha1: cannot update digest")
- }
- return len(p), nil
-}
-
-func (s *SHA1Hash) Sum() (result [20]byte, err error) {
- if C.X_EVP_DigestFinal_ex(s.ctx,
- (*C.uchar)(unsafe.Pointer(&result[0])), nil) != 1 {
- return result, errors.New("openssl: sha1: cannot finalize ctx")
- }
- return result, s.Reset()
-}
-
-func SHA1(data []byte) (result [20]byte, err error) {
- hash, err := NewSHA1Hash()
- if err != nil {
- return result, err
- }
- defer hash.Close()
- if _, err := hash.Write(data); err != nil {
- return result, err
- }
- return hash.Sum()
-}
diff --git a/vendor/github.com/libp2p/go-openssl/sha256.go b/vendor/github.com/libp2p/go-openssl/sha256.go
deleted file mode 100644
index d9189a94b..000000000
--- a/vendor/github.com/libp2p/go-openssl/sha256.go
+++ /dev/null
@@ -1,89 +0,0 @@
-// Copyright (C) 2017. See AUTHORS.
-//
-// Licensed under the Apache License, Version 2.0 (the "License");
-// you may not use this file except in compliance with the License.
-// You may obtain a copy of the License at
-//
-// http://www.apache.org/licenses/LICENSE-2.0
-//
-// Unless required by applicable law or agreed to in writing, software
-// distributed under the License is distributed on an "AS IS" BASIS,
-// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-// See the License for the specific language governing permissions and
-// limitations under the License.
-
-package openssl
-
-// #include "shim.h"
-import "C"
-
-import (
- "errors"
- "runtime"
- "unsafe"
-)
-
-type SHA256Hash struct {
- ctx *C.EVP_MD_CTX
- engine *Engine
-}
-
-func NewSHA256Hash() (*SHA256Hash, error) { return NewSHA256HashWithEngine(nil) }
-
-func NewSHA256HashWithEngine(e *Engine) (*SHA256Hash, error) {
- hash := &SHA256Hash{engine: e}
- hash.ctx = C.X_EVP_MD_CTX_new()
- if hash.ctx == nil {
- return nil, errors.New("openssl: sha256: unable to allocate ctx")
- }
- runtime.SetFinalizer(hash, func(hash *SHA256Hash) { hash.Close() })
- if err := hash.Reset(); err != nil {
- return nil, err
- }
- return hash, nil
-}
-
-func (s *SHA256Hash) Close() {
- if s.ctx != nil {
- C.X_EVP_MD_CTX_free(s.ctx)
- s.ctx = nil
- }
-}
-
-func (s *SHA256Hash) Reset() error {
- if C.X_EVP_DigestInit_ex(s.ctx, C.X_EVP_sha256(), engineRef(s.engine)) != 1 {
- return errors.New("openssl: sha256: cannot init digest ctx")
- }
- return nil
-}
-
-func (s *SHA256Hash) Write(p []byte) (n int, err error) {
- if len(p) == 0 {
- return 0, nil
- }
- if C.X_EVP_DigestUpdate(s.ctx, unsafe.Pointer(&p[0]),
- C.size_t(len(p))) != 1 {
- return 0, errors.New("openssl: sha256: cannot update digest")
- }
- return len(p), nil
-}
-
-func (s *SHA256Hash) Sum() (result [32]byte, err error) {
- if C.X_EVP_DigestFinal_ex(s.ctx,
- (*C.uchar)(unsafe.Pointer(&result[0])), nil) != 1 {
- return result, errors.New("openssl: sha256: cannot finalize ctx")
- }
- return result, s.Reset()
-}
-
-func SHA256(data []byte) (result [32]byte, err error) {
- hash, err := NewSHA256Hash()
- if err != nil {
- return result, err
- }
- defer hash.Close()
- if _, err := hash.Write(data); err != nil {
- return result, err
- }
- return hash.Sum()
-}
diff --git a/vendor/github.com/libp2p/go-openssl/shim.c b/vendor/github.com/libp2p/go-openssl/shim.c
deleted file mode 100644
index b27a57432..000000000
--- a/vendor/github.com/libp2p/go-openssl/shim.c
+++ /dev/null
@@ -1,778 +0,0 @@
-/*
- * Copyright (C) 2014 Space Monkey, Inc.
- *
- * Licensed under the Apache License, Version 2.0 (the "License");
- * you may not use this file except in compliance with the License.
- * You may obtain a copy of the License at
- *
- * http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- *
- */
-
-#include
-
-#include
-
-#include
-#include
-#include
-#include
-#include
-#include
-
-#include "_cgo_export.h"
-
-/*
- * Functions defined in other .c files
- */
-extern int go_init_locks();
-extern void go_thread_locking_callback(int, int, const char*, int);
-extern unsigned long go_thread_id_callback();
-static int go_write_bio_puts(BIO *b, const char *str) {
- return go_write_bio_write(b, (char*)str, (int)strlen(str));
-}
-
-/*
- ************************************************
- * v1.1.1 and later implementation
- ************************************************
- */
-#if OPENSSL_VERSION_NUMBER >= 0x1010100fL
-
-const int X_ED25519_SUPPORT = 1;
-int X_EVP_PKEY_ED25519 = EVP_PKEY_ED25519;
-
-int X_EVP_DigestSignInit(EVP_MD_CTX *ctx, EVP_PKEY_CTX **pctx,
- const EVP_MD *type, ENGINE *e, EVP_PKEY *pkey){
- return EVP_DigestSignInit(ctx, pctx, type, e, pkey);
-}
-
-int X_EVP_DigestSign(EVP_MD_CTX *ctx, unsigned char *sigret,
- size_t *siglen, const unsigned char *tbs, size_t tbslen) {
- return EVP_DigestSign(ctx, sigret, siglen, tbs, tbslen);
-}
-
-
-int X_EVP_DigestVerifyInit(EVP_MD_CTX *ctx, EVP_PKEY_CTX **pctx,
- const EVP_MD *type, ENGINE *e, EVP_PKEY *pkey){
- return EVP_DigestVerifyInit(ctx, pctx, type, e, pkey);
-}
-
-int X_EVP_DigestVerify(EVP_MD_CTX *ctx, const unsigned char *sigret,
- size_t siglen, const unsigned char *tbs, size_t tbslen){
- return EVP_DigestVerify(ctx, sigret, siglen, tbs, tbslen);
-}
-
-#else
-
-const int X_ED25519_SUPPORT = 0;
-int X_EVP_PKEY_ED25519 = EVP_PKEY_NONE;
-
-int X_EVP_DigestSignInit(EVP_MD_CTX *ctx, EVP_PKEY_CTX **pctx,
- const EVP_MD *type, ENGINE *e, EVP_PKEY *pkey){
- return 0;
-}
-
-int X_EVP_DigestSign(EVP_MD_CTX *ctx, unsigned char *sigret,
- size_t *siglen, const unsigned char *tbs, size_t tbslen) {
- return 0;
-}
-
-
-int X_EVP_DigestVerifyInit(EVP_MD_CTX *ctx, EVP_PKEY_CTX **pctx,
- const EVP_MD *type, ENGINE *e, EVP_PKEY *pkey){
- return 0;
-}
-
-int X_EVP_DigestVerify(EVP_MD_CTX *ctx, const unsigned char *sigret,
- size_t siglen, const unsigned char *tbs, size_t tbslen){
- return 0;
-}
-
-#endif
-
-/*
- ************************************************
- * v1.1.X and later implementation
- ************************************************
- */
-#if OPENSSL_VERSION_NUMBER >= 0x1010000fL
-
-void X_BIO_set_data(BIO* bio, void* data) {
- BIO_set_data(bio, data);
-}
-
-void* X_BIO_get_data(BIO* bio) {
- return BIO_get_data(bio);
-}
-
-EVP_MD_CTX* X_EVP_MD_CTX_new() {
- return EVP_MD_CTX_new();
-}
-
-void X_EVP_MD_CTX_free(EVP_MD_CTX* ctx) {
- EVP_MD_CTX_free(ctx);
-}
-
-static int x_bio_create(BIO *b) {
- BIO_set_shutdown(b, 1);
- BIO_set_init(b, 1);
- BIO_set_data(b, NULL);
- BIO_clear_flags(b, ~0);
- return 1;
-}
-
-static int x_bio_free(BIO *b) {
- return 1;
-}
-
-static BIO_METHOD *writeBioMethod;
-static BIO_METHOD *readBioMethod;
-
-BIO_METHOD* BIO_s_readBio() { return readBioMethod; }
-BIO_METHOD* BIO_s_writeBio() { return writeBioMethod; }
-
-int x_bio_init_methods() {
- writeBioMethod = BIO_meth_new(BIO_TYPE_SOURCE_SINK, "Go Write BIO");
- if (!writeBioMethod) {
- return 1;
- }
- if (1 != BIO_meth_set_write(writeBioMethod,
- (int (*)(BIO *, const char *, int))go_write_bio_write)) {
- return 2;
- }
- if (1 != BIO_meth_set_puts(writeBioMethod, go_write_bio_puts)) {
- return 3;
- }
- if (1 != BIO_meth_set_ctrl(writeBioMethod, go_write_bio_ctrl)) {
- return 4;
- }
- if (1 != BIO_meth_set_create(writeBioMethod, x_bio_create)) {
- return 5;
- }
- if (1 != BIO_meth_set_destroy(writeBioMethod, x_bio_free)) {
- return 6;
- }
-
- readBioMethod = BIO_meth_new(BIO_TYPE_SOURCE_SINK, "Go Read BIO");
- if (!readBioMethod) {
- return 7;
- }
- if (1 != BIO_meth_set_read(readBioMethod, go_read_bio_read)) {
- return 8;
- }
- if (1 != BIO_meth_set_ctrl(readBioMethod, go_read_bio_ctrl)) {
- return 9;
- }
- if (1 != BIO_meth_set_create(readBioMethod, x_bio_create)) {
- return 10;
- }
- if (1 != BIO_meth_set_destroy(readBioMethod, x_bio_free)) {
- return 11;
- }
-
- return 0;
-}
-
-const EVP_MD *X_EVP_dss() {
- return NULL;
-}
-
-const EVP_MD *X_EVP_dss1() {
- return NULL;
-}
-
-const EVP_MD *X_EVP_sha() {
- return NULL;
-}
-
-int X_EVP_CIPHER_CTX_encrypting(const EVP_CIPHER_CTX *ctx) {
- return EVP_CIPHER_CTX_encrypting(ctx);
-}
-
-int X_X509_add_ref(X509* x509) {
- return X509_up_ref(x509);
-}
-
-const ASN1_TIME *X_X509_get0_notBefore(const X509 *x) {
- return X509_get0_notBefore(x);
-}
-
-const ASN1_TIME *X_X509_get0_notAfter(const X509 *x) {
- return X509_get0_notAfter(x);
-}
-
-HMAC_CTX *X_HMAC_CTX_new(void) {
- return HMAC_CTX_new();
-}
-
-void X_HMAC_CTX_free(HMAC_CTX *ctx) {
- HMAC_CTX_free(ctx);
-}
-
-int X_PEM_write_bio_PrivateKey_traditional(BIO *bio, EVP_PKEY *key, const EVP_CIPHER *enc, unsigned char *kstr, int klen, pem_password_cb *cb, void *u) {
- return PEM_write_bio_PrivateKey_traditional(bio, key, enc, kstr, klen, cb, u);
-}
-
-#endif
-
-/*
- ************************************************
- * v1.0.X implementation
- ************************************************
- */
-#if OPENSSL_VERSION_NUMBER < 0x1010000fL
-
-static int x_bio_create(BIO *b) {
- b->shutdown = 1;
- b->init = 1;
- b->num = -1;
- b->ptr = NULL;
- b->flags = 0;
- return 1;
-}
-
-static int x_bio_free(BIO *b) {
- return 1;
-}
-
-static BIO_METHOD writeBioMethod = {
- BIO_TYPE_SOURCE_SINK,
- "Go Write BIO",
- (int (*)(BIO *, const char *, int))go_write_bio_write,
- NULL,
- go_write_bio_puts,
- NULL,
- go_write_bio_ctrl,
- x_bio_create,
- x_bio_free,
- NULL};
-
-static BIO_METHOD* BIO_s_writeBio() { return &writeBioMethod; }
-
-static BIO_METHOD readBioMethod = {
- BIO_TYPE_SOURCE_SINK,
- "Go Read BIO",
- NULL,
- go_read_bio_read,
- NULL,
- NULL,
- go_read_bio_ctrl,
- x_bio_create,
- x_bio_free,
- NULL};
-
-static BIO_METHOD* BIO_s_readBio() { return &readBioMethod; }
-
-int x_bio_init_methods() {
- /* statically initialized above */
- return 0;
-}
-
-void X_BIO_set_data(BIO* bio, void* data) {
- bio->ptr = data;
-}
-
-void* X_BIO_get_data(BIO* bio) {
- return bio->ptr;
-}
-
-EVP_MD_CTX* X_EVP_MD_CTX_new() {
- return EVP_MD_CTX_create();
-}
-
-void X_EVP_MD_CTX_free(EVP_MD_CTX* ctx) {
- EVP_MD_CTX_destroy(ctx);
-}
-
-int X_X509_add_ref(X509* x509) {
- CRYPTO_add(&x509->references, 1, CRYPTO_LOCK_X509);
- return 1;
-}
-
-const ASN1_TIME *X_X509_get0_notBefore(const X509 *x) {
- return x->cert_info->validity->notBefore;
-}
-
-const ASN1_TIME *X_X509_get0_notAfter(const X509 *x) {
- return x->cert_info->validity->notAfter;
-}
-
-const EVP_MD *X_EVP_dss() {
- return EVP_dss();
-}
-
-const EVP_MD *X_EVP_dss1() {
- return EVP_dss1();
-}
-
-const EVP_MD *X_EVP_sha() {
- return EVP_sha();
-}
-
-int X_EVP_CIPHER_CTX_encrypting(const EVP_CIPHER_CTX *ctx) {
- return ctx->encrypt;
-}
-
-HMAC_CTX *X_HMAC_CTX_new(void) {
- /* v1.1.0 uses a OPENSSL_zalloc to allocate the memory which does not exist
- * in previous versions. malloc+memset to get the same behavior */
- HMAC_CTX *ctx = (HMAC_CTX *)OPENSSL_malloc(sizeof(HMAC_CTX));
- if (ctx) {
- memset(ctx, 0, sizeof(HMAC_CTX));
- HMAC_CTX_init(ctx);
- }
- return ctx;
-}
-
-void X_HMAC_CTX_free(HMAC_CTX *ctx) {
- if (ctx) {
- HMAC_CTX_cleanup(ctx);
- OPENSSL_free(ctx);
- }
-}
-
-int X_PEM_write_bio_PrivateKey_traditional(BIO *bio, EVP_PKEY *key, const EVP_CIPHER *enc, unsigned char *kstr, int klen, pem_password_cb *cb, void *u) {
- /* PEM_write_bio_PrivateKey always tries to use the PKCS8 format if it
- * is available, instead of using the "traditional" format as stated in the
- * OpenSSL man page.
- * i2d_PrivateKey should give us the correct DER encoding, so we'll just
- * use PEM_ASN1_write_bio directly to write the DER encoding with the correct
- * type header. */
-
- int ppkey_id, pkey_base_id, ppkey_flags;
- const char *pinfo, *ppem_str;
- char pem_type_str[80];
-
- // Lookup the ASN1 method information to get the pem type
- if (EVP_PKEY_asn1_get0_info(&ppkey_id, &pkey_base_id, &ppkey_flags, &pinfo, &ppem_str, key->ameth) != 1) {
- return 0;
- }
- // Set up the PEM type string
- if (BIO_snprintf(pem_type_str, 80, "%s PRIVATE KEY", ppem_str) <= 0) {
- // Failed to write out the pem type string, something is really wrong.
- return 0;
- }
- // Write out everything to the BIO
- return PEM_ASN1_write_bio((i2d_of_void *)i2d_PrivateKey,
- pem_type_str, bio, key, enc, kstr, klen, cb, u);
-}
-
-#endif
-
-/*
- ************************************************
- * common implementation
- ************************************************
- */
-
-int X_shim_init() {
- int rc = 0;
-
- OPENSSL_config(NULL);
- ENGINE_load_builtin_engines();
- SSL_load_error_strings();
- SSL_library_init();
- OpenSSL_add_all_algorithms();
- //
- // Set up OPENSSL thread safety callbacks.
- rc = go_init_locks();
- if (rc != 0) {
- return rc;
- }
- CRYPTO_set_locking_callback(go_thread_locking_callback);
- CRYPTO_set_id_callback(go_thread_id_callback);
-
- rc = x_bio_init_methods();
- if (rc != 0) {
- return rc;
- }
-
- return 0;
-}
-
-void * X_OPENSSL_malloc(size_t size) {
- return OPENSSL_malloc(size);
-}
-
-void X_OPENSSL_free(void *ref) {
- OPENSSL_free(ref);
-}
-
-long X_SSL_set_options(SSL* ssl, long options) {
- return SSL_set_options(ssl, options);
-}
-
-long X_SSL_get_options(SSL* ssl) {
- return SSL_get_options(ssl);
-}
-
-long X_SSL_clear_options(SSL* ssl, long options) {
- return SSL_clear_options(ssl, options);
-}
-
-long X_SSL_set_tlsext_host_name(SSL *ssl, const char *name) {
- return SSL_set_tlsext_host_name(ssl, name);
-}
-const char * X_SSL_get_cipher_name(const SSL *ssl) {
- return SSL_get_cipher_name(ssl);
-}
-int X_SSL_session_reused(SSL *ssl) {
- return SSL_session_reused(ssl);
-}
-
-int X_SSL_new_index() {
- return SSL_get_ex_new_index(0, NULL, NULL, NULL, go_ssl_crypto_ex_free);
-}
-
-int X_SSL_verify_cb(int ok, X509_STORE_CTX* store) {
- SSL* ssl = (SSL *)X509_STORE_CTX_get_ex_data(store,
- SSL_get_ex_data_X509_STORE_CTX_idx());
- void* p = SSL_get_ex_data(ssl, get_ssl_idx());
- // get the pointer to the go Ctx object and pass it back into the thunk
- return go_ssl_verify_cb_thunk(p, ok, store);
-}
-
-const SSL_METHOD *X_SSLv23_method() {
- return SSLv23_method();
-}
-
-const SSL_METHOD *X_SSLv3_method() {
-#ifndef OPENSSL_NO_SSL3_METHOD
- return SSLv3_method();
-#else
- return NULL;
-#endif
-}
-
-const SSL_METHOD *X_TLSv1_method() {
- return TLSv1_method();
-}
-
-const SSL_METHOD *X_TLSv1_1_method() {
-#if defined(TLS1_1_VERSION) && !defined(OPENSSL_SYSNAME_MACOSX)
- return TLSv1_1_method();
-#else
- return NULL;
-#endif
-}
-
-const SSL_METHOD *X_TLSv1_2_method() {
-#if defined(TLS1_2_VERSION) && !defined(OPENSSL_SYSNAME_MACOSX)
- return TLSv1_2_method();
-#else
- return NULL;
-#endif
-}
-
-int X_SSL_CTX_new_index() {
- return SSL_CTX_get_ex_new_index(0, NULL, NULL, NULL, NULL);
-}
-
-int X_SSL_CTX_set_min_proto_version(SSL_CTX *ctx, int version) {
- return SSL_CTX_set_min_proto_version(ctx, version);
-}
-
-int X_SSL_CTX_set_max_proto_version(SSL_CTX *ctx, int version) {
- return SSL_CTX_set_max_proto_version(ctx, version);
-}
-
-long X_SSL_CTX_set_options(SSL_CTX* ctx, long options) {
- return SSL_CTX_set_options(ctx, options);
-}
-
-long X_SSL_CTX_clear_options(SSL_CTX* ctx, long options) {
- return SSL_CTX_clear_options(ctx, options);
-}
-
-long X_SSL_CTX_get_options(SSL_CTX* ctx) {
- return SSL_CTX_get_options(ctx);
-}
-
-long X_SSL_CTX_set_mode(SSL_CTX* ctx, long modes) {
- return SSL_CTX_set_mode(ctx, modes);
-}
-
-long X_SSL_CTX_get_mode(SSL_CTX* ctx) {
- return SSL_CTX_get_mode(ctx);
-}
-
-long X_SSL_CTX_set_session_cache_mode(SSL_CTX* ctx, long modes) {
- return SSL_CTX_set_session_cache_mode(ctx, modes);
-}
-
-long X_SSL_CTX_sess_set_cache_size(SSL_CTX* ctx, long t) {
- return SSL_CTX_sess_set_cache_size(ctx, t);
-}
-
-long X_SSL_CTX_sess_get_cache_size(SSL_CTX* ctx) {
- return SSL_CTX_sess_get_cache_size(ctx);
-}
-
-long X_SSL_CTX_set_timeout(SSL_CTX* ctx, long t) {
- return SSL_CTX_set_timeout(ctx, t);
-}
-
-long X_SSL_CTX_get_timeout(SSL_CTX* ctx) {
- return SSL_CTX_get_timeout(ctx);
-}
-
-long X_SSL_CTX_add_extra_chain_cert(SSL_CTX* ctx, X509 *cert) {
- return SSL_CTX_add_extra_chain_cert(ctx, cert);
-}
-
-long X_SSL_CTX_set_tmp_ecdh(SSL_CTX* ctx, EC_KEY *key) {
- return SSL_CTX_set_tmp_ecdh(ctx, key);
-}
-
-long X_SSL_CTX_set_tlsext_servername_callback(
- SSL_CTX* ctx, int (*cb)(SSL *con, int *ad, void *args)) {
- return SSL_CTX_set_tlsext_servername_callback(ctx, cb);
-}
-
-int X_SSL_CTX_verify_cb(int ok, X509_STORE_CTX* store) {
- SSL* ssl = (SSL *)X509_STORE_CTX_get_ex_data(store,
- SSL_get_ex_data_X509_STORE_CTX_idx());
- SSL_CTX* ssl_ctx = SSL_get_SSL_CTX(ssl);
- void* p = SSL_CTX_get_ex_data(ssl_ctx, get_ssl_ctx_idx());
- // get the pointer to the go Ctx object and pass it back into the thunk
- return go_ssl_ctx_verify_cb_thunk(p, ok, store);
-}
-
-long X_SSL_CTX_set_tmp_dh(SSL_CTX* ctx, DH *dh) {
- return SSL_CTX_set_tmp_dh(ctx, dh);
-}
-
-long X_PEM_read_DHparams(SSL_CTX* ctx, DH *dh) {
- return SSL_CTX_set_tmp_dh(ctx, dh);
-}
-
-int X_SSL_CTX_set_tlsext_ticket_key_cb(SSL_CTX *sslctx,
- int (*cb)(SSL *s, unsigned char key_name[16],
- unsigned char iv[EVP_MAX_IV_LENGTH],
- EVP_CIPHER_CTX *ctx, HMAC_CTX *hctx, int enc)) {
- return SSL_CTX_set_tlsext_ticket_key_cb(sslctx, cb);
-}
-
-int X_SSL_CTX_ticket_key_cb(SSL *s, unsigned char key_name[16],
- unsigned char iv[EVP_MAX_IV_LENGTH],
- EVP_CIPHER_CTX *cctx, HMAC_CTX *hctx, int enc) {
-
- SSL_CTX* ssl_ctx = SSL_get_SSL_CTX(s);
- void* p = SSL_CTX_get_ex_data(ssl_ctx, get_ssl_ctx_idx());
- // get the pointer to the go Ctx object and pass it back into the thunk
- return go_ticket_key_cb_thunk(p, s, key_name, iv, cctx, hctx, enc);
-}
-
-int X_BIO_get_flags(BIO *b) {
- return BIO_get_flags(b);
-}
-
-void X_BIO_set_flags(BIO *b, int flags) {
- return BIO_set_flags(b, flags);
-}
-
-void X_BIO_clear_flags(BIO *b, int flags) {
- BIO_clear_flags(b, flags);
-}
-
-int X_BIO_read(BIO *b, void *buf, int len) {
- return BIO_read(b, buf, len);
-}
-
-int X_BIO_write(BIO *b, const void *buf, int len) {
- return BIO_write(b, buf, len);
-}
-
-BIO *X_BIO_new_write_bio() {
- return BIO_new(BIO_s_writeBio());
-}
-
-BIO *X_BIO_new_read_bio() {
- return BIO_new(BIO_s_readBio());
-}
-
-const EVP_MD *X_EVP_get_digestbyname(const char *name) {
- return EVP_get_digestbyname(name);
-}
-
-const EVP_MD *X_EVP_md_null() {
- return EVP_md_null();
-}
-
-const EVP_MD *X_EVP_md5() {
- return EVP_md5();
-}
-
-const EVP_MD *X_EVP_md4() {
- return EVP_md4();
-}
-
-const EVP_MD *X_EVP_ripemd160() {
- return EVP_ripemd160();
-}
-
-const EVP_MD *X_EVP_sha224() {
- return EVP_sha224();
-}
-
-const EVP_MD *X_EVP_sha1() {
- return EVP_sha1();
-}
-
-const EVP_MD *X_EVP_sha256() {
- return EVP_sha256();
-}
-
-const EVP_MD *X_EVP_sha384() {
- return EVP_sha384();
-}
-
-const EVP_MD *X_EVP_sha512() {
- return EVP_sha512();
-}
-
-int X_EVP_MD_size(const EVP_MD *md) {
- return EVP_MD_size(md);
-}
-
-int X_EVP_DigestInit_ex(EVP_MD_CTX *ctx, const EVP_MD *type, ENGINE *impl) {
- return EVP_DigestInit_ex(ctx, type, impl);
-}
-
-int X_EVP_DigestUpdate(EVP_MD_CTX *ctx, const void *d, size_t cnt) {
- return EVP_DigestUpdate(ctx, d, cnt);
-}
-
-int X_EVP_DigestFinal_ex(EVP_MD_CTX *ctx, unsigned char *md, unsigned int *s) {
- return EVP_DigestFinal_ex(ctx, md, s);
-}
-
-int X_EVP_SignInit(EVP_MD_CTX *ctx, const EVP_MD *type) {
- return EVP_SignInit(ctx, type);
-}
-
-int X_EVP_SignUpdate(EVP_MD_CTX *ctx, const void *d, unsigned int cnt) {
- return EVP_SignUpdate(ctx, d, cnt);
-}
-
-EVP_PKEY *X_EVP_PKEY_new(void) {
- return EVP_PKEY_new();
-}
-
-void X_EVP_PKEY_free(EVP_PKEY *pkey) {
- EVP_PKEY_free(pkey);
-}
-
-int X_EVP_PKEY_size(EVP_PKEY *pkey) {
- return EVP_PKEY_size(pkey);
-}
-
-struct rsa_st *X_EVP_PKEY_get1_RSA(EVP_PKEY *pkey) {
- return EVP_PKEY_get1_RSA(pkey);
-}
-
-int X_EVP_PKEY_set1_RSA(EVP_PKEY *pkey, struct rsa_st *key) {
- return EVP_PKEY_set1_RSA(pkey, key);
-}
-
-int X_EVP_PKEY_assign_charp(EVP_PKEY *pkey, int type, char *key) {
- return EVP_PKEY_assign(pkey, type, key);
-}
-
-int X_EVP_SignFinal(EVP_MD_CTX *ctx, unsigned char *md, unsigned int *s, EVP_PKEY *pkey) {
- return EVP_SignFinal(ctx, md, s, pkey);
-}
-
-int X_EVP_VerifyInit(EVP_MD_CTX *ctx, const EVP_MD *type) {
- return EVP_VerifyInit(ctx, type);
-}
-
-int X_EVP_VerifyUpdate(EVP_MD_CTX *ctx, const void *d,
- unsigned int cnt) {
- return EVP_VerifyUpdate(ctx, d, cnt);
-}
-
-int X_EVP_VerifyFinal(EVP_MD_CTX *ctx, const unsigned char *sigbuf, unsigned int siglen, EVP_PKEY *pkey) {
- return EVP_VerifyFinal(ctx, sigbuf, siglen, pkey);
-}
-
-int X_EVP_CIPHER_block_size(EVP_CIPHER *c) {
- return EVP_CIPHER_block_size(c);
-}
-
-int X_EVP_CIPHER_key_length(EVP_CIPHER *c) {
- return EVP_CIPHER_key_length(c);
-}
-
-int X_EVP_CIPHER_iv_length(EVP_CIPHER *c) {
- return EVP_CIPHER_iv_length(c);
-}
-
-int X_EVP_CIPHER_nid(EVP_CIPHER *c) {
- return EVP_CIPHER_nid(c);
-}
-
-int X_EVP_CIPHER_CTX_block_size(EVP_CIPHER_CTX *ctx) {
- return EVP_CIPHER_CTX_block_size(ctx);
-}
-
-int X_EVP_CIPHER_CTX_key_length(EVP_CIPHER_CTX *ctx) {
- return EVP_CIPHER_CTX_key_length(ctx);
-}
-
-int X_EVP_CIPHER_CTX_iv_length(EVP_CIPHER_CTX *ctx) {
- return EVP_CIPHER_CTX_iv_length(ctx);
-}
-
-void X_EVP_CIPHER_CTX_set_padding(EVP_CIPHER_CTX *ctx, int padding) {
- //openssl always returns 1 for set_padding
- //hence return value is not checked
- EVP_CIPHER_CTX_set_padding(ctx, padding);
-}
-
-const EVP_CIPHER *X_EVP_CIPHER_CTX_cipher(EVP_CIPHER_CTX *ctx) {
- return EVP_CIPHER_CTX_cipher(ctx);
-}
-
-int X_EVP_PKEY_CTX_set_ec_paramgen_curve_nid(EVP_PKEY_CTX *ctx, int nid) {
- return EVP_PKEY_CTX_set_ec_paramgen_curve_nid(ctx, nid);
-}
-
-size_t X_HMAC_size(const HMAC_CTX *e) {
- return HMAC_size(e);
-}
-
-int X_HMAC_Init_ex(HMAC_CTX *ctx, const void *key, int len, const EVP_MD *md, ENGINE *impl) {
- return HMAC_Init_ex(ctx, key, len, md, impl);
-}
-
-int X_HMAC_Update(HMAC_CTX *ctx, const unsigned char *data, size_t len) {
- return HMAC_Update(ctx, data, len);
-}
-
-int X_HMAC_Final(HMAC_CTX *ctx, unsigned char *md, unsigned int *len) {
- return HMAC_Final(ctx, md, len);
-}
-
-int X_sk_X509_num(STACK_OF(X509) *sk) {
- return sk_X509_num(sk);
-}
-
-X509 *X_sk_X509_value(STACK_OF(X509)* sk, int i) {
- return sk_X509_value(sk, i);
-}
-
-long X_X509_get_version(const X509 *x) {
- return X509_get_version(x);
-}
-
-int X_X509_set_version(X509 *x, long version) {
- return X509_set_version(x, version);
-}
diff --git a/vendor/github.com/libp2p/go-openssl/shim.h b/vendor/github.com/libp2p/go-openssl/shim.h
deleted file mode 100644
index 94fe8c612..000000000
--- a/vendor/github.com/libp2p/go-openssl/shim.h
+++ /dev/null
@@ -1,184 +0,0 @@
-/*
- * Copyright (C) 2014 Space Monkey, Inc.
- *
- * Licensed under the Apache License, Version 2.0 (the "License");
- * you may not use this file except in compliance with the License.
- * You may obtain a copy of the License at
- *
- * http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- *
- */
-
-#include
-#include
-
-#include
-#include
-#include
-#include
-#include
-#include
-#include
-#include
-#include
-#include
-
-
-#ifndef SSL_MODE_RELEASE_BUFFERS
-#define SSL_MODE_RELEASE_BUFFERS 0
-#endif
-
-#ifndef SSL_OP_NO_COMPRESSION
-#define SSL_OP_NO_COMPRESSION 0
-#endif
-
-/* shim methods */
-extern int X_shim_init();
-
-/* Library methods */
-extern void X_OPENSSL_free(void *ref);
-extern void *X_OPENSSL_malloc(size_t size);
-
-/* SSL methods */
-extern long X_SSL_set_options(SSL* ssl, long options);
-extern long X_SSL_get_options(SSL* ssl);
-extern long X_SSL_clear_options(SSL* ssl, long options);
-extern long X_SSL_set_tlsext_host_name(SSL *ssl, const char *name);
-extern const char * X_SSL_get_cipher_name(const SSL *ssl);
-extern int X_SSL_session_reused(SSL *ssl);
-extern int X_SSL_new_index();
-
-extern const SSL_METHOD *X_SSLv23_method();
-extern const SSL_METHOD *X_SSLv3_method();
-extern const SSL_METHOD *X_TLSv1_method();
-extern const SSL_METHOD *X_TLSv1_1_method();
-extern const SSL_METHOD *X_TLSv1_2_method();
-
-#if defined SSL_CTRL_SET_TLSEXT_HOSTNAME
-extern int sni_cb(SSL *ssl_conn, int *ad, void *arg);
-#endif
-extern int X_SSL_verify_cb(int ok, X509_STORE_CTX* store);
-
-/* SSL_CTX methods */
-extern int X_SSL_CTX_new_index();
-extern int X_SSL_CTX_set_min_proto_version(SSL_CTX *ctx, int version);
-extern int X_SSL_CTX_set_max_proto_version(SSL_CTX *ctx, int version);
-extern long X_SSL_CTX_set_options(SSL_CTX* ctx, long options);
-extern long X_SSL_CTX_clear_options(SSL_CTX* ctx, long options);
-extern long X_SSL_CTX_get_options(SSL_CTX* ctx);
-extern long X_SSL_CTX_set_mode(SSL_CTX* ctx, long modes);
-extern long X_SSL_CTX_get_mode(SSL_CTX* ctx);
-extern long X_SSL_CTX_set_session_cache_mode(SSL_CTX* ctx, long modes);
-extern long X_SSL_CTX_sess_set_cache_size(SSL_CTX* ctx, long t);
-extern long X_SSL_CTX_sess_get_cache_size(SSL_CTX* ctx);
-extern long X_SSL_CTX_set_timeout(SSL_CTX* ctx, long t);
-extern long X_SSL_CTX_get_timeout(SSL_CTX* ctx);
-extern long X_SSL_CTX_add_extra_chain_cert(SSL_CTX* ctx, X509 *cert);
-extern long X_SSL_CTX_set_tmp_ecdh(SSL_CTX* ctx, EC_KEY *key);
-extern long X_SSL_CTX_set_tlsext_servername_callback(SSL_CTX* ctx, int (*cb)(SSL *con, int *ad, void *args));
-extern int X_SSL_CTX_verify_cb(int ok, X509_STORE_CTX* store);
-extern long X_SSL_CTX_set_tmp_dh(SSL_CTX* ctx, DH *dh);
-extern long X_PEM_read_DHparams(SSL_CTX* ctx, DH *dh);
-extern int X_SSL_CTX_set_tlsext_ticket_key_cb(SSL_CTX *sslctx,
- int (*cb)(SSL *s, unsigned char key_name[16],
- unsigned char iv[EVP_MAX_IV_LENGTH],
- EVP_CIPHER_CTX *ctx, HMAC_CTX *hctx, int enc));
-extern int X_SSL_CTX_ticket_key_cb(SSL *s, unsigned char key_name[16],
- unsigned char iv[EVP_MAX_IV_LENGTH],
- EVP_CIPHER_CTX *cctx, HMAC_CTX *hctx, int enc);
-extern int SSL_CTX_set_alpn_protos(SSL_CTX *ctx, const unsigned char *protos,
- unsigned int protos_len);
-
-/* BIO methods */
-extern int X_BIO_get_flags(BIO *b);
-extern void X_BIO_set_flags(BIO *bio, int flags);
-extern void X_BIO_clear_flags(BIO *bio, int flags);
-extern void X_BIO_set_data(BIO *bio, void* data);
-extern void *X_BIO_get_data(BIO *bio);
-extern int X_BIO_read(BIO *b, void *buf, int len);
-extern int X_BIO_write(BIO *b, const void *buf, int len);
-extern BIO *X_BIO_new_write_bio();
-extern BIO *X_BIO_new_read_bio();
-
-/* EVP methods */
-extern const int X_ED25519_SUPPORT;
-extern int X_EVP_PKEY_ED25519;
-extern const EVP_MD *X_EVP_get_digestbyname(const char *name);
-extern EVP_MD_CTX *X_EVP_MD_CTX_new();
-extern void X_EVP_MD_CTX_free(EVP_MD_CTX *ctx);
-extern const EVP_MD *X_EVP_md_null();
-extern const EVP_MD *X_EVP_md5();
-extern const EVP_MD *X_EVP_md4();
-extern const EVP_MD *X_EVP_sha();
-extern const EVP_MD *X_EVP_sha1();
-extern const EVP_MD *X_EVP_dss();
-extern const EVP_MD *X_EVP_dss1();
-extern const EVP_MD *X_EVP_ripemd160();
-extern const EVP_MD *X_EVP_sha224();
-extern const EVP_MD *X_EVP_sha256();
-extern const EVP_MD *X_EVP_sha384();
-extern const EVP_MD *X_EVP_sha512();
-extern int X_EVP_MD_size(const EVP_MD *md);
-extern int X_EVP_DigestInit_ex(EVP_MD_CTX *ctx, const EVP_MD *type, ENGINE *impl);
-extern int X_EVP_DigestUpdate(EVP_MD_CTX *ctx, const void *d, size_t cnt);
-extern int X_EVP_DigestFinal_ex(EVP_MD_CTX *ctx, unsigned char *md, unsigned int *s);
-extern int X_EVP_SignInit(EVP_MD_CTX *ctx, const EVP_MD *type);
-extern int X_EVP_SignUpdate(EVP_MD_CTX *ctx, const void *d, unsigned int cnt);
-extern int X_EVP_DigestSignInit(EVP_MD_CTX *ctx, EVP_PKEY_CTX **pctx, const EVP_MD *type, ENGINE *e, EVP_PKEY *pkey);
-extern int X_EVP_DigestSign(EVP_MD_CTX *ctx, unsigned char *sigret, size_t *siglen, const unsigned char *tbs, size_t tbslen);
-extern EVP_PKEY *X_EVP_PKEY_new(void);
-extern void X_EVP_PKEY_free(EVP_PKEY *pkey);
-extern int X_EVP_PKEY_size(EVP_PKEY *pkey);
-extern struct rsa_st *X_EVP_PKEY_get1_RSA(EVP_PKEY *pkey);
-extern int X_EVP_PKEY_set1_RSA(EVP_PKEY *pkey, struct rsa_st *key);
-extern int X_EVP_PKEY_assign_charp(EVP_PKEY *pkey, int type, char *key);
-extern int X_EVP_SignFinal(EVP_MD_CTX *ctx, unsigned char *md, unsigned int *s, EVP_PKEY *pkey);
-extern int X_EVP_VerifyInit(EVP_MD_CTX *ctx, const EVP_MD *type);
-extern int X_EVP_VerifyUpdate(EVP_MD_CTX *ctx, const void *d, unsigned int cnt);
-extern int X_EVP_VerifyFinal(EVP_MD_CTX *ctx, const unsigned char *sigbuf, unsigned int siglen, EVP_PKEY *pkey);
-extern int X_EVP_DigestVerifyInit(EVP_MD_CTX *ctx, EVP_PKEY_CTX **pctx, const EVP_MD *type, ENGINE *e, EVP_PKEY *pkey);
-extern int X_EVP_DigestVerify(EVP_MD_CTX *ctx, const unsigned char *sigret, size_t siglen, const unsigned char *tbs, size_t tbslen);
-extern int X_EVP_CIPHER_block_size(EVP_CIPHER *c);
-extern int X_EVP_CIPHER_key_length(EVP_CIPHER *c);
-extern int X_EVP_CIPHER_iv_length(EVP_CIPHER *c);
-extern int X_EVP_CIPHER_nid(EVP_CIPHER *c);
-extern int X_EVP_CIPHER_CTX_block_size(EVP_CIPHER_CTX *ctx);
-extern int X_EVP_CIPHER_CTX_key_length(EVP_CIPHER_CTX *ctx);
-extern int X_EVP_CIPHER_CTX_iv_length(EVP_CIPHER_CTX *ctx);
-extern void X_EVP_CIPHER_CTX_set_padding(EVP_CIPHER_CTX *ctx, int padding);
-extern const EVP_CIPHER *X_EVP_CIPHER_CTX_cipher(EVP_CIPHER_CTX *ctx);
-extern int X_EVP_CIPHER_CTX_encrypting(const EVP_CIPHER_CTX *ctx);
-extern int X_EVP_PKEY_CTX_set_ec_paramgen_curve_nid(EVP_PKEY_CTX *ctx, int nid);
-
-/* HMAC methods */
-extern size_t X_HMAC_size(const HMAC_CTX *e);
-extern HMAC_CTX *X_HMAC_CTX_new(void);
-extern void X_HMAC_CTX_free(HMAC_CTX *ctx);
-extern int X_HMAC_Init_ex(HMAC_CTX *ctx, const void *key, int len, const EVP_MD *md, ENGINE *impl);
-extern int X_HMAC_Update(HMAC_CTX *ctx, const unsigned char *data, size_t len);
-extern int X_HMAC_Final(HMAC_CTX *ctx, unsigned char *md, unsigned int *len);
-
-/* X509 methods */
-extern int X_X509_add_ref(X509* x509);
-extern const ASN1_TIME *X_X509_get0_notBefore(const X509 *x);
-extern const ASN1_TIME *X_X509_get0_notAfter(const X509 *x);
-extern int X_sk_X509_num(STACK_OF(X509) *sk);
-extern X509 *X_sk_X509_value(STACK_OF(X509)* sk, int i);
-extern long X_X509_get_version(const X509 *x);
-extern int X_X509_set_version(X509 *x, long version);
-
-/* PEM methods */
-extern int X_PEM_write_bio_PrivateKey_traditional(BIO *bio, EVP_PKEY *key, const EVP_CIPHER *enc, unsigned char *kstr, int klen, pem_password_cb *cb, void *u);
-
-/* Object methods */
-extern int OBJ_create(const char *oid,const char *sn,const char *ln);
-
-/* Extension helper method */
-extern const unsigned char * get_extention(X509 *x, int NID, int *data_len);
-extern int add_custom_ext(X509 *cert, int nid, char *value, int len);
\ No newline at end of file
diff --git a/vendor/github.com/libp2p/go-openssl/sni.c b/vendor/github.com/libp2p/go-openssl/sni.c
deleted file mode 100644
index f9e8d16b0..000000000
--- a/vendor/github.com/libp2p/go-openssl/sni.c
+++ /dev/null
@@ -1,23 +0,0 @@
-// Copyright (C) 2017. See AUTHORS.
-//
-// Licensed under the Apache License, Version 2.0 (the "License");
-// you may not use this file except in compliance with the License.
-// You may obtain a copy of the License at
-//
-// http://www.apache.org/licenses/LICENSE-2.0
-//
-// Unless required by applicable law or agreed to in writing, software
-// distributed under the License is distributed on an "AS IS" BASIS,
-// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-// See the License for the specific language governing permissions and
-// limitations under the License.
-
-#include
-#include "_cgo_export.h"
-#include
-
-int sni_cb(SSL *con, int *ad, void *arg) {
- SSL_CTX* ssl_ctx = ssl_ctx = SSL_get_SSL_CTX(con);
- void* p = SSL_CTX_get_ex_data(ssl_ctx, get_ssl_ctx_idx());
- return sni_cb_thunk(p, con, ad, arg);
-}
diff --git a/vendor/github.com/libp2p/go-openssl/ssl.go b/vendor/github.com/libp2p/go-openssl/ssl.go
deleted file mode 100644
index b187d15d5..000000000
--- a/vendor/github.com/libp2p/go-openssl/ssl.go
+++ /dev/null
@@ -1,172 +0,0 @@
-// Copyright (C) 2017. See AUTHORS.
-//
-// Licensed under the Apache License, Version 2.0 (the "License");
-// you may not use this file except in compliance with the License.
-// You may obtain a copy of the License at
-//
-// http://www.apache.org/licenses/LICENSE-2.0
-//
-// Unless required by applicable law or agreed to in writing, software
-// distributed under the License is distributed on an "AS IS" BASIS,
-// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-// See the License for the specific language governing permissions and
-// limitations under the License.
-
-package openssl
-
-// #include "shim.h"
-import "C"
-
-import (
- "os"
- "unsafe"
-
- "github.com/mattn/go-pointer"
-)
-
-type SSLTLSExtErr int
-
-const (
- SSLTLSExtErrOK SSLTLSExtErr = C.SSL_TLSEXT_ERR_OK
- SSLTLSExtErrAlertWarning SSLTLSExtErr = C.SSL_TLSEXT_ERR_ALERT_WARNING
- SSLTLSEXTErrAlertFatal SSLTLSExtErr = C.SSL_TLSEXT_ERR_ALERT_FATAL
- SSLTLSEXTErrNoAck SSLTLSExtErr = C.SSL_TLSEXT_ERR_NOACK
-)
-
-var (
- ssl_idx = C.X_SSL_new_index()
-)
-
-//export get_ssl_idx
-func get_ssl_idx() C.int {
- return ssl_idx
-}
-
-type SSL struct {
- ssl *C.SSL
- verify_cb VerifyCallback
-}
-
-//export go_ssl_verify_cb_thunk
-func go_ssl_verify_cb_thunk(p unsafe.Pointer, ok C.int, ctx *C.X509_STORE_CTX) C.int {
- defer func() {
- if err := recover(); err != nil {
- logger.Critf("openssl: verify callback panic'd: %v", err)
- os.Exit(1)
- }
- }()
- verify_cb := pointer.Restore(p).(*SSL).verify_cb
- // set up defaults just in case verify_cb is nil
- if verify_cb != nil {
- store := &CertificateStoreCtx{ctx: ctx}
- if verify_cb(ok == 1, store) {
- ok = 1
- } else {
- ok = 0
- }
- }
- return ok
-}
-
-// Wrapper around SSL_get_servername. Returns server name according to rfc6066
-// http://tools.ietf.org/html/rfc6066.
-func (s *SSL) GetServername() string {
- return C.GoString(C.SSL_get_servername(s.ssl, C.TLSEXT_NAMETYPE_host_name))
-}
-
-// GetOptions returns SSL options. See
-// https://www.openssl.org/docs/ssl/SSL_CTX_set_options.html
-func (s *SSL) GetOptions() Options {
- return Options(C.X_SSL_get_options(s.ssl))
-}
-
-// SetOptions sets SSL options. See
-// https://www.openssl.org/docs/ssl/SSL_CTX_set_options.html
-func (s *SSL) SetOptions(options Options) Options {
- return Options(C.X_SSL_set_options(s.ssl, C.long(options)))
-}
-
-// ClearOptions clear SSL options. See
-// https://www.openssl.org/docs/ssl/SSL_CTX_set_options.html
-func (s *SSL) ClearOptions(options Options) Options {
- return Options(C.X_SSL_clear_options(s.ssl, C.long(options)))
-}
-
-// SetVerify controls peer verification settings. See
-// http://www.openssl.org/docs/ssl/SSL_CTX_set_verify.html
-func (s *SSL) SetVerify(options VerifyOptions, verify_cb VerifyCallback) {
- s.verify_cb = verify_cb
- if verify_cb != nil {
- C.SSL_set_verify(s.ssl, C.int(options), (*[0]byte)(C.X_SSL_verify_cb))
- } else {
- C.SSL_set_verify(s.ssl, C.int(options), nil)
- }
-}
-
-// SetVerifyMode controls peer verification setting. See
-// http://www.openssl.org/docs/ssl/SSL_CTX_set_verify.html
-func (s *SSL) SetVerifyMode(options VerifyOptions) {
- s.SetVerify(options, s.verify_cb)
-}
-
-// SetVerifyCallback controls peer verification setting. See
-// http://www.openssl.org/docs/ssl/SSL_CTX_set_verify.html
-func (s *SSL) SetVerifyCallback(verify_cb VerifyCallback) {
- s.SetVerify(s.VerifyMode(), verify_cb)
-}
-
-// GetVerifyCallback returns callback function. See
-// http://www.openssl.org/docs/ssl/SSL_CTX_set_verify.html
-func (s *SSL) GetVerifyCallback() VerifyCallback {
- return s.verify_cb
-}
-
-// VerifyMode returns peer verification setting. See
-// http://www.openssl.org/docs/ssl/SSL_CTX_set_verify.html
-func (s *SSL) VerifyMode() VerifyOptions {
- return VerifyOptions(C.SSL_get_verify_mode(s.ssl))
-}
-
-// SetVerifyDepth controls how many certificates deep the certificate
-// verification logic is willing to follow a certificate chain. See
-// https://www.openssl.org/docs/ssl/SSL_CTX_set_verify.html
-func (s *SSL) SetVerifyDepth(depth int) {
- C.SSL_set_verify_depth(s.ssl, C.int(depth))
-}
-
-// GetVerifyDepth controls how many certificates deep the certificate
-// verification logic is willing to follow a certificate chain. See
-// https://www.openssl.org/docs/ssl/SSL_CTX_set_verify.html
-func (s *SSL) GetVerifyDepth() int {
- return int(C.SSL_get_verify_depth(s.ssl))
-}
-
-// SetSSLCtx changes context to new one. Useful for Server Name Indication (SNI)
-// rfc6066 http://tools.ietf.org/html/rfc6066. See
-// http://stackoverflow.com/questions/22373332/serving-multiple-domains-in-one-box-with-sni
-func (s *SSL) SetSSLCtx(ctx *Ctx) {
- /*
- * SSL_set_SSL_CTX() only changes certs as of 1.0.0d
- * adjust other things we care about
- */
- C.SSL_set_SSL_CTX(s.ssl, ctx.ctx)
-}
-
-//export sni_cb_thunk
-func sni_cb_thunk(p unsafe.Pointer, con *C.SSL, ad unsafe.Pointer, arg unsafe.Pointer) C.int {
- defer func() {
- if err := recover(); err != nil {
- logger.Critf("openssl: verify callback sni panic'd: %v", err)
- os.Exit(1)
- }
- }()
-
- sni_cb := pointer.Restore(p).(*Ctx).sni_cb
-
- s := &SSL{ssl: con}
- // This attaches a pointer to our SSL struct into the SNI callback.
- C.SSL_set_ex_data(s.ssl, get_ssl_idx(), pointer.Save(s))
-
- // Note: this is ctx.sni_cb, not C.sni_cb
- return C.int(sni_cb(s))
-}
diff --git a/vendor/github.com/libp2p/go-openssl/tickets.go b/vendor/github.com/libp2p/go-openssl/tickets.go
deleted file mode 100644
index 2ee8ed9b8..000000000
--- a/vendor/github.com/libp2p/go-openssl/tickets.go
+++ /dev/null
@@ -1,224 +0,0 @@
-// Copyright (C) 2017. See AUTHORS.
-//
-// Licensed under the Apache License, Version 2.0 (the "License");
-// you may not use this file except in compliance with the License.
-// You may obtain a copy of the License at
-//
-// http://www.apache.org/licenses/LICENSE-2.0
-//
-// Unless required by applicable law or agreed to in writing, software
-// distributed under the License is distributed on an "AS IS" BASIS,
-// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-// See the License for the specific language governing permissions and
-// limitations under the License.
-
-package openssl
-
-// #include "shim.h"
-import "C"
-
-import (
- "os"
- "unsafe"
-
- "github.com/mattn/go-pointer"
-)
-
-const (
- KeyNameSize = 16
-)
-
-// TicketCipherCtx describes the cipher that will be used by the ticket store
-// for encrypting the tickets. Engine may be nil if no engine is desired.
-type TicketCipherCtx struct {
- Cipher *Cipher
- Engine *Engine
-}
-
-// TicketDigestCtx describes the digest that will be used by the ticket store
-// to authenticate the data. Engine may be nil if no engine is desired.
-type TicketDigestCtx struct {
- Digest *Digest
- Engine *Engine
-}
-
-// TicketName is an identifier for the key material for a ticket.
-type TicketName [KeyNameSize]byte
-
-// TicketKey is the key material for a ticket. If this is lost, forward secrecy
-// is lost as it allows decrypting TLS sessions retroactively.
-type TicketKey struct {
- Name TicketName
- CipherKey []byte
- HMACKey []byte
- IV []byte
-}
-
-// TicketKeyManager is a manager for TicketKeys. It allows one to control the
-// lifetime of tickets, causing renewals and expirations for keys that are
-// created. Calls to the manager are serialized.
-type TicketKeyManager interface {
- // New should create a brand new TicketKey with a new name.
- New() *TicketKey
-
- // Current should return a key that is still valid.
- Current() *TicketKey
-
- // Lookup should return a key with the given name, or nil if no name
- // exists.
- Lookup(name TicketName) *TicketKey
-
- // Expired should return if the key with the given name is expired and
- // should not be used any more.
- Expired(name TicketName) bool
-
- // ShouldRenew should return if the key is still ok to use for the current
- // session, but we should send a new key for the client.
- ShouldRenew(name TicketName) bool
-}
-
-// TicketStore descibes the encryption and authentication methods the tickets
-// will use along with a key manager for generating and keeping track of the
-// secrets.
-type TicketStore struct {
- CipherCtx TicketCipherCtx
- DigestCtx TicketDigestCtx
- Keys TicketKeyManager
-}
-
-func (t *TicketStore) cipherEngine() *C.ENGINE {
- if t.CipherCtx.Engine == nil {
- return nil
- }
- return t.CipherCtx.Engine.e
-}
-
-func (t *TicketStore) digestEngine() *C.ENGINE {
- if t.DigestCtx.Engine == nil {
- return nil
- }
- return t.DigestCtx.Engine.e
-}
-
-const (
- // instruct to do a handshake
- ticket_resp_requireHandshake = 0
- // crypto context is set up correctly
- ticket_resp_sessionOk = 1
- // crypto context is ok, but the ticket should be reissued
- ticket_resp_renewSession = 2
- // we had a problem that shouldn't fall back to doing a handshake
- ticket_resp_error = -1
-
- // asked to create session crypto context
- ticket_req_newSession = 1
- // asked to load crypto context for a previous session
- ticket_req_lookupSession = 0
-)
-
-//export go_ticket_key_cb_thunk
-func go_ticket_key_cb_thunk(p unsafe.Pointer, s *C.SSL, key_name *C.uchar,
- iv *C.uchar, cctx *C.EVP_CIPHER_CTX, hctx *C.HMAC_CTX, enc C.int) C.int {
-
- // no panic's allowed. it's super hard to guarantee any state at this point
- // so just abort everything.
- defer func() {
- if err := recover(); err != nil {
- logger.Critf("openssl: ticket key callback panic'd: %v", err)
- os.Exit(1)
- }
- }()
-
- ctx := pointer.Restore(p).(*Ctx)
- store := ctx.ticket_store
- if store == nil {
- // TODO(jeff): should this be an error condition? it doesn't make sense
- // to be called if we don't have a store I believe, but that's probably
- // not worth aborting the handshake which is what I believe returning
- // an error would do.
- return ticket_resp_requireHandshake
- }
-
- ctx.ticket_store_mu.Lock()
- defer ctx.ticket_store_mu.Unlock()
-
- switch enc {
- case ticket_req_newSession:
- key := store.Keys.Current()
- if key == nil {
- key = store.Keys.New()
- if key == nil {
- return ticket_resp_requireHandshake
- }
- }
-
- C.memcpy(
- unsafe.Pointer(key_name),
- unsafe.Pointer(&key.Name[0]),
- KeyNameSize)
- C.EVP_EncryptInit_ex(
- cctx,
- store.CipherCtx.Cipher.ptr,
- store.cipherEngine(),
- (*C.uchar)(&key.CipherKey[0]),
- (*C.uchar)(&key.IV[0]))
- C.HMAC_Init_ex(
- hctx,
- unsafe.Pointer(&key.HMACKey[0]),
- C.int(len(key.HMACKey)),
- store.DigestCtx.Digest.ptr,
- store.digestEngine())
-
- return ticket_resp_sessionOk
-
- case ticket_req_lookupSession:
- var name TicketName
- C.memcpy(
- unsafe.Pointer(&name[0]),
- unsafe.Pointer(key_name),
- KeyNameSize)
-
- key := store.Keys.Lookup(name)
- if key == nil {
- return ticket_resp_requireHandshake
- }
- if store.Keys.Expired(name) {
- return ticket_resp_requireHandshake
- }
-
- C.EVP_DecryptInit_ex(
- cctx,
- store.CipherCtx.Cipher.ptr,
- store.cipherEngine(),
- (*C.uchar)(&key.CipherKey[0]),
- (*C.uchar)(&key.IV[0]))
- C.HMAC_Init_ex(
- hctx,
- unsafe.Pointer(&key.HMACKey[0]),
- C.int(len(key.HMACKey)),
- store.DigestCtx.Digest.ptr,
- store.digestEngine())
-
- if store.Keys.ShouldRenew(name) {
- return ticket_resp_renewSession
- }
-
- return ticket_resp_sessionOk
-
- default:
- return ticket_resp_error
- }
-}
-
-// SetTicketStore sets the ticket store for the context so that clients can do
-// ticket based session resumption. If the store is nil, the
-func (c *Ctx) SetTicketStore(store *TicketStore) {
- c.ticket_store = store
-
- if store == nil {
- C.X_SSL_CTX_set_tlsext_ticket_key_cb(c.ctx, nil)
- } else {
- C.X_SSL_CTX_set_tlsext_ticket_key_cb(c.ctx,
- (*[0]byte)(C.X_SSL_CTX_ticket_key_cb))
- }
-}
diff --git a/vendor/github.com/libp2p/go-openssl/utils/errors.go b/vendor/github.com/libp2p/go-openssl/utils/errors.go
deleted file mode 100644
index bab314c95..000000000
--- a/vendor/github.com/libp2p/go-openssl/utils/errors.go
+++ /dev/null
@@ -1,50 +0,0 @@
-// Copyright (C) 2014 Space Monkey, Inc.
-//
-// Licensed under the Apache License, Version 2.0 (the "License");
-// you may not use this file except in compliance with the License.
-// You may obtain a copy of the License at
-//
-// http://www.apache.org/licenses/LICENSE-2.0
-//
-// Unless required by applicable law or agreed to in writing, software
-// distributed under the License is distributed on an "AS IS" BASIS,
-// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-// See the License for the specific language governing permissions and
-// limitations under the License.
-
-package utils
-
-import (
- "errors"
- "strings"
-)
-
-// ErrorGroup collates errors
-type ErrorGroup struct {
- Errors []error
-}
-
-// Add adds an error to an existing error group
-func (e *ErrorGroup) Add(err error) {
- if err != nil {
- e.Errors = append(e.Errors, err)
- }
-}
-
-// Finalize returns an error corresponding to the ErrorGroup state. If there's
-// no errors in the group, finalize returns nil. If there's only one error,
-// Finalize returns that error. Otherwise, Finalize will make a new error
-// consisting of the messages from the constituent errors.
-func (e *ErrorGroup) Finalize() error {
- if len(e.Errors) == 0 {
- return nil
- }
- if len(e.Errors) == 1 {
- return e.Errors[0]
- }
- msgs := make([]string, 0, len(e.Errors))
- for _, err := range e.Errors {
- msgs = append(msgs, err.Error())
- }
- return errors.New(strings.Join(msgs, "\n"))
-}
diff --git a/vendor/github.com/libp2p/go-openssl/utils/future.go b/vendor/github.com/libp2p/go-openssl/utils/future.go
deleted file mode 100644
index df2d8312c..000000000
--- a/vendor/github.com/libp2p/go-openssl/utils/future.go
+++ /dev/null
@@ -1,79 +0,0 @@
-// Copyright (C) 2014 Space Monkey, Inc.
-//
-// Licensed under the Apache License, Version 2.0 (the "License");
-// you may not use this file except in compliance with the License.
-// You may obtain a copy of the License at
-//
-// http://www.apache.org/licenses/LICENSE-2.0
-//
-// Unless required by applicable law or agreed to in writing, software
-// distributed under the License is distributed on an "AS IS" BASIS,
-// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-// See the License for the specific language governing permissions and
-// limitations under the License.
-
-package utils
-
-import (
- "sync"
-)
-
-// Future is a type that is essentially the inverse of a channel. With a
-// channel, you have multiple senders and one receiver. With a future, you can
-// have multiple receivers and one sender. Additionally, a future protects
-// against double-sends. Since this is usually used for returning function
-// results, we also capture and return error values as well. Use NewFuture
-// to initialize.
-type Future struct {
- mutex *sync.Mutex
- cond *sync.Cond
- received bool
- val interface{}
- err error
-}
-
-// NewFuture returns an initialized and ready Future.
-func NewFuture() *Future {
- mutex := &sync.Mutex{}
- return &Future{
- mutex: mutex,
- cond: sync.NewCond(mutex),
- received: false,
- val: nil,
- err: nil,
- }
-}
-
-// Get blocks until the Future has a value set.
-func (f *Future) Get() (interface{}, error) {
- f.mutex.Lock()
- defer f.mutex.Unlock()
- for {
- if f.received {
- return f.val, f.err
- }
- f.cond.Wait()
- }
-}
-
-// Fired returns whether or not a value has been set. If Fired is true, Get
-// won't block.
-func (f *Future) Fired() bool {
- f.mutex.Lock()
- defer f.mutex.Unlock()
- return f.received
-}
-
-// Set provides the value to present and future Get calls. If Set has already
-// been called, this is a no-op.
-func (f *Future) Set(val interface{}, err error) {
- f.mutex.Lock()
- defer f.mutex.Unlock()
- if f.received {
- return
- }
- f.received = true
- f.val = val
- f.err = err
- f.cond.Broadcast()
-}
diff --git a/vendor/github.com/libp2p/go-reuseport/README.md b/vendor/github.com/libp2p/go-reuseport/README.md
index b99bfa40b..d511adebc 100644
--- a/vendor/github.com/libp2p/go-reuseport/README.md
+++ b/vendor/github.com/libp2p/go-reuseport/README.md
@@ -1,14 +1,13 @@
# go-reuseport
[![](https://img.shields.io/badge/made%20by-Protocol%20Labs-blue.svg?style=flat-square)](https://protocol.ai)
+[![GoDoc](https://godoc.org/github.com/libp2p/go-reuseport?status.svg)](https://godoc.org/github.com/libp2p/go-reuseport)
[![](https://img.shields.io/badge/project-libp2p-yellow.svg?style=flat-square)](https://libp2p.io/)
[![](https://img.shields.io/badge/freenode-%23libp2p-yellow.svg?style=flat-square)](https://webchat.freenode.net/?channels=%23libp2p)
[![codecov](https://codecov.io/gh/libp2p/go-reuseport/branch/master/graph/badge.svg)](https://codecov.io/gh/libp2p/go-reuseport)
[![Travis CI](https://travis-ci.org/libp2p/go-reuseport.svg?branch=master)](https://travis-ci.org/libp2p/go-reuseport)
[![Discourse posts](https://img.shields.io/discourse/https/discuss.libp2p.io/posts.svg)](https://discuss.libp2p.io)
-**NOTE:** This package REQUIRES go >= 1.11.
-
This package enables listening and dialing from _the same_ TCP or UDP port.
This means that the following sockopts may be set:
@@ -17,8 +16,6 @@ SO_REUSEADDR
SO_REUSEPORT
```
-- godoc: https://godoc.org/github.com/libp2p/go-reuseport
-
This is a simple package to help with address reuse. This is particularly
important when attempting to do TCP NAT holepunching, which requires a process
to both Listen and Dial on the same TCP port. This package provides some
diff --git a/vendor/github.com/libp2p/go-reuseport/control_unix.go b/vendor/github.com/libp2p/go-reuseport/control_unix.go
index 0cc5da005..4197d1f74 100644
--- a/vendor/github.com/libp2p/go-reuseport/control_unix.go
+++ b/vendor/github.com/libp2p/go-reuseport/control_unix.go
@@ -1,5 +1,4 @@
//go:build !plan9 && !windows && !wasm
-// +build !plan9,!windows,!wasm
package reuseport
@@ -9,18 +8,16 @@ import (
"golang.org/x/sys/unix"
)
-func Control(network, address string, c syscall.RawConn) error {
- var err error
- c.Control(func(fd uintptr) {
+func Control(network, address string, c syscall.RawConn) (err error) {
+ controlErr := c.Control(func(fd uintptr) {
err = unix.SetsockoptInt(int(fd), unix.SOL_SOCKET, unix.SO_REUSEADDR, 1)
if err != nil {
return
}
-
err = unix.SetsockoptInt(int(fd), unix.SOL_SOCKET, unix.SO_REUSEPORT, 1)
- if err != nil {
- return
- }
})
- return err
+ if controlErr != nil {
+ err = controlErr
+ }
+ return
}
diff --git a/vendor/github.com/libp2p/go-reuseport/control_wasm.go b/vendor/github.com/libp2p/go-reuseport/control_wasm.go
index f37ed97c2..8b22fade5 100644
--- a/vendor/github.com/libp2p/go-reuseport/control_wasm.go
+++ b/vendor/github.com/libp2p/go-reuseport/control_wasm.go
@@ -1,5 +1,4 @@
//go:build wasm
-// +build wasm
package reuseport
diff --git a/vendor/github.com/libp2p/go-reuseport/control_windows.go b/vendor/github.com/libp2p/go-reuseport/control_windows.go
index 840534c97..c45e43f4b 100644
--- a/vendor/github.com/libp2p/go-reuseport/control_windows.go
+++ b/vendor/github.com/libp2p/go-reuseport/control_windows.go
@@ -7,7 +7,11 @@ import (
)
func Control(network, address string, c syscall.RawConn) (err error) {
- return c.Control(func(fd uintptr) {
+ controlErr := c.Control(func(fd uintptr) {
err = windows.SetsockoptInt(windows.Handle(fd), windows.SOL_SOCKET, windows.SO_REUSEADDR, 1)
})
+ if controlErr != nil {
+ err = controlErr
+ }
+ return
}
diff --git a/vendor/github.com/libp2p/go-reuseport/interface.go b/vendor/github.com/libp2p/go-reuseport/interface.go
index db6163a17..b864da8c5 100644
--- a/vendor/github.com/libp2p/go-reuseport/interface.go
+++ b/vendor/github.com/libp2p/go-reuseport/interface.go
@@ -4,14 +4,14 @@
//
// For example:
//
-// // listen on the same port. oh yeah.
-// l1, _ := reuse.Listen("tcp", "127.0.0.1:1234")
-// l2, _ := reuse.Listen("tcp", "127.0.0.1:1234")
+// // listen on the same port. oh yeah.
+// l1, _ := reuse.Listen("tcp", "127.0.0.1:1234")
+// l2, _ := reuse.Listen("tcp", "127.0.0.1:1234")
//
-// // dial from the same port. oh yeah.
-// l1, _ := reuse.Listen("tcp", "127.0.0.1:1234")
-// l2, _ := reuse.Listen("tcp", "127.0.0.1:1235")
-// c, _ := reuse.Dial("tcp", "127.0.0.1:1234", "127.0.0.1:1235")
+// // dial from the same port. oh yeah.
+// l1, _ := reuse.Listen("tcp", "127.0.0.1:1234")
+// l2, _ := reuse.Listen("tcp", "127.0.0.1:1235")
+// c, _ := reuse.Dial("tcp", "127.0.0.1:1234", "127.0.0.1:1235")
//
// Note: cant dial self because tcp/ip stacks use 4-tuples to identify connections,
// and doing so would clash.
@@ -21,6 +21,7 @@ import (
"context"
"fmt"
"net"
+ "time"
)
// Available returns whether or not SO_REUSEPORT or equivalent behaviour is
@@ -47,10 +48,17 @@ func ListenPacket(network, address string) (net.PacketConn, error) {
return listenConfig.ListenPacket(context.Background(), network, address)
}
-// Dial dials the given network and address. see net.Dialer.Dial
+// Dial dials the given network and address. see net.Dial
// Returns a net.Conn created from a file descriptor for a socket
// with SO_REUSEPORT and SO_REUSEADDR option set.
func Dial(network, laddr, raddr string) (net.Conn, error) {
+ return DialTimeout(network, laddr, raddr, time.Duration(0))
+}
+
+// Dial dials the given network and address, with the given timeout. see
+// net.DialTimeout Returns a net.Conn created from a file descriptor for
+// a socket with SO_REUSEPORT and SO_REUSEADDR option set.
+func DialTimeout(network, laddr, raddr string, timeout time.Duration) (net.Conn, error) {
nla, err := ResolveAddr(network, laddr)
if err != nil {
return nil, fmt.Errorf("failed to resolve local addr: %w", err)
@@ -58,6 +66,7 @@ func Dial(network, laddr, raddr string) (net.Conn, error) {
d := net.Dialer{
Control: Control,
LocalAddr: nla,
+ Timeout: timeout,
}
return d.Dial(network, raddr)
}
diff --git a/vendor/github.com/libp2p/go-reuseport/version.json b/vendor/github.com/libp2p/go-reuseport/version.json
index 1437d5b73..a654d65ab 100644
--- a/vendor/github.com/libp2p/go-reuseport/version.json
+++ b/vendor/github.com/libp2p/go-reuseport/version.json
@@ -1,3 +1,3 @@
{
- "version": "v0.2.0"
+ "version": "v0.3.0"
}
diff --git a/vendor/github.com/libp2p/go-yamux/v3/version.json b/vendor/github.com/libp2p/go-yamux/v3/version.json
deleted file mode 100644
index 86d7f5ec6..000000000
--- a/vendor/github.com/libp2p/go-yamux/v3/version.json
+++ /dev/null
@@ -1,3 +0,0 @@
-{
- "version": "v3.1.2"
-}
diff --git a/vendor/github.com/libp2p/go-yamux/v3/.gitignore b/vendor/github.com/libp2p/go-yamux/v4/.gitignore
similarity index 100%
rename from vendor/github.com/libp2p/go-yamux/v3/.gitignore
rename to vendor/github.com/libp2p/go-yamux/v4/.gitignore
diff --git a/vendor/github.com/libp2p/go-yamux/v3/LICENSE b/vendor/github.com/libp2p/go-yamux/v4/LICENSE
similarity index 100%
rename from vendor/github.com/libp2p/go-yamux/v3/LICENSE
rename to vendor/github.com/libp2p/go-yamux/v4/LICENSE
diff --git a/vendor/github.com/libp2p/go-yamux/v3/LICENSE-BSD b/vendor/github.com/libp2p/go-yamux/v4/LICENSE-BSD
similarity index 100%
rename from vendor/github.com/libp2p/go-yamux/v3/LICENSE-BSD
rename to vendor/github.com/libp2p/go-yamux/v4/LICENSE-BSD
diff --git a/vendor/github.com/libp2p/go-yamux/v3/README.md b/vendor/github.com/libp2p/go-yamux/v4/README.md
similarity index 100%
rename from vendor/github.com/libp2p/go-yamux/v3/README.md
rename to vendor/github.com/libp2p/go-yamux/v4/README.md
diff --git a/vendor/github.com/libp2p/go-yamux/v3/addr.go b/vendor/github.com/libp2p/go-yamux/v4/addr.go
similarity index 100%
rename from vendor/github.com/libp2p/go-yamux/v3/addr.go
rename to vendor/github.com/libp2p/go-yamux/v4/addr.go
diff --git a/vendor/github.com/libp2p/go-yamux/v3/const.go b/vendor/github.com/libp2p/go-yamux/v4/const.go
similarity index 100%
rename from vendor/github.com/libp2p/go-yamux/v3/const.go
rename to vendor/github.com/libp2p/go-yamux/v4/const.go
diff --git a/vendor/github.com/libp2p/go-yamux/v3/deadline.go b/vendor/github.com/libp2p/go-yamux/v4/deadline.go
similarity index 100%
rename from vendor/github.com/libp2p/go-yamux/v3/deadline.go
rename to vendor/github.com/libp2p/go-yamux/v4/deadline.go
diff --git a/vendor/github.com/libp2p/go-yamux/v3/mux.go b/vendor/github.com/libp2p/go-yamux/v4/mux.go
similarity index 89%
rename from vendor/github.com/libp2p/go-yamux/v3/mux.go
rename to vendor/github.com/libp2p/go-yamux/v4/mux.go
index 458a0e210..edd2e388b 100644
--- a/vendor/github.com/libp2p/go-yamux/v3/mux.go
+++ b/vendor/github.com/libp2p/go-yamux/v4/mux.go
@@ -25,6 +25,9 @@ type Config struct {
// KeepAliveInterval is how often to perform the keep alive
KeepAliveInterval time.Duration
+ // MeasureRTTInterval is how often to re-measure the round trip time
+ MeasureRTTInterval time.Duration
+
// ConnectionWriteTimeout is meant to be a "safety valve" timeout after
// we which will suspect a problem with the underlying connection and
// close it. This is only applied to writes, where's there's generally
@@ -69,6 +72,7 @@ func DefaultConfig() *Config {
PingBacklog: 32,
EnableKeepAlive: true,
KeepAliveInterval: 30 * time.Second,
+ MeasureRTTInterval: 30 * time.Second,
ConnectionWriteTimeout: 10 * time.Second,
MaxIncomingStreams: 1000,
InitialStreamWindowSize: initialStreamWindow,
@@ -88,6 +92,10 @@ func VerifyConfig(config *Config) error {
if config.KeepAliveInterval == 0 {
return fmt.Errorf("keep-alive interval must be positive")
}
+ if config.MeasureRTTInterval == 0 {
+ return fmt.Errorf("measure-rtt interval must be positive")
+ }
+
if config.InitialStreamWindowSize < initialStreamWindow {
return errors.New("InitialStreamWindowSize must be larger or equal 256 kB")
}
@@ -109,7 +117,7 @@ func VerifyConfig(config *Config) error {
// Server is used to initialize a new server-side connection.
// There must be at most one server-side connection. If a nil config is
// provided, the DefaultConfiguration will be used.
-func Server(conn net.Conn, config *Config, mm MemoryManager) (*Session, error) {
+func Server(conn net.Conn, config *Config, mm func() (MemoryManager, error)) (*Session, error) {
if config == nil {
config = DefaultConfig()
}
@@ -121,7 +129,7 @@ func Server(conn net.Conn, config *Config, mm MemoryManager) (*Session, error) {
// Client is used to initialize a new client-side connection.
// There must be at most one client-side connection.
-func Client(conn net.Conn, config *Config, mm MemoryManager) (*Session, error) {
+func Client(conn net.Conn, config *Config, mm func() (MemoryManager, error)) (*Session, error) {
if config == nil {
config = DefaultConfig()
}
diff --git a/vendor/github.com/libp2p/go-yamux/v3/ping.go b/vendor/github.com/libp2p/go-yamux/v4/ping.go
similarity index 100%
rename from vendor/github.com/libp2p/go-yamux/v3/ping.go
rename to vendor/github.com/libp2p/go-yamux/v4/ping.go
diff --git a/vendor/github.com/libp2p/go-yamux/v3/session.go b/vendor/github.com/libp2p/go-yamux/v4/session.go
similarity index 90%
rename from vendor/github.com/libp2p/go-yamux/v3/session.go
rename to vendor/github.com/libp2p/go-yamux/v4/session.go
index 38adcd6e9..aecc20b60 100644
--- a/vendor/github.com/libp2p/go-yamux/v3/session.go
+++ b/vendor/github.com/libp2p/go-yamux/v4/session.go
@@ -5,7 +5,6 @@ import (
"context"
"fmt"
"io"
- "io/ioutil"
"log"
"math"
"net"
@@ -23,19 +22,24 @@ import (
// Memory is allocated:
// 1. When opening / accepting a new stream. This uses the highest priority.
// 2. When trying to increase the stream receive window. This uses a lower priority.
+// This is a subset of the libp2p's resource manager ResourceScopeSpan interface.
type MemoryManager interface {
- // ReserveMemory reserves memory / buffer.
ReserveMemory(size int, prio uint8) error
+
// ReleaseMemory explicitly releases memory previously reserved with ReserveMemory
ReleaseMemory(size int)
+
+ // Done ends the span and releases associated resources.
+ Done()
}
type nullMemoryManagerImpl struct{}
func (n nullMemoryManagerImpl) ReserveMemory(size int, prio uint8) error { return nil }
func (n nullMemoryManagerImpl) ReleaseMemory(size int) {}
+func (n nullMemoryManagerImpl) Done() {}
-var nullMemoryManager MemoryManager = &nullMemoryManagerImpl{}
+var nullMemoryManager = &nullMemoryManagerImpl{}
// Session is used to wrap a reliable ordered connection and to
// multiplex it into multiple streams.
@@ -66,7 +70,7 @@ type Session struct {
// reader is a buffered reader
reader io.Reader
- memoryManager MemoryManager
+ newMemoryManager func() (MemoryManager, error)
// pings is used to track inflight pings
pingLock sync.Mutex
@@ -121,31 +125,31 @@ type Session struct {
}
// newSession is used to construct a new session
-func newSession(config *Config, conn net.Conn, client bool, readBuf int, memoryManager MemoryManager) *Session {
+func newSession(config *Config, conn net.Conn, client bool, readBuf int, newMemoryManager func() (MemoryManager, error)) *Session {
var reader io.Reader = conn
if readBuf > 0 {
reader = bufio.NewReaderSize(reader, readBuf)
}
- if memoryManager == nil {
- memoryManager = nullMemoryManager
+ if newMemoryManager == nil {
+ newMemoryManager = func() (MemoryManager, error) { return nullMemoryManager, nil }
}
s := &Session{
- config: config,
- client: client,
- logger: log.New(config.LogOutput, "", log.LstdFlags),
- conn: conn,
- reader: reader,
- streams: make(map[uint32]*Stream),
- inflight: make(map[uint32]struct{}),
- synCh: make(chan struct{}, config.AcceptBacklog),
- acceptCh: make(chan *Stream, config.AcceptBacklog),
- sendCh: make(chan []byte, 64),
- pongCh: make(chan uint32, config.PingBacklog),
- pingCh: make(chan uint32),
- recvDoneCh: make(chan struct{}),
- sendDoneCh: make(chan struct{}),
- shutdownCh: make(chan struct{}),
- memoryManager: memoryManager,
+ config: config,
+ client: client,
+ logger: log.New(config.LogOutput, "", log.LstdFlags),
+ conn: conn,
+ reader: reader,
+ streams: make(map[uint32]*Stream),
+ inflight: make(map[uint32]struct{}),
+ synCh: make(chan struct{}, config.AcceptBacklog),
+ acceptCh: make(chan *Stream, config.AcceptBacklog),
+ sendCh: make(chan []byte, 64),
+ pongCh: make(chan uint32, config.PingBacklog),
+ pingCh: make(chan uint32),
+ recvDoneCh: make(chan struct{}),
+ sendDoneCh: make(chan struct{}),
+ shutdownCh: make(chan struct{}),
+ newMemoryManager: newMemoryManager,
}
if client {
s.nextStreamID = 1
@@ -157,7 +161,7 @@ func newSession(config *Config, conn net.Conn, client bool, readBuf int, memoryM
}
go s.recv()
go s.send()
- go s.measureRTT()
+ go s.startMeasureRTT()
return s
}
@@ -212,7 +216,11 @@ func (s *Session) OpenStream(ctx context.Context) (*Stream, error) {
return nil, s.shutdownErr
}
- if err := s.memoryManager.ReserveMemory(initialStreamWindow, 255); err != nil {
+ span, err := s.newMemoryManager()
+ if err != nil {
+ return nil, fmt.Errorf("failed to create resource scope span: %w", err)
+ }
+ if err := span.ReserveMemory(initialStreamWindow, 255); err != nil {
return nil, err
}
@@ -220,6 +228,7 @@ GET_ID:
// Get an ID, and check for stream exhaustion
id := atomic.LoadUint32(&s.nextStreamID)
if id >= math.MaxUint32-1 {
+ span.Done()
return nil, ErrStreamsExhausted
}
if !atomic.CompareAndSwapUint32(&s.nextStreamID, id, id+2) {
@@ -227,7 +236,7 @@ GET_ID:
}
// Register the stream
- stream := newStream(s, id, streamInit, initialStreamWindow)
+ stream := newStream(s, id, streamInit, initialStreamWindow, span)
s.streamLock.Lock()
s.streams[id] = stream
s.inflight[id] = struct{}{}
@@ -235,6 +244,7 @@ GET_ID:
// Send the window update to create
if err := stream.sendWindowUpdate(); err != nil {
+ defer span.Done()
select {
case <-s.synCh:
default:
@@ -294,14 +304,10 @@ func (s *Session) Close() error {
s.streamLock.Lock()
defer s.streamLock.Unlock()
- var memory int
for id, stream := range s.streams {
- memory += stream.memory
stream.forceClose()
delete(s.streams, id)
- }
- if memory > 0 {
- s.memoryManager.ReleaseMemory(memory)
+ stream.memorySpan.Done()
}
return nil
}
@@ -335,7 +341,25 @@ func (s *Session) measureRTT() {
if err != nil {
return
}
- atomic.StoreInt64(&s.rtt, rtt.Nanoseconds())
+ if !atomic.CompareAndSwapInt64(&s.rtt, 0, rtt.Nanoseconds()) {
+ prev := atomic.LoadInt64(&s.rtt)
+ smoothedRTT := prev/2 + rtt.Nanoseconds()/2
+ atomic.StoreInt64(&s.rtt, smoothedRTT)
+ }
+}
+
+func (s *Session) startMeasureRTT() {
+ s.measureRTT()
+ t := time.NewTicker(s.config.MeasureRTTInterval)
+ defer t.Stop()
+ for {
+ select {
+ case <-s.CloseChan():
+ return
+ case <-t.C:
+ s.measureRTT()
+ }
+ }
}
// 0 if we don't yet have a measurement
@@ -692,7 +716,7 @@ func (s *Session) handleStreamMessage(hdr header) error {
// Drain any data on the wire
if hdr.MsgType() == typeData && hdr.Length() > 0 {
s.logger.Printf("[WARN] yamux: Discarding data for stream: %d", id)
- if _, err := io.CopyN(ioutil.Discard, s.reader, int64(hdr.Length())); err != nil {
+ if _, err := io.CopyN(io.Discard, s.reader, int64(hdr.Length())); err != nil {
s.logger.Printf("[ERR] yamux: Failed to discard data: %v", err)
return nil
}
@@ -782,10 +806,14 @@ func (s *Session) incomingStream(id uint32) error {
}
// Allocate a new stream
- if err := s.memoryManager.ReserveMemory(initialStreamWindow, 255); err != nil {
+ span, err := s.newMemoryManager()
+ if err != nil {
+ return fmt.Errorf("failed to create resource span: %w", err)
+ }
+ if err := span.ReserveMemory(initialStreamWindow, 255); err != nil {
return err
}
- stream := newStream(s, id, streamSYNReceived, initialStreamWindow)
+ stream := newStream(s, id, streamSYNReceived, initialStreamWindow, span)
s.streamLock.Lock()
defer s.streamLock.Unlock()
@@ -796,14 +824,14 @@ func (s *Session) incomingStream(id uint32) error {
if sendErr := s.sendMsg(s.goAway(goAwayProtoErr), nil, nil); sendErr != nil {
s.logger.Printf("[WARN] yamux: failed to send go away: %v", sendErr)
}
- s.memoryManager.ReleaseMemory(stream.memory)
+ span.Done()
return ErrDuplicateStream
}
if s.numIncomingStreams >= s.config.MaxIncomingStreams {
// too many active streams at the same time
s.logger.Printf("[WARN] yamux: MaxIncomingStreams exceeded, forcing stream reset")
- s.memoryManager.ReleaseMemory(stream.memory)
+ defer span.Done()
hdr := encode(typeWindowUpdate, flagRST, id, 0)
return s.sendMsg(hdr, nil, nil)
}
@@ -818,6 +846,7 @@ func (s *Session) incomingStream(id uint32) error {
return nil
default:
// Backlog exceeded! RST the stream
+ defer span.Done()
s.logger.Printf("[WARN] yamux: backlog exceeded, forcing stream reset")
s.deleteStream(id)
hdr := encode(typeWindowUpdate, flagRST, id, 0)
@@ -856,8 +885,8 @@ func (s *Session) deleteStream(id uint32) {
s.numIncomingStreams--
}
}
- s.memoryManager.ReleaseMemory(str.memory)
delete(s.streams, id)
+ str.memorySpan.Done()
}
// establishStream is used to mark a stream that was in the
diff --git a/vendor/github.com/libp2p/go-yamux/v3/spec.md b/vendor/github.com/libp2p/go-yamux/v4/spec.md
similarity index 100%
rename from vendor/github.com/libp2p/go-yamux/v3/spec.md
rename to vendor/github.com/libp2p/go-yamux/v4/spec.md
diff --git a/vendor/github.com/libp2p/go-yamux/v3/stream.go b/vendor/github.com/libp2p/go-yamux/v4/stream.go
similarity index 96%
rename from vendor/github.com/libp2p/go-yamux/v3/stream.go
rename to vendor/github.com/libp2p/go-yamux/v4/stream.go
index 9175268c3..88d551e65 100644
--- a/vendor/github.com/libp2p/go-yamux/v3/stream.go
+++ b/vendor/github.com/libp2p/go-yamux/v4/stream.go
@@ -31,7 +31,7 @@ const (
type Stream struct {
sendWindow uint32
- memory int
+ memorySpan MemoryManager
id uint32
session *Session
@@ -53,15 +53,15 @@ type Stream struct {
// newStream is used to construct a new stream within a given session for an ID.
// It assumes that a memory allocation has been obtained for the initialWindow.
-func newStream(session *Session, id uint32, state streamState, initialWindow uint32) *Stream {
+func newStream(session *Session, id uint32, state streamState, initialWindow uint32, memorySpan MemoryManager) *Stream {
s := &Stream{
id: id,
session: session,
state: state,
sendWindow: initialStreamWindow,
- memory: int(initialWindow),
readDeadline: makePipeDeadline(),
writeDeadline: makePipeDeadline(),
+ memorySpan: memorySpan,
// Initialize the recvBuf with initialStreamWindow, not config.InitialStreamWindowSize.
// The peer isn't allowed to send more data than initialStreamWindow until we've sent
// the first window update (which will grant it up to config.InitialStreamWindowSize).
@@ -229,9 +229,8 @@ func (s *Stream) sendWindowUpdate() error {
}
if recvWindow > s.recvWindow {
grow := recvWindow - s.recvWindow
- if err := s.session.memoryManager.ReserveMemory(int(grow), 128); err == nil {
+ if err := s.memorySpan.ReserveMemory(int(grow), 128); err == nil {
s.recvWindow = recvWindow
- s.memory += int(grow)
_, delta = s.recvBuf.GrowTo(s.recvWindow, true)
}
}
@@ -385,22 +384,24 @@ func (s *Stream) cleanup() {
// based on set flags, if any. Lock must be held
func (s *Stream) processFlags(flags uint16) {
// Close the stream without holding the state lock
- closeStream := false
+ var closeStream bool
defer func() {
if closeStream {
s.cleanup()
}
}()
- s.stateLock.Lock()
- defer s.stateLock.Unlock()
if flags&flagACK == flagACK {
+ s.stateLock.Lock()
if s.state == streamSYNSent {
s.state = streamEstablished
}
+ s.stateLock.Unlock()
s.session.establishStream(s.id)
}
if flags&flagFIN == flagFIN {
+ var notify bool
+ s.stateLock.Lock()
if s.readState == halfOpen {
s.readState = halfClosed
if s.writeState != halfOpen {
@@ -408,10 +409,15 @@ func (s *Stream) processFlags(flags uint16) {
closeStream = true
s.state = streamFinished
}
+ notify = true
+ }
+ s.stateLock.Unlock()
+ if notify {
s.notifyWaiting()
}
}
if flags&flagRST == flagRST {
+ s.stateLock.Lock()
if s.readState == halfOpen {
s.readState = halfReset
}
@@ -419,6 +425,7 @@ func (s *Stream) processFlags(flags uint16) {
s.writeState = halfReset
}
s.state = streamFinished
+ s.stateLock.Unlock()
closeStream = true
s.notifyWaiting()
}
diff --git a/vendor/github.com/libp2p/go-yamux/v3/util.go b/vendor/github.com/libp2p/go-yamux/v4/util.go
similarity index 87%
rename from vendor/github.com/libp2p/go-yamux/v3/util.go
rename to vendor/github.com/libp2p/go-yamux/v4/util.go
index aeccd24b8..62bf0d1c9 100644
--- a/vendor/github.com/libp2p/go-yamux/v3/util.go
+++ b/vendor/github.com/libp2p/go-yamux/v4/util.go
@@ -40,30 +40,28 @@ func min(values ...uint32) uint32 {
// The segmented buffer looks like:
//
-// | data | empty space |
-// < window (10) >
-// < len (5) > < cap (5) >
+// | data | empty space |
+// < window (10) >
+// < len (5) > < cap (5) >
//
// As data is read, the buffer gets updated like so:
//
-// | data | empty space |
-// < window (8) >
-// < len (3) > < cap (5) >
+// | data | empty space |
+// < window (8) >
+// < len (3) > < cap (5) >
//
// It can then grow as follows (given a "max" of 10):
//
-//
-// | data | empty space |
-// < window (10) >
-// < len (3) > < cap (7) >
+// | data | empty space |
+// < window (10) >
+// < len (3) > < cap (7) >
//
// Data can then be written into the empty space, expanding len,
// and shrinking cap:
//
-// | data | empty space |
-// < window (10) >
-// < len (5) > < cap (5) >
-//
+// | data | empty space |
+// < window (10) >
+// < len (5) > < cap (5) >
type segmentedBuffer struct {
cap uint32
len uint32
diff --git a/vendor/github.com/libp2p/go-yamux/v4/version.json b/vendor/github.com/libp2p/go-yamux/v4/version.json
new file mode 100644
index 000000000..2354b515d
--- /dev/null
+++ b/vendor/github.com/libp2p/go-yamux/v4/version.json
@@ -0,0 +1,3 @@
+{
+ "version": "v4.0.0"
+}
diff --git a/vendor/github.com/lucas-clemente/quic-go/README.md b/vendor/github.com/lucas-clemente/quic-go/README.md
deleted file mode 100644
index fc25e0b48..000000000
--- a/vendor/github.com/lucas-clemente/quic-go/README.md
+++ /dev/null
@@ -1,61 +0,0 @@
-# A QUIC implementation in pure Go
-
-
-
-[![PkgGoDev](https://pkg.go.dev/badge/github.com/lucas-clemente/quic-go)](https://pkg.go.dev/github.com/lucas-clemente/quic-go)
-[![Code Coverage](https://img.shields.io/codecov/c/github/lucas-clemente/quic-go/master.svg?style=flat-square)](https://codecov.io/gh/lucas-clemente/quic-go/)
-
-quic-go is an implementation of the QUIC protocol ([RFC 9000](https://datatracker.ietf.org/doc/html/rfc9000), [RFC 9001](https://datatracker.ietf.org/doc/html/rfc9001), [RFC 9002](https://datatracker.ietf.org/doc/html/rfc9002)) in Go, including the [Unreliable Datagram Extension, RFC 9221](https://datatracker.ietf.org/doc/html/rfc9221). It has support for HTTP/3 [RFC 9114](https://datatracker.ietf.org/doc/html/rfc9114).
-
-In addition the RFCs listed above, it currently implements the [IETF QUIC draft-29](https://tools.ietf.org/html/draft-ietf-quic-transport-29). Support for draft-29 will eventually be dropped, as it is phased out of the ecosystem.
-
-## Guides
-
-*We currently support Go 1.16.x, Go 1.17.x, and Go 1.18.x.*
-
-Running tests:
-
- go test ./...
-
-### QUIC without HTTP/3
-
-Take a look at [this echo example](example/echo/echo.go).
-
-## Usage
-
-### As a server
-
-See the [example server](example/main.go). Starting a QUIC server is very similar to the standard lib http in go:
-
-```go
-http.Handle("/", http.FileServer(http.Dir(wwwDir)))
-http3.ListenAndServeQUIC("localhost:4242", "/path/to/cert/chain.pem", "/path/to/privkey.pem", nil)
-```
-
-### As a client
-
-See the [example client](example/client/main.go). Use a `http3.RoundTripper` as a `Transport` in a `http.Client`.
-
-```go
-http.Client{
- Transport: &http3.RoundTripper{},
-}
-```
-
-## Projects using quic-go
-
-| Project | Description | Stars |
-|------------------------------------------------------|--------------------------------------------------------------------------------------------------------|-------|
-| [algernon](https://github.com/xyproto/algernon) | Small self-contained pure-Go web server with Lua, Markdown, HTTP/2, QUIC, Redis and PostgreSQL support | ![GitHub Repo stars](https://img.shields.io/github/stars/xyproto/algernon?style=flat-square) |
-| [caddy](https://github.com/caddyserver/caddy/) | Fast, multi-platform web server with automatic HTTPS | ![GitHub Repo stars](https://img.shields.io/github/stars/caddyserver/caddy?style=flat-square) |
-| [go-ipfs](https://github.com/ipfs/go-ipfs) | IPFS implementation in go | ![GitHub Repo stars](https://img.shields.io/github/stars/ipfs/go-ipfs?style=flat-square) |
-| [syncthing](https://github.com/syncthing/syncthing/) | Open Source Continuous File Synchronization | ![GitHub Repo stars](https://img.shields.io/github/stars/syncthing/syncthing?style=flat-square) |
-| [traefik](https://github.com/traefik/traefik) | The Cloud Native Application Proxy | ![GitHub Repo stars](https://img.shields.io/github/stars/traefik/traefik?style=flat-square) |
-| [v2ray-core](https://github.com/v2fly/v2ray-core) | A platform for building proxies to bypass network restrictions | ![GitHub Repo stars](https://img.shields.io/github/stars/v2fly/v2ray-core?style=flat-square) |
-| [cloudflared](https://github.com/cloudflare/cloudflared) | A tunneling daemon that proxies traffic from the Cloudflare network to your origins | ![GitHub Repo stars](https://img.shields.io/github/stars/cloudflare/cloudflared?style=flat-square) |
-| [OONI Probe](https://github.com/ooni/probe-cli) | The Open Observatory of Network Interference (OONI) aims to empower decentralized efforts in documenting Internet censorship around the world. | ![GitHub Repo stars](https://img.shields.io/github/stars/ooni/probe-cli?style=flat-square) |
-
-
-## Contributing
-
-We are always happy to welcome new contributors! We have a number of self-contained issues that are suitable for first-time contributors, they are tagged with [help wanted](https://github.com/lucas-clemente/quic-go/issues?q=is%3Aissue+is%3Aopen+label%3A%22help+wanted%22). If you have any questions, please feel free to reach out by opening an issue or leaving a comment.
diff --git a/vendor/github.com/lucas-clemente/quic-go/closed_conn.go b/vendor/github.com/lucas-clemente/quic-go/closed_conn.go
deleted file mode 100644
index 35c2d7390..000000000
--- a/vendor/github.com/lucas-clemente/quic-go/closed_conn.go
+++ /dev/null
@@ -1,112 +0,0 @@
-package quic
-
-import (
- "sync"
-
- "github.com/lucas-clemente/quic-go/internal/protocol"
- "github.com/lucas-clemente/quic-go/internal/utils"
-)
-
-// A closedLocalConn is a connection that we closed locally.
-// When receiving packets for such a connection, we need to retransmit the packet containing the CONNECTION_CLOSE frame,
-// with an exponential backoff.
-type closedLocalConn struct {
- conn sendConn
- connClosePacket []byte
-
- closeOnce sync.Once
- closeChan chan struct{} // is closed when the connection is closed or destroyed
-
- receivedPackets chan *receivedPacket
- counter uint64 // number of packets received
-
- perspective protocol.Perspective
-
- logger utils.Logger
-}
-
-var _ packetHandler = &closedLocalConn{}
-
-// newClosedLocalConn creates a new closedLocalConn and runs it.
-func newClosedLocalConn(
- conn sendConn,
- connClosePacket []byte,
- perspective protocol.Perspective,
- logger utils.Logger,
-) packetHandler {
- s := &closedLocalConn{
- conn: conn,
- connClosePacket: connClosePacket,
- perspective: perspective,
- logger: logger,
- closeChan: make(chan struct{}),
- receivedPackets: make(chan *receivedPacket, 64),
- }
- go s.run()
- return s
-}
-
-func (s *closedLocalConn) run() {
- for {
- select {
- case p := <-s.receivedPackets:
- s.handlePacketImpl(p)
- case <-s.closeChan:
- return
- }
- }
-}
-
-func (s *closedLocalConn) handlePacket(p *receivedPacket) {
- select {
- case s.receivedPackets <- p:
- default:
- }
-}
-
-func (s *closedLocalConn) handlePacketImpl(_ *receivedPacket) {
- s.counter++
- // exponential backoff
- // only send a CONNECTION_CLOSE for the 1st, 2nd, 4th, 8th, 16th, ... packet arriving
- for n := s.counter; n > 1; n = n / 2 {
- if n%2 != 0 {
- return
- }
- }
- s.logger.Debugf("Received %d packets after sending CONNECTION_CLOSE. Retransmitting.", s.counter)
- if err := s.conn.Write(s.connClosePacket); err != nil {
- s.logger.Debugf("Error retransmitting CONNECTION_CLOSE: %s", err)
- }
-}
-
-func (s *closedLocalConn) shutdown() {
- s.destroy(nil)
-}
-
-func (s *closedLocalConn) destroy(error) {
- s.closeOnce.Do(func() {
- close(s.closeChan)
- })
-}
-
-func (s *closedLocalConn) getPerspective() protocol.Perspective {
- return s.perspective
-}
-
-// A closedRemoteConn is a connection that was closed remotely.
-// For such a connection, we might receive reordered packets that were sent before the CONNECTION_CLOSE.
-// We can just ignore those packets.
-type closedRemoteConn struct {
- perspective protocol.Perspective
-}
-
-var _ packetHandler = &closedRemoteConn{}
-
-func newClosedRemoteConn(pers protocol.Perspective) packetHandler {
- return &closedRemoteConn{perspective: pers}
-}
-
-func (s *closedRemoteConn) handlePacket(*receivedPacket) {}
-func (s *closedRemoteConn) shutdown() {}
-func (s *closedRemoteConn) destroy(error) {}
-func (s *closedRemoteConn) getPerspective() protocol.Perspective { return s.perspective }
diff --git a/vendor/github.com/lucas-clemente/quic-go/internal/ackhandler/ackhandler.go b/vendor/github.com/lucas-clemente/quic-go/internal/ackhandler/ackhandler.go
deleted file mode 100644
index 26291321c..000000000
--- a/vendor/github.com/lucas-clemente/quic-go/internal/ackhandler/ackhandler.go
+++ /dev/null
@@ -1,21 +0,0 @@
-package ackhandler
-
-import (
- "github.com/lucas-clemente/quic-go/internal/protocol"
- "github.com/lucas-clemente/quic-go/internal/utils"
- "github.com/lucas-clemente/quic-go/logging"
-)
-
-// NewAckHandler creates a new SentPacketHandler and a new ReceivedPacketHandler
-func NewAckHandler(
- initialPacketNumber protocol.PacketNumber,
- initialMaxDatagramSize protocol.ByteCount,
- rttStats *utils.RTTStats,
- pers protocol.Perspective,
- tracer logging.ConnectionTracer,
- logger utils.Logger,
- version protocol.VersionNumber,
-) (SentPacketHandler, ReceivedPacketHandler) {
- sph := newSentPacketHandler(initialPacketNumber, initialMaxDatagramSize, rttStats, pers, tracer, logger)
- return sph, newReceivedPacketHandler(sph, rttStats, logger, version)
-}
diff --git a/vendor/github.com/lucas-clemente/quic-go/internal/ackhandler/frame.go b/vendor/github.com/lucas-clemente/quic-go/internal/ackhandler/frame.go
deleted file mode 100644
index aed6038d9..000000000
--- a/vendor/github.com/lucas-clemente/quic-go/internal/ackhandler/frame.go
+++ /dev/null
@@ -1,9 +0,0 @@
-package ackhandler
-
-import "github.com/lucas-clemente/quic-go/internal/wire"
-
-type Frame struct {
- wire.Frame // nil if the frame has already been acknowledged in another packet
- OnLost func(wire.Frame)
- OnAcked func(wire.Frame)
-}
diff --git a/vendor/github.com/lucas-clemente/quic-go/internal/ackhandler/gen.go b/vendor/github.com/lucas-clemente/quic-go/internal/ackhandler/gen.go
deleted file mode 100644
index 32235f81a..000000000
--- a/vendor/github.com/lucas-clemente/quic-go/internal/ackhandler/gen.go
+++ /dev/null
@@ -1,3 +0,0 @@
-package ackhandler
-
-//go:generate genny -pkg ackhandler -in ../utils/linkedlist/linkedlist.go -out packet_linkedlist.go gen Item=Packet
diff --git a/vendor/github.com/lucas-clemente/quic-go/internal/ackhandler/mockgen.go b/vendor/github.com/lucas-clemente/quic-go/internal/ackhandler/mockgen.go
deleted file mode 100644
index e957d253a..000000000
--- a/vendor/github.com/lucas-clemente/quic-go/internal/ackhandler/mockgen.go
+++ /dev/null
@@ -1,3 +0,0 @@
-package ackhandler
-
-//go:generate sh -c "../../mockgen_private.sh ackhandler mock_sent_packet_tracker_test.go github.com/lucas-clemente/quic-go/internal/ackhandler sentPacketTracker"
diff --git a/vendor/github.com/lucas-clemente/quic-go/internal/ackhandler/packet_linkedlist.go b/vendor/github.com/lucas-clemente/quic-go/internal/ackhandler/packet_linkedlist.go
deleted file mode 100644
index bb74f4ef9..000000000
--- a/vendor/github.com/lucas-clemente/quic-go/internal/ackhandler/packet_linkedlist.go
+++ /dev/null
@@ -1,217 +0,0 @@
-// This file was automatically generated by genny.
-// Any changes will be lost if this file is regenerated.
-// see https://github.com/cheekybits/genny
-
-package ackhandler
-
-// Linked list implementation from the Go standard library.
-
-// PacketElement is an element of a linked list.
-type PacketElement struct {
- // Next and previous pointers in the doubly-linked list of elements.
- // To simplify the implementation, internally a list l is implemented
- // as a ring, such that &l.root is both the next element of the last
- // list element (l.Back()) and the previous element of the first list
- // element (l.Front()).
- next, prev *PacketElement
-
- // The list to which this element belongs.
- list *PacketList
-
- // The value stored with this element.
- Value Packet
-}
-
-// Next returns the next list element or nil.
-func (e *PacketElement) Next() *PacketElement {
- if p := e.next; e.list != nil && p != &e.list.root {
- return p
- }
- return nil
-}
-
-// Prev returns the previous list element or nil.
-func (e *PacketElement) Prev() *PacketElement {
- if p := e.prev; e.list != nil && p != &e.list.root {
- return p
- }
- return nil
-}
-
-// PacketList is a linked list of Packets.
-type PacketList struct {
- root PacketElement // sentinel list element, only &root, root.prev, and root.next are used
- len int // current list length excluding (this) sentinel element
-}
-
-// Init initializes or clears list l.
-func (l *PacketList) Init() *PacketList {
- l.root.next = &l.root
- l.root.prev = &l.root
- l.len = 0
- return l
-}
-
-// NewPacketList returns an initialized list.
-func NewPacketList() *PacketList { return new(PacketList).Init() }
-
-// Len returns the number of elements of list l.
-// The complexity is O(1).
-func (l *PacketList) Len() int { return l.len }
-
-// Front returns the first element of list l or nil if the list is empty.
-func (l *PacketList) Front() *PacketElement {
- if l.len == 0 {
- return nil
- }
- return l.root.next
-}
-
-// Back returns the last element of list l or nil if the list is empty.
-func (l *PacketList) Back() *PacketElement {
- if l.len == 0 {
- return nil
- }
- return l.root.prev
-}
-
-// lazyInit lazily initializes a zero List value.
-func (l *PacketList) lazyInit() {
- if l.root.next == nil {
- l.Init()
- }
-}
-
-// insert inserts e after at, increments l.len, and returns e.
-func (l *PacketList) insert(e, at *PacketElement) *PacketElement {
- n := at.next
- at.next = e
- e.prev = at
- e.next = n
- n.prev = e
- e.list = l
- l.len++
- return e
-}
-
-// insertValue is a convenience wrapper for insert(&Element{Value: v}, at).
-func (l *PacketList) insertValue(v Packet, at *PacketElement) *PacketElement {
- return l.insert(&PacketElement{Value: v}, at)
-}
-
-// remove removes e from its list, decrements l.len, and returns e.
-func (l *PacketList) remove(e *PacketElement) *PacketElement {
- e.prev.next = e.next
- e.next.prev = e.prev
- e.next = nil // avoid memory leaks
- e.prev = nil // avoid memory leaks
- e.list = nil
- l.len--
- return e
-}
-
-// Remove removes e from l if e is an element of list l.
-// It returns the element value e.Value.
-// The element must not be nil.
-func (l *PacketList) Remove(e *PacketElement) Packet {
- if e.list == l {
- // if e.list == l, l must have been initialized when e was inserted
- // in l or l == nil (e is a zero Element) and l.remove will crash
- l.remove(e)
- }
- return e.Value
-}
-
-// PushFront inserts a new element e with value v at the front of list l and returns e.
-func (l *PacketList) PushFront(v Packet) *PacketElement {
- l.lazyInit()
- return l.insertValue(v, &l.root)
-}
-
-// PushBack inserts a new element e with value v at the back of list l and returns e.
-func (l *PacketList) PushBack(v Packet) *PacketElement {
- l.lazyInit()
- return l.insertValue(v, l.root.prev)
-}
-
-// InsertBefore inserts a new element e with value v immediately before mark and returns e.
-// If mark is not an element of l, the list is not modified.
-// The mark must not be nil.
-func (l *PacketList) InsertBefore(v Packet, mark *PacketElement) *PacketElement {
- if mark.list != l {
- return nil
- }
- // see comment in List.Remove about initialization of l
- return l.insertValue(v, mark.prev)
-}
-
-// InsertAfter inserts a new element e with value v immediately after mark and returns e.
-// If mark is not an element of l, the list is not modified.
-// The mark must not be nil.
-func (l *PacketList) InsertAfter(v Packet, mark *PacketElement) *PacketElement {
- if mark.list != l {
- return nil
- }
- // see comment in List.Remove about initialization of l
- return l.insertValue(v, mark)
-}
-
-// MoveToFront moves element e to the front of list l.
-// If e is not an element of l, the list is not modified.
-// The element must not be nil.
-func (l *PacketList) MoveToFront(e *PacketElement) {
- if e.list != l || l.root.next == e {
- return
- }
- // see comment in List.Remove about initialization of l
- l.insert(l.remove(e), &l.root)
-}
-
-// MoveToBack moves element e to the back of list l.
-// If e is not an element of l, the list is not modified.
-// The element must not be nil.
-func (l *PacketList) MoveToBack(e *PacketElement) {
- if e.list != l || l.root.prev == e {
- return
- }
- // see comment in List.Remove about initialization of l
- l.insert(l.remove(e), l.root.prev)
-}
-
-// MoveBefore moves element e to its new position before mark.
-// If e or mark is not an element of l, or e == mark, the list is not modified.
-// The element and mark must not be nil.
-func (l *PacketList) MoveBefore(e, mark *PacketElement) {
- if e.list != l || e == mark || mark.list != l {
- return
- }
- l.insert(l.remove(e), mark.prev)
-}
-
-// MoveAfter moves element e to its new position after mark.
-// If e or mark is not an element of l, or e == mark, the list is not modified.
-// The element and mark must not be nil.
-func (l *PacketList) MoveAfter(e, mark *PacketElement) {
- if e.list != l || e == mark || mark.list != l {
- return
- }
- l.insert(l.remove(e), mark)
-}
-
-// PushBackList inserts a copy of an other list at the back of list l.
-// The lists l and other may be the same. They must not be nil.
-func (l *PacketList) PushBackList(other *PacketList) {
- l.lazyInit()
- for i, e := other.Len(), other.Front(); i > 0; i, e = i-1, e.Next() {
- l.insertValue(e.Value, l.root.prev)
- }
-}
-
-// PushFrontList inserts a copy of an other list at the front of list l.
-// The lists l and other may be the same. They must not be nil.
-func (l *PacketList) PushFrontList(other *PacketList) {
- l.lazyInit()
- for i, e := other.Len(), other.Back(); i > 0; i, e = i-1, e.Prev() {
- l.insertValue(e.Value, &l.root)
- }
-}
diff --git a/vendor/github.com/lucas-clemente/quic-go/internal/ackhandler/sent_packet_history.go b/vendor/github.com/lucas-clemente/quic-go/internal/ackhandler/sent_packet_history.go
deleted file mode 100644
index 36489367d..000000000
--- a/vendor/github.com/lucas-clemente/quic-go/internal/ackhandler/sent_packet_history.go
+++ /dev/null
@@ -1,108 +0,0 @@
-package ackhandler
-
-import (
- "fmt"
- "time"
-
- "github.com/lucas-clemente/quic-go/internal/protocol"
- "github.com/lucas-clemente/quic-go/internal/utils"
-)
-
-type sentPacketHistory struct {
- rttStats *utils.RTTStats
- packetList *PacketList
- packetMap map[protocol.PacketNumber]*PacketElement
- highestSent protocol.PacketNumber
-}
-
-func newSentPacketHistory(rttStats *utils.RTTStats) *sentPacketHistory {
- return &sentPacketHistory{
- rttStats: rttStats,
- packetList: NewPacketList(),
- packetMap: make(map[protocol.PacketNumber]*PacketElement),
- highestSent: protocol.InvalidPacketNumber,
- }
-}
-
-func (h *sentPacketHistory) SentPacket(p *Packet, isAckEliciting bool) {
- if p.PacketNumber <= h.highestSent {
- panic("non-sequential packet number use")
- }
- // Skipped packet numbers.
- for pn := h.highestSent + 1; pn < p.PacketNumber; pn++ {
- el := h.packetList.PushBack(Packet{
- PacketNumber: pn,
- EncryptionLevel: p.EncryptionLevel,
- SendTime: p.SendTime,
- skippedPacket: true,
- })
- h.packetMap[pn] = el
- }
- h.highestSent = p.PacketNumber
-
- if isAckEliciting {
- el := h.packetList.PushBack(*p)
- h.packetMap[p.PacketNumber] = el
- }
-}
-
-// Iterate iterates through all packets.
-func (h *sentPacketHistory) Iterate(cb func(*Packet) (cont bool, err error)) error {
- cont := true
- var next *PacketElement
- for el := h.packetList.Front(); cont && el != nil; el = next {
- var err error
- next = el.Next()
- cont, err = cb(&el.Value)
- if err != nil {
- return err
- }
- }
- return nil
-}
-
-// FirstOutStanding returns the first outstanding packet.
-func (h *sentPacketHistory) FirstOutstanding() *Packet {
- for el := h.packetList.Front(); el != nil; el = el.Next() {
- p := &el.Value
- if !p.declaredLost && !p.skippedPacket && !p.IsPathMTUProbePacket {
- return p
- }
- }
- return nil
-}
-
-func (h *sentPacketHistory) Len() int {
- return len(h.packetMap)
-}
-
-func (h *sentPacketHistory) Remove(p protocol.PacketNumber) error {
- el, ok := h.packetMap[p]
- if !ok {
- return fmt.Errorf("packet %d not found in sent packet history", p)
- }
- h.packetList.Remove(el)
- delete(h.packetMap, p)
- return nil
-}
-
-func (h *sentPacketHistory) HasOutstandingPackets() bool {
- return h.FirstOutstanding() != nil
-}
-
-func (h *sentPacketHistory) DeleteOldPackets(now time.Time) {
- maxAge := 3 * h.rttStats.PTO(false)
- var nextEl *PacketElement
- for el := h.packetList.Front(); el != nil; el = nextEl {
- nextEl = el.Next()
- p := el.Value
- if p.SendTime.After(now.Add(-maxAge)) {
- break
- }
- if !p.skippedPacket && !p.declaredLost { // should only happen in the case of drastic RTT changes
- continue
- }
- delete(h.packetMap, p.PacketNumber)
- h.packetList.Remove(el)
- }
-}
diff --git a/vendor/github.com/lucas-clemente/quic-go/internal/handshake/mockgen.go b/vendor/github.com/lucas-clemente/quic-go/internal/handshake/mockgen.go
deleted file mode 100644
index c7a8d13ee..000000000
--- a/vendor/github.com/lucas-clemente/quic-go/internal/handshake/mockgen.go
+++ /dev/null
@@ -1,3 +0,0 @@
-package handshake
-
-//go:generate sh -c "../../mockgen_private.sh handshake mock_handshake_runner_test.go github.com/lucas-clemente/quic-go/internal/handshake handshakeRunner"
diff --git a/vendor/github.com/lucas-clemente/quic-go/internal/handshake/retry.go b/vendor/github.com/lucas-clemente/quic-go/internal/handshake/retry.go
deleted file mode 100644
index b7cb20c15..000000000
--- a/vendor/github.com/lucas-clemente/quic-go/internal/handshake/retry.go
+++ /dev/null
@@ -1,62 +0,0 @@
-package handshake
-
-import (
- "bytes"
- "crypto/aes"
- "crypto/cipher"
- "fmt"
- "sync"
-
- "github.com/lucas-clemente/quic-go/internal/protocol"
-)
-
-var (
- oldRetryAEAD cipher.AEAD // used for QUIC draft versions up to 34
- retryAEAD cipher.AEAD // used for QUIC draft-34
-)
-
-func init() {
- oldRetryAEAD = initAEAD([16]byte{0xcc, 0xce, 0x18, 0x7e, 0xd0, 0x9a, 0x09, 0xd0, 0x57, 0x28, 0x15, 0x5a, 0x6c, 0xb9, 0x6b, 0xe1})
- retryAEAD = initAEAD([16]byte{0xbe, 0x0c, 0x69, 0x0b, 0x9f, 0x66, 0x57, 0x5a, 0x1d, 0x76, 0x6b, 0x54, 0xe3, 0x68, 0xc8, 0x4e})
-}
-
-func initAEAD(key [16]byte) cipher.AEAD {
- aes, err := aes.NewCipher(key[:])
- if err != nil {
- panic(err)
- }
- aead, err := cipher.NewGCM(aes)
- if err != nil {
- panic(err)
- }
- return aead
-}
-
-var (
- retryBuf bytes.Buffer
- retryMutex sync.Mutex
- oldRetryNonce = [12]byte{0xe5, 0x49, 0x30, 0xf9, 0x7f, 0x21, 0x36, 0xf0, 0x53, 0x0a, 0x8c, 0x1c}
- retryNonce = [12]byte{0x46, 0x15, 0x99, 0xd3, 0x5d, 0x63, 0x2b, 0xf2, 0x23, 0x98, 0x25, 0xbb}
-)
-
-// GetRetryIntegrityTag calculates the integrity tag on a Retry packet
-func GetRetryIntegrityTag(retry []byte, origDestConnID protocol.ConnectionID, version protocol.VersionNumber) *[16]byte {
- retryMutex.Lock()
- retryBuf.WriteByte(uint8(origDestConnID.Len()))
- retryBuf.Write(origDestConnID.Bytes())
- retryBuf.Write(retry)
-
- var tag [16]byte
- var sealed []byte
- if version != protocol.Version1 {
- sealed = oldRetryAEAD.Seal(tag[:0], oldRetryNonce[:], nil, retryBuf.Bytes())
- } else {
- sealed = retryAEAD.Seal(tag[:0], retryNonce[:], nil, retryBuf.Bytes())
- }
- if len(sealed) != 16 {
- panic(fmt.Sprintf("unexpected Retry integrity tag length: %d", len(sealed)))
- }
- retryBuf.Reset()
- retryMutex.Unlock()
- return &tag
-}
diff --git a/vendor/github.com/lucas-clemente/quic-go/internal/protocol/connection_id.go b/vendor/github.com/lucas-clemente/quic-go/internal/protocol/connection_id.go
deleted file mode 100644
index 3aec2cd38..000000000
--- a/vendor/github.com/lucas-clemente/quic-go/internal/protocol/connection_id.go
+++ /dev/null
@@ -1,69 +0,0 @@
-package protocol
-
-import (
- "bytes"
- "crypto/rand"
- "fmt"
- "io"
-)
-
-// A ConnectionID in QUIC
-type ConnectionID []byte
-
-const maxConnectionIDLen = 20
-
-// GenerateConnectionID generates a connection ID using cryptographic random
-func GenerateConnectionID(len int) (ConnectionID, error) {
- b := make([]byte, len)
- if _, err := rand.Read(b); err != nil {
- return nil, err
- }
- return ConnectionID(b), nil
-}
-
-// GenerateConnectionIDForInitial generates a connection ID for the Initial packet.
-// It uses a length randomly chosen between 8 and 20 bytes.
-func GenerateConnectionIDForInitial() (ConnectionID, error) {
- r := make([]byte, 1)
- if _, err := rand.Read(r); err != nil {
- return nil, err
- }
- len := MinConnectionIDLenInitial + int(r[0])%(maxConnectionIDLen-MinConnectionIDLenInitial+1)
- return GenerateConnectionID(len)
-}
-
-// ReadConnectionID reads a connection ID of length len from the given io.Reader.
-// It returns io.EOF if there are not enough bytes to read.
-func ReadConnectionID(r io.Reader, len int) (ConnectionID, error) {
- if len == 0 {
- return nil, nil
- }
- c := make(ConnectionID, len)
- _, err := io.ReadFull(r, c)
- if err == io.ErrUnexpectedEOF {
- return nil, io.EOF
- }
- return c, err
-}
-
-// Equal says if two connection IDs are equal
-func (c ConnectionID) Equal(other ConnectionID) bool {
- return bytes.Equal(c, other)
-}
-
-// Len returns the length of the connection ID in bytes
-func (c ConnectionID) Len() int {
- return len(c)
-}
-
-// Bytes returns the byte representation
-func (c ConnectionID) Bytes() []byte {
- return []byte(c)
-}
-
-func (c ConnectionID) String() string {
- if c.Len() == 0 {
- return "(empty)"
- }
- return fmt.Sprintf("%x", c.Bytes())
-}
diff --git a/vendor/github.com/lucas-clemente/quic-go/internal/qtls/go116.go b/vendor/github.com/lucas-clemente/quic-go/internal/qtls/go116.go
deleted file mode 100644
index e3024624c..000000000
--- a/vendor/github.com/lucas-clemente/quic-go/internal/qtls/go116.go
+++ /dev/null
@@ -1,100 +0,0 @@
-//go:build go1.16 && !go1.17
-// +build go1.16,!go1.17
-
-package qtls
-
-import (
- "crypto"
- "crypto/cipher"
- "crypto/tls"
- "net"
- "unsafe"
-
- "github.com/marten-seemann/qtls-go1-16"
-)
-
-type (
- // Alert is a TLS alert
- Alert = qtls.Alert
- // A Certificate is qtls.Certificate.
- Certificate = qtls.Certificate
- // CertificateRequestInfo contains inforamtion about a certificate request.
- CertificateRequestInfo = qtls.CertificateRequestInfo
- // A CipherSuiteTLS13 is a cipher suite for TLS 1.3
- CipherSuiteTLS13 = qtls.CipherSuiteTLS13
- // ClientHelloInfo contains information about a ClientHello.
- ClientHelloInfo = qtls.ClientHelloInfo
- // ClientSessionCache is a cache used for session resumption.
- ClientSessionCache = qtls.ClientSessionCache
- // ClientSessionState is a state needed for session resumption.
- ClientSessionState = qtls.ClientSessionState
- // A Config is a qtls.Config.
- Config = qtls.Config
- // A Conn is a qtls.Conn.
- Conn = qtls.Conn
- // ConnectionState contains information about the state of the connection.
- ConnectionState = qtls.ConnectionStateWith0RTT
- // EncryptionLevel is the encryption level of a message.
- EncryptionLevel = qtls.EncryptionLevel
- // Extension is a TLS extension
- Extension = qtls.Extension
- // ExtraConfig is the qtls.ExtraConfig
- ExtraConfig = qtls.ExtraConfig
- // RecordLayer is a qtls RecordLayer.
- RecordLayer = qtls.RecordLayer
-)
-
-const (
- // EncryptionHandshake is the Handshake encryption level
- EncryptionHandshake = qtls.EncryptionHandshake
- // Encryption0RTT is the 0-RTT encryption level
- Encryption0RTT = qtls.Encryption0RTT
- // EncryptionApplication is the application data encryption level
- EncryptionApplication = qtls.EncryptionApplication
-)
-
-// AEADAESGCMTLS13 creates a new AES-GCM AEAD for TLS 1.3
-func AEADAESGCMTLS13(key, fixedNonce []byte) cipher.AEAD {
- return qtls.AEADAESGCMTLS13(key, fixedNonce)
-}
-
-// Client returns a new TLS client side connection.
-func Client(conn net.Conn, config *Config, extraConfig *ExtraConfig) *Conn {
- return qtls.Client(conn, config, extraConfig)
-}
-
-// Server returns a new TLS server side connection.
-func Server(conn net.Conn, config *Config, extraConfig *ExtraConfig) *Conn {
- return qtls.Server(conn, config, extraConfig)
-}
-
-func GetConnectionState(conn *Conn) ConnectionState {
- return conn.ConnectionStateWith0RTT()
-}
-
-// ToTLSConnectionState extracts the tls.ConnectionState
-func ToTLSConnectionState(cs ConnectionState) tls.ConnectionState {
- return cs.ConnectionState
-}
-
-type cipherSuiteTLS13 struct {
- ID uint16
- KeyLen int
- AEAD func(key, fixedNonce []byte) cipher.AEAD
- Hash crypto.Hash
-}
-
-//go:linkname cipherSuiteTLS13ByID github.com/marten-seemann/qtls-go1-16.cipherSuiteTLS13ByID
-func cipherSuiteTLS13ByID(id uint16) *cipherSuiteTLS13
-
-// CipherSuiteTLS13ByID gets a TLS 1.3 cipher suite.
-func CipherSuiteTLS13ByID(id uint16) *CipherSuiteTLS13 {
- val := cipherSuiteTLS13ByID(id)
- cs := (*cipherSuiteTLS13)(unsafe.Pointer(val))
- return &qtls.CipherSuiteTLS13{
- ID: cs.ID,
- KeyLen: cs.KeyLen,
- AEAD: cs.AEAD,
- Hash: cs.Hash,
- }
-}
diff --git a/vendor/github.com/lucas-clemente/quic-go/internal/qtls/go117.go b/vendor/github.com/lucas-clemente/quic-go/internal/qtls/go117.go
deleted file mode 100644
index bc385f194..000000000
--- a/vendor/github.com/lucas-clemente/quic-go/internal/qtls/go117.go
+++ /dev/null
@@ -1,100 +0,0 @@
-//go:build go1.17 && !go1.18
-// +build go1.17,!go1.18
-
-package qtls
-
-import (
- "crypto"
- "crypto/cipher"
- "crypto/tls"
- "net"
- "unsafe"
-
- "github.com/marten-seemann/qtls-go1-17"
-)
-
-type (
- // Alert is a TLS alert
- Alert = qtls.Alert
- // A Certificate is qtls.Certificate.
- Certificate = qtls.Certificate
- // CertificateRequestInfo contains inforamtion about a certificate request.
- CertificateRequestInfo = qtls.CertificateRequestInfo
- // A CipherSuiteTLS13 is a cipher suite for TLS 1.3
- CipherSuiteTLS13 = qtls.CipherSuiteTLS13
- // ClientHelloInfo contains information about a ClientHello.
- ClientHelloInfo = qtls.ClientHelloInfo
- // ClientSessionCache is a cache used for session resumption.
- ClientSessionCache = qtls.ClientSessionCache
- // ClientSessionState is a state needed for session resumption.
- ClientSessionState = qtls.ClientSessionState
- // A Config is a qtls.Config.
- Config = qtls.Config
- // A Conn is a qtls.Conn.
- Conn = qtls.Conn
- // ConnectionState contains information about the state of the connection.
- ConnectionState = qtls.ConnectionStateWith0RTT
- // EncryptionLevel is the encryption level of a message.
- EncryptionLevel = qtls.EncryptionLevel
- // Extension is a TLS extension
- Extension = qtls.Extension
- // ExtraConfig is the qtls.ExtraConfig
- ExtraConfig = qtls.ExtraConfig
- // RecordLayer is a qtls RecordLayer.
- RecordLayer = qtls.RecordLayer
-)
-
-const (
- // EncryptionHandshake is the Handshake encryption level
- EncryptionHandshake = qtls.EncryptionHandshake
- // Encryption0RTT is the 0-RTT encryption level
- Encryption0RTT = qtls.Encryption0RTT
- // EncryptionApplication is the application data encryption level
- EncryptionApplication = qtls.EncryptionApplication
-)
-
-// AEADAESGCMTLS13 creates a new AES-GCM AEAD for TLS 1.3
-func AEADAESGCMTLS13(key, fixedNonce []byte) cipher.AEAD {
- return qtls.AEADAESGCMTLS13(key, fixedNonce)
-}
-
-// Client returns a new TLS client side connection.
-func Client(conn net.Conn, config *Config, extraConfig *ExtraConfig) *Conn {
- return qtls.Client(conn, config, extraConfig)
-}
-
-// Server returns a new TLS server side connection.
-func Server(conn net.Conn, config *Config, extraConfig *ExtraConfig) *Conn {
- return qtls.Server(conn, config, extraConfig)
-}
-
-func GetConnectionState(conn *Conn) ConnectionState {
- return conn.ConnectionStateWith0RTT()
-}
-
-// ToTLSConnectionState extracts the tls.ConnectionState
-func ToTLSConnectionState(cs ConnectionState) tls.ConnectionState {
- return cs.ConnectionState
-}
-
-type cipherSuiteTLS13 struct {
- ID uint16
- KeyLen int
- AEAD func(key, fixedNonce []byte) cipher.AEAD
- Hash crypto.Hash
-}
-
-//go:linkname cipherSuiteTLS13ByID github.com/marten-seemann/qtls-go1-17.cipherSuiteTLS13ByID
-func cipherSuiteTLS13ByID(id uint16) *cipherSuiteTLS13
-
-// CipherSuiteTLS13ByID gets a TLS 1.3 cipher suite.
-func CipherSuiteTLS13ByID(id uint16) *CipherSuiteTLS13 {
- val := cipherSuiteTLS13ByID(id)
- cs := (*cipherSuiteTLS13)(unsafe.Pointer(val))
- return &qtls.CipherSuiteTLS13{
- ID: cs.ID,
- KeyLen: cs.KeyLen,
- AEAD: cs.AEAD,
- Hash: cs.Hash,
- }
-}
diff --git a/vendor/github.com/lucas-clemente/quic-go/internal/qtls/go120.go b/vendor/github.com/lucas-clemente/quic-go/internal/qtls/go120.go
deleted file mode 100644
index f8d59d8bb..000000000
--- a/vendor/github.com/lucas-clemente/quic-go/internal/qtls/go120.go
+++ /dev/null
@@ -1,6 +0,0 @@
-//go:build go1.20
-// +build go1.20
-
-package qtls
-
-var _ int = "The version of quic-go you're using can't be built on Go 1.20 yet. For more details, please see https://github.com/lucas-clemente/quic-go/wiki/quic-go-and-Go-versions."
diff --git a/vendor/github.com/lucas-clemente/quic-go/internal/qtls/go_oldversion.go b/vendor/github.com/lucas-clemente/quic-go/internal/qtls/go_oldversion.go
deleted file mode 100644
index 384d719c6..000000000
--- a/vendor/github.com/lucas-clemente/quic-go/internal/qtls/go_oldversion.go
+++ /dev/null
@@ -1,7 +0,0 @@
-//go:build (go1.9 || go1.10 || go1.11 || go1.12 || go1.13 || go1.14 || go1.15) && !go1.16
-// +build go1.9 go1.10 go1.11 go1.12 go1.13 go1.14 go1.15
-// +build !go1.16
-
-package qtls
-
-var _ int = "The version of quic-go you're using can't be built using outdated Go versions. For more details, please see https://github.com/lucas-clemente/quic-go/wiki/quic-go-and-Go-versions."
diff --git a/vendor/github.com/lucas-clemente/quic-go/internal/utils/atomic_bool.go b/vendor/github.com/lucas-clemente/quic-go/internal/utils/atomic_bool.go
deleted file mode 100644
index cf4642504..000000000
--- a/vendor/github.com/lucas-clemente/quic-go/internal/utils/atomic_bool.go
+++ /dev/null
@@ -1,22 +0,0 @@
-package utils
-
-import "sync/atomic"
-
-// An AtomicBool is an atomic bool
-type AtomicBool struct {
- v int32
-}
-
-// Set sets the value
-func (a *AtomicBool) Set(value bool) {
- var n int32
- if value {
- n = 1
- }
- atomic.StoreInt32(&a.v, n)
-}
-
-// Get gets the value
-func (a *AtomicBool) Get() bool {
- return atomic.LoadInt32(&a.v) != 0
-}
diff --git a/vendor/github.com/lucas-clemente/quic-go/internal/utils/gen.go b/vendor/github.com/lucas-clemente/quic-go/internal/utils/gen.go
deleted file mode 100644
index 8a63e9589..000000000
--- a/vendor/github.com/lucas-clemente/quic-go/internal/utils/gen.go
+++ /dev/null
@@ -1,5 +0,0 @@
-package utils
-
-//go:generate genny -pkg utils -in linkedlist/linkedlist.go -out byteinterval_linkedlist.go gen Item=ByteInterval
-//go:generate genny -pkg utils -in linkedlist/linkedlist.go -out packetinterval_linkedlist.go gen Item=PacketInterval
-//go:generate genny -pkg utils -in linkedlist/linkedlist.go -out newconnectionid_linkedlist.go gen Item=NewConnectionID
diff --git a/vendor/github.com/lucas-clemente/quic-go/internal/utils/minmax.go b/vendor/github.com/lucas-clemente/quic-go/internal/utils/minmax.go
deleted file mode 100644
index ee1f85f62..000000000
--- a/vendor/github.com/lucas-clemente/quic-go/internal/utils/minmax.go
+++ /dev/null
@@ -1,170 +0,0 @@
-package utils
-
-import (
- "math"
- "time"
-
- "github.com/lucas-clemente/quic-go/internal/protocol"
-)
-
-// InfDuration is a duration of infinite length
-const InfDuration = time.Duration(math.MaxInt64)
-
-// Max returns the maximum of two Ints
-func Max(a, b int) int {
- if a < b {
- return b
- }
- return a
-}
-
-// MaxUint32 returns the maximum of two uint32
-func MaxUint32(a, b uint32) uint32 {
- if a < b {
- return b
- }
- return a
-}
-
-// MaxUint64 returns the maximum of two uint64
-func MaxUint64(a, b uint64) uint64 {
- if a < b {
- return b
- }
- return a
-}
-
-// MinUint64 returns the maximum of two uint64
-func MinUint64(a, b uint64) uint64 {
- if a < b {
- return a
- }
- return b
-}
-
-// Min returns the minimum of two Ints
-func Min(a, b int) int {
- if a < b {
- return a
- }
- return b
-}
-
-// MinUint32 returns the maximum of two uint32
-func MinUint32(a, b uint32) uint32 {
- if a < b {
- return a
- }
- return b
-}
-
-// MinInt64 returns the minimum of two int64
-func MinInt64(a, b int64) int64 {
- if a < b {
- return a
- }
- return b
-}
-
-// MaxInt64 returns the minimum of two int64
-func MaxInt64(a, b int64) int64 {
- if a > b {
- return a
- }
- return b
-}
-
-// MinByteCount returns the minimum of two ByteCounts
-func MinByteCount(a, b protocol.ByteCount) protocol.ByteCount {
- if a < b {
- return a
- }
- return b
-}
-
-// MaxByteCount returns the maximum of two ByteCounts
-func MaxByteCount(a, b protocol.ByteCount) protocol.ByteCount {
- if a < b {
- return b
- }
- return a
-}
-
-// MaxDuration returns the max duration
-func MaxDuration(a, b time.Duration) time.Duration {
- if a > b {
- return a
- }
- return b
-}
-
-// MinDuration returns the minimum duration
-func MinDuration(a, b time.Duration) time.Duration {
- if a > b {
- return b
- }
- return a
-}
-
-// MinNonZeroDuration return the minimum duration that's not zero.
-func MinNonZeroDuration(a, b time.Duration) time.Duration {
- if a == 0 {
- return b
- }
- if b == 0 {
- return a
- }
- return MinDuration(a, b)
-}
-
-// AbsDuration returns the absolute value of a time duration
-func AbsDuration(d time.Duration) time.Duration {
- if d >= 0 {
- return d
- }
- return -d
-}
-
-// MinTime returns the earlier time
-func MinTime(a, b time.Time) time.Time {
- if a.After(b) {
- return b
- }
- return a
-}
-
-// MinNonZeroTime returns the earlist time that is not time.Time{}
-// If both a and b are time.Time{}, it returns time.Time{}
-func MinNonZeroTime(a, b time.Time) time.Time {
- if a.IsZero() {
- return b
- }
- if b.IsZero() {
- return a
- }
- return MinTime(a, b)
-}
-
-// MaxTime returns the later time
-func MaxTime(a, b time.Time) time.Time {
- if a.After(b) {
- return a
- }
- return b
-}
-
-// MaxPacketNumber returns the max packet number
-func MaxPacketNumber(a, b protocol.PacketNumber) protocol.PacketNumber {
- if a > b {
- return a
- }
- return b
-}
-
-// MinPacketNumber returns the min packet number
-func MinPacketNumber(a, b protocol.PacketNumber) protocol.PacketNumber {
- if a < b {
- return a
- }
- return b
-}
diff --git a/vendor/github.com/lucas-clemente/quic-go/internal/utils/new_connection_id.go b/vendor/github.com/lucas-clemente/quic-go/internal/utils/new_connection_id.go
deleted file mode 100644
index 694ee7aaf..000000000
--- a/vendor/github.com/lucas-clemente/quic-go/internal/utils/new_connection_id.go
+++ /dev/null
@@ -1,12 +0,0 @@
-package utils
-
-import (
- "github.com/lucas-clemente/quic-go/internal/protocol"
-)
-
-// NewConnectionID is a new connection ID
-type NewConnectionID struct {
- SequenceNumber uint64
- ConnectionID protocol.ConnectionID
- StatelessResetToken protocol.StatelessResetToken
-}
diff --git a/vendor/github.com/lucas-clemente/quic-go/internal/utils/newconnectionid_linkedlist.go b/vendor/github.com/lucas-clemente/quic-go/internal/utils/newconnectionid_linkedlist.go
deleted file mode 100644
index d59562e53..000000000
--- a/vendor/github.com/lucas-clemente/quic-go/internal/utils/newconnectionid_linkedlist.go
+++ /dev/null
@@ -1,217 +0,0 @@
-// This file was automatically generated by genny.
-// Any changes will be lost if this file is regenerated.
-// see https://github.com/cheekybits/genny
-
-package utils
-
-// Linked list implementation from the Go standard library.
-
-// NewConnectionIDElement is an element of a linked list.
-type NewConnectionIDElement struct {
- // Next and previous pointers in the doubly-linked list of elements.
- // To simplify the implementation, internally a list l is implemented
- // as a ring, such that &l.root is both the next element of the last
- // list element (l.Back()) and the previous element of the first list
- // element (l.Front()).
- next, prev *NewConnectionIDElement
-
- // The list to which this element belongs.
- list *NewConnectionIDList
-
- // The value stored with this element.
- Value NewConnectionID
-}
-
-// Next returns the next list element or nil.
-func (e *NewConnectionIDElement) Next() *NewConnectionIDElement {
- if p := e.next; e.list != nil && p != &e.list.root {
- return p
- }
- return nil
-}
-
-// Prev returns the previous list element or nil.
-func (e *NewConnectionIDElement) Prev() *NewConnectionIDElement {
- if p := e.prev; e.list != nil && p != &e.list.root {
- return p
- }
- return nil
-}
-
-// NewConnectionIDList is a linked list of NewConnectionIDs.
-type NewConnectionIDList struct {
- root NewConnectionIDElement // sentinel list element, only &root, root.prev, and root.next are used
- len int // current list length excluding (this) sentinel element
-}
-
-// Init initializes or clears list l.
-func (l *NewConnectionIDList) Init() *NewConnectionIDList {
- l.root.next = &l.root
- l.root.prev = &l.root
- l.len = 0
- return l
-}
-
-// NewNewConnectionIDList returns an initialized list.
-func NewNewConnectionIDList() *NewConnectionIDList { return new(NewConnectionIDList).Init() }
-
-// Len returns the number of elements of list l.
-// The complexity is O(1).
-func (l *NewConnectionIDList) Len() int { return l.len }
-
-// Front returns the first element of list l or nil if the list is empty.
-func (l *NewConnectionIDList) Front() *NewConnectionIDElement {
- if l.len == 0 {
- return nil
- }
- return l.root.next
-}
-
-// Back returns the last element of list l or nil if the list is empty.
-func (l *NewConnectionIDList) Back() *NewConnectionIDElement {
- if l.len == 0 {
- return nil
- }
- return l.root.prev
-}
-
-// lazyInit lazily initializes a zero List value.
-func (l *NewConnectionIDList) lazyInit() {
- if l.root.next == nil {
- l.Init()
- }
-}
-
-// insert inserts e after at, increments l.len, and returns e.
-func (l *NewConnectionIDList) insert(e, at *NewConnectionIDElement) *NewConnectionIDElement {
- n := at.next
- at.next = e
- e.prev = at
- e.next = n
- n.prev = e
- e.list = l
- l.len++
- return e
-}
-
-// insertValue is a convenience wrapper for insert(&Element{Value: v}, at).
-func (l *NewConnectionIDList) insertValue(v NewConnectionID, at *NewConnectionIDElement) *NewConnectionIDElement {
- return l.insert(&NewConnectionIDElement{Value: v}, at)
-}
-
-// remove removes e from its list, decrements l.len, and returns e.
-func (l *NewConnectionIDList) remove(e *NewConnectionIDElement) *NewConnectionIDElement {
- e.prev.next = e.next
- e.next.prev = e.prev
- e.next = nil // avoid memory leaks
- e.prev = nil // avoid memory leaks
- e.list = nil
- l.len--
- return e
-}
-
-// Remove removes e from l if e is an element of list l.
-// It returns the element value e.Value.
-// The element must not be nil.
-func (l *NewConnectionIDList) Remove(e *NewConnectionIDElement) NewConnectionID {
- if e.list == l {
- // if e.list == l, l must have been initialized when e was inserted
- // in l or l == nil (e is a zero Element) and l.remove will crash
- l.remove(e)
- }
- return e.Value
-}
-
-// PushFront inserts a new element e with value v at the front of list l and returns e.
-func (l *NewConnectionIDList) PushFront(v NewConnectionID) *NewConnectionIDElement {
- l.lazyInit()
- return l.insertValue(v, &l.root)
-}
-
-// PushBack inserts a new element e with value v at the back of list l and returns e.
-func (l *NewConnectionIDList) PushBack(v NewConnectionID) *NewConnectionIDElement {
- l.lazyInit()
- return l.insertValue(v, l.root.prev)
-}
-
-// InsertBefore inserts a new element e with value v immediately before mark and returns e.
-// If mark is not an element of l, the list is not modified.
-// The mark must not be nil.
-func (l *NewConnectionIDList) InsertBefore(v NewConnectionID, mark *NewConnectionIDElement) *NewConnectionIDElement {
- if mark.list != l {
- return nil
- }
- // see comment in List.Remove about initialization of l
- return l.insertValue(v, mark.prev)
-}
-
-// InsertAfter inserts a new element e with value v immediately after mark and returns e.
-// If mark is not an element of l, the list is not modified.
-// The mark must not be nil.
-func (l *NewConnectionIDList) InsertAfter(v NewConnectionID, mark *NewConnectionIDElement) *NewConnectionIDElement {
- if mark.list != l {
- return nil
- }
- // see comment in List.Remove about initialization of l
- return l.insertValue(v, mark)
-}
-
-// MoveToFront moves element e to the front of list l.
-// If e is not an element of l, the list is not modified.
-// The element must not be nil.
-func (l *NewConnectionIDList) MoveToFront(e *NewConnectionIDElement) {
- if e.list != l || l.root.next == e {
- return
- }
- // see comment in List.Remove about initialization of l
- l.insert(l.remove(e), &l.root)
-}
-
-// MoveToBack moves element e to the back of list l.
-// If e is not an element of l, the list is not modified.
-// The element must not be nil.
-func (l *NewConnectionIDList) MoveToBack(e *NewConnectionIDElement) {
- if e.list != l || l.root.prev == e {
- return
- }
- // see comment in List.Remove about initialization of l
- l.insert(l.remove(e), l.root.prev)
-}
-
-// MoveBefore moves element e to its new position before mark.
-// If e or mark is not an element of l, or e == mark, the list is not modified.
-// The element and mark must not be nil.
-func (l *NewConnectionIDList) MoveBefore(e, mark *NewConnectionIDElement) {
- if e.list != l || e == mark || mark.list != l {
- return
- }
- l.insert(l.remove(e), mark.prev)
-}
-
-// MoveAfter moves element e to its new position after mark.
-// If e or mark is not an element of l, or e == mark, the list is not modified.
-// The element and mark must not be nil.
-func (l *NewConnectionIDList) MoveAfter(e, mark *NewConnectionIDElement) {
- if e.list != l || e == mark || mark.list != l {
- return
- }
- l.insert(l.remove(e), mark)
-}
-
-// PushBackList inserts a copy of an other list at the back of list l.
-// The lists l and other may be the same. They must not be nil.
-func (l *NewConnectionIDList) PushBackList(other *NewConnectionIDList) {
- l.lazyInit()
- for i, e := other.Len(), other.Front(); i > 0; i, e = i-1, e.Next() {
- l.insertValue(e.Value, l.root.prev)
- }
-}
-
-// PushFrontList inserts a copy of an other list at the front of list l.
-// The lists l and other may be the same. They must not be nil.
-func (l *NewConnectionIDList) PushFrontList(other *NewConnectionIDList) {
- l.lazyInit()
- for i, e := other.Len(), other.Back(); i > 0; i, e = i-1, e.Prev() {
- l.insertValue(e.Value, &l.root)
- }
-}
diff --git a/vendor/github.com/lucas-clemente/quic-go/internal/utils/packet_interval.go b/vendor/github.com/lucas-clemente/quic-go/internal/utils/packet_interval.go
deleted file mode 100644
index 62cc8b9cb..000000000
--- a/vendor/github.com/lucas-clemente/quic-go/internal/utils/packet_interval.go
+++ /dev/null
@@ -1,9 +0,0 @@
-package utils
-
-import "github.com/lucas-clemente/quic-go/internal/protocol"
-
-// PacketInterval is an interval from one PacketNumber to the other
-type PacketInterval struct {
- Start protocol.PacketNumber
- End protocol.PacketNumber
-}
diff --git a/vendor/github.com/lucas-clemente/quic-go/internal/utils/packetinterval_linkedlist.go b/vendor/github.com/lucas-clemente/quic-go/internal/utils/packetinterval_linkedlist.go
deleted file mode 100644
index b461e85a9..000000000
--- a/vendor/github.com/lucas-clemente/quic-go/internal/utils/packetinterval_linkedlist.go
+++ /dev/null
@@ -1,217 +0,0 @@
-// This file was automatically generated by genny.
-// Any changes will be lost if this file is regenerated.
-// see https://github.com/cheekybits/genny
-
-package utils
-
-// Linked list implementation from the Go standard library.
-
-// PacketIntervalElement is an element of a linked list.
-type PacketIntervalElement struct {
- // Next and previous pointers in the doubly-linked list of elements.
- // To simplify the implementation, internally a list l is implemented
- // as a ring, such that &l.root is both the next element of the last
- // list element (l.Back()) and the previous element of the first list
- // element (l.Front()).
- next, prev *PacketIntervalElement
-
- // The list to which this element belongs.
- list *PacketIntervalList
-
- // The value stored with this element.
- Value PacketInterval
-}
-
-// Next returns the next list element or nil.
-func (e *PacketIntervalElement) Next() *PacketIntervalElement {
- if p := e.next; e.list != nil && p != &e.list.root {
- return p
- }
- return nil
-}
-
-// Prev returns the previous list element or nil.
-func (e *PacketIntervalElement) Prev() *PacketIntervalElement {
- if p := e.prev; e.list != nil && p != &e.list.root {
- return p
- }
- return nil
-}
-
-// PacketIntervalList is a linked list of PacketIntervals.
-type PacketIntervalList struct {
- root PacketIntervalElement // sentinel list element, only &root, root.prev, and root.next are used
- len int // current list length excluding (this) sentinel element
-}
-
-// Init initializes or clears list l.
-func (l *PacketIntervalList) Init() *PacketIntervalList {
- l.root.next = &l.root
- l.root.prev = &l.root
- l.len = 0
- return l
-}
-
-// NewPacketIntervalList returns an initialized list.
-func NewPacketIntervalList() *PacketIntervalList { return new(PacketIntervalList).Init() }
-
-// Len returns the number of elements of list l.
-// The complexity is O(1).
-func (l *PacketIntervalList) Len() int { return l.len }
-
-// Front returns the first element of list l or nil if the list is empty.
-func (l *PacketIntervalList) Front() *PacketIntervalElement {
- if l.len == 0 {
- return nil
- }
- return l.root.next
-}
-
-// Back returns the last element of list l or nil if the list is empty.
-func (l *PacketIntervalList) Back() *PacketIntervalElement {
- if l.len == 0 {
- return nil
- }
- return l.root.prev
-}
-
-// lazyInit lazily initializes a zero List value.
-func (l *PacketIntervalList) lazyInit() {
- if l.root.next == nil {
- l.Init()
- }
-}
-
-// insert inserts e after at, increments l.len, and returns e.
-func (l *PacketIntervalList) insert(e, at *PacketIntervalElement) *PacketIntervalElement {
- n := at.next
- at.next = e
- e.prev = at
- e.next = n
- n.prev = e
- e.list = l
- l.len++
- return e
-}
-
-// insertValue is a convenience wrapper for insert(&Element{Value: v}, at).
-func (l *PacketIntervalList) insertValue(v PacketInterval, at *PacketIntervalElement) *PacketIntervalElement {
- return l.insert(&PacketIntervalElement{Value: v}, at)
-}
-
-// remove removes e from its list, decrements l.len, and returns e.
-func (l *PacketIntervalList) remove(e *PacketIntervalElement) *PacketIntervalElement {
- e.prev.next = e.next
- e.next.prev = e.prev
- e.next = nil // avoid memory leaks
- e.prev = nil // avoid memory leaks
- e.list = nil
- l.len--
- return e
-}
-
-// Remove removes e from l if e is an element of list l.
-// It returns the element value e.Value.
-// The element must not be nil.
-func (l *PacketIntervalList) Remove(e *PacketIntervalElement) PacketInterval {
- if e.list == l {
- // if e.list == l, l must have been initialized when e was inserted
- // in l or l == nil (e is a zero Element) and l.remove will crash
- l.remove(e)
- }
- return e.Value
-}
-
-// PushFront inserts a new element e with value v at the front of list l and returns e.
-func (l *PacketIntervalList) PushFront(v PacketInterval) *PacketIntervalElement {
- l.lazyInit()
- return l.insertValue(v, &l.root)
-}
-
-// PushBack inserts a new element e with value v at the back of list l and returns e.
-func (l *PacketIntervalList) PushBack(v PacketInterval) *PacketIntervalElement {
- l.lazyInit()
- return l.insertValue(v, l.root.prev)
-}
-
-// InsertBefore inserts a new element e with value v immediately before mark and returns e.
-// If mark is not an element of l, the list is not modified.
-// The mark must not be nil.
-func (l *PacketIntervalList) InsertBefore(v PacketInterval, mark *PacketIntervalElement) *PacketIntervalElement {
- if mark.list != l {
- return nil
- }
- // see comment in List.Remove about initialization of l
- return l.insertValue(v, mark.prev)
-}
-
-// InsertAfter inserts a new element e with value v immediately after mark and returns e.
-// If mark is not an element of l, the list is not modified.
-// The mark must not be nil.
-func (l *PacketIntervalList) InsertAfter(v PacketInterval, mark *PacketIntervalElement) *PacketIntervalElement {
- if mark.list != l {
- return nil
- }
- // see comment in List.Remove about initialization of l
- return l.insertValue(v, mark)
-}
-
-// MoveToFront moves element e to the front of list l.
-// If e is not an element of l, the list is not modified.
-// The element must not be nil.
-func (l *PacketIntervalList) MoveToFront(e *PacketIntervalElement) {
- if e.list != l || l.root.next == e {
- return
- }
- // see comment in List.Remove about initialization of l
- l.insert(l.remove(e), &l.root)
-}
-
-// MoveToBack moves element e to the back of list l.
-// If e is not an element of l, the list is not modified.
-// The element must not be nil.
-func (l *PacketIntervalList) MoveToBack(e *PacketIntervalElement) {
- if e.list != l || l.root.prev == e {
- return
- }
- // see comment in List.Remove about initialization of l
- l.insert(l.remove(e), l.root.prev)
-}
-
-// MoveBefore moves element e to its new position before mark.
-// If e or mark is not an element of l, or e == mark, the list is not modified.
-// The element and mark must not be nil.
-func (l *PacketIntervalList) MoveBefore(e, mark *PacketIntervalElement) {
- if e.list != l || e == mark || mark.list != l {
- return
- }
- l.insert(l.remove(e), mark.prev)
-}
-
-// MoveAfter moves element e to its new position after mark.
-// If e or mark is not an element of l, or e == mark, the list is not modified.
-// The element and mark must not be nil.
-func (l *PacketIntervalList) MoveAfter(e, mark *PacketIntervalElement) {
- if e.list != l || e == mark || mark.list != l {
- return
- }
- l.insert(l.remove(e), mark)
-}
-
-// PushBackList inserts a copy of an other list at the back of list l.
-// The lists l and other may be the same. They must not be nil.
-func (l *PacketIntervalList) PushBackList(other *PacketIntervalList) {
- l.lazyInit()
- for i, e := other.Len(), other.Front(); i > 0; i, e = i-1, e.Next() {
- l.insertValue(e.Value, l.root.prev)
- }
-}
-
-// PushFrontList inserts a copy of an other list at the front of list l.
-// The lists l and other may be the same. They must not be nil.
-func (l *PacketIntervalList) PushFrontList(other *PacketIntervalList) {
- l.lazyInit()
- for i, e := other.Len(), other.Back(); i > 0; i, e = i-1, e.Prev() {
- l.insertValue(e.Value, &l.root)
- }
-}
diff --git a/vendor/github.com/lucas-clemente/quic-go/internal/utils/streamframe_interval.go b/vendor/github.com/lucas-clemente/quic-go/internal/utils/streamframe_interval.go
deleted file mode 100644
index ec16d251b..000000000
--- a/vendor/github.com/lucas-clemente/quic-go/internal/utils/streamframe_interval.go
+++ /dev/null
@@ -1,9 +0,0 @@
-package utils
-
-import "github.com/lucas-clemente/quic-go/internal/protocol"
-
-// ByteInterval is an interval from one ByteCount to the other
-type ByteInterval struct {
- Start protocol.ByteCount
- End protocol.ByteCount
-}
diff --git a/vendor/github.com/lucas-clemente/quic-go/internal/wire/extended_header.go b/vendor/github.com/lucas-clemente/quic-go/internal/wire/extended_header.go
deleted file mode 100644
index 9d9edab25..000000000
--- a/vendor/github.com/lucas-clemente/quic-go/internal/wire/extended_header.go
+++ /dev/null
@@ -1,249 +0,0 @@
-package wire
-
-import (
- "bytes"
- "errors"
- "fmt"
- "io"
-
- "github.com/lucas-clemente/quic-go/internal/protocol"
- "github.com/lucas-clemente/quic-go/internal/utils"
- "github.com/lucas-clemente/quic-go/quicvarint"
-)
-
-// ErrInvalidReservedBits is returned when the reserved bits are incorrect.
-// When this error is returned, parsing continues, and an ExtendedHeader is returned.
-// This is necessary because we need to decrypt the packet in that case,
-// in order to avoid a timing side-channel.
-var ErrInvalidReservedBits = errors.New("invalid reserved bits")
-
-// ExtendedHeader is the header of a QUIC packet.
-type ExtendedHeader struct {
- Header
-
- typeByte byte
-
- KeyPhase protocol.KeyPhaseBit
-
- PacketNumberLen protocol.PacketNumberLen
- PacketNumber protocol.PacketNumber
-
- parsedLen protocol.ByteCount
-}
-
-func (h *ExtendedHeader) parse(b *bytes.Reader, v protocol.VersionNumber) (bool /* reserved bits valid */, error) {
- startLen := b.Len()
- // read the (now unencrypted) first byte
- var err error
- h.typeByte, err = b.ReadByte()
- if err != nil {
- return false, err
- }
- if _, err := b.Seek(int64(h.Header.ParsedLen())-1, io.SeekCurrent); err != nil {
- return false, err
- }
- var reservedBitsValid bool
- if h.IsLongHeader {
- reservedBitsValid, err = h.parseLongHeader(b, v)
- } else {
- reservedBitsValid, err = h.parseShortHeader(b, v)
- }
- if err != nil {
- return false, err
- }
- h.parsedLen = protocol.ByteCount(startLen - b.Len())
- return reservedBitsValid, err
-}
-
-func (h *ExtendedHeader) parseLongHeader(b *bytes.Reader, _ protocol.VersionNumber) (bool /* reserved bits valid */, error) {
- if err := h.readPacketNumber(b); err != nil {
- return false, err
- }
- if h.typeByte&0xc != 0 {
- return false, nil
- }
- return true, nil
-}
-
-func (h *ExtendedHeader) parseShortHeader(b *bytes.Reader, _ protocol.VersionNumber) (bool /* reserved bits valid */, error) {
- h.KeyPhase = protocol.KeyPhaseZero
- if h.typeByte&0x4 > 0 {
- h.KeyPhase = protocol.KeyPhaseOne
- }
-
- if err := h.readPacketNumber(b); err != nil {
- return false, err
- }
- if h.typeByte&0x18 != 0 {
- return false, nil
- }
- return true, nil
-}
-
-func (h *ExtendedHeader) readPacketNumber(b *bytes.Reader) error {
- h.PacketNumberLen = protocol.PacketNumberLen(h.typeByte&0x3) + 1
- switch h.PacketNumberLen {
- case protocol.PacketNumberLen1:
- n, err := b.ReadByte()
- if err != nil {
- return err
- }
- h.PacketNumber = protocol.PacketNumber(n)
- case protocol.PacketNumberLen2:
- n, err := utils.BigEndian.ReadUint16(b)
- if err != nil {
- return err
- }
- h.PacketNumber = protocol.PacketNumber(n)
- case protocol.PacketNumberLen3:
- n, err := utils.BigEndian.ReadUint24(b)
- if err != nil {
- return err
- }
- h.PacketNumber = protocol.PacketNumber(n)
- case protocol.PacketNumberLen4:
- n, err := utils.BigEndian.ReadUint32(b)
- if err != nil {
- return err
- }
- h.PacketNumber = protocol.PacketNumber(n)
- default:
- return fmt.Errorf("invalid packet number length: %d", h.PacketNumberLen)
- }
- return nil
-}
-
-// Write writes the Header.
-func (h *ExtendedHeader) Write(b *bytes.Buffer, ver protocol.VersionNumber) error {
- if h.DestConnectionID.Len() > protocol.MaxConnIDLen {
- return fmt.Errorf("invalid connection ID length: %d bytes", h.DestConnectionID.Len())
- }
- if h.SrcConnectionID.Len() > protocol.MaxConnIDLen {
- return fmt.Errorf("invalid connection ID length: %d bytes", h.SrcConnectionID.Len())
- }
- if h.IsLongHeader {
- return h.writeLongHeader(b, ver)
- }
- return h.writeShortHeader(b, ver)
-}
-
-func (h *ExtendedHeader) writeLongHeader(b *bytes.Buffer, version protocol.VersionNumber) error {
- var packetType uint8
- if version == protocol.Version2 {
- //nolint:exhaustive
- switch h.Type {
- case protocol.PacketTypeInitial:
- packetType = 0b01
- case protocol.PacketType0RTT:
- packetType = 0b10
- case protocol.PacketTypeHandshake:
- packetType = 0b11
- case protocol.PacketTypeRetry:
- packetType = 0b00
- }
- } else {
- //nolint:exhaustive
- switch h.Type {
- case protocol.PacketTypeInitial:
- packetType = 0b00
- case protocol.PacketType0RTT:
- packetType = 0b01
- case protocol.PacketTypeHandshake:
- packetType = 0b10
- case protocol.PacketTypeRetry:
- packetType = 0b11
- }
- }
- firstByte := 0xc0 | packetType<<4
- if h.Type != protocol.PacketTypeRetry {
- // Retry packets don't have a packet number
- firstByte |= uint8(h.PacketNumberLen - 1)
- }
-
- b.WriteByte(firstByte)
- utils.BigEndian.WriteUint32(b, uint32(h.Version))
- b.WriteByte(uint8(h.DestConnectionID.Len()))
- b.Write(h.DestConnectionID.Bytes())
- b.WriteByte(uint8(h.SrcConnectionID.Len()))
- b.Write(h.SrcConnectionID.Bytes())
-
- //nolint:exhaustive
- switch h.Type {
- case protocol.PacketTypeRetry:
- b.Write(h.Token)
- return nil
- case protocol.PacketTypeInitial:
- quicvarint.Write(b, uint64(len(h.Token)))
- b.Write(h.Token)
- }
- quicvarint.WriteWithLen(b, uint64(h.Length), 2)
- return h.writePacketNumber(b)
-}
-
-func (h *ExtendedHeader) writeShortHeader(b *bytes.Buffer, _ protocol.VersionNumber) error {
- typeByte := 0x40 | uint8(h.PacketNumberLen-1)
- if h.KeyPhase == protocol.KeyPhaseOne {
- typeByte |= byte(1 << 2)
- }
-
- b.WriteByte(typeByte)
- b.Write(h.DestConnectionID.Bytes())
- return h.writePacketNumber(b)
-}
-
-func (h *ExtendedHeader) writePacketNumber(b *bytes.Buffer) error {
- switch h.PacketNumberLen {
- case protocol.PacketNumberLen1:
- b.WriteByte(uint8(h.PacketNumber))
- case protocol.PacketNumberLen2:
- utils.BigEndian.WriteUint16(b, uint16(h.PacketNumber))
- case protocol.PacketNumberLen3:
- utils.BigEndian.WriteUint24(b, uint32(h.PacketNumber))
- case protocol.PacketNumberLen4:
- utils.BigEndian.WriteUint32(b, uint32(h.PacketNumber))
- default:
- return fmt.Errorf("invalid packet number length: %d", h.PacketNumberLen)
- }
- return nil
-}
-
-// ParsedLen returns the number of bytes that were consumed when parsing the header
-func (h *ExtendedHeader) ParsedLen() protocol.ByteCount {
- return h.parsedLen
-}
-
-// GetLength determines the length of the Header.
-func (h *ExtendedHeader) GetLength(v protocol.VersionNumber) protocol.ByteCount {
- if h.IsLongHeader {
- length := 1 /* type byte */ + 4 /* version */ + 1 /* dest conn ID len */ + protocol.ByteCount(h.DestConnectionID.Len()) + 1 /* src conn ID len */ + protocol.ByteCount(h.SrcConnectionID.Len()) + protocol.ByteCount(h.PacketNumberLen) + 2 /* length */
- if h.Type == protocol.PacketTypeInitial {
- length += quicvarint.Len(uint64(len(h.Token))) + protocol.ByteCount(len(h.Token))
- }
- return length
- }
-
- length := protocol.ByteCount(1 /* type byte */ + h.DestConnectionID.Len())
- length += protocol.ByteCount(h.PacketNumberLen)
- return length
-}
-
-// Log logs the Header
-func (h *ExtendedHeader) Log(logger utils.Logger) {
- if h.IsLongHeader {
- var token string
- if h.Type == protocol.PacketTypeInitial || h.Type == protocol.PacketTypeRetry {
- if len(h.Token) == 0 {
- token = "Token: (empty), "
- } else {
- token = fmt.Sprintf("Token: %#x, ", h.Token)
- }
- if h.Type == protocol.PacketTypeRetry {
- logger.Debugf("\tLong Header{Type: %s, DestConnectionID: %s, SrcConnectionID: %s, %sVersion: %s}", h.Type, h.DestConnectionID, h.SrcConnectionID, token, h.Version)
- return
- }
- }
- logger.Debugf("\tLong Header{Type: %s, DestConnectionID: %s, SrcConnectionID: %s, %sPacketNumber: %d, PacketNumberLen: %d, Length: %d, Version: %s}", h.Type, h.DestConnectionID, h.SrcConnectionID, token, h.PacketNumber, h.PacketNumberLen, h.Length, h.Version)
- } else {
- logger.Debugf("\tShort Header{DestConnectionID: %s, PacketNumber: %d, PacketNumberLen: %d, KeyPhase: %s}", h.DestConnectionID, h.PacketNumber, h.PacketNumberLen, h.KeyPhase)
- }
-}
diff --git a/vendor/github.com/lucas-clemente/quic-go/logging/mockgen.go b/vendor/github.com/lucas-clemente/quic-go/logging/mockgen.go
deleted file mode 100644
index c5aa8a169..000000000
--- a/vendor/github.com/lucas-clemente/quic-go/logging/mockgen.go
+++ /dev/null
@@ -1,4 +0,0 @@
-package logging
-
-//go:generate sh -c "mockgen -package logging -self_package github.com/lucas-clemente/quic-go/logging -destination mock_connection_tracer_test.go github.com/lucas-clemente/quic-go/logging ConnectionTracer"
-//go:generate sh -c "mockgen -package logging -self_package github.com/lucas-clemente/quic-go/logging -destination mock_tracer_test.go github.com/lucas-clemente/quic-go/logging Tracer"
diff --git a/vendor/github.com/lucas-clemente/quic-go/mockgen.go b/vendor/github.com/lucas-clemente/quic-go/mockgen.go
deleted file mode 100644
index 22c2c0e74..000000000
--- a/vendor/github.com/lucas-clemente/quic-go/mockgen.go
+++ /dev/null
@@ -1,27 +0,0 @@
-package quic
-
-//go:generate sh -c "./mockgen_private.sh quic mock_send_conn_test.go github.com/lucas-clemente/quic-go sendConn"
-//go:generate sh -c "./mockgen_private.sh quic mock_sender_test.go github.com/lucas-clemente/quic-go sender"
-//go:generate sh -c "./mockgen_private.sh quic mock_stream_internal_test.go github.com/lucas-clemente/quic-go streamI"
-//go:generate sh -c "./mockgen_private.sh quic mock_crypto_stream_test.go github.com/lucas-clemente/quic-go cryptoStream"
-//go:generate sh -c "./mockgen_private.sh quic mock_receive_stream_internal_test.go github.com/lucas-clemente/quic-go receiveStreamI"
-//go:generate sh -c "./mockgen_private.sh quic mock_send_stream_internal_test.go github.com/lucas-clemente/quic-go sendStreamI"
-//go:generate sh -c "./mockgen_private.sh quic mock_stream_sender_test.go github.com/lucas-clemente/quic-go streamSender"
-//go:generate sh -c "./mockgen_private.sh quic mock_stream_getter_test.go github.com/lucas-clemente/quic-go streamGetter"
-//go:generate sh -c "./mockgen_private.sh quic mock_crypto_data_handler_test.go github.com/lucas-clemente/quic-go cryptoDataHandler"
-//go:generate sh -c "./mockgen_private.sh quic mock_frame_source_test.go github.com/lucas-clemente/quic-go frameSource"
-//go:generate sh -c "./mockgen_private.sh quic mock_ack_frame_source_test.go github.com/lucas-clemente/quic-go ackFrameSource"
-//go:generate sh -c "./mockgen_private.sh quic mock_stream_manager_test.go github.com/lucas-clemente/quic-go streamManager"
-//go:generate sh -c "./mockgen_private.sh quic mock_sealing_manager_test.go github.com/lucas-clemente/quic-go sealingManager"
-//go:generate sh -c "./mockgen_private.sh quic mock_unpacker_test.go github.com/lucas-clemente/quic-go unpacker"
-//go:generate sh -c "./mockgen_private.sh quic mock_packer_test.go github.com/lucas-clemente/quic-go packer"
-//go:generate sh -c "./mockgen_private.sh quic mock_mtu_discoverer_test.go github.com/lucas-clemente/quic-go mtuDiscoverer"
-//go:generate sh -c "./mockgen_private.sh quic mock_conn_runner_test.go github.com/lucas-clemente/quic-go connRunner"
-//go:generate sh -c "./mockgen_private.sh quic mock_quic_conn_test.go github.com/lucas-clemente/quic-go quicConn"
-//go:generate sh -c "./mockgen_private.sh quic mock_packet_handler_test.go github.com/lucas-clemente/quic-go packetHandler"
-//go:generate sh -c "./mockgen_private.sh quic mock_unknown_packet_handler_test.go github.com/lucas-clemente/quic-go unknownPacketHandler"
-//go:generate sh -c "./mockgen_private.sh quic mock_packet_handler_manager_test.go github.com/lucas-clemente/quic-go packetHandlerManager"
-//go:generate sh -c "./mockgen_private.sh quic mock_multiplexer_test.go github.com/lucas-clemente/quic-go multiplexer"
-//go:generate sh -c "./mockgen_private.sh quic mock_batch_conn_test.go github.com/lucas-clemente/quic-go batchConn"
-//go:generate sh -c "mockgen -package quic -self_package github.com/lucas-clemente/quic-go -destination mock_token_store_test.go github.com/lucas-clemente/quic-go TokenStore"
-//go:generate sh -c "mockgen -package quic -self_package github.com/lucas-clemente/quic-go -destination mock_packetconn_test.go net PacketConn"
diff --git a/vendor/github.com/lucas-clemente/quic-go/packet_packer.go b/vendor/github.com/lucas-clemente/quic-go/packet_packer.go
deleted file mode 100644
index 1d037ab28..000000000
--- a/vendor/github.com/lucas-clemente/quic-go/packet_packer.go
+++ /dev/null
@@ -1,894 +0,0 @@
-package quic
-
-import (
- "bytes"
- "errors"
- "fmt"
- "net"
- "time"
-
- "github.com/lucas-clemente/quic-go/internal/ackhandler"
- "github.com/lucas-clemente/quic-go/internal/handshake"
- "github.com/lucas-clemente/quic-go/internal/protocol"
- "github.com/lucas-clemente/quic-go/internal/qerr"
- "github.com/lucas-clemente/quic-go/internal/utils"
- "github.com/lucas-clemente/quic-go/internal/wire"
-)
-
-type packer interface {
- PackCoalescedPacket() (*coalescedPacket, error)
- PackPacket() (*packedPacket, error)
- MaybePackProbePacket(protocol.EncryptionLevel) (*packedPacket, error)
- MaybePackAckPacket(handshakeConfirmed bool) (*packedPacket, error)
- PackConnectionClose(*qerr.TransportError) (*coalescedPacket, error)
- PackApplicationClose(*qerr.ApplicationError) (*coalescedPacket, error)
-
- SetMaxPacketSize(protocol.ByteCount)
- PackMTUProbePacket(ping ackhandler.Frame, size protocol.ByteCount) (*packedPacket, error)
-
- HandleTransportParameters(*wire.TransportParameters)
- SetToken([]byte)
-}
-
-type sealer interface {
- handshake.LongHeaderSealer
-}
-
-type payload struct {
- frames []ackhandler.Frame
- ack *wire.AckFrame
- length protocol.ByteCount
-}
-
-type packedPacket struct {
- buffer *packetBuffer
- *packetContents
-}
-
-type packetContents struct {
- header *wire.ExtendedHeader
- ack *wire.AckFrame
- frames []ackhandler.Frame
-
- length protocol.ByteCount
-
- isMTUProbePacket bool
-}
-
-type coalescedPacket struct {
- buffer *packetBuffer
- packets []*packetContents
-}
-
-func (p *packetContents) EncryptionLevel() protocol.EncryptionLevel {
- if !p.header.IsLongHeader {
- return protocol.Encryption1RTT
- }
- //nolint:exhaustive // Will never be called for Retry packets (and they don't have encrypted data).
- switch p.header.Type {
- case protocol.PacketTypeInitial:
- return protocol.EncryptionInitial
- case protocol.PacketTypeHandshake:
- return protocol.EncryptionHandshake
- case protocol.PacketType0RTT:
- return protocol.Encryption0RTT
- default:
- panic("can't determine encryption level")
- }
-}
-
-func (p *packetContents) IsAckEliciting() bool {
- return ackhandler.HasAckElicitingFrames(p.frames)
-}
-
-func (p *packetContents) ToAckHandlerPacket(now time.Time, q *retransmissionQueue) *ackhandler.Packet {
- largestAcked := protocol.InvalidPacketNumber
- if p.ack != nil {
- largestAcked = p.ack.LargestAcked()
- }
- encLevel := p.EncryptionLevel()
- for i := range p.frames {
- if p.frames[i].OnLost != nil {
- continue
- }
- switch encLevel {
- case protocol.EncryptionInitial:
- p.frames[i].OnLost = q.AddInitial
- case protocol.EncryptionHandshake:
- p.frames[i].OnLost = q.AddHandshake
- case protocol.Encryption0RTT, protocol.Encryption1RTT:
- p.frames[i].OnLost = q.AddAppData
- }
- }
- return &ackhandler.Packet{
- PacketNumber: p.header.PacketNumber,
- LargestAcked: largestAcked,
- Frames: p.frames,
- Length: p.length,
- EncryptionLevel: encLevel,
- SendTime: now,
- IsPathMTUProbePacket: p.isMTUProbePacket,
- }
-}
-
-func getMaxPacketSize(addr net.Addr) protocol.ByteCount {
- maxSize := protocol.ByteCount(protocol.MinInitialPacketSize)
- // If this is not a UDP address, we don't know anything about the MTU.
- // Use the minimum size of an Initial packet as the max packet size.
- if udpAddr, ok := addr.(*net.UDPAddr); ok {
- if utils.IsIPv4(udpAddr.IP) {
- maxSize = protocol.InitialPacketSizeIPv4
- } else {
- maxSize = protocol.InitialPacketSizeIPv6
- }
- }
- return maxSize
-}
-
-type packetNumberManager interface {
- PeekPacketNumber(protocol.EncryptionLevel) (protocol.PacketNumber, protocol.PacketNumberLen)
- PopPacketNumber(protocol.EncryptionLevel) protocol.PacketNumber
-}
-
-type sealingManager interface {
- GetInitialSealer() (handshake.LongHeaderSealer, error)
- GetHandshakeSealer() (handshake.LongHeaderSealer, error)
- Get0RTTSealer() (handshake.LongHeaderSealer, error)
- Get1RTTSealer() (handshake.ShortHeaderSealer, error)
-}
-
-type frameSource interface {
- HasData() bool
- AppendStreamFrames([]ackhandler.Frame, protocol.ByteCount) ([]ackhandler.Frame, protocol.ByteCount)
- AppendControlFrames([]ackhandler.Frame, protocol.ByteCount) ([]ackhandler.Frame, protocol.ByteCount)
-}
-
-type ackFrameSource interface {
- GetAckFrame(encLevel protocol.EncryptionLevel, onlyIfQueued bool) *wire.AckFrame
-}
-
-type packetPacker struct {
- srcConnID protocol.ConnectionID
- getDestConnID func() protocol.ConnectionID
-
- perspective protocol.Perspective
- version protocol.VersionNumber
- cryptoSetup sealingManager
-
- initialStream cryptoStream
- handshakeStream cryptoStream
-
- token []byte
-
- pnManager packetNumberManager
- framer frameSource
- acks ackFrameSource
- datagramQueue *datagramQueue
- retransmissionQueue *retransmissionQueue
-
- maxPacketSize protocol.ByteCount
- numNonAckElicitingAcks int
-}
-
-var _ packer = &packetPacker{}
-
-func newPacketPacker(
- srcConnID protocol.ConnectionID,
- getDestConnID func() protocol.ConnectionID,
- initialStream cryptoStream,
- handshakeStream cryptoStream,
- packetNumberManager packetNumberManager,
- retransmissionQueue *retransmissionQueue,
- remoteAddr net.Addr, // only used for determining the max packet size
- cryptoSetup sealingManager,
- framer frameSource,
- acks ackFrameSource,
- datagramQueue *datagramQueue,
- perspective protocol.Perspective,
- version protocol.VersionNumber,
-) *packetPacker {
- return &packetPacker{
- cryptoSetup: cryptoSetup,
- getDestConnID: getDestConnID,
- srcConnID: srcConnID,
- initialStream: initialStream,
- handshakeStream: handshakeStream,
- retransmissionQueue: retransmissionQueue,
- datagramQueue: datagramQueue,
- perspective: perspective,
- version: version,
- framer: framer,
- acks: acks,
- pnManager: packetNumberManager,
- maxPacketSize: getMaxPacketSize(remoteAddr),
- }
-}
-
-// PackConnectionClose packs a packet that closes the connection with a transport error.
-func (p *packetPacker) PackConnectionClose(e *qerr.TransportError) (*coalescedPacket, error) {
- var reason string
- // don't send details of crypto errors
- if !e.ErrorCode.IsCryptoError() {
- reason = e.ErrorMessage
- }
- return p.packConnectionClose(false, uint64(e.ErrorCode), e.FrameType, reason)
-}
-
-// PackApplicationClose packs a packet that closes the connection with an application error.
-func (p *packetPacker) PackApplicationClose(e *qerr.ApplicationError) (*coalescedPacket, error) {
- return p.packConnectionClose(true, uint64(e.ErrorCode), 0, e.ErrorMessage)
-}
-
-func (p *packetPacker) packConnectionClose(
- isApplicationError bool,
- errorCode uint64,
- frameType uint64,
- reason string,
-) (*coalescedPacket, error) {
- var sealers [4]sealer
- var hdrs [4]*wire.ExtendedHeader
- var payloads [4]*payload
- var size protocol.ByteCount
- var numPackets uint8
- encLevels := [4]protocol.EncryptionLevel{protocol.EncryptionInitial, protocol.EncryptionHandshake, protocol.Encryption0RTT, protocol.Encryption1RTT}
- for i, encLevel := range encLevels {
- if p.perspective == protocol.PerspectiveServer && encLevel == protocol.Encryption0RTT {
- continue
- }
- ccf := &wire.ConnectionCloseFrame{
- IsApplicationError: isApplicationError,
- ErrorCode: errorCode,
- FrameType: frameType,
- ReasonPhrase: reason,
- }
- // don't send application errors in Initial or Handshake packets
- if isApplicationError && (encLevel == protocol.EncryptionInitial || encLevel == protocol.EncryptionHandshake) {
- ccf.IsApplicationError = false
- ccf.ErrorCode = uint64(qerr.ApplicationErrorErrorCode)
- ccf.ReasonPhrase = ""
- }
- payload := &payload{
- frames: []ackhandler.Frame{{Frame: ccf}},
- length: ccf.Length(p.version),
- }
-
- var sealer sealer
- var err error
- var keyPhase protocol.KeyPhaseBit // only set for 1-RTT
- switch encLevel {
- case protocol.EncryptionInitial:
- sealer, err = p.cryptoSetup.GetInitialSealer()
- case protocol.EncryptionHandshake:
- sealer, err = p.cryptoSetup.GetHandshakeSealer()
- case protocol.Encryption0RTT:
- sealer, err = p.cryptoSetup.Get0RTTSealer()
- case protocol.Encryption1RTT:
- var s handshake.ShortHeaderSealer
- s, err = p.cryptoSetup.Get1RTTSealer()
- if err == nil {
- keyPhase = s.KeyPhase()
- }
- sealer = s
- }
- if err == handshake.ErrKeysNotYetAvailable || err == handshake.ErrKeysDropped {
- continue
- }
- if err != nil {
- return nil, err
- }
- sealers[i] = sealer
- var hdr *wire.ExtendedHeader
- if encLevel == protocol.Encryption1RTT {
- hdr = p.getShortHeader(keyPhase)
- } else {
- hdr = p.getLongHeader(encLevel)
- }
- hdrs[i] = hdr
- payloads[i] = payload
- size += p.packetLength(hdr, payload) + protocol.ByteCount(sealer.Overhead())
- numPackets++
- }
- contents := make([]*packetContents, 0, numPackets)
- buffer := getPacketBuffer()
- for i, encLevel := range encLevels {
- if sealers[i] == nil {
- continue
- }
- var paddingLen protocol.ByteCount
- if encLevel == protocol.EncryptionInitial {
- paddingLen = p.initialPaddingLen(payloads[i].frames, size)
- }
- c, err := p.appendPacket(buffer, hdrs[i], payloads[i], paddingLen, encLevel, sealers[i], false)
- if err != nil {
- return nil, err
- }
- contents = append(contents, c)
- }
- return &coalescedPacket{buffer: buffer, packets: contents}, nil
-}
-
-// packetLength calculates the length of the serialized packet.
-// It takes into account that packets that have a tiny payload need to be padded,
-// such that len(payload) + packet number len >= 4 + AEAD overhead
-func (p *packetPacker) packetLength(hdr *wire.ExtendedHeader, payload *payload) protocol.ByteCount {
- var paddingLen protocol.ByteCount
- pnLen := protocol.ByteCount(hdr.PacketNumberLen)
- if payload.length < 4-pnLen {
- paddingLen = 4 - pnLen - payload.length
- }
- return hdr.GetLength(p.version) + payload.length + paddingLen
-}
-
-func (p *packetPacker) MaybePackAckPacket(handshakeConfirmed bool) (*packedPacket, error) {
- var encLevel protocol.EncryptionLevel
- var ack *wire.AckFrame
- if !handshakeConfirmed {
- ack = p.acks.GetAckFrame(protocol.EncryptionInitial, true)
- if ack != nil {
- encLevel = protocol.EncryptionInitial
- } else {
- ack = p.acks.GetAckFrame(protocol.EncryptionHandshake, true)
- if ack != nil {
- encLevel = protocol.EncryptionHandshake
- }
- }
- }
- if ack == nil {
- ack = p.acks.GetAckFrame(protocol.Encryption1RTT, true)
- if ack == nil {
- return nil, nil
- }
- encLevel = protocol.Encryption1RTT
- }
- payload := &payload{
- ack: ack,
- length: ack.Length(p.version),
- }
-
- sealer, hdr, err := p.getSealerAndHeader(encLevel)
- if err != nil {
- return nil, err
- }
- return p.writeSinglePacket(hdr, payload, encLevel, sealer)
-}
-
-// size is the expected size of the packet, if no padding was applied.
-func (p *packetPacker) initialPaddingLen(frames []ackhandler.Frame, size protocol.ByteCount) protocol.ByteCount {
- // For the server, only ack-eliciting Initial packets need to be padded.
- if p.perspective == protocol.PerspectiveServer && !ackhandler.HasAckElicitingFrames(frames) {
- return 0
- }
- if size >= p.maxPacketSize {
- return 0
- }
- return p.maxPacketSize - size
-}
-
-// PackCoalescedPacket packs a new packet.
-// It packs an Initial / Handshake if there is data to send in these packet number spaces.
-// It should only be called before the handshake is confirmed.
-func (p *packetPacker) PackCoalescedPacket() (*coalescedPacket, error) {
- maxPacketSize := p.maxPacketSize
- if p.perspective == protocol.PerspectiveClient {
- maxPacketSize = protocol.MinInitialPacketSize
- }
- var initialHdr, handshakeHdr, appDataHdr *wire.ExtendedHeader
- var initialPayload, handshakePayload, appDataPayload *payload
- var numPackets int
- // Try packing an Initial packet.
- initialSealer, err := p.cryptoSetup.GetInitialSealer()
- if err != nil && err != handshake.ErrKeysDropped {
- return nil, err
- }
- var size protocol.ByteCount
- if initialSealer != nil {
- initialHdr, initialPayload = p.maybeGetCryptoPacket(maxPacketSize-protocol.ByteCount(initialSealer.Overhead()), size, protocol.EncryptionInitial)
- if initialPayload != nil {
- size += p.packetLength(initialHdr, initialPayload) + protocol.ByteCount(initialSealer.Overhead())
- numPackets++
- }
- }
-
- // Add a Handshake packet.
- var handshakeSealer sealer
- if size < maxPacketSize-protocol.MinCoalescedPacketSize {
- var err error
- handshakeSealer, err = p.cryptoSetup.GetHandshakeSealer()
- if err != nil && err != handshake.ErrKeysDropped && err != handshake.ErrKeysNotYetAvailable {
- return nil, err
- }
- if handshakeSealer != nil {
- handshakeHdr, handshakePayload = p.maybeGetCryptoPacket(maxPacketSize-size-protocol.ByteCount(handshakeSealer.Overhead()), size, protocol.EncryptionHandshake)
- if handshakePayload != nil {
- s := p.packetLength(handshakeHdr, handshakePayload) + protocol.ByteCount(handshakeSealer.Overhead())
- size += s
- numPackets++
- }
- }
- }
-
- // Add a 0-RTT / 1-RTT packet.
- var appDataSealer sealer
- appDataEncLevel := protocol.Encryption1RTT
- if size < maxPacketSize-protocol.MinCoalescedPacketSize {
- var err error
- appDataSealer, appDataHdr, appDataPayload = p.maybeGetAppDataPacket(maxPacketSize-size, size)
- if err != nil {
- return nil, err
- }
- if appDataHdr != nil {
- if appDataHdr.IsLongHeader {
- appDataEncLevel = protocol.Encryption0RTT
- }
- if appDataPayload != nil {
- size += p.packetLength(appDataHdr, appDataPayload) + protocol.ByteCount(appDataSealer.Overhead())
- numPackets++
- }
- }
- }
-
- if numPackets == 0 {
- return nil, nil
- }
-
- buffer := getPacketBuffer()
- packet := &coalescedPacket{
- buffer: buffer,
- packets: make([]*packetContents, 0, numPackets),
- }
- if initialPayload != nil {
- padding := p.initialPaddingLen(initialPayload.frames, size)
- cont, err := p.appendPacket(buffer, initialHdr, initialPayload, padding, protocol.EncryptionInitial, initialSealer, false)
- if err != nil {
- return nil, err
- }
- packet.packets = append(packet.packets, cont)
- }
- if handshakePayload != nil {
- cont, err := p.appendPacket(buffer, handshakeHdr, handshakePayload, 0, protocol.EncryptionHandshake, handshakeSealer, false)
- if err != nil {
- return nil, err
- }
- packet.packets = append(packet.packets, cont)
- }
- if appDataPayload != nil {
- cont, err := p.appendPacket(buffer, appDataHdr, appDataPayload, 0, appDataEncLevel, appDataSealer, false)
- if err != nil {
- return nil, err
- }
- packet.packets = append(packet.packets, cont)
- }
- return packet, nil
-}
-
-// PackPacket packs a packet in the application data packet number space.
-// It should be called after the handshake is confirmed.
-func (p *packetPacker) PackPacket() (*packedPacket, error) {
- sealer, hdr, payload := p.maybeGetAppDataPacket(p.maxPacketSize, 0)
- if payload == nil {
- return nil, nil
- }
- buffer := getPacketBuffer()
- encLevel := protocol.Encryption1RTT
- if hdr.IsLongHeader {
- encLevel = protocol.Encryption0RTT
- }
- cont, err := p.appendPacket(buffer, hdr, payload, 0, encLevel, sealer, false)
- if err != nil {
- return nil, err
- }
- return &packedPacket{
- buffer: buffer,
- packetContents: cont,
- }, nil
-}
-
-func (p *packetPacker) maybeGetCryptoPacket(maxPacketSize, currentSize protocol.ByteCount, encLevel protocol.EncryptionLevel) (*wire.ExtendedHeader, *payload) {
- var s cryptoStream
- var hasRetransmission bool
- //nolint:exhaustive // Initial and Handshake are the only two encryption levels here.
- switch encLevel {
- case protocol.EncryptionInitial:
- s = p.initialStream
- hasRetransmission = p.retransmissionQueue.HasInitialData()
- case protocol.EncryptionHandshake:
- s = p.handshakeStream
- hasRetransmission = p.retransmissionQueue.HasHandshakeData()
- }
-
- hasData := s.HasData()
- var ack *wire.AckFrame
- if encLevel == protocol.EncryptionInitial || currentSize == 0 {
- ack = p.acks.GetAckFrame(encLevel, !hasRetransmission && !hasData)
- }
- if !hasData && !hasRetransmission && ack == nil {
- // nothing to send
- return nil, nil
- }
-
- var payload payload
- if ack != nil {
- payload.ack = ack
- payload.length = ack.Length(p.version)
- maxPacketSize -= payload.length
- }
- hdr := p.getLongHeader(encLevel)
- maxPacketSize -= hdr.GetLength(p.version)
- if hasRetransmission {
- for {
- var f wire.Frame
- //nolint:exhaustive // 0-RTT packets can't contain any retransmission.s
- switch encLevel {
- case protocol.EncryptionInitial:
- f = p.retransmissionQueue.GetInitialFrame(maxPacketSize)
- case protocol.EncryptionHandshake:
- f = p.retransmissionQueue.GetHandshakeFrame(maxPacketSize)
- }
- if f == nil {
- break
- }
- payload.frames = append(payload.frames, ackhandler.Frame{Frame: f})
- frameLen := f.Length(p.version)
- payload.length += frameLen
- maxPacketSize -= frameLen
- }
- } else if s.HasData() {
- cf := s.PopCryptoFrame(maxPacketSize)
- payload.frames = []ackhandler.Frame{{Frame: cf}}
- payload.length += cf.Length(p.version)
- }
- return hdr, &payload
-}
-
-func (p *packetPacker) maybeGetAppDataPacket(maxPacketSize, currentSize protocol.ByteCount) (sealer, *wire.ExtendedHeader, *payload) {
- var sealer sealer
- var encLevel protocol.EncryptionLevel
- var hdr *wire.ExtendedHeader
- oneRTTSealer, err := p.cryptoSetup.Get1RTTSealer()
- if err == nil {
- encLevel = protocol.Encryption1RTT
- sealer = oneRTTSealer
- hdr = p.getShortHeader(oneRTTSealer.KeyPhase())
- } else {
- // 1-RTT sealer not yet available
- if p.perspective != protocol.PerspectiveClient {
- return nil, nil, nil
- }
- sealer, err = p.cryptoSetup.Get0RTTSealer()
- if sealer == nil || err != nil {
- return nil, nil, nil
- }
- encLevel = protocol.Encryption0RTT
- hdr = p.getLongHeader(protocol.Encryption0RTT)
- }
-
- maxPayloadSize := maxPacketSize - hdr.GetLength(p.version) - protocol.ByteCount(sealer.Overhead())
- payload := p.maybeGetAppDataPacketWithEncLevel(maxPayloadSize, encLevel == protocol.Encryption1RTT && currentSize == 0)
- return sealer, hdr, payload
-}
-
-func (p *packetPacker) maybeGetAppDataPacketWithEncLevel(maxPayloadSize protocol.ByteCount, ackAllowed bool) *payload {
- payload := p.composeNextPacket(maxPayloadSize, ackAllowed)
-
- // check if we have anything to send
- if len(payload.frames) == 0 {
- if payload.ack == nil {
- return nil
- }
- // the packet only contains an ACK
- if p.numNonAckElicitingAcks >= protocol.MaxNonAckElicitingAcks {
- ping := &wire.PingFrame{}
- // don't retransmit the PING frame when it is lost
- payload.frames = append(payload.frames, ackhandler.Frame{Frame: ping, OnLost: func(wire.Frame) {}})
- payload.length += ping.Length(p.version)
- p.numNonAckElicitingAcks = 0
- } else {
- p.numNonAckElicitingAcks++
- }
- } else {
- p.numNonAckElicitingAcks = 0
- }
- return payload
-}
-
-func (p *packetPacker) composeNextPacket(maxFrameSize protocol.ByteCount, ackAllowed bool) *payload {
- payload := &payload{frames: make([]ackhandler.Frame, 0, 1)}
-
- var hasDatagram bool
- if p.datagramQueue != nil {
- if datagram := p.datagramQueue.Get(); datagram != nil {
- payload.frames = append(payload.frames, ackhandler.Frame{
- Frame: datagram,
- // set it to a no-op. Then we won't set the default callback, which would retransmit the frame.
- OnLost: func(wire.Frame) {},
- })
- payload.length += datagram.Length(p.version)
- hasDatagram = true
- }
- }
-
- var ack *wire.AckFrame
- hasData := p.framer.HasData()
- hasRetransmission := p.retransmissionQueue.HasAppData()
- // TODO: make sure ACKs are sent when a lot of DATAGRAMs are queued
- if !hasDatagram && ackAllowed {
- ack = p.acks.GetAckFrame(protocol.Encryption1RTT, !hasRetransmission && !hasData)
- if ack != nil {
- payload.ack = ack
- payload.length += ack.Length(p.version)
- }
- }
-
- if ack == nil && !hasData && !hasRetransmission {
- return payload
- }
-
- if hasRetransmission {
- for {
- remainingLen := maxFrameSize - payload.length
- if remainingLen < protocol.MinStreamFrameSize {
- break
- }
- f := p.retransmissionQueue.GetAppDataFrame(remainingLen)
- if f == nil {
- break
- }
- payload.frames = append(payload.frames, ackhandler.Frame{Frame: f})
- payload.length += f.Length(p.version)
- }
- }
-
- if hasData {
- var lengthAdded protocol.ByteCount
- payload.frames, lengthAdded = p.framer.AppendControlFrames(payload.frames, maxFrameSize-payload.length)
- payload.length += lengthAdded
-
- payload.frames, lengthAdded = p.framer.AppendStreamFrames(payload.frames, maxFrameSize-payload.length)
- payload.length += lengthAdded
- }
- return payload
-}
-
-func (p *packetPacker) MaybePackProbePacket(encLevel protocol.EncryptionLevel) (*packedPacket, error) {
- var hdr *wire.ExtendedHeader
- var payload *payload
- var sealer sealer
- //nolint:exhaustive // Probe packets are never sent for 0-RTT.
- switch encLevel {
- case protocol.EncryptionInitial:
- var err error
- sealer, err = p.cryptoSetup.GetInitialSealer()
- if err != nil {
- return nil, err
- }
- hdr, payload = p.maybeGetCryptoPacket(p.maxPacketSize-protocol.ByteCount(sealer.Overhead()), 0, protocol.EncryptionInitial)
- case protocol.EncryptionHandshake:
- var err error
- sealer, err = p.cryptoSetup.GetHandshakeSealer()
- if err != nil {
- return nil, err
- }
- hdr, payload = p.maybeGetCryptoPacket(p.maxPacketSize-protocol.ByteCount(sealer.Overhead()), 0, protocol.EncryptionHandshake)
- case protocol.Encryption1RTT:
- oneRTTSealer, err := p.cryptoSetup.Get1RTTSealer()
- if err != nil {
- return nil, err
- }
- sealer = oneRTTSealer
- hdr = p.getShortHeader(oneRTTSealer.KeyPhase())
- payload = p.maybeGetAppDataPacketWithEncLevel(p.maxPacketSize-protocol.ByteCount(sealer.Overhead())-hdr.GetLength(p.version), true)
- default:
- panic("unknown encryption level")
- }
- if payload == nil {
- return nil, nil
- }
- size := p.packetLength(hdr, payload) + protocol.ByteCount(sealer.Overhead())
- var padding protocol.ByteCount
- if encLevel == protocol.EncryptionInitial {
- padding = p.initialPaddingLen(payload.frames, size)
- }
- buffer := getPacketBuffer()
- cont, err := p.appendPacket(buffer, hdr, payload, padding, encLevel, sealer, false)
- if err != nil {
- return nil, err
- }
- return &packedPacket{
- buffer: buffer,
- packetContents: cont,
- }, nil
-}
-
-func (p *packetPacker) PackMTUProbePacket(ping ackhandler.Frame, size protocol.ByteCount) (*packedPacket, error) {
- payload := &payload{
- frames: []ackhandler.Frame{ping},
- length: ping.Length(p.version),
- }
- buffer := getPacketBuffer()
- sealer, err := p.cryptoSetup.Get1RTTSealer()
- if err != nil {
- return nil, err
- }
- hdr := p.getShortHeader(sealer.KeyPhase())
- padding := size - p.packetLength(hdr, payload) - protocol.ByteCount(sealer.Overhead())
- contents, err := p.appendPacket(buffer, hdr, payload, padding, protocol.Encryption1RTT, sealer, true)
- if err != nil {
- return nil, err
- }
- contents.isMTUProbePacket = true
- return &packedPacket{
- buffer: buffer,
- packetContents: contents,
- }, nil
-}
-
-func (p *packetPacker) getSealerAndHeader(encLevel protocol.EncryptionLevel) (sealer, *wire.ExtendedHeader, error) {
- switch encLevel {
- case protocol.EncryptionInitial:
- sealer, err := p.cryptoSetup.GetInitialSealer()
- if err != nil {
- return nil, nil, err
- }
- hdr := p.getLongHeader(protocol.EncryptionInitial)
- return sealer, hdr, nil
- case protocol.Encryption0RTT:
- sealer, err := p.cryptoSetup.Get0RTTSealer()
- if err != nil {
- return nil, nil, err
- }
- hdr := p.getLongHeader(protocol.Encryption0RTT)
- return sealer, hdr, nil
- case protocol.EncryptionHandshake:
- sealer, err := p.cryptoSetup.GetHandshakeSealer()
- if err != nil {
- return nil, nil, err
- }
- hdr := p.getLongHeader(protocol.EncryptionHandshake)
- return sealer, hdr, nil
- case protocol.Encryption1RTT:
- sealer, err := p.cryptoSetup.Get1RTTSealer()
- if err != nil {
- return nil, nil, err
- }
- hdr := p.getShortHeader(sealer.KeyPhase())
- return sealer, hdr, nil
- default:
- return nil, nil, fmt.Errorf("unexpected encryption level: %s", encLevel)
- }
-}
-
-func (p *packetPacker) getShortHeader(kp protocol.KeyPhaseBit) *wire.ExtendedHeader {
- pn, pnLen := p.pnManager.PeekPacketNumber(protocol.Encryption1RTT)
- hdr := &wire.ExtendedHeader{}
- hdr.PacketNumber = pn
- hdr.PacketNumberLen = pnLen
- hdr.DestConnectionID = p.getDestConnID()
- hdr.KeyPhase = kp
- return hdr
-}
-
-func (p *packetPacker) getLongHeader(encLevel protocol.EncryptionLevel) *wire.ExtendedHeader {
- pn, pnLen := p.pnManager.PeekPacketNumber(encLevel)
- hdr := &wire.ExtendedHeader{
- PacketNumber: pn,
- PacketNumberLen: pnLen,
- }
- hdr.IsLongHeader = true
- hdr.Version = p.version
- hdr.SrcConnectionID = p.srcConnID
- hdr.DestConnectionID = p.getDestConnID()
-
- //nolint:exhaustive // 1-RTT packets are not long header packets.
- switch encLevel {
- case protocol.EncryptionInitial:
- hdr.Type = protocol.PacketTypeInitial
- hdr.Token = p.token
- case protocol.EncryptionHandshake:
- hdr.Type = protocol.PacketTypeHandshake
- case protocol.Encryption0RTT:
- hdr.Type = protocol.PacketType0RTT
- }
- return hdr
-}
-
-// writeSinglePacket packs a single packet.
-func (p *packetPacker) writeSinglePacket(
- hdr *wire.ExtendedHeader,
- payload *payload,
- encLevel protocol.EncryptionLevel,
- sealer sealer,
-) (*packedPacket, error) {
- buffer := getPacketBuffer()
- var paddingLen protocol.ByteCount
- if encLevel == protocol.EncryptionInitial {
- paddingLen = p.initialPaddingLen(payload.frames, hdr.GetLength(p.version)+payload.length+protocol.ByteCount(sealer.Overhead()))
- }
- contents, err := p.appendPacket(buffer, hdr, payload, paddingLen, encLevel, sealer, false)
- if err != nil {
- return nil, err
- }
- return &packedPacket{
- buffer: buffer,
- packetContents: contents,
- }, nil
-}
-
-func (p *packetPacker) appendPacket(buffer *packetBuffer, header *wire.ExtendedHeader, payload *payload, padding protocol.ByteCount, encLevel protocol.EncryptionLevel, sealer sealer, isMTUProbePacket bool) (*packetContents, error) {
- var paddingLen protocol.ByteCount
- pnLen := protocol.ByteCount(header.PacketNumberLen)
- if payload.length < 4-pnLen {
- paddingLen = 4 - pnLen - payload.length
- }
- paddingLen += padding
- if header.IsLongHeader {
- header.Length = pnLen + protocol.ByteCount(sealer.Overhead()) + payload.length + paddingLen
- }
-
- hdrOffset := buffer.Len()
- buf := bytes.NewBuffer(buffer.Data)
- if err := header.Write(buf, p.version); err != nil {
- return nil, err
- }
- payloadOffset := buf.Len()
-
- if payload.ack != nil {
- if err := payload.ack.Write(buf, p.version); err != nil {
- return nil, err
- }
- }
- if paddingLen > 0 {
- buf.Write(make([]byte, paddingLen))
- }
- for _, frame := range payload.frames {
- if err := frame.Write(buf, p.version); err != nil {
- return nil, err
- }
- }
-
- if payloadSize := protocol.ByteCount(buf.Len()-payloadOffset) - paddingLen; payloadSize != payload.length {
- return nil, fmt.Errorf("PacketPacker BUG: payload size inconsistent (expected %d, got %d bytes)", payload.length, payloadSize)
- }
- if !isMTUProbePacket {
- if size := protocol.ByteCount(buf.Len() + sealer.Overhead()); size > p.maxPacketSize {
- return nil, fmt.Errorf("PacketPacker BUG: packet too large (%d bytes, allowed %d bytes)", size, p.maxPacketSize)
- }
- }
-
- raw := buffer.Data
- // encrypt the packet
- raw = raw[:buf.Len()]
- _ = sealer.Seal(raw[payloadOffset:payloadOffset], raw[payloadOffset:], header.PacketNumber, raw[hdrOffset:payloadOffset])
- raw = raw[0 : buf.Len()+sealer.Overhead()]
- // apply header protection
- pnOffset := payloadOffset - int(header.PacketNumberLen)
- sealer.EncryptHeader(raw[pnOffset+4:pnOffset+4+16], &raw[hdrOffset], raw[pnOffset:payloadOffset])
- buffer.Data = raw
-
- num := p.pnManager.PopPacketNumber(encLevel)
- if num != header.PacketNumber {
- return nil, errors.New("packetPacker BUG: Peeked and Popped packet numbers do not match")
- }
- return &packetContents{
- header: header,
- ack: payload.ack,
- frames: payload.frames,
- length: buffer.Len() - hdrOffset,
- }, nil
-}
-
-func (p *packetPacker) SetToken(token []byte) {
- p.token = token
-}
-
-// When a higher MTU is discovered, use it.
-func (p *packetPacker) SetMaxPacketSize(s protocol.ByteCount) {
- p.maxPacketSize = s
-}
-
-// If the peer sets a max_packet_size that's smaller than the size we're currently using,
-// we need to reduce the size of packets we send.
-func (p *packetPacker) HandleTransportParameters(params *wire.TransportParameters) {
- if params.MaxUDPPayloadSize != 0 {
- p.maxPacketSize = utils.MinByteCount(p.maxPacketSize, params.MaxUDPPayloadSize)
- }
-}
diff --git a/vendor/github.com/lucas-clemente/quic-go/streams_map_generic_helper.go b/vendor/github.com/lucas-clemente/quic-go/streams_map_generic_helper.go
deleted file mode 100644
index 26b562331..000000000
--- a/vendor/github.com/lucas-clemente/quic-go/streams_map_generic_helper.go
+++ /dev/null
@@ -1,18 +0,0 @@
-package quic
-
-import (
- "github.com/cheekybits/genny/generic"
-
- "github.com/lucas-clemente/quic-go/internal/protocol"
-)
-
-// In the auto-generated streams maps, we need to be able to close the streams.
-// Therefore, extend the generic.Type with the stream close method.
-// This definition must be in a file that Genny doesn't process.
-type item interface {
- generic.Type
- updateSendWindow(protocol.ByteCount)
- closeForShutdown(error)
-}
-
-const streamTypeGeneric protocol.StreamType = protocol.StreamTypeUni
diff --git a/vendor/github.com/lucas-clemente/quic-go/streams_map_incoming_bidi.go b/vendor/github.com/lucas-clemente/quic-go/streams_map_incoming_bidi.go
deleted file mode 100644
index 46c8c73a0..000000000
--- a/vendor/github.com/lucas-clemente/quic-go/streams_map_incoming_bidi.go
+++ /dev/null
@@ -1,192 +0,0 @@
-// This file was automatically generated by genny.
-// Any changes will be lost if this file is regenerated.
-// see https://github.com/cheekybits/genny
-
-package quic
-
-import (
- "context"
- "sync"
-
- "github.com/lucas-clemente/quic-go/internal/protocol"
- "github.com/lucas-clemente/quic-go/internal/wire"
-)
-
-// When a stream is deleted before it was accepted, we can't delete it from the map immediately.
-// We need to wait until the application accepts it, and delete it then.
-type streamIEntry struct {
- stream streamI
- shouldDelete bool
-}
-
-type incomingBidiStreamsMap struct {
- mutex sync.RWMutex
- newStreamChan chan struct{}
-
- streams map[protocol.StreamNum]streamIEntry
-
- nextStreamToAccept protocol.StreamNum // the next stream that will be returned by AcceptStream()
- nextStreamToOpen protocol.StreamNum // the highest stream that the peer opened
- maxStream protocol.StreamNum // the highest stream that the peer is allowed to open
- maxNumStreams uint64 // maximum number of streams
-
- newStream func(protocol.StreamNum) streamI
- queueMaxStreamID func(*wire.MaxStreamsFrame)
-
- closeErr error
-}
-
-func newIncomingBidiStreamsMap(
- newStream func(protocol.StreamNum) streamI,
- maxStreams uint64,
- queueControlFrame func(wire.Frame),
-) *incomingBidiStreamsMap {
- return &incomingBidiStreamsMap{
- newStreamChan: make(chan struct{}, 1),
- streams: make(map[protocol.StreamNum]streamIEntry),
- maxStream: protocol.StreamNum(maxStreams),
- maxNumStreams: maxStreams,
- newStream: newStream,
- nextStreamToOpen: 1,
- nextStreamToAccept: 1,
- queueMaxStreamID: func(f *wire.MaxStreamsFrame) { queueControlFrame(f) },
- }
-}
-
-func (m *incomingBidiStreamsMap) AcceptStream(ctx context.Context) (streamI, error) {
- // drain the newStreamChan, so we don't check the map twice if the stream doesn't exist
- select {
- case <-m.newStreamChan:
- default:
- }
-
- m.mutex.Lock()
-
- var num protocol.StreamNum
- var entry streamIEntry
- for {
- num = m.nextStreamToAccept
- if m.closeErr != nil {
- m.mutex.Unlock()
- return nil, m.closeErr
- }
- var ok bool
- entry, ok = m.streams[num]
- if ok {
- break
- }
- m.mutex.Unlock()
- select {
- case <-ctx.Done():
- return nil, ctx.Err()
- case <-m.newStreamChan:
- }
- m.mutex.Lock()
- }
- m.nextStreamToAccept++
- // If this stream was completed before being accepted, we can delete it now.
- if entry.shouldDelete {
- if err := m.deleteStream(num); err != nil {
- m.mutex.Unlock()
- return nil, err
- }
- }
- m.mutex.Unlock()
- return entry.stream, nil
-}
-
-func (m *incomingBidiStreamsMap) GetOrOpenStream(num protocol.StreamNum) (streamI, error) {
- m.mutex.RLock()
- if num > m.maxStream {
- m.mutex.RUnlock()
- return nil, streamError{
- message: "peer tried to open stream %d (current limit: %d)",
- nums: []protocol.StreamNum{num, m.maxStream},
- }
- }
- // if the num is smaller than the highest we accepted
- // * this stream exists in the map, and we can return it, or
- // * this stream was already closed, then we can return the nil
- if num < m.nextStreamToOpen {
- var s streamI
- // If the stream was already queued for deletion, and is just waiting to be accepted, don't return it.
- if entry, ok := m.streams[num]; ok && !entry.shouldDelete {
- s = entry.stream
- }
- m.mutex.RUnlock()
- return s, nil
- }
- m.mutex.RUnlock()
-
- m.mutex.Lock()
- // no need to check the two error conditions from above again
- // * maxStream can only increase, so if the id was valid before, it definitely is valid now
- // * highestStream is only modified by this function
- for newNum := m.nextStreamToOpen; newNum <= num; newNum++ {
- m.streams[newNum] = streamIEntry{stream: m.newStream(newNum)}
- select {
- case m.newStreamChan <- struct{}{}:
- default:
- }
- }
- m.nextStreamToOpen = num + 1
- entry := m.streams[num]
- m.mutex.Unlock()
- return entry.stream, nil
-}
-
-func (m *incomingBidiStreamsMap) DeleteStream(num protocol.StreamNum) error {
- m.mutex.Lock()
- defer m.mutex.Unlock()
-
- return m.deleteStream(num)
-}
-
-func (m *incomingBidiStreamsMap) deleteStream(num protocol.StreamNum) error {
- if _, ok := m.streams[num]; !ok {
- return streamError{
- message: "tried to delete unknown incoming stream %d",
- nums: []protocol.StreamNum{num},
- }
- }
-
- // Don't delete this stream yet, if it was not yet accepted.
- // Just save it to streamsToDelete map, to make sure it is deleted as soon as it gets accepted.
- if num >= m.nextStreamToAccept {
- entry, ok := m.streams[num]
- if ok && entry.shouldDelete {
- return streamError{
- message: "tried to delete incoming stream %d multiple times",
- nums: []protocol.StreamNum{num},
- }
- }
- entry.shouldDelete = true
- m.streams[num] = entry // can't assign to struct in map, so we need to reassign
- return nil
- }
-
- delete(m.streams, num)
- // queue a MAX_STREAM_ID frame, giving the peer the option to open a new stream
- if m.maxNumStreams > uint64(len(m.streams)) {
- maxStream := m.nextStreamToOpen + protocol.StreamNum(m.maxNumStreams-uint64(len(m.streams))) - 1
- // Never send a value larger than protocol.MaxStreamCount.
- if maxStream <= protocol.MaxStreamCount {
- m.maxStream = maxStream
- m.queueMaxStreamID(&wire.MaxStreamsFrame{
- Type: protocol.StreamTypeBidi,
- MaxStreamNum: m.maxStream,
- })
- }
- }
- return nil
-}
-
-func (m *incomingBidiStreamsMap) CloseWithError(err error) {
- m.mutex.Lock()
- m.closeErr = err
- for _, entry := range m.streams {
- entry.stream.closeForShutdown(err)
- }
- m.mutex.Unlock()
- close(m.newStreamChan)
-}
diff --git a/vendor/github.com/lucas-clemente/quic-go/streams_map_incoming_generic.go b/vendor/github.com/lucas-clemente/quic-go/streams_map_incoming_generic.go
deleted file mode 100644
index 4c7696a08..000000000
--- a/vendor/github.com/lucas-clemente/quic-go/streams_map_incoming_generic.go
+++ /dev/null
@@ -1,190 +0,0 @@
-package quic
-
-import (
- "context"
- "sync"
-
- "github.com/lucas-clemente/quic-go/internal/protocol"
- "github.com/lucas-clemente/quic-go/internal/wire"
-)
-
-// When a stream is deleted before it was accepted, we can't delete it from the map immediately.
-// We need to wait until the application accepts it, and delete it then.
-type itemEntry struct {
- stream item
- shouldDelete bool
-}
-
-//go:generate genny -in $GOFILE -out streams_map_incoming_bidi.go gen "item=streamI Item=BidiStream streamTypeGeneric=protocol.StreamTypeBidi"
-//go:generate genny -in $GOFILE -out streams_map_incoming_uni.go gen "item=receiveStreamI Item=UniStream streamTypeGeneric=protocol.StreamTypeUni"
-type incomingItemsMap struct {
- mutex sync.RWMutex
- newStreamChan chan struct{}
-
- streams map[protocol.StreamNum]itemEntry
-
- nextStreamToAccept protocol.StreamNum // the next stream that will be returned by AcceptStream()
- nextStreamToOpen protocol.StreamNum // the highest stream that the peer opened
- maxStream protocol.StreamNum // the highest stream that the peer is allowed to open
- maxNumStreams uint64 // maximum number of streams
-
- newStream func(protocol.StreamNum) item
- queueMaxStreamID func(*wire.MaxStreamsFrame)
-
- closeErr error
-}
-
-func newIncomingItemsMap(
- newStream func(protocol.StreamNum) item,
- maxStreams uint64,
- queueControlFrame func(wire.Frame),
-) *incomingItemsMap {
- return &incomingItemsMap{
- newStreamChan: make(chan struct{}, 1),
- streams: make(map[protocol.StreamNum]itemEntry),
- maxStream: protocol.StreamNum(maxStreams),
- maxNumStreams: maxStreams,
- newStream: newStream,
- nextStreamToOpen: 1,
- nextStreamToAccept: 1,
- queueMaxStreamID: func(f *wire.MaxStreamsFrame) { queueControlFrame(f) },
- }
-}
-
-func (m *incomingItemsMap) AcceptStream(ctx context.Context) (item, error) {
- // drain the newStreamChan, so we don't check the map twice if the stream doesn't exist
- select {
- case <-m.newStreamChan:
- default:
- }
-
- m.mutex.Lock()
-
- var num protocol.StreamNum
- var entry itemEntry
- for {
- num = m.nextStreamToAccept
- if m.closeErr != nil {
- m.mutex.Unlock()
- return nil, m.closeErr
- }
- var ok bool
- entry, ok = m.streams[num]
- if ok {
- break
- }
- m.mutex.Unlock()
- select {
- case <-ctx.Done():
- return nil, ctx.Err()
- case <-m.newStreamChan:
- }
- m.mutex.Lock()
- }
- m.nextStreamToAccept++
- // If this stream was completed before being accepted, we can delete it now.
- if entry.shouldDelete {
- if err := m.deleteStream(num); err != nil {
- m.mutex.Unlock()
- return nil, err
- }
- }
- m.mutex.Unlock()
- return entry.stream, nil
-}
-
-func (m *incomingItemsMap) GetOrOpenStream(num protocol.StreamNum) (item, error) {
- m.mutex.RLock()
- if num > m.maxStream {
- m.mutex.RUnlock()
- return nil, streamError{
- message: "peer tried to open stream %d (current limit: %d)",
- nums: []protocol.StreamNum{num, m.maxStream},
- }
- }
- // if the num is smaller than the highest we accepted
- // * this stream exists in the map, and we can return it, or
- // * this stream was already closed, then we can return the nil
- if num < m.nextStreamToOpen {
- var s item
- // If the stream was already queued for deletion, and is just waiting to be accepted, don't return it.
- if entry, ok := m.streams[num]; ok && !entry.shouldDelete {
- s = entry.stream
- }
- m.mutex.RUnlock()
- return s, nil
- }
- m.mutex.RUnlock()
-
- m.mutex.Lock()
- // no need to check the two error conditions from above again
- // * maxStream can only increase, so if the id was valid before, it definitely is valid now
- // * highestStream is only modified by this function
- for newNum := m.nextStreamToOpen; newNum <= num; newNum++ {
- m.streams[newNum] = itemEntry{stream: m.newStream(newNum)}
- select {
- case m.newStreamChan <- struct{}{}:
- default:
- }
- }
- m.nextStreamToOpen = num + 1
- entry := m.streams[num]
- m.mutex.Unlock()
- return entry.stream, nil
-}
-
-func (m *incomingItemsMap) DeleteStream(num protocol.StreamNum) error {
- m.mutex.Lock()
- defer m.mutex.Unlock()
-
- return m.deleteStream(num)
-}
-
-func (m *incomingItemsMap) deleteStream(num protocol.StreamNum) error {
- if _, ok := m.streams[num]; !ok {
- return streamError{
- message: "tried to delete unknown incoming stream %d",
- nums: []protocol.StreamNum{num},
- }
- }
-
- // Don't delete this stream yet, if it was not yet accepted.
- // Just save it to streamsToDelete map, to make sure it is deleted as soon as it gets accepted.
- if num >= m.nextStreamToAccept {
- entry, ok := m.streams[num]
- if ok && entry.shouldDelete {
- return streamError{
- message: "tried to delete incoming stream %d multiple times",
- nums: []protocol.StreamNum{num},
- }
- }
- entry.shouldDelete = true
- m.streams[num] = entry // can't assign to struct in map, so we need to reassign
- return nil
- }
-
- delete(m.streams, num)
- // queue a MAX_STREAM_ID frame, giving the peer the option to open a new stream
- if m.maxNumStreams > uint64(len(m.streams)) {
- maxStream := m.nextStreamToOpen + protocol.StreamNum(m.maxNumStreams-uint64(len(m.streams))) - 1
- // Never send a value larger than protocol.MaxStreamCount.
- if maxStream <= protocol.MaxStreamCount {
- m.maxStream = maxStream
- m.queueMaxStreamID(&wire.MaxStreamsFrame{
- Type: streamTypeGeneric,
- MaxStreamNum: m.maxStream,
- })
- }
- }
- return nil
-}
-
-func (m *incomingItemsMap) CloseWithError(err error) {
- m.mutex.Lock()
- m.closeErr = err
- for _, entry := range m.streams {
- entry.stream.closeForShutdown(err)
- }
- m.mutex.Unlock()
- close(m.newStreamChan)
-}
diff --git a/vendor/github.com/lucas-clemente/quic-go/streams_map_outgoing_bidi.go b/vendor/github.com/lucas-clemente/quic-go/streams_map_outgoing_bidi.go
deleted file mode 100644
index 3f7ec166a..000000000
--- a/vendor/github.com/lucas-clemente/quic-go/streams_map_outgoing_bidi.go
+++ /dev/null
@@ -1,226 +0,0 @@
-// This file was automatically generated by genny.
-// Any changes will be lost if this file is regenerated.
-// see https://github.com/cheekybits/genny
-
-package quic
-
-import (
- "context"
- "sync"
-
- "github.com/lucas-clemente/quic-go/internal/protocol"
- "github.com/lucas-clemente/quic-go/internal/wire"
-)
-
-type outgoingBidiStreamsMap struct {
- mutex sync.RWMutex
-
- streams map[protocol.StreamNum]streamI
-
- openQueue map[uint64]chan struct{}
- lowestInQueue uint64
- highestInQueue uint64
-
- nextStream protocol.StreamNum // stream ID of the stream returned by OpenStream(Sync)
- maxStream protocol.StreamNum // the maximum stream ID we're allowed to open
- blockedSent bool // was a STREAMS_BLOCKED sent for the current maxStream
-
- newStream func(protocol.StreamNum) streamI
- queueStreamIDBlocked func(*wire.StreamsBlockedFrame)
-
- closeErr error
-}
-
-func newOutgoingBidiStreamsMap(
- newStream func(protocol.StreamNum) streamI,
- queueControlFrame func(wire.Frame),
-) *outgoingBidiStreamsMap {
- return &outgoingBidiStreamsMap{
- streams: make(map[protocol.StreamNum]streamI),
- openQueue: make(map[uint64]chan struct{}),
- maxStream: protocol.InvalidStreamNum,
- nextStream: 1,
- newStream: newStream,
- queueStreamIDBlocked: func(f *wire.StreamsBlockedFrame) { queueControlFrame(f) },
- }
-}
-
-func (m *outgoingBidiStreamsMap) OpenStream() (streamI, error) {
- m.mutex.Lock()
- defer m.mutex.Unlock()
-
- if m.closeErr != nil {
- return nil, m.closeErr
- }
-
- // if there are OpenStreamSync calls waiting, return an error here
- if len(m.openQueue) > 0 || m.nextStream > m.maxStream {
- m.maybeSendBlockedFrame()
- return nil, streamOpenErr{errTooManyOpenStreams}
- }
- return m.openStream(), nil
-}
-
-func (m *outgoingBidiStreamsMap) OpenStreamSync(ctx context.Context) (streamI, error) {
- m.mutex.Lock()
- defer m.mutex.Unlock()
-
- if m.closeErr != nil {
- return nil, m.closeErr
- }
-
- if err := ctx.Err(); err != nil {
- return nil, err
- }
-
- if len(m.openQueue) == 0 && m.nextStream <= m.maxStream {
- return m.openStream(), nil
- }
-
- waitChan := make(chan struct{}, 1)
- queuePos := m.highestInQueue
- m.highestInQueue++
- if len(m.openQueue) == 0 {
- m.lowestInQueue = queuePos
- }
- m.openQueue[queuePos] = waitChan
- m.maybeSendBlockedFrame()
-
- for {
- m.mutex.Unlock()
- select {
- case <-ctx.Done():
- m.mutex.Lock()
- delete(m.openQueue, queuePos)
- return nil, ctx.Err()
- case <-waitChan:
- }
- m.mutex.Lock()
-
- if m.closeErr != nil {
- return nil, m.closeErr
- }
- if m.nextStream > m.maxStream {
- // no stream available. Continue waiting
- continue
- }
- str := m.openStream()
- delete(m.openQueue, queuePos)
- m.lowestInQueue = queuePos + 1
- m.unblockOpenSync()
- return str, nil
- }
-}
-
-func (m *outgoingBidiStreamsMap) openStream() streamI {
- s := m.newStream(m.nextStream)
- m.streams[m.nextStream] = s
- m.nextStream++
- return s
-}
-
-// maybeSendBlockedFrame queues a STREAMS_BLOCKED frame for the current stream offset,
-// if we haven't sent one for this offset yet
-func (m *outgoingBidiStreamsMap) maybeSendBlockedFrame() {
- if m.blockedSent {
- return
- }
-
- var streamNum protocol.StreamNum
- if m.maxStream != protocol.InvalidStreamNum {
- streamNum = m.maxStream
- }
- m.queueStreamIDBlocked(&wire.StreamsBlockedFrame{
- Type: protocol.StreamTypeBidi,
- StreamLimit: streamNum,
- })
- m.blockedSent = true
-}
-
-func (m *outgoingBidiStreamsMap) GetStream(num protocol.StreamNum) (streamI, error) {
- m.mutex.RLock()
- if num >= m.nextStream {
- m.mutex.RUnlock()
- return nil, streamError{
- message: "peer attempted to open stream %d",
- nums: []protocol.StreamNum{num},
- }
- }
- s := m.streams[num]
- m.mutex.RUnlock()
- return s, nil
-}
-
-func (m *outgoingBidiStreamsMap) DeleteStream(num protocol.StreamNum) error {
- m.mutex.Lock()
- defer m.mutex.Unlock()
-
- if _, ok := m.streams[num]; !ok {
- return streamError{
- message: "tried to delete unknown outgoing stream %d",
- nums: []protocol.StreamNum{num},
- }
- }
- delete(m.streams, num)
- return nil
-}
-
-func (m *outgoingBidiStreamsMap) SetMaxStream(num protocol.StreamNum) {
- m.mutex.Lock()
- defer m.mutex.Unlock()
-
- if num <= m.maxStream {
- return
- }
- m.maxStream = num
- m.blockedSent = false
- if m.maxStream < m.nextStream-1+protocol.StreamNum(len(m.openQueue)) {
- m.maybeSendBlockedFrame()
- }
- m.unblockOpenSync()
-}
-
-// UpdateSendWindow is called when the peer's transport parameters are received.
-// Only in the case of a 0-RTT handshake will we have open streams at this point.
-// We might need to update the send window, in case the server increased it.
-func (m *outgoingBidiStreamsMap) UpdateSendWindow(limit protocol.ByteCount) {
- m.mutex.Lock()
- for _, str := range m.streams {
- str.updateSendWindow(limit)
- }
- m.mutex.Unlock()
-}
-
-// unblockOpenSync unblocks the next OpenStreamSync go-routine to open a new stream
-func (m *outgoingBidiStreamsMap) unblockOpenSync() {
- if len(m.openQueue) == 0 {
- return
- }
- for qp := m.lowestInQueue; qp <= m.highestInQueue; qp++ {
- c, ok := m.openQueue[qp]
- if !ok { // entry was deleted because the context was canceled
- continue
- }
- // unblockOpenSync is called both from OpenStreamSync and from SetMaxStream.
- // It's sufficient to only unblock OpenStreamSync once.
- select {
- case c <- struct{}{}:
- default:
- }
- return
- }
-}
-
-func (m *outgoingBidiStreamsMap) CloseWithError(err error) {
- m.mutex.Lock()
- m.closeErr = err
- for _, str := range m.streams {
- str.closeForShutdown(err)
- }
- for _, c := range m.openQueue {
- if c != nil {
- close(c)
- }
- }
- m.mutex.Unlock()
-}
diff --git a/vendor/github.com/lucas-clemente/quic-go/streams_map_outgoing_generic.go b/vendor/github.com/lucas-clemente/quic-go/streams_map_outgoing_generic.go
deleted file mode 100644
index dde75043c..000000000
--- a/vendor/github.com/lucas-clemente/quic-go/streams_map_outgoing_generic.go
+++ /dev/null
@@ -1,224 +0,0 @@
-package quic
-
-import (
- "context"
- "sync"
-
- "github.com/lucas-clemente/quic-go/internal/protocol"
- "github.com/lucas-clemente/quic-go/internal/wire"
-)
-
-//go:generate genny -in $GOFILE -out streams_map_outgoing_bidi.go gen "item=streamI Item=BidiStream streamTypeGeneric=protocol.StreamTypeBidi"
-//go:generate genny -in $GOFILE -out streams_map_outgoing_uni.go gen "item=sendStreamI Item=UniStream streamTypeGeneric=protocol.StreamTypeUni"
-type outgoingItemsMap struct {
- mutex sync.RWMutex
-
- streams map[protocol.StreamNum]item
-
- openQueue map[uint64]chan struct{}
- lowestInQueue uint64
- highestInQueue uint64
-
- nextStream protocol.StreamNum // stream ID of the stream returned by OpenStream(Sync)
- maxStream protocol.StreamNum // the maximum stream ID we're allowed to open
- blockedSent bool // was a STREAMS_BLOCKED sent for the current maxStream
-
- newStream func(protocol.StreamNum) item
- queueStreamIDBlocked func(*wire.StreamsBlockedFrame)
-
- closeErr error
-}
-
-func newOutgoingItemsMap(
- newStream func(protocol.StreamNum) item,
- queueControlFrame func(wire.Frame),
-) *outgoingItemsMap {
- return &outgoingItemsMap{
- streams: make(map[protocol.StreamNum]item),
- openQueue: make(map[uint64]chan struct{}),
- maxStream: protocol.InvalidStreamNum,
- nextStream: 1,
- newStream: newStream,
- queueStreamIDBlocked: func(f *wire.StreamsBlockedFrame) { queueControlFrame(f) },
- }
-}
-
-func (m *outgoingItemsMap) OpenStream() (item, error) {
- m.mutex.Lock()
- defer m.mutex.Unlock()
-
- if m.closeErr != nil {
- return nil, m.closeErr
- }
-
- // if there are OpenStreamSync calls waiting, return an error here
- if len(m.openQueue) > 0 || m.nextStream > m.maxStream {
- m.maybeSendBlockedFrame()
- return nil, streamOpenErr{errTooManyOpenStreams}
- }
- return m.openStream(), nil
-}
-
-func (m *outgoingItemsMap) OpenStreamSync(ctx context.Context) (item, error) {
- m.mutex.Lock()
- defer m.mutex.Unlock()
-
- if m.closeErr != nil {
- return nil, m.closeErr
- }
-
- if err := ctx.Err(); err != nil {
- return nil, err
- }
-
- if len(m.openQueue) == 0 && m.nextStream <= m.maxStream {
- return m.openStream(), nil
- }
-
- waitChan := make(chan struct{}, 1)
- queuePos := m.highestInQueue
- m.highestInQueue++
- if len(m.openQueue) == 0 {
- m.lowestInQueue = queuePos
- }
- m.openQueue[queuePos] = waitChan
- m.maybeSendBlockedFrame()
-
- for {
- m.mutex.Unlock()
- select {
- case <-ctx.Done():
- m.mutex.Lock()
- delete(m.openQueue, queuePos)
- return nil, ctx.Err()
- case <-waitChan:
- }
- m.mutex.Lock()
-
- if m.closeErr != nil {
- return nil, m.closeErr
- }
- if m.nextStream > m.maxStream {
- // no stream available. Continue waiting
- continue
- }
- str := m.openStream()
- delete(m.openQueue, queuePos)
- m.lowestInQueue = queuePos + 1
- m.unblockOpenSync()
- return str, nil
- }
-}
-
-func (m *outgoingItemsMap) openStream() item {
- s := m.newStream(m.nextStream)
- m.streams[m.nextStream] = s
- m.nextStream++
- return s
-}
-
-// maybeSendBlockedFrame queues a STREAMS_BLOCKED frame for the current stream offset,
-// if we haven't sent one for this offset yet
-func (m *outgoingItemsMap) maybeSendBlockedFrame() {
- if m.blockedSent {
- return
- }
-
- var streamNum protocol.StreamNum
- if m.maxStream != protocol.InvalidStreamNum {
- streamNum = m.maxStream
- }
- m.queueStreamIDBlocked(&wire.StreamsBlockedFrame{
- Type: streamTypeGeneric,
- StreamLimit: streamNum,
- })
- m.blockedSent = true
-}
-
-func (m *outgoingItemsMap) GetStream(num protocol.StreamNum) (item, error) {
- m.mutex.RLock()
- if num >= m.nextStream {
- m.mutex.RUnlock()
- return nil, streamError{
- message: "peer attempted to open stream %d",
- nums: []protocol.StreamNum{num},
- }
- }
- s := m.streams[num]
- m.mutex.RUnlock()
- return s, nil
-}
-
-func (m *outgoingItemsMap) DeleteStream(num protocol.StreamNum) error {
- m.mutex.Lock()
- defer m.mutex.Unlock()
-
- if _, ok := m.streams[num]; !ok {
- return streamError{
- message: "tried to delete unknown outgoing stream %d",
- nums: []protocol.StreamNum{num},
- }
- }
- delete(m.streams, num)
- return nil
-}
-
-func (m *outgoingItemsMap) SetMaxStream(num protocol.StreamNum) {
- m.mutex.Lock()
- defer m.mutex.Unlock()
-
- if num <= m.maxStream {
- return
- }
- m.maxStream = num
- m.blockedSent = false
- if m.maxStream < m.nextStream-1+protocol.StreamNum(len(m.openQueue)) {
- m.maybeSendBlockedFrame()
- }
- m.unblockOpenSync()
-}
-
-// UpdateSendWindow is called when the peer's transport parameters are received.
-// Only in the case of a 0-RTT handshake will we have open streams at this point.
-// We might need to update the send window, in case the server increased it.
-func (m *outgoingItemsMap) UpdateSendWindow(limit protocol.ByteCount) {
- m.mutex.Lock()
- for _, str := range m.streams {
- str.updateSendWindow(limit)
- }
- m.mutex.Unlock()
-}
-
-// unblockOpenSync unblocks the next OpenStreamSync go-routine to open a new stream
-func (m *outgoingItemsMap) unblockOpenSync() {
- if len(m.openQueue) == 0 {
- return
- }
- for qp := m.lowestInQueue; qp <= m.highestInQueue; qp++ {
- c, ok := m.openQueue[qp]
- if !ok { // entry was deleted because the context was canceled
- continue
- }
- // unblockOpenSync is called both from OpenStreamSync and from SetMaxStream.
- // It's sufficient to only unblock OpenStreamSync once.
- select {
- case c <- struct{}{}:
- default:
- }
- return
- }
-}
-
-func (m *outgoingItemsMap) CloseWithError(err error) {
- m.mutex.Lock()
- m.closeErr = err
- for _, str := range m.streams {
- str.closeForShutdown(err)
- }
- for _, c := range m.openQueue {
- if c != nil {
- close(c)
- }
- }
- m.mutex.Unlock()
-}
diff --git a/vendor/github.com/lucas-clemente/quic-go/tools.go b/vendor/github.com/lucas-clemente/quic-go/tools.go
deleted file mode 100644
index ee68fafbe..000000000
--- a/vendor/github.com/lucas-clemente/quic-go/tools.go
+++ /dev/null
@@ -1,9 +0,0 @@
-//go:build tools
-// +build tools
-
-package quic
-
-import (
- _ "github.com/cheekybits/genny"
- _ "github.com/onsi/ginkgo/ginkgo"
-)
diff --git a/vendor/github.com/marten-seemann/qtls-go1-16/README.md b/vendor/github.com/marten-seemann/qtls-go1-16/README.md
deleted file mode 100644
index 0d318abe4..000000000
--- a/vendor/github.com/marten-seemann/qtls-go1-16/README.md
+++ /dev/null
@@ -1,6 +0,0 @@
-# qtls
-
-[![Godoc Reference](https://img.shields.io/badge/godoc-reference-blue.svg?style=flat-square)](https://godoc.org/github.com/marten-seemann/qtls)
-[![CircleCI Build Status](https://img.shields.io/circleci/project/github/marten-seemann/qtls.svg?style=flat-square&label=CircleCI+build)](https://circleci.com/gh/marten-seemann/qtls)
-
-This repository contains a modified version of the standard library's TLS implementation, modified for the QUIC protocol. It is used by [quic-go](https://github.com/lucas-clemente/quic-go).
diff --git a/vendor/github.com/marten-seemann/qtls-go1-16/auth.go b/vendor/github.com/marten-seemann/qtls-go1-16/auth.go
deleted file mode 100644
index 1ef675fd3..000000000
--- a/vendor/github.com/marten-seemann/qtls-go1-16/auth.go
+++ /dev/null
@@ -1,289 +0,0 @@
-// Copyright 2017 The Go Authors. All rights reserved.
-// Use of this source code is governed by a BSD-style
-// license that can be found in the LICENSE file.
-
-package qtls
-
-import (
- "bytes"
- "crypto"
- "crypto/ecdsa"
- "crypto/ed25519"
- "crypto/elliptic"
- "crypto/rsa"
- "errors"
- "fmt"
- "hash"
- "io"
-)
-
-// verifyHandshakeSignature verifies a signature against pre-hashed
-// (if required) handshake contents.
-func verifyHandshakeSignature(sigType uint8, pubkey crypto.PublicKey, hashFunc crypto.Hash, signed, sig []byte) error {
- switch sigType {
- case signatureECDSA:
- pubKey, ok := pubkey.(*ecdsa.PublicKey)
- if !ok {
- return fmt.Errorf("expected an ECDSA public key, got %T", pubkey)
- }
- if !ecdsa.VerifyASN1(pubKey, signed, sig) {
- return errors.New("ECDSA verification failure")
- }
- case signatureEd25519:
- pubKey, ok := pubkey.(ed25519.PublicKey)
- if !ok {
- return fmt.Errorf("expected an Ed25519 public key, got %T", pubkey)
- }
- if !ed25519.Verify(pubKey, signed, sig) {
- return errors.New("Ed25519 verification failure")
- }
- case signaturePKCS1v15:
- pubKey, ok := pubkey.(*rsa.PublicKey)
- if !ok {
- return fmt.Errorf("expected an RSA public key, got %T", pubkey)
- }
- if err := rsa.VerifyPKCS1v15(pubKey, hashFunc, signed, sig); err != nil {
- return err
- }
- case signatureRSAPSS:
- pubKey, ok := pubkey.(*rsa.PublicKey)
- if !ok {
- return fmt.Errorf("expected an RSA public key, got %T", pubkey)
- }
- signOpts := &rsa.PSSOptions{SaltLength: rsa.PSSSaltLengthEqualsHash}
- if err := rsa.VerifyPSS(pubKey, hashFunc, signed, sig, signOpts); err != nil {
- return err
- }
- default:
- return errors.New("internal error: unknown signature type")
- }
- return nil
-}
-
-const (
- serverSignatureContext = "TLS 1.3, server CertificateVerify\x00"
- clientSignatureContext = "TLS 1.3, client CertificateVerify\x00"
-)
-
-var signaturePadding = []byte{
- 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20,
- 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20,
- 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20,
- 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20,
- 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20,
- 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20,
- 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20,
- 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20,
-}
-
-// signedMessage returns the pre-hashed (if necessary) message to be signed by
-// certificate keys in TLS 1.3. See RFC 8446, Section 4.4.3.
-func signedMessage(sigHash crypto.Hash, context string, transcript hash.Hash) []byte {
- if sigHash == directSigning {
- b := &bytes.Buffer{}
- b.Write(signaturePadding)
- io.WriteString(b, context)
- b.Write(transcript.Sum(nil))
- return b.Bytes()
- }
- h := sigHash.New()
- h.Write(signaturePadding)
- io.WriteString(h, context)
- h.Write(transcript.Sum(nil))
- return h.Sum(nil)
-}
-
-// typeAndHashFromSignatureScheme returns the corresponding signature type and
-// crypto.Hash for a given TLS SignatureScheme.
-func typeAndHashFromSignatureScheme(signatureAlgorithm SignatureScheme) (sigType uint8, hash crypto.Hash, err error) {
- switch signatureAlgorithm {
- case PKCS1WithSHA1, PKCS1WithSHA256, PKCS1WithSHA384, PKCS1WithSHA512:
- sigType = signaturePKCS1v15
- case PSSWithSHA256, PSSWithSHA384, PSSWithSHA512:
- sigType = signatureRSAPSS
- case ECDSAWithSHA1, ECDSAWithP256AndSHA256, ECDSAWithP384AndSHA384, ECDSAWithP521AndSHA512:
- sigType = signatureECDSA
- case Ed25519:
- sigType = signatureEd25519
- default:
- return 0, 0, fmt.Errorf("unsupported signature algorithm: %v", signatureAlgorithm)
- }
- switch signatureAlgorithm {
- case PKCS1WithSHA1, ECDSAWithSHA1:
- hash = crypto.SHA1
- case PKCS1WithSHA256, PSSWithSHA256, ECDSAWithP256AndSHA256:
- hash = crypto.SHA256
- case PKCS1WithSHA384, PSSWithSHA384, ECDSAWithP384AndSHA384:
- hash = crypto.SHA384
- case PKCS1WithSHA512, PSSWithSHA512, ECDSAWithP521AndSHA512:
- hash = crypto.SHA512
- case Ed25519:
- hash = directSigning
- default:
- return 0, 0, fmt.Errorf("unsupported signature algorithm: %v", signatureAlgorithm)
- }
- return sigType, hash, nil
-}
-
-// legacyTypeAndHashFromPublicKey returns the fixed signature type and crypto.Hash for
-// a given public key used with TLS 1.0 and 1.1, before the introduction of
-// signature algorithm negotiation.
-func legacyTypeAndHashFromPublicKey(pub crypto.PublicKey) (sigType uint8, hash crypto.Hash, err error) {
- switch pub.(type) {
- case *rsa.PublicKey:
- return signaturePKCS1v15, crypto.MD5SHA1, nil
- case *ecdsa.PublicKey:
- return signatureECDSA, crypto.SHA1, nil
- case ed25519.PublicKey:
- // RFC 8422 specifies support for Ed25519 in TLS 1.0 and 1.1,
- // but it requires holding on to a handshake transcript to do a
- // full signature, and not even OpenSSL bothers with the
- // complexity, so we can't even test it properly.
- return 0, 0, fmt.Errorf("tls: Ed25519 public keys are not supported before TLS 1.2")
- default:
- return 0, 0, fmt.Errorf("tls: unsupported public key: %T", pub)
- }
-}
-
-var rsaSignatureSchemes = []struct {
- scheme SignatureScheme
- minModulusBytes int
- maxVersion uint16
-}{
- // RSA-PSS is used with PSSSaltLengthEqualsHash, and requires
- // emLen >= hLen + sLen + 2
- {PSSWithSHA256, crypto.SHA256.Size()*2 + 2, VersionTLS13},
- {PSSWithSHA384, crypto.SHA384.Size()*2 + 2, VersionTLS13},
- {PSSWithSHA512, crypto.SHA512.Size()*2 + 2, VersionTLS13},
- // PKCS #1 v1.5 uses prefixes from hashPrefixes in crypto/rsa, and requires
- // emLen >= len(prefix) + hLen + 11
- // TLS 1.3 dropped support for PKCS #1 v1.5 in favor of RSA-PSS.
- {PKCS1WithSHA256, 19 + crypto.SHA256.Size() + 11, VersionTLS12},
- {PKCS1WithSHA384, 19 + crypto.SHA384.Size() + 11, VersionTLS12},
- {PKCS1WithSHA512, 19 + crypto.SHA512.Size() + 11, VersionTLS12},
- {PKCS1WithSHA1, 15 + crypto.SHA1.Size() + 11, VersionTLS12},
-}
-
-// signatureSchemesForCertificate returns the list of supported SignatureSchemes
-// for a given certificate, based on the public key and the protocol version,
-// and optionally filtered by its explicit SupportedSignatureAlgorithms.
-//
-// This function must be kept in sync with supportedSignatureAlgorithms.
-func signatureSchemesForCertificate(version uint16, cert *Certificate) []SignatureScheme {
- priv, ok := cert.PrivateKey.(crypto.Signer)
- if !ok {
- return nil
- }
-
- var sigAlgs []SignatureScheme
- switch pub := priv.Public().(type) {
- case *ecdsa.PublicKey:
- if version != VersionTLS13 {
- // In TLS 1.2 and earlier, ECDSA algorithms are not
- // constrained to a single curve.
- sigAlgs = []SignatureScheme{
- ECDSAWithP256AndSHA256,
- ECDSAWithP384AndSHA384,
- ECDSAWithP521AndSHA512,
- ECDSAWithSHA1,
- }
- break
- }
- switch pub.Curve {
- case elliptic.P256():
- sigAlgs = []SignatureScheme{ECDSAWithP256AndSHA256}
- case elliptic.P384():
- sigAlgs = []SignatureScheme{ECDSAWithP384AndSHA384}
- case elliptic.P521():
- sigAlgs = []SignatureScheme{ECDSAWithP521AndSHA512}
- default:
- return nil
- }
- case *rsa.PublicKey:
- size := pub.Size()
- sigAlgs = make([]SignatureScheme, 0, len(rsaSignatureSchemes))
- for _, candidate := range rsaSignatureSchemes {
- if size >= candidate.minModulusBytes && version <= candidate.maxVersion {
- sigAlgs = append(sigAlgs, candidate.scheme)
- }
- }
- case ed25519.PublicKey:
- sigAlgs = []SignatureScheme{Ed25519}
- default:
- return nil
- }
-
- if cert.SupportedSignatureAlgorithms != nil {
- var filteredSigAlgs []SignatureScheme
- for _, sigAlg := range sigAlgs {
- if isSupportedSignatureAlgorithm(sigAlg, cert.SupportedSignatureAlgorithms) {
- filteredSigAlgs = append(filteredSigAlgs, sigAlg)
- }
- }
- return filteredSigAlgs
- }
- return sigAlgs
-}
-
-// selectSignatureScheme picks a SignatureScheme from the peer's preference list
-// that works with the selected certificate. It's only called for protocol
-// versions that support signature algorithms, so TLS 1.2 and 1.3.
-func selectSignatureScheme(vers uint16, c *Certificate, peerAlgs []SignatureScheme) (SignatureScheme, error) {
- supportedAlgs := signatureSchemesForCertificate(vers, c)
- if len(supportedAlgs) == 0 {
- return 0, unsupportedCertificateError(c)
- }
- if len(peerAlgs) == 0 && vers == VersionTLS12 {
- // For TLS 1.2, if the client didn't send signature_algorithms then we
- // can assume that it supports SHA1. See RFC 5246, Section 7.4.1.4.1.
- peerAlgs = []SignatureScheme{PKCS1WithSHA1, ECDSAWithSHA1}
- }
- // Pick signature scheme in the peer's preference order, as our
- // preference order is not configurable.
- for _, preferredAlg := range peerAlgs {
- if isSupportedSignatureAlgorithm(preferredAlg, supportedAlgs) {
- return preferredAlg, nil
- }
- }
- return 0, errors.New("tls: peer doesn't support any of the certificate's signature algorithms")
-}
-
-// unsupportedCertificateError returns a helpful error for certificates with
-// an unsupported private key.
-func unsupportedCertificateError(cert *Certificate) error {
- switch cert.PrivateKey.(type) {
- case rsa.PrivateKey, ecdsa.PrivateKey:
- return fmt.Errorf("tls: unsupported certificate: private key is %T, expected *%T",
- cert.PrivateKey, cert.PrivateKey)
- case *ed25519.PrivateKey:
- return fmt.Errorf("tls: unsupported certificate: private key is *ed25519.PrivateKey, expected ed25519.PrivateKey")
- }
-
- signer, ok := cert.PrivateKey.(crypto.Signer)
- if !ok {
- return fmt.Errorf("tls: certificate private key (%T) does not implement crypto.Signer",
- cert.PrivateKey)
- }
-
- switch pub := signer.Public().(type) {
- case *ecdsa.PublicKey:
- switch pub.Curve {
- case elliptic.P256():
- case elliptic.P384():
- case elliptic.P521():
- default:
- return fmt.Errorf("tls: unsupported certificate curve (%s)", pub.Curve.Params().Name)
- }
- case *rsa.PublicKey:
- return fmt.Errorf("tls: certificate RSA key size too small for supported signature algorithms")
- case ed25519.PublicKey:
- default:
- return fmt.Errorf("tls: unsupported certificate key (%T)", pub)
- }
-
- if cert.SupportedSignatureAlgorithms != nil {
- return fmt.Errorf("tls: peer doesn't support the certificate custom signature algorithms")
- }
-
- return fmt.Errorf("tls: internal error: unsupported key (%T)", cert.PrivateKey)
-}
diff --git a/vendor/github.com/marten-seemann/qtls-go1-16/cipher_suites.go b/vendor/github.com/marten-seemann/qtls-go1-16/cipher_suites.go
deleted file mode 100644
index 78f80107f..000000000
--- a/vendor/github.com/marten-seemann/qtls-go1-16/cipher_suites.go
+++ /dev/null
@@ -1,532 +0,0 @@
-// Copyright 2010 The Go Authors. All rights reserved.
-// Use of this source code is governed by a BSD-style
-// license that can be found in the LICENSE file.
-
-package qtls
-
-import (
- "crypto"
- "crypto/aes"
- "crypto/cipher"
- "crypto/des"
- "crypto/hmac"
- "crypto/rc4"
- "crypto/sha1"
- "crypto/sha256"
- "crypto/x509"
- "fmt"
- "hash"
-
- "golang.org/x/crypto/chacha20poly1305"
-)
-
-// CipherSuite is a TLS cipher suite. Note that most functions in this package
-// accept and expose cipher suite IDs instead of this type.
-type CipherSuite struct {
- ID uint16
- Name string
-
- // Supported versions is the list of TLS protocol versions that can
- // negotiate this cipher suite.
- SupportedVersions []uint16
-
- // Insecure is true if the cipher suite has known security issues
- // due to its primitives, design, or implementation.
- Insecure bool
-}
-
-var (
- supportedUpToTLS12 = []uint16{VersionTLS10, VersionTLS11, VersionTLS12}
- supportedOnlyTLS12 = []uint16{VersionTLS12}
- supportedOnlyTLS13 = []uint16{VersionTLS13}
-)
-
-// CipherSuites returns a list of cipher suites currently implemented by this
-// package, excluding those with security issues, which are returned by
-// InsecureCipherSuites.
-//
-// The list is sorted by ID. Note that the default cipher suites selected by
-// this package might depend on logic that can't be captured by a static list.
-func CipherSuites() []*CipherSuite {
- return []*CipherSuite{
- {TLS_RSA_WITH_3DES_EDE_CBC_SHA, "TLS_RSA_WITH_3DES_EDE_CBC_SHA", supportedUpToTLS12, false},
- {TLS_RSA_WITH_AES_128_CBC_SHA, "TLS_RSA_WITH_AES_128_CBC_SHA", supportedUpToTLS12, false},
- {TLS_RSA_WITH_AES_256_CBC_SHA, "TLS_RSA_WITH_AES_256_CBC_SHA", supportedUpToTLS12, false},
- {TLS_RSA_WITH_AES_128_GCM_SHA256, "TLS_RSA_WITH_AES_128_GCM_SHA256", supportedOnlyTLS12, false},
- {TLS_RSA_WITH_AES_256_GCM_SHA384, "TLS_RSA_WITH_AES_256_GCM_SHA384", supportedOnlyTLS12, false},
-
- {TLS_AES_128_GCM_SHA256, "TLS_AES_128_GCM_SHA256", supportedOnlyTLS13, false},
- {TLS_AES_256_GCM_SHA384, "TLS_AES_256_GCM_SHA384", supportedOnlyTLS13, false},
- {TLS_CHACHA20_POLY1305_SHA256, "TLS_CHACHA20_POLY1305_SHA256", supportedOnlyTLS13, false},
-
- {TLS_ECDHE_ECDSA_WITH_AES_128_CBC_SHA, "TLS_ECDHE_ECDSA_WITH_AES_128_CBC_SHA", supportedUpToTLS12, false},
- {TLS_ECDHE_ECDSA_WITH_AES_256_CBC_SHA, "TLS_ECDHE_ECDSA_WITH_AES_256_CBC_SHA", supportedUpToTLS12, false},
- {TLS_ECDHE_RSA_WITH_3DES_EDE_CBC_SHA, "TLS_ECDHE_RSA_WITH_3DES_EDE_CBC_SHA", supportedUpToTLS12, false},
- {TLS_ECDHE_RSA_WITH_AES_128_CBC_SHA, "TLS_ECDHE_RSA_WITH_AES_128_CBC_SHA", supportedUpToTLS12, false},
- {TLS_ECDHE_RSA_WITH_AES_256_CBC_SHA, "TLS_ECDHE_RSA_WITH_AES_256_CBC_SHA", supportedUpToTLS12, false},
- {TLS_ECDHE_ECDSA_WITH_AES_128_GCM_SHA256, "TLS_ECDHE_ECDSA_WITH_AES_128_GCM_SHA256", supportedOnlyTLS12, false},
- {TLS_ECDHE_ECDSA_WITH_AES_256_GCM_SHA384, "TLS_ECDHE_ECDSA_WITH_AES_256_GCM_SHA384", supportedOnlyTLS12, false},
- {TLS_ECDHE_RSA_WITH_AES_128_GCM_SHA256, "TLS_ECDHE_RSA_WITH_AES_128_GCM_SHA256", supportedOnlyTLS12, false},
- {TLS_ECDHE_RSA_WITH_AES_256_GCM_SHA384, "TLS_ECDHE_RSA_WITH_AES_256_GCM_SHA384", supportedOnlyTLS12, false},
- {TLS_ECDHE_RSA_WITH_CHACHA20_POLY1305_SHA256, "TLS_ECDHE_RSA_WITH_CHACHA20_POLY1305_SHA256", supportedOnlyTLS12, false},
- {TLS_ECDHE_ECDSA_WITH_CHACHA20_POLY1305_SHA256, "TLS_ECDHE_ECDSA_WITH_CHACHA20_POLY1305_SHA256", supportedOnlyTLS12, false},
- }
-}
-
-// InsecureCipherSuites returns a list of cipher suites currently implemented by
-// this package and which have security issues.
-//
-// Most applications should not use the cipher suites in this list, and should
-// only use those returned by CipherSuites.
-func InsecureCipherSuites() []*CipherSuite {
- // RC4 suites are broken because RC4 is.
- // CBC-SHA256 suites have no Lucky13 countermeasures.
- return []*CipherSuite{
- {TLS_RSA_WITH_RC4_128_SHA, "TLS_RSA_WITH_RC4_128_SHA", supportedUpToTLS12, true},
- {TLS_RSA_WITH_AES_128_CBC_SHA256, "TLS_RSA_WITH_AES_128_CBC_SHA256", supportedOnlyTLS12, true},
- {TLS_ECDHE_ECDSA_WITH_RC4_128_SHA, "TLS_ECDHE_ECDSA_WITH_RC4_128_SHA", supportedUpToTLS12, true},
- {TLS_ECDHE_RSA_WITH_RC4_128_SHA, "TLS_ECDHE_RSA_WITH_RC4_128_SHA", supportedUpToTLS12, true},
- {TLS_ECDHE_ECDSA_WITH_AES_128_CBC_SHA256, "TLS_ECDHE_ECDSA_WITH_AES_128_CBC_SHA256", supportedOnlyTLS12, true},
- {TLS_ECDHE_RSA_WITH_AES_128_CBC_SHA256, "TLS_ECDHE_RSA_WITH_AES_128_CBC_SHA256", supportedOnlyTLS12, true},
- }
-}
-
-// CipherSuiteName returns the standard name for the passed cipher suite ID
-// (e.g. "TLS_ECDHE_ECDSA_WITH_AES_128_GCM_SHA256"), or a fallback representation
-// of the ID value if the cipher suite is not implemented by this package.
-func CipherSuiteName(id uint16) string {
- for _, c := range CipherSuites() {
- if c.ID == id {
- return c.Name
- }
- }
- for _, c := range InsecureCipherSuites() {
- if c.ID == id {
- return c.Name
- }
- }
- return fmt.Sprintf("0x%04X", id)
-}
-
-// a keyAgreement implements the client and server side of a TLS key agreement
-// protocol by generating and processing key exchange messages.
-type keyAgreement interface {
- // On the server side, the first two methods are called in order.
-
- // In the case that the key agreement protocol doesn't use a
- // ServerKeyExchange message, generateServerKeyExchange can return nil,
- // nil.
- generateServerKeyExchange(*config, *Certificate, *clientHelloMsg, *serverHelloMsg) (*serverKeyExchangeMsg, error)
- processClientKeyExchange(*config, *Certificate, *clientKeyExchangeMsg, uint16) ([]byte, error)
-
- // On the client side, the next two methods are called in order.
-
- // This method may not be called if the server doesn't send a
- // ServerKeyExchange message.
- processServerKeyExchange(*config, *clientHelloMsg, *serverHelloMsg, *x509.Certificate, *serverKeyExchangeMsg) error
- generateClientKeyExchange(*config, *clientHelloMsg, *x509.Certificate) ([]byte, *clientKeyExchangeMsg, error)
-}
-
-const (
- // suiteECDHE indicates that the cipher suite involves elliptic curve
- // Diffie-Hellman. This means that it should only be selected when the
- // client indicates that it supports ECC with a curve and point format
- // that we're happy with.
- suiteECDHE = 1 << iota
- // suiteECSign indicates that the cipher suite involves an ECDSA or
- // EdDSA signature and therefore may only be selected when the server's
- // certificate is ECDSA or EdDSA. If this is not set then the cipher suite
- // is RSA based.
- suiteECSign
- // suiteTLS12 indicates that the cipher suite should only be advertised
- // and accepted when using TLS 1.2.
- suiteTLS12
- // suiteSHA384 indicates that the cipher suite uses SHA384 as the
- // handshake hash.
- suiteSHA384
- // suiteDefaultOff indicates that this cipher suite is not included by
- // default.
- suiteDefaultOff
-)
-
-// A cipherSuite is a specific combination of key agreement, cipher and MAC function.
-type cipherSuite struct {
- id uint16
- // the lengths, in bytes, of the key material needed for each component.
- keyLen int
- macLen int
- ivLen int
- ka func(version uint16) keyAgreement
- // flags is a bitmask of the suite* values, above.
- flags int
- cipher func(key, iv []byte, isRead bool) interface{}
- mac func(key []byte) hash.Hash
- aead func(key, fixedNonce []byte) aead
-}
-
-var cipherSuites = []*cipherSuite{
- // Ciphersuite order is chosen so that ECDHE comes before plain RSA and
- // AEADs are the top preference.
- {TLS_ECDHE_RSA_WITH_CHACHA20_POLY1305, 32, 0, 12, ecdheRSAKA, suiteECDHE | suiteTLS12, nil, nil, aeadChaCha20Poly1305},
- {TLS_ECDHE_ECDSA_WITH_CHACHA20_POLY1305, 32, 0, 12, ecdheECDSAKA, suiteECDHE | suiteECSign | suiteTLS12, nil, nil, aeadChaCha20Poly1305},
- {TLS_ECDHE_RSA_WITH_AES_128_GCM_SHA256, 16, 0, 4, ecdheRSAKA, suiteECDHE | suiteTLS12, nil, nil, aeadAESGCM},
- {TLS_ECDHE_ECDSA_WITH_AES_128_GCM_SHA256, 16, 0, 4, ecdheECDSAKA, suiteECDHE | suiteECSign | suiteTLS12, nil, nil, aeadAESGCM},
- {TLS_ECDHE_RSA_WITH_AES_256_GCM_SHA384, 32, 0, 4, ecdheRSAKA, suiteECDHE | suiteTLS12 | suiteSHA384, nil, nil, aeadAESGCM},
- {TLS_ECDHE_ECDSA_WITH_AES_256_GCM_SHA384, 32, 0, 4, ecdheECDSAKA, suiteECDHE | suiteECSign | suiteTLS12 | suiteSHA384, nil, nil, aeadAESGCM},
- {TLS_ECDHE_RSA_WITH_AES_128_CBC_SHA256, 16, 32, 16, ecdheRSAKA, suiteECDHE | suiteTLS12 | suiteDefaultOff, cipherAES, macSHA256, nil},
- {TLS_ECDHE_RSA_WITH_AES_128_CBC_SHA, 16, 20, 16, ecdheRSAKA, suiteECDHE, cipherAES, macSHA1, nil},
- {TLS_ECDHE_ECDSA_WITH_AES_128_CBC_SHA256, 16, 32, 16, ecdheECDSAKA, suiteECDHE | suiteECSign | suiteTLS12 | suiteDefaultOff, cipherAES, macSHA256, nil},
- {TLS_ECDHE_ECDSA_WITH_AES_128_CBC_SHA, 16, 20, 16, ecdheECDSAKA, suiteECDHE | suiteECSign, cipherAES, macSHA1, nil},
- {TLS_ECDHE_RSA_WITH_AES_256_CBC_SHA, 32, 20, 16, ecdheRSAKA, suiteECDHE, cipherAES, macSHA1, nil},
- {TLS_ECDHE_ECDSA_WITH_AES_256_CBC_SHA, 32, 20, 16, ecdheECDSAKA, suiteECDHE | suiteECSign, cipherAES, macSHA1, nil},
- {TLS_RSA_WITH_AES_128_GCM_SHA256, 16, 0, 4, rsaKA, suiteTLS12, nil, nil, aeadAESGCM},
- {TLS_RSA_WITH_AES_256_GCM_SHA384, 32, 0, 4, rsaKA, suiteTLS12 | suiteSHA384, nil, nil, aeadAESGCM},
- {TLS_RSA_WITH_AES_128_CBC_SHA256, 16, 32, 16, rsaKA, suiteTLS12 | suiteDefaultOff, cipherAES, macSHA256, nil},
- {TLS_RSA_WITH_AES_128_CBC_SHA, 16, 20, 16, rsaKA, 0, cipherAES, macSHA1, nil},
- {TLS_RSA_WITH_AES_256_CBC_SHA, 32, 20, 16, rsaKA, 0, cipherAES, macSHA1, nil},
- {TLS_ECDHE_RSA_WITH_3DES_EDE_CBC_SHA, 24, 20, 8, ecdheRSAKA, suiteECDHE, cipher3DES, macSHA1, nil},
- {TLS_RSA_WITH_3DES_EDE_CBC_SHA, 24, 20, 8, rsaKA, 0, cipher3DES, macSHA1, nil},
-
- // RC4-based cipher suites are disabled by default.
- {TLS_RSA_WITH_RC4_128_SHA, 16, 20, 0, rsaKA, suiteDefaultOff, cipherRC4, macSHA1, nil},
- {TLS_ECDHE_RSA_WITH_RC4_128_SHA, 16, 20, 0, ecdheRSAKA, suiteECDHE | suiteDefaultOff, cipherRC4, macSHA1, nil},
- {TLS_ECDHE_ECDSA_WITH_RC4_128_SHA, 16, 20, 0, ecdheECDSAKA, suiteECDHE | suiteECSign | suiteDefaultOff, cipherRC4, macSHA1, nil},
-}
-
-// selectCipherSuite returns the first cipher suite from ids which is also in
-// supportedIDs and passes the ok filter.
-func selectCipherSuite(ids, supportedIDs []uint16, ok func(*cipherSuite) bool) *cipherSuite {
- for _, id := range ids {
- candidate := cipherSuiteByID(id)
- if candidate == nil || !ok(candidate) {
- continue
- }
-
- for _, suppID := range supportedIDs {
- if id == suppID {
- return candidate
- }
- }
- }
- return nil
-}
-
-// A cipherSuiteTLS13 defines only the pair of the AEAD algorithm and hash
-// algorithm to be used with HKDF. See RFC 8446, Appendix B.4.
-type cipherSuiteTLS13 struct {
- id uint16
- keyLen int
- aead func(key, fixedNonce []byte) aead
- hash crypto.Hash
-}
-
-type CipherSuiteTLS13 struct {
- ID uint16
- KeyLen int
- Hash crypto.Hash
- AEAD func(key, fixedNonce []byte) cipher.AEAD
-}
-
-func (c *CipherSuiteTLS13) IVLen() int {
- return aeadNonceLength
-}
-
-var cipherSuitesTLS13 = []*cipherSuiteTLS13{
- {TLS_AES_128_GCM_SHA256, 16, aeadAESGCMTLS13, crypto.SHA256},
- {TLS_CHACHA20_POLY1305_SHA256, 32, aeadChaCha20Poly1305, crypto.SHA256},
- {TLS_AES_256_GCM_SHA384, 32, aeadAESGCMTLS13, crypto.SHA384},
-}
-
-func cipherRC4(key, iv []byte, isRead bool) interface{} {
- cipher, _ := rc4.NewCipher(key)
- return cipher
-}
-
-func cipher3DES(key, iv []byte, isRead bool) interface{} {
- block, _ := des.NewTripleDESCipher(key)
- if isRead {
- return cipher.NewCBCDecrypter(block, iv)
- }
- return cipher.NewCBCEncrypter(block, iv)
-}
-
-func cipherAES(key, iv []byte, isRead bool) interface{} {
- block, _ := aes.NewCipher(key)
- if isRead {
- return cipher.NewCBCDecrypter(block, iv)
- }
- return cipher.NewCBCEncrypter(block, iv)
-}
-
-// macSHA1 returns a SHA-1 based constant time MAC.
-func macSHA1(key []byte) hash.Hash {
- return hmac.New(newConstantTimeHash(sha1.New), key)
-}
-
-// macSHA256 returns a SHA-256 based MAC. This is only supported in TLS 1.2 and
-// is currently only used in disabled-by-default cipher suites.
-func macSHA256(key []byte) hash.Hash {
- return hmac.New(sha256.New, key)
-}
-
-type aead interface {
- cipher.AEAD
-
- // explicitNonceLen returns the number of bytes of explicit nonce
- // included in each record. This is eight for older AEADs and
- // zero for modern ones.
- explicitNonceLen() int
-}
-
-const (
- aeadNonceLength = 12
- noncePrefixLength = 4
-)
-
-// prefixNonceAEAD wraps an AEAD and prefixes a fixed portion of the nonce to
-// each call.
-type prefixNonceAEAD struct {
- // nonce contains the fixed part of the nonce in the first four bytes.
- nonce [aeadNonceLength]byte
- aead cipher.AEAD
-}
-
-func (f *prefixNonceAEAD) NonceSize() int { return aeadNonceLength - noncePrefixLength }
-func (f *prefixNonceAEAD) Overhead() int { return f.aead.Overhead() }
-func (f *prefixNonceAEAD) explicitNonceLen() int { return f.NonceSize() }
-
-func (f *prefixNonceAEAD) Seal(out, nonce, plaintext, additionalData []byte) []byte {
- copy(f.nonce[4:], nonce)
- return f.aead.Seal(out, f.nonce[:], plaintext, additionalData)
-}
-
-func (f *prefixNonceAEAD) Open(out, nonce, ciphertext, additionalData []byte) ([]byte, error) {
- copy(f.nonce[4:], nonce)
- return f.aead.Open(out, f.nonce[:], ciphertext, additionalData)
-}
-
-// xoredNonceAEAD wraps an AEAD by XORing in a fixed pattern to the nonce
-// before each call.
-type xorNonceAEAD struct {
- nonceMask [aeadNonceLength]byte
- aead cipher.AEAD
-}
-
-func (f *xorNonceAEAD) NonceSize() int { return 8 } // 64-bit sequence number
-func (f *xorNonceAEAD) Overhead() int { return f.aead.Overhead() }
-func (f *xorNonceAEAD) explicitNonceLen() int { return 0 }
-
-func (f *xorNonceAEAD) Seal(out, nonce, plaintext, additionalData []byte) []byte {
- for i, b := range nonce {
- f.nonceMask[4+i] ^= b
- }
- result := f.aead.Seal(out, f.nonceMask[:], plaintext, additionalData)
- for i, b := range nonce {
- f.nonceMask[4+i] ^= b
- }
-
- return result
-}
-
-func (f *xorNonceAEAD) Open(out, nonce, ciphertext, additionalData []byte) ([]byte, error) {
- for i, b := range nonce {
- f.nonceMask[4+i] ^= b
- }
- result, err := f.aead.Open(out, f.nonceMask[:], ciphertext, additionalData)
- for i, b := range nonce {
- f.nonceMask[4+i] ^= b
- }
-
- return result, err
-}
-
-func aeadAESGCM(key, noncePrefix []byte) aead {
- if len(noncePrefix) != noncePrefixLength {
- panic("tls: internal error: wrong nonce length")
- }
- aes, err := aes.NewCipher(key)
- if err != nil {
- panic(err)
- }
- aead, err := cipher.NewGCM(aes)
- if err != nil {
- panic(err)
- }
-
- ret := &prefixNonceAEAD{aead: aead}
- copy(ret.nonce[:], noncePrefix)
- return ret
-}
-
-// AEADAESGCMTLS13 creates a new AES-GCM AEAD for TLS 1.3
-func AEADAESGCMTLS13(key, fixedNonce []byte) cipher.AEAD {
- return aeadAESGCMTLS13(key, fixedNonce)
-}
-
-func aeadAESGCMTLS13(key, nonceMask []byte) aead {
- if len(nonceMask) != aeadNonceLength {
- panic("tls: internal error: wrong nonce length")
- }
- aes, err := aes.NewCipher(key)
- if err != nil {
- panic(err)
- }
- aead, err := cipher.NewGCM(aes)
- if err != nil {
- panic(err)
- }
-
- ret := &xorNonceAEAD{aead: aead}
- copy(ret.nonceMask[:], nonceMask)
- return ret
-}
-
-func aeadChaCha20Poly1305(key, nonceMask []byte) aead {
- if len(nonceMask) != aeadNonceLength {
- panic("tls: internal error: wrong nonce length")
- }
- aead, err := chacha20poly1305.New(key)
- if err != nil {
- panic(err)
- }
-
- ret := &xorNonceAEAD{aead: aead}
- copy(ret.nonceMask[:], nonceMask)
- return ret
-}
-
-type constantTimeHash interface {
- hash.Hash
- ConstantTimeSum(b []byte) []byte
-}
-
-// cthWrapper wraps any hash.Hash that implements ConstantTimeSum, and replaces
-// with that all calls to Sum. It's used to obtain a ConstantTimeSum-based HMAC.
-type cthWrapper struct {
- h constantTimeHash
-}
-
-func (c *cthWrapper) Size() int { return c.h.Size() }
-func (c *cthWrapper) BlockSize() int { return c.h.BlockSize() }
-func (c *cthWrapper) Reset() { c.h.Reset() }
-func (c *cthWrapper) Write(p []byte) (int, error) { return c.h.Write(p) }
-func (c *cthWrapper) Sum(b []byte) []byte { return c.h.ConstantTimeSum(b) }
-
-func newConstantTimeHash(h func() hash.Hash) func() hash.Hash {
- return func() hash.Hash {
- return &cthWrapper{h().(constantTimeHash)}
- }
-}
-
-// tls10MAC implements the TLS 1.0 MAC function. RFC 2246, Section 6.2.3.
-func tls10MAC(h hash.Hash, out, seq, header, data, extra []byte) []byte {
- h.Reset()
- h.Write(seq)
- h.Write(header)
- h.Write(data)
- res := h.Sum(out)
- if extra != nil {
- h.Write(extra)
- }
- return res
-}
-
-func rsaKA(version uint16) keyAgreement {
- return rsaKeyAgreement{}
-}
-
-func ecdheECDSAKA(version uint16) keyAgreement {
- return &ecdheKeyAgreement{
- isRSA: false,
- version: version,
- }
-}
-
-func ecdheRSAKA(version uint16) keyAgreement {
- return &ecdheKeyAgreement{
- isRSA: true,
- version: version,
- }
-}
-
-// mutualCipherSuite returns a cipherSuite given a list of supported
-// ciphersuites and the id requested by the peer.
-func mutualCipherSuite(have []uint16, want uint16) *cipherSuite {
- for _, id := range have {
- if id == want {
- return cipherSuiteByID(id)
- }
- }
- return nil
-}
-
-func cipherSuiteByID(id uint16) *cipherSuite {
- for _, cipherSuite := range cipherSuites {
- if cipherSuite.id == id {
- return cipherSuite
- }
- }
- return nil
-}
-
-func mutualCipherSuiteTLS13(have []uint16, want uint16) *cipherSuiteTLS13 {
- for _, id := range have {
- if id == want {
- return cipherSuiteTLS13ByID(id)
- }
- }
- return nil
-}
-
-func cipherSuiteTLS13ByID(id uint16) *cipherSuiteTLS13 {
- for _, cipherSuite := range cipherSuitesTLS13 {
- if cipherSuite.id == id {
- return cipherSuite
- }
- }
- return nil
-}
-
-// A list of cipher suite IDs that are, or have been, implemented by this
-// package.
-//
-// See https://www.iana.org/assignments/tls-parameters/tls-parameters.xml
-const (
- // TLS 1.0 - 1.2 cipher suites.
- TLS_RSA_WITH_RC4_128_SHA uint16 = 0x0005
- TLS_RSA_WITH_3DES_EDE_CBC_SHA uint16 = 0x000a
- TLS_RSA_WITH_AES_128_CBC_SHA uint16 = 0x002f
- TLS_RSA_WITH_AES_256_CBC_SHA uint16 = 0x0035
- TLS_RSA_WITH_AES_128_CBC_SHA256 uint16 = 0x003c
- TLS_RSA_WITH_AES_128_GCM_SHA256 uint16 = 0x009c
- TLS_RSA_WITH_AES_256_GCM_SHA384 uint16 = 0x009d
- TLS_ECDHE_ECDSA_WITH_RC4_128_SHA uint16 = 0xc007
- TLS_ECDHE_ECDSA_WITH_AES_128_CBC_SHA uint16 = 0xc009
- TLS_ECDHE_ECDSA_WITH_AES_256_CBC_SHA uint16 = 0xc00a
- TLS_ECDHE_RSA_WITH_RC4_128_SHA uint16 = 0xc011
- TLS_ECDHE_RSA_WITH_3DES_EDE_CBC_SHA uint16 = 0xc012
- TLS_ECDHE_RSA_WITH_AES_128_CBC_SHA uint16 = 0xc013
- TLS_ECDHE_RSA_WITH_AES_256_CBC_SHA uint16 = 0xc014
- TLS_ECDHE_ECDSA_WITH_AES_128_CBC_SHA256 uint16 = 0xc023
- TLS_ECDHE_RSA_WITH_AES_128_CBC_SHA256 uint16 = 0xc027
- TLS_ECDHE_RSA_WITH_AES_128_GCM_SHA256 uint16 = 0xc02f
- TLS_ECDHE_ECDSA_WITH_AES_128_GCM_SHA256 uint16 = 0xc02b
- TLS_ECDHE_RSA_WITH_AES_256_GCM_SHA384 uint16 = 0xc030
- TLS_ECDHE_ECDSA_WITH_AES_256_GCM_SHA384 uint16 = 0xc02c
- TLS_ECDHE_RSA_WITH_CHACHA20_POLY1305_SHA256 uint16 = 0xcca8
- TLS_ECDHE_ECDSA_WITH_CHACHA20_POLY1305_SHA256 uint16 = 0xcca9
-
- // TLS 1.3 cipher suites.
- TLS_AES_128_GCM_SHA256 uint16 = 0x1301
- TLS_AES_256_GCM_SHA384 uint16 = 0x1302
- TLS_CHACHA20_POLY1305_SHA256 uint16 = 0x1303
-
- // TLS_FALLBACK_SCSV isn't a standard cipher suite but an indicator
- // that the client is doing version fallback. See RFC 7507.
- TLS_FALLBACK_SCSV uint16 = 0x5600
-
- // Legacy names for the corresponding cipher suites with the correct _SHA256
- // suffix, retained for backward compatibility.
- TLS_ECDHE_RSA_WITH_CHACHA20_POLY1305 = TLS_ECDHE_RSA_WITH_CHACHA20_POLY1305_SHA256
- TLS_ECDHE_ECDSA_WITH_CHACHA20_POLY1305 = TLS_ECDHE_ECDSA_WITH_CHACHA20_POLY1305_SHA256
-)
diff --git a/vendor/github.com/marten-seemann/qtls-go1-16/common.go b/vendor/github.com/marten-seemann/qtls-go1-16/common.go
deleted file mode 100644
index 266f93fef..000000000
--- a/vendor/github.com/marten-seemann/qtls-go1-16/common.go
+++ /dev/null
@@ -1,1576 +0,0 @@
-// Copyright 2009 The Go Authors. All rights reserved.
-// Use of this source code is governed by a BSD-style
-// license that can be found in the LICENSE file.
-
-package qtls
-
-import (
- "bytes"
- "container/list"
- "crypto"
- "crypto/ecdsa"
- "crypto/ed25519"
- "crypto/elliptic"
- "crypto/rand"
- "crypto/rsa"
- "crypto/sha512"
- "crypto/tls"
- "crypto/x509"
- "errors"
- "fmt"
- "io"
- "net"
- "sort"
- "strings"
- "sync"
- "time"
-)
-
-const (
- VersionTLS10 = 0x0301
- VersionTLS11 = 0x0302
- VersionTLS12 = 0x0303
- VersionTLS13 = 0x0304
-
- // Deprecated: SSLv3 is cryptographically broken, and is no longer
- // supported by this package. See golang.org/issue/32716.
- VersionSSL30 = 0x0300
-)
-
-const (
- maxPlaintext = 16384 // maximum plaintext payload length
- maxCiphertext = 16384 + 2048 // maximum ciphertext payload length
- maxCiphertextTLS13 = 16384 + 256 // maximum ciphertext length in TLS 1.3
- recordHeaderLen = 5 // record header length
- maxHandshake = 65536 // maximum handshake we support (protocol max is 16 MB)
- maxUselessRecords = 16 // maximum number of consecutive non-advancing records
-)
-
-// TLS record types.
-type recordType uint8
-
-const (
- recordTypeChangeCipherSpec recordType = 20
- recordTypeAlert recordType = 21
- recordTypeHandshake recordType = 22
- recordTypeApplicationData recordType = 23
-)
-
-// TLS handshake message types.
-const (
- typeHelloRequest uint8 = 0
- typeClientHello uint8 = 1
- typeServerHello uint8 = 2
- typeNewSessionTicket uint8 = 4
- typeEndOfEarlyData uint8 = 5
- typeEncryptedExtensions uint8 = 8
- typeCertificate uint8 = 11
- typeServerKeyExchange uint8 = 12
- typeCertificateRequest uint8 = 13
- typeServerHelloDone uint8 = 14
- typeCertificateVerify uint8 = 15
- typeClientKeyExchange uint8 = 16
- typeFinished uint8 = 20
- typeCertificateStatus uint8 = 22
- typeKeyUpdate uint8 = 24
- typeNextProtocol uint8 = 67 // Not IANA assigned
- typeMessageHash uint8 = 254 // synthetic message
-)
-
-// TLS compression types.
-const (
- compressionNone uint8 = 0
-)
-
-type Extension struct {
- Type uint16
- Data []byte
-}
-
-// TLS extension numbers
-const (
- extensionServerName uint16 = 0
- extensionStatusRequest uint16 = 5
- extensionSupportedCurves uint16 = 10 // supported_groups in TLS 1.3, see RFC 8446, Section 4.2.7
- extensionSupportedPoints uint16 = 11
- extensionSignatureAlgorithms uint16 = 13
- extensionALPN uint16 = 16
- extensionSCT uint16 = 18
- extensionSessionTicket uint16 = 35
- extensionPreSharedKey uint16 = 41
- extensionEarlyData uint16 = 42
- extensionSupportedVersions uint16 = 43
- extensionCookie uint16 = 44
- extensionPSKModes uint16 = 45
- extensionCertificateAuthorities uint16 = 47
- extensionSignatureAlgorithmsCert uint16 = 50
- extensionKeyShare uint16 = 51
- extensionRenegotiationInfo uint16 = 0xff01
-)
-
-// TLS signaling cipher suite values
-const (
- scsvRenegotiation uint16 = 0x00ff
-)
-
-type EncryptionLevel uint8
-
-const (
- EncryptionHandshake EncryptionLevel = iota
- Encryption0RTT
- EncryptionApplication
-)
-
-// CurveID is a tls.CurveID
-type CurveID = tls.CurveID
-
-const (
- CurveP256 CurveID = 23
- CurveP384 CurveID = 24
- CurveP521 CurveID = 25
- X25519 CurveID = 29
-)
-
-// TLS 1.3 Key Share. See RFC 8446, Section 4.2.8.
-type keyShare struct {
- group CurveID
- data []byte
-}
-
-// TLS 1.3 PSK Key Exchange Modes. See RFC 8446, Section 4.2.9.
-const (
- pskModePlain uint8 = 0
- pskModeDHE uint8 = 1
-)
-
-// TLS 1.3 PSK Identity. Can be a Session Ticket, or a reference to a saved
-// session. See RFC 8446, Section 4.2.11.
-type pskIdentity struct {
- label []byte
- obfuscatedTicketAge uint32
-}
-
-// TLS Elliptic Curve Point Formats
-// https://www.iana.org/assignments/tls-parameters/tls-parameters.xml#tls-parameters-9
-const (
- pointFormatUncompressed uint8 = 0
-)
-
-// TLS CertificateStatusType (RFC 3546)
-const (
- statusTypeOCSP uint8 = 1
-)
-
-// Certificate types (for certificateRequestMsg)
-const (
- certTypeRSASign = 1
- certTypeECDSASign = 64 // ECDSA or EdDSA keys, see RFC 8422, Section 3.
-)
-
-// Signature algorithms (for internal signaling use). Starting at 225 to avoid overlap with
-// TLS 1.2 codepoints (RFC 5246, Appendix A.4.1), with which these have nothing to do.
-const (
- signaturePKCS1v15 uint8 = iota + 225
- signatureRSAPSS
- signatureECDSA
- signatureEd25519
-)
-
-// directSigning is a standard Hash value that signals that no pre-hashing
-// should be performed, and that the input should be signed directly. It is the
-// hash function associated with the Ed25519 signature scheme.
-var directSigning crypto.Hash = 0
-
-// supportedSignatureAlgorithms contains the signature and hash algorithms that
-// the code advertises as supported in a TLS 1.2+ ClientHello and in a TLS 1.2+
-// CertificateRequest. The two fields are merged to match with TLS 1.3.
-// Note that in TLS 1.2, the ECDSA algorithms are not constrained to P-256, etc.
-var supportedSignatureAlgorithms = []SignatureScheme{
- PSSWithSHA256,
- ECDSAWithP256AndSHA256,
- Ed25519,
- PSSWithSHA384,
- PSSWithSHA512,
- PKCS1WithSHA256,
- PKCS1WithSHA384,
- PKCS1WithSHA512,
- ECDSAWithP384AndSHA384,
- ECDSAWithP521AndSHA512,
- PKCS1WithSHA1,
- ECDSAWithSHA1,
-}
-
-// helloRetryRequestRandom is set as the Random value of a ServerHello
-// to signal that the message is actually a HelloRetryRequest.
-var helloRetryRequestRandom = []byte{ // See RFC 8446, Section 4.1.3.
- 0xCF, 0x21, 0xAD, 0x74, 0xE5, 0x9A, 0x61, 0x11,
- 0xBE, 0x1D, 0x8C, 0x02, 0x1E, 0x65, 0xB8, 0x91,
- 0xC2, 0xA2, 0x11, 0x16, 0x7A, 0xBB, 0x8C, 0x5E,
- 0x07, 0x9E, 0x09, 0xE2, 0xC8, 0xA8, 0x33, 0x9C,
-}
-
-const (
- // downgradeCanaryTLS12 or downgradeCanaryTLS11 is embedded in the server
- // random as a downgrade protection if the server would be capable of
- // negotiating a higher version. See RFC 8446, Section 4.1.3.
- downgradeCanaryTLS12 = "DOWNGRD\x01"
- downgradeCanaryTLS11 = "DOWNGRD\x00"
-)
-
-// testingOnlyForceDowngradeCanary is set in tests to force the server side to
-// include downgrade canaries even if it's using its highers supported version.
-var testingOnlyForceDowngradeCanary bool
-
-type ConnectionState = tls.ConnectionState
-
-// ConnectionState records basic TLS details about the connection.
-type connectionState struct {
- // Version is the TLS version used by the connection (e.g. VersionTLS12).
- Version uint16
-
- // HandshakeComplete is true if the handshake has concluded.
- HandshakeComplete bool
-
- // DidResume is true if this connection was successfully resumed from a
- // previous session with a session ticket or similar mechanism.
- DidResume bool
-
- // CipherSuite is the cipher suite negotiated for the connection (e.g.
- // TLS_ECDHE_ECDSA_WITH_AES_128_GCM_SHA256, TLS_AES_128_GCM_SHA256).
- CipherSuite uint16
-
- // NegotiatedProtocol is the application protocol negotiated with ALPN.
- NegotiatedProtocol string
-
- // NegotiatedProtocolIsMutual used to indicate a mutual NPN negotiation.
- //
- // Deprecated: this value is always true.
- NegotiatedProtocolIsMutual bool
-
- // ServerName is the value of the Server Name Indication extension sent by
- // the client. It's available both on the server and on the client side.
- ServerName string
-
- // PeerCertificates are the parsed certificates sent by the peer, in the
- // order in which they were sent. The first element is the leaf certificate
- // that the connection is verified against.
- //
- // On the client side, it can't be empty. On the server side, it can be
- // empty if Config.ClientAuth is not RequireAnyClientCert or
- // RequireAndVerifyClientCert.
- PeerCertificates []*x509.Certificate
-
- // VerifiedChains is a list of one or more chains where the first element is
- // PeerCertificates[0] and the last element is from Config.RootCAs (on the
- // client side) or Config.ClientCAs (on the server side).
- //
- // On the client side, it's set if Config.InsecureSkipVerify is false. On
- // the server side, it's set if Config.ClientAuth is VerifyClientCertIfGiven
- // (and the peer provided a certificate) or RequireAndVerifyClientCert.
- VerifiedChains [][]*x509.Certificate
-
- // SignedCertificateTimestamps is a list of SCTs provided by the peer
- // through the TLS handshake for the leaf certificate, if any.
- SignedCertificateTimestamps [][]byte
-
- // OCSPResponse is a stapled Online Certificate Status Protocol (OCSP)
- // response provided by the peer for the leaf certificate, if any.
- OCSPResponse []byte
-
- // TLSUnique contains the "tls-unique" channel binding value (see RFC 5929,
- // Section 3). This value will be nil for TLS 1.3 connections and for all
- // resumed connections.
- //
- // Deprecated: there are conditions in which this value might not be unique
- // to a connection. See the Security Considerations sections of RFC 5705 and
- // RFC 7627, and https://mitls.org/pages/attacks/3SHAKE#channelbindings.
- TLSUnique []byte
-
- // ekm is a closure exposed via ExportKeyingMaterial.
- ekm func(label string, context []byte, length int) ([]byte, error)
-}
-
-type ConnectionStateWith0RTT struct {
- ConnectionState
-
- Used0RTT bool // true if 0-RTT was both offered and accepted
-}
-
-// ClientAuthType is tls.ClientAuthType
-type ClientAuthType = tls.ClientAuthType
-
-const (
- NoClientCert = tls.NoClientCert
- RequestClientCert = tls.RequestClientCert
- RequireAnyClientCert = tls.RequireAnyClientCert
- VerifyClientCertIfGiven = tls.VerifyClientCertIfGiven
- RequireAndVerifyClientCert = tls.RequireAndVerifyClientCert
-)
-
-// requiresClientCert reports whether the ClientAuthType requires a client
-// certificate to be provided.
-func requiresClientCert(c ClientAuthType) bool {
- switch c {
- case RequireAnyClientCert, RequireAndVerifyClientCert:
- return true
- default:
- return false
- }
-}
-
-// ClientSessionState contains the state needed by clients to resume TLS
-// sessions.
-type ClientSessionState = tls.ClientSessionState
-
-type clientSessionState struct {
- sessionTicket []uint8 // Encrypted ticket used for session resumption with server
- vers uint16 // TLS version negotiated for the session
- cipherSuite uint16 // Ciphersuite negotiated for the session
- masterSecret []byte // Full handshake MasterSecret, or TLS 1.3 resumption_master_secret
- serverCertificates []*x509.Certificate // Certificate chain presented by the server
- verifiedChains [][]*x509.Certificate // Certificate chains we built for verification
- receivedAt time.Time // When the session ticket was received from the server
- ocspResponse []byte // Stapled OCSP response presented by the server
- scts [][]byte // SCTs presented by the server
-
- // TLS 1.3 fields.
- nonce []byte // Ticket nonce sent by the server, to derive PSK
- useBy time.Time // Expiration of the ticket lifetime as set by the server
- ageAdd uint32 // Random obfuscation factor for sending the ticket age
-}
-
-// ClientSessionCache is a cache of ClientSessionState objects that can be used
-// by a client to resume a TLS session with a given server. ClientSessionCache
-// implementations should expect to be called concurrently from different
-// goroutines. Up to TLS 1.2, only ticket-based resumption is supported, not
-// SessionID-based resumption. In TLS 1.3 they were merged into PSK modes, which
-// are supported via this interface.
-//go:generate sh -c "mockgen -package qtls -destination mock_client_session_cache_test.go github.com/marten-seemann/qtls-go1-15 ClientSessionCache"
-type ClientSessionCache = tls.ClientSessionCache
-
-// SignatureScheme is a tls.SignatureScheme
-type SignatureScheme = tls.SignatureScheme
-
-const (
- // RSASSA-PKCS1-v1_5 algorithms.
- PKCS1WithSHA256 SignatureScheme = 0x0401
- PKCS1WithSHA384 SignatureScheme = 0x0501
- PKCS1WithSHA512 SignatureScheme = 0x0601
-
- // RSASSA-PSS algorithms with public key OID rsaEncryption.
- PSSWithSHA256 SignatureScheme = 0x0804
- PSSWithSHA384 SignatureScheme = 0x0805
- PSSWithSHA512 SignatureScheme = 0x0806
-
- // ECDSA algorithms. Only constrained to a specific curve in TLS 1.3.
- ECDSAWithP256AndSHA256 SignatureScheme = 0x0403
- ECDSAWithP384AndSHA384 SignatureScheme = 0x0503
- ECDSAWithP521AndSHA512 SignatureScheme = 0x0603
-
- // EdDSA algorithms.
- Ed25519 SignatureScheme = 0x0807
-
- // Legacy signature and hash algorithms for TLS 1.2.
- PKCS1WithSHA1 SignatureScheme = 0x0201
- ECDSAWithSHA1 SignatureScheme = 0x0203
-)
-
-// ClientHelloInfo contains information from a ClientHello message in order to
-// guide application logic in the GetCertificate and GetConfigForClient callbacks.
-type ClientHelloInfo = tls.ClientHelloInfo
-
-type clientHelloInfo struct {
- // CipherSuites lists the CipherSuites supported by the client (e.g.
- // TLS_AES_128_GCM_SHA256, TLS_ECDHE_ECDSA_WITH_AES_128_GCM_SHA256).
- CipherSuites []uint16
-
- // ServerName indicates the name of the server requested by the client
- // in order to support virtual hosting. ServerName is only set if the
- // client is using SNI (see RFC 4366, Section 3.1).
- ServerName string
-
- // SupportedCurves lists the elliptic curves supported by the client.
- // SupportedCurves is set only if the Supported Elliptic Curves
- // Extension is being used (see RFC 4492, Section 5.1.1).
- SupportedCurves []CurveID
-
- // SupportedPoints lists the point formats supported by the client.
- // SupportedPoints is set only if the Supported Point Formats Extension
- // is being used (see RFC 4492, Section 5.1.2).
- SupportedPoints []uint8
-
- // SignatureSchemes lists the signature and hash schemes that the client
- // is willing to verify. SignatureSchemes is set only if the Signature
- // Algorithms Extension is being used (see RFC 5246, Section 7.4.1.4.1).
- SignatureSchemes []SignatureScheme
-
- // SupportedProtos lists the application protocols supported by the client.
- // SupportedProtos is set only if the Application-Layer Protocol
- // Negotiation Extension is being used (see RFC 7301, Section 3.1).
- //
- // Servers can select a protocol by setting Config.NextProtos in a
- // GetConfigForClient return value.
- SupportedProtos []string
-
- // SupportedVersions lists the TLS versions supported by the client.
- // For TLS versions less than 1.3, this is extrapolated from the max
- // version advertised by the client, so values other than the greatest
- // might be rejected if used.
- SupportedVersions []uint16
-
- // Conn is the underlying net.Conn for the connection. Do not read
- // from, or write to, this connection; that will cause the TLS
- // connection to fail.
- Conn net.Conn
-
- // config is embedded by the GetCertificate or GetConfigForClient caller,
- // for use with SupportsCertificate.
- config *Config
-}
-
-// CertificateRequestInfo contains information from a server's
-// CertificateRequest message, which is used to demand a certificate and proof
-// of control from a client.
-type CertificateRequestInfo = tls.CertificateRequestInfo
-
-type certificateRequestInfo struct {
- // AcceptableCAs contains zero or more, DER-encoded, X.501
- // Distinguished Names. These are the names of root or intermediate CAs
- // that the server wishes the returned certificate to be signed by. An
- // empty slice indicates that the server has no preference.
- AcceptableCAs [][]byte
-
- // SignatureSchemes lists the signature schemes that the server is
- // willing to verify.
- SignatureSchemes []SignatureScheme
-
- // Version is the TLS version that was negotiated for this connection.
- Version uint16
-}
-
-// RenegotiationSupport enumerates the different levels of support for TLS
-// renegotiation. TLS renegotiation is the act of performing subsequent
-// handshakes on a connection after the first. This significantly complicates
-// the state machine and has been the source of numerous, subtle security
-// issues. Initiating a renegotiation is not supported, but support for
-// accepting renegotiation requests may be enabled.
-//
-// Even when enabled, the server may not change its identity between handshakes
-// (i.e. the leaf certificate must be the same). Additionally, concurrent
-// handshake and application data flow is not permitted so renegotiation can
-// only be used with protocols that synchronise with the renegotiation, such as
-// HTTPS.
-//
-// Renegotiation is not defined in TLS 1.3.
-type RenegotiationSupport = tls.RenegotiationSupport
-
-const (
- // RenegotiateNever disables renegotiation.
- RenegotiateNever = tls.RenegotiateNever
-
- // RenegotiateOnceAsClient allows a remote server to request
- // renegotiation once per connection.
- RenegotiateOnceAsClient = tls.RenegotiateOnceAsClient
-
- // RenegotiateFreelyAsClient allows a remote server to repeatedly
- // request renegotiation.
- RenegotiateFreelyAsClient = tls.RenegotiateFreelyAsClient
-)
-
-// A Config structure is used to configure a TLS client or server.
-// After one has been passed to a TLS function it must not be
-// modified. A Config may be reused; the tls package will also not
-// modify it.
-type Config = tls.Config
-
-type config struct {
- // Rand provides the source of entropy for nonces and RSA blinding.
- // If Rand is nil, TLS uses the cryptographic random reader in package
- // crypto/rand.
- // The Reader must be safe for use by multiple goroutines.
- Rand io.Reader
-
- // Time returns the current time as the number of seconds since the epoch.
- // If Time is nil, TLS uses time.Now.
- Time func() time.Time
-
- // Certificates contains one or more certificate chains to present to the
- // other side of the connection. The first certificate compatible with the
- // peer's requirements is selected automatically.
- //
- // Server configurations must set one of Certificates, GetCertificate or
- // GetConfigForClient. Clients doing client-authentication may set either
- // Certificates or GetClientCertificate.
- //
- // Note: if there are multiple Certificates, and they don't have the
- // optional field Leaf set, certificate selection will incur a significant
- // per-handshake performance cost.
- Certificates []Certificate
-
- // NameToCertificate maps from a certificate name to an element of
- // Certificates. Note that a certificate name can be of the form
- // '*.example.com' and so doesn't have to be a domain name as such.
- //
- // Deprecated: NameToCertificate only allows associating a single
- // certificate with a given name. Leave this field nil to let the library
- // select the first compatible chain from Certificates.
- NameToCertificate map[string]*Certificate
-
- // GetCertificate returns a Certificate based on the given
- // ClientHelloInfo. It will only be called if the client supplies SNI
- // information or if Certificates is empty.
- //
- // If GetCertificate is nil or returns nil, then the certificate is
- // retrieved from NameToCertificate. If NameToCertificate is nil, the
- // best element of Certificates will be used.
- GetCertificate func(*ClientHelloInfo) (*Certificate, error)
-
- // GetClientCertificate, if not nil, is called when a server requests a
- // certificate from a client. If set, the contents of Certificates will
- // be ignored.
- //
- // If GetClientCertificate returns an error, the handshake will be
- // aborted and that error will be returned. Otherwise
- // GetClientCertificate must return a non-nil Certificate. If
- // Certificate.Certificate is empty then no certificate will be sent to
- // the server. If this is unacceptable to the server then it may abort
- // the handshake.
- //
- // GetClientCertificate may be called multiple times for the same
- // connection if renegotiation occurs or if TLS 1.3 is in use.
- GetClientCertificate func(*CertificateRequestInfo) (*Certificate, error)
-
- // GetConfigForClient, if not nil, is called after a ClientHello is
- // received from a client. It may return a non-nil Config in order to
- // change the Config that will be used to handle this connection. If
- // the returned Config is nil, the original Config will be used. The
- // Config returned by this callback may not be subsequently modified.
- //
- // If GetConfigForClient is nil, the Config passed to Server() will be
- // used for all connections.
- //
- // If SessionTicketKey was explicitly set on the returned Config, or if
- // SetSessionTicketKeys was called on the returned Config, those keys will
- // be used. Otherwise, the original Config keys will be used (and possibly
- // rotated if they are automatically managed).
- GetConfigForClient func(*ClientHelloInfo) (*Config, error)
-
- // VerifyPeerCertificate, if not nil, is called after normal
- // certificate verification by either a TLS client or server. It
- // receives the raw ASN.1 certificates provided by the peer and also
- // any verified chains that normal processing found. If it returns a
- // non-nil error, the handshake is aborted and that error results.
- //
- // If normal verification fails then the handshake will abort before
- // considering this callback. If normal verification is disabled by
- // setting InsecureSkipVerify, or (for a server) when ClientAuth is
- // RequestClientCert or RequireAnyClientCert, then this callback will
- // be considered but the verifiedChains argument will always be nil.
- VerifyPeerCertificate func(rawCerts [][]byte, verifiedChains [][]*x509.Certificate) error
-
- // VerifyConnection, if not nil, is called after normal certificate
- // verification and after VerifyPeerCertificate by either a TLS client
- // or server. If it returns a non-nil error, the handshake is aborted
- // and that error results.
- //
- // If normal verification fails then the handshake will abort before
- // considering this callback. This callback will run for all connections
- // regardless of InsecureSkipVerify or ClientAuth settings.
- VerifyConnection func(ConnectionState) error
-
- // RootCAs defines the set of root certificate authorities
- // that clients use when verifying server certificates.
- // If RootCAs is nil, TLS uses the host's root CA set.
- RootCAs *x509.CertPool
-
- // NextProtos is a list of supported application level protocols, in
- // order of preference.
- NextProtos []string
-
- // ServerName is used to verify the hostname on the returned
- // certificates unless InsecureSkipVerify is given. It is also included
- // in the client's handshake to support virtual hosting unless it is
- // an IP address.
- ServerName string
-
- // ClientAuth determines the server's policy for
- // TLS Client Authentication. The default is NoClientCert.
- ClientAuth ClientAuthType
-
- // ClientCAs defines the set of root certificate authorities
- // that servers use if required to verify a client certificate
- // by the policy in ClientAuth.
- ClientCAs *x509.CertPool
-
- // InsecureSkipVerify controls whether a client verifies the server's
- // certificate chain and host name. If InsecureSkipVerify is true, crypto/tls
- // accepts any certificate presented by the server and any host name in that
- // certificate. In this mode, TLS is susceptible to machine-in-the-middle
- // attacks unless custom verification is used. This should be used only for
- // testing or in combination with VerifyConnection or VerifyPeerCertificate.
- InsecureSkipVerify bool
-
- // CipherSuites is a list of supported cipher suites for TLS versions up to
- // TLS 1.2. If CipherSuites is nil, a default list of secure cipher suites
- // is used, with a preference order based on hardware performance. The
- // default cipher suites might change over Go versions. Note that TLS 1.3
- // ciphersuites are not configurable.
- CipherSuites []uint16
-
- // PreferServerCipherSuites controls whether the server selects the
- // client's most preferred ciphersuite, or the server's most preferred
- // ciphersuite. If true then the server's preference, as expressed in
- // the order of elements in CipherSuites, is used.
- PreferServerCipherSuites bool
-
- // SessionTicketsDisabled may be set to true to disable session ticket and
- // PSK (resumption) support. Note that on clients, session ticket support is
- // also disabled if ClientSessionCache is nil.
- SessionTicketsDisabled bool
-
- // SessionTicketKey is used by TLS servers to provide session resumption.
- // See RFC 5077 and the PSK mode of RFC 8446. If zero, it will be filled
- // with random data before the first server handshake.
- //
- // Deprecated: if this field is left at zero, session ticket keys will be
- // automatically rotated every day and dropped after seven days. For
- // customizing the rotation schedule or synchronizing servers that are
- // terminating connections for the same host, use SetSessionTicketKeys.
- SessionTicketKey [32]byte
-
- // ClientSessionCache is a cache of ClientSessionState entries for TLS
- // session resumption. It is only used by clients.
- ClientSessionCache ClientSessionCache
-
- // MinVersion contains the minimum TLS version that is acceptable.
- // If zero, TLS 1.0 is currently taken as the minimum.
- MinVersion uint16
-
- // MaxVersion contains the maximum TLS version that is acceptable.
- // If zero, the maximum version supported by this package is used,
- // which is currently TLS 1.3.
- MaxVersion uint16
-
- // CurvePreferences contains the elliptic curves that will be used in
- // an ECDHE handshake, in preference order. If empty, the default will
- // be used. The client will use the first preference as the type for
- // its key share in TLS 1.3. This may change in the future.
- CurvePreferences []CurveID
-
- // DynamicRecordSizingDisabled disables adaptive sizing of TLS records.
- // When true, the largest possible TLS record size is always used. When
- // false, the size of TLS records may be adjusted in an attempt to
- // improve latency.
- DynamicRecordSizingDisabled bool
-
- // Renegotiation controls what types of renegotiation are supported.
- // The default, none, is correct for the vast majority of applications.
- Renegotiation RenegotiationSupport
-
- // KeyLogWriter optionally specifies a destination for TLS master secrets
- // in NSS key log format that can be used to allow external programs
- // such as Wireshark to decrypt TLS connections.
- // See https://developer.mozilla.org/en-US/docs/Mozilla/Projects/NSS/Key_Log_Format.
- // Use of KeyLogWriter compromises security and should only be
- // used for debugging.
- KeyLogWriter io.Writer
-
- // mutex protects sessionTicketKeys and autoSessionTicketKeys.
- mutex sync.RWMutex
- // sessionTicketKeys contains zero or more ticket keys. If set, it means the
- // the keys were set with SessionTicketKey or SetSessionTicketKeys. The
- // first key is used for new tickets and any subsequent keys can be used to
- // decrypt old tickets. The slice contents are not protected by the mutex
- // and are immutable.
- sessionTicketKeys []ticketKey
- // autoSessionTicketKeys is like sessionTicketKeys but is owned by the
- // auto-rotation logic. See Config.ticketKeys.
- autoSessionTicketKeys []ticketKey
-}
-
-// A RecordLayer handles encrypting and decrypting of TLS messages.
-type RecordLayer interface {
- SetReadKey(encLevel EncryptionLevel, suite *CipherSuiteTLS13, trafficSecret []byte)
- SetWriteKey(encLevel EncryptionLevel, suite *CipherSuiteTLS13, trafficSecret []byte)
- ReadHandshakeMessage() ([]byte, error)
- WriteRecord([]byte) (int, error)
- SendAlert(uint8)
-}
-
-type ExtraConfig struct {
- // GetExtensions, if not nil, is called before a message that allows
- // sending of extensions is sent.
- // Currently only implemented for the ClientHello message (for the client)
- // and for the EncryptedExtensions message (for the server).
- // Only valid for TLS 1.3.
- GetExtensions func(handshakeMessageType uint8) []Extension
-
- // ReceivedExtensions, if not nil, is called when a message that allows the
- // inclusion of extensions is received.
- // It is called with an empty slice of extensions, if the message didn't
- // contain any extensions.
- // Currently only implemented for the ClientHello message (sent by the
- // client) and for the EncryptedExtensions message (sent by the server).
- // Only valid for TLS 1.3.
- ReceivedExtensions func(handshakeMessageType uint8, exts []Extension)
-
- // AlternativeRecordLayer is used by QUIC
- AlternativeRecordLayer RecordLayer
-
- // Enforce the selection of a supported application protocol.
- // Only works for TLS 1.3.
- // If enabled, client and server have to agree on an application protocol.
- // Otherwise, connection establishment fails.
- EnforceNextProtoSelection bool
-
- // If MaxEarlyData is greater than 0, the client will be allowed to send early
- // data when resuming a session.
- // Requires the AlternativeRecordLayer to be set.
- //
- // It has no meaning on the client.
- MaxEarlyData uint32
-
- // The Accept0RTT callback is called when the client offers 0-RTT.
- // The server then has to decide if it wants to accept or reject 0-RTT.
- // It is only used for servers.
- Accept0RTT func(appData []byte) bool
-
- // 0RTTRejected is called when the server rejectes 0-RTT.
- // It is only used for clients.
- Rejected0RTT func()
-
- // If set, the client will export the 0-RTT key when resuming a session that
- // allows sending of early data.
- // Requires the AlternativeRecordLayer to be set.
- //
- // It has no meaning to the server.
- Enable0RTT bool
-
- // Is called when the client saves a session ticket to the session ticket.
- // This gives the application the opportunity to save some data along with the ticket,
- // which can be restored when the session ticket is used.
- GetAppDataForSessionState func() []byte
-
- // Is called when the client uses a session ticket.
- // Restores the application data that was saved earlier on GetAppDataForSessionTicket.
- SetAppDataFromSessionState func([]byte)
-}
-
-// Clone clones.
-func (c *ExtraConfig) Clone() *ExtraConfig {
- return &ExtraConfig{
- GetExtensions: c.GetExtensions,
- ReceivedExtensions: c.ReceivedExtensions,
- AlternativeRecordLayer: c.AlternativeRecordLayer,
- EnforceNextProtoSelection: c.EnforceNextProtoSelection,
- MaxEarlyData: c.MaxEarlyData,
- Enable0RTT: c.Enable0RTT,
- Accept0RTT: c.Accept0RTT,
- Rejected0RTT: c.Rejected0RTT,
- GetAppDataForSessionState: c.GetAppDataForSessionState,
- SetAppDataFromSessionState: c.SetAppDataFromSessionState,
- }
-}
-
-func (c *ExtraConfig) usesAlternativeRecordLayer() bool {
- return c != nil && c.AlternativeRecordLayer != nil
-}
-
-const (
- // ticketKeyNameLen is the number of bytes of identifier that is prepended to
- // an encrypted session ticket in order to identify the key used to encrypt it.
- ticketKeyNameLen = 16
-
- // ticketKeyLifetime is how long a ticket key remains valid and can be used to
- // resume a client connection.
- ticketKeyLifetime = 7 * 24 * time.Hour // 7 days
-
- // ticketKeyRotation is how often the server should rotate the session ticket key
- // that is used for new tickets.
- ticketKeyRotation = 24 * time.Hour
-)
-
-// ticketKey is the internal representation of a session ticket key.
-type ticketKey struct {
- // keyName is an opaque byte string that serves to identify the session
- // ticket key. It's exposed as plaintext in every session ticket.
- keyName [ticketKeyNameLen]byte
- aesKey [16]byte
- hmacKey [16]byte
- // created is the time at which this ticket key was created. See Config.ticketKeys.
- created time.Time
-}
-
-// ticketKeyFromBytes converts from the external representation of a session
-// ticket key to a ticketKey. Externally, session ticket keys are 32 random
-// bytes and this function expands that into sufficient name and key material.
-func (c *config) ticketKeyFromBytes(b [32]byte) (key ticketKey) {
- hashed := sha512.Sum512(b[:])
- copy(key.keyName[:], hashed[:ticketKeyNameLen])
- copy(key.aesKey[:], hashed[ticketKeyNameLen:ticketKeyNameLen+16])
- copy(key.hmacKey[:], hashed[ticketKeyNameLen+16:ticketKeyNameLen+32])
- key.created = c.time()
- return key
-}
-
-// maxSessionTicketLifetime is the maximum allowed lifetime of a TLS 1.3 session
-// ticket, and the lifetime we set for tickets we send.
-const maxSessionTicketLifetime = 7 * 24 * time.Hour
-
-// Clone returns a shallow clone of c or nil if c is nil. It is safe to clone a Config that is
-// being used concurrently by a TLS client or server.
-func (c *config) Clone() *config {
- if c == nil {
- return nil
- }
- c.mutex.RLock()
- defer c.mutex.RUnlock()
- return &config{
- Rand: c.Rand,
- Time: c.Time,
- Certificates: c.Certificates,
- NameToCertificate: c.NameToCertificate,
- GetCertificate: c.GetCertificate,
- GetClientCertificate: c.GetClientCertificate,
- GetConfigForClient: c.GetConfigForClient,
- VerifyPeerCertificate: c.VerifyPeerCertificate,
- VerifyConnection: c.VerifyConnection,
- RootCAs: c.RootCAs,
- NextProtos: c.NextProtos,
- ServerName: c.ServerName,
- ClientAuth: c.ClientAuth,
- ClientCAs: c.ClientCAs,
- InsecureSkipVerify: c.InsecureSkipVerify,
- CipherSuites: c.CipherSuites,
- PreferServerCipherSuites: c.PreferServerCipherSuites,
- SessionTicketsDisabled: c.SessionTicketsDisabled,
- SessionTicketKey: c.SessionTicketKey,
- ClientSessionCache: c.ClientSessionCache,
- MinVersion: c.MinVersion,
- MaxVersion: c.MaxVersion,
- CurvePreferences: c.CurvePreferences,
- DynamicRecordSizingDisabled: c.DynamicRecordSizingDisabled,
- Renegotiation: c.Renegotiation,
- KeyLogWriter: c.KeyLogWriter,
- sessionTicketKeys: c.sessionTicketKeys,
- autoSessionTicketKeys: c.autoSessionTicketKeys,
- }
-}
-
-// deprecatedSessionTicketKey is set as the prefix of SessionTicketKey if it was
-// randomized for backwards compatibility but is not in use.
-var deprecatedSessionTicketKey = []byte("DEPRECATED")
-
-// initLegacySessionTicketKeyRLocked ensures the legacy SessionTicketKey field is
-// randomized if empty, and that sessionTicketKeys is populated from it otherwise.
-func (c *config) initLegacySessionTicketKeyRLocked() {
- // Don't write if SessionTicketKey is already defined as our deprecated string,
- // or if it is defined by the user but sessionTicketKeys is already set.
- if c.SessionTicketKey != [32]byte{} &&
- (bytes.HasPrefix(c.SessionTicketKey[:], deprecatedSessionTicketKey) || len(c.sessionTicketKeys) > 0) {
- return
- }
-
- // We need to write some data, so get an exclusive lock and re-check any conditions.
- c.mutex.RUnlock()
- defer c.mutex.RLock()
- c.mutex.Lock()
- defer c.mutex.Unlock()
- if c.SessionTicketKey == [32]byte{} {
- if _, err := io.ReadFull(c.rand(), c.SessionTicketKey[:]); err != nil {
- panic(fmt.Sprintf("tls: unable to generate random session ticket key: %v", err))
- }
- // Write the deprecated prefix at the beginning so we know we created
- // it. This key with the DEPRECATED prefix isn't used as an actual
- // session ticket key, and is only randomized in case the application
- // reuses it for some reason.
- copy(c.SessionTicketKey[:], deprecatedSessionTicketKey)
- } else if !bytes.HasPrefix(c.SessionTicketKey[:], deprecatedSessionTicketKey) && len(c.sessionTicketKeys) == 0 {
- c.sessionTicketKeys = []ticketKey{c.ticketKeyFromBytes(c.SessionTicketKey)}
- }
-
-}
-
-// ticketKeys returns the ticketKeys for this connection.
-// If configForClient has explicitly set keys, those will
-// be returned. Otherwise, the keys on c will be used and
-// may be rotated if auto-managed.
-// During rotation, any expired session ticket keys are deleted from
-// c.sessionTicketKeys. If the session ticket key that is currently
-// encrypting tickets (ie. the first ticketKey in c.sessionTicketKeys)
-// is not fresh, then a new session ticket key will be
-// created and prepended to c.sessionTicketKeys.
-func (c *config) ticketKeys(configForClient *config) []ticketKey {
- // If the ConfigForClient callback returned a Config with explicitly set
- // keys, use those, otherwise just use the original Config.
- if configForClient != nil {
- configForClient.mutex.RLock()
- if configForClient.SessionTicketsDisabled {
- return nil
- }
- configForClient.initLegacySessionTicketKeyRLocked()
- if len(configForClient.sessionTicketKeys) != 0 {
- ret := configForClient.sessionTicketKeys
- configForClient.mutex.RUnlock()
- return ret
- }
- configForClient.mutex.RUnlock()
- }
-
- c.mutex.RLock()
- defer c.mutex.RUnlock()
- if c.SessionTicketsDisabled {
- return nil
- }
- c.initLegacySessionTicketKeyRLocked()
- if len(c.sessionTicketKeys) != 0 {
- return c.sessionTicketKeys
- }
- // Fast path for the common case where the key is fresh enough.
- if len(c.autoSessionTicketKeys) > 0 && c.time().Sub(c.autoSessionTicketKeys[0].created) < ticketKeyRotation {
- return c.autoSessionTicketKeys
- }
-
- // autoSessionTicketKeys are managed by auto-rotation.
- c.mutex.RUnlock()
- defer c.mutex.RLock()
- c.mutex.Lock()
- defer c.mutex.Unlock()
- // Re-check the condition in case it changed since obtaining the new lock.
- if len(c.autoSessionTicketKeys) == 0 || c.time().Sub(c.autoSessionTicketKeys[0].created) >= ticketKeyRotation {
- var newKey [32]byte
- if _, err := io.ReadFull(c.rand(), newKey[:]); err != nil {
- panic(fmt.Sprintf("unable to generate random session ticket key: %v", err))
- }
- valid := make([]ticketKey, 0, len(c.autoSessionTicketKeys)+1)
- valid = append(valid, c.ticketKeyFromBytes(newKey))
- for _, k := range c.autoSessionTicketKeys {
- // While rotating the current key, also remove any expired ones.
- if c.time().Sub(k.created) < ticketKeyLifetime {
- valid = append(valid, k)
- }
- }
- c.autoSessionTicketKeys = valid
- }
- return c.autoSessionTicketKeys
-}
-
-// SetSessionTicketKeys updates the session ticket keys for a server.
-//
-// The first key will be used when creating new tickets, while all keys can be
-// used for decrypting tickets. It is safe to call this function while the
-// server is running in order to rotate the session ticket keys. The function
-// will panic if keys is empty.
-//
-// Calling this function will turn off automatic session ticket key rotation.
-//
-// If multiple servers are terminating connections for the same host they should
-// all have the same session ticket keys. If the session ticket keys leaks,
-// previously recorded and future TLS connections using those keys might be
-// compromised.
-func (c *config) SetSessionTicketKeys(keys [][32]byte) {
- if len(keys) == 0 {
- panic("tls: keys must have at least one key")
- }
-
- newKeys := make([]ticketKey, len(keys))
- for i, bytes := range keys {
- newKeys[i] = c.ticketKeyFromBytes(bytes)
- }
-
- c.mutex.Lock()
- c.sessionTicketKeys = newKeys
- c.mutex.Unlock()
-}
-
-func (c *config) rand() io.Reader {
- r := c.Rand
- if r == nil {
- return rand.Reader
- }
- return r
-}
-
-func (c *config) time() time.Time {
- t := c.Time
- if t == nil {
- t = time.Now
- }
- return t()
-}
-
-func (c *config) cipherSuites() []uint16 {
- s := c.CipherSuites
- if s == nil {
- s = defaultCipherSuites()
- }
- return s
-}
-
-var supportedVersions = []uint16{
- VersionTLS13,
- VersionTLS12,
- VersionTLS11,
- VersionTLS10,
-}
-
-func (c *config) supportedVersions() []uint16 {
- versions := make([]uint16, 0, len(supportedVersions))
- for _, v := range supportedVersions {
- if c != nil && c.MinVersion != 0 && v < c.MinVersion {
- continue
- }
- if c != nil && c.MaxVersion != 0 && v > c.MaxVersion {
- continue
- }
- versions = append(versions, v)
- }
- return versions
-}
-
-func (c *config) maxSupportedVersion() uint16 {
- supportedVersions := c.supportedVersions()
- if len(supportedVersions) == 0 {
- return 0
- }
- return supportedVersions[0]
-}
-
-// supportedVersionsFromMax returns a list of supported versions derived from a
-// legacy maximum version value. Note that only versions supported by this
-// library are returned. Any newer peer will use supportedVersions anyway.
-func supportedVersionsFromMax(maxVersion uint16) []uint16 {
- versions := make([]uint16, 0, len(supportedVersions))
- for _, v := range supportedVersions {
- if v > maxVersion {
- continue
- }
- versions = append(versions, v)
- }
- return versions
-}
-
-var defaultCurvePreferences = []CurveID{X25519, CurveP256, CurveP384, CurveP521}
-
-func (c *config) curvePreferences() []CurveID {
- if c == nil || len(c.CurvePreferences) == 0 {
- return defaultCurvePreferences
- }
- return c.CurvePreferences
-}
-
-func (c *config) supportsCurve(curve CurveID) bool {
- for _, cc := range c.curvePreferences() {
- if cc == curve {
- return true
- }
- }
- return false
-}
-
-// mutualVersion returns the protocol version to use given the advertised
-// versions of the peer. Priority is given to the peer preference order.
-func (c *config) mutualVersion(peerVersions []uint16) (uint16, bool) {
- supportedVersions := c.supportedVersions()
- for _, peerVersion := range peerVersions {
- for _, v := range supportedVersions {
- if v == peerVersion {
- return v, true
- }
- }
- }
- return 0, false
-}
-
-var errNoCertificates = errors.New("tls: no certificates configured")
-
-// getCertificate returns the best certificate for the given ClientHelloInfo,
-// defaulting to the first element of c.Certificates.
-func (c *config) getCertificate(clientHello *ClientHelloInfo) (*Certificate, error) {
- if c.GetCertificate != nil &&
- (len(c.Certificates) == 0 || len(clientHello.ServerName) > 0) {
- cert, err := c.GetCertificate(clientHello)
- if cert != nil || err != nil {
- return cert, err
- }
- }
-
- if len(c.Certificates) == 0 {
- return nil, errNoCertificates
- }
-
- if len(c.Certificates) == 1 {
- // There's only one choice, so no point doing any work.
- return &c.Certificates[0], nil
- }
-
- if c.NameToCertificate != nil {
- name := strings.ToLower(clientHello.ServerName)
- if cert, ok := c.NameToCertificate[name]; ok {
- return cert, nil
- }
- if len(name) > 0 {
- labels := strings.Split(name, ".")
- labels[0] = "*"
- wildcardName := strings.Join(labels, ".")
- if cert, ok := c.NameToCertificate[wildcardName]; ok {
- return cert, nil
- }
- }
- }
-
- for _, cert := range c.Certificates {
- if err := clientHello.SupportsCertificate(&cert); err == nil {
- return &cert, nil
- }
- }
-
- // If nothing matches, return the first certificate.
- return &c.Certificates[0], nil
-}
-
-// SupportsCertificate returns nil if the provided certificate is supported by
-// the client that sent the ClientHello. Otherwise, it returns an error
-// describing the reason for the incompatibility.
-//
-// If this ClientHelloInfo was passed to a GetConfigForClient or GetCertificate
-// callback, this method will take into account the associated Config. Note that
-// if GetConfigForClient returns a different Config, the change can't be
-// accounted for by this method.
-//
-// This function will call x509.ParseCertificate unless c.Leaf is set, which can
-// incur a significant performance cost.
-func (chi *clientHelloInfo) SupportsCertificate(c *Certificate) error {
- // Note we don't currently support certificate_authorities nor
- // signature_algorithms_cert, and don't check the algorithms of the
- // signatures on the chain (which anyway are a SHOULD, see RFC 8446,
- // Section 4.4.2.2).
-
- config := chi.config
- if config == nil {
- config = &Config{}
- }
- conf := fromConfig(config)
- vers, ok := conf.mutualVersion(chi.SupportedVersions)
- if !ok {
- return errors.New("no mutually supported protocol versions")
- }
-
- // If the client specified the name they are trying to connect to, the
- // certificate needs to be valid for it.
- if chi.ServerName != "" {
- x509Cert, err := leafCertificate(c)
- if err != nil {
- return fmt.Errorf("failed to parse certificate: %w", err)
- }
- if err := x509Cert.VerifyHostname(chi.ServerName); err != nil {
- return fmt.Errorf("certificate is not valid for requested server name: %w", err)
- }
- }
-
- // supportsRSAFallback returns nil if the certificate and connection support
- // the static RSA key exchange, and unsupported otherwise. The logic for
- // supporting static RSA is completely disjoint from the logic for
- // supporting signed key exchanges, so we just check it as a fallback.
- supportsRSAFallback := func(unsupported error) error {
- // TLS 1.3 dropped support for the static RSA key exchange.
- if vers == VersionTLS13 {
- return unsupported
- }
- // The static RSA key exchange works by decrypting a challenge with the
- // RSA private key, not by signing, so check the PrivateKey implements
- // crypto.Decrypter, like *rsa.PrivateKey does.
- if priv, ok := c.PrivateKey.(crypto.Decrypter); ok {
- if _, ok := priv.Public().(*rsa.PublicKey); !ok {
- return unsupported
- }
- } else {
- return unsupported
- }
- // Finally, there needs to be a mutual cipher suite that uses the static
- // RSA key exchange instead of ECDHE.
- rsaCipherSuite := selectCipherSuite(chi.CipherSuites, conf.cipherSuites(), func(c *cipherSuite) bool {
- if c.flags&suiteECDHE != 0 {
- return false
- }
- if vers < VersionTLS12 && c.flags&suiteTLS12 != 0 {
- return false
- }
- return true
- })
- if rsaCipherSuite == nil {
- return unsupported
- }
- return nil
- }
-
- // If the client sent the signature_algorithms extension, ensure it supports
- // schemes we can use with this certificate and TLS version.
- if len(chi.SignatureSchemes) > 0 {
- if _, err := selectSignatureScheme(vers, c, chi.SignatureSchemes); err != nil {
- return supportsRSAFallback(err)
- }
- }
-
- // In TLS 1.3 we are done because supported_groups is only relevant to the
- // ECDHE computation, point format negotiation is removed, cipher suites are
- // only relevant to the AEAD choice, and static RSA does not exist.
- if vers == VersionTLS13 {
- return nil
- }
-
- // The only signed key exchange we support is ECDHE.
- if !supportsECDHE(conf, chi.SupportedCurves, chi.SupportedPoints) {
- return supportsRSAFallback(errors.New("client doesn't support ECDHE, can only use legacy RSA key exchange"))
- }
-
- var ecdsaCipherSuite bool
- if priv, ok := c.PrivateKey.(crypto.Signer); ok {
- switch pub := priv.Public().(type) {
- case *ecdsa.PublicKey:
- var curve CurveID
- switch pub.Curve {
- case elliptic.P256():
- curve = CurveP256
- case elliptic.P384():
- curve = CurveP384
- case elliptic.P521():
- curve = CurveP521
- default:
- return supportsRSAFallback(unsupportedCertificateError(c))
- }
- var curveOk bool
- for _, c := range chi.SupportedCurves {
- if c == curve && conf.supportsCurve(c) {
- curveOk = true
- break
- }
- }
- if !curveOk {
- return errors.New("client doesn't support certificate curve")
- }
- ecdsaCipherSuite = true
- case ed25519.PublicKey:
- if vers < VersionTLS12 || len(chi.SignatureSchemes) == 0 {
- return errors.New("connection doesn't support Ed25519")
- }
- ecdsaCipherSuite = true
- case *rsa.PublicKey:
- default:
- return supportsRSAFallback(unsupportedCertificateError(c))
- }
- } else {
- return supportsRSAFallback(unsupportedCertificateError(c))
- }
-
- // Make sure that there is a mutually supported cipher suite that works with
- // this certificate. Cipher suite selection will then apply the logic in
- // reverse to pick it. See also serverHandshakeState.cipherSuiteOk.
- cipherSuite := selectCipherSuite(chi.CipherSuites, conf.cipherSuites(), func(c *cipherSuite) bool {
- if c.flags&suiteECDHE == 0 {
- return false
- }
- if c.flags&suiteECSign != 0 {
- if !ecdsaCipherSuite {
- return false
- }
- } else {
- if ecdsaCipherSuite {
- return false
- }
- }
- if vers < VersionTLS12 && c.flags&suiteTLS12 != 0 {
- return false
- }
- return true
- })
- if cipherSuite == nil {
- return supportsRSAFallback(errors.New("client doesn't support any cipher suites compatible with the certificate"))
- }
-
- return nil
-}
-
-// BuildNameToCertificate parses c.Certificates and builds c.NameToCertificate
-// from the CommonName and SubjectAlternateName fields of each of the leaf
-// certificates.
-//
-// Deprecated: NameToCertificate only allows associating a single certificate
-// with a given name. Leave that field nil to let the library select the first
-// compatible chain from Certificates.
-func (c *config) BuildNameToCertificate() {
- c.NameToCertificate = make(map[string]*Certificate)
- for i := range c.Certificates {
- cert := &c.Certificates[i]
- x509Cert, err := leafCertificate(cert)
- if err != nil {
- continue
- }
- // If SANs are *not* present, some clients will consider the certificate
- // valid for the name in the Common Name.
- if x509Cert.Subject.CommonName != "" && len(x509Cert.DNSNames) == 0 {
- c.NameToCertificate[x509Cert.Subject.CommonName] = cert
- }
- for _, san := range x509Cert.DNSNames {
- c.NameToCertificate[san] = cert
- }
- }
-}
-
-const (
- keyLogLabelTLS12 = "CLIENT_RANDOM"
- keyLogLabelEarlyTraffic = "CLIENT_EARLY_TRAFFIC_SECRET"
- keyLogLabelClientHandshake = "CLIENT_HANDSHAKE_TRAFFIC_SECRET"
- keyLogLabelServerHandshake = "SERVER_HANDSHAKE_TRAFFIC_SECRET"
- keyLogLabelClientTraffic = "CLIENT_TRAFFIC_SECRET_0"
- keyLogLabelServerTraffic = "SERVER_TRAFFIC_SECRET_0"
-)
-
-func (c *config) writeKeyLog(label string, clientRandom, secret []byte) error {
- if c.KeyLogWriter == nil {
- return nil
- }
-
- logLine := []byte(fmt.Sprintf("%s %x %x\n", label, clientRandom, secret))
-
- writerMutex.Lock()
- _, err := c.KeyLogWriter.Write(logLine)
- writerMutex.Unlock()
-
- return err
-}
-
-// writerMutex protects all KeyLogWriters globally. It is rarely enabled,
-// and is only for debugging, so a global mutex saves space.
-var writerMutex sync.Mutex
-
-// A Certificate is a chain of one or more certificates, leaf first.
-type Certificate = tls.Certificate
-
-// leaf returns the parsed leaf certificate, either from c.Leaf or by parsing
-// the corresponding c.Certificate[0].
-func leafCertificate(c *Certificate) (*x509.Certificate, error) {
- if c.Leaf != nil {
- return c.Leaf, nil
- }
- return x509.ParseCertificate(c.Certificate[0])
-}
-
-type handshakeMessage interface {
- marshal() []byte
- unmarshal([]byte) bool
-}
-
-// lruSessionCache is a ClientSessionCache implementation that uses an LRU
-// caching strategy.
-type lruSessionCache struct {
- sync.Mutex
-
- m map[string]*list.Element
- q *list.List
- capacity int
-}
-
-type lruSessionCacheEntry struct {
- sessionKey string
- state *ClientSessionState
-}
-
-// NewLRUClientSessionCache returns a ClientSessionCache with the given
-// capacity that uses an LRU strategy. If capacity is < 1, a default capacity
-// is used instead.
-func NewLRUClientSessionCache(capacity int) ClientSessionCache {
- const defaultSessionCacheCapacity = 64
-
- if capacity < 1 {
- capacity = defaultSessionCacheCapacity
- }
- return &lruSessionCache{
- m: make(map[string]*list.Element),
- q: list.New(),
- capacity: capacity,
- }
-}
-
-// Put adds the provided (sessionKey, cs) pair to the cache. If cs is nil, the entry
-// corresponding to sessionKey is removed from the cache instead.
-func (c *lruSessionCache) Put(sessionKey string, cs *ClientSessionState) {
- c.Lock()
- defer c.Unlock()
-
- if elem, ok := c.m[sessionKey]; ok {
- if cs == nil {
- c.q.Remove(elem)
- delete(c.m, sessionKey)
- } else {
- entry := elem.Value.(*lruSessionCacheEntry)
- entry.state = cs
- c.q.MoveToFront(elem)
- }
- return
- }
-
- if c.q.Len() < c.capacity {
- entry := &lruSessionCacheEntry{sessionKey, cs}
- c.m[sessionKey] = c.q.PushFront(entry)
- return
- }
-
- elem := c.q.Back()
- entry := elem.Value.(*lruSessionCacheEntry)
- delete(c.m, entry.sessionKey)
- entry.sessionKey = sessionKey
- entry.state = cs
- c.q.MoveToFront(elem)
- c.m[sessionKey] = elem
-}
-
-// Get returns the ClientSessionState value associated with a given key. It
-// returns (nil, false) if no value is found.
-func (c *lruSessionCache) Get(sessionKey string) (*ClientSessionState, bool) {
- c.Lock()
- defer c.Unlock()
-
- if elem, ok := c.m[sessionKey]; ok {
- c.q.MoveToFront(elem)
- return elem.Value.(*lruSessionCacheEntry).state, true
- }
- return nil, false
-}
-
-var emptyConfig Config
-
-func defaultConfig() *Config {
- return &emptyConfig
-}
-
-var (
- once sync.Once
- varDefaultCipherSuites []uint16
- varDefaultCipherSuitesTLS13 []uint16
-)
-
-func defaultCipherSuites() []uint16 {
- once.Do(initDefaultCipherSuites)
- return varDefaultCipherSuites
-}
-
-func defaultCipherSuitesTLS13() []uint16 {
- once.Do(initDefaultCipherSuites)
- return varDefaultCipherSuitesTLS13
-}
-
-func initDefaultCipherSuites() {
- var topCipherSuites []uint16
-
- if hasAESGCMHardwareSupport {
- // If AES-GCM hardware is provided then prioritise AES-GCM
- // cipher suites.
- topCipherSuites = []uint16{
- TLS_ECDHE_RSA_WITH_AES_128_GCM_SHA256,
- TLS_ECDHE_RSA_WITH_AES_256_GCM_SHA384,
- TLS_ECDHE_ECDSA_WITH_AES_128_GCM_SHA256,
- TLS_ECDHE_ECDSA_WITH_AES_256_GCM_SHA384,
- TLS_ECDHE_RSA_WITH_CHACHA20_POLY1305,
- TLS_ECDHE_ECDSA_WITH_CHACHA20_POLY1305,
- }
- varDefaultCipherSuitesTLS13 = []uint16{
- TLS_AES_128_GCM_SHA256,
- TLS_CHACHA20_POLY1305_SHA256,
- TLS_AES_256_GCM_SHA384,
- }
- } else {
- // Without AES-GCM hardware, we put the ChaCha20-Poly1305
- // cipher suites first.
- topCipherSuites = []uint16{
- TLS_ECDHE_RSA_WITH_CHACHA20_POLY1305,
- TLS_ECDHE_ECDSA_WITH_CHACHA20_POLY1305,
- TLS_ECDHE_RSA_WITH_AES_128_GCM_SHA256,
- TLS_ECDHE_RSA_WITH_AES_256_GCM_SHA384,
- TLS_ECDHE_ECDSA_WITH_AES_128_GCM_SHA256,
- TLS_ECDHE_ECDSA_WITH_AES_256_GCM_SHA384,
- }
- varDefaultCipherSuitesTLS13 = []uint16{
- TLS_CHACHA20_POLY1305_SHA256,
- TLS_AES_128_GCM_SHA256,
- TLS_AES_256_GCM_SHA384,
- }
- }
-
- varDefaultCipherSuites = make([]uint16, 0, len(cipherSuites))
- varDefaultCipherSuites = append(varDefaultCipherSuites, topCipherSuites...)
-
-NextCipherSuite:
- for _, suite := range cipherSuites {
- if suite.flags&suiteDefaultOff != 0 {
- continue
- }
- for _, existing := range varDefaultCipherSuites {
- if existing == suite.id {
- continue NextCipherSuite
- }
- }
- varDefaultCipherSuites = append(varDefaultCipherSuites, suite.id)
- }
-}
-
-func unexpectedMessageError(wanted, got interface{}) error {
- return fmt.Errorf("tls: received unexpected handshake message of type %T when waiting for %T", got, wanted)
-}
-
-func isSupportedSignatureAlgorithm(sigAlg SignatureScheme, supportedSignatureAlgorithms []SignatureScheme) bool {
- for _, s := range supportedSignatureAlgorithms {
- if s == sigAlg {
- return true
- }
- }
- return false
-}
-
-var aesgcmCiphers = map[uint16]bool{
- // 1.2
- TLS_ECDHE_RSA_WITH_AES_128_GCM_SHA256: true,
- TLS_ECDHE_RSA_WITH_AES_256_GCM_SHA384: true,
- TLS_ECDHE_ECDSA_WITH_AES_128_GCM_SHA256: true,
- TLS_ECDHE_ECDSA_WITH_AES_256_GCM_SHA384: true,
- // 1.3
- TLS_AES_128_GCM_SHA256: true,
- TLS_AES_256_GCM_SHA384: true,
-}
-
-var nonAESGCMAEADCiphers = map[uint16]bool{
- // 1.2
- TLS_ECDHE_RSA_WITH_CHACHA20_POLY1305: true,
- TLS_ECDHE_ECDSA_WITH_CHACHA20_POLY1305: true,
- // 1.3
- TLS_CHACHA20_POLY1305_SHA256: true,
-}
-
-// aesgcmPreferred returns whether the first valid cipher in the preference list
-// is an AES-GCM cipher, implying the peer has hardware support for it.
-func aesgcmPreferred(ciphers []uint16) bool {
- for _, cID := range ciphers {
- c := cipherSuiteByID(cID)
- if c == nil {
- c13 := cipherSuiteTLS13ByID(cID)
- if c13 == nil {
- continue
- }
- return aesgcmCiphers[cID]
- }
- return aesgcmCiphers[cID]
- }
- return false
-}
-
-// deprioritizeAES reorders cipher preference lists by rearranging
-// adjacent AEAD ciphers such that AES-GCM based ciphers are moved
-// after other AEAD ciphers. It returns a fresh slice.
-func deprioritizeAES(ciphers []uint16) []uint16 {
- reordered := make([]uint16, len(ciphers))
- copy(reordered, ciphers)
- sort.SliceStable(reordered, func(i, j int) bool {
- return nonAESGCMAEADCiphers[reordered[i]] && aesgcmCiphers[reordered[j]]
- })
- return reordered
-}
diff --git a/vendor/github.com/marten-seemann/qtls-go1-16/common_js.go b/vendor/github.com/marten-seemann/qtls-go1-16/common_js.go
deleted file mode 100644
index 97e6ecefd..000000000
--- a/vendor/github.com/marten-seemann/qtls-go1-16/common_js.go
+++ /dev/null
@@ -1,12 +0,0 @@
-// +build js
-
-package qtls
-
-var (
- hasGCMAsmAMD64 = false
- hasGCMAsmARM64 = false
- // Keep in sync with crypto/aes/cipher_s390x.go.
- hasGCMAsmS390X = false
-
- hasAESGCMHardwareSupport = false
-)
diff --git a/vendor/github.com/marten-seemann/qtls-go1-16/common_nojs.go b/vendor/github.com/marten-seemann/qtls-go1-16/common_nojs.go
deleted file mode 100644
index 5e56e0fb3..000000000
--- a/vendor/github.com/marten-seemann/qtls-go1-16/common_nojs.go
+++ /dev/null
@@ -1,20 +0,0 @@
-// +build !js
-
-package qtls
-
-import (
- "runtime"
-
- "golang.org/x/sys/cpu"
-)
-
-var (
- hasGCMAsmAMD64 = cpu.X86.HasAES && cpu.X86.HasPCLMULQDQ
- hasGCMAsmARM64 = cpu.ARM64.HasAES && cpu.ARM64.HasPMULL
- // Keep in sync with crypto/aes/cipher_s390x.go.
- hasGCMAsmS390X = cpu.S390X.HasAES && cpu.S390X.HasAESCBC && cpu.S390X.HasAESCTR && (cpu.S390X.HasGHASH || cpu.S390X.HasAESGCM)
-
- hasAESGCMHardwareSupport = runtime.GOARCH == "amd64" && hasGCMAsmAMD64 ||
- runtime.GOARCH == "arm64" && hasGCMAsmARM64 ||
- runtime.GOARCH == "s390x" && hasGCMAsmS390X
-)
diff --git a/vendor/github.com/marten-seemann/qtls-go1-16/conn.go b/vendor/github.com/marten-seemann/qtls-go1-16/conn.go
deleted file mode 100644
index fa5eb3f10..000000000
--- a/vendor/github.com/marten-seemann/qtls-go1-16/conn.go
+++ /dev/null
@@ -1,1536 +0,0 @@
-// Copyright 2010 The Go Authors. All rights reserved.
-// Use of this source code is governed by a BSD-style
-// license that can be found in the LICENSE file.
-
-// TLS low level connection and record layer
-
-package qtls
-
-import (
- "bytes"
- "crypto/cipher"
- "crypto/subtle"
- "crypto/x509"
- "errors"
- "fmt"
- "hash"
- "io"
- "net"
- "sync"
- "sync/atomic"
- "time"
-)
-
-// A Conn represents a secured connection.
-// It implements the net.Conn interface.
-type Conn struct {
- // constant
- conn net.Conn
- isClient bool
- handshakeFn func() error // (*Conn).clientHandshake or serverHandshake
-
- // handshakeStatus is 1 if the connection is currently transferring
- // application data (i.e. is not currently processing a handshake).
- // This field is only to be accessed with sync/atomic.
- handshakeStatus uint32
- // constant after handshake; protected by handshakeMutex
- handshakeMutex sync.Mutex
- handshakeErr error // error resulting from handshake
- vers uint16 // TLS version
- haveVers bool // version has been negotiated
- config *config // configuration passed to constructor
- // handshakes counts the number of handshakes performed on the
- // connection so far. If renegotiation is disabled then this is either
- // zero or one.
- extraConfig *ExtraConfig
-
- handshakes int
- didResume bool // whether this connection was a session resumption
- cipherSuite uint16
- ocspResponse []byte // stapled OCSP response
- scts [][]byte // signed certificate timestamps from server
- peerCertificates []*x509.Certificate
- // verifiedChains contains the certificate chains that we built, as
- // opposed to the ones presented by the server.
- verifiedChains [][]*x509.Certificate
- // serverName contains the server name indicated by the client, if any.
- serverName string
- // secureRenegotiation is true if the server echoed the secure
- // renegotiation extension. (This is meaningless as a server because
- // renegotiation is not supported in that case.)
- secureRenegotiation bool
- // ekm is a closure for exporting keying material.
- ekm func(label string, context []byte, length int) ([]byte, error)
- // For the client:
- // resumptionSecret is the resumption_master_secret for handling
- // NewSessionTicket messages. nil if config.SessionTicketsDisabled.
- // For the server:
- // resumptionSecret is the resumption_master_secret for generating
- // NewSessionTicket messages. Only used when the alternative record
- // layer is set. nil if config.SessionTicketsDisabled.
- resumptionSecret []byte
-
- // ticketKeys is the set of active session ticket keys for this
- // connection. The first one is used to encrypt new tickets and
- // all are tried to decrypt tickets.
- ticketKeys []ticketKey
-
- // clientFinishedIsFirst is true if the client sent the first Finished
- // message during the most recent handshake. This is recorded because
- // the first transmitted Finished message is the tls-unique
- // channel-binding value.
- clientFinishedIsFirst bool
-
- // closeNotifyErr is any error from sending the alertCloseNotify record.
- closeNotifyErr error
- // closeNotifySent is true if the Conn attempted to send an
- // alertCloseNotify record.
- closeNotifySent bool
-
- // clientFinished and serverFinished contain the Finished message sent
- // by the client or server in the most recent handshake. This is
- // retained to support the renegotiation extension and tls-unique
- // channel-binding.
- clientFinished [12]byte
- serverFinished [12]byte
-
- // clientProtocol is the negotiated ALPN protocol.
- clientProtocol string
-
- // input/output
- in, out halfConn
- rawInput bytes.Buffer // raw input, starting with a record header
- input bytes.Reader // application data waiting to be read, from rawInput.Next
- hand bytes.Buffer // handshake data waiting to be read
- buffering bool // whether records are buffered in sendBuf
- sendBuf []byte // a buffer of records waiting to be sent
-
- // bytesSent counts the bytes of application data sent.
- // packetsSent counts packets.
- bytesSent int64
- packetsSent int64
-
- // retryCount counts the number of consecutive non-advancing records
- // received by Conn.readRecord. That is, records that neither advance the
- // handshake, nor deliver application data. Protected by in.Mutex.
- retryCount int
-
- // activeCall is an atomic int32; the low bit is whether Close has
- // been called. the rest of the bits are the number of goroutines
- // in Conn.Write.
- activeCall int32
-
- used0RTT bool
-
- tmp [16]byte
-}
-
-// Access to net.Conn methods.
-// Cannot just embed net.Conn because that would
-// export the struct field too.
-
-// LocalAddr returns the local network address.
-func (c *Conn) LocalAddr() net.Addr {
- return c.conn.LocalAddr()
-}
-
-// RemoteAddr returns the remote network address.
-func (c *Conn) RemoteAddr() net.Addr {
- return c.conn.RemoteAddr()
-}
-
-// SetDeadline sets the read and write deadlines associated with the connection.
-// A zero value for t means Read and Write will not time out.
-// After a Write has timed out, the TLS state is corrupt and all future writes will return the same error.
-func (c *Conn) SetDeadline(t time.Time) error {
- return c.conn.SetDeadline(t)
-}
-
-// SetReadDeadline sets the read deadline on the underlying connection.
-// A zero value for t means Read will not time out.
-func (c *Conn) SetReadDeadline(t time.Time) error {
- return c.conn.SetReadDeadline(t)
-}
-
-// SetWriteDeadline sets the write deadline on the underlying connection.
-// A zero value for t means Write will not time out.
-// After a Write has timed out, the TLS state is corrupt and all future writes will return the same error.
-func (c *Conn) SetWriteDeadline(t time.Time) error {
- return c.conn.SetWriteDeadline(t)
-}
-
-// A halfConn represents one direction of the record layer
-// connection, either sending or receiving.
-type halfConn struct {
- sync.Mutex
-
- err error // first permanent error
- version uint16 // protocol version
- cipher interface{} // cipher algorithm
- mac hash.Hash
- seq [8]byte // 64-bit sequence number
-
- scratchBuf [13]byte // to avoid allocs; interface method args escape
-
- nextCipher interface{} // next encryption state
- nextMac hash.Hash // next MAC algorithm
-
- trafficSecret []byte // current TLS 1.3 traffic secret
-
- setKeyCallback func(encLevel EncryptionLevel, suite *CipherSuiteTLS13, trafficSecret []byte)
-}
-
-type permanentError struct {
- err net.Error
-}
-
-func (e *permanentError) Error() string { return e.err.Error() }
-func (e *permanentError) Unwrap() error { return e.err }
-func (e *permanentError) Timeout() bool { return e.err.Timeout() }
-func (e *permanentError) Temporary() bool { return false }
-
-func (hc *halfConn) setErrorLocked(err error) error {
- if e, ok := err.(net.Error); ok {
- hc.err = &permanentError{err: e}
- } else {
- hc.err = err
- }
- return hc.err
-}
-
-// prepareCipherSpec sets the encryption and MAC states
-// that a subsequent changeCipherSpec will use.
-func (hc *halfConn) prepareCipherSpec(version uint16, cipher interface{}, mac hash.Hash) {
- hc.version = version
- hc.nextCipher = cipher
- hc.nextMac = mac
-}
-
-// changeCipherSpec changes the encryption and MAC states
-// to the ones previously passed to prepareCipherSpec.
-func (hc *halfConn) changeCipherSpec() error {
- if hc.nextCipher == nil || hc.version == VersionTLS13 {
- return alertInternalError
- }
- hc.cipher = hc.nextCipher
- hc.mac = hc.nextMac
- hc.nextCipher = nil
- hc.nextMac = nil
- for i := range hc.seq {
- hc.seq[i] = 0
- }
- return nil
-}
-
-func (hc *halfConn) exportKey(encLevel EncryptionLevel, suite *cipherSuiteTLS13, trafficSecret []byte) {
- if hc.setKeyCallback != nil {
- s := &CipherSuiteTLS13{
- ID: suite.id,
- KeyLen: suite.keyLen,
- Hash: suite.hash,
- AEAD: func(key, fixedNonce []byte) cipher.AEAD { return suite.aead(key, fixedNonce) },
- }
- hc.setKeyCallback(encLevel, s, trafficSecret)
- }
-}
-
-func (hc *halfConn) setTrafficSecret(suite *cipherSuiteTLS13, secret []byte) {
- hc.trafficSecret = secret
- key, iv := suite.trafficKey(secret)
- hc.cipher = suite.aead(key, iv)
- for i := range hc.seq {
- hc.seq[i] = 0
- }
-}
-
-// incSeq increments the sequence number.
-func (hc *halfConn) incSeq() {
- for i := 7; i >= 0; i-- {
- hc.seq[i]++
- if hc.seq[i] != 0 {
- return
- }
- }
-
- // Not allowed to let sequence number wrap.
- // Instead, must renegotiate before it does.
- // Not likely enough to bother.
- panic("TLS: sequence number wraparound")
-}
-
-// explicitNonceLen returns the number of bytes of explicit nonce or IV included
-// in each record. Explicit nonces are present only in CBC modes after TLS 1.0
-// and in certain AEAD modes in TLS 1.2.
-func (hc *halfConn) explicitNonceLen() int {
- if hc.cipher == nil {
- return 0
- }
-
- switch c := hc.cipher.(type) {
- case cipher.Stream:
- return 0
- case aead:
- return c.explicitNonceLen()
- case cbcMode:
- // TLS 1.1 introduced a per-record explicit IV to fix the BEAST attack.
- if hc.version >= VersionTLS11 {
- return c.BlockSize()
- }
- return 0
- default:
- panic("unknown cipher type")
- }
-}
-
-// extractPadding returns, in constant time, the length of the padding to remove
-// from the end of payload. It also returns a byte which is equal to 255 if the
-// padding was valid and 0 otherwise. See RFC 2246, Section 6.2.3.2.
-func extractPadding(payload []byte) (toRemove int, good byte) {
- if len(payload) < 1 {
- return 0, 0
- }
-
- paddingLen := payload[len(payload)-1]
- t := uint(len(payload)-1) - uint(paddingLen)
- // if len(payload) >= (paddingLen - 1) then the MSB of t is zero
- good = byte(int32(^t) >> 31)
-
- // The maximum possible padding length plus the actual length field
- toCheck := 256
- // The length of the padded data is public, so we can use an if here
- if toCheck > len(payload) {
- toCheck = len(payload)
- }
-
- for i := 0; i < toCheck; i++ {
- t := uint(paddingLen) - uint(i)
- // if i <= paddingLen then the MSB of t is zero
- mask := byte(int32(^t) >> 31)
- b := payload[len(payload)-1-i]
- good &^= mask&paddingLen ^ mask&b
- }
-
- // We AND together the bits of good and replicate the result across
- // all the bits.
- good &= good << 4
- good &= good << 2
- good &= good << 1
- good = uint8(int8(good) >> 7)
-
- // Zero the padding length on error. This ensures any unchecked bytes
- // are included in the MAC. Otherwise, an attacker that could
- // distinguish MAC failures from padding failures could mount an attack
- // similar to POODLE in SSL 3.0: given a good ciphertext that uses a
- // full block's worth of padding, replace the final block with another
- // block. If the MAC check passed but the padding check failed, the
- // last byte of that block decrypted to the block size.
- //
- // See also macAndPaddingGood logic below.
- paddingLen &= good
-
- toRemove = int(paddingLen) + 1
- return
-}
-
-func roundUp(a, b int) int {
- return a + (b-a%b)%b
-}
-
-// cbcMode is an interface for block ciphers using cipher block chaining.
-type cbcMode interface {
- cipher.BlockMode
- SetIV([]byte)
-}
-
-// decrypt authenticates and decrypts the record if protection is active at
-// this stage. The returned plaintext might overlap with the input.
-func (hc *halfConn) decrypt(record []byte) ([]byte, recordType, error) {
- var plaintext []byte
- typ := recordType(record[0])
- payload := record[recordHeaderLen:]
-
- // In TLS 1.3, change_cipher_spec messages are to be ignored without being
- // decrypted. See RFC 8446, Appendix D.4.
- if hc.version == VersionTLS13 && typ == recordTypeChangeCipherSpec {
- return payload, typ, nil
- }
-
- paddingGood := byte(255)
- paddingLen := 0
-
- explicitNonceLen := hc.explicitNonceLen()
-
- if hc.cipher != nil {
- switch c := hc.cipher.(type) {
- case cipher.Stream:
- c.XORKeyStream(payload, payload)
- case aead:
- if len(payload) < explicitNonceLen {
- return nil, 0, alertBadRecordMAC
- }
- nonce := payload[:explicitNonceLen]
- if len(nonce) == 0 {
- nonce = hc.seq[:]
- }
- payload = payload[explicitNonceLen:]
-
- var additionalData []byte
- if hc.version == VersionTLS13 {
- additionalData = record[:recordHeaderLen]
- } else {
- additionalData = append(hc.scratchBuf[:0], hc.seq[:]...)
- additionalData = append(additionalData, record[:3]...)
- n := len(payload) - c.Overhead()
- additionalData = append(additionalData, byte(n>>8), byte(n))
- }
-
- var err error
- plaintext, err = c.Open(payload[:0], nonce, payload, additionalData)
- if err != nil {
- return nil, 0, alertBadRecordMAC
- }
- case cbcMode:
- blockSize := c.BlockSize()
- minPayload := explicitNonceLen + roundUp(hc.mac.Size()+1, blockSize)
- if len(payload)%blockSize != 0 || len(payload) < minPayload {
- return nil, 0, alertBadRecordMAC
- }
-
- if explicitNonceLen > 0 {
- c.SetIV(payload[:explicitNonceLen])
- payload = payload[explicitNonceLen:]
- }
- c.CryptBlocks(payload, payload)
-
- // In a limited attempt to protect against CBC padding oracles like
- // Lucky13, the data past paddingLen (which is secret) is passed to
- // the MAC function as extra data, to be fed into the HMAC after
- // computing the digest. This makes the MAC roughly constant time as
- // long as the digest computation is constant time and does not
- // affect the subsequent write, modulo cache effects.
- paddingLen, paddingGood = extractPadding(payload)
- default:
- panic("unknown cipher type")
- }
-
- if hc.version == VersionTLS13 {
- if typ != recordTypeApplicationData {
- return nil, 0, alertUnexpectedMessage
- }
- if len(plaintext) > maxPlaintext+1 {
- return nil, 0, alertRecordOverflow
- }
- // Remove padding and find the ContentType scanning from the end.
- for i := len(plaintext) - 1; i >= 0; i-- {
- if plaintext[i] != 0 {
- typ = recordType(plaintext[i])
- plaintext = plaintext[:i]
- break
- }
- if i == 0 {
- return nil, 0, alertUnexpectedMessage
- }
- }
- }
- } else {
- plaintext = payload
- }
-
- if hc.mac != nil {
- macSize := hc.mac.Size()
- if len(payload) < macSize {
- return nil, 0, alertBadRecordMAC
- }
-
- n := len(payload) - macSize - paddingLen
- n = subtle.ConstantTimeSelect(int(uint32(n)>>31), 0, n) // if n < 0 { n = 0 }
- record[3] = byte(n >> 8)
- record[4] = byte(n)
- remoteMAC := payload[n : n+macSize]
- localMAC := tls10MAC(hc.mac, hc.scratchBuf[:0], hc.seq[:], record[:recordHeaderLen], payload[:n], payload[n+macSize:])
-
- // This is equivalent to checking the MACs and paddingGood
- // separately, but in constant-time to prevent distinguishing
- // padding failures from MAC failures. Depending on what value
- // of paddingLen was returned on bad padding, distinguishing
- // bad MAC from bad padding can lead to an attack.
- //
- // See also the logic at the end of extractPadding.
- macAndPaddingGood := subtle.ConstantTimeCompare(localMAC, remoteMAC) & int(paddingGood)
- if macAndPaddingGood != 1 {
- return nil, 0, alertBadRecordMAC
- }
-
- plaintext = payload[:n]
- }
-
- hc.incSeq()
- return plaintext, typ, nil
-}
-
-func (c *Conn) setAlternativeRecordLayer() {
- if c.extraConfig != nil && c.extraConfig.AlternativeRecordLayer != nil {
- c.in.setKeyCallback = c.extraConfig.AlternativeRecordLayer.SetReadKey
- c.out.setKeyCallback = c.extraConfig.AlternativeRecordLayer.SetWriteKey
- }
-}
-
-// sliceForAppend extends the input slice by n bytes. head is the full extended
-// slice, while tail is the appended part. If the original slice has sufficient
-// capacity no allocation is performed.
-func sliceForAppend(in []byte, n int) (head, tail []byte) {
- if total := len(in) + n; cap(in) >= total {
- head = in[:total]
- } else {
- head = make([]byte, total)
- copy(head, in)
- }
- tail = head[len(in):]
- return
-}
-
-// encrypt encrypts payload, adding the appropriate nonce and/or MAC, and
-// appends it to record, which must already contain the record header.
-func (hc *halfConn) encrypt(record, payload []byte, rand io.Reader) ([]byte, error) {
- if hc.cipher == nil {
- return append(record, payload...), nil
- }
-
- var explicitNonce []byte
- if explicitNonceLen := hc.explicitNonceLen(); explicitNonceLen > 0 {
- record, explicitNonce = sliceForAppend(record, explicitNonceLen)
- if _, isCBC := hc.cipher.(cbcMode); !isCBC && explicitNonceLen < 16 {
- // The AES-GCM construction in TLS has an explicit nonce so that the
- // nonce can be random. However, the nonce is only 8 bytes which is
- // too small for a secure, random nonce. Therefore we use the
- // sequence number as the nonce. The 3DES-CBC construction also has
- // an 8 bytes nonce but its nonces must be unpredictable (see RFC
- // 5246, Appendix F.3), forcing us to use randomness. That's not
- // 3DES' biggest problem anyway because the birthday bound on block
- // collision is reached first due to its similarly small block size
- // (see the Sweet32 attack).
- copy(explicitNonce, hc.seq[:])
- } else {
- if _, err := io.ReadFull(rand, explicitNonce); err != nil {
- return nil, err
- }
- }
- }
-
- var dst []byte
- switch c := hc.cipher.(type) {
- case cipher.Stream:
- mac := tls10MAC(hc.mac, hc.scratchBuf[:0], hc.seq[:], record[:recordHeaderLen], payload, nil)
- record, dst = sliceForAppend(record, len(payload)+len(mac))
- c.XORKeyStream(dst[:len(payload)], payload)
- c.XORKeyStream(dst[len(payload):], mac)
- case aead:
- nonce := explicitNonce
- if len(nonce) == 0 {
- nonce = hc.seq[:]
- }
-
- if hc.version == VersionTLS13 {
- record = append(record, payload...)
-
- // Encrypt the actual ContentType and replace the plaintext one.
- record = append(record, record[0])
- record[0] = byte(recordTypeApplicationData)
-
- n := len(payload) + 1 + c.Overhead()
- record[3] = byte(n >> 8)
- record[4] = byte(n)
-
- record = c.Seal(record[:recordHeaderLen],
- nonce, record[recordHeaderLen:], record[:recordHeaderLen])
- } else {
- additionalData := append(hc.scratchBuf[:0], hc.seq[:]...)
- additionalData = append(additionalData, record[:recordHeaderLen]...)
- record = c.Seal(record, nonce, payload, additionalData)
- }
- case cbcMode:
- mac := tls10MAC(hc.mac, hc.scratchBuf[:0], hc.seq[:], record[:recordHeaderLen], payload, nil)
- blockSize := c.BlockSize()
- plaintextLen := len(payload) + len(mac)
- paddingLen := blockSize - plaintextLen%blockSize
- record, dst = sliceForAppend(record, plaintextLen+paddingLen)
- copy(dst, payload)
- copy(dst[len(payload):], mac)
- for i := plaintextLen; i < len(dst); i++ {
- dst[i] = byte(paddingLen - 1)
- }
- if len(explicitNonce) > 0 {
- c.SetIV(explicitNonce)
- }
- c.CryptBlocks(dst, dst)
- default:
- panic("unknown cipher type")
- }
-
- // Update length to include nonce, MAC and any block padding needed.
- n := len(record) - recordHeaderLen
- record[3] = byte(n >> 8)
- record[4] = byte(n)
- hc.incSeq()
-
- return record, nil
-}
-
-// RecordHeaderError is returned when a TLS record header is invalid.
-type RecordHeaderError struct {
- // Msg contains a human readable string that describes the error.
- Msg string
- // RecordHeader contains the five bytes of TLS record header that
- // triggered the error.
- RecordHeader [5]byte
- // Conn provides the underlying net.Conn in the case that a client
- // sent an initial handshake that didn't look like TLS.
- // It is nil if there's already been a handshake or a TLS alert has
- // been written to the connection.
- Conn net.Conn
-}
-
-func (e RecordHeaderError) Error() string { return "tls: " + e.Msg }
-
-func (c *Conn) newRecordHeaderError(conn net.Conn, msg string) (err RecordHeaderError) {
- err.Msg = msg
- err.Conn = conn
- copy(err.RecordHeader[:], c.rawInput.Bytes())
- return err
-}
-
-func (c *Conn) readRecord() error {
- return c.readRecordOrCCS(false)
-}
-
-func (c *Conn) readChangeCipherSpec() error {
- return c.readRecordOrCCS(true)
-}
-
-// readRecordOrCCS reads one or more TLS records from the connection and
-// updates the record layer state. Some invariants:
-// * c.in must be locked
-// * c.input must be empty
-// During the handshake one and only one of the following will happen:
-// - c.hand grows
-// - c.in.changeCipherSpec is called
-// - an error is returned
-// After the handshake one and only one of the following will happen:
-// - c.hand grows
-// - c.input is set
-// - an error is returned
-func (c *Conn) readRecordOrCCS(expectChangeCipherSpec bool) error {
- if c.in.err != nil {
- return c.in.err
- }
- handshakeComplete := c.handshakeComplete()
-
- // This function modifies c.rawInput, which owns the c.input memory.
- if c.input.Len() != 0 {
- return c.in.setErrorLocked(errors.New("tls: internal error: attempted to read record with pending application data"))
- }
- c.input.Reset(nil)
-
- // Read header, payload.
- if err := c.readFromUntil(c.conn, recordHeaderLen); err != nil {
- // RFC 8446, Section 6.1 suggests that EOF without an alertCloseNotify
- // is an error, but popular web sites seem to do this, so we accept it
- // if and only if at the record boundary.
- if err == io.ErrUnexpectedEOF && c.rawInput.Len() == 0 {
- err = io.EOF
- }
- if e, ok := err.(net.Error); !ok || !e.Temporary() {
- c.in.setErrorLocked(err)
- }
- return err
- }
- hdr := c.rawInput.Bytes()[:recordHeaderLen]
- typ := recordType(hdr[0])
-
- // No valid TLS record has a type of 0x80, however SSLv2 handshakes
- // start with a uint16 length where the MSB is set and the first record
- // is always < 256 bytes long. Therefore typ == 0x80 strongly suggests
- // an SSLv2 client.
- if !handshakeComplete && typ == 0x80 {
- c.sendAlert(alertProtocolVersion)
- return c.in.setErrorLocked(c.newRecordHeaderError(nil, "unsupported SSLv2 handshake received"))
- }
-
- vers := uint16(hdr[1])<<8 | uint16(hdr[2])
- n := int(hdr[3])<<8 | int(hdr[4])
- if c.haveVers && c.vers != VersionTLS13 && vers != c.vers {
- c.sendAlert(alertProtocolVersion)
- msg := fmt.Sprintf("received record with version %x when expecting version %x", vers, c.vers)
- return c.in.setErrorLocked(c.newRecordHeaderError(nil, msg))
- }
- if !c.haveVers {
- // First message, be extra suspicious: this might not be a TLS
- // client. Bail out before reading a full 'body', if possible.
- // The current max version is 3.3 so if the version is >= 16.0,
- // it's probably not real.
- if (typ != recordTypeAlert && typ != recordTypeHandshake) || vers >= 0x1000 {
- return c.in.setErrorLocked(c.newRecordHeaderError(c.conn, "first record does not look like a TLS handshake"))
- }
- }
- if c.vers == VersionTLS13 && n > maxCiphertextTLS13 || n > maxCiphertext {
- c.sendAlert(alertRecordOverflow)
- msg := fmt.Sprintf("oversized record received with length %d", n)
- return c.in.setErrorLocked(c.newRecordHeaderError(nil, msg))
- }
- if err := c.readFromUntil(c.conn, recordHeaderLen+n); err != nil {
- if e, ok := err.(net.Error); !ok || !e.Temporary() {
- c.in.setErrorLocked(err)
- }
- return err
- }
-
- // Process message.
- record := c.rawInput.Next(recordHeaderLen + n)
- data, typ, err := c.in.decrypt(record)
- if err != nil {
- return c.in.setErrorLocked(c.sendAlert(err.(alert)))
- }
- if len(data) > maxPlaintext {
- return c.in.setErrorLocked(c.sendAlert(alertRecordOverflow))
- }
-
- // Application Data messages are always protected.
- if c.in.cipher == nil && typ == recordTypeApplicationData {
- return c.in.setErrorLocked(c.sendAlert(alertUnexpectedMessage))
- }
-
- if typ != recordTypeAlert && typ != recordTypeChangeCipherSpec && len(data) > 0 {
- // This is a state-advancing message: reset the retry count.
- c.retryCount = 0
- }
-
- // Handshake messages MUST NOT be interleaved with other record types in TLS 1.3.
- if c.vers == VersionTLS13 && typ != recordTypeHandshake && c.hand.Len() > 0 {
- return c.in.setErrorLocked(c.sendAlert(alertUnexpectedMessage))
- }
-
- switch typ {
- default:
- return c.in.setErrorLocked(c.sendAlert(alertUnexpectedMessage))
-
- case recordTypeAlert:
- if len(data) != 2 {
- return c.in.setErrorLocked(c.sendAlert(alertUnexpectedMessage))
- }
- if alert(data[1]) == alertCloseNotify {
- return c.in.setErrorLocked(io.EOF)
- }
- if c.vers == VersionTLS13 {
- return c.in.setErrorLocked(&net.OpError{Op: "remote error", Err: alert(data[1])})
- }
- switch data[0] {
- case alertLevelWarning:
- // Drop the record on the floor and retry.
- return c.retryReadRecord(expectChangeCipherSpec)
- case alertLevelError:
- return c.in.setErrorLocked(&net.OpError{Op: "remote error", Err: alert(data[1])})
- default:
- return c.in.setErrorLocked(c.sendAlert(alertUnexpectedMessage))
- }
-
- case recordTypeChangeCipherSpec:
- if len(data) != 1 || data[0] != 1 {
- return c.in.setErrorLocked(c.sendAlert(alertDecodeError))
- }
- // Handshake messages are not allowed to fragment across the CCS.
- if c.hand.Len() > 0 {
- return c.in.setErrorLocked(c.sendAlert(alertUnexpectedMessage))
- }
- // In TLS 1.3, change_cipher_spec records are ignored until the
- // Finished. See RFC 8446, Appendix D.4. Note that according to Section
- // 5, a server can send a ChangeCipherSpec before its ServerHello, when
- // c.vers is still unset. That's not useful though and suspicious if the
- // server then selects a lower protocol version, so don't allow that.
- if c.vers == VersionTLS13 {
- return c.retryReadRecord(expectChangeCipherSpec)
- }
- if !expectChangeCipherSpec {
- return c.in.setErrorLocked(c.sendAlert(alertUnexpectedMessage))
- }
- if err := c.in.changeCipherSpec(); err != nil {
- return c.in.setErrorLocked(c.sendAlert(err.(alert)))
- }
-
- case recordTypeApplicationData:
- if !handshakeComplete || expectChangeCipherSpec {
- return c.in.setErrorLocked(c.sendAlert(alertUnexpectedMessage))
- }
- // Some OpenSSL servers send empty records in order to randomize the
- // CBC IV. Ignore a limited number of empty records.
- if len(data) == 0 {
- return c.retryReadRecord(expectChangeCipherSpec)
- }
- // Note that data is owned by c.rawInput, following the Next call above,
- // to avoid copying the plaintext. This is safe because c.rawInput is
- // not read from or written to until c.input is drained.
- c.input.Reset(data)
-
- case recordTypeHandshake:
- if len(data) == 0 || expectChangeCipherSpec {
- return c.in.setErrorLocked(c.sendAlert(alertUnexpectedMessage))
- }
- c.hand.Write(data)
- }
-
- return nil
-}
-
-// retryReadRecord recurses into readRecordOrCCS to drop a non-advancing record, like
-// a warning alert, empty application_data, or a change_cipher_spec in TLS 1.3.
-func (c *Conn) retryReadRecord(expectChangeCipherSpec bool) error {
- c.retryCount++
- if c.retryCount > maxUselessRecords {
- c.sendAlert(alertUnexpectedMessage)
- return c.in.setErrorLocked(errors.New("tls: too many ignored records"))
- }
- return c.readRecordOrCCS(expectChangeCipherSpec)
-}
-
-// atLeastReader reads from R, stopping with EOF once at least N bytes have been
-// read. It is different from an io.LimitedReader in that it doesn't cut short
-// the last Read call, and in that it considers an early EOF an error.
-type atLeastReader struct {
- R io.Reader
- N int64
-}
-
-func (r *atLeastReader) Read(p []byte) (int, error) {
- if r.N <= 0 {
- return 0, io.EOF
- }
- n, err := r.R.Read(p)
- r.N -= int64(n) // won't underflow unless len(p) >= n > 9223372036854775809
- if r.N > 0 && err == io.EOF {
- return n, io.ErrUnexpectedEOF
- }
- if r.N <= 0 && err == nil {
- return n, io.EOF
- }
- return n, err
-}
-
-// readFromUntil reads from r into c.rawInput until c.rawInput contains
-// at least n bytes or else returns an error.
-func (c *Conn) readFromUntil(r io.Reader, n int) error {
- if c.rawInput.Len() >= n {
- return nil
- }
- needs := n - c.rawInput.Len()
- // There might be extra input waiting on the wire. Make a best effort
- // attempt to fetch it so that it can be used in (*Conn).Read to
- // "predict" closeNotify alerts.
- c.rawInput.Grow(needs + bytes.MinRead)
- _, err := c.rawInput.ReadFrom(&atLeastReader{r, int64(needs)})
- return err
-}
-
-// sendAlert sends a TLS alert message.
-func (c *Conn) sendAlertLocked(err alert) error {
- switch err {
- case alertNoRenegotiation, alertCloseNotify:
- c.tmp[0] = alertLevelWarning
- default:
- c.tmp[0] = alertLevelError
- }
- c.tmp[1] = byte(err)
-
- _, writeErr := c.writeRecordLocked(recordTypeAlert, c.tmp[0:2])
- if err == alertCloseNotify {
- // closeNotify is a special case in that it isn't an error.
- return writeErr
- }
-
- return c.out.setErrorLocked(&net.OpError{Op: "local error", Err: err})
-}
-
-// sendAlert sends a TLS alert message.
-func (c *Conn) sendAlert(err alert) error {
- if c.extraConfig != nil && c.extraConfig.AlternativeRecordLayer != nil {
- c.extraConfig.AlternativeRecordLayer.SendAlert(uint8(err))
- return &net.OpError{Op: "local error", Err: err}
- }
-
- c.out.Lock()
- defer c.out.Unlock()
- return c.sendAlertLocked(err)
-}
-
-const (
- // tcpMSSEstimate is a conservative estimate of the TCP maximum segment
- // size (MSS). A constant is used, rather than querying the kernel for
- // the actual MSS, to avoid complexity. The value here is the IPv6
- // minimum MTU (1280 bytes) minus the overhead of an IPv6 header (40
- // bytes) and a TCP header with timestamps (32 bytes).
- tcpMSSEstimate = 1208
-
- // recordSizeBoostThreshold is the number of bytes of application data
- // sent after which the TLS record size will be increased to the
- // maximum.
- recordSizeBoostThreshold = 128 * 1024
-)
-
-// maxPayloadSizeForWrite returns the maximum TLS payload size to use for the
-// next application data record. There is the following trade-off:
-//
-// - For latency-sensitive applications, such as web browsing, each TLS
-// record should fit in one TCP segment.
-// - For throughput-sensitive applications, such as large file transfers,
-// larger TLS records better amortize framing and encryption overheads.
-//
-// A simple heuristic that works well in practice is to use small records for
-// the first 1MB of data, then use larger records for subsequent data, and
-// reset back to smaller records after the connection becomes idle. See "High
-// Performance Web Networking", Chapter 4, or:
-// https://www.igvita.com/2013/10/24/optimizing-tls-record-size-and-buffering-latency/
-//
-// In the interests of simplicity and determinism, this code does not attempt
-// to reset the record size once the connection is idle, however.
-func (c *Conn) maxPayloadSizeForWrite(typ recordType) int {
- if c.config.DynamicRecordSizingDisabled || typ != recordTypeApplicationData {
- return maxPlaintext
- }
-
- if c.bytesSent >= recordSizeBoostThreshold {
- return maxPlaintext
- }
-
- // Subtract TLS overheads to get the maximum payload size.
- payloadBytes := tcpMSSEstimate - recordHeaderLen - c.out.explicitNonceLen()
- if c.out.cipher != nil {
- switch ciph := c.out.cipher.(type) {
- case cipher.Stream:
- payloadBytes -= c.out.mac.Size()
- case cipher.AEAD:
- payloadBytes -= ciph.Overhead()
- case cbcMode:
- blockSize := ciph.BlockSize()
- // The payload must fit in a multiple of blockSize, with
- // room for at least one padding byte.
- payloadBytes = (payloadBytes & ^(blockSize - 1)) - 1
- // The MAC is appended before padding so affects the
- // payload size directly.
- payloadBytes -= c.out.mac.Size()
- default:
- panic("unknown cipher type")
- }
- }
- if c.vers == VersionTLS13 {
- payloadBytes-- // encrypted ContentType
- }
-
- // Allow packet growth in arithmetic progression up to max.
- pkt := c.packetsSent
- c.packetsSent++
- if pkt > 1000 {
- return maxPlaintext // avoid overflow in multiply below
- }
-
- n := payloadBytes * int(pkt+1)
- if n > maxPlaintext {
- n = maxPlaintext
- }
- return n
-}
-
-func (c *Conn) write(data []byte) (int, error) {
- if c.buffering {
- c.sendBuf = append(c.sendBuf, data...)
- return len(data), nil
- }
-
- n, err := c.conn.Write(data)
- c.bytesSent += int64(n)
- return n, err
-}
-
-func (c *Conn) flush() (int, error) {
- if len(c.sendBuf) == 0 {
- return 0, nil
- }
-
- n, err := c.conn.Write(c.sendBuf)
- c.bytesSent += int64(n)
- c.sendBuf = nil
- c.buffering = false
- return n, err
-}
-
-// outBufPool pools the record-sized scratch buffers used by writeRecordLocked.
-var outBufPool = sync.Pool{
- New: func() interface{} {
- return new([]byte)
- },
-}
-
-// writeRecordLocked writes a TLS record with the given type and payload to the
-// connection and updates the record layer state.
-func (c *Conn) writeRecordLocked(typ recordType, data []byte) (int, error) {
- outBufPtr := outBufPool.Get().(*[]byte)
- outBuf := *outBufPtr
- defer func() {
- // You might be tempted to simplify this by just passing &outBuf to Put,
- // but that would make the local copy of the outBuf slice header escape
- // to the heap, causing an allocation. Instead, we keep around the
- // pointer to the slice header returned by Get, which is already on the
- // heap, and overwrite and return that.
- *outBufPtr = outBuf
- outBufPool.Put(outBufPtr)
- }()
-
- var n int
- for len(data) > 0 {
- m := len(data)
- if maxPayload := c.maxPayloadSizeForWrite(typ); m > maxPayload {
- m = maxPayload
- }
-
- _, outBuf = sliceForAppend(outBuf[:0], recordHeaderLen)
- outBuf[0] = byte(typ)
- vers := c.vers
- if vers == 0 {
- // Some TLS servers fail if the record version is
- // greater than TLS 1.0 for the initial ClientHello.
- vers = VersionTLS10
- } else if vers == VersionTLS13 {
- // TLS 1.3 froze the record layer version to 1.2.
- // See RFC 8446, Section 5.1.
- vers = VersionTLS12
- }
- outBuf[1] = byte(vers >> 8)
- outBuf[2] = byte(vers)
- outBuf[3] = byte(m >> 8)
- outBuf[4] = byte(m)
-
- var err error
- outBuf, err = c.out.encrypt(outBuf, data[:m], c.config.rand())
- if err != nil {
- return n, err
- }
- if _, err := c.write(outBuf); err != nil {
- return n, err
- }
- n += m
- data = data[m:]
- }
-
- if typ == recordTypeChangeCipherSpec && c.vers != VersionTLS13 {
- if err := c.out.changeCipherSpec(); err != nil {
- return n, c.sendAlertLocked(err.(alert))
- }
- }
-
- return n, nil
-}
-
-// writeRecord writes a TLS record with the given type and payload to the
-// connection and updates the record layer state.
-func (c *Conn) writeRecord(typ recordType, data []byte) (int, error) {
- if c.extraConfig != nil && c.extraConfig.AlternativeRecordLayer != nil {
- if typ == recordTypeChangeCipherSpec {
- return len(data), nil
- }
- return c.extraConfig.AlternativeRecordLayer.WriteRecord(data)
- }
-
- c.out.Lock()
- defer c.out.Unlock()
-
- return c.writeRecordLocked(typ, data)
-}
-
-// readHandshake reads the next handshake message from
-// the record layer.
-func (c *Conn) readHandshake() (interface{}, error) {
- var data []byte
- if c.extraConfig != nil && c.extraConfig.AlternativeRecordLayer != nil {
- var err error
- data, err = c.extraConfig.AlternativeRecordLayer.ReadHandshakeMessage()
- if err != nil {
- return nil, err
- }
- } else {
- for c.hand.Len() < 4 {
- if err := c.readRecord(); err != nil {
- return nil, err
- }
- }
-
- data = c.hand.Bytes()
- n := int(data[1])<<16 | int(data[2])<<8 | int(data[3])
- if n > maxHandshake {
- c.sendAlertLocked(alertInternalError)
- return nil, c.in.setErrorLocked(fmt.Errorf("tls: handshake message of length %d bytes exceeds maximum of %d bytes", n, maxHandshake))
- }
- for c.hand.Len() < 4+n {
- if err := c.readRecord(); err != nil {
- return nil, err
- }
- }
- data = c.hand.Next(4 + n)
- }
- var m handshakeMessage
- switch data[0] {
- case typeHelloRequest:
- m = new(helloRequestMsg)
- case typeClientHello:
- m = new(clientHelloMsg)
- case typeServerHello:
- m = new(serverHelloMsg)
- case typeNewSessionTicket:
- if c.vers == VersionTLS13 {
- m = new(newSessionTicketMsgTLS13)
- } else {
- m = new(newSessionTicketMsg)
- }
- case typeCertificate:
- if c.vers == VersionTLS13 {
- m = new(certificateMsgTLS13)
- } else {
- m = new(certificateMsg)
- }
- case typeCertificateRequest:
- if c.vers == VersionTLS13 {
- m = new(certificateRequestMsgTLS13)
- } else {
- m = &certificateRequestMsg{
- hasSignatureAlgorithm: c.vers >= VersionTLS12,
- }
- }
- case typeCertificateStatus:
- m = new(certificateStatusMsg)
- case typeServerKeyExchange:
- m = new(serverKeyExchangeMsg)
- case typeServerHelloDone:
- m = new(serverHelloDoneMsg)
- case typeClientKeyExchange:
- m = new(clientKeyExchangeMsg)
- case typeCertificateVerify:
- m = &certificateVerifyMsg{
- hasSignatureAlgorithm: c.vers >= VersionTLS12,
- }
- case typeFinished:
- m = new(finishedMsg)
- case typeEncryptedExtensions:
- m = new(encryptedExtensionsMsg)
- case typeEndOfEarlyData:
- m = new(endOfEarlyDataMsg)
- case typeKeyUpdate:
- m = new(keyUpdateMsg)
- default:
- return nil, c.in.setErrorLocked(c.sendAlert(alertUnexpectedMessage))
- }
-
- // The handshake message unmarshalers
- // expect to be able to keep references to data,
- // so pass in a fresh copy that won't be overwritten.
- data = append([]byte(nil), data...)
-
- if !m.unmarshal(data) {
- return nil, c.in.setErrorLocked(c.sendAlert(alertUnexpectedMessage))
- }
- return m, nil
-}
-
-var (
- errShutdown = errors.New("tls: protocol is shutdown")
-)
-
-// Write writes data to the connection.
-//
-// As Write calls Handshake, in order to prevent indefinite blocking a deadline
-// must be set for both Read and Write before Write is called when the handshake
-// has not yet completed. See SetDeadline, SetReadDeadline, and
-// SetWriteDeadline.
-func (c *Conn) Write(b []byte) (int, error) {
- // interlock with Close below
- for {
- x := atomic.LoadInt32(&c.activeCall)
- if x&1 != 0 {
- return 0, net.ErrClosed
- }
- if atomic.CompareAndSwapInt32(&c.activeCall, x, x+2) {
- break
- }
- }
- defer atomic.AddInt32(&c.activeCall, -2)
-
- if err := c.Handshake(); err != nil {
- return 0, err
- }
-
- c.out.Lock()
- defer c.out.Unlock()
-
- if err := c.out.err; err != nil {
- return 0, err
- }
-
- if !c.handshakeComplete() {
- return 0, alertInternalError
- }
-
- if c.closeNotifySent {
- return 0, errShutdown
- }
-
- // TLS 1.0 is susceptible to a chosen-plaintext
- // attack when using block mode ciphers due to predictable IVs.
- // This can be prevented by splitting each Application Data
- // record into two records, effectively randomizing the IV.
- //
- // https://www.openssl.org/~bodo/tls-cbc.txt
- // https://bugzilla.mozilla.org/show_bug.cgi?id=665814
- // https://www.imperialviolet.org/2012/01/15/beastfollowup.html
-
- var m int
- if len(b) > 1 && c.vers == VersionTLS10 {
- if _, ok := c.out.cipher.(cipher.BlockMode); ok {
- n, err := c.writeRecordLocked(recordTypeApplicationData, b[:1])
- if err != nil {
- return n, c.out.setErrorLocked(err)
- }
- m, b = 1, b[1:]
- }
- }
-
- n, err := c.writeRecordLocked(recordTypeApplicationData, b)
- return n + m, c.out.setErrorLocked(err)
-}
-
-// handleRenegotiation processes a HelloRequest handshake message.
-func (c *Conn) handleRenegotiation() error {
- if c.vers == VersionTLS13 {
- return errors.New("tls: internal error: unexpected renegotiation")
- }
-
- msg, err := c.readHandshake()
- if err != nil {
- return err
- }
-
- helloReq, ok := msg.(*helloRequestMsg)
- if !ok {
- c.sendAlert(alertUnexpectedMessage)
- return unexpectedMessageError(helloReq, msg)
- }
-
- if !c.isClient {
- return c.sendAlert(alertNoRenegotiation)
- }
-
- switch c.config.Renegotiation {
- case RenegotiateNever:
- return c.sendAlert(alertNoRenegotiation)
- case RenegotiateOnceAsClient:
- if c.handshakes > 1 {
- return c.sendAlert(alertNoRenegotiation)
- }
- case RenegotiateFreelyAsClient:
- // Ok.
- default:
- c.sendAlert(alertInternalError)
- return errors.New("tls: unknown Renegotiation value")
- }
-
- c.handshakeMutex.Lock()
- defer c.handshakeMutex.Unlock()
-
- atomic.StoreUint32(&c.handshakeStatus, 0)
- if c.handshakeErr = c.clientHandshake(); c.handshakeErr == nil {
- c.handshakes++
- }
- return c.handshakeErr
-}
-
-func (c *Conn) HandlePostHandshakeMessage() error {
- return c.handlePostHandshakeMessage()
-}
-
-// handlePostHandshakeMessage processes a handshake message arrived after the
-// handshake is complete. Up to TLS 1.2, it indicates the start of a renegotiation.
-func (c *Conn) handlePostHandshakeMessage() error {
- if c.vers != VersionTLS13 {
- return c.handleRenegotiation()
- }
-
- msg, err := c.readHandshake()
- if err != nil {
- return err
- }
-
- c.retryCount++
- if c.retryCount > maxUselessRecords {
- c.sendAlert(alertUnexpectedMessage)
- return c.in.setErrorLocked(errors.New("tls: too many non-advancing records"))
- }
-
- switch msg := msg.(type) {
- case *newSessionTicketMsgTLS13:
- return c.handleNewSessionTicket(msg)
- case *keyUpdateMsg:
- return c.handleKeyUpdate(msg)
- default:
- c.sendAlert(alertUnexpectedMessage)
- return fmt.Errorf("tls: received unexpected handshake message of type %T", msg)
- }
-}
-
-func (c *Conn) handleKeyUpdate(keyUpdate *keyUpdateMsg) error {
- cipherSuite := cipherSuiteTLS13ByID(c.cipherSuite)
- if cipherSuite == nil {
- return c.in.setErrorLocked(c.sendAlert(alertInternalError))
- }
-
- newSecret := cipherSuite.nextTrafficSecret(c.in.trafficSecret)
- c.in.setTrafficSecret(cipherSuite, newSecret)
-
- if keyUpdate.updateRequested {
- c.out.Lock()
- defer c.out.Unlock()
-
- msg := &keyUpdateMsg{}
- _, err := c.writeRecordLocked(recordTypeHandshake, msg.marshal())
- if err != nil {
- // Surface the error at the next write.
- c.out.setErrorLocked(err)
- return nil
- }
-
- newSecret := cipherSuite.nextTrafficSecret(c.out.trafficSecret)
- c.out.setTrafficSecret(cipherSuite, newSecret)
- }
-
- return nil
-}
-
-// Read reads data from the connection.
-//
-// As Read calls Handshake, in order to prevent indefinite blocking a deadline
-// must be set for both Read and Write before Read is called when the handshake
-// has not yet completed. See SetDeadline, SetReadDeadline, and
-// SetWriteDeadline.
-func (c *Conn) Read(b []byte) (int, error) {
- if err := c.Handshake(); err != nil {
- return 0, err
- }
- if len(b) == 0 {
- // Put this after Handshake, in case people were calling
- // Read(nil) for the side effect of the Handshake.
- return 0, nil
- }
-
- c.in.Lock()
- defer c.in.Unlock()
-
- for c.input.Len() == 0 {
- if err := c.readRecord(); err != nil {
- return 0, err
- }
- for c.hand.Len() > 0 {
- if err := c.handlePostHandshakeMessage(); err != nil {
- return 0, err
- }
- }
- }
-
- n, _ := c.input.Read(b)
-
- // If a close-notify alert is waiting, read it so that we can return (n,
- // EOF) instead of (n, nil), to signal to the HTTP response reading
- // goroutine that the connection is now closed. This eliminates a race
- // where the HTTP response reading goroutine would otherwise not observe
- // the EOF until its next read, by which time a client goroutine might
- // have already tried to reuse the HTTP connection for a new request.
- // See https://golang.org/cl/76400046 and https://golang.org/issue/3514
- if n != 0 && c.input.Len() == 0 && c.rawInput.Len() > 0 &&
- recordType(c.rawInput.Bytes()[0]) == recordTypeAlert {
- if err := c.readRecord(); err != nil {
- return n, err // will be io.EOF on closeNotify
- }
- }
-
- return n, nil
-}
-
-// Close closes the connection.
-func (c *Conn) Close() error {
- // Interlock with Conn.Write above.
- var x int32
- for {
- x = atomic.LoadInt32(&c.activeCall)
- if x&1 != 0 {
- return net.ErrClosed
- }
- if atomic.CompareAndSwapInt32(&c.activeCall, x, x|1) {
- break
- }
- }
- if x != 0 {
- // io.Writer and io.Closer should not be used concurrently.
- // If Close is called while a Write is currently in-flight,
- // interpret that as a sign that this Close is really just
- // being used to break the Write and/or clean up resources and
- // avoid sending the alertCloseNotify, which may block
- // waiting on handshakeMutex or the c.out mutex.
- return c.conn.Close()
- }
-
- var alertErr error
- if c.handshakeComplete() {
- if err := c.closeNotify(); err != nil {
- alertErr = fmt.Errorf("tls: failed to send closeNotify alert (but connection was closed anyway): %w", err)
- }
- }
-
- if err := c.conn.Close(); err != nil {
- return err
- }
- return alertErr
-}
-
-var errEarlyCloseWrite = errors.New("tls: CloseWrite called before handshake complete")
-
-// CloseWrite shuts down the writing side of the connection. It should only be
-// called once the handshake has completed and does not call CloseWrite on the
-// underlying connection. Most callers should just use Close.
-func (c *Conn) CloseWrite() error {
- if !c.handshakeComplete() {
- return errEarlyCloseWrite
- }
-
- return c.closeNotify()
-}
-
-func (c *Conn) closeNotify() error {
- c.out.Lock()
- defer c.out.Unlock()
-
- if !c.closeNotifySent {
- // Set a Write Deadline to prevent possibly blocking forever.
- c.SetWriteDeadline(time.Now().Add(time.Second * 5))
- c.closeNotifyErr = c.sendAlertLocked(alertCloseNotify)
- c.closeNotifySent = true
- // Any subsequent writes will fail.
- c.SetWriteDeadline(time.Now())
- }
- return c.closeNotifyErr
-}
-
-// Handshake runs the client or server handshake
-// protocol if it has not yet been run.
-//
-// Most uses of this package need not call Handshake explicitly: the
-// first Read or Write will call it automatically.
-//
-// For control over canceling or setting a timeout on a handshake, use
-// the Dialer's DialContext method.
-func (c *Conn) Handshake() error {
- c.handshakeMutex.Lock()
- defer c.handshakeMutex.Unlock()
-
- if err := c.handshakeErr; err != nil {
- return err
- }
- if c.handshakeComplete() {
- return nil
- }
-
- c.in.Lock()
- defer c.in.Unlock()
-
- c.handshakeErr = c.handshakeFn()
- if c.handshakeErr == nil {
- c.handshakes++
- } else {
- // If an error occurred during the handshake try to flush the
- // alert that might be left in the buffer.
- c.flush()
- }
-
- if c.handshakeErr == nil && !c.handshakeComplete() {
- c.handshakeErr = errors.New("tls: internal error: handshake should have had a result")
- }
-
- return c.handshakeErr
-}
-
-// ConnectionState returns basic TLS details about the connection.
-func (c *Conn) ConnectionState() ConnectionState {
- c.handshakeMutex.Lock()
- defer c.handshakeMutex.Unlock()
- return c.connectionStateLocked()
-}
-
-// ConnectionStateWith0RTT returns basic TLS details (incl. 0-RTT status) about the connection.
-func (c *Conn) ConnectionStateWith0RTT() ConnectionStateWith0RTT {
- c.handshakeMutex.Lock()
- defer c.handshakeMutex.Unlock()
- return ConnectionStateWith0RTT{
- ConnectionState: c.connectionStateLocked(),
- Used0RTT: c.used0RTT,
- }
-}
-
-func (c *Conn) connectionStateLocked() ConnectionState {
- var state connectionState
- state.HandshakeComplete = c.handshakeComplete()
- state.Version = c.vers
- state.NegotiatedProtocol = c.clientProtocol
- state.DidResume = c.didResume
- state.NegotiatedProtocolIsMutual = true
- state.ServerName = c.serverName
- state.CipherSuite = c.cipherSuite
- state.PeerCertificates = c.peerCertificates
- state.VerifiedChains = c.verifiedChains
- state.SignedCertificateTimestamps = c.scts
- state.OCSPResponse = c.ocspResponse
- if !c.didResume && c.vers != VersionTLS13 {
- if c.clientFinishedIsFirst {
- state.TLSUnique = c.clientFinished[:]
- } else {
- state.TLSUnique = c.serverFinished[:]
- }
- }
- if c.config.Renegotiation != RenegotiateNever {
- state.ekm = noExportedKeyingMaterial
- } else {
- state.ekm = c.ekm
- }
- return toConnectionState(state)
-}
-
-// OCSPResponse returns the stapled OCSP response from the TLS server, if
-// any. (Only valid for client connections.)
-func (c *Conn) OCSPResponse() []byte {
- c.handshakeMutex.Lock()
- defer c.handshakeMutex.Unlock()
-
- return c.ocspResponse
-}
-
-// VerifyHostname checks that the peer certificate chain is valid for
-// connecting to host. If so, it returns nil; if not, it returns an error
-// describing the problem.
-func (c *Conn) VerifyHostname(host string) error {
- c.handshakeMutex.Lock()
- defer c.handshakeMutex.Unlock()
- if !c.isClient {
- return errors.New("tls: VerifyHostname called on TLS server connection")
- }
- if !c.handshakeComplete() {
- return errors.New("tls: handshake has not yet been performed")
- }
- if len(c.verifiedChains) == 0 {
- return errors.New("tls: handshake did not verify certificate chain")
- }
- return c.peerCertificates[0].VerifyHostname(host)
-}
-
-func (c *Conn) handshakeComplete() bool {
- return atomic.LoadUint32(&c.handshakeStatus) == 1
-}
diff --git a/vendor/github.com/marten-seemann/qtls-go1-16/handshake_client.go b/vendor/github.com/marten-seemann/qtls-go1-16/handshake_client.go
deleted file mode 100644
index a447061ae..000000000
--- a/vendor/github.com/marten-seemann/qtls-go1-16/handshake_client.go
+++ /dev/null
@@ -1,1105 +0,0 @@
-// Copyright 2009 The Go Authors. All rights reserved.
-// Use of this source code is governed by a BSD-style
-// license that can be found in the LICENSE file.
-
-package qtls
-
-import (
- "bytes"
- "crypto"
- "crypto/ecdsa"
- "crypto/ed25519"
- "crypto/rsa"
- "crypto/subtle"
- "crypto/x509"
- "errors"
- "fmt"
- "hash"
- "io"
- "net"
- "strings"
- "sync/atomic"
- "time"
-
- "golang.org/x/crypto/cryptobyte"
-)
-
-const clientSessionStateVersion = 1
-
-type clientHandshakeState struct {
- c *Conn
- serverHello *serverHelloMsg
- hello *clientHelloMsg
- suite *cipherSuite
- finishedHash finishedHash
- masterSecret []byte
- session *clientSessionState
-}
-
-func (c *Conn) makeClientHello() (*clientHelloMsg, ecdheParameters, error) {
- config := c.config
- if len(config.ServerName) == 0 && !config.InsecureSkipVerify {
- return nil, nil, errors.New("tls: either ServerName or InsecureSkipVerify must be specified in the tls.Config")
- }
-
- nextProtosLength := 0
- for _, proto := range config.NextProtos {
- if l := len(proto); l == 0 || l > 255 {
- return nil, nil, errors.New("tls: invalid NextProtos value")
- } else {
- nextProtosLength += 1 + l
- }
- }
- if nextProtosLength > 0xffff {
- return nil, nil, errors.New("tls: NextProtos values too large")
- }
-
- var supportedVersions []uint16
- var clientHelloVersion uint16
- if c.extraConfig.usesAlternativeRecordLayer() {
- if config.maxSupportedVersion() < VersionTLS13 {
- return nil, nil, errors.New("tls: MaxVersion prevents QUIC from using TLS 1.3")
- }
- // Only offer TLS 1.3 when QUIC is used.
- supportedVersions = []uint16{VersionTLS13}
- clientHelloVersion = VersionTLS13
- } else {
- supportedVersions = config.supportedVersions()
- if len(supportedVersions) == 0 {
- return nil, nil, errors.New("tls: no supported versions satisfy MinVersion and MaxVersion")
- }
- clientHelloVersion = config.maxSupportedVersion()
- }
-
- // The version at the beginning of the ClientHello was capped at TLS 1.2
- // for compatibility reasons. The supported_versions extension is used
- // to negotiate versions now. See RFC 8446, Section 4.2.1.
- if clientHelloVersion > VersionTLS12 {
- clientHelloVersion = VersionTLS12
- }
-
- hello := &clientHelloMsg{
- vers: clientHelloVersion,
- compressionMethods: []uint8{compressionNone},
- random: make([]byte, 32),
- ocspStapling: true,
- scts: true,
- serverName: hostnameInSNI(config.ServerName),
- supportedCurves: config.curvePreferences(),
- supportedPoints: []uint8{pointFormatUncompressed},
- secureRenegotiationSupported: true,
- alpnProtocols: config.NextProtos,
- supportedVersions: supportedVersions,
- }
-
- if c.handshakes > 0 {
- hello.secureRenegotiation = c.clientFinished[:]
- }
-
- possibleCipherSuites := config.cipherSuites()
- hello.cipherSuites = make([]uint16, 0, len(possibleCipherSuites))
-
- // add non-TLS 1.3 cipher suites
- if c.config.MinVersion <= VersionTLS12 {
- for _, suiteId := range possibleCipherSuites {
- for _, suite := range cipherSuites {
- if suite.id != suiteId {
- continue
- }
- // Don't advertise TLS 1.2-only cipher suites unless
- // we're attempting TLS 1.2.
- if hello.vers < VersionTLS12 && suite.flags&suiteTLS12 != 0 {
- break
- }
- hello.cipherSuites = append(hello.cipherSuites, suiteId)
- break
- }
- }
- }
-
- _, err := io.ReadFull(config.rand(), hello.random)
- if err != nil {
- return nil, nil, errors.New("tls: short read from Rand: " + err.Error())
- }
-
- // A random session ID is used to detect when the server accepted a ticket
- // and is resuming a session (see RFC 5077). In TLS 1.3, it's always set as
- // a compatibility measure (see RFC 8446, Section 4.1.2).
- if c.extraConfig == nil || c.extraConfig.AlternativeRecordLayer == nil {
- hello.sessionId = make([]byte, 32)
- if _, err := io.ReadFull(config.rand(), hello.sessionId); err != nil {
- return nil, nil, errors.New("tls: short read from Rand: " + err.Error())
- }
- }
-
- if hello.vers >= VersionTLS12 {
- hello.supportedSignatureAlgorithms = supportedSignatureAlgorithms
- }
-
- var params ecdheParameters
- if hello.supportedVersions[0] == VersionTLS13 {
- var hasTLS13CipherSuite bool
- // add TLS 1.3 cipher suites
- for _, suiteID := range possibleCipherSuites {
- for _, suite := range cipherSuitesTLS13 {
- if suite.id == suiteID {
- hasTLS13CipherSuite = true
- hello.cipherSuites = append(hello.cipherSuites, suiteID)
- }
- }
- }
- if !hasTLS13CipherSuite {
- hello.cipherSuites = append(hello.cipherSuites, defaultCipherSuitesTLS13()...)
- }
-
- curveID := config.curvePreferences()[0]
- if _, ok := curveForCurveID(curveID); curveID != X25519 && !ok {
- return nil, nil, errors.New("tls: CurvePreferences includes unsupported curve")
- }
- params, err = generateECDHEParameters(config.rand(), curveID)
- if err != nil {
- return nil, nil, err
- }
- hello.keyShares = []keyShare{{group: curveID, data: params.PublicKey()}}
- }
-
- if hello.supportedVersions[0] == VersionTLS13 && c.extraConfig != nil && c.extraConfig.GetExtensions != nil {
- hello.additionalExtensions = c.extraConfig.GetExtensions(typeClientHello)
- }
-
- return hello, params, nil
-}
-
-func (c *Conn) clientHandshake() (err error) {
- if c.config == nil {
- c.config = fromConfig(defaultConfig())
- }
- c.setAlternativeRecordLayer()
-
- // This may be a renegotiation handshake, in which case some fields
- // need to be reset.
- c.didResume = false
-
- hello, ecdheParams, err := c.makeClientHello()
- if err != nil {
- return err
- }
- c.serverName = hello.serverName
-
- cacheKey, session, earlySecret, binderKey := c.loadSession(hello)
- if cacheKey != "" && session != nil {
- var deletedTicket bool
- if session.vers == VersionTLS13 && hello.earlyData && c.extraConfig != nil && c.extraConfig.Enable0RTT {
- // don't reuse a session ticket that enabled 0-RTT
- c.config.ClientSessionCache.Put(cacheKey, nil)
- deletedTicket = true
-
- if suite := cipherSuiteTLS13ByID(session.cipherSuite); suite != nil {
- h := suite.hash.New()
- h.Write(hello.marshal())
- clientEarlySecret := suite.deriveSecret(earlySecret, "c e traffic", h)
- c.out.exportKey(Encryption0RTT, suite, clientEarlySecret)
- if err := c.config.writeKeyLog(keyLogLabelEarlyTraffic, hello.random, clientEarlySecret); err != nil {
- c.sendAlert(alertInternalError)
- return err
- }
- }
- }
- if !deletedTicket {
- defer func() {
- // If we got a handshake failure when resuming a session, throw away
- // the session ticket. See RFC 5077, Section 3.2.
- //
- // RFC 8446 makes no mention of dropping tickets on failure, but it
- // does require servers to abort on invalid binders, so we need to
- // delete tickets to recover from a corrupted PSK.
- if err != nil {
- c.config.ClientSessionCache.Put(cacheKey, nil)
- }
- }()
- }
- }
-
- if _, err := c.writeRecord(recordTypeHandshake, hello.marshal()); err != nil {
- return err
- }
-
- msg, err := c.readHandshake()
- if err != nil {
- return err
- }
-
- serverHello, ok := msg.(*serverHelloMsg)
- if !ok {
- c.sendAlert(alertUnexpectedMessage)
- return unexpectedMessageError(serverHello, msg)
- }
-
- if err := c.pickTLSVersion(serverHello); err != nil {
- return err
- }
-
- // If we are negotiating a protocol version that's lower than what we
- // support, check for the server downgrade canaries.
- // See RFC 8446, Section 4.1.3.
- maxVers := c.config.maxSupportedVersion()
- tls12Downgrade := string(serverHello.random[24:]) == downgradeCanaryTLS12
- tls11Downgrade := string(serverHello.random[24:]) == downgradeCanaryTLS11
- if maxVers == VersionTLS13 && c.vers <= VersionTLS12 && (tls12Downgrade || tls11Downgrade) ||
- maxVers == VersionTLS12 && c.vers <= VersionTLS11 && tls11Downgrade {
- c.sendAlert(alertIllegalParameter)
- return errors.New("tls: downgrade attempt detected, possibly due to a MitM attack or a broken middlebox")
- }
-
- if c.vers == VersionTLS13 {
- hs := &clientHandshakeStateTLS13{
- c: c,
- serverHello: serverHello,
- hello: hello,
- ecdheParams: ecdheParams,
- session: session,
- earlySecret: earlySecret,
- binderKey: binderKey,
- }
-
- // In TLS 1.3, session tickets are delivered after the handshake.
- return hs.handshake()
- }
-
- hs := &clientHandshakeState{
- c: c,
- serverHello: serverHello,
- hello: hello,
- session: session,
- }
-
- if err := hs.handshake(); err != nil {
- return err
- }
-
- // If we had a successful handshake and hs.session is different from
- // the one already cached - cache a new one.
- if cacheKey != "" && hs.session != nil && session != hs.session {
- c.config.ClientSessionCache.Put(cacheKey, toClientSessionState(hs.session))
- }
-
- return nil
-}
-
-// extract the app data saved in the session.nonce,
-// and set the session.nonce to the actual nonce value
-func (c *Conn) decodeSessionState(session *clientSessionState) (uint32 /* max early data */, []byte /* app data */, bool /* ok */) {
- s := cryptobyte.String(session.nonce)
- var version uint16
- if !s.ReadUint16(&version) {
- return 0, nil, false
- }
- if version != clientSessionStateVersion {
- return 0, nil, false
- }
- var maxEarlyData uint32
- if !s.ReadUint32(&maxEarlyData) {
- return 0, nil, false
- }
- var appData []byte
- if !readUint16LengthPrefixed(&s, &appData) {
- return 0, nil, false
- }
- var nonce []byte
- if !readUint16LengthPrefixed(&s, &nonce) {
- return 0, nil, false
- }
- session.nonce = nonce
- return maxEarlyData, appData, true
-}
-
-func (c *Conn) loadSession(hello *clientHelloMsg) (cacheKey string,
- session *clientSessionState, earlySecret, binderKey []byte) {
- if c.config.SessionTicketsDisabled || c.config.ClientSessionCache == nil {
- return "", nil, nil, nil
- }
-
- hello.ticketSupported = true
-
- if hello.supportedVersions[0] == VersionTLS13 {
- // Require DHE on resumption as it guarantees forward secrecy against
- // compromise of the session ticket key. See RFC 8446, Section 4.2.9.
- hello.pskModes = []uint8{pskModeDHE}
- }
-
- // Session resumption is not allowed if renegotiating because
- // renegotiation is primarily used to allow a client to send a client
- // certificate, which would be skipped if session resumption occurred.
- if c.handshakes != 0 {
- return "", nil, nil, nil
- }
-
- // Try to resume a previously negotiated TLS session, if available.
- cacheKey = clientSessionCacheKey(c.conn.RemoteAddr(), c.config)
- sess, ok := c.config.ClientSessionCache.Get(cacheKey)
- if !ok || sess == nil {
- return cacheKey, nil, nil, nil
- }
- session = fromClientSessionState(sess)
-
- var appData []byte
- var maxEarlyData uint32
- if session.vers == VersionTLS13 {
- var ok bool
- maxEarlyData, appData, ok = c.decodeSessionState(session)
- if !ok { // delete it, if parsing failed
- c.config.ClientSessionCache.Put(cacheKey, nil)
- return cacheKey, nil, nil, nil
- }
- }
-
- // Check that version used for the previous session is still valid.
- versOk := false
- for _, v := range hello.supportedVersions {
- if v == session.vers {
- versOk = true
- break
- }
- }
- if !versOk {
- return cacheKey, nil, nil, nil
- }
-
- // Check that the cached server certificate is not expired, and that it's
- // valid for the ServerName. This should be ensured by the cache key, but
- // protect the application from a faulty ClientSessionCache implementation.
- if !c.config.InsecureSkipVerify {
- if len(session.verifiedChains) == 0 {
- // The original connection had InsecureSkipVerify, while this doesn't.
- return cacheKey, nil, nil, nil
- }
- serverCert := session.serverCertificates[0]
- if c.config.time().After(serverCert.NotAfter) {
- // Expired certificate, delete the entry.
- c.config.ClientSessionCache.Put(cacheKey, nil)
- return cacheKey, nil, nil, nil
- }
- if err := serverCert.VerifyHostname(c.config.ServerName); err != nil {
- return cacheKey, nil, nil, nil
- }
- }
-
- if session.vers != VersionTLS13 {
- // In TLS 1.2 the cipher suite must match the resumed session. Ensure we
- // are still offering it.
- if mutualCipherSuite(hello.cipherSuites, session.cipherSuite) == nil {
- return cacheKey, nil, nil, nil
- }
-
- hello.sessionTicket = session.sessionTicket
- return
- }
-
- // Check that the session ticket is not expired.
- if c.config.time().After(session.useBy) {
- c.config.ClientSessionCache.Put(cacheKey, nil)
- return cacheKey, nil, nil, nil
- }
-
- // In TLS 1.3 the KDF hash must match the resumed session. Ensure we
- // offer at least one cipher suite with that hash.
- cipherSuite := cipherSuiteTLS13ByID(session.cipherSuite)
- if cipherSuite == nil {
- return cacheKey, nil, nil, nil
- }
- cipherSuiteOk := false
- for _, offeredID := range hello.cipherSuites {
- offeredSuite := cipherSuiteTLS13ByID(offeredID)
- if offeredSuite != nil && offeredSuite.hash == cipherSuite.hash {
- cipherSuiteOk = true
- break
- }
- }
- if !cipherSuiteOk {
- return cacheKey, nil, nil, nil
- }
-
- // Set the pre_shared_key extension. See RFC 8446, Section 4.2.11.1.
- ticketAge := uint32(c.config.time().Sub(session.receivedAt) / time.Millisecond)
- identity := pskIdentity{
- label: session.sessionTicket,
- obfuscatedTicketAge: ticketAge + session.ageAdd,
- }
- hello.pskIdentities = []pskIdentity{identity}
- hello.pskBinders = [][]byte{make([]byte, cipherSuite.hash.Size())}
-
- // Compute the PSK binders. See RFC 8446, Section 4.2.11.2.
- psk := cipherSuite.expandLabel(session.masterSecret, "resumption",
- session.nonce, cipherSuite.hash.Size())
- earlySecret = cipherSuite.extract(psk, nil)
- binderKey = cipherSuite.deriveSecret(earlySecret, resumptionBinderLabel, nil)
- if c.extraConfig != nil {
- hello.earlyData = c.extraConfig.Enable0RTT && maxEarlyData > 0
- }
- transcript := cipherSuite.hash.New()
- transcript.Write(hello.marshalWithoutBinders())
- pskBinders := [][]byte{cipherSuite.finishedHash(binderKey, transcript)}
- hello.updateBinders(pskBinders)
-
- if session.vers == VersionTLS13 && c.extraConfig != nil && c.extraConfig.SetAppDataFromSessionState != nil {
- c.extraConfig.SetAppDataFromSessionState(appData)
- }
- return
-}
-
-func (c *Conn) pickTLSVersion(serverHello *serverHelloMsg) error {
- peerVersion := serverHello.vers
- if serverHello.supportedVersion != 0 {
- peerVersion = serverHello.supportedVersion
- }
-
- vers, ok := c.config.mutualVersion([]uint16{peerVersion})
- if !ok {
- c.sendAlert(alertProtocolVersion)
- return fmt.Errorf("tls: server selected unsupported protocol version %x", peerVersion)
- }
-
- c.vers = vers
- c.haveVers = true
- c.in.version = vers
- c.out.version = vers
-
- return nil
-}
-
-// Does the handshake, either a full one or resumes old session. Requires hs.c,
-// hs.hello, hs.serverHello, and, optionally, hs.session to be set.
-func (hs *clientHandshakeState) handshake() error {
- c := hs.c
-
- isResume, err := hs.processServerHello()
- if err != nil {
- return err
- }
-
- hs.finishedHash = newFinishedHash(c.vers, hs.suite)
-
- // No signatures of the handshake are needed in a resumption.
- // Otherwise, in a full handshake, if we don't have any certificates
- // configured then we will never send a CertificateVerify message and
- // thus no signatures are needed in that case either.
- if isResume || (len(c.config.Certificates) == 0 && c.config.GetClientCertificate == nil) {
- hs.finishedHash.discardHandshakeBuffer()
- }
-
- hs.finishedHash.Write(hs.hello.marshal())
- hs.finishedHash.Write(hs.serverHello.marshal())
-
- c.buffering = true
- c.didResume = isResume
- if isResume {
- if err := hs.establishKeys(); err != nil {
- return err
- }
- if err := hs.readSessionTicket(); err != nil {
- return err
- }
- if err := hs.readFinished(c.serverFinished[:]); err != nil {
- return err
- }
- c.clientFinishedIsFirst = false
- // Make sure the connection is still being verified whether or not this
- // is a resumption. Resumptions currently don't reverify certificates so
- // they don't call verifyServerCertificate. See Issue 31641.
- if c.config.VerifyConnection != nil {
- if err := c.config.VerifyConnection(c.connectionStateLocked()); err != nil {
- c.sendAlert(alertBadCertificate)
- return err
- }
- }
- if err := hs.sendFinished(c.clientFinished[:]); err != nil {
- return err
- }
- if _, err := c.flush(); err != nil {
- return err
- }
- } else {
- if err := hs.doFullHandshake(); err != nil {
- return err
- }
- if err := hs.establishKeys(); err != nil {
- return err
- }
- if err := hs.sendFinished(c.clientFinished[:]); err != nil {
- return err
- }
- if _, err := c.flush(); err != nil {
- return err
- }
- c.clientFinishedIsFirst = true
- if err := hs.readSessionTicket(); err != nil {
- return err
- }
- if err := hs.readFinished(c.serverFinished[:]); err != nil {
- return err
- }
- }
-
- c.ekm = ekmFromMasterSecret(c.vers, hs.suite, hs.masterSecret, hs.hello.random, hs.serverHello.random)
- atomic.StoreUint32(&c.handshakeStatus, 1)
-
- return nil
-}
-
-func (hs *clientHandshakeState) pickCipherSuite() error {
- if hs.suite = mutualCipherSuite(hs.hello.cipherSuites, hs.serverHello.cipherSuite); hs.suite == nil {
- hs.c.sendAlert(alertHandshakeFailure)
- return errors.New("tls: server chose an unconfigured cipher suite")
- }
-
- hs.c.cipherSuite = hs.suite.id
- return nil
-}
-
-func (hs *clientHandshakeState) doFullHandshake() error {
- c := hs.c
-
- msg, err := c.readHandshake()
- if err != nil {
- return err
- }
- certMsg, ok := msg.(*certificateMsg)
- if !ok || len(certMsg.certificates) == 0 {
- c.sendAlert(alertUnexpectedMessage)
- return unexpectedMessageError(certMsg, msg)
- }
- hs.finishedHash.Write(certMsg.marshal())
-
- msg, err = c.readHandshake()
- if err != nil {
- return err
- }
-
- cs, ok := msg.(*certificateStatusMsg)
- if ok {
- // RFC4366 on Certificate Status Request:
- // The server MAY return a "certificate_status" message.
-
- if !hs.serverHello.ocspStapling {
- // If a server returns a "CertificateStatus" message, then the
- // server MUST have included an extension of type "status_request"
- // with empty "extension_data" in the extended server hello.
-
- c.sendAlert(alertUnexpectedMessage)
- return errors.New("tls: received unexpected CertificateStatus message")
- }
- hs.finishedHash.Write(cs.marshal())
-
- c.ocspResponse = cs.response
-
- msg, err = c.readHandshake()
- if err != nil {
- return err
- }
- }
-
- if c.handshakes == 0 {
- // If this is the first handshake on a connection, process and
- // (optionally) verify the server's certificates.
- if err := c.verifyServerCertificate(certMsg.certificates); err != nil {
- return err
- }
- } else {
- // This is a renegotiation handshake. We require that the
- // server's identity (i.e. leaf certificate) is unchanged and
- // thus any previous trust decision is still valid.
- //
- // See https://mitls.org/pages/attacks/3SHAKE for the
- // motivation behind this requirement.
- if !bytes.Equal(c.peerCertificates[0].Raw, certMsg.certificates[0]) {
- c.sendAlert(alertBadCertificate)
- return errors.New("tls: server's identity changed during renegotiation")
- }
- }
-
- keyAgreement := hs.suite.ka(c.vers)
-
- skx, ok := msg.(*serverKeyExchangeMsg)
- if ok {
- hs.finishedHash.Write(skx.marshal())
- err = keyAgreement.processServerKeyExchange(c.config, hs.hello, hs.serverHello, c.peerCertificates[0], skx)
- if err != nil {
- c.sendAlert(alertUnexpectedMessage)
- return err
- }
-
- msg, err = c.readHandshake()
- if err != nil {
- return err
- }
- }
-
- var chainToSend *Certificate
- var certRequested bool
- certReq, ok := msg.(*certificateRequestMsg)
- if ok {
- certRequested = true
- hs.finishedHash.Write(certReq.marshal())
-
- cri := certificateRequestInfoFromMsg(c.vers, certReq)
- if chainToSend, err = c.getClientCertificate(cri); err != nil {
- c.sendAlert(alertInternalError)
- return err
- }
-
- msg, err = c.readHandshake()
- if err != nil {
- return err
- }
- }
-
- shd, ok := msg.(*serverHelloDoneMsg)
- if !ok {
- c.sendAlert(alertUnexpectedMessage)
- return unexpectedMessageError(shd, msg)
- }
- hs.finishedHash.Write(shd.marshal())
-
- // If the server requested a certificate then we have to send a
- // Certificate message, even if it's empty because we don't have a
- // certificate to send.
- if certRequested {
- certMsg = new(certificateMsg)
- certMsg.certificates = chainToSend.Certificate
- hs.finishedHash.Write(certMsg.marshal())
- if _, err := c.writeRecord(recordTypeHandshake, certMsg.marshal()); err != nil {
- return err
- }
- }
-
- preMasterSecret, ckx, err := keyAgreement.generateClientKeyExchange(c.config, hs.hello, c.peerCertificates[0])
- if err != nil {
- c.sendAlert(alertInternalError)
- return err
- }
- if ckx != nil {
- hs.finishedHash.Write(ckx.marshal())
- if _, err := c.writeRecord(recordTypeHandshake, ckx.marshal()); err != nil {
- return err
- }
- }
-
- if chainToSend != nil && len(chainToSend.Certificate) > 0 {
- certVerify := &certificateVerifyMsg{}
-
- key, ok := chainToSend.PrivateKey.(crypto.Signer)
- if !ok {
- c.sendAlert(alertInternalError)
- return fmt.Errorf("tls: client certificate private key of type %T does not implement crypto.Signer", chainToSend.PrivateKey)
- }
-
- var sigType uint8
- var sigHash crypto.Hash
- if c.vers >= VersionTLS12 {
- signatureAlgorithm, err := selectSignatureScheme(c.vers, chainToSend, certReq.supportedSignatureAlgorithms)
- if err != nil {
- c.sendAlert(alertIllegalParameter)
- return err
- }
- sigType, sigHash, err = typeAndHashFromSignatureScheme(signatureAlgorithm)
- if err != nil {
- return c.sendAlert(alertInternalError)
- }
- certVerify.hasSignatureAlgorithm = true
- certVerify.signatureAlgorithm = signatureAlgorithm
- } else {
- sigType, sigHash, err = legacyTypeAndHashFromPublicKey(key.Public())
- if err != nil {
- c.sendAlert(alertIllegalParameter)
- return err
- }
- }
-
- signed := hs.finishedHash.hashForClientCertificate(sigType, sigHash, hs.masterSecret)
- signOpts := crypto.SignerOpts(sigHash)
- if sigType == signatureRSAPSS {
- signOpts = &rsa.PSSOptions{SaltLength: rsa.PSSSaltLengthEqualsHash, Hash: sigHash}
- }
- certVerify.signature, err = key.Sign(c.config.rand(), signed, signOpts)
- if err != nil {
- c.sendAlert(alertInternalError)
- return err
- }
-
- hs.finishedHash.Write(certVerify.marshal())
- if _, err := c.writeRecord(recordTypeHandshake, certVerify.marshal()); err != nil {
- return err
- }
- }
-
- hs.masterSecret = masterFromPreMasterSecret(c.vers, hs.suite, preMasterSecret, hs.hello.random, hs.serverHello.random)
- if err := c.config.writeKeyLog(keyLogLabelTLS12, hs.hello.random, hs.masterSecret); err != nil {
- c.sendAlert(alertInternalError)
- return errors.New("tls: failed to write to key log: " + err.Error())
- }
-
- hs.finishedHash.discardHandshakeBuffer()
-
- return nil
-}
-
-func (hs *clientHandshakeState) establishKeys() error {
- c := hs.c
-
- clientMAC, serverMAC, clientKey, serverKey, clientIV, serverIV :=
- keysFromMasterSecret(c.vers, hs.suite, hs.masterSecret, hs.hello.random, hs.serverHello.random, hs.suite.macLen, hs.suite.keyLen, hs.suite.ivLen)
- var clientCipher, serverCipher interface{}
- var clientHash, serverHash hash.Hash
- if hs.suite.cipher != nil {
- clientCipher = hs.suite.cipher(clientKey, clientIV, false /* not for reading */)
- clientHash = hs.suite.mac(clientMAC)
- serverCipher = hs.suite.cipher(serverKey, serverIV, true /* for reading */)
- serverHash = hs.suite.mac(serverMAC)
- } else {
- clientCipher = hs.suite.aead(clientKey, clientIV)
- serverCipher = hs.suite.aead(serverKey, serverIV)
- }
-
- c.in.prepareCipherSpec(c.vers, serverCipher, serverHash)
- c.out.prepareCipherSpec(c.vers, clientCipher, clientHash)
- return nil
-}
-
-func (hs *clientHandshakeState) serverResumedSession() bool {
- // If the server responded with the same sessionId then it means the
- // sessionTicket is being used to resume a TLS session.
- return hs.session != nil && hs.hello.sessionId != nil &&
- bytes.Equal(hs.serverHello.sessionId, hs.hello.sessionId)
-}
-
-func (hs *clientHandshakeState) processServerHello() (bool, error) {
- c := hs.c
-
- if err := hs.pickCipherSuite(); err != nil {
- return false, err
- }
-
- if hs.serverHello.compressionMethod != compressionNone {
- c.sendAlert(alertUnexpectedMessage)
- return false, errors.New("tls: server selected unsupported compression format")
- }
-
- if c.handshakes == 0 && hs.serverHello.secureRenegotiationSupported {
- c.secureRenegotiation = true
- if len(hs.serverHello.secureRenegotiation) != 0 {
- c.sendAlert(alertHandshakeFailure)
- return false, errors.New("tls: initial handshake had non-empty renegotiation extension")
- }
- }
-
- if c.handshakes > 0 && c.secureRenegotiation {
- var expectedSecureRenegotiation [24]byte
- copy(expectedSecureRenegotiation[:], c.clientFinished[:])
- copy(expectedSecureRenegotiation[12:], c.serverFinished[:])
- if !bytes.Equal(hs.serverHello.secureRenegotiation, expectedSecureRenegotiation[:]) {
- c.sendAlert(alertHandshakeFailure)
- return false, errors.New("tls: incorrect renegotiation extension contents")
- }
- }
-
- if hs.serverHello.alpnProtocol != "" {
- if len(hs.hello.alpnProtocols) == 0 {
- c.sendAlert(alertUnsupportedExtension)
- return false, errors.New("tls: server advertised unrequested ALPN extension")
- }
- if mutualProtocol([]string{hs.serverHello.alpnProtocol}, hs.hello.alpnProtocols) == "" {
- c.sendAlert(alertUnsupportedExtension)
- return false, errors.New("tls: server selected unadvertised ALPN protocol")
- }
- c.clientProtocol = hs.serverHello.alpnProtocol
- }
-
- c.scts = hs.serverHello.scts
-
- if !hs.serverResumedSession() {
- return false, nil
- }
-
- if hs.session.vers != c.vers {
- c.sendAlert(alertHandshakeFailure)
- return false, errors.New("tls: server resumed a session with a different version")
- }
-
- if hs.session.cipherSuite != hs.suite.id {
- c.sendAlert(alertHandshakeFailure)
- return false, errors.New("tls: server resumed a session with a different cipher suite")
- }
-
- // Restore masterSecret, peerCerts, and ocspResponse from previous state
- hs.masterSecret = hs.session.masterSecret
- c.peerCertificates = hs.session.serverCertificates
- c.verifiedChains = hs.session.verifiedChains
- c.ocspResponse = hs.session.ocspResponse
- // Let the ServerHello SCTs override the session SCTs from the original
- // connection, if any are provided
- if len(c.scts) == 0 && len(hs.session.scts) != 0 {
- c.scts = hs.session.scts
- }
-
- return true, nil
-}
-
-func (hs *clientHandshakeState) readFinished(out []byte) error {
- c := hs.c
-
- if err := c.readChangeCipherSpec(); err != nil {
- return err
- }
-
- msg, err := c.readHandshake()
- if err != nil {
- return err
- }
- serverFinished, ok := msg.(*finishedMsg)
- if !ok {
- c.sendAlert(alertUnexpectedMessage)
- return unexpectedMessageError(serverFinished, msg)
- }
-
- verify := hs.finishedHash.serverSum(hs.masterSecret)
- if len(verify) != len(serverFinished.verifyData) ||
- subtle.ConstantTimeCompare(verify, serverFinished.verifyData) != 1 {
- c.sendAlert(alertHandshakeFailure)
- return errors.New("tls: server's Finished message was incorrect")
- }
- hs.finishedHash.Write(serverFinished.marshal())
- copy(out, verify)
- return nil
-}
-
-func (hs *clientHandshakeState) readSessionTicket() error {
- if !hs.serverHello.ticketSupported {
- return nil
- }
-
- c := hs.c
- msg, err := c.readHandshake()
- if err != nil {
- return err
- }
- sessionTicketMsg, ok := msg.(*newSessionTicketMsg)
- if !ok {
- c.sendAlert(alertUnexpectedMessage)
- return unexpectedMessageError(sessionTicketMsg, msg)
- }
- hs.finishedHash.Write(sessionTicketMsg.marshal())
-
- hs.session = &clientSessionState{
- sessionTicket: sessionTicketMsg.ticket,
- vers: c.vers,
- cipherSuite: hs.suite.id,
- masterSecret: hs.masterSecret,
- serverCertificates: c.peerCertificates,
- verifiedChains: c.verifiedChains,
- receivedAt: c.config.time(),
- ocspResponse: c.ocspResponse,
- scts: c.scts,
- }
-
- return nil
-}
-
-func (hs *clientHandshakeState) sendFinished(out []byte) error {
- c := hs.c
-
- if _, err := c.writeRecord(recordTypeChangeCipherSpec, []byte{1}); err != nil {
- return err
- }
-
- finished := new(finishedMsg)
- finished.verifyData = hs.finishedHash.clientSum(hs.masterSecret)
- hs.finishedHash.Write(finished.marshal())
- if _, err := c.writeRecord(recordTypeHandshake, finished.marshal()); err != nil {
- return err
- }
- copy(out, finished.verifyData)
- return nil
-}
-
-// verifyServerCertificate parses and verifies the provided chain, setting
-// c.verifiedChains and c.peerCertificates or sending the appropriate alert.
-func (c *Conn) verifyServerCertificate(certificates [][]byte) error {
- certs := make([]*x509.Certificate, len(certificates))
- for i, asn1Data := range certificates {
- cert, err := x509.ParseCertificate(asn1Data)
- if err != nil {
- c.sendAlert(alertBadCertificate)
- return errors.New("tls: failed to parse certificate from server: " + err.Error())
- }
- certs[i] = cert
- }
-
- if !c.config.InsecureSkipVerify {
- opts := x509.VerifyOptions{
- Roots: c.config.RootCAs,
- CurrentTime: c.config.time(),
- DNSName: c.config.ServerName,
- Intermediates: x509.NewCertPool(),
- }
- for _, cert := range certs[1:] {
- opts.Intermediates.AddCert(cert)
- }
- var err error
- c.verifiedChains, err = certs[0].Verify(opts)
- if err != nil {
- c.sendAlert(alertBadCertificate)
- return err
- }
- }
-
- switch certs[0].PublicKey.(type) {
- case *rsa.PublicKey, *ecdsa.PublicKey, ed25519.PublicKey:
- break
- default:
- c.sendAlert(alertUnsupportedCertificate)
- return fmt.Errorf("tls: server's certificate contains an unsupported type of public key: %T", certs[0].PublicKey)
- }
-
- c.peerCertificates = certs
-
- if c.config.VerifyPeerCertificate != nil {
- if err := c.config.VerifyPeerCertificate(certificates, c.verifiedChains); err != nil {
- c.sendAlert(alertBadCertificate)
- return err
- }
- }
-
- if c.config.VerifyConnection != nil {
- if err := c.config.VerifyConnection(c.connectionStateLocked()); err != nil {
- c.sendAlert(alertBadCertificate)
- return err
- }
- }
-
- return nil
-}
-
-// certificateRequestInfoFromMsg generates a CertificateRequestInfo from a TLS
-// <= 1.2 CertificateRequest, making an effort to fill in missing information.
-func certificateRequestInfoFromMsg(vers uint16, certReq *certificateRequestMsg) *CertificateRequestInfo {
- cri := &certificateRequestInfo{
- AcceptableCAs: certReq.certificateAuthorities,
- Version: vers,
- }
-
- var rsaAvail, ecAvail bool
- for _, certType := range certReq.certificateTypes {
- switch certType {
- case certTypeRSASign:
- rsaAvail = true
- case certTypeECDSASign:
- ecAvail = true
- }
- }
-
- if !certReq.hasSignatureAlgorithm {
- // Prior to TLS 1.2, signature schemes did not exist. In this case we
- // make up a list based on the acceptable certificate types, to help
- // GetClientCertificate and SupportsCertificate select the right certificate.
- // The hash part of the SignatureScheme is a lie here, because
- // TLS 1.0 and 1.1 always use MD5+SHA1 for RSA and SHA1 for ECDSA.
- switch {
- case rsaAvail && ecAvail:
- cri.SignatureSchemes = []SignatureScheme{
- ECDSAWithP256AndSHA256, ECDSAWithP384AndSHA384, ECDSAWithP521AndSHA512,
- PKCS1WithSHA256, PKCS1WithSHA384, PKCS1WithSHA512, PKCS1WithSHA1,
- }
- case rsaAvail:
- cri.SignatureSchemes = []SignatureScheme{
- PKCS1WithSHA256, PKCS1WithSHA384, PKCS1WithSHA512, PKCS1WithSHA1,
- }
- case ecAvail:
- cri.SignatureSchemes = []SignatureScheme{
- ECDSAWithP256AndSHA256, ECDSAWithP384AndSHA384, ECDSAWithP521AndSHA512,
- }
- }
- return toCertificateRequestInfo(cri)
- }
-
- // Filter the signature schemes based on the certificate types.
- // See RFC 5246, Section 7.4.4 (where it calls this "somewhat complicated").
- cri.SignatureSchemes = make([]SignatureScheme, 0, len(certReq.supportedSignatureAlgorithms))
- for _, sigScheme := range certReq.supportedSignatureAlgorithms {
- sigType, _, err := typeAndHashFromSignatureScheme(sigScheme)
- if err != nil {
- continue
- }
- switch sigType {
- case signatureECDSA, signatureEd25519:
- if ecAvail {
- cri.SignatureSchemes = append(cri.SignatureSchemes, sigScheme)
- }
- case signatureRSAPSS, signaturePKCS1v15:
- if rsaAvail {
- cri.SignatureSchemes = append(cri.SignatureSchemes, sigScheme)
- }
- }
- }
-
- return toCertificateRequestInfo(cri)
-}
-
-func (c *Conn) getClientCertificate(cri *CertificateRequestInfo) (*Certificate, error) {
- if c.config.GetClientCertificate != nil {
- return c.config.GetClientCertificate(cri)
- }
-
- for _, chain := range c.config.Certificates {
- if err := cri.SupportsCertificate(&chain); err != nil {
- continue
- }
- return &chain, nil
- }
-
- // No acceptable certificate found. Don't send a certificate.
- return new(Certificate), nil
-}
-
-const clientSessionCacheKeyPrefix = "qtls-"
-
-// clientSessionCacheKey returns a key used to cache sessionTickets that could
-// be used to resume previously negotiated TLS sessions with a server.
-func clientSessionCacheKey(serverAddr net.Addr, config *config) string {
- if len(config.ServerName) > 0 {
- return clientSessionCacheKeyPrefix + config.ServerName
- }
- return clientSessionCacheKeyPrefix + serverAddr.String()
-}
-
-// mutualProtocol finds the mutual ALPN protocol given list of possible
-// protocols and a list of the preference order.
-func mutualProtocol(protos, preferenceProtos []string) string {
- for _, s := range preferenceProtos {
- for _, c := range protos {
- if s == c {
- return s
- }
- }
- }
- return ""
-}
-
-// hostnameInSNI converts name into an appropriate hostname for SNI.
-// Literal IP addresses and absolute FQDNs are not permitted as SNI values.
-// See RFC 6066, Section 3.
-func hostnameInSNI(name string) string {
- host := name
- if len(host) > 0 && host[0] == '[' && host[len(host)-1] == ']' {
- host = host[1 : len(host)-1]
- }
- if i := strings.LastIndex(host, "%"); i > 0 {
- host = host[:i]
- }
- if net.ParseIP(host) != nil {
- return ""
- }
- for len(name) > 0 && name[len(name)-1] == '.' {
- name = name[:len(name)-1]
- }
- return name
-}
diff --git a/vendor/github.com/marten-seemann/qtls-go1-16/handshake_client_tls13.go b/vendor/github.com/marten-seemann/qtls-go1-16/handshake_client_tls13.go
deleted file mode 100644
index fb70cec06..000000000
--- a/vendor/github.com/marten-seemann/qtls-go1-16/handshake_client_tls13.go
+++ /dev/null
@@ -1,740 +0,0 @@
-// Copyright 2018 The Go Authors. All rights reserved.
-// Use of this source code is governed by a BSD-style
-// license that can be found in the LICENSE file.
-
-package qtls
-
-import (
- "bytes"
- "crypto"
- "crypto/hmac"
- "crypto/rsa"
- "encoding/binary"
- "errors"
- "fmt"
- "hash"
- "sync/atomic"
- "time"
-
- "golang.org/x/crypto/cryptobyte"
-)
-
-type clientHandshakeStateTLS13 struct {
- c *Conn
- serverHello *serverHelloMsg
- hello *clientHelloMsg
- ecdheParams ecdheParameters
-
- session *clientSessionState
- earlySecret []byte
- binderKey []byte
-
- certReq *certificateRequestMsgTLS13
- usingPSK bool
- sentDummyCCS bool
- suite *cipherSuiteTLS13
- transcript hash.Hash
- masterSecret []byte
- trafficSecret []byte // client_application_traffic_secret_0
-}
-
-// handshake requires hs.c, hs.hello, hs.serverHello, hs.ecdheParams, and,
-// optionally, hs.session, hs.earlySecret and hs.binderKey to be set.
-func (hs *clientHandshakeStateTLS13) handshake() error {
- c := hs.c
-
- // The server must not select TLS 1.3 in a renegotiation. See RFC 8446,
- // sections 4.1.2 and 4.1.3.
- if c.handshakes > 0 {
- c.sendAlert(alertProtocolVersion)
- return errors.New("tls: server selected TLS 1.3 in a renegotiation")
- }
-
- // Consistency check on the presence of a keyShare and its parameters.
- if hs.ecdheParams == nil || len(hs.hello.keyShares) != 1 {
- return c.sendAlert(alertInternalError)
- }
-
- if err := hs.checkServerHelloOrHRR(); err != nil {
- return err
- }
-
- hs.transcript = hs.suite.hash.New()
- hs.transcript.Write(hs.hello.marshal())
-
- if bytes.Equal(hs.serverHello.random, helloRetryRequestRandom) {
- if err := hs.sendDummyChangeCipherSpec(); err != nil {
- return err
- }
- if err := hs.processHelloRetryRequest(); err != nil {
- return err
- }
- }
-
- hs.transcript.Write(hs.serverHello.marshal())
-
- c.buffering = true
- if err := hs.processServerHello(); err != nil {
- return err
- }
- if err := hs.sendDummyChangeCipherSpec(); err != nil {
- return err
- }
- if err := hs.establishHandshakeKeys(); err != nil {
- return err
- }
- if err := hs.readServerParameters(); err != nil {
- return err
- }
- if err := hs.readServerCertificate(); err != nil {
- return err
- }
- if err := hs.readServerFinished(); err != nil {
- return err
- }
- if err := hs.sendClientCertificate(); err != nil {
- return err
- }
- if err := hs.sendClientFinished(); err != nil {
- return err
- }
- if _, err := c.flush(); err != nil {
- return err
- }
-
- atomic.StoreUint32(&c.handshakeStatus, 1)
-
- return nil
-}
-
-// checkServerHelloOrHRR does validity checks that apply to both ServerHello and
-// HelloRetryRequest messages. It sets hs.suite.
-func (hs *clientHandshakeStateTLS13) checkServerHelloOrHRR() error {
- c := hs.c
-
- if hs.serverHello.supportedVersion == 0 {
- c.sendAlert(alertMissingExtension)
- return errors.New("tls: server selected TLS 1.3 using the legacy version field")
- }
-
- if hs.serverHello.supportedVersion != VersionTLS13 {
- c.sendAlert(alertIllegalParameter)
- return errors.New("tls: server selected an invalid version after a HelloRetryRequest")
- }
-
- if hs.serverHello.vers != VersionTLS12 {
- c.sendAlert(alertIllegalParameter)
- return errors.New("tls: server sent an incorrect legacy version")
- }
-
- if hs.serverHello.ocspStapling ||
- hs.serverHello.ticketSupported ||
- hs.serverHello.secureRenegotiationSupported ||
- len(hs.serverHello.secureRenegotiation) != 0 ||
- len(hs.serverHello.alpnProtocol) != 0 ||
- len(hs.serverHello.scts) != 0 {
- c.sendAlert(alertUnsupportedExtension)
- return errors.New("tls: server sent a ServerHello extension forbidden in TLS 1.3")
- }
-
- if !bytes.Equal(hs.hello.sessionId, hs.serverHello.sessionId) {
- c.sendAlert(alertIllegalParameter)
- return errors.New("tls: server did not echo the legacy session ID")
- }
-
- if hs.serverHello.compressionMethod != compressionNone {
- c.sendAlert(alertIllegalParameter)
- return errors.New("tls: server selected unsupported compression format")
- }
-
- selectedSuite := mutualCipherSuiteTLS13(hs.hello.cipherSuites, hs.serverHello.cipherSuite)
- if hs.suite != nil && selectedSuite != hs.suite {
- c.sendAlert(alertIllegalParameter)
- return errors.New("tls: server changed cipher suite after a HelloRetryRequest")
- }
- if selectedSuite == nil {
- c.sendAlert(alertIllegalParameter)
- return errors.New("tls: server chose an unconfigured cipher suite")
- }
- hs.suite = selectedSuite
- c.cipherSuite = hs.suite.id
-
- return nil
-}
-
-// sendDummyChangeCipherSpec sends a ChangeCipherSpec record for compatibility
-// with middleboxes that didn't implement TLS correctly. See RFC 8446, Appendix D.4.
-func (hs *clientHandshakeStateTLS13) sendDummyChangeCipherSpec() error {
- if hs.sentDummyCCS {
- return nil
- }
- hs.sentDummyCCS = true
-
- _, err := hs.c.writeRecord(recordTypeChangeCipherSpec, []byte{1})
- return err
-}
-
-// processHelloRetryRequest handles the HRR in hs.serverHello, modifies and
-// resends hs.hello, and reads the new ServerHello into hs.serverHello.
-func (hs *clientHandshakeStateTLS13) processHelloRetryRequest() error {
- c := hs.c
-
- // The first ClientHello gets double-hashed into the transcript upon a
- // HelloRetryRequest. (The idea is that the server might offload transcript
- // storage to the client in the cookie.) See RFC 8446, Section 4.4.1.
- chHash := hs.transcript.Sum(nil)
- hs.transcript.Reset()
- hs.transcript.Write([]byte{typeMessageHash, 0, 0, uint8(len(chHash))})
- hs.transcript.Write(chHash)
- hs.transcript.Write(hs.serverHello.marshal())
-
- // The only HelloRetryRequest extensions we support are key_share and
- // cookie, and clients must abort the handshake if the HRR would not result
- // in any change in the ClientHello.
- if hs.serverHello.selectedGroup == 0 && hs.serverHello.cookie == nil {
- c.sendAlert(alertIllegalParameter)
- return errors.New("tls: server sent an unnecessary HelloRetryRequest message")
- }
-
- if hs.serverHello.cookie != nil {
- hs.hello.cookie = hs.serverHello.cookie
- }
-
- if hs.serverHello.serverShare.group != 0 {
- c.sendAlert(alertDecodeError)
- return errors.New("tls: received malformed key_share extension")
- }
-
- // If the server sent a key_share extension selecting a group, ensure it's
- // a group we advertised but did not send a key share for, and send a key
- // share for it this time.
- if curveID := hs.serverHello.selectedGroup; curveID != 0 {
- curveOK := false
- for _, id := range hs.hello.supportedCurves {
- if id == curveID {
- curveOK = true
- break
- }
- }
- if !curveOK {
- c.sendAlert(alertIllegalParameter)
- return errors.New("tls: server selected unsupported group")
- }
- if hs.ecdheParams.CurveID() == curveID {
- c.sendAlert(alertIllegalParameter)
- return errors.New("tls: server sent an unnecessary HelloRetryRequest key_share")
- }
- if _, ok := curveForCurveID(curveID); curveID != X25519 && !ok {
- c.sendAlert(alertInternalError)
- return errors.New("tls: CurvePreferences includes unsupported curve")
- }
- params, err := generateECDHEParameters(c.config.rand(), curveID)
- if err != nil {
- c.sendAlert(alertInternalError)
- return err
- }
- hs.ecdheParams = params
- hs.hello.keyShares = []keyShare{{group: curveID, data: params.PublicKey()}}
- }
-
- hs.hello.raw = nil
- if len(hs.hello.pskIdentities) > 0 {
- pskSuite := cipherSuiteTLS13ByID(hs.session.cipherSuite)
- if pskSuite == nil {
- return c.sendAlert(alertInternalError)
- }
- if pskSuite.hash == hs.suite.hash {
- // Update binders and obfuscated_ticket_age.
- ticketAge := uint32(c.config.time().Sub(hs.session.receivedAt) / time.Millisecond)
- hs.hello.pskIdentities[0].obfuscatedTicketAge = ticketAge + hs.session.ageAdd
-
- transcript := hs.suite.hash.New()
- transcript.Write([]byte{typeMessageHash, 0, 0, uint8(len(chHash))})
- transcript.Write(chHash)
- transcript.Write(hs.serverHello.marshal())
- transcript.Write(hs.hello.marshalWithoutBinders())
- pskBinders := [][]byte{hs.suite.finishedHash(hs.binderKey, transcript)}
- hs.hello.updateBinders(pskBinders)
- } else {
- // Server selected a cipher suite incompatible with the PSK.
- hs.hello.pskIdentities = nil
- hs.hello.pskBinders = nil
- }
- }
-
- if hs.hello.earlyData && c.extraConfig != nil && c.extraConfig.Rejected0RTT != nil {
- c.extraConfig.Rejected0RTT()
- }
- hs.hello.earlyData = false // disable 0-RTT
-
- hs.transcript.Write(hs.hello.marshal())
- if _, err := c.writeRecord(recordTypeHandshake, hs.hello.marshal()); err != nil {
- return err
- }
-
- msg, err := c.readHandshake()
- if err != nil {
- return err
- }
-
- serverHello, ok := msg.(*serverHelloMsg)
- if !ok {
- c.sendAlert(alertUnexpectedMessage)
- return unexpectedMessageError(serverHello, msg)
- }
- hs.serverHello = serverHello
-
- if err := hs.checkServerHelloOrHRR(); err != nil {
- return err
- }
-
- return nil
-}
-
-func (hs *clientHandshakeStateTLS13) processServerHello() error {
- c := hs.c
-
- if bytes.Equal(hs.serverHello.random, helloRetryRequestRandom) {
- c.sendAlert(alertUnexpectedMessage)
- return errors.New("tls: server sent two HelloRetryRequest messages")
- }
-
- if len(hs.serverHello.cookie) != 0 {
- c.sendAlert(alertUnsupportedExtension)
- return errors.New("tls: server sent a cookie in a normal ServerHello")
- }
-
- if hs.serverHello.selectedGroup != 0 {
- c.sendAlert(alertDecodeError)
- return errors.New("tls: malformed key_share extension")
- }
-
- if hs.serverHello.serverShare.group == 0 {
- c.sendAlert(alertIllegalParameter)
- return errors.New("tls: server did not send a key share")
- }
- if hs.serverHello.serverShare.group != hs.ecdheParams.CurveID() {
- c.sendAlert(alertIllegalParameter)
- return errors.New("tls: server selected unsupported group")
- }
-
- if !hs.serverHello.selectedIdentityPresent {
- return nil
- }
-
- if int(hs.serverHello.selectedIdentity) >= len(hs.hello.pskIdentities) {
- c.sendAlert(alertIllegalParameter)
- return errors.New("tls: server selected an invalid PSK")
- }
-
- if len(hs.hello.pskIdentities) != 1 || hs.session == nil {
- return c.sendAlert(alertInternalError)
- }
- pskSuite := cipherSuiteTLS13ByID(hs.session.cipherSuite)
- if pskSuite == nil {
- return c.sendAlert(alertInternalError)
- }
- if pskSuite.hash != hs.suite.hash {
- c.sendAlert(alertIllegalParameter)
- return errors.New("tls: server selected an invalid PSK and cipher suite pair")
- }
-
- hs.usingPSK = true
- c.didResume = true
- c.peerCertificates = hs.session.serverCertificates
- c.verifiedChains = hs.session.verifiedChains
- c.ocspResponse = hs.session.ocspResponse
- c.scts = hs.session.scts
- return nil
-}
-
-func (hs *clientHandshakeStateTLS13) establishHandshakeKeys() error {
- c := hs.c
-
- sharedKey := hs.ecdheParams.SharedKey(hs.serverHello.serverShare.data)
- if sharedKey == nil {
- c.sendAlert(alertIllegalParameter)
- return errors.New("tls: invalid server key share")
- }
-
- earlySecret := hs.earlySecret
- if !hs.usingPSK {
- earlySecret = hs.suite.extract(nil, nil)
- }
- handshakeSecret := hs.suite.extract(sharedKey,
- hs.suite.deriveSecret(earlySecret, "derived", nil))
-
- clientSecret := hs.suite.deriveSecret(handshakeSecret,
- clientHandshakeTrafficLabel, hs.transcript)
- c.out.exportKey(EncryptionHandshake, hs.suite, clientSecret)
- c.out.setTrafficSecret(hs.suite, clientSecret)
- serverSecret := hs.suite.deriveSecret(handshakeSecret,
- serverHandshakeTrafficLabel, hs.transcript)
- c.in.exportKey(EncryptionHandshake, hs.suite, serverSecret)
- c.in.setTrafficSecret(hs.suite, serverSecret)
-
- err := c.config.writeKeyLog(keyLogLabelClientHandshake, hs.hello.random, clientSecret)
- if err != nil {
- c.sendAlert(alertInternalError)
- return err
- }
- err = c.config.writeKeyLog(keyLogLabelServerHandshake, hs.hello.random, serverSecret)
- if err != nil {
- c.sendAlert(alertInternalError)
- return err
- }
-
- hs.masterSecret = hs.suite.extract(nil,
- hs.suite.deriveSecret(handshakeSecret, "derived", nil))
-
- return nil
-}
-
-func (hs *clientHandshakeStateTLS13) readServerParameters() error {
- c := hs.c
-
- msg, err := c.readHandshake()
- if err != nil {
- return err
- }
-
- encryptedExtensions, ok := msg.(*encryptedExtensionsMsg)
- if !ok {
- c.sendAlert(alertUnexpectedMessage)
- return unexpectedMessageError(encryptedExtensions, msg)
- }
- // Notify the caller if 0-RTT was rejected.
- if !encryptedExtensions.earlyData && hs.hello.earlyData && c.extraConfig != nil && c.extraConfig.Rejected0RTT != nil {
- c.extraConfig.Rejected0RTT()
- }
- c.used0RTT = encryptedExtensions.earlyData
- if hs.c.extraConfig != nil && hs.c.extraConfig.ReceivedExtensions != nil {
- hs.c.extraConfig.ReceivedExtensions(typeEncryptedExtensions, encryptedExtensions.additionalExtensions)
- }
- hs.transcript.Write(encryptedExtensions.marshal())
-
- if c.extraConfig != nil && c.extraConfig.EnforceNextProtoSelection {
- if len(encryptedExtensions.alpnProtocol) == 0 {
- // the server didn't select an ALPN
- c.sendAlert(alertNoApplicationProtocol)
- return errors.New("ALPN negotiation failed. Server didn't offer any protocols")
- }
- if mutualProtocol([]string{encryptedExtensions.alpnProtocol}, hs.c.config.NextProtos) == "" {
- // the protocol selected by the server was not offered
- c.sendAlert(alertNoApplicationProtocol)
- return fmt.Errorf("ALPN negotiation failed. Server offered: %q", encryptedExtensions.alpnProtocol)
- }
- }
- if encryptedExtensions.alpnProtocol != "" {
- if len(hs.hello.alpnProtocols) == 0 {
- c.sendAlert(alertUnsupportedExtension)
- return errors.New("tls: server advertised unrequested ALPN extension")
- }
- if mutualProtocol([]string{encryptedExtensions.alpnProtocol}, hs.hello.alpnProtocols) == "" {
- c.sendAlert(alertUnsupportedExtension)
- return errors.New("tls: server selected unadvertised ALPN protocol")
- }
- c.clientProtocol = encryptedExtensions.alpnProtocol
- }
- return nil
-}
-
-func (hs *clientHandshakeStateTLS13) readServerCertificate() error {
- c := hs.c
-
- // Either a PSK or a certificate is always used, but not both.
- // See RFC 8446, Section 4.1.1.
- if hs.usingPSK {
- // Make sure the connection is still being verified whether or not this
- // is a resumption. Resumptions currently don't reverify certificates so
- // they don't call verifyServerCertificate. See Issue 31641.
- if c.config.VerifyConnection != nil {
- if err := c.config.VerifyConnection(c.connectionStateLocked()); err != nil {
- c.sendAlert(alertBadCertificate)
- return err
- }
- }
- return nil
- }
-
- msg, err := c.readHandshake()
- if err != nil {
- return err
- }
-
- certReq, ok := msg.(*certificateRequestMsgTLS13)
- if ok {
- hs.transcript.Write(certReq.marshal())
-
- hs.certReq = certReq
-
- msg, err = c.readHandshake()
- if err != nil {
- return err
- }
- }
-
- certMsg, ok := msg.(*certificateMsgTLS13)
- if !ok {
- c.sendAlert(alertUnexpectedMessage)
- return unexpectedMessageError(certMsg, msg)
- }
- if len(certMsg.certificate.Certificate) == 0 {
- c.sendAlert(alertDecodeError)
- return errors.New("tls: received empty certificates message")
- }
- hs.transcript.Write(certMsg.marshal())
-
- c.scts = certMsg.certificate.SignedCertificateTimestamps
- c.ocspResponse = certMsg.certificate.OCSPStaple
-
- if err := c.verifyServerCertificate(certMsg.certificate.Certificate); err != nil {
- return err
- }
-
- msg, err = c.readHandshake()
- if err != nil {
- return err
- }
-
- certVerify, ok := msg.(*certificateVerifyMsg)
- if !ok {
- c.sendAlert(alertUnexpectedMessage)
- return unexpectedMessageError(certVerify, msg)
- }
-
- // See RFC 8446, Section 4.4.3.
- if !isSupportedSignatureAlgorithm(certVerify.signatureAlgorithm, supportedSignatureAlgorithms) {
- c.sendAlert(alertIllegalParameter)
- return errors.New("tls: certificate used with invalid signature algorithm")
- }
- sigType, sigHash, err := typeAndHashFromSignatureScheme(certVerify.signatureAlgorithm)
- if err != nil {
- return c.sendAlert(alertInternalError)
- }
- if sigType == signaturePKCS1v15 || sigHash == crypto.SHA1 {
- c.sendAlert(alertIllegalParameter)
- return errors.New("tls: certificate used with invalid signature algorithm")
- }
- signed := signedMessage(sigHash, serverSignatureContext, hs.transcript)
- if err := verifyHandshakeSignature(sigType, c.peerCertificates[0].PublicKey,
- sigHash, signed, certVerify.signature); err != nil {
- c.sendAlert(alertDecryptError)
- return errors.New("tls: invalid signature by the server certificate: " + err.Error())
- }
-
- hs.transcript.Write(certVerify.marshal())
-
- return nil
-}
-
-func (hs *clientHandshakeStateTLS13) readServerFinished() error {
- c := hs.c
-
- msg, err := c.readHandshake()
- if err != nil {
- return err
- }
-
- finished, ok := msg.(*finishedMsg)
- if !ok {
- c.sendAlert(alertUnexpectedMessage)
- return unexpectedMessageError(finished, msg)
- }
-
- expectedMAC := hs.suite.finishedHash(c.in.trafficSecret, hs.transcript)
- if !hmac.Equal(expectedMAC, finished.verifyData) {
- c.sendAlert(alertDecryptError)
- return errors.New("tls: invalid server finished hash")
- }
-
- hs.transcript.Write(finished.marshal())
-
- // Derive secrets that take context through the server Finished.
-
- hs.trafficSecret = hs.suite.deriveSecret(hs.masterSecret,
- clientApplicationTrafficLabel, hs.transcript)
- serverSecret := hs.suite.deriveSecret(hs.masterSecret,
- serverApplicationTrafficLabel, hs.transcript)
- c.in.exportKey(EncryptionApplication, hs.suite, serverSecret)
- c.in.setTrafficSecret(hs.suite, serverSecret)
-
- err = c.config.writeKeyLog(keyLogLabelClientTraffic, hs.hello.random, hs.trafficSecret)
- if err != nil {
- c.sendAlert(alertInternalError)
- return err
- }
- err = c.config.writeKeyLog(keyLogLabelServerTraffic, hs.hello.random, serverSecret)
- if err != nil {
- c.sendAlert(alertInternalError)
- return err
- }
-
- c.ekm = hs.suite.exportKeyingMaterial(hs.masterSecret, hs.transcript)
-
- return nil
-}
-
-func (hs *clientHandshakeStateTLS13) sendClientCertificate() error {
- c := hs.c
-
- if hs.certReq == nil {
- return nil
- }
-
- cert, err := c.getClientCertificate(toCertificateRequestInfo(&certificateRequestInfo{
- AcceptableCAs: hs.certReq.certificateAuthorities,
- SignatureSchemes: hs.certReq.supportedSignatureAlgorithms,
- Version: c.vers,
- }))
- if err != nil {
- return err
- }
-
- certMsg := new(certificateMsgTLS13)
-
- certMsg.certificate = *cert
- certMsg.scts = hs.certReq.scts && len(cert.SignedCertificateTimestamps) > 0
- certMsg.ocspStapling = hs.certReq.ocspStapling && len(cert.OCSPStaple) > 0
-
- hs.transcript.Write(certMsg.marshal())
- if _, err := c.writeRecord(recordTypeHandshake, certMsg.marshal()); err != nil {
- return err
- }
-
- // If we sent an empty certificate message, skip the CertificateVerify.
- if len(cert.Certificate) == 0 {
- return nil
- }
-
- certVerifyMsg := new(certificateVerifyMsg)
- certVerifyMsg.hasSignatureAlgorithm = true
-
- certVerifyMsg.signatureAlgorithm, err = selectSignatureScheme(c.vers, cert, hs.certReq.supportedSignatureAlgorithms)
- if err != nil {
- // getClientCertificate returned a certificate incompatible with the
- // CertificateRequestInfo supported signature algorithms.
- c.sendAlert(alertHandshakeFailure)
- return err
- }
-
- sigType, sigHash, err := typeAndHashFromSignatureScheme(certVerifyMsg.signatureAlgorithm)
- if err != nil {
- return c.sendAlert(alertInternalError)
- }
-
- signed := signedMessage(sigHash, clientSignatureContext, hs.transcript)
- signOpts := crypto.SignerOpts(sigHash)
- if sigType == signatureRSAPSS {
- signOpts = &rsa.PSSOptions{SaltLength: rsa.PSSSaltLengthEqualsHash, Hash: sigHash}
- }
- sig, err := cert.PrivateKey.(crypto.Signer).Sign(c.config.rand(), signed, signOpts)
- if err != nil {
- c.sendAlert(alertInternalError)
- return errors.New("tls: failed to sign handshake: " + err.Error())
- }
- certVerifyMsg.signature = sig
-
- hs.transcript.Write(certVerifyMsg.marshal())
- if _, err := c.writeRecord(recordTypeHandshake, certVerifyMsg.marshal()); err != nil {
- return err
- }
-
- return nil
-}
-
-func (hs *clientHandshakeStateTLS13) sendClientFinished() error {
- c := hs.c
-
- finished := &finishedMsg{
- verifyData: hs.suite.finishedHash(c.out.trafficSecret, hs.transcript),
- }
-
- hs.transcript.Write(finished.marshal())
- if _, err := c.writeRecord(recordTypeHandshake, finished.marshal()); err != nil {
- return err
- }
-
- c.out.exportKey(EncryptionApplication, hs.suite, hs.trafficSecret)
- c.out.setTrafficSecret(hs.suite, hs.trafficSecret)
-
- if !c.config.SessionTicketsDisabled && c.config.ClientSessionCache != nil {
- c.resumptionSecret = hs.suite.deriveSecret(hs.masterSecret,
- resumptionLabel, hs.transcript)
- }
-
- return nil
-}
-
-func (c *Conn) handleNewSessionTicket(msg *newSessionTicketMsgTLS13) error {
- if !c.isClient {
- c.sendAlert(alertUnexpectedMessage)
- return errors.New("tls: received new session ticket from a client")
- }
-
- if c.config.SessionTicketsDisabled || c.config.ClientSessionCache == nil {
- return nil
- }
-
- // See RFC 8446, Section 4.6.1.
- if msg.lifetime == 0 {
- return nil
- }
- lifetime := time.Duration(msg.lifetime) * time.Second
- if lifetime > maxSessionTicketLifetime {
- c.sendAlert(alertIllegalParameter)
- return errors.New("tls: received a session ticket with invalid lifetime")
- }
-
- cipherSuite := cipherSuiteTLS13ByID(c.cipherSuite)
- if cipherSuite == nil || c.resumptionSecret == nil {
- return c.sendAlert(alertInternalError)
- }
-
- // We need to save the max_early_data_size that the server sent us, in order
- // to decide if we're going to try 0-RTT with this ticket.
- // However, at the same time, the qtls.ClientSessionTicket needs to be equal to
- // the tls.ClientSessionTicket, so we can't just add a new field to the struct.
- // We therefore abuse the nonce field (which is a byte slice)
- nonceWithEarlyData := make([]byte, len(msg.nonce)+4)
- binary.BigEndian.PutUint32(nonceWithEarlyData, msg.maxEarlyData)
- copy(nonceWithEarlyData[4:], msg.nonce)
-
- var appData []byte
- if c.extraConfig != nil && c.extraConfig.GetAppDataForSessionState != nil {
- appData = c.extraConfig.GetAppDataForSessionState()
- }
- var b cryptobyte.Builder
- b.AddUint16(clientSessionStateVersion) // revision
- b.AddUint32(msg.maxEarlyData)
- b.AddUint16LengthPrefixed(func(b *cryptobyte.Builder) {
- b.AddBytes(appData)
- })
- b.AddUint16LengthPrefixed(func(b *cryptobyte.Builder) {
- b.AddBytes(msg.nonce)
- })
-
- // Save the resumption_master_secret and nonce instead of deriving the PSK
- // to do the least amount of work on NewSessionTicket messages before we
- // know if the ticket will be used. Forward secrecy of resumed connections
- // is guaranteed by the requirement for pskModeDHE.
- session := &clientSessionState{
- sessionTicket: msg.label,
- vers: c.vers,
- cipherSuite: c.cipherSuite,
- masterSecret: c.resumptionSecret,
- serverCertificates: c.peerCertificates,
- verifiedChains: c.verifiedChains,
- receivedAt: c.config.time(),
- nonce: b.BytesOrPanic(),
- useBy: c.config.time().Add(lifetime),
- ageAdd: msg.ageAdd,
- ocspResponse: c.ocspResponse,
- scts: c.scts,
- }
-
- cacheKey := clientSessionCacheKey(c.conn.RemoteAddr(), c.config)
- c.config.ClientSessionCache.Put(cacheKey, toClientSessionState(session))
-
- return nil
-}
diff --git a/vendor/github.com/marten-seemann/qtls-go1-16/handshake_messages.go b/vendor/github.com/marten-seemann/qtls-go1-16/handshake_messages.go
deleted file mode 100644
index 1ab757626..000000000
--- a/vendor/github.com/marten-seemann/qtls-go1-16/handshake_messages.go
+++ /dev/null
@@ -1,1832 +0,0 @@
-// Copyright 2009 The Go Authors. All rights reserved.
-// Use of this source code is governed by a BSD-style
-// license that can be found in the LICENSE file.
-
-package qtls
-
-import (
- "fmt"
- "strings"
-
- "golang.org/x/crypto/cryptobyte"
-)
-
-// The marshalingFunction type is an adapter to allow the use of ordinary
-// functions as cryptobyte.MarshalingValue.
-type marshalingFunction func(b *cryptobyte.Builder) error
-
-func (f marshalingFunction) Marshal(b *cryptobyte.Builder) error {
- return f(b)
-}
-
-// addBytesWithLength appends a sequence of bytes to the cryptobyte.Builder. If
-// the length of the sequence is not the value specified, it produces an error.
-func addBytesWithLength(b *cryptobyte.Builder, v []byte, n int) {
- b.AddValue(marshalingFunction(func(b *cryptobyte.Builder) error {
- if len(v) != n {
- return fmt.Errorf("invalid value length: expected %d, got %d", n, len(v))
- }
- b.AddBytes(v)
- return nil
- }))
-}
-
-// addUint64 appends a big-endian, 64-bit value to the cryptobyte.Builder.
-func addUint64(b *cryptobyte.Builder, v uint64) {
- b.AddUint32(uint32(v >> 32))
- b.AddUint32(uint32(v))
-}
-
-// readUint64 decodes a big-endian, 64-bit value into out and advances over it.
-// It reports whether the read was successful.
-func readUint64(s *cryptobyte.String, out *uint64) bool {
- var hi, lo uint32
- if !s.ReadUint32(&hi) || !s.ReadUint32(&lo) {
- return false
- }
- *out = uint64(hi)<<32 | uint64(lo)
- return true
-}
-
-// readUint8LengthPrefixed acts like s.ReadUint8LengthPrefixed, but targets a
-// []byte instead of a cryptobyte.String.
-func readUint8LengthPrefixed(s *cryptobyte.String, out *[]byte) bool {
- return s.ReadUint8LengthPrefixed((*cryptobyte.String)(out))
-}
-
-// readUint16LengthPrefixed acts like s.ReadUint16LengthPrefixed, but targets a
-// []byte instead of a cryptobyte.String.
-func readUint16LengthPrefixed(s *cryptobyte.String, out *[]byte) bool {
- return s.ReadUint16LengthPrefixed((*cryptobyte.String)(out))
-}
-
-// readUint24LengthPrefixed acts like s.ReadUint24LengthPrefixed, but targets a
-// []byte instead of a cryptobyte.String.
-func readUint24LengthPrefixed(s *cryptobyte.String, out *[]byte) bool {
- return s.ReadUint24LengthPrefixed((*cryptobyte.String)(out))
-}
-
-type clientHelloMsg struct {
- raw []byte
- vers uint16
- random []byte
- sessionId []byte
- cipherSuites []uint16
- compressionMethods []uint8
- serverName string
- ocspStapling bool
- supportedCurves []CurveID
- supportedPoints []uint8
- ticketSupported bool
- sessionTicket []uint8
- supportedSignatureAlgorithms []SignatureScheme
- supportedSignatureAlgorithmsCert []SignatureScheme
- secureRenegotiationSupported bool
- secureRenegotiation []byte
- alpnProtocols []string
- scts bool
- supportedVersions []uint16
- cookie []byte
- keyShares []keyShare
- earlyData bool
- pskModes []uint8
- pskIdentities []pskIdentity
- pskBinders [][]byte
- additionalExtensions []Extension
-}
-
-func (m *clientHelloMsg) marshal() []byte {
- if m.raw != nil {
- return m.raw
- }
-
- var b cryptobyte.Builder
- b.AddUint8(typeClientHello)
- b.AddUint24LengthPrefixed(func(b *cryptobyte.Builder) {
- b.AddUint16(m.vers)
- addBytesWithLength(b, m.random, 32)
- b.AddUint8LengthPrefixed(func(b *cryptobyte.Builder) {
- b.AddBytes(m.sessionId)
- })
- b.AddUint16LengthPrefixed(func(b *cryptobyte.Builder) {
- for _, suite := range m.cipherSuites {
- b.AddUint16(suite)
- }
- })
- b.AddUint8LengthPrefixed(func(b *cryptobyte.Builder) {
- b.AddBytes(m.compressionMethods)
- })
-
- // If extensions aren't present, omit them.
- var extensionsPresent bool
- bWithoutExtensions := *b
-
- b.AddUint16LengthPrefixed(func(b *cryptobyte.Builder) {
- if len(m.serverName) > 0 {
- // RFC 6066, Section 3
- b.AddUint16(extensionServerName)
- b.AddUint16LengthPrefixed(func(b *cryptobyte.Builder) {
- b.AddUint16LengthPrefixed(func(b *cryptobyte.Builder) {
- b.AddUint8(0) // name_type = host_name
- b.AddUint16LengthPrefixed(func(b *cryptobyte.Builder) {
- b.AddBytes([]byte(m.serverName))
- })
- })
- })
- }
- if m.ocspStapling {
- // RFC 4366, Section 3.6
- b.AddUint16(extensionStatusRequest)
- b.AddUint16LengthPrefixed(func(b *cryptobyte.Builder) {
- b.AddUint8(1) // status_type = ocsp
- b.AddUint16(0) // empty responder_id_list
- b.AddUint16(0) // empty request_extensions
- })
- }
- if len(m.supportedCurves) > 0 {
- // RFC 4492, sections 5.1.1 and RFC 8446, Section 4.2.7
- b.AddUint16(extensionSupportedCurves)
- b.AddUint16LengthPrefixed(func(b *cryptobyte.Builder) {
- b.AddUint16LengthPrefixed(func(b *cryptobyte.Builder) {
- for _, curve := range m.supportedCurves {
- b.AddUint16(uint16(curve))
- }
- })
- })
- }
- if len(m.supportedPoints) > 0 {
- // RFC 4492, Section 5.1.2
- b.AddUint16(extensionSupportedPoints)
- b.AddUint16LengthPrefixed(func(b *cryptobyte.Builder) {
- b.AddUint8LengthPrefixed(func(b *cryptobyte.Builder) {
- b.AddBytes(m.supportedPoints)
- })
- })
- }
- if m.ticketSupported {
- // RFC 5077, Section 3.2
- b.AddUint16(extensionSessionTicket)
- b.AddUint16LengthPrefixed(func(b *cryptobyte.Builder) {
- b.AddBytes(m.sessionTicket)
- })
- }
- if len(m.supportedSignatureAlgorithms) > 0 {
- // RFC 5246, Section 7.4.1.4.1
- b.AddUint16(extensionSignatureAlgorithms)
- b.AddUint16LengthPrefixed(func(b *cryptobyte.Builder) {
- b.AddUint16LengthPrefixed(func(b *cryptobyte.Builder) {
- for _, sigAlgo := range m.supportedSignatureAlgorithms {
- b.AddUint16(uint16(sigAlgo))
- }
- })
- })
- }
- if len(m.supportedSignatureAlgorithmsCert) > 0 {
- // RFC 8446, Section 4.2.3
- b.AddUint16(extensionSignatureAlgorithmsCert)
- b.AddUint16LengthPrefixed(func(b *cryptobyte.Builder) {
- b.AddUint16LengthPrefixed(func(b *cryptobyte.Builder) {
- for _, sigAlgo := range m.supportedSignatureAlgorithmsCert {
- b.AddUint16(uint16(sigAlgo))
- }
- })
- })
- }
- if m.secureRenegotiationSupported {
- // RFC 5746, Section 3.2
- b.AddUint16(extensionRenegotiationInfo)
- b.AddUint16LengthPrefixed(func(b *cryptobyte.Builder) {
- b.AddUint8LengthPrefixed(func(b *cryptobyte.Builder) {
- b.AddBytes(m.secureRenegotiation)
- })
- })
- }
- if len(m.alpnProtocols) > 0 {
- // RFC 7301, Section 3.1
- b.AddUint16(extensionALPN)
- b.AddUint16LengthPrefixed(func(b *cryptobyte.Builder) {
- b.AddUint16LengthPrefixed(func(b *cryptobyte.Builder) {
- for _, proto := range m.alpnProtocols {
- b.AddUint8LengthPrefixed(func(b *cryptobyte.Builder) {
- b.AddBytes([]byte(proto))
- })
- }
- })
- })
- }
- if m.scts {
- // RFC 6962, Section 3.3.1
- b.AddUint16(extensionSCT)
- b.AddUint16(0) // empty extension_data
- }
- if len(m.supportedVersions) > 0 {
- // RFC 8446, Section 4.2.1
- b.AddUint16(extensionSupportedVersions)
- b.AddUint16LengthPrefixed(func(b *cryptobyte.Builder) {
- b.AddUint8LengthPrefixed(func(b *cryptobyte.Builder) {
- for _, vers := range m.supportedVersions {
- b.AddUint16(vers)
- }
- })
- })
- }
- if len(m.cookie) > 0 {
- // RFC 8446, Section 4.2.2
- b.AddUint16(extensionCookie)
- b.AddUint16LengthPrefixed(func(b *cryptobyte.Builder) {
- b.AddUint16LengthPrefixed(func(b *cryptobyte.Builder) {
- b.AddBytes(m.cookie)
- })
- })
- }
- if len(m.keyShares) > 0 {
- // RFC 8446, Section 4.2.8
- b.AddUint16(extensionKeyShare)
- b.AddUint16LengthPrefixed(func(b *cryptobyte.Builder) {
- b.AddUint16LengthPrefixed(func(b *cryptobyte.Builder) {
- for _, ks := range m.keyShares {
- b.AddUint16(uint16(ks.group))
- b.AddUint16LengthPrefixed(func(b *cryptobyte.Builder) {
- b.AddBytes(ks.data)
- })
- }
- })
- })
- }
- if m.earlyData {
- // RFC 8446, Section 4.2.10
- b.AddUint16(extensionEarlyData)
- b.AddUint16(0) // empty extension_data
- }
- if len(m.pskModes) > 0 {
- // RFC 8446, Section 4.2.9
- b.AddUint16(extensionPSKModes)
- b.AddUint16LengthPrefixed(func(b *cryptobyte.Builder) {
- b.AddUint8LengthPrefixed(func(b *cryptobyte.Builder) {
- b.AddBytes(m.pskModes)
- })
- })
- }
- for _, ext := range m.additionalExtensions {
- b.AddUint16(ext.Type)
- b.AddUint16LengthPrefixed(func(b *cryptobyte.Builder) {
- b.AddBytes(ext.Data)
- })
- }
- if len(m.pskIdentities) > 0 { // pre_shared_key must be the last extension
- // RFC 8446, Section 4.2.11
- b.AddUint16(extensionPreSharedKey)
- b.AddUint16LengthPrefixed(func(b *cryptobyte.Builder) {
- b.AddUint16LengthPrefixed(func(b *cryptobyte.Builder) {
- for _, psk := range m.pskIdentities {
- b.AddUint16LengthPrefixed(func(b *cryptobyte.Builder) {
- b.AddBytes(psk.label)
- })
- b.AddUint32(psk.obfuscatedTicketAge)
- }
- })
- b.AddUint16LengthPrefixed(func(b *cryptobyte.Builder) {
- for _, binder := range m.pskBinders {
- b.AddUint8LengthPrefixed(func(b *cryptobyte.Builder) {
- b.AddBytes(binder)
- })
- }
- })
- })
- }
-
- extensionsPresent = len(b.BytesOrPanic()) > 2
- })
-
- if !extensionsPresent {
- *b = bWithoutExtensions
- }
- })
-
- m.raw = b.BytesOrPanic()
- return m.raw
-}
-
-// marshalWithoutBinders returns the ClientHello through the
-// PreSharedKeyExtension.identities field, according to RFC 8446, Section
-// 4.2.11.2. Note that m.pskBinders must be set to slices of the correct length.
-func (m *clientHelloMsg) marshalWithoutBinders() []byte {
- bindersLen := 2 // uint16 length prefix
- for _, binder := range m.pskBinders {
- bindersLen += 1 // uint8 length prefix
- bindersLen += len(binder)
- }
-
- fullMessage := m.marshal()
- return fullMessage[:len(fullMessage)-bindersLen]
-}
-
-// updateBinders updates the m.pskBinders field, if necessary updating the
-// cached marshaled representation. The supplied binders must have the same
-// length as the current m.pskBinders.
-func (m *clientHelloMsg) updateBinders(pskBinders [][]byte) {
- if len(pskBinders) != len(m.pskBinders) {
- panic("tls: internal error: pskBinders length mismatch")
- }
- for i := range m.pskBinders {
- if len(pskBinders[i]) != len(m.pskBinders[i]) {
- panic("tls: internal error: pskBinders length mismatch")
- }
- }
- m.pskBinders = pskBinders
- if m.raw != nil {
- lenWithoutBinders := len(m.marshalWithoutBinders())
- // TODO(filippo): replace with NewFixedBuilder once CL 148882 is imported.
- b := cryptobyte.NewBuilder(m.raw[:lenWithoutBinders])
- b.AddUint16LengthPrefixed(func(b *cryptobyte.Builder) {
- for _, binder := range m.pskBinders {
- b.AddUint8LengthPrefixed(func(b *cryptobyte.Builder) {
- b.AddBytes(binder)
- })
- }
- })
- if len(b.BytesOrPanic()) != len(m.raw) {
- panic("tls: internal error: failed to update binders")
- }
- }
-}
-
-func (m *clientHelloMsg) unmarshal(data []byte) bool {
- *m = clientHelloMsg{raw: data}
- s := cryptobyte.String(data)
-
- if !s.Skip(4) || // message type and uint24 length field
- !s.ReadUint16(&m.vers) || !s.ReadBytes(&m.random, 32) ||
- !readUint8LengthPrefixed(&s, &m.sessionId) {
- return false
- }
-
- var cipherSuites cryptobyte.String
- if !s.ReadUint16LengthPrefixed(&cipherSuites) {
- return false
- }
- m.cipherSuites = []uint16{}
- m.secureRenegotiationSupported = false
- for !cipherSuites.Empty() {
- var suite uint16
- if !cipherSuites.ReadUint16(&suite) {
- return false
- }
- if suite == scsvRenegotiation {
- m.secureRenegotiationSupported = true
- }
- m.cipherSuites = append(m.cipherSuites, suite)
- }
-
- if !readUint8LengthPrefixed(&s, &m.compressionMethods) {
- return false
- }
-
- if s.Empty() {
- // ClientHello is optionally followed by extension data
- return true
- }
-
- var extensions cryptobyte.String
- if !s.ReadUint16LengthPrefixed(&extensions) || !s.Empty() {
- return false
- }
-
- for !extensions.Empty() {
- var ext uint16
- var extData cryptobyte.String
- if !extensions.ReadUint16(&ext) ||
- !extensions.ReadUint16LengthPrefixed(&extData) {
- return false
- }
-
- switch ext {
- case extensionServerName:
- // RFC 6066, Section 3
- var nameList cryptobyte.String
- if !extData.ReadUint16LengthPrefixed(&nameList) || nameList.Empty() {
- return false
- }
- for !nameList.Empty() {
- var nameType uint8
- var serverName cryptobyte.String
- if !nameList.ReadUint8(&nameType) ||
- !nameList.ReadUint16LengthPrefixed(&serverName) ||
- serverName.Empty() {
- return false
- }
- if nameType != 0 {
- continue
- }
- if len(m.serverName) != 0 {
- // Multiple names of the same name_type are prohibited.
- return false
- }
- m.serverName = string(serverName)
- // An SNI value may not include a trailing dot.
- if strings.HasSuffix(m.serverName, ".") {
- return false
- }
- }
- case extensionStatusRequest:
- // RFC 4366, Section 3.6
- var statusType uint8
- var ignored cryptobyte.String
- if !extData.ReadUint8(&statusType) ||
- !extData.ReadUint16LengthPrefixed(&ignored) ||
- !extData.ReadUint16LengthPrefixed(&ignored) {
- return false
- }
- m.ocspStapling = statusType == statusTypeOCSP
- case extensionSupportedCurves:
- // RFC 4492, sections 5.1.1 and RFC 8446, Section 4.2.7
- var curves cryptobyte.String
- if !extData.ReadUint16LengthPrefixed(&curves) || curves.Empty() {
- return false
- }
- for !curves.Empty() {
- var curve uint16
- if !curves.ReadUint16(&curve) {
- return false
- }
- m.supportedCurves = append(m.supportedCurves, CurveID(curve))
- }
- case extensionSupportedPoints:
- // RFC 4492, Section 5.1.2
- if !readUint8LengthPrefixed(&extData, &m.supportedPoints) ||
- len(m.supportedPoints) == 0 {
- return false
- }
- case extensionSessionTicket:
- // RFC 5077, Section 3.2
- m.ticketSupported = true
- extData.ReadBytes(&m.sessionTicket, len(extData))
- case extensionSignatureAlgorithms:
- // RFC 5246, Section 7.4.1.4.1
- var sigAndAlgs cryptobyte.String
- if !extData.ReadUint16LengthPrefixed(&sigAndAlgs) || sigAndAlgs.Empty() {
- return false
- }
- for !sigAndAlgs.Empty() {
- var sigAndAlg uint16
- if !sigAndAlgs.ReadUint16(&sigAndAlg) {
- return false
- }
- m.supportedSignatureAlgorithms = append(
- m.supportedSignatureAlgorithms, SignatureScheme(sigAndAlg))
- }
- case extensionSignatureAlgorithmsCert:
- // RFC 8446, Section 4.2.3
- var sigAndAlgs cryptobyte.String
- if !extData.ReadUint16LengthPrefixed(&sigAndAlgs) || sigAndAlgs.Empty() {
- return false
- }
- for !sigAndAlgs.Empty() {
- var sigAndAlg uint16
- if !sigAndAlgs.ReadUint16(&sigAndAlg) {
- return false
- }
- m.supportedSignatureAlgorithmsCert = append(
- m.supportedSignatureAlgorithmsCert, SignatureScheme(sigAndAlg))
- }
- case extensionRenegotiationInfo:
- // RFC 5746, Section 3.2
- if !readUint8LengthPrefixed(&extData, &m.secureRenegotiation) {
- return false
- }
- m.secureRenegotiationSupported = true
- case extensionALPN:
- // RFC 7301, Section 3.1
- var protoList cryptobyte.String
- if !extData.ReadUint16LengthPrefixed(&protoList) || protoList.Empty() {
- return false
- }
- for !protoList.Empty() {
- var proto cryptobyte.String
- if !protoList.ReadUint8LengthPrefixed(&proto) || proto.Empty() {
- return false
- }
- m.alpnProtocols = append(m.alpnProtocols, string(proto))
- }
- case extensionSCT:
- // RFC 6962, Section 3.3.1
- m.scts = true
- case extensionSupportedVersions:
- // RFC 8446, Section 4.2.1
- var versList cryptobyte.String
- if !extData.ReadUint8LengthPrefixed(&versList) || versList.Empty() {
- return false
- }
- for !versList.Empty() {
- var vers uint16
- if !versList.ReadUint16(&vers) {
- return false
- }
- m.supportedVersions = append(m.supportedVersions, vers)
- }
- case extensionCookie:
- // RFC 8446, Section 4.2.2
- if !readUint16LengthPrefixed(&extData, &m.cookie) ||
- len(m.cookie) == 0 {
- return false
- }
- case extensionKeyShare:
- // RFC 8446, Section 4.2.8
- var clientShares cryptobyte.String
- if !extData.ReadUint16LengthPrefixed(&clientShares) {
- return false
- }
- for !clientShares.Empty() {
- var ks keyShare
- if !clientShares.ReadUint16((*uint16)(&ks.group)) ||
- !readUint16LengthPrefixed(&clientShares, &ks.data) ||
- len(ks.data) == 0 {
- return false
- }
- m.keyShares = append(m.keyShares, ks)
- }
- case extensionEarlyData:
- // RFC 8446, Section 4.2.10
- m.earlyData = true
- case extensionPSKModes:
- // RFC 8446, Section 4.2.9
- if !readUint8LengthPrefixed(&extData, &m.pskModes) {
- return false
- }
- case extensionPreSharedKey:
- // RFC 8446, Section 4.2.11
- if !extensions.Empty() {
- return false // pre_shared_key must be the last extension
- }
- var identities cryptobyte.String
- if !extData.ReadUint16LengthPrefixed(&identities) || identities.Empty() {
- return false
- }
- for !identities.Empty() {
- var psk pskIdentity
- if !readUint16LengthPrefixed(&identities, &psk.label) ||
- !identities.ReadUint32(&psk.obfuscatedTicketAge) ||
- len(psk.label) == 0 {
- return false
- }
- m.pskIdentities = append(m.pskIdentities, psk)
- }
- var binders cryptobyte.String
- if !extData.ReadUint16LengthPrefixed(&binders) || binders.Empty() {
- return false
- }
- for !binders.Empty() {
- var binder []byte
- if !readUint8LengthPrefixed(&binders, &binder) ||
- len(binder) == 0 {
- return false
- }
- m.pskBinders = append(m.pskBinders, binder)
- }
- default:
- m.additionalExtensions = append(m.additionalExtensions, Extension{Type: ext, Data: extData})
- continue
- }
-
- if !extData.Empty() {
- return false
- }
- }
-
- return true
-}
-
-type serverHelloMsg struct {
- raw []byte
- vers uint16
- random []byte
- sessionId []byte
- cipherSuite uint16
- compressionMethod uint8
- ocspStapling bool
- ticketSupported bool
- secureRenegotiationSupported bool
- secureRenegotiation []byte
- alpnProtocol string
- scts [][]byte
- supportedVersion uint16
- serverShare keyShare
- selectedIdentityPresent bool
- selectedIdentity uint16
- supportedPoints []uint8
-
- // HelloRetryRequest extensions
- cookie []byte
- selectedGroup CurveID
-}
-
-func (m *serverHelloMsg) marshal() []byte {
- if m.raw != nil {
- return m.raw
- }
-
- var b cryptobyte.Builder
- b.AddUint8(typeServerHello)
- b.AddUint24LengthPrefixed(func(b *cryptobyte.Builder) {
- b.AddUint16(m.vers)
- addBytesWithLength(b, m.random, 32)
- b.AddUint8LengthPrefixed(func(b *cryptobyte.Builder) {
- b.AddBytes(m.sessionId)
- })
- b.AddUint16(m.cipherSuite)
- b.AddUint8(m.compressionMethod)
-
- // If extensions aren't present, omit them.
- var extensionsPresent bool
- bWithoutExtensions := *b
-
- b.AddUint16LengthPrefixed(func(b *cryptobyte.Builder) {
- if m.ocspStapling {
- b.AddUint16(extensionStatusRequest)
- b.AddUint16(0) // empty extension_data
- }
- if m.ticketSupported {
- b.AddUint16(extensionSessionTicket)
- b.AddUint16(0) // empty extension_data
- }
- if m.secureRenegotiationSupported {
- b.AddUint16(extensionRenegotiationInfo)
- b.AddUint16LengthPrefixed(func(b *cryptobyte.Builder) {
- b.AddUint8LengthPrefixed(func(b *cryptobyte.Builder) {
- b.AddBytes(m.secureRenegotiation)
- })
- })
- }
- if len(m.alpnProtocol) > 0 {
- b.AddUint16(extensionALPN)
- b.AddUint16LengthPrefixed(func(b *cryptobyte.Builder) {
- b.AddUint16LengthPrefixed(func(b *cryptobyte.Builder) {
- b.AddUint8LengthPrefixed(func(b *cryptobyte.Builder) {
- b.AddBytes([]byte(m.alpnProtocol))
- })
- })
- })
- }
- if len(m.scts) > 0 {
- b.AddUint16(extensionSCT)
- b.AddUint16LengthPrefixed(func(b *cryptobyte.Builder) {
- b.AddUint16LengthPrefixed(func(b *cryptobyte.Builder) {
- for _, sct := range m.scts {
- b.AddUint16LengthPrefixed(func(b *cryptobyte.Builder) {
- b.AddBytes(sct)
- })
- }
- })
- })
- }
- if m.supportedVersion != 0 {
- b.AddUint16(extensionSupportedVersions)
- b.AddUint16LengthPrefixed(func(b *cryptobyte.Builder) {
- b.AddUint16(m.supportedVersion)
- })
- }
- if m.serverShare.group != 0 {
- b.AddUint16(extensionKeyShare)
- b.AddUint16LengthPrefixed(func(b *cryptobyte.Builder) {
- b.AddUint16(uint16(m.serverShare.group))
- b.AddUint16LengthPrefixed(func(b *cryptobyte.Builder) {
- b.AddBytes(m.serverShare.data)
- })
- })
- }
- if m.selectedIdentityPresent {
- b.AddUint16(extensionPreSharedKey)
- b.AddUint16LengthPrefixed(func(b *cryptobyte.Builder) {
- b.AddUint16(m.selectedIdentity)
- })
- }
-
- if len(m.cookie) > 0 {
- b.AddUint16(extensionCookie)
- b.AddUint16LengthPrefixed(func(b *cryptobyte.Builder) {
- b.AddUint16LengthPrefixed(func(b *cryptobyte.Builder) {
- b.AddBytes(m.cookie)
- })
- })
- }
- if m.selectedGroup != 0 {
- b.AddUint16(extensionKeyShare)
- b.AddUint16LengthPrefixed(func(b *cryptobyte.Builder) {
- b.AddUint16(uint16(m.selectedGroup))
- })
- }
- if len(m.supportedPoints) > 0 {
- b.AddUint16(extensionSupportedPoints)
- b.AddUint16LengthPrefixed(func(b *cryptobyte.Builder) {
- b.AddUint8LengthPrefixed(func(b *cryptobyte.Builder) {
- b.AddBytes(m.supportedPoints)
- })
- })
- }
-
- extensionsPresent = len(b.BytesOrPanic()) > 2
- })
-
- if !extensionsPresent {
- *b = bWithoutExtensions
- }
- })
-
- m.raw = b.BytesOrPanic()
- return m.raw
-}
-
-func (m *serverHelloMsg) unmarshal(data []byte) bool {
- *m = serverHelloMsg{raw: data}
- s := cryptobyte.String(data)
-
- if !s.Skip(4) || // message type and uint24 length field
- !s.ReadUint16(&m.vers) || !s.ReadBytes(&m.random, 32) ||
- !readUint8LengthPrefixed(&s, &m.sessionId) ||
- !s.ReadUint16(&m.cipherSuite) ||
- !s.ReadUint8(&m.compressionMethod) {
- return false
- }
-
- if s.Empty() {
- // ServerHello is optionally followed by extension data
- return true
- }
-
- var extensions cryptobyte.String
- if !s.ReadUint16LengthPrefixed(&extensions) || !s.Empty() {
- return false
- }
-
- for !extensions.Empty() {
- var extension uint16
- var extData cryptobyte.String
- if !extensions.ReadUint16(&extension) ||
- !extensions.ReadUint16LengthPrefixed(&extData) {
- return false
- }
-
- switch extension {
- case extensionStatusRequest:
- m.ocspStapling = true
- case extensionSessionTicket:
- m.ticketSupported = true
- case extensionRenegotiationInfo:
- if !readUint8LengthPrefixed(&extData, &m.secureRenegotiation) {
- return false
- }
- m.secureRenegotiationSupported = true
- case extensionALPN:
- var protoList cryptobyte.String
- if !extData.ReadUint16LengthPrefixed(&protoList) || protoList.Empty() {
- return false
- }
- var proto cryptobyte.String
- if !protoList.ReadUint8LengthPrefixed(&proto) ||
- proto.Empty() || !protoList.Empty() {
- return false
- }
- m.alpnProtocol = string(proto)
- case extensionSCT:
- var sctList cryptobyte.String
- if !extData.ReadUint16LengthPrefixed(&sctList) || sctList.Empty() {
- return false
- }
- for !sctList.Empty() {
- var sct []byte
- if !readUint16LengthPrefixed(&sctList, &sct) ||
- len(sct) == 0 {
- return false
- }
- m.scts = append(m.scts, sct)
- }
- case extensionSupportedVersions:
- if !extData.ReadUint16(&m.supportedVersion) {
- return false
- }
- case extensionCookie:
- if !readUint16LengthPrefixed(&extData, &m.cookie) ||
- len(m.cookie) == 0 {
- return false
- }
- case extensionKeyShare:
- // This extension has different formats in SH and HRR, accept either
- // and let the handshake logic decide. See RFC 8446, Section 4.2.8.
- if len(extData) == 2 {
- if !extData.ReadUint16((*uint16)(&m.selectedGroup)) {
- return false
- }
- } else {
- if !extData.ReadUint16((*uint16)(&m.serverShare.group)) ||
- !readUint16LengthPrefixed(&extData, &m.serverShare.data) {
- return false
- }
- }
- case extensionPreSharedKey:
- m.selectedIdentityPresent = true
- if !extData.ReadUint16(&m.selectedIdentity) {
- return false
- }
- case extensionSupportedPoints:
- // RFC 4492, Section 5.1.2
- if !readUint8LengthPrefixed(&extData, &m.supportedPoints) ||
- len(m.supportedPoints) == 0 {
- return false
- }
- default:
- // Ignore unknown extensions.
- continue
- }
-
- if !extData.Empty() {
- return false
- }
- }
-
- return true
-}
-
-type encryptedExtensionsMsg struct {
- raw []byte
- alpnProtocol string
- earlyData bool
-
- additionalExtensions []Extension
-}
-
-func (m *encryptedExtensionsMsg) marshal() []byte {
- if m.raw != nil {
- return m.raw
- }
-
- var b cryptobyte.Builder
- b.AddUint8(typeEncryptedExtensions)
- b.AddUint24LengthPrefixed(func(b *cryptobyte.Builder) {
- b.AddUint16LengthPrefixed(func(b *cryptobyte.Builder) {
- if len(m.alpnProtocol) > 0 {
- b.AddUint16(extensionALPN)
- b.AddUint16LengthPrefixed(func(b *cryptobyte.Builder) {
- b.AddUint16LengthPrefixed(func(b *cryptobyte.Builder) {
- b.AddUint8LengthPrefixed(func(b *cryptobyte.Builder) {
- b.AddBytes([]byte(m.alpnProtocol))
- })
- })
- })
- }
- if m.earlyData {
- // RFC 8446, Section 4.2.10
- b.AddUint16(extensionEarlyData)
- b.AddUint16(0) // empty extension_data
- }
- for _, ext := range m.additionalExtensions {
- b.AddUint16(ext.Type)
- b.AddUint16LengthPrefixed(func(b *cryptobyte.Builder) {
- b.AddBytes(ext.Data)
- })
- }
- })
- })
-
- m.raw = b.BytesOrPanic()
- return m.raw
-}
-
-func (m *encryptedExtensionsMsg) unmarshal(data []byte) bool {
- *m = encryptedExtensionsMsg{raw: data}
- s := cryptobyte.String(data)
-
- var extensions cryptobyte.String
- if !s.Skip(4) || // message type and uint24 length field
- !s.ReadUint16LengthPrefixed(&extensions) || !s.Empty() {
- return false
- }
-
- for !extensions.Empty() {
- var ext uint16
- var extData cryptobyte.String
- if !extensions.ReadUint16(&ext) ||
- !extensions.ReadUint16LengthPrefixed(&extData) {
- return false
- }
-
- switch ext {
- case extensionALPN:
- var protoList cryptobyte.String
- if !extData.ReadUint16LengthPrefixed(&protoList) || protoList.Empty() {
- return false
- }
- var proto cryptobyte.String
- if !protoList.ReadUint8LengthPrefixed(&proto) ||
- proto.Empty() || !protoList.Empty() {
- return false
- }
- m.alpnProtocol = string(proto)
- case extensionEarlyData:
- m.earlyData = true
- default:
- m.additionalExtensions = append(m.additionalExtensions, Extension{Type: ext, Data: extData})
- continue
- }
-
- if !extData.Empty() {
- return false
- }
- }
-
- return true
-}
-
-type endOfEarlyDataMsg struct{}
-
-func (m *endOfEarlyDataMsg) marshal() []byte {
- x := make([]byte, 4)
- x[0] = typeEndOfEarlyData
- return x
-}
-
-func (m *endOfEarlyDataMsg) unmarshal(data []byte) bool {
- return len(data) == 4
-}
-
-type keyUpdateMsg struct {
- raw []byte
- updateRequested bool
-}
-
-func (m *keyUpdateMsg) marshal() []byte {
- if m.raw != nil {
- return m.raw
- }
-
- var b cryptobyte.Builder
- b.AddUint8(typeKeyUpdate)
- b.AddUint24LengthPrefixed(func(b *cryptobyte.Builder) {
- if m.updateRequested {
- b.AddUint8(1)
- } else {
- b.AddUint8(0)
- }
- })
-
- m.raw = b.BytesOrPanic()
- return m.raw
-}
-
-func (m *keyUpdateMsg) unmarshal(data []byte) bool {
- m.raw = data
- s := cryptobyte.String(data)
-
- var updateRequested uint8
- if !s.Skip(4) || // message type and uint24 length field
- !s.ReadUint8(&updateRequested) || !s.Empty() {
- return false
- }
- switch updateRequested {
- case 0:
- m.updateRequested = false
- case 1:
- m.updateRequested = true
- default:
- return false
- }
- return true
-}
-
-type newSessionTicketMsgTLS13 struct {
- raw []byte
- lifetime uint32
- ageAdd uint32
- nonce []byte
- label []byte
- maxEarlyData uint32
-}
-
-func (m *newSessionTicketMsgTLS13) marshal() []byte {
- if m.raw != nil {
- return m.raw
- }
-
- var b cryptobyte.Builder
- b.AddUint8(typeNewSessionTicket)
- b.AddUint24LengthPrefixed(func(b *cryptobyte.Builder) {
- b.AddUint32(m.lifetime)
- b.AddUint32(m.ageAdd)
- b.AddUint8LengthPrefixed(func(b *cryptobyte.Builder) {
- b.AddBytes(m.nonce)
- })
- b.AddUint16LengthPrefixed(func(b *cryptobyte.Builder) {
- b.AddBytes(m.label)
- })
-
- b.AddUint16LengthPrefixed(func(b *cryptobyte.Builder) {
- if m.maxEarlyData > 0 {
- b.AddUint16(extensionEarlyData)
- b.AddUint16LengthPrefixed(func(b *cryptobyte.Builder) {
- b.AddUint32(m.maxEarlyData)
- })
- }
- })
- })
-
- m.raw = b.BytesOrPanic()
- return m.raw
-}
-
-func (m *newSessionTicketMsgTLS13) unmarshal(data []byte) bool {
- *m = newSessionTicketMsgTLS13{raw: data}
- s := cryptobyte.String(data)
-
- var extensions cryptobyte.String
- if !s.Skip(4) || // message type and uint24 length field
- !s.ReadUint32(&m.lifetime) ||
- !s.ReadUint32(&m.ageAdd) ||
- !readUint8LengthPrefixed(&s, &m.nonce) ||
- !readUint16LengthPrefixed(&s, &m.label) ||
- !s.ReadUint16LengthPrefixed(&extensions) ||
- !s.Empty() {
- return false
- }
-
- for !extensions.Empty() {
- var extension uint16
- var extData cryptobyte.String
- if !extensions.ReadUint16(&extension) ||
- !extensions.ReadUint16LengthPrefixed(&extData) {
- return false
- }
-
- switch extension {
- case extensionEarlyData:
- if !extData.ReadUint32(&m.maxEarlyData) {
- return false
- }
- default:
- // Ignore unknown extensions.
- continue
- }
-
- if !extData.Empty() {
- return false
- }
- }
-
- return true
-}
-
-type certificateRequestMsgTLS13 struct {
- raw []byte
- ocspStapling bool
- scts bool
- supportedSignatureAlgorithms []SignatureScheme
- supportedSignatureAlgorithmsCert []SignatureScheme
- certificateAuthorities [][]byte
-}
-
-func (m *certificateRequestMsgTLS13) marshal() []byte {
- if m.raw != nil {
- return m.raw
- }
-
- var b cryptobyte.Builder
- b.AddUint8(typeCertificateRequest)
- b.AddUint24LengthPrefixed(func(b *cryptobyte.Builder) {
- // certificate_request_context (SHALL be zero length unless used for
- // post-handshake authentication)
- b.AddUint8(0)
-
- b.AddUint16LengthPrefixed(func(b *cryptobyte.Builder) {
- if m.ocspStapling {
- b.AddUint16(extensionStatusRequest)
- b.AddUint16(0) // empty extension_data
- }
- if m.scts {
- // RFC 8446, Section 4.4.2.1 makes no mention of
- // signed_certificate_timestamp in CertificateRequest, but
- // "Extensions in the Certificate message from the client MUST
- // correspond to extensions in the CertificateRequest message
- // from the server." and it appears in the table in Section 4.2.
- b.AddUint16(extensionSCT)
- b.AddUint16(0) // empty extension_data
- }
- if len(m.supportedSignatureAlgorithms) > 0 {
- b.AddUint16(extensionSignatureAlgorithms)
- b.AddUint16LengthPrefixed(func(b *cryptobyte.Builder) {
- b.AddUint16LengthPrefixed(func(b *cryptobyte.Builder) {
- for _, sigAlgo := range m.supportedSignatureAlgorithms {
- b.AddUint16(uint16(sigAlgo))
- }
- })
- })
- }
- if len(m.supportedSignatureAlgorithmsCert) > 0 {
- b.AddUint16(extensionSignatureAlgorithmsCert)
- b.AddUint16LengthPrefixed(func(b *cryptobyte.Builder) {
- b.AddUint16LengthPrefixed(func(b *cryptobyte.Builder) {
- for _, sigAlgo := range m.supportedSignatureAlgorithmsCert {
- b.AddUint16(uint16(sigAlgo))
- }
- })
- })
- }
- if len(m.certificateAuthorities) > 0 {
- b.AddUint16(extensionCertificateAuthorities)
- b.AddUint16LengthPrefixed(func(b *cryptobyte.Builder) {
- b.AddUint16LengthPrefixed(func(b *cryptobyte.Builder) {
- for _, ca := range m.certificateAuthorities {
- b.AddUint16LengthPrefixed(func(b *cryptobyte.Builder) {
- b.AddBytes(ca)
- })
- }
- })
- })
- }
- })
- })
-
- m.raw = b.BytesOrPanic()
- return m.raw
-}
-
-func (m *certificateRequestMsgTLS13) unmarshal(data []byte) bool {
- *m = certificateRequestMsgTLS13{raw: data}
- s := cryptobyte.String(data)
-
- var context, extensions cryptobyte.String
- if !s.Skip(4) || // message type and uint24 length field
- !s.ReadUint8LengthPrefixed(&context) || !context.Empty() ||
- !s.ReadUint16LengthPrefixed(&extensions) ||
- !s.Empty() {
- return false
- }
-
- for !extensions.Empty() {
- var extension uint16
- var extData cryptobyte.String
- if !extensions.ReadUint16(&extension) ||
- !extensions.ReadUint16LengthPrefixed(&extData) {
- return false
- }
-
- switch extension {
- case extensionStatusRequest:
- m.ocspStapling = true
- case extensionSCT:
- m.scts = true
- case extensionSignatureAlgorithms:
- var sigAndAlgs cryptobyte.String
- if !extData.ReadUint16LengthPrefixed(&sigAndAlgs) || sigAndAlgs.Empty() {
- return false
- }
- for !sigAndAlgs.Empty() {
- var sigAndAlg uint16
- if !sigAndAlgs.ReadUint16(&sigAndAlg) {
- return false
- }
- m.supportedSignatureAlgorithms = append(
- m.supportedSignatureAlgorithms, SignatureScheme(sigAndAlg))
- }
- case extensionSignatureAlgorithmsCert:
- var sigAndAlgs cryptobyte.String
- if !extData.ReadUint16LengthPrefixed(&sigAndAlgs) || sigAndAlgs.Empty() {
- return false
- }
- for !sigAndAlgs.Empty() {
- var sigAndAlg uint16
- if !sigAndAlgs.ReadUint16(&sigAndAlg) {
- return false
- }
- m.supportedSignatureAlgorithmsCert = append(
- m.supportedSignatureAlgorithmsCert, SignatureScheme(sigAndAlg))
- }
- case extensionCertificateAuthorities:
- var auths cryptobyte.String
- if !extData.ReadUint16LengthPrefixed(&auths) || auths.Empty() {
- return false
- }
- for !auths.Empty() {
- var ca []byte
- if !readUint16LengthPrefixed(&auths, &ca) || len(ca) == 0 {
- return false
- }
- m.certificateAuthorities = append(m.certificateAuthorities, ca)
- }
- default:
- // Ignore unknown extensions.
- continue
- }
-
- if !extData.Empty() {
- return false
- }
- }
-
- return true
-}
-
-type certificateMsg struct {
- raw []byte
- certificates [][]byte
-}
-
-func (m *certificateMsg) marshal() (x []byte) {
- if m.raw != nil {
- return m.raw
- }
-
- var i int
- for _, slice := range m.certificates {
- i += len(slice)
- }
-
- length := 3 + 3*len(m.certificates) + i
- x = make([]byte, 4+length)
- x[0] = typeCertificate
- x[1] = uint8(length >> 16)
- x[2] = uint8(length >> 8)
- x[3] = uint8(length)
-
- certificateOctets := length - 3
- x[4] = uint8(certificateOctets >> 16)
- x[5] = uint8(certificateOctets >> 8)
- x[6] = uint8(certificateOctets)
-
- y := x[7:]
- for _, slice := range m.certificates {
- y[0] = uint8(len(slice) >> 16)
- y[1] = uint8(len(slice) >> 8)
- y[2] = uint8(len(slice))
- copy(y[3:], slice)
- y = y[3+len(slice):]
- }
-
- m.raw = x
- return
-}
-
-func (m *certificateMsg) unmarshal(data []byte) bool {
- if len(data) < 7 {
- return false
- }
-
- m.raw = data
- certsLen := uint32(data[4])<<16 | uint32(data[5])<<8 | uint32(data[6])
- if uint32(len(data)) != certsLen+7 {
- return false
- }
-
- numCerts := 0
- d := data[7:]
- for certsLen > 0 {
- if len(d) < 4 {
- return false
- }
- certLen := uint32(d[0])<<16 | uint32(d[1])<<8 | uint32(d[2])
- if uint32(len(d)) < 3+certLen {
- return false
- }
- d = d[3+certLen:]
- certsLen -= 3 + certLen
- numCerts++
- }
-
- m.certificates = make([][]byte, numCerts)
- d = data[7:]
- for i := 0; i < numCerts; i++ {
- certLen := uint32(d[0])<<16 | uint32(d[1])<<8 | uint32(d[2])
- m.certificates[i] = d[3 : 3+certLen]
- d = d[3+certLen:]
- }
-
- return true
-}
-
-type certificateMsgTLS13 struct {
- raw []byte
- certificate Certificate
- ocspStapling bool
- scts bool
-}
-
-func (m *certificateMsgTLS13) marshal() []byte {
- if m.raw != nil {
- return m.raw
- }
-
- var b cryptobyte.Builder
- b.AddUint8(typeCertificate)
- b.AddUint24LengthPrefixed(func(b *cryptobyte.Builder) {
- b.AddUint8(0) // certificate_request_context
-
- certificate := m.certificate
- if !m.ocspStapling {
- certificate.OCSPStaple = nil
- }
- if !m.scts {
- certificate.SignedCertificateTimestamps = nil
- }
- marshalCertificate(b, certificate)
- })
-
- m.raw = b.BytesOrPanic()
- return m.raw
-}
-
-func marshalCertificate(b *cryptobyte.Builder, certificate Certificate) {
- b.AddUint24LengthPrefixed(func(b *cryptobyte.Builder) {
- for i, cert := range certificate.Certificate {
- b.AddUint24LengthPrefixed(func(b *cryptobyte.Builder) {
- b.AddBytes(cert)
- })
- b.AddUint16LengthPrefixed(func(b *cryptobyte.Builder) {
- if i > 0 {
- // This library only supports OCSP and SCT for leaf certificates.
- return
- }
- if certificate.OCSPStaple != nil {
- b.AddUint16(extensionStatusRequest)
- b.AddUint16LengthPrefixed(func(b *cryptobyte.Builder) {
- b.AddUint8(statusTypeOCSP)
- b.AddUint24LengthPrefixed(func(b *cryptobyte.Builder) {
- b.AddBytes(certificate.OCSPStaple)
- })
- })
- }
- if certificate.SignedCertificateTimestamps != nil {
- b.AddUint16(extensionSCT)
- b.AddUint16LengthPrefixed(func(b *cryptobyte.Builder) {
- b.AddUint16LengthPrefixed(func(b *cryptobyte.Builder) {
- for _, sct := range certificate.SignedCertificateTimestamps {
- b.AddUint16LengthPrefixed(func(b *cryptobyte.Builder) {
- b.AddBytes(sct)
- })
- }
- })
- })
- }
- })
- }
- })
-}
-
-func (m *certificateMsgTLS13) unmarshal(data []byte) bool {
- *m = certificateMsgTLS13{raw: data}
- s := cryptobyte.String(data)
-
- var context cryptobyte.String
- if !s.Skip(4) || // message type and uint24 length field
- !s.ReadUint8LengthPrefixed(&context) || !context.Empty() ||
- !unmarshalCertificate(&s, &m.certificate) ||
- !s.Empty() {
- return false
- }
-
- m.scts = m.certificate.SignedCertificateTimestamps != nil
- m.ocspStapling = m.certificate.OCSPStaple != nil
-
- return true
-}
-
-func unmarshalCertificate(s *cryptobyte.String, certificate *Certificate) bool {
- var certList cryptobyte.String
- if !s.ReadUint24LengthPrefixed(&certList) {
- return false
- }
- for !certList.Empty() {
- var cert []byte
- var extensions cryptobyte.String
- if !readUint24LengthPrefixed(&certList, &cert) ||
- !certList.ReadUint16LengthPrefixed(&extensions) {
- return false
- }
- certificate.Certificate = append(certificate.Certificate, cert)
- for !extensions.Empty() {
- var extension uint16
- var extData cryptobyte.String
- if !extensions.ReadUint16(&extension) ||
- !extensions.ReadUint16LengthPrefixed(&extData) {
- return false
- }
- if len(certificate.Certificate) > 1 {
- // This library only supports OCSP and SCT for leaf certificates.
- continue
- }
-
- switch extension {
- case extensionStatusRequest:
- var statusType uint8
- if !extData.ReadUint8(&statusType) || statusType != statusTypeOCSP ||
- !readUint24LengthPrefixed(&extData, &certificate.OCSPStaple) ||
- len(certificate.OCSPStaple) == 0 {
- return false
- }
- case extensionSCT:
- var sctList cryptobyte.String
- if !extData.ReadUint16LengthPrefixed(&sctList) || sctList.Empty() {
- return false
- }
- for !sctList.Empty() {
- var sct []byte
- if !readUint16LengthPrefixed(&sctList, &sct) ||
- len(sct) == 0 {
- return false
- }
- certificate.SignedCertificateTimestamps = append(
- certificate.SignedCertificateTimestamps, sct)
- }
- default:
- // Ignore unknown extensions.
- continue
- }
-
- if !extData.Empty() {
- return false
- }
- }
- }
- return true
-}
-
-type serverKeyExchangeMsg struct {
- raw []byte
- key []byte
-}
-
-func (m *serverKeyExchangeMsg) marshal() []byte {
- if m.raw != nil {
- return m.raw
- }
- length := len(m.key)
- x := make([]byte, length+4)
- x[0] = typeServerKeyExchange
- x[1] = uint8(length >> 16)
- x[2] = uint8(length >> 8)
- x[3] = uint8(length)
- copy(x[4:], m.key)
-
- m.raw = x
- return x
-}
-
-func (m *serverKeyExchangeMsg) unmarshal(data []byte) bool {
- m.raw = data
- if len(data) < 4 {
- return false
- }
- m.key = data[4:]
- return true
-}
-
-type certificateStatusMsg struct {
- raw []byte
- response []byte
-}
-
-func (m *certificateStatusMsg) marshal() []byte {
- if m.raw != nil {
- return m.raw
- }
-
- var b cryptobyte.Builder
- b.AddUint8(typeCertificateStatus)
- b.AddUint24LengthPrefixed(func(b *cryptobyte.Builder) {
- b.AddUint8(statusTypeOCSP)
- b.AddUint24LengthPrefixed(func(b *cryptobyte.Builder) {
- b.AddBytes(m.response)
- })
- })
-
- m.raw = b.BytesOrPanic()
- return m.raw
-}
-
-func (m *certificateStatusMsg) unmarshal(data []byte) bool {
- m.raw = data
- s := cryptobyte.String(data)
-
- var statusType uint8
- if !s.Skip(4) || // message type and uint24 length field
- !s.ReadUint8(&statusType) || statusType != statusTypeOCSP ||
- !readUint24LengthPrefixed(&s, &m.response) ||
- len(m.response) == 0 || !s.Empty() {
- return false
- }
- return true
-}
-
-type serverHelloDoneMsg struct{}
-
-func (m *serverHelloDoneMsg) marshal() []byte {
- x := make([]byte, 4)
- x[0] = typeServerHelloDone
- return x
-}
-
-func (m *serverHelloDoneMsg) unmarshal(data []byte) bool {
- return len(data) == 4
-}
-
-type clientKeyExchangeMsg struct {
- raw []byte
- ciphertext []byte
-}
-
-func (m *clientKeyExchangeMsg) marshal() []byte {
- if m.raw != nil {
- return m.raw
- }
- length := len(m.ciphertext)
- x := make([]byte, length+4)
- x[0] = typeClientKeyExchange
- x[1] = uint8(length >> 16)
- x[2] = uint8(length >> 8)
- x[3] = uint8(length)
- copy(x[4:], m.ciphertext)
-
- m.raw = x
- return x
-}
-
-func (m *clientKeyExchangeMsg) unmarshal(data []byte) bool {
- m.raw = data
- if len(data) < 4 {
- return false
- }
- l := int(data[1])<<16 | int(data[2])<<8 | int(data[3])
- if l != len(data)-4 {
- return false
- }
- m.ciphertext = data[4:]
- return true
-}
-
-type finishedMsg struct {
- raw []byte
- verifyData []byte
-}
-
-func (m *finishedMsg) marshal() []byte {
- if m.raw != nil {
- return m.raw
- }
-
- var b cryptobyte.Builder
- b.AddUint8(typeFinished)
- b.AddUint24LengthPrefixed(func(b *cryptobyte.Builder) {
- b.AddBytes(m.verifyData)
- })
-
- m.raw = b.BytesOrPanic()
- return m.raw
-}
-
-func (m *finishedMsg) unmarshal(data []byte) bool {
- m.raw = data
- s := cryptobyte.String(data)
- return s.Skip(1) &&
- readUint24LengthPrefixed(&s, &m.verifyData) &&
- s.Empty()
-}
-
-type certificateRequestMsg struct {
- raw []byte
- // hasSignatureAlgorithm indicates whether this message includes a list of
- // supported signature algorithms. This change was introduced with TLS 1.2.
- hasSignatureAlgorithm bool
-
- certificateTypes []byte
- supportedSignatureAlgorithms []SignatureScheme
- certificateAuthorities [][]byte
-}
-
-func (m *certificateRequestMsg) marshal() (x []byte) {
- if m.raw != nil {
- return m.raw
- }
-
- // See RFC 4346, Section 7.4.4.
- length := 1 + len(m.certificateTypes) + 2
- casLength := 0
- for _, ca := range m.certificateAuthorities {
- casLength += 2 + len(ca)
- }
- length += casLength
-
- if m.hasSignatureAlgorithm {
- length += 2 + 2*len(m.supportedSignatureAlgorithms)
- }
-
- x = make([]byte, 4+length)
- x[0] = typeCertificateRequest
- x[1] = uint8(length >> 16)
- x[2] = uint8(length >> 8)
- x[3] = uint8(length)
-
- x[4] = uint8(len(m.certificateTypes))
-
- copy(x[5:], m.certificateTypes)
- y := x[5+len(m.certificateTypes):]
-
- if m.hasSignatureAlgorithm {
- n := len(m.supportedSignatureAlgorithms) * 2
- y[0] = uint8(n >> 8)
- y[1] = uint8(n)
- y = y[2:]
- for _, sigAlgo := range m.supportedSignatureAlgorithms {
- y[0] = uint8(sigAlgo >> 8)
- y[1] = uint8(sigAlgo)
- y = y[2:]
- }
- }
-
- y[0] = uint8(casLength >> 8)
- y[1] = uint8(casLength)
- y = y[2:]
- for _, ca := range m.certificateAuthorities {
- y[0] = uint8(len(ca) >> 8)
- y[1] = uint8(len(ca))
- y = y[2:]
- copy(y, ca)
- y = y[len(ca):]
- }
-
- m.raw = x
- return
-}
-
-func (m *certificateRequestMsg) unmarshal(data []byte) bool {
- m.raw = data
-
- if len(data) < 5 {
- return false
- }
-
- length := uint32(data[1])<<16 | uint32(data[2])<<8 | uint32(data[3])
- if uint32(len(data))-4 != length {
- return false
- }
-
- numCertTypes := int(data[4])
- data = data[5:]
- if numCertTypes == 0 || len(data) <= numCertTypes {
- return false
- }
-
- m.certificateTypes = make([]byte, numCertTypes)
- if copy(m.certificateTypes, data) != numCertTypes {
- return false
- }
-
- data = data[numCertTypes:]
-
- if m.hasSignatureAlgorithm {
- if len(data) < 2 {
- return false
- }
- sigAndHashLen := uint16(data[0])<<8 | uint16(data[1])
- data = data[2:]
- if sigAndHashLen&1 != 0 {
- return false
- }
- if len(data) < int(sigAndHashLen) {
- return false
- }
- numSigAlgos := sigAndHashLen / 2
- m.supportedSignatureAlgorithms = make([]SignatureScheme, numSigAlgos)
- for i := range m.supportedSignatureAlgorithms {
- m.supportedSignatureAlgorithms[i] = SignatureScheme(data[0])<<8 | SignatureScheme(data[1])
- data = data[2:]
- }
- }
-
- if len(data) < 2 {
- return false
- }
- casLength := uint16(data[0])<<8 | uint16(data[1])
- data = data[2:]
- if len(data) < int(casLength) {
- return false
- }
- cas := make([]byte, casLength)
- copy(cas, data)
- data = data[casLength:]
-
- m.certificateAuthorities = nil
- for len(cas) > 0 {
- if len(cas) < 2 {
- return false
- }
- caLen := uint16(cas[0])<<8 | uint16(cas[1])
- cas = cas[2:]
-
- if len(cas) < int(caLen) {
- return false
- }
-
- m.certificateAuthorities = append(m.certificateAuthorities, cas[:caLen])
- cas = cas[caLen:]
- }
-
- return len(data) == 0
-}
-
-type certificateVerifyMsg struct {
- raw []byte
- hasSignatureAlgorithm bool // format change introduced in TLS 1.2
- signatureAlgorithm SignatureScheme
- signature []byte
-}
-
-func (m *certificateVerifyMsg) marshal() (x []byte) {
- if m.raw != nil {
- return m.raw
- }
-
- var b cryptobyte.Builder
- b.AddUint8(typeCertificateVerify)
- b.AddUint24LengthPrefixed(func(b *cryptobyte.Builder) {
- if m.hasSignatureAlgorithm {
- b.AddUint16(uint16(m.signatureAlgorithm))
- }
- b.AddUint16LengthPrefixed(func(b *cryptobyte.Builder) {
- b.AddBytes(m.signature)
- })
- })
-
- m.raw = b.BytesOrPanic()
- return m.raw
-}
-
-func (m *certificateVerifyMsg) unmarshal(data []byte) bool {
- m.raw = data
- s := cryptobyte.String(data)
-
- if !s.Skip(4) { // message type and uint24 length field
- return false
- }
- if m.hasSignatureAlgorithm {
- if !s.ReadUint16((*uint16)(&m.signatureAlgorithm)) {
- return false
- }
- }
- return readUint16LengthPrefixed(&s, &m.signature) && s.Empty()
-}
-
-type newSessionTicketMsg struct {
- raw []byte
- ticket []byte
-}
-
-func (m *newSessionTicketMsg) marshal() (x []byte) {
- if m.raw != nil {
- return m.raw
- }
-
- // See RFC 5077, Section 3.3.
- ticketLen := len(m.ticket)
- length := 2 + 4 + ticketLen
- x = make([]byte, 4+length)
- x[0] = typeNewSessionTicket
- x[1] = uint8(length >> 16)
- x[2] = uint8(length >> 8)
- x[3] = uint8(length)
- x[8] = uint8(ticketLen >> 8)
- x[9] = uint8(ticketLen)
- copy(x[10:], m.ticket)
-
- m.raw = x
-
- return
-}
-
-func (m *newSessionTicketMsg) unmarshal(data []byte) bool {
- m.raw = data
-
- if len(data) < 10 {
- return false
- }
-
- length := uint32(data[1])<<16 | uint32(data[2])<<8 | uint32(data[3])
- if uint32(len(data))-4 != length {
- return false
- }
-
- ticketLen := int(data[8])<<8 + int(data[9])
- if len(data)-10 != ticketLen {
- return false
- }
-
- m.ticket = data[10:]
-
- return true
-}
-
-type helloRequestMsg struct {
-}
-
-func (*helloRequestMsg) marshal() []byte {
- return []byte{typeHelloRequest, 0, 0, 0}
-}
-
-func (*helloRequestMsg) unmarshal(data []byte) bool {
- return len(data) == 4
-}
diff --git a/vendor/github.com/marten-seemann/qtls-go1-16/handshake_server.go b/vendor/github.com/marten-seemann/qtls-go1-16/handshake_server.go
deleted file mode 100644
index 5d39cc8fc..000000000
--- a/vendor/github.com/marten-seemann/qtls-go1-16/handshake_server.go
+++ /dev/null
@@ -1,878 +0,0 @@
-// Copyright 2009 The Go Authors. All rights reserved.
-// Use of this source code is governed by a BSD-style
-// license that can be found in the LICENSE file.
-
-package qtls
-
-import (
- "crypto"
- "crypto/ecdsa"
- "crypto/ed25519"
- "crypto/rsa"
- "crypto/subtle"
- "crypto/x509"
- "errors"
- "fmt"
- "hash"
- "io"
- "sync/atomic"
- "time"
-)
-
-// serverHandshakeState contains details of a server handshake in progress.
-// It's discarded once the handshake has completed.
-type serverHandshakeState struct {
- c *Conn
- clientHello *clientHelloMsg
- hello *serverHelloMsg
- suite *cipherSuite
- ecdheOk bool
- ecSignOk bool
- rsaDecryptOk bool
- rsaSignOk bool
- sessionState *sessionState
- finishedHash finishedHash
- masterSecret []byte
- cert *Certificate
-}
-
-// serverHandshake performs a TLS handshake as a server.
-func (c *Conn) serverHandshake() error {
- c.setAlternativeRecordLayer()
-
- clientHello, err := c.readClientHello()
- if err != nil {
- return err
- }
-
- if c.vers == VersionTLS13 {
- hs := serverHandshakeStateTLS13{
- c: c,
- clientHello: clientHello,
- }
- return hs.handshake()
- } else if c.extraConfig.usesAlternativeRecordLayer() {
- // This should already have been caught by the check that the ClientHello doesn't
- // offer any (supported) versions older than TLS 1.3.
- // Check again to make sure we can't be tricked into using an older version.
- c.sendAlert(alertProtocolVersion)
- return errors.New("tls: negotiated TLS < 1.3 when using QUIC")
- }
-
- hs := serverHandshakeState{
- c: c,
- clientHello: clientHello,
- }
- return hs.handshake()
-}
-
-func (hs *serverHandshakeState) handshake() error {
- c := hs.c
-
- if err := hs.processClientHello(); err != nil {
- return err
- }
-
- // For an overview of TLS handshaking, see RFC 5246, Section 7.3.
- c.buffering = true
- if hs.checkForResumption() {
- // The client has included a session ticket and so we do an abbreviated handshake.
- c.didResume = true
- if err := hs.doResumeHandshake(); err != nil {
- return err
- }
- if err := hs.establishKeys(); err != nil {
- return err
- }
- if err := hs.sendSessionTicket(); err != nil {
- return err
- }
- if err := hs.sendFinished(c.serverFinished[:]); err != nil {
- return err
- }
- if _, err := c.flush(); err != nil {
- return err
- }
- c.clientFinishedIsFirst = false
- if err := hs.readFinished(nil); err != nil {
- return err
- }
- } else {
- // The client didn't include a session ticket, or it wasn't
- // valid so we do a full handshake.
- if err := hs.pickCipherSuite(); err != nil {
- return err
- }
- if err := hs.doFullHandshake(); err != nil {
- return err
- }
- if err := hs.establishKeys(); err != nil {
- return err
- }
- if err := hs.readFinished(c.clientFinished[:]); err != nil {
- return err
- }
- c.clientFinishedIsFirst = true
- c.buffering = true
- if err := hs.sendSessionTicket(); err != nil {
- return err
- }
- if err := hs.sendFinished(nil); err != nil {
- return err
- }
- if _, err := c.flush(); err != nil {
- return err
- }
- }
-
- c.ekm = ekmFromMasterSecret(c.vers, hs.suite, hs.masterSecret, hs.clientHello.random, hs.hello.random)
- atomic.StoreUint32(&c.handshakeStatus, 1)
-
- return nil
-}
-
-// readClientHello reads a ClientHello message and selects the protocol version.
-func (c *Conn) readClientHello() (*clientHelloMsg, error) {
- msg, err := c.readHandshake()
- if err != nil {
- return nil, err
- }
- clientHello, ok := msg.(*clientHelloMsg)
- if !ok {
- c.sendAlert(alertUnexpectedMessage)
- return nil, unexpectedMessageError(clientHello, msg)
- }
-
- var configForClient *config
- originalConfig := c.config
- if c.config.GetConfigForClient != nil {
- chi := newClientHelloInfo(c, clientHello)
- if cfc, err := c.config.GetConfigForClient(chi); err != nil {
- c.sendAlert(alertInternalError)
- return nil, err
- } else if cfc != nil {
- configForClient = fromConfig(cfc)
- c.config = configForClient
- }
- }
- c.ticketKeys = originalConfig.ticketKeys(configForClient)
-
- clientVersions := clientHello.supportedVersions
- if len(clientHello.supportedVersions) == 0 {
- clientVersions = supportedVersionsFromMax(clientHello.vers)
- }
- if c.extraConfig.usesAlternativeRecordLayer() {
- // In QUIC, the client MUST NOT offer any old TLS versions.
- // Here, we can only check that none of the other supported versions of this library
- // (TLS 1.0 - TLS 1.2) is offered. We don't check for any SSL versions here.
- for _, ver := range clientVersions {
- if ver == VersionTLS13 {
- continue
- }
- for _, v := range supportedVersions {
- if ver == v {
- c.sendAlert(alertProtocolVersion)
- return nil, fmt.Errorf("tls: client offered old TLS version %#x", ver)
- }
- }
- }
- // Make the config we're using allows us to use TLS 1.3.
- if c.config.maxSupportedVersion() < VersionTLS13 {
- c.sendAlert(alertInternalError)
- return nil, errors.New("tls: MaxVersion prevents QUIC from using TLS 1.3")
- }
- }
- c.vers, ok = c.config.mutualVersion(clientVersions)
- if !ok {
- c.sendAlert(alertProtocolVersion)
- return nil, fmt.Errorf("tls: client offered only unsupported versions: %x", clientVersions)
- }
- c.haveVers = true
- c.in.version = c.vers
- c.out.version = c.vers
-
- return clientHello, nil
-}
-
-func (hs *serverHandshakeState) processClientHello() error {
- c := hs.c
-
- hs.hello = new(serverHelloMsg)
- hs.hello.vers = c.vers
-
- foundCompression := false
- // We only support null compression, so check that the client offered it.
- for _, compression := range hs.clientHello.compressionMethods {
- if compression == compressionNone {
- foundCompression = true
- break
- }
- }
-
- if !foundCompression {
- c.sendAlert(alertHandshakeFailure)
- return errors.New("tls: client does not support uncompressed connections")
- }
-
- hs.hello.random = make([]byte, 32)
- serverRandom := hs.hello.random
- // Downgrade protection canaries. See RFC 8446, Section 4.1.3.
- maxVers := c.config.maxSupportedVersion()
- if maxVers >= VersionTLS12 && c.vers < maxVers || testingOnlyForceDowngradeCanary {
- if c.vers == VersionTLS12 {
- copy(serverRandom[24:], downgradeCanaryTLS12)
- } else {
- copy(serverRandom[24:], downgradeCanaryTLS11)
- }
- serverRandom = serverRandom[:24]
- }
- _, err := io.ReadFull(c.config.rand(), serverRandom)
- if err != nil {
- c.sendAlert(alertInternalError)
- return err
- }
-
- if len(hs.clientHello.secureRenegotiation) != 0 {
- c.sendAlert(alertHandshakeFailure)
- return errors.New("tls: initial handshake had non-empty renegotiation extension")
- }
-
- hs.hello.secureRenegotiationSupported = hs.clientHello.secureRenegotiationSupported
- hs.hello.compressionMethod = compressionNone
- if len(hs.clientHello.serverName) > 0 {
- c.serverName = hs.clientHello.serverName
- }
-
- if len(hs.clientHello.alpnProtocols) > 0 {
- if selectedProto := mutualProtocol(hs.clientHello.alpnProtocols, c.config.NextProtos); selectedProto != "" {
- hs.hello.alpnProtocol = selectedProto
- c.clientProtocol = selectedProto
- }
- }
-
- hs.cert, err = c.config.getCertificate(newClientHelloInfo(c, hs.clientHello))
- if err != nil {
- if err == errNoCertificates {
- c.sendAlert(alertUnrecognizedName)
- } else {
- c.sendAlert(alertInternalError)
- }
- return err
- }
- if hs.clientHello.scts {
- hs.hello.scts = hs.cert.SignedCertificateTimestamps
- }
-
- hs.ecdheOk = supportsECDHE(c.config, hs.clientHello.supportedCurves, hs.clientHello.supportedPoints)
-
- if hs.ecdheOk {
- // Although omitting the ec_point_formats extension is permitted, some
- // old OpenSSL version will refuse to handshake if not present.
- //
- // Per RFC 4492, section 5.1.2, implementations MUST support the
- // uncompressed point format. See golang.org/issue/31943.
- hs.hello.supportedPoints = []uint8{pointFormatUncompressed}
- }
-
- if priv, ok := hs.cert.PrivateKey.(crypto.Signer); ok {
- switch priv.Public().(type) {
- case *ecdsa.PublicKey:
- hs.ecSignOk = true
- case ed25519.PublicKey:
- hs.ecSignOk = true
- case *rsa.PublicKey:
- hs.rsaSignOk = true
- default:
- c.sendAlert(alertInternalError)
- return fmt.Errorf("tls: unsupported signing key type (%T)", priv.Public())
- }
- }
- if priv, ok := hs.cert.PrivateKey.(crypto.Decrypter); ok {
- switch priv.Public().(type) {
- case *rsa.PublicKey:
- hs.rsaDecryptOk = true
- default:
- c.sendAlert(alertInternalError)
- return fmt.Errorf("tls: unsupported decryption key type (%T)", priv.Public())
- }
- }
-
- return nil
-}
-
-// supportsECDHE returns whether ECDHE key exchanges can be used with this
-// pre-TLS 1.3 client.
-func supportsECDHE(c *config, supportedCurves []CurveID, supportedPoints []uint8) bool {
- supportsCurve := false
- for _, curve := range supportedCurves {
- if c.supportsCurve(curve) {
- supportsCurve = true
- break
- }
- }
-
- supportsPointFormat := false
- for _, pointFormat := range supportedPoints {
- if pointFormat == pointFormatUncompressed {
- supportsPointFormat = true
- break
- }
- }
-
- return supportsCurve && supportsPointFormat
-}
-
-func (hs *serverHandshakeState) pickCipherSuite() error {
- c := hs.c
-
- var preferenceList, supportedList []uint16
- if c.config.PreferServerCipherSuites {
- preferenceList = c.config.cipherSuites()
- supportedList = hs.clientHello.cipherSuites
-
- // If the client does not seem to have hardware support for AES-GCM,
- // and the application did not specify a cipher suite preference order,
- // prefer other AEAD ciphers even if we prioritized AES-GCM ciphers
- // by default.
- if c.config.CipherSuites == nil && !aesgcmPreferred(hs.clientHello.cipherSuites) {
- preferenceList = deprioritizeAES(preferenceList)
- }
- } else {
- preferenceList = hs.clientHello.cipherSuites
- supportedList = c.config.cipherSuites()
-
- // If we don't have hardware support for AES-GCM, prefer other AEAD
- // ciphers even if the client prioritized AES-GCM.
- if !hasAESGCMHardwareSupport {
- preferenceList = deprioritizeAES(preferenceList)
- }
- }
-
- hs.suite = selectCipherSuite(preferenceList, supportedList, hs.cipherSuiteOk)
- if hs.suite == nil {
- c.sendAlert(alertHandshakeFailure)
- return errors.New("tls: no cipher suite supported by both client and server")
- }
- c.cipherSuite = hs.suite.id
-
- for _, id := range hs.clientHello.cipherSuites {
- if id == TLS_FALLBACK_SCSV {
- // The client is doing a fallback connection. See RFC 7507.
- if hs.clientHello.vers < c.config.maxSupportedVersion() {
- c.sendAlert(alertInappropriateFallback)
- return errors.New("tls: client using inappropriate protocol fallback")
- }
- break
- }
- }
-
- return nil
-}
-
-func (hs *serverHandshakeState) cipherSuiteOk(c *cipherSuite) bool {
- if c.flags&suiteECDHE != 0 {
- if !hs.ecdheOk {
- return false
- }
- if c.flags&suiteECSign != 0 {
- if !hs.ecSignOk {
- return false
- }
- } else if !hs.rsaSignOk {
- return false
- }
- } else if !hs.rsaDecryptOk {
- return false
- }
- if hs.c.vers < VersionTLS12 && c.flags&suiteTLS12 != 0 {
- return false
- }
- return true
-}
-
-// checkForResumption reports whether we should perform resumption on this connection.
-func (hs *serverHandshakeState) checkForResumption() bool {
- c := hs.c
-
- if c.config.SessionTicketsDisabled {
- return false
- }
-
- plaintext, usedOldKey := c.decryptTicket(hs.clientHello.sessionTicket)
- if plaintext == nil {
- return false
- }
- hs.sessionState = &sessionState{usedOldKey: usedOldKey}
- ok := hs.sessionState.unmarshal(plaintext)
- if !ok {
- return false
- }
-
- createdAt := time.Unix(int64(hs.sessionState.createdAt), 0)
- if c.config.time().Sub(createdAt) > maxSessionTicketLifetime {
- return false
- }
-
- // Never resume a session for a different TLS version.
- if c.vers != hs.sessionState.vers {
- return false
- }
-
- cipherSuiteOk := false
- // Check that the client is still offering the ciphersuite in the session.
- for _, id := range hs.clientHello.cipherSuites {
- if id == hs.sessionState.cipherSuite {
- cipherSuiteOk = true
- break
- }
- }
- if !cipherSuiteOk {
- return false
- }
-
- // Check that we also support the ciphersuite from the session.
- hs.suite = selectCipherSuite([]uint16{hs.sessionState.cipherSuite},
- c.config.cipherSuites(), hs.cipherSuiteOk)
- if hs.suite == nil {
- return false
- }
-
- sessionHasClientCerts := len(hs.sessionState.certificates) != 0
- needClientCerts := requiresClientCert(c.config.ClientAuth)
- if needClientCerts && !sessionHasClientCerts {
- return false
- }
- if sessionHasClientCerts && c.config.ClientAuth == NoClientCert {
- return false
- }
-
- return true
-}
-
-func (hs *serverHandshakeState) doResumeHandshake() error {
- c := hs.c
-
- hs.hello.cipherSuite = hs.suite.id
- c.cipherSuite = hs.suite.id
- // We echo the client's session ID in the ServerHello to let it know
- // that we're doing a resumption.
- hs.hello.sessionId = hs.clientHello.sessionId
- hs.hello.ticketSupported = hs.sessionState.usedOldKey
- hs.finishedHash = newFinishedHash(c.vers, hs.suite)
- hs.finishedHash.discardHandshakeBuffer()
- hs.finishedHash.Write(hs.clientHello.marshal())
- hs.finishedHash.Write(hs.hello.marshal())
- if _, err := c.writeRecord(recordTypeHandshake, hs.hello.marshal()); err != nil {
- return err
- }
-
- if err := c.processCertsFromClient(Certificate{
- Certificate: hs.sessionState.certificates,
- }); err != nil {
- return err
- }
-
- if c.config.VerifyConnection != nil {
- if err := c.config.VerifyConnection(c.connectionStateLocked()); err != nil {
- c.sendAlert(alertBadCertificate)
- return err
- }
- }
-
- hs.masterSecret = hs.sessionState.masterSecret
-
- return nil
-}
-
-func (hs *serverHandshakeState) doFullHandshake() error {
- c := hs.c
-
- if hs.clientHello.ocspStapling && len(hs.cert.OCSPStaple) > 0 {
- hs.hello.ocspStapling = true
- }
-
- hs.hello.ticketSupported = hs.clientHello.ticketSupported && !c.config.SessionTicketsDisabled
- hs.hello.cipherSuite = hs.suite.id
-
- hs.finishedHash = newFinishedHash(hs.c.vers, hs.suite)
- if c.config.ClientAuth == NoClientCert {
- // No need to keep a full record of the handshake if client
- // certificates won't be used.
- hs.finishedHash.discardHandshakeBuffer()
- }
- hs.finishedHash.Write(hs.clientHello.marshal())
- hs.finishedHash.Write(hs.hello.marshal())
- if _, err := c.writeRecord(recordTypeHandshake, hs.hello.marshal()); err != nil {
- return err
- }
-
- certMsg := new(certificateMsg)
- certMsg.certificates = hs.cert.Certificate
- hs.finishedHash.Write(certMsg.marshal())
- if _, err := c.writeRecord(recordTypeHandshake, certMsg.marshal()); err != nil {
- return err
- }
-
- if hs.hello.ocspStapling {
- certStatus := new(certificateStatusMsg)
- certStatus.response = hs.cert.OCSPStaple
- hs.finishedHash.Write(certStatus.marshal())
- if _, err := c.writeRecord(recordTypeHandshake, certStatus.marshal()); err != nil {
- return err
- }
- }
-
- keyAgreement := hs.suite.ka(c.vers)
- skx, err := keyAgreement.generateServerKeyExchange(c.config, hs.cert, hs.clientHello, hs.hello)
- if err != nil {
- c.sendAlert(alertHandshakeFailure)
- return err
- }
- if skx != nil {
- hs.finishedHash.Write(skx.marshal())
- if _, err := c.writeRecord(recordTypeHandshake, skx.marshal()); err != nil {
- return err
- }
- }
-
- var certReq *certificateRequestMsg
- if c.config.ClientAuth >= RequestClientCert {
- // Request a client certificate
- certReq = new(certificateRequestMsg)
- certReq.certificateTypes = []byte{
- byte(certTypeRSASign),
- byte(certTypeECDSASign),
- }
- if c.vers >= VersionTLS12 {
- certReq.hasSignatureAlgorithm = true
- certReq.supportedSignatureAlgorithms = supportedSignatureAlgorithms
- }
-
- // An empty list of certificateAuthorities signals to
- // the client that it may send any certificate in response
- // to our request. When we know the CAs we trust, then
- // we can send them down, so that the client can choose
- // an appropriate certificate to give to us.
- if c.config.ClientCAs != nil {
- certReq.certificateAuthorities = c.config.ClientCAs.Subjects()
- }
- hs.finishedHash.Write(certReq.marshal())
- if _, err := c.writeRecord(recordTypeHandshake, certReq.marshal()); err != nil {
- return err
- }
- }
-
- helloDone := new(serverHelloDoneMsg)
- hs.finishedHash.Write(helloDone.marshal())
- if _, err := c.writeRecord(recordTypeHandshake, helloDone.marshal()); err != nil {
- return err
- }
-
- if _, err := c.flush(); err != nil {
- return err
- }
-
- var pub crypto.PublicKey // public key for client auth, if any
-
- msg, err := c.readHandshake()
- if err != nil {
- return err
- }
-
- // If we requested a client certificate, then the client must send a
- // certificate message, even if it's empty.
- if c.config.ClientAuth >= RequestClientCert {
- certMsg, ok := msg.(*certificateMsg)
- if !ok {
- c.sendAlert(alertUnexpectedMessage)
- return unexpectedMessageError(certMsg, msg)
- }
- hs.finishedHash.Write(certMsg.marshal())
-
- if err := c.processCertsFromClient(Certificate{
- Certificate: certMsg.certificates,
- }); err != nil {
- return err
- }
- if len(certMsg.certificates) != 0 {
- pub = c.peerCertificates[0].PublicKey
- }
-
- msg, err = c.readHandshake()
- if err != nil {
- return err
- }
- }
- if c.config.VerifyConnection != nil {
- if err := c.config.VerifyConnection(c.connectionStateLocked()); err != nil {
- c.sendAlert(alertBadCertificate)
- return err
- }
- }
-
- // Get client key exchange
- ckx, ok := msg.(*clientKeyExchangeMsg)
- if !ok {
- c.sendAlert(alertUnexpectedMessage)
- return unexpectedMessageError(ckx, msg)
- }
- hs.finishedHash.Write(ckx.marshal())
-
- preMasterSecret, err := keyAgreement.processClientKeyExchange(c.config, hs.cert, ckx, c.vers)
- if err != nil {
- c.sendAlert(alertHandshakeFailure)
- return err
- }
- hs.masterSecret = masterFromPreMasterSecret(c.vers, hs.suite, preMasterSecret, hs.clientHello.random, hs.hello.random)
- if err := c.config.writeKeyLog(keyLogLabelTLS12, hs.clientHello.random, hs.masterSecret); err != nil {
- c.sendAlert(alertInternalError)
- return err
- }
-
- // If we received a client cert in response to our certificate request message,
- // the client will send us a certificateVerifyMsg immediately after the
- // clientKeyExchangeMsg. This message is a digest of all preceding
- // handshake-layer messages that is signed using the private key corresponding
- // to the client's certificate. This allows us to verify that the client is in
- // possession of the private key of the certificate.
- if len(c.peerCertificates) > 0 {
- msg, err = c.readHandshake()
- if err != nil {
- return err
- }
- certVerify, ok := msg.(*certificateVerifyMsg)
- if !ok {
- c.sendAlert(alertUnexpectedMessage)
- return unexpectedMessageError(certVerify, msg)
- }
-
- var sigType uint8
- var sigHash crypto.Hash
- if c.vers >= VersionTLS12 {
- if !isSupportedSignatureAlgorithm(certVerify.signatureAlgorithm, certReq.supportedSignatureAlgorithms) {
- c.sendAlert(alertIllegalParameter)
- return errors.New("tls: client certificate used with invalid signature algorithm")
- }
- sigType, sigHash, err = typeAndHashFromSignatureScheme(certVerify.signatureAlgorithm)
- if err != nil {
- return c.sendAlert(alertInternalError)
- }
- } else {
- sigType, sigHash, err = legacyTypeAndHashFromPublicKey(pub)
- if err != nil {
- c.sendAlert(alertIllegalParameter)
- return err
- }
- }
-
- signed := hs.finishedHash.hashForClientCertificate(sigType, sigHash, hs.masterSecret)
- if err := verifyHandshakeSignature(sigType, pub, sigHash, signed, certVerify.signature); err != nil {
- c.sendAlert(alertDecryptError)
- return errors.New("tls: invalid signature by the client certificate: " + err.Error())
- }
-
- hs.finishedHash.Write(certVerify.marshal())
- }
-
- hs.finishedHash.discardHandshakeBuffer()
-
- return nil
-}
-
-func (hs *serverHandshakeState) establishKeys() error {
- c := hs.c
-
- clientMAC, serverMAC, clientKey, serverKey, clientIV, serverIV :=
- keysFromMasterSecret(c.vers, hs.suite, hs.masterSecret, hs.clientHello.random, hs.hello.random, hs.suite.macLen, hs.suite.keyLen, hs.suite.ivLen)
-
- var clientCipher, serverCipher interface{}
- var clientHash, serverHash hash.Hash
-
- if hs.suite.aead == nil {
- clientCipher = hs.suite.cipher(clientKey, clientIV, true /* for reading */)
- clientHash = hs.suite.mac(clientMAC)
- serverCipher = hs.suite.cipher(serverKey, serverIV, false /* not for reading */)
- serverHash = hs.suite.mac(serverMAC)
- } else {
- clientCipher = hs.suite.aead(clientKey, clientIV)
- serverCipher = hs.suite.aead(serverKey, serverIV)
- }
-
- c.in.prepareCipherSpec(c.vers, clientCipher, clientHash)
- c.out.prepareCipherSpec(c.vers, serverCipher, serverHash)
-
- return nil
-}
-
-func (hs *serverHandshakeState) readFinished(out []byte) error {
- c := hs.c
-
- if err := c.readChangeCipherSpec(); err != nil {
- return err
- }
-
- msg, err := c.readHandshake()
- if err != nil {
- return err
- }
- clientFinished, ok := msg.(*finishedMsg)
- if !ok {
- c.sendAlert(alertUnexpectedMessage)
- return unexpectedMessageError(clientFinished, msg)
- }
-
- verify := hs.finishedHash.clientSum(hs.masterSecret)
- if len(verify) != len(clientFinished.verifyData) ||
- subtle.ConstantTimeCompare(verify, clientFinished.verifyData) != 1 {
- c.sendAlert(alertHandshakeFailure)
- return errors.New("tls: client's Finished message is incorrect")
- }
-
- hs.finishedHash.Write(clientFinished.marshal())
- copy(out, verify)
- return nil
-}
-
-func (hs *serverHandshakeState) sendSessionTicket() error {
- // ticketSupported is set in a resumption handshake if the
- // ticket from the client was encrypted with an old session
- // ticket key and thus a refreshed ticket should be sent.
- if !hs.hello.ticketSupported {
- return nil
- }
-
- c := hs.c
- m := new(newSessionTicketMsg)
-
- createdAt := uint64(c.config.time().Unix())
- if hs.sessionState != nil {
- // If this is re-wrapping an old key, then keep
- // the original time it was created.
- createdAt = hs.sessionState.createdAt
- }
-
- var certsFromClient [][]byte
- for _, cert := range c.peerCertificates {
- certsFromClient = append(certsFromClient, cert.Raw)
- }
- state := sessionState{
- vers: c.vers,
- cipherSuite: hs.suite.id,
- createdAt: createdAt,
- masterSecret: hs.masterSecret,
- certificates: certsFromClient,
- }
- var err error
- m.ticket, err = c.encryptTicket(state.marshal())
- if err != nil {
- return err
- }
-
- hs.finishedHash.Write(m.marshal())
- if _, err := c.writeRecord(recordTypeHandshake, m.marshal()); err != nil {
- return err
- }
-
- return nil
-}
-
-func (hs *serverHandshakeState) sendFinished(out []byte) error {
- c := hs.c
-
- if _, err := c.writeRecord(recordTypeChangeCipherSpec, []byte{1}); err != nil {
- return err
- }
-
- finished := new(finishedMsg)
- finished.verifyData = hs.finishedHash.serverSum(hs.masterSecret)
- hs.finishedHash.Write(finished.marshal())
- if _, err := c.writeRecord(recordTypeHandshake, finished.marshal()); err != nil {
- return err
- }
-
- copy(out, finished.verifyData)
-
- return nil
-}
-
-// processCertsFromClient takes a chain of client certificates either from a
-// Certificates message or from a sessionState and verifies them. It returns
-// the public key of the leaf certificate.
-func (c *Conn) processCertsFromClient(certificate Certificate) error {
- certificates := certificate.Certificate
- certs := make([]*x509.Certificate, len(certificates))
- var err error
- for i, asn1Data := range certificates {
- if certs[i], err = x509.ParseCertificate(asn1Data); err != nil {
- c.sendAlert(alertBadCertificate)
- return errors.New("tls: failed to parse client certificate: " + err.Error())
- }
- }
-
- if len(certs) == 0 && requiresClientCert(c.config.ClientAuth) {
- c.sendAlert(alertBadCertificate)
- return errors.New("tls: client didn't provide a certificate")
- }
-
- if c.config.ClientAuth >= VerifyClientCertIfGiven && len(certs) > 0 {
- opts := x509.VerifyOptions{
- Roots: c.config.ClientCAs,
- CurrentTime: c.config.time(),
- Intermediates: x509.NewCertPool(),
- KeyUsages: []x509.ExtKeyUsage{x509.ExtKeyUsageClientAuth},
- }
-
- for _, cert := range certs[1:] {
- opts.Intermediates.AddCert(cert)
- }
-
- chains, err := certs[0].Verify(opts)
- if err != nil {
- c.sendAlert(alertBadCertificate)
- return errors.New("tls: failed to verify client certificate: " + err.Error())
- }
-
- c.verifiedChains = chains
- }
-
- c.peerCertificates = certs
- c.ocspResponse = certificate.OCSPStaple
- c.scts = certificate.SignedCertificateTimestamps
-
- if len(certs) > 0 {
- switch certs[0].PublicKey.(type) {
- case *ecdsa.PublicKey, *rsa.PublicKey, ed25519.PublicKey:
- default:
- c.sendAlert(alertUnsupportedCertificate)
- return fmt.Errorf("tls: client certificate contains an unsupported public key of type %T", certs[0].PublicKey)
- }
- }
-
- if c.config.VerifyPeerCertificate != nil {
- if err := c.config.VerifyPeerCertificate(certificates, c.verifiedChains); err != nil {
- c.sendAlert(alertBadCertificate)
- return err
- }
- }
-
- return nil
-}
-
-func newClientHelloInfo(c *Conn, clientHello *clientHelloMsg) *ClientHelloInfo {
- supportedVersions := clientHello.supportedVersions
- if len(clientHello.supportedVersions) == 0 {
- supportedVersions = supportedVersionsFromMax(clientHello.vers)
- }
-
- return toClientHelloInfo(&clientHelloInfo{
- CipherSuites: clientHello.cipherSuites,
- ServerName: clientHello.serverName,
- SupportedCurves: clientHello.supportedCurves,
- SupportedPoints: clientHello.supportedPoints,
- SignatureSchemes: clientHello.supportedSignatureAlgorithms,
- SupportedProtos: clientHello.alpnProtocols,
- SupportedVersions: supportedVersions,
- Conn: c.conn,
- config: toConfig(c.config),
- })
-}
diff --git a/vendor/github.com/marten-seemann/qtls-go1-16/handshake_server_tls13.go b/vendor/github.com/marten-seemann/qtls-go1-16/handshake_server_tls13.go
deleted file mode 100644
index e1ab918d3..000000000
--- a/vendor/github.com/marten-seemann/qtls-go1-16/handshake_server_tls13.go
+++ /dev/null
@@ -1,912 +0,0 @@
-// Copyright 2018 The Go Authors. All rights reserved.
-// Use of this source code is governed by a BSD-style
-// license that can be found in the LICENSE file.
-
-package qtls
-
-import (
- "bytes"
- "crypto"
- "crypto/hmac"
- "crypto/rsa"
- "errors"
- "fmt"
- "hash"
- "io"
- "sync/atomic"
- "time"
-)
-
-// maxClientPSKIdentities is the number of client PSK identities the server will
-// attempt to validate. It will ignore the rest not to let cheap ClientHello
-// messages cause too much work in session ticket decryption attempts.
-const maxClientPSKIdentities = 5
-
-type serverHandshakeStateTLS13 struct {
- c *Conn
- clientHello *clientHelloMsg
- hello *serverHelloMsg
- encryptedExtensions *encryptedExtensionsMsg
- sentDummyCCS bool
- usingPSK bool
- suite *cipherSuiteTLS13
- cert *Certificate
- sigAlg SignatureScheme
- earlySecret []byte
- sharedKey []byte
- handshakeSecret []byte
- masterSecret []byte
- trafficSecret []byte // client_application_traffic_secret_0
- transcript hash.Hash
- clientFinished []byte
-}
-
-func (hs *serverHandshakeStateTLS13) handshake() error {
- c := hs.c
-
- // For an overview of the TLS 1.3 handshake, see RFC 8446, Section 2.
- if err := hs.processClientHello(); err != nil {
- return err
- }
- if err := hs.checkForResumption(); err != nil {
- return err
- }
- if err := hs.pickCertificate(); err != nil {
- return err
- }
- c.buffering = true
- if err := hs.sendServerParameters(); err != nil {
- return err
- }
- if err := hs.sendServerCertificate(); err != nil {
- return err
- }
- if err := hs.sendServerFinished(); err != nil {
- return err
- }
- // Note that at this point we could start sending application data without
- // waiting for the client's second flight, but the application might not
- // expect the lack of replay protection of the ClientHello parameters.
- if _, err := c.flush(); err != nil {
- return err
- }
- if err := hs.readClientCertificate(); err != nil {
- return err
- }
- if err := hs.readClientFinished(); err != nil {
- return err
- }
-
- atomic.StoreUint32(&c.handshakeStatus, 1)
-
- return nil
-}
-
-func (hs *serverHandshakeStateTLS13) processClientHello() error {
- c := hs.c
-
- hs.hello = new(serverHelloMsg)
- hs.encryptedExtensions = new(encryptedExtensionsMsg)
-
- // TLS 1.3 froze the ServerHello.legacy_version field, and uses
- // supported_versions instead. See RFC 8446, sections 4.1.3 and 4.2.1.
- hs.hello.vers = VersionTLS12
- hs.hello.supportedVersion = c.vers
-
- if len(hs.clientHello.supportedVersions) == 0 {
- c.sendAlert(alertIllegalParameter)
- return errors.New("tls: client used the legacy version field to negotiate TLS 1.3")
- }
-
- // Abort if the client is doing a fallback and landing lower than what we
- // support. See RFC 7507, which however does not specify the interaction
- // with supported_versions. The only difference is that with
- // supported_versions a client has a chance to attempt a [TLS 1.2, TLS 1.4]
- // handshake in case TLS 1.3 is broken but 1.2 is not. Alas, in that case,
- // it will have to drop the TLS_FALLBACK_SCSV protection if it falls back to
- // TLS 1.2, because a TLS 1.3 server would abort here. The situation before
- // supported_versions was not better because there was just no way to do a
- // TLS 1.4 handshake without risking the server selecting TLS 1.3.
- for _, id := range hs.clientHello.cipherSuites {
- if id == TLS_FALLBACK_SCSV {
- // Use c.vers instead of max(supported_versions) because an attacker
- // could defeat this by adding an arbitrary high version otherwise.
- if c.vers < c.config.maxSupportedVersion() {
- c.sendAlert(alertInappropriateFallback)
- return errors.New("tls: client using inappropriate protocol fallback")
- }
- break
- }
- }
-
- if len(hs.clientHello.compressionMethods) != 1 ||
- hs.clientHello.compressionMethods[0] != compressionNone {
- c.sendAlert(alertIllegalParameter)
- return errors.New("tls: TLS 1.3 client supports illegal compression methods")
- }
-
- hs.hello.random = make([]byte, 32)
- if _, err := io.ReadFull(c.config.rand(), hs.hello.random); err != nil {
- c.sendAlert(alertInternalError)
- return err
- }
-
- if len(hs.clientHello.secureRenegotiation) != 0 {
- c.sendAlert(alertHandshakeFailure)
- return errors.New("tls: initial handshake had non-empty renegotiation extension")
- }
-
- hs.hello.sessionId = hs.clientHello.sessionId
- hs.hello.compressionMethod = compressionNone
-
- var preferenceList, supportedList, ourList []uint16
- var useConfiguredCipherSuites bool
- for _, suiteID := range c.config.CipherSuites {
- for _, suite := range cipherSuitesTLS13 {
- if suite.id == suiteID {
- ourList = append(ourList, suiteID)
- break
- }
- }
- }
- if len(ourList) > 0 {
- useConfiguredCipherSuites = true
- } else {
- ourList = defaultCipherSuitesTLS13()
- }
- if c.config.PreferServerCipherSuites {
- preferenceList = ourList
- supportedList = hs.clientHello.cipherSuites
-
- // If the client does not seem to have hardware support for AES-GCM,
- // prefer other AEAD ciphers even if we prioritized AES-GCM ciphers
- // by default.
- if !useConfiguredCipherSuites && !aesgcmPreferred(hs.clientHello.cipherSuites) {
- preferenceList = deprioritizeAES(preferenceList)
- }
- } else {
- preferenceList = hs.clientHello.cipherSuites
- supportedList = ourList
-
- // If we don't have hardware support for AES-GCM, prefer other AEAD
- // ciphers even if the client prioritized AES-GCM.
- if !hasAESGCMHardwareSupport {
- preferenceList = deprioritizeAES(preferenceList)
- }
- }
- for _, suiteID := range preferenceList {
- hs.suite = mutualCipherSuiteTLS13(supportedList, suiteID)
- if hs.suite != nil {
- break
- }
- }
- if hs.suite == nil {
- c.sendAlert(alertHandshakeFailure)
- return errors.New("tls: no cipher suite supported by both client and server")
- }
- c.cipherSuite = hs.suite.id
- hs.hello.cipherSuite = hs.suite.id
- hs.transcript = hs.suite.hash.New()
-
- // Pick the ECDHE group in server preference order, but give priority to
- // groups with a key share, to avoid a HelloRetryRequest round-trip.
- var selectedGroup CurveID
- var clientKeyShare *keyShare
-GroupSelection:
- for _, preferredGroup := range c.config.curvePreferences() {
- for _, ks := range hs.clientHello.keyShares {
- if ks.group == preferredGroup {
- selectedGroup = ks.group
- clientKeyShare = &ks
- break GroupSelection
- }
- }
- if selectedGroup != 0 {
- continue
- }
- for _, group := range hs.clientHello.supportedCurves {
- if group == preferredGroup {
- selectedGroup = group
- break
- }
- }
- }
- if selectedGroup == 0 {
- c.sendAlert(alertHandshakeFailure)
- return errors.New("tls: no ECDHE curve supported by both client and server")
- }
- if clientKeyShare == nil {
- if err := hs.doHelloRetryRequest(selectedGroup); err != nil {
- return err
- }
- clientKeyShare = &hs.clientHello.keyShares[0]
- }
-
- if _, ok := curveForCurveID(selectedGroup); selectedGroup != X25519 && !ok {
- c.sendAlert(alertInternalError)
- return errors.New("tls: CurvePreferences includes unsupported curve")
- }
- params, err := generateECDHEParameters(c.config.rand(), selectedGroup)
- if err != nil {
- c.sendAlert(alertInternalError)
- return err
- }
- hs.hello.serverShare = keyShare{group: selectedGroup, data: params.PublicKey()}
- hs.sharedKey = params.SharedKey(clientKeyShare.data)
- if hs.sharedKey == nil {
- c.sendAlert(alertIllegalParameter)
- return errors.New("tls: invalid client key share")
- }
-
- c.serverName = hs.clientHello.serverName
-
- if c.extraConfig != nil && c.extraConfig.ReceivedExtensions != nil {
- c.extraConfig.ReceivedExtensions(typeClientHello, hs.clientHello.additionalExtensions)
- }
-
- if len(hs.clientHello.alpnProtocols) > 0 {
- if selectedProto := mutualProtocol(hs.clientHello.alpnProtocols, c.config.NextProtos); selectedProto != "" {
- hs.encryptedExtensions.alpnProtocol = selectedProto
- c.clientProtocol = selectedProto
- }
- }
-
- return nil
-}
-
-func (hs *serverHandshakeStateTLS13) checkForResumption() error {
- c := hs.c
-
- if c.config.SessionTicketsDisabled {
- return nil
- }
-
- modeOK := false
- for _, mode := range hs.clientHello.pskModes {
- if mode == pskModeDHE {
- modeOK = true
- break
- }
- }
- if !modeOK {
- return nil
- }
-
- if len(hs.clientHello.pskIdentities) != len(hs.clientHello.pskBinders) {
- c.sendAlert(alertIllegalParameter)
- return errors.New("tls: invalid or missing PSK binders")
- }
- if len(hs.clientHello.pskIdentities) == 0 {
- return nil
- }
-
- for i, identity := range hs.clientHello.pskIdentities {
- if i >= maxClientPSKIdentities {
- break
- }
-
- plaintext, _ := c.decryptTicket(identity.label)
- if plaintext == nil {
- continue
- }
- sessionState := new(sessionStateTLS13)
- if ok := sessionState.unmarshal(plaintext); !ok {
- continue
- }
-
- if hs.clientHello.earlyData {
- if sessionState.maxEarlyData == 0 {
- c.sendAlert(alertUnsupportedExtension)
- return errors.New("tls: client sent unexpected early data")
- }
-
- if sessionState.alpn == c.clientProtocol &&
- c.extraConfig != nil && c.extraConfig.MaxEarlyData > 0 &&
- c.extraConfig.Accept0RTT != nil && c.extraConfig.Accept0RTT(sessionState.appData) {
- hs.encryptedExtensions.earlyData = true
- c.used0RTT = true
- }
- }
-
- createdAt := time.Unix(int64(sessionState.createdAt), 0)
- if c.config.time().Sub(createdAt) > maxSessionTicketLifetime {
- continue
- }
-
- // We don't check the obfuscated ticket age because it's affected by
- // clock skew and it's only a freshness signal useful for shrinking the
- // window for replay attacks, which don't affect us as we don't do 0-RTT.
-
- pskSuite := cipherSuiteTLS13ByID(sessionState.cipherSuite)
- if pskSuite == nil || pskSuite.hash != hs.suite.hash {
- continue
- }
-
- // PSK connections don't re-establish client certificates, but carry
- // them over in the session ticket. Ensure the presence of client certs
- // in the ticket is consistent with the configured requirements.
- sessionHasClientCerts := len(sessionState.certificate.Certificate) != 0
- needClientCerts := requiresClientCert(c.config.ClientAuth)
- if needClientCerts && !sessionHasClientCerts {
- continue
- }
- if sessionHasClientCerts && c.config.ClientAuth == NoClientCert {
- continue
- }
-
- psk := hs.suite.expandLabel(sessionState.resumptionSecret, "resumption",
- nil, hs.suite.hash.Size())
- hs.earlySecret = hs.suite.extract(psk, nil)
- binderKey := hs.suite.deriveSecret(hs.earlySecret, resumptionBinderLabel, nil)
- // Clone the transcript in case a HelloRetryRequest was recorded.
- transcript := cloneHash(hs.transcript, hs.suite.hash)
- if transcript == nil {
- c.sendAlert(alertInternalError)
- return errors.New("tls: internal error: failed to clone hash")
- }
- transcript.Write(hs.clientHello.marshalWithoutBinders())
- pskBinder := hs.suite.finishedHash(binderKey, transcript)
- if !hmac.Equal(hs.clientHello.pskBinders[i], pskBinder) {
- c.sendAlert(alertDecryptError)
- return errors.New("tls: invalid PSK binder")
- }
-
- c.didResume = true
- if err := c.processCertsFromClient(sessionState.certificate); err != nil {
- return err
- }
-
- h := cloneHash(hs.transcript, hs.suite.hash)
- h.Write(hs.clientHello.marshal())
- if hs.encryptedExtensions.earlyData {
- clientEarlySecret := hs.suite.deriveSecret(hs.earlySecret, "c e traffic", h)
- c.in.exportKey(Encryption0RTT, hs.suite, clientEarlySecret)
- if err := c.config.writeKeyLog(keyLogLabelEarlyTraffic, hs.clientHello.random, clientEarlySecret); err != nil {
- c.sendAlert(alertInternalError)
- return err
- }
- }
-
- hs.hello.selectedIdentityPresent = true
- hs.hello.selectedIdentity = uint16(i)
- hs.usingPSK = true
- return nil
- }
-
- return nil
-}
-
-// cloneHash uses the encoding.BinaryMarshaler and encoding.BinaryUnmarshaler
-// interfaces implemented by standard library hashes to clone the state of in
-// to a new instance of h. It returns nil if the operation fails.
-func cloneHash(in hash.Hash, h crypto.Hash) hash.Hash {
- // Recreate the interface to avoid importing encoding.
- type binaryMarshaler interface {
- MarshalBinary() (data []byte, err error)
- UnmarshalBinary(data []byte) error
- }
- marshaler, ok := in.(binaryMarshaler)
- if !ok {
- return nil
- }
- state, err := marshaler.MarshalBinary()
- if err != nil {
- return nil
- }
- out := h.New()
- unmarshaler, ok := out.(binaryMarshaler)
- if !ok {
- return nil
- }
- if err := unmarshaler.UnmarshalBinary(state); err != nil {
- return nil
- }
- return out
-}
-
-func (hs *serverHandshakeStateTLS13) pickCertificate() error {
- c := hs.c
-
- // Only one of PSK and certificates are used at a time.
- if hs.usingPSK {
- return nil
- }
-
- // signature_algorithms is required in TLS 1.3. See RFC 8446, Section 4.2.3.
- if len(hs.clientHello.supportedSignatureAlgorithms) == 0 {
- return c.sendAlert(alertMissingExtension)
- }
-
- certificate, err := c.config.getCertificate(newClientHelloInfo(c, hs.clientHello))
- if err != nil {
- if err == errNoCertificates {
- c.sendAlert(alertUnrecognizedName)
- } else {
- c.sendAlert(alertInternalError)
- }
- return err
- }
- hs.sigAlg, err = selectSignatureScheme(c.vers, certificate, hs.clientHello.supportedSignatureAlgorithms)
- if err != nil {
- // getCertificate returned a certificate that is unsupported or
- // incompatible with the client's signature algorithms.
- c.sendAlert(alertHandshakeFailure)
- return err
- }
- hs.cert = certificate
-
- return nil
-}
-
-// sendDummyChangeCipherSpec sends a ChangeCipherSpec record for compatibility
-// with middleboxes that didn't implement TLS correctly. See RFC 8446, Appendix D.4.
-func (hs *serverHandshakeStateTLS13) sendDummyChangeCipherSpec() error {
- if hs.sentDummyCCS {
- return nil
- }
- hs.sentDummyCCS = true
-
- _, err := hs.c.writeRecord(recordTypeChangeCipherSpec, []byte{1})
- return err
-}
-
-func (hs *serverHandshakeStateTLS13) doHelloRetryRequest(selectedGroup CurveID) error {
- c := hs.c
-
- // The first ClientHello gets double-hashed into the transcript upon a
- // HelloRetryRequest. See RFC 8446, Section 4.4.1.
- hs.transcript.Write(hs.clientHello.marshal())
- chHash := hs.transcript.Sum(nil)
- hs.transcript.Reset()
- hs.transcript.Write([]byte{typeMessageHash, 0, 0, uint8(len(chHash))})
- hs.transcript.Write(chHash)
-
- helloRetryRequest := &serverHelloMsg{
- vers: hs.hello.vers,
- random: helloRetryRequestRandom,
- sessionId: hs.hello.sessionId,
- cipherSuite: hs.hello.cipherSuite,
- compressionMethod: hs.hello.compressionMethod,
- supportedVersion: hs.hello.supportedVersion,
- selectedGroup: selectedGroup,
- }
-
- hs.transcript.Write(helloRetryRequest.marshal())
- if _, err := c.writeRecord(recordTypeHandshake, helloRetryRequest.marshal()); err != nil {
- return err
- }
-
- if err := hs.sendDummyChangeCipherSpec(); err != nil {
- return err
- }
-
- msg, err := c.readHandshake()
- if err != nil {
- return err
- }
-
- clientHello, ok := msg.(*clientHelloMsg)
- if !ok {
- c.sendAlert(alertUnexpectedMessage)
- return unexpectedMessageError(clientHello, msg)
- }
-
- if len(clientHello.keyShares) != 1 || clientHello.keyShares[0].group != selectedGroup {
- c.sendAlert(alertIllegalParameter)
- return errors.New("tls: client sent invalid key share in second ClientHello")
- }
-
- if clientHello.earlyData {
- c.sendAlert(alertIllegalParameter)
- return errors.New("tls: client indicated early data in second ClientHello")
- }
-
- if illegalClientHelloChange(clientHello, hs.clientHello) {
- c.sendAlert(alertIllegalParameter)
- return errors.New("tls: client illegally modified second ClientHello")
- }
-
- if clientHello.earlyData {
- c.sendAlert(alertIllegalParameter)
- return errors.New("tls: client offered 0-RTT data in second ClientHello")
- }
-
- hs.clientHello = clientHello
- return nil
-}
-
-// illegalClientHelloChange reports whether the two ClientHello messages are
-// different, with the exception of the changes allowed before and after a
-// HelloRetryRequest. See RFC 8446, Section 4.1.2.
-func illegalClientHelloChange(ch, ch1 *clientHelloMsg) bool {
- if len(ch.supportedVersions) != len(ch1.supportedVersions) ||
- len(ch.cipherSuites) != len(ch1.cipherSuites) ||
- len(ch.supportedCurves) != len(ch1.supportedCurves) ||
- len(ch.supportedSignatureAlgorithms) != len(ch1.supportedSignatureAlgorithms) ||
- len(ch.supportedSignatureAlgorithmsCert) != len(ch1.supportedSignatureAlgorithmsCert) ||
- len(ch.alpnProtocols) != len(ch1.alpnProtocols) {
- return true
- }
- for i := range ch.supportedVersions {
- if ch.supportedVersions[i] != ch1.supportedVersions[i] {
- return true
- }
- }
- for i := range ch.cipherSuites {
- if ch.cipherSuites[i] != ch1.cipherSuites[i] {
- return true
- }
- }
- for i := range ch.supportedCurves {
- if ch.supportedCurves[i] != ch1.supportedCurves[i] {
- return true
- }
- }
- for i := range ch.supportedSignatureAlgorithms {
- if ch.supportedSignatureAlgorithms[i] != ch1.supportedSignatureAlgorithms[i] {
- return true
- }
- }
- for i := range ch.supportedSignatureAlgorithmsCert {
- if ch.supportedSignatureAlgorithmsCert[i] != ch1.supportedSignatureAlgorithmsCert[i] {
- return true
- }
- }
- for i := range ch.alpnProtocols {
- if ch.alpnProtocols[i] != ch1.alpnProtocols[i] {
- return true
- }
- }
- return ch.vers != ch1.vers ||
- !bytes.Equal(ch.random, ch1.random) ||
- !bytes.Equal(ch.sessionId, ch1.sessionId) ||
- !bytes.Equal(ch.compressionMethods, ch1.compressionMethods) ||
- ch.serverName != ch1.serverName ||
- ch.ocspStapling != ch1.ocspStapling ||
- !bytes.Equal(ch.supportedPoints, ch1.supportedPoints) ||
- ch.ticketSupported != ch1.ticketSupported ||
- !bytes.Equal(ch.sessionTicket, ch1.sessionTicket) ||
- ch.secureRenegotiationSupported != ch1.secureRenegotiationSupported ||
- !bytes.Equal(ch.secureRenegotiation, ch1.secureRenegotiation) ||
- ch.scts != ch1.scts ||
- !bytes.Equal(ch.cookie, ch1.cookie) ||
- !bytes.Equal(ch.pskModes, ch1.pskModes)
-}
-
-func (hs *serverHandshakeStateTLS13) sendServerParameters() error {
- c := hs.c
-
- if c.extraConfig != nil && c.extraConfig.EnforceNextProtoSelection && len(c.clientProtocol) == 0 {
- c.sendAlert(alertNoApplicationProtocol)
- return fmt.Errorf("ALPN negotiation failed. Client offered: %q", hs.clientHello.alpnProtocols)
- }
-
- hs.transcript.Write(hs.clientHello.marshal())
- hs.transcript.Write(hs.hello.marshal())
- if _, err := c.writeRecord(recordTypeHandshake, hs.hello.marshal()); err != nil {
- return err
- }
-
- if err := hs.sendDummyChangeCipherSpec(); err != nil {
- return err
- }
-
- earlySecret := hs.earlySecret
- if earlySecret == nil {
- earlySecret = hs.suite.extract(nil, nil)
- }
- hs.handshakeSecret = hs.suite.extract(hs.sharedKey,
- hs.suite.deriveSecret(earlySecret, "derived", nil))
-
- clientSecret := hs.suite.deriveSecret(hs.handshakeSecret,
- clientHandshakeTrafficLabel, hs.transcript)
- c.in.exportKey(EncryptionHandshake, hs.suite, clientSecret)
- c.in.setTrafficSecret(hs.suite, clientSecret)
- serverSecret := hs.suite.deriveSecret(hs.handshakeSecret,
- serverHandshakeTrafficLabel, hs.transcript)
- c.out.exportKey(EncryptionHandshake, hs.suite, serverSecret)
- c.out.setTrafficSecret(hs.suite, serverSecret)
-
- err := c.config.writeKeyLog(keyLogLabelClientHandshake, hs.clientHello.random, clientSecret)
- if err != nil {
- c.sendAlert(alertInternalError)
- return err
- }
- err = c.config.writeKeyLog(keyLogLabelServerHandshake, hs.clientHello.random, serverSecret)
- if err != nil {
- c.sendAlert(alertInternalError)
- return err
- }
-
- if hs.c.extraConfig != nil && hs.c.extraConfig.GetExtensions != nil {
- hs.encryptedExtensions.additionalExtensions = hs.c.extraConfig.GetExtensions(typeEncryptedExtensions)
- }
-
- hs.transcript.Write(hs.encryptedExtensions.marshal())
- if _, err := c.writeRecord(recordTypeHandshake, hs.encryptedExtensions.marshal()); err != nil {
- return err
- }
-
- return nil
-}
-
-func (hs *serverHandshakeStateTLS13) requestClientCert() bool {
- return hs.c.config.ClientAuth >= RequestClientCert && !hs.usingPSK
-}
-
-func (hs *serverHandshakeStateTLS13) sendServerCertificate() error {
- c := hs.c
-
- // Only one of PSK and certificates are used at a time.
- if hs.usingPSK {
- return nil
- }
-
- if hs.requestClientCert() {
- // Request a client certificate
- certReq := new(certificateRequestMsgTLS13)
- certReq.ocspStapling = true
- certReq.scts = true
- certReq.supportedSignatureAlgorithms = supportedSignatureAlgorithms
- if c.config.ClientCAs != nil {
- certReq.certificateAuthorities = c.config.ClientCAs.Subjects()
- }
-
- hs.transcript.Write(certReq.marshal())
- if _, err := c.writeRecord(recordTypeHandshake, certReq.marshal()); err != nil {
- return err
- }
- }
-
- certMsg := new(certificateMsgTLS13)
-
- certMsg.certificate = *hs.cert
- certMsg.scts = hs.clientHello.scts && len(hs.cert.SignedCertificateTimestamps) > 0
- certMsg.ocspStapling = hs.clientHello.ocspStapling && len(hs.cert.OCSPStaple) > 0
-
- hs.transcript.Write(certMsg.marshal())
- if _, err := c.writeRecord(recordTypeHandshake, certMsg.marshal()); err != nil {
- return err
- }
-
- certVerifyMsg := new(certificateVerifyMsg)
- certVerifyMsg.hasSignatureAlgorithm = true
- certVerifyMsg.signatureAlgorithm = hs.sigAlg
-
- sigType, sigHash, err := typeAndHashFromSignatureScheme(hs.sigAlg)
- if err != nil {
- return c.sendAlert(alertInternalError)
- }
-
- signed := signedMessage(sigHash, serverSignatureContext, hs.transcript)
- signOpts := crypto.SignerOpts(sigHash)
- if sigType == signatureRSAPSS {
- signOpts = &rsa.PSSOptions{SaltLength: rsa.PSSSaltLengthEqualsHash, Hash: sigHash}
- }
- sig, err := hs.cert.PrivateKey.(crypto.Signer).Sign(c.config.rand(), signed, signOpts)
- if err != nil {
- public := hs.cert.PrivateKey.(crypto.Signer).Public()
- if rsaKey, ok := public.(*rsa.PublicKey); ok && sigType == signatureRSAPSS &&
- rsaKey.N.BitLen()/8 < sigHash.Size()*2+2 { // key too small for RSA-PSS
- c.sendAlert(alertHandshakeFailure)
- } else {
- c.sendAlert(alertInternalError)
- }
- return errors.New("tls: failed to sign handshake: " + err.Error())
- }
- certVerifyMsg.signature = sig
-
- hs.transcript.Write(certVerifyMsg.marshal())
- if _, err := c.writeRecord(recordTypeHandshake, certVerifyMsg.marshal()); err != nil {
- return err
- }
-
- return nil
-}
-
-func (hs *serverHandshakeStateTLS13) sendServerFinished() error {
- c := hs.c
-
- finished := &finishedMsg{
- verifyData: hs.suite.finishedHash(c.out.trafficSecret, hs.transcript),
- }
-
- hs.transcript.Write(finished.marshal())
- if _, err := c.writeRecord(recordTypeHandshake, finished.marshal()); err != nil {
- return err
- }
-
- // Derive secrets that take context through the server Finished.
-
- hs.masterSecret = hs.suite.extract(nil,
- hs.suite.deriveSecret(hs.handshakeSecret, "derived", nil))
-
- hs.trafficSecret = hs.suite.deriveSecret(hs.masterSecret,
- clientApplicationTrafficLabel, hs.transcript)
- serverSecret := hs.suite.deriveSecret(hs.masterSecret,
- serverApplicationTrafficLabel, hs.transcript)
- c.out.exportKey(EncryptionApplication, hs.suite, serverSecret)
- c.out.setTrafficSecret(hs.suite, serverSecret)
-
- err := c.config.writeKeyLog(keyLogLabelClientTraffic, hs.clientHello.random, hs.trafficSecret)
- if err != nil {
- c.sendAlert(alertInternalError)
- return err
- }
- err = c.config.writeKeyLog(keyLogLabelServerTraffic, hs.clientHello.random, serverSecret)
- if err != nil {
- c.sendAlert(alertInternalError)
- return err
- }
-
- c.ekm = hs.suite.exportKeyingMaterial(hs.masterSecret, hs.transcript)
-
- // If we did not request client certificates, at this point we can
- // precompute the client finished and roll the transcript forward to send
- // session tickets in our first flight.
- if !hs.requestClientCert() {
- if err := hs.sendSessionTickets(); err != nil {
- return err
- }
- }
-
- return nil
-}
-
-func (hs *serverHandshakeStateTLS13) shouldSendSessionTickets() bool {
- if hs.c.config.SessionTicketsDisabled {
- return false
- }
-
- // Don't send tickets the client wouldn't use. See RFC 8446, Section 4.2.9.
- for _, pskMode := range hs.clientHello.pskModes {
- if pskMode == pskModeDHE {
- return true
- }
- }
- return false
-}
-
-func (hs *serverHandshakeStateTLS13) sendSessionTickets() error {
- c := hs.c
-
- hs.clientFinished = hs.suite.finishedHash(c.in.trafficSecret, hs.transcript)
- finishedMsg := &finishedMsg{
- verifyData: hs.clientFinished,
- }
- hs.transcript.Write(finishedMsg.marshal())
-
- if !hs.shouldSendSessionTickets() {
- return nil
- }
-
- c.resumptionSecret = hs.suite.deriveSecret(hs.masterSecret,
- resumptionLabel, hs.transcript)
-
- // Don't send session tickets when the alternative record layer is set.
- // Instead, save the resumption secret on the Conn.
- // Session tickets can then be generated by calling Conn.GetSessionTicket().
- if hs.c.extraConfig != nil && hs.c.extraConfig.AlternativeRecordLayer != nil {
- return nil
- }
-
- m, err := hs.c.getSessionTicketMsg(nil)
- if err != nil {
- return err
- }
- if _, err := c.writeRecord(recordTypeHandshake, m.marshal()); err != nil {
- return err
- }
-
- return nil
-}
-
-func (hs *serverHandshakeStateTLS13) readClientCertificate() error {
- c := hs.c
-
- if !hs.requestClientCert() {
- // Make sure the connection is still being verified whether or not
- // the server requested a client certificate.
- if c.config.VerifyConnection != nil {
- if err := c.config.VerifyConnection(c.connectionStateLocked()); err != nil {
- c.sendAlert(alertBadCertificate)
- return err
- }
- }
- return nil
- }
-
- // If we requested a client certificate, then the client must send a
- // certificate message. If it's empty, no CertificateVerify is sent.
-
- msg, err := c.readHandshake()
- if err != nil {
- return err
- }
-
- certMsg, ok := msg.(*certificateMsgTLS13)
- if !ok {
- c.sendAlert(alertUnexpectedMessage)
- return unexpectedMessageError(certMsg, msg)
- }
- hs.transcript.Write(certMsg.marshal())
-
- if err := c.processCertsFromClient(certMsg.certificate); err != nil {
- return err
- }
-
- if c.config.VerifyConnection != nil {
- if err := c.config.VerifyConnection(c.connectionStateLocked()); err != nil {
- c.sendAlert(alertBadCertificate)
- return err
- }
- }
-
- if len(certMsg.certificate.Certificate) != 0 {
- msg, err = c.readHandshake()
- if err != nil {
- return err
- }
-
- certVerify, ok := msg.(*certificateVerifyMsg)
- if !ok {
- c.sendAlert(alertUnexpectedMessage)
- return unexpectedMessageError(certVerify, msg)
- }
-
- // See RFC 8446, Section 4.4.3.
- if !isSupportedSignatureAlgorithm(certVerify.signatureAlgorithm, supportedSignatureAlgorithms) {
- c.sendAlert(alertIllegalParameter)
- return errors.New("tls: client certificate used with invalid signature algorithm")
- }
- sigType, sigHash, err := typeAndHashFromSignatureScheme(certVerify.signatureAlgorithm)
- if err != nil {
- return c.sendAlert(alertInternalError)
- }
- if sigType == signaturePKCS1v15 || sigHash == crypto.SHA1 {
- c.sendAlert(alertIllegalParameter)
- return errors.New("tls: client certificate used with invalid signature algorithm")
- }
- signed := signedMessage(sigHash, clientSignatureContext, hs.transcript)
- if err := verifyHandshakeSignature(sigType, c.peerCertificates[0].PublicKey,
- sigHash, signed, certVerify.signature); err != nil {
- c.sendAlert(alertDecryptError)
- return errors.New("tls: invalid signature by the client certificate: " + err.Error())
- }
-
- hs.transcript.Write(certVerify.marshal())
- }
-
- // If we waited until the client certificates to send session tickets, we
- // are ready to do it now.
- if err := hs.sendSessionTickets(); err != nil {
- return err
- }
-
- return nil
-}
-
-func (hs *serverHandshakeStateTLS13) readClientFinished() error {
- c := hs.c
-
- msg, err := c.readHandshake()
- if err != nil {
- return err
- }
-
- finished, ok := msg.(*finishedMsg)
- if !ok {
- c.sendAlert(alertUnexpectedMessage)
- return unexpectedMessageError(finished, msg)
- }
-
- if !hmac.Equal(hs.clientFinished, finished.verifyData) {
- c.sendAlert(alertDecryptError)
- return errors.New("tls: invalid client finished hash")
- }
-
- c.in.exportKey(EncryptionApplication, hs.suite, hs.trafficSecret)
- c.in.setTrafficSecret(hs.suite, hs.trafficSecret)
-
- return nil
-}
diff --git a/vendor/github.com/marten-seemann/qtls-go1-16/key_agreement.go b/vendor/github.com/marten-seemann/qtls-go1-16/key_agreement.go
deleted file mode 100644
index d8f5d4690..000000000
--- a/vendor/github.com/marten-seemann/qtls-go1-16/key_agreement.go
+++ /dev/null
@@ -1,338 +0,0 @@
-// Copyright 2010 The Go Authors. All rights reserved.
-// Use of this source code is governed by a BSD-style
-// license that can be found in the LICENSE file.
-
-package qtls
-
-import (
- "crypto"
- "crypto/md5"
- "crypto/rsa"
- "crypto/sha1"
- "crypto/x509"
- "errors"
- "fmt"
- "io"
-)
-
-var errClientKeyExchange = errors.New("tls: invalid ClientKeyExchange message")
-var errServerKeyExchange = errors.New("tls: invalid ServerKeyExchange message")
-
-// rsaKeyAgreement implements the standard TLS key agreement where the client
-// encrypts the pre-master secret to the server's public key.
-type rsaKeyAgreement struct{}
-
-func (ka rsaKeyAgreement) generateServerKeyExchange(config *config, cert *Certificate, clientHello *clientHelloMsg, hello *serverHelloMsg) (*serverKeyExchangeMsg, error) {
- return nil, nil
-}
-
-func (ka rsaKeyAgreement) processClientKeyExchange(config *config, cert *Certificate, ckx *clientKeyExchangeMsg, version uint16) ([]byte, error) {
- if len(ckx.ciphertext) < 2 {
- return nil, errClientKeyExchange
- }
- ciphertextLen := int(ckx.ciphertext[0])<<8 | int(ckx.ciphertext[1])
- if ciphertextLen != len(ckx.ciphertext)-2 {
- return nil, errClientKeyExchange
- }
- ciphertext := ckx.ciphertext[2:]
-
- priv, ok := cert.PrivateKey.(crypto.Decrypter)
- if !ok {
- return nil, errors.New("tls: certificate private key does not implement crypto.Decrypter")
- }
- // Perform constant time RSA PKCS #1 v1.5 decryption
- preMasterSecret, err := priv.Decrypt(config.rand(), ciphertext, &rsa.PKCS1v15DecryptOptions{SessionKeyLen: 48})
- if err != nil {
- return nil, err
- }
- // We don't check the version number in the premaster secret. For one,
- // by checking it, we would leak information about the validity of the
- // encrypted pre-master secret. Secondly, it provides only a small
- // benefit against a downgrade attack and some implementations send the
- // wrong version anyway. See the discussion at the end of section
- // 7.4.7.1 of RFC 4346.
- return preMasterSecret, nil
-}
-
-func (ka rsaKeyAgreement) processServerKeyExchange(config *config, clientHello *clientHelloMsg, serverHello *serverHelloMsg, cert *x509.Certificate, skx *serverKeyExchangeMsg) error {
- return errors.New("tls: unexpected ServerKeyExchange")
-}
-
-func (ka rsaKeyAgreement) generateClientKeyExchange(config *config, clientHello *clientHelloMsg, cert *x509.Certificate) ([]byte, *clientKeyExchangeMsg, error) {
- preMasterSecret := make([]byte, 48)
- preMasterSecret[0] = byte(clientHello.vers >> 8)
- preMasterSecret[1] = byte(clientHello.vers)
- _, err := io.ReadFull(config.rand(), preMasterSecret[2:])
- if err != nil {
- return nil, nil, err
- }
-
- rsaKey, ok := cert.PublicKey.(*rsa.PublicKey)
- if !ok {
- return nil, nil, errors.New("tls: server certificate contains incorrect key type for selected ciphersuite")
- }
- encrypted, err := rsa.EncryptPKCS1v15(config.rand(), rsaKey, preMasterSecret)
- if err != nil {
- return nil, nil, err
- }
- ckx := new(clientKeyExchangeMsg)
- ckx.ciphertext = make([]byte, len(encrypted)+2)
- ckx.ciphertext[0] = byte(len(encrypted) >> 8)
- ckx.ciphertext[1] = byte(len(encrypted))
- copy(ckx.ciphertext[2:], encrypted)
- return preMasterSecret, ckx, nil
-}
-
-// sha1Hash calculates a SHA1 hash over the given byte slices.
-func sha1Hash(slices [][]byte) []byte {
- hsha1 := sha1.New()
- for _, slice := range slices {
- hsha1.Write(slice)
- }
- return hsha1.Sum(nil)
-}
-
-// md5SHA1Hash implements TLS 1.0's hybrid hash function which consists of the
-// concatenation of an MD5 and SHA1 hash.
-func md5SHA1Hash(slices [][]byte) []byte {
- md5sha1 := make([]byte, md5.Size+sha1.Size)
- hmd5 := md5.New()
- for _, slice := range slices {
- hmd5.Write(slice)
- }
- copy(md5sha1, hmd5.Sum(nil))
- copy(md5sha1[md5.Size:], sha1Hash(slices))
- return md5sha1
-}
-
-// hashForServerKeyExchange hashes the given slices and returns their digest
-// using the given hash function (for >= TLS 1.2) or using a default based on
-// the sigType (for earlier TLS versions). For Ed25519 signatures, which don't
-// do pre-hashing, it returns the concatenation of the slices.
-func hashForServerKeyExchange(sigType uint8, hashFunc crypto.Hash, version uint16, slices ...[]byte) []byte {
- if sigType == signatureEd25519 {
- var signed []byte
- for _, slice := range slices {
- signed = append(signed, slice...)
- }
- return signed
- }
- if version >= VersionTLS12 {
- h := hashFunc.New()
- for _, slice := range slices {
- h.Write(slice)
- }
- digest := h.Sum(nil)
- return digest
- }
- if sigType == signatureECDSA {
- return sha1Hash(slices)
- }
- return md5SHA1Hash(slices)
-}
-
-// ecdheKeyAgreement implements a TLS key agreement where the server
-// generates an ephemeral EC public/private key pair and signs it. The
-// pre-master secret is then calculated using ECDH. The signature may
-// be ECDSA, Ed25519 or RSA.
-type ecdheKeyAgreement struct {
- version uint16
- isRSA bool
- params ecdheParameters
-
- // ckx and preMasterSecret are generated in processServerKeyExchange
- // and returned in generateClientKeyExchange.
- ckx *clientKeyExchangeMsg
- preMasterSecret []byte
-}
-
-func (ka *ecdheKeyAgreement) generateServerKeyExchange(config *config, cert *Certificate, clientHello *clientHelloMsg, hello *serverHelloMsg) (*serverKeyExchangeMsg, error) {
- var curveID CurveID
- for _, c := range clientHello.supportedCurves {
- if config.supportsCurve(c) {
- curveID = c
- break
- }
- }
-
- if curveID == 0 {
- return nil, errors.New("tls: no supported elliptic curves offered")
- }
- if _, ok := curveForCurveID(curveID); curveID != X25519 && !ok {
- return nil, errors.New("tls: CurvePreferences includes unsupported curve")
- }
-
- params, err := generateECDHEParameters(config.rand(), curveID)
- if err != nil {
- return nil, err
- }
- ka.params = params
-
- // See RFC 4492, Section 5.4.
- ecdhePublic := params.PublicKey()
- serverECDHEParams := make([]byte, 1+2+1+len(ecdhePublic))
- serverECDHEParams[0] = 3 // named curve
- serverECDHEParams[1] = byte(curveID >> 8)
- serverECDHEParams[2] = byte(curveID)
- serverECDHEParams[3] = byte(len(ecdhePublic))
- copy(serverECDHEParams[4:], ecdhePublic)
-
- priv, ok := cert.PrivateKey.(crypto.Signer)
- if !ok {
- return nil, fmt.Errorf("tls: certificate private key of type %T does not implement crypto.Signer", cert.PrivateKey)
- }
-
- var signatureAlgorithm SignatureScheme
- var sigType uint8
- var sigHash crypto.Hash
- if ka.version >= VersionTLS12 {
- signatureAlgorithm, err = selectSignatureScheme(ka.version, cert, clientHello.supportedSignatureAlgorithms)
- if err != nil {
- return nil, err
- }
- sigType, sigHash, err = typeAndHashFromSignatureScheme(signatureAlgorithm)
- if err != nil {
- return nil, err
- }
- } else {
- sigType, sigHash, err = legacyTypeAndHashFromPublicKey(priv.Public())
- if err != nil {
- return nil, err
- }
- }
- if (sigType == signaturePKCS1v15 || sigType == signatureRSAPSS) != ka.isRSA {
- return nil, errors.New("tls: certificate cannot be used with the selected cipher suite")
- }
-
- signed := hashForServerKeyExchange(sigType, sigHash, ka.version, clientHello.random, hello.random, serverECDHEParams)
-
- signOpts := crypto.SignerOpts(sigHash)
- if sigType == signatureRSAPSS {
- signOpts = &rsa.PSSOptions{SaltLength: rsa.PSSSaltLengthEqualsHash, Hash: sigHash}
- }
- sig, err := priv.Sign(config.rand(), signed, signOpts)
- if err != nil {
- return nil, errors.New("tls: failed to sign ECDHE parameters: " + err.Error())
- }
-
- skx := new(serverKeyExchangeMsg)
- sigAndHashLen := 0
- if ka.version >= VersionTLS12 {
- sigAndHashLen = 2
- }
- skx.key = make([]byte, len(serverECDHEParams)+sigAndHashLen+2+len(sig))
- copy(skx.key, serverECDHEParams)
- k := skx.key[len(serverECDHEParams):]
- if ka.version >= VersionTLS12 {
- k[0] = byte(signatureAlgorithm >> 8)
- k[1] = byte(signatureAlgorithm)
- k = k[2:]
- }
- k[0] = byte(len(sig) >> 8)
- k[1] = byte(len(sig))
- copy(k[2:], sig)
-
- return skx, nil
-}
-
-func (ka *ecdheKeyAgreement) processClientKeyExchange(config *config, cert *Certificate, ckx *clientKeyExchangeMsg, version uint16) ([]byte, error) {
- if len(ckx.ciphertext) == 0 || int(ckx.ciphertext[0]) != len(ckx.ciphertext)-1 {
- return nil, errClientKeyExchange
- }
-
- preMasterSecret := ka.params.SharedKey(ckx.ciphertext[1:])
- if preMasterSecret == nil {
- return nil, errClientKeyExchange
- }
-
- return preMasterSecret, nil
-}
-
-func (ka *ecdheKeyAgreement) processServerKeyExchange(config *config, clientHello *clientHelloMsg, serverHello *serverHelloMsg, cert *x509.Certificate, skx *serverKeyExchangeMsg) error {
- if len(skx.key) < 4 {
- return errServerKeyExchange
- }
- if skx.key[0] != 3 { // named curve
- return errors.New("tls: server selected unsupported curve")
- }
- curveID := CurveID(skx.key[1])<<8 | CurveID(skx.key[2])
-
- publicLen := int(skx.key[3])
- if publicLen+4 > len(skx.key) {
- return errServerKeyExchange
- }
- serverECDHEParams := skx.key[:4+publicLen]
- publicKey := serverECDHEParams[4:]
-
- sig := skx.key[4+publicLen:]
- if len(sig) < 2 {
- return errServerKeyExchange
- }
-
- if _, ok := curveForCurveID(curveID); curveID != X25519 && !ok {
- return errors.New("tls: server selected unsupported curve")
- }
-
- params, err := generateECDHEParameters(config.rand(), curveID)
- if err != nil {
- return err
- }
- ka.params = params
-
- ka.preMasterSecret = params.SharedKey(publicKey)
- if ka.preMasterSecret == nil {
- return errServerKeyExchange
- }
-
- ourPublicKey := params.PublicKey()
- ka.ckx = new(clientKeyExchangeMsg)
- ka.ckx.ciphertext = make([]byte, 1+len(ourPublicKey))
- ka.ckx.ciphertext[0] = byte(len(ourPublicKey))
- copy(ka.ckx.ciphertext[1:], ourPublicKey)
-
- var sigType uint8
- var sigHash crypto.Hash
- if ka.version >= VersionTLS12 {
- signatureAlgorithm := SignatureScheme(sig[0])<<8 | SignatureScheme(sig[1])
- sig = sig[2:]
- if len(sig) < 2 {
- return errServerKeyExchange
- }
-
- if !isSupportedSignatureAlgorithm(signatureAlgorithm, clientHello.supportedSignatureAlgorithms) {
- return errors.New("tls: certificate used with invalid signature algorithm")
- }
- sigType, sigHash, err = typeAndHashFromSignatureScheme(signatureAlgorithm)
- if err != nil {
- return err
- }
- } else {
- sigType, sigHash, err = legacyTypeAndHashFromPublicKey(cert.PublicKey)
- if err != nil {
- return err
- }
- }
- if (sigType == signaturePKCS1v15 || sigType == signatureRSAPSS) != ka.isRSA {
- return errServerKeyExchange
- }
-
- sigLen := int(sig[0])<<8 | int(sig[1])
- if sigLen+2 != len(sig) {
- return errServerKeyExchange
- }
- sig = sig[2:]
-
- signed := hashForServerKeyExchange(sigType, sigHash, ka.version, clientHello.random, serverHello.random, serverECDHEParams)
- if err := verifyHandshakeSignature(sigType, cert.PublicKey, sigHash, signed, sig); err != nil {
- return errors.New("tls: invalid signature by the server certificate: " + err.Error())
- }
- return nil
-}
-
-func (ka *ecdheKeyAgreement) generateClientKeyExchange(config *config, clientHello *clientHelloMsg, cert *x509.Certificate) ([]byte, *clientKeyExchangeMsg, error) {
- if ka.ckx == nil {
- return nil, nil, errors.New("tls: missing ServerKeyExchange message")
- }
-
- return ka.preMasterSecret, ka.ckx, nil
-}
diff --git a/vendor/github.com/marten-seemann/qtls-go1-16/ticket.go b/vendor/github.com/marten-seemann/qtls-go1-16/ticket.go
deleted file mode 100644
index 006b8c1de..000000000
--- a/vendor/github.com/marten-seemann/qtls-go1-16/ticket.go
+++ /dev/null
@@ -1,259 +0,0 @@
-// Copyright 2012 The Go Authors. All rights reserved.
-// Use of this source code is governed by a BSD-style
-// license that can be found in the LICENSE file.
-
-package qtls
-
-import (
- "bytes"
- "crypto/aes"
- "crypto/cipher"
- "crypto/hmac"
- "crypto/sha256"
- "crypto/subtle"
- "errors"
- "io"
- "time"
-
- "golang.org/x/crypto/cryptobyte"
-)
-
-// sessionState contains the information that is serialized into a session
-// ticket in order to later resume a connection.
-type sessionState struct {
- vers uint16
- cipherSuite uint16
- createdAt uint64
- masterSecret []byte // opaque master_secret<1..2^16-1>;
- // struct { opaque certificate<1..2^24-1> } Certificate;
- certificates [][]byte // Certificate certificate_list<0..2^24-1>;
-
- // usedOldKey is true if the ticket from which this session came from
- // was encrypted with an older key and thus should be refreshed.
- usedOldKey bool
-}
-
-func (m *sessionState) marshal() []byte {
- var b cryptobyte.Builder
- b.AddUint16(m.vers)
- b.AddUint16(m.cipherSuite)
- addUint64(&b, m.createdAt)
- b.AddUint16LengthPrefixed(func(b *cryptobyte.Builder) {
- b.AddBytes(m.masterSecret)
- })
- b.AddUint24LengthPrefixed(func(b *cryptobyte.Builder) {
- for _, cert := range m.certificates {
- b.AddUint24LengthPrefixed(func(b *cryptobyte.Builder) {
- b.AddBytes(cert)
- })
- }
- })
- return b.BytesOrPanic()
-}
-
-func (m *sessionState) unmarshal(data []byte) bool {
- *m = sessionState{usedOldKey: m.usedOldKey}
- s := cryptobyte.String(data)
- if ok := s.ReadUint16(&m.vers) &&
- s.ReadUint16(&m.cipherSuite) &&
- readUint64(&s, &m.createdAt) &&
- readUint16LengthPrefixed(&s, &m.masterSecret) &&
- len(m.masterSecret) != 0; !ok {
- return false
- }
- var certList cryptobyte.String
- if !s.ReadUint24LengthPrefixed(&certList) {
- return false
- }
- for !certList.Empty() {
- var cert []byte
- if !readUint24LengthPrefixed(&certList, &cert) {
- return false
- }
- m.certificates = append(m.certificates, cert)
- }
- return s.Empty()
-}
-
-// sessionStateTLS13 is the content of a TLS 1.3 session ticket. Its first
-// version (revision = 0) doesn't carry any of the information needed for 0-RTT
-// validation and the nonce is always empty.
-// version (revision = 1) carries the max_early_data_size sent in the ticket.
-// version (revision = 2) carries the ALPN sent in the ticket.
-type sessionStateTLS13 struct {
- // uint8 version = 0x0304;
- // uint8 revision = 2;
- cipherSuite uint16
- createdAt uint64
- resumptionSecret []byte // opaque resumption_master_secret<1..2^8-1>;
- certificate Certificate // CertificateEntry certificate_list<0..2^24-1>;
- maxEarlyData uint32
- alpn string
-
- appData []byte
-}
-
-func (m *sessionStateTLS13) marshal() []byte {
- var b cryptobyte.Builder
- b.AddUint16(VersionTLS13)
- b.AddUint8(2) // revision
- b.AddUint16(m.cipherSuite)
- addUint64(&b, m.createdAt)
- b.AddUint8LengthPrefixed(func(b *cryptobyte.Builder) {
- b.AddBytes(m.resumptionSecret)
- })
- marshalCertificate(&b, m.certificate)
- b.AddUint32(m.maxEarlyData)
- b.AddUint8LengthPrefixed(func(b *cryptobyte.Builder) {
- b.AddBytes([]byte(m.alpn))
- })
- b.AddUint16LengthPrefixed(func(b *cryptobyte.Builder) {
- b.AddBytes(m.appData)
- })
- return b.BytesOrPanic()
-}
-
-func (m *sessionStateTLS13) unmarshal(data []byte) bool {
- *m = sessionStateTLS13{}
- s := cryptobyte.String(data)
- var version uint16
- var revision uint8
- var alpn []byte
- ret := s.ReadUint16(&version) &&
- version == VersionTLS13 &&
- s.ReadUint8(&revision) &&
- revision == 2 &&
- s.ReadUint16(&m.cipherSuite) &&
- readUint64(&s, &m.createdAt) &&
- readUint8LengthPrefixed(&s, &m.resumptionSecret) &&
- len(m.resumptionSecret) != 0 &&
- unmarshalCertificate(&s, &m.certificate) &&
- s.ReadUint32(&m.maxEarlyData) &&
- readUint8LengthPrefixed(&s, &alpn) &&
- readUint16LengthPrefixed(&s, &m.appData) &&
- s.Empty()
- m.alpn = string(alpn)
- return ret
-}
-
-func (c *Conn) encryptTicket(state []byte) ([]byte, error) {
- if len(c.ticketKeys) == 0 {
- return nil, errors.New("tls: internal error: session ticket keys unavailable")
- }
-
- encrypted := make([]byte, ticketKeyNameLen+aes.BlockSize+len(state)+sha256.Size)
- keyName := encrypted[:ticketKeyNameLen]
- iv := encrypted[ticketKeyNameLen : ticketKeyNameLen+aes.BlockSize]
- macBytes := encrypted[len(encrypted)-sha256.Size:]
-
- if _, err := io.ReadFull(c.config.rand(), iv); err != nil {
- return nil, err
- }
- key := c.ticketKeys[0]
- copy(keyName, key.keyName[:])
- block, err := aes.NewCipher(key.aesKey[:])
- if err != nil {
- return nil, errors.New("tls: failed to create cipher while encrypting ticket: " + err.Error())
- }
- cipher.NewCTR(block, iv).XORKeyStream(encrypted[ticketKeyNameLen+aes.BlockSize:], state)
-
- mac := hmac.New(sha256.New, key.hmacKey[:])
- mac.Write(encrypted[:len(encrypted)-sha256.Size])
- mac.Sum(macBytes[:0])
-
- return encrypted, nil
-}
-
-func (c *Conn) decryptTicket(encrypted []byte) (plaintext []byte, usedOldKey bool) {
- if len(encrypted) < ticketKeyNameLen+aes.BlockSize+sha256.Size {
- return nil, false
- }
-
- keyName := encrypted[:ticketKeyNameLen]
- iv := encrypted[ticketKeyNameLen : ticketKeyNameLen+aes.BlockSize]
- macBytes := encrypted[len(encrypted)-sha256.Size:]
- ciphertext := encrypted[ticketKeyNameLen+aes.BlockSize : len(encrypted)-sha256.Size]
-
- keyIndex := -1
- for i, candidateKey := range c.ticketKeys {
- if bytes.Equal(keyName, candidateKey.keyName[:]) {
- keyIndex = i
- break
- }
- }
- if keyIndex == -1 {
- return nil, false
- }
- key := &c.ticketKeys[keyIndex]
-
- mac := hmac.New(sha256.New, key.hmacKey[:])
- mac.Write(encrypted[:len(encrypted)-sha256.Size])
- expected := mac.Sum(nil)
-
- if subtle.ConstantTimeCompare(macBytes, expected) != 1 {
- return nil, false
- }
-
- block, err := aes.NewCipher(key.aesKey[:])
- if err != nil {
- return nil, false
- }
- plaintext = make([]byte, len(ciphertext))
- cipher.NewCTR(block, iv).XORKeyStream(plaintext, ciphertext)
-
- return plaintext, keyIndex > 0
-}
-
-func (c *Conn) getSessionTicketMsg(appData []byte) (*newSessionTicketMsgTLS13, error) {
- m := new(newSessionTicketMsgTLS13)
-
- var certsFromClient [][]byte
- for _, cert := range c.peerCertificates {
- certsFromClient = append(certsFromClient, cert.Raw)
- }
- state := sessionStateTLS13{
- cipherSuite: c.cipherSuite,
- createdAt: uint64(c.config.time().Unix()),
- resumptionSecret: c.resumptionSecret,
- certificate: Certificate{
- Certificate: certsFromClient,
- OCSPStaple: c.ocspResponse,
- SignedCertificateTimestamps: c.scts,
- },
- appData: appData,
- alpn: c.clientProtocol,
- }
- if c.extraConfig != nil {
- state.maxEarlyData = c.extraConfig.MaxEarlyData
- }
- var err error
- m.label, err = c.encryptTicket(state.marshal())
- if err != nil {
- return nil, err
- }
- m.lifetime = uint32(maxSessionTicketLifetime / time.Second)
- if c.extraConfig != nil {
- m.maxEarlyData = c.extraConfig.MaxEarlyData
- }
- return m, nil
-}
-
-// GetSessionTicket generates a new session ticket.
-// It should only be called after the handshake completes.
-// It can only be used for servers, and only if the alternative record layer is set.
-// The ticket may be nil if config.SessionTicketsDisabled is set,
-// or if the client isn't able to receive session tickets.
-func (c *Conn) GetSessionTicket(appData []byte) ([]byte, error) {
- if c.isClient || !c.handshakeComplete() || c.extraConfig == nil || c.extraConfig.AlternativeRecordLayer == nil {
- return nil, errors.New("GetSessionTicket is only valid for servers after completion of the handshake, and if an alternative record layer is set.")
- }
- if c.config.SessionTicketsDisabled {
- return nil, nil
- }
-
- m, err := c.getSessionTicketMsg(appData)
- if err != nil {
- return nil, err
- }
- return m.marshal(), nil
-}
diff --git a/vendor/github.com/marten-seemann/qtls-go1-16/tls.go b/vendor/github.com/marten-seemann/qtls-go1-16/tls.go
deleted file mode 100644
index 10cbae03b..000000000
--- a/vendor/github.com/marten-seemann/qtls-go1-16/tls.go
+++ /dev/null
@@ -1,393 +0,0 @@
-// Copyright 2009 The Go Authors. All rights reserved.
-// Use of this source code is governed by a BSD-style
-// license that can be found in the LICENSE file.
-
-// package qtls partially implements TLS 1.2, as specified in RFC 5246,
-// and TLS 1.3, as specified in RFC 8446.
-package qtls
-
-// BUG(agl): The crypto/tls package only implements some countermeasures
-// against Lucky13 attacks on CBC-mode encryption, and only on SHA1
-// variants. See http://www.isg.rhul.ac.uk/tls/TLStiming.pdf and
-// https://www.imperialviolet.org/2013/02/04/luckythirteen.html.
-
-import (
- "bytes"
- "context"
- "crypto"
- "crypto/ecdsa"
- "crypto/ed25519"
- "crypto/rsa"
- "crypto/x509"
- "encoding/pem"
- "errors"
- "fmt"
- "net"
- "os"
- "strings"
- "time"
-)
-
-// Server returns a new TLS server side connection
-// using conn as the underlying transport.
-// The configuration config must be non-nil and must include
-// at least one certificate or else set GetCertificate.
-func Server(conn net.Conn, config *Config, extraConfig *ExtraConfig) *Conn {
- c := &Conn{
- conn: conn,
- config: fromConfig(config),
- extraConfig: extraConfig,
- }
- c.handshakeFn = c.serverHandshake
- return c
-}
-
-// Client returns a new TLS client side connection
-// using conn as the underlying transport.
-// The config cannot be nil: users must set either ServerName or
-// InsecureSkipVerify in the config.
-func Client(conn net.Conn, config *Config, extraConfig *ExtraConfig) *Conn {
- c := &Conn{
- conn: conn,
- config: fromConfig(config),
- extraConfig: extraConfig,
- isClient: true,
- }
- c.handshakeFn = c.clientHandshake
- return c
-}
-
-// A listener implements a network listener (net.Listener) for TLS connections.
-type listener struct {
- net.Listener
- config *Config
- extraConfig *ExtraConfig
-}
-
-// Accept waits for and returns the next incoming TLS connection.
-// The returned connection is of type *Conn.
-func (l *listener) Accept() (net.Conn, error) {
- c, err := l.Listener.Accept()
- if err != nil {
- return nil, err
- }
- return Server(c, l.config, l.extraConfig), nil
-}
-
-// NewListener creates a Listener which accepts connections from an inner
-// Listener and wraps each connection with Server.
-// The configuration config must be non-nil and must include
-// at least one certificate or else set GetCertificate.
-func NewListener(inner net.Listener, config *Config, extraConfig *ExtraConfig) net.Listener {
- l := new(listener)
- l.Listener = inner
- l.config = config
- l.extraConfig = extraConfig
- return l
-}
-
-// Listen creates a TLS listener accepting connections on the
-// given network address using net.Listen.
-// The configuration config must be non-nil and must include
-// at least one certificate or else set GetCertificate.
-func Listen(network, laddr string, config *Config, extraConfig *ExtraConfig) (net.Listener, error) {
- if config == nil || len(config.Certificates) == 0 &&
- config.GetCertificate == nil && config.GetConfigForClient == nil {
- return nil, errors.New("tls: neither Certificates, GetCertificate, nor GetConfigForClient set in Config")
- }
- l, err := net.Listen(network, laddr)
- if err != nil {
- return nil, err
- }
- return NewListener(l, config, extraConfig), nil
-}
-
-type timeoutError struct{}
-
-func (timeoutError) Error() string { return "tls: DialWithDialer timed out" }
-func (timeoutError) Timeout() bool { return true }
-func (timeoutError) Temporary() bool { return true }
-
-// DialWithDialer connects to the given network address using dialer.Dial and
-// then initiates a TLS handshake, returning the resulting TLS connection. Any
-// timeout or deadline given in the dialer apply to connection and TLS
-// handshake as a whole.
-//
-// DialWithDialer interprets a nil configuration as equivalent to the zero
-// configuration; see the documentation of Config for the defaults.
-func DialWithDialer(dialer *net.Dialer, network, addr string, config *Config, extraConfig *ExtraConfig) (*Conn, error) {
- return dial(context.Background(), dialer, network, addr, config, extraConfig)
-}
-
-func dial(ctx context.Context, netDialer *net.Dialer, network, addr string, config *Config, extraConfig *ExtraConfig) (*Conn, error) {
- // We want the Timeout and Deadline values from dialer to cover the
- // whole process: TCP connection and TLS handshake. This means that we
- // also need to start our own timers now.
- timeout := netDialer.Timeout
-
- if !netDialer.Deadline.IsZero() {
- deadlineTimeout := time.Until(netDialer.Deadline)
- if timeout == 0 || deadlineTimeout < timeout {
- timeout = deadlineTimeout
- }
- }
-
- // hsErrCh is non-nil if we might not wait for Handshake to complete.
- var hsErrCh chan error
- if timeout != 0 || ctx.Done() != nil {
- hsErrCh = make(chan error, 2)
- }
- if timeout != 0 {
- timer := time.AfterFunc(timeout, func() {
- hsErrCh <- timeoutError{}
- })
- defer timer.Stop()
- }
-
- rawConn, err := netDialer.DialContext(ctx, network, addr)
- if err != nil {
- return nil, err
- }
-
- colonPos := strings.LastIndex(addr, ":")
- if colonPos == -1 {
- colonPos = len(addr)
- }
- hostname := addr[:colonPos]
-
- if config == nil {
- config = defaultConfig()
- }
- // If no ServerName is set, infer the ServerName
- // from the hostname we're connecting to.
- if config.ServerName == "" {
- // Make a copy to avoid polluting argument or default.
- c := config.Clone()
- c.ServerName = hostname
- config = c
- }
-
- conn := Client(rawConn, config, extraConfig)
-
- if hsErrCh == nil {
- err = conn.Handshake()
- } else {
- go func() {
- hsErrCh <- conn.Handshake()
- }()
-
- select {
- case <-ctx.Done():
- err = ctx.Err()
- case err = <-hsErrCh:
- if err != nil {
- // If the error was due to the context
- // closing, prefer the context's error, rather
- // than some random network teardown error.
- if e := ctx.Err(); e != nil {
- err = e
- }
- }
- }
- }
-
- if err != nil {
- rawConn.Close()
- return nil, err
- }
-
- return conn, nil
-}
-
-// Dial connects to the given network address using net.Dial
-// and then initiates a TLS handshake, returning the resulting
-// TLS connection.
-// Dial interprets a nil configuration as equivalent to
-// the zero configuration; see the documentation of Config
-// for the defaults.
-func Dial(network, addr string, config *Config, extraConfig *ExtraConfig) (*Conn, error) {
- return DialWithDialer(new(net.Dialer), network, addr, config, extraConfig)
-}
-
-// Dialer dials TLS connections given a configuration and a Dialer for the
-// underlying connection.
-type Dialer struct {
- // NetDialer is the optional dialer to use for the TLS connections'
- // underlying TCP connections.
- // A nil NetDialer is equivalent to the net.Dialer zero value.
- NetDialer *net.Dialer
-
- // Config is the TLS configuration to use for new connections.
- // A nil configuration is equivalent to the zero
- // configuration; see the documentation of Config for the
- // defaults.
- Config *Config
-
- ExtraConfig *ExtraConfig
-}
-
-// Dial connects to the given network address and initiates a TLS
-// handshake, returning the resulting TLS connection.
-//
-// The returned Conn, if any, will always be of type *Conn.
-func (d *Dialer) Dial(network, addr string) (net.Conn, error) {
- return d.DialContext(context.Background(), network, addr)
-}
-
-func (d *Dialer) netDialer() *net.Dialer {
- if d.NetDialer != nil {
- return d.NetDialer
- }
- return new(net.Dialer)
-}
-
-// DialContext connects to the given network address and initiates a TLS
-// handshake, returning the resulting TLS connection.
-//
-// The provided Context must be non-nil. If the context expires before
-// the connection is complete, an error is returned. Once successfully
-// connected, any expiration of the context will not affect the
-// connection.
-//
-// The returned Conn, if any, will always be of type *Conn.
-func (d *Dialer) DialContext(ctx context.Context, network, addr string) (net.Conn, error) {
- c, err := dial(ctx, d.netDialer(), network, addr, d.Config, d.ExtraConfig)
- if err != nil {
- // Don't return c (a typed nil) in an interface.
- return nil, err
- }
- return c, nil
-}
-
-// LoadX509KeyPair reads and parses a public/private key pair from a pair
-// of files. The files must contain PEM encoded data. The certificate file
-// may contain intermediate certificates following the leaf certificate to
-// form a certificate chain. On successful return, Certificate.Leaf will
-// be nil because the parsed form of the certificate is not retained.
-func LoadX509KeyPair(certFile, keyFile string) (Certificate, error) {
- certPEMBlock, err := os.ReadFile(certFile)
- if err != nil {
- return Certificate{}, err
- }
- keyPEMBlock, err := os.ReadFile(keyFile)
- if err != nil {
- return Certificate{}, err
- }
- return X509KeyPair(certPEMBlock, keyPEMBlock)
-}
-
-// X509KeyPair parses a public/private key pair from a pair of
-// PEM encoded data. On successful return, Certificate.Leaf will be nil because
-// the parsed form of the certificate is not retained.
-func X509KeyPair(certPEMBlock, keyPEMBlock []byte) (Certificate, error) {
- fail := func(err error) (Certificate, error) { return Certificate{}, err }
-
- var cert Certificate
- var skippedBlockTypes []string
- for {
- var certDERBlock *pem.Block
- certDERBlock, certPEMBlock = pem.Decode(certPEMBlock)
- if certDERBlock == nil {
- break
- }
- if certDERBlock.Type == "CERTIFICATE" {
- cert.Certificate = append(cert.Certificate, certDERBlock.Bytes)
- } else {
- skippedBlockTypes = append(skippedBlockTypes, certDERBlock.Type)
- }
- }
-
- if len(cert.Certificate) == 0 {
- if len(skippedBlockTypes) == 0 {
- return fail(errors.New("tls: failed to find any PEM data in certificate input"))
- }
- if len(skippedBlockTypes) == 1 && strings.HasSuffix(skippedBlockTypes[0], "PRIVATE KEY") {
- return fail(errors.New("tls: failed to find certificate PEM data in certificate input, but did find a private key; PEM inputs may have been switched"))
- }
- return fail(fmt.Errorf("tls: failed to find \"CERTIFICATE\" PEM block in certificate input after skipping PEM blocks of the following types: %v", skippedBlockTypes))
- }
-
- skippedBlockTypes = skippedBlockTypes[:0]
- var keyDERBlock *pem.Block
- for {
- keyDERBlock, keyPEMBlock = pem.Decode(keyPEMBlock)
- if keyDERBlock == nil {
- if len(skippedBlockTypes) == 0 {
- return fail(errors.New("tls: failed to find any PEM data in key input"))
- }
- if len(skippedBlockTypes) == 1 && skippedBlockTypes[0] == "CERTIFICATE" {
- return fail(errors.New("tls: found a certificate rather than a key in the PEM for the private key"))
- }
- return fail(fmt.Errorf("tls: failed to find PEM block with type ending in \"PRIVATE KEY\" in key input after skipping PEM blocks of the following types: %v", skippedBlockTypes))
- }
- if keyDERBlock.Type == "PRIVATE KEY" || strings.HasSuffix(keyDERBlock.Type, " PRIVATE KEY") {
- break
- }
- skippedBlockTypes = append(skippedBlockTypes, keyDERBlock.Type)
- }
-
- // We don't need to parse the public key for TLS, but we so do anyway
- // to check that it looks sane and matches the private key.
- x509Cert, err := x509.ParseCertificate(cert.Certificate[0])
- if err != nil {
- return fail(err)
- }
-
- cert.PrivateKey, err = parsePrivateKey(keyDERBlock.Bytes)
- if err != nil {
- return fail(err)
- }
-
- switch pub := x509Cert.PublicKey.(type) {
- case *rsa.PublicKey:
- priv, ok := cert.PrivateKey.(*rsa.PrivateKey)
- if !ok {
- return fail(errors.New("tls: private key type does not match public key type"))
- }
- if pub.N.Cmp(priv.N) != 0 {
- return fail(errors.New("tls: private key does not match public key"))
- }
- case *ecdsa.PublicKey:
- priv, ok := cert.PrivateKey.(*ecdsa.PrivateKey)
- if !ok {
- return fail(errors.New("tls: private key type does not match public key type"))
- }
- if pub.X.Cmp(priv.X) != 0 || pub.Y.Cmp(priv.Y) != 0 {
- return fail(errors.New("tls: private key does not match public key"))
- }
- case ed25519.PublicKey:
- priv, ok := cert.PrivateKey.(ed25519.PrivateKey)
- if !ok {
- return fail(errors.New("tls: private key type does not match public key type"))
- }
- if !bytes.Equal(priv.Public().(ed25519.PublicKey), pub) {
- return fail(errors.New("tls: private key does not match public key"))
- }
- default:
- return fail(errors.New("tls: unknown public key algorithm"))
- }
-
- return cert, nil
-}
-
-// Attempt to parse the given private key DER block. OpenSSL 0.9.8 generates
-// PKCS #1 private keys by default, while OpenSSL 1.0.0 generates PKCS #8 keys.
-// OpenSSL ecparam generates SEC1 EC private keys for ECDSA. We try all three.
-func parsePrivateKey(der []byte) (crypto.PrivateKey, error) {
- if key, err := x509.ParsePKCS1PrivateKey(der); err == nil {
- return key, nil
- }
- if key, err := x509.ParsePKCS8PrivateKey(der); err == nil {
- switch key := key.(type) {
- case *rsa.PrivateKey, *ecdsa.PrivateKey, ed25519.PrivateKey:
- return key, nil
- default:
- return nil, errors.New("tls: found unknown private key type in PKCS#8 wrapping")
- }
- }
- if key, err := x509.ParseECPrivateKey(der); err == nil {
- return key, nil
- }
-
- return nil, errors.New("tls: failed to parse private key")
-}
diff --git a/vendor/github.com/marten-seemann/qtls-go1-17/README.md b/vendor/github.com/marten-seemann/qtls-go1-17/README.md
deleted file mode 100644
index 3e9022127..000000000
--- a/vendor/github.com/marten-seemann/qtls-go1-17/README.md
+++ /dev/null
@@ -1,6 +0,0 @@
-# qtls
-
-[![Go Reference](https://pkg.go.dev/badge/github.com/marten-seemann/qtls-go1-17.svg)](https://pkg.go.dev/github.com/marten-seemann/qtls-go1-17)
-[![.github/workflows/go-test.yml](https://github.com/marten-seemann/qtls-go1-17/actions/workflows/go-test.yml/badge.svg)](https://github.com/marten-seemann/qtls-go1-17/actions/workflows/go-test.yml)
-
-This repository contains a modified version of the standard library's TLS implementation, modified for the QUIC protocol. It is used by [quic-go](https://github.com/lucas-clemente/quic-go).
diff --git a/vendor/github.com/marten-seemann/qtls-go1-17/cipher_suites.go b/vendor/github.com/marten-seemann/qtls-go1-17/cipher_suites.go
deleted file mode 100644
index 53a3956aa..000000000
--- a/vendor/github.com/marten-seemann/qtls-go1-17/cipher_suites.go
+++ /dev/null
@@ -1,691 +0,0 @@
-// Copyright 2010 The Go Authors. All rights reserved.
-// Use of this source code is governed by a BSD-style
-// license that can be found in the LICENSE file.
-
-package qtls
-
-import (
- "crypto"
- "crypto/aes"
- "crypto/cipher"
- "crypto/des"
- "crypto/hmac"
- "crypto/rc4"
- "crypto/sha1"
- "crypto/sha256"
- "fmt"
- "hash"
-
- "golang.org/x/crypto/chacha20poly1305"
-)
-
-// CipherSuite is a TLS cipher suite. Note that most functions in this package
-// accept and expose cipher suite IDs instead of this type.
-type CipherSuite struct {
- ID uint16
- Name string
-
- // Supported versions is the list of TLS protocol versions that can
- // negotiate this cipher suite.
- SupportedVersions []uint16
-
- // Insecure is true if the cipher suite has known security issues
- // due to its primitives, design, or implementation.
- Insecure bool
-}
-
-var (
- supportedUpToTLS12 = []uint16{VersionTLS10, VersionTLS11, VersionTLS12}
- supportedOnlyTLS12 = []uint16{VersionTLS12}
- supportedOnlyTLS13 = []uint16{VersionTLS13}
-)
-
-// CipherSuites returns a list of cipher suites currently implemented by this
-// package, excluding those with security issues, which are returned by
-// InsecureCipherSuites.
-//
-// The list is sorted by ID. Note that the default cipher suites selected by
-// this package might depend on logic that can't be captured by a static list,
-// and might not match those returned by this function.
-func CipherSuites() []*CipherSuite {
- return []*CipherSuite{
- {TLS_RSA_WITH_AES_128_CBC_SHA, "TLS_RSA_WITH_AES_128_CBC_SHA", supportedUpToTLS12, false},
- {TLS_RSA_WITH_AES_256_CBC_SHA, "TLS_RSA_WITH_AES_256_CBC_SHA", supportedUpToTLS12, false},
- {TLS_RSA_WITH_AES_128_GCM_SHA256, "TLS_RSA_WITH_AES_128_GCM_SHA256", supportedOnlyTLS12, false},
- {TLS_RSA_WITH_AES_256_GCM_SHA384, "TLS_RSA_WITH_AES_256_GCM_SHA384", supportedOnlyTLS12, false},
-
- {TLS_AES_128_GCM_SHA256, "TLS_AES_128_GCM_SHA256", supportedOnlyTLS13, false},
- {TLS_AES_256_GCM_SHA384, "TLS_AES_256_GCM_SHA384", supportedOnlyTLS13, false},
- {TLS_CHACHA20_POLY1305_SHA256, "TLS_CHACHA20_POLY1305_SHA256", supportedOnlyTLS13, false},
-
- {TLS_ECDHE_ECDSA_WITH_AES_128_CBC_SHA, "TLS_ECDHE_ECDSA_WITH_AES_128_CBC_SHA", supportedUpToTLS12, false},
- {TLS_ECDHE_ECDSA_WITH_AES_256_CBC_SHA, "TLS_ECDHE_ECDSA_WITH_AES_256_CBC_SHA", supportedUpToTLS12, false},
- {TLS_ECDHE_RSA_WITH_AES_128_CBC_SHA, "TLS_ECDHE_RSA_WITH_AES_128_CBC_SHA", supportedUpToTLS12, false},
- {TLS_ECDHE_RSA_WITH_AES_256_CBC_SHA, "TLS_ECDHE_RSA_WITH_AES_256_CBC_SHA", supportedUpToTLS12, false},
- {TLS_ECDHE_ECDSA_WITH_AES_128_GCM_SHA256, "TLS_ECDHE_ECDSA_WITH_AES_128_GCM_SHA256", supportedOnlyTLS12, false},
- {TLS_ECDHE_ECDSA_WITH_AES_256_GCM_SHA384, "TLS_ECDHE_ECDSA_WITH_AES_256_GCM_SHA384", supportedOnlyTLS12, false},
- {TLS_ECDHE_RSA_WITH_AES_128_GCM_SHA256, "TLS_ECDHE_RSA_WITH_AES_128_GCM_SHA256", supportedOnlyTLS12, false},
- {TLS_ECDHE_RSA_WITH_AES_256_GCM_SHA384, "TLS_ECDHE_RSA_WITH_AES_256_GCM_SHA384", supportedOnlyTLS12, false},
- {TLS_ECDHE_RSA_WITH_CHACHA20_POLY1305_SHA256, "TLS_ECDHE_RSA_WITH_CHACHA20_POLY1305_SHA256", supportedOnlyTLS12, false},
- {TLS_ECDHE_ECDSA_WITH_CHACHA20_POLY1305_SHA256, "TLS_ECDHE_ECDSA_WITH_CHACHA20_POLY1305_SHA256", supportedOnlyTLS12, false},
- }
-}
-
-// InsecureCipherSuites returns a list of cipher suites currently implemented by
-// this package and which have security issues.
-//
-// Most applications should not use the cipher suites in this list, and should
-// only use those returned by CipherSuites.
-func InsecureCipherSuites() []*CipherSuite {
- // This list includes RC4, CBC_SHA256, and 3DES cipher suites. See
- // cipherSuitesPreferenceOrder for details.
- return []*CipherSuite{
- {TLS_RSA_WITH_RC4_128_SHA, "TLS_RSA_WITH_RC4_128_SHA", supportedUpToTLS12, true},
- {TLS_RSA_WITH_3DES_EDE_CBC_SHA, "TLS_RSA_WITH_3DES_EDE_CBC_SHA", supportedUpToTLS12, true},
- {TLS_RSA_WITH_AES_128_CBC_SHA256, "TLS_RSA_WITH_AES_128_CBC_SHA256", supportedOnlyTLS12, true},
- {TLS_ECDHE_ECDSA_WITH_RC4_128_SHA, "TLS_ECDHE_ECDSA_WITH_RC4_128_SHA", supportedUpToTLS12, true},
- {TLS_ECDHE_RSA_WITH_RC4_128_SHA, "TLS_ECDHE_RSA_WITH_RC4_128_SHA", supportedUpToTLS12, true},
- {TLS_ECDHE_RSA_WITH_3DES_EDE_CBC_SHA, "TLS_ECDHE_RSA_WITH_3DES_EDE_CBC_SHA", supportedUpToTLS12, true},
- {TLS_ECDHE_ECDSA_WITH_AES_128_CBC_SHA256, "TLS_ECDHE_ECDSA_WITH_AES_128_CBC_SHA256", supportedOnlyTLS12, true},
- {TLS_ECDHE_RSA_WITH_AES_128_CBC_SHA256, "TLS_ECDHE_RSA_WITH_AES_128_CBC_SHA256", supportedOnlyTLS12, true},
- }
-}
-
-// CipherSuiteName returns the standard name for the passed cipher suite ID
-// (e.g. "TLS_ECDHE_ECDSA_WITH_AES_128_GCM_SHA256"), or a fallback representation
-// of the ID value if the cipher suite is not implemented by this package.
-func CipherSuiteName(id uint16) string {
- for _, c := range CipherSuites() {
- if c.ID == id {
- return c.Name
- }
- }
- for _, c := range InsecureCipherSuites() {
- if c.ID == id {
- return c.Name
- }
- }
- return fmt.Sprintf("0x%04X", id)
-}
-
-const (
- // suiteECDHE indicates that the cipher suite involves elliptic curve
- // Diffie-Hellman. This means that it should only be selected when the
- // client indicates that it supports ECC with a curve and point format
- // that we're happy with.
- suiteECDHE = 1 << iota
- // suiteECSign indicates that the cipher suite involves an ECDSA or
- // EdDSA signature and therefore may only be selected when the server's
- // certificate is ECDSA or EdDSA. If this is not set then the cipher suite
- // is RSA based.
- suiteECSign
- // suiteTLS12 indicates that the cipher suite should only be advertised
- // and accepted when using TLS 1.2.
- suiteTLS12
- // suiteSHA384 indicates that the cipher suite uses SHA384 as the
- // handshake hash.
- suiteSHA384
-)
-
-// A cipherSuite is a TLS 1.0–1.2 cipher suite, and defines the key exchange
-// mechanism, as well as the cipher+MAC pair or the AEAD.
-type cipherSuite struct {
- id uint16
- // the lengths, in bytes, of the key material needed for each component.
- keyLen int
- macLen int
- ivLen int
- ka func(version uint16) keyAgreement
- // flags is a bitmask of the suite* values, above.
- flags int
- cipher func(key, iv []byte, isRead bool) interface{}
- mac func(key []byte) hash.Hash
- aead func(key, fixedNonce []byte) aead
-}
-
-var cipherSuites = []*cipherSuite{ // TODO: replace with a map, since the order doesn't matter.
- {TLS_ECDHE_RSA_WITH_CHACHA20_POLY1305, 32, 0, 12, ecdheRSAKA, suiteECDHE | suiteTLS12, nil, nil, aeadChaCha20Poly1305},
- {TLS_ECDHE_ECDSA_WITH_CHACHA20_POLY1305, 32, 0, 12, ecdheECDSAKA, suiteECDHE | suiteECSign | suiteTLS12, nil, nil, aeadChaCha20Poly1305},
- {TLS_ECDHE_RSA_WITH_AES_128_GCM_SHA256, 16, 0, 4, ecdheRSAKA, suiteECDHE | suiteTLS12, nil, nil, aeadAESGCM},
- {TLS_ECDHE_ECDSA_WITH_AES_128_GCM_SHA256, 16, 0, 4, ecdheECDSAKA, suiteECDHE | suiteECSign | suiteTLS12, nil, nil, aeadAESGCM},
- {TLS_ECDHE_RSA_WITH_AES_256_GCM_SHA384, 32, 0, 4, ecdheRSAKA, suiteECDHE | suiteTLS12 | suiteSHA384, nil, nil, aeadAESGCM},
- {TLS_ECDHE_ECDSA_WITH_AES_256_GCM_SHA384, 32, 0, 4, ecdheECDSAKA, suiteECDHE | suiteECSign | suiteTLS12 | suiteSHA384, nil, nil, aeadAESGCM},
- {TLS_ECDHE_RSA_WITH_AES_128_CBC_SHA256, 16, 32, 16, ecdheRSAKA, suiteECDHE | suiteTLS12, cipherAES, macSHA256, nil},
- {TLS_ECDHE_RSA_WITH_AES_128_CBC_SHA, 16, 20, 16, ecdheRSAKA, suiteECDHE, cipherAES, macSHA1, nil},
- {TLS_ECDHE_ECDSA_WITH_AES_128_CBC_SHA256, 16, 32, 16, ecdheECDSAKA, suiteECDHE | suiteECSign | suiteTLS12, cipherAES, macSHA256, nil},
- {TLS_ECDHE_ECDSA_WITH_AES_128_CBC_SHA, 16, 20, 16, ecdheECDSAKA, suiteECDHE | suiteECSign, cipherAES, macSHA1, nil},
- {TLS_ECDHE_RSA_WITH_AES_256_CBC_SHA, 32, 20, 16, ecdheRSAKA, suiteECDHE, cipherAES, macSHA1, nil},
- {TLS_ECDHE_ECDSA_WITH_AES_256_CBC_SHA, 32, 20, 16, ecdheECDSAKA, suiteECDHE | suiteECSign, cipherAES, macSHA1, nil},
- {TLS_RSA_WITH_AES_128_GCM_SHA256, 16, 0, 4, rsaKA, suiteTLS12, nil, nil, aeadAESGCM},
- {TLS_RSA_WITH_AES_256_GCM_SHA384, 32, 0, 4, rsaKA, suiteTLS12 | suiteSHA384, nil, nil, aeadAESGCM},
- {TLS_RSA_WITH_AES_128_CBC_SHA256, 16, 32, 16, rsaKA, suiteTLS12, cipherAES, macSHA256, nil},
- {TLS_RSA_WITH_AES_128_CBC_SHA, 16, 20, 16, rsaKA, 0, cipherAES, macSHA1, nil},
- {TLS_RSA_WITH_AES_256_CBC_SHA, 32, 20, 16, rsaKA, 0, cipherAES, macSHA1, nil},
- {TLS_ECDHE_RSA_WITH_3DES_EDE_CBC_SHA, 24, 20, 8, ecdheRSAKA, suiteECDHE, cipher3DES, macSHA1, nil},
- {TLS_RSA_WITH_3DES_EDE_CBC_SHA, 24, 20, 8, rsaKA, 0, cipher3DES, macSHA1, nil},
- {TLS_RSA_WITH_RC4_128_SHA, 16, 20, 0, rsaKA, 0, cipherRC4, macSHA1, nil},
- {TLS_ECDHE_RSA_WITH_RC4_128_SHA, 16, 20, 0, ecdheRSAKA, suiteECDHE, cipherRC4, macSHA1, nil},
- {TLS_ECDHE_ECDSA_WITH_RC4_128_SHA, 16, 20, 0, ecdheECDSAKA, suiteECDHE | suiteECSign, cipherRC4, macSHA1, nil},
-}
-
-// selectCipherSuite returns the first TLS 1.0–1.2 cipher suite from ids which
-// is also in supportedIDs and passes the ok filter.
-func selectCipherSuite(ids, supportedIDs []uint16, ok func(*cipherSuite) bool) *cipherSuite {
- for _, id := range ids {
- candidate := cipherSuiteByID(id)
- if candidate == nil || !ok(candidate) {
- continue
- }
-
- for _, suppID := range supportedIDs {
- if id == suppID {
- return candidate
- }
- }
- }
- return nil
-}
-
-// A cipherSuiteTLS13 defines only the pair of the AEAD algorithm and hash
-// algorithm to be used with HKDF. See RFC 8446, Appendix B.4.
-type cipherSuiteTLS13 struct {
- id uint16
- keyLen int
- aead func(key, fixedNonce []byte) aead
- hash crypto.Hash
-}
-
-type CipherSuiteTLS13 struct {
- ID uint16
- KeyLen int
- Hash crypto.Hash
- AEAD func(key, fixedNonce []byte) cipher.AEAD
-}
-
-func (c *CipherSuiteTLS13) IVLen() int {
- return aeadNonceLength
-}
-
-var cipherSuitesTLS13 = []*cipherSuiteTLS13{ // TODO: replace with a map.
- {TLS_AES_128_GCM_SHA256, 16, aeadAESGCMTLS13, crypto.SHA256},
- {TLS_CHACHA20_POLY1305_SHA256, 32, aeadChaCha20Poly1305, crypto.SHA256},
- {TLS_AES_256_GCM_SHA384, 32, aeadAESGCMTLS13, crypto.SHA384},
-}
-
-// cipherSuitesPreferenceOrder is the order in which we'll select (on the
-// server) or advertise (on the client) TLS 1.0–1.2 cipher suites.
-//
-// Cipher suites are filtered but not reordered based on the application and
-// peer's preferences, meaning we'll never select a suite lower in this list if
-// any higher one is available. This makes it more defensible to keep weaker
-// cipher suites enabled, especially on the server side where we get the last
-// word, since there are no known downgrade attacks on cipher suites selection.
-//
-// The list is sorted by applying the following priority rules, stopping at the
-// first (most important) applicable one:
-//
-// - Anything else comes before RC4
-//
-// RC4 has practically exploitable biases. See https://www.rc4nomore.com.
-//
-// - Anything else comes before CBC_SHA256
-//
-// SHA-256 variants of the CBC ciphersuites don't implement any Lucky13
-// countermeasures. See http://www.isg.rhul.ac.uk/tls/Lucky13.html and
-// https://www.imperialviolet.org/2013/02/04/luckythirteen.html.
-//
-// - Anything else comes before 3DES
-//
-// 3DES has 64-bit blocks, which makes it fundamentally susceptible to
-// birthday attacks. See https://sweet32.info.
-//
-// - ECDHE comes before anything else
-//
-// Once we got the broken stuff out of the way, the most important
-// property a cipher suite can have is forward secrecy. We don't
-// implement FFDHE, so that means ECDHE.
-//
-// - AEADs come before CBC ciphers
-//
-// Even with Lucky13 countermeasures, MAC-then-Encrypt CBC cipher suites
-// are fundamentally fragile, and suffered from an endless sequence of
-// padding oracle attacks. See https://eprint.iacr.org/2015/1129,
-// https://www.imperialviolet.org/2014/12/08/poodleagain.html, and
-// https://blog.cloudflare.com/yet-another-padding-oracle-in-openssl-cbc-ciphersuites/.
-//
-// - AES comes before ChaCha20
-//
-// When AES hardware is available, AES-128-GCM and AES-256-GCM are faster
-// than ChaCha20Poly1305.
-//
-// When AES hardware is not available, AES-128-GCM is one or more of: much
-// slower, way more complex, and less safe (because not constant time)
-// than ChaCha20Poly1305.
-//
-// We use this list if we think both peers have AES hardware, and
-// cipherSuitesPreferenceOrderNoAES otherwise.
-//
-// - AES-128 comes before AES-256
-//
-// The only potential advantages of AES-256 are better multi-target
-// margins, and hypothetical post-quantum properties. Neither apply to
-// TLS, and AES-256 is slower due to its four extra rounds (which don't
-// contribute to the advantages above).
-//
-// - ECDSA comes before RSA
-//
-// The relative order of ECDSA and RSA cipher suites doesn't matter,
-// as they depend on the certificate. Pick one to get a stable order.
-//
-var cipherSuitesPreferenceOrder = []uint16{
- // AEADs w/ ECDHE
- TLS_ECDHE_ECDSA_WITH_AES_128_GCM_SHA256, TLS_ECDHE_RSA_WITH_AES_128_GCM_SHA256,
- TLS_ECDHE_ECDSA_WITH_AES_256_GCM_SHA384, TLS_ECDHE_RSA_WITH_AES_256_GCM_SHA384,
- TLS_ECDHE_ECDSA_WITH_CHACHA20_POLY1305, TLS_ECDHE_RSA_WITH_CHACHA20_POLY1305,
-
- // CBC w/ ECDHE
- TLS_ECDHE_ECDSA_WITH_AES_128_CBC_SHA, TLS_ECDHE_RSA_WITH_AES_128_CBC_SHA,
- TLS_ECDHE_ECDSA_WITH_AES_256_CBC_SHA, TLS_ECDHE_RSA_WITH_AES_256_CBC_SHA,
-
- // AEADs w/o ECDHE
- TLS_RSA_WITH_AES_128_GCM_SHA256,
- TLS_RSA_WITH_AES_256_GCM_SHA384,
-
- // CBC w/o ECDHE
- TLS_RSA_WITH_AES_128_CBC_SHA,
- TLS_RSA_WITH_AES_256_CBC_SHA,
-
- // 3DES
- TLS_ECDHE_RSA_WITH_3DES_EDE_CBC_SHA,
- TLS_RSA_WITH_3DES_EDE_CBC_SHA,
-
- // CBC_SHA256
- TLS_ECDHE_ECDSA_WITH_AES_128_CBC_SHA256, TLS_ECDHE_RSA_WITH_AES_128_CBC_SHA256,
- TLS_RSA_WITH_AES_128_CBC_SHA256,
-
- // RC4
- TLS_ECDHE_ECDSA_WITH_RC4_128_SHA, TLS_ECDHE_RSA_WITH_RC4_128_SHA,
- TLS_RSA_WITH_RC4_128_SHA,
-}
-
-var cipherSuitesPreferenceOrderNoAES = []uint16{
- // ChaCha20Poly1305
- TLS_ECDHE_ECDSA_WITH_CHACHA20_POLY1305, TLS_ECDHE_RSA_WITH_CHACHA20_POLY1305,
-
- // AES-GCM w/ ECDHE
- TLS_ECDHE_ECDSA_WITH_AES_128_GCM_SHA256, TLS_ECDHE_RSA_WITH_AES_128_GCM_SHA256,
- TLS_ECDHE_ECDSA_WITH_AES_256_GCM_SHA384, TLS_ECDHE_RSA_WITH_AES_256_GCM_SHA384,
-
- // The rest of cipherSuitesPreferenceOrder.
- TLS_ECDHE_ECDSA_WITH_AES_128_CBC_SHA, TLS_ECDHE_RSA_WITH_AES_128_CBC_SHA,
- TLS_ECDHE_ECDSA_WITH_AES_256_CBC_SHA, TLS_ECDHE_RSA_WITH_AES_256_CBC_SHA,
- TLS_RSA_WITH_AES_128_GCM_SHA256,
- TLS_RSA_WITH_AES_256_GCM_SHA384,
- TLS_RSA_WITH_AES_128_CBC_SHA,
- TLS_RSA_WITH_AES_256_CBC_SHA,
- TLS_ECDHE_RSA_WITH_3DES_EDE_CBC_SHA,
- TLS_RSA_WITH_3DES_EDE_CBC_SHA,
- TLS_ECDHE_ECDSA_WITH_AES_128_CBC_SHA256, TLS_ECDHE_RSA_WITH_AES_128_CBC_SHA256,
- TLS_RSA_WITH_AES_128_CBC_SHA256,
- TLS_ECDHE_ECDSA_WITH_RC4_128_SHA, TLS_ECDHE_RSA_WITH_RC4_128_SHA,
- TLS_RSA_WITH_RC4_128_SHA,
-}
-
-// disabledCipherSuites are not used unless explicitly listed in
-// Config.CipherSuites. They MUST be at the end of cipherSuitesPreferenceOrder.
-var disabledCipherSuites = []uint16{
- // CBC_SHA256
- TLS_ECDHE_ECDSA_WITH_AES_128_CBC_SHA256, TLS_ECDHE_RSA_WITH_AES_128_CBC_SHA256,
- TLS_RSA_WITH_AES_128_CBC_SHA256,
-
- // RC4
- TLS_ECDHE_ECDSA_WITH_RC4_128_SHA, TLS_ECDHE_RSA_WITH_RC4_128_SHA,
- TLS_RSA_WITH_RC4_128_SHA,
-}
-
-var (
- defaultCipherSuitesLen = len(cipherSuitesPreferenceOrder) - len(disabledCipherSuites)
- defaultCipherSuites = cipherSuitesPreferenceOrder[:defaultCipherSuitesLen]
-)
-
-// defaultCipherSuitesTLS13 is also the preference order, since there are no
-// disabled by default TLS 1.3 cipher suites. The same AES vs ChaCha20 logic as
-// cipherSuitesPreferenceOrder applies.
-var defaultCipherSuitesTLS13 = []uint16{
- TLS_AES_128_GCM_SHA256,
- TLS_AES_256_GCM_SHA384,
- TLS_CHACHA20_POLY1305_SHA256,
-}
-
-var defaultCipherSuitesTLS13NoAES = []uint16{
- TLS_CHACHA20_POLY1305_SHA256,
- TLS_AES_128_GCM_SHA256,
- TLS_AES_256_GCM_SHA384,
-}
-
-var aesgcmCiphers = map[uint16]bool{
- // TLS 1.2
- TLS_ECDHE_RSA_WITH_AES_128_GCM_SHA256: true,
- TLS_ECDHE_RSA_WITH_AES_256_GCM_SHA384: true,
- TLS_ECDHE_ECDSA_WITH_AES_128_GCM_SHA256: true,
- TLS_ECDHE_ECDSA_WITH_AES_256_GCM_SHA384: true,
- // TLS 1.3
- TLS_AES_128_GCM_SHA256: true,
- TLS_AES_256_GCM_SHA384: true,
-}
-
-var nonAESGCMAEADCiphers = map[uint16]bool{
- // TLS 1.2
- TLS_ECDHE_RSA_WITH_CHACHA20_POLY1305: true,
- TLS_ECDHE_ECDSA_WITH_CHACHA20_POLY1305: true,
- // TLS 1.3
- TLS_CHACHA20_POLY1305_SHA256: true,
-}
-
-// aesgcmPreferred returns whether the first known cipher in the preference list
-// is an AES-GCM cipher, implying the peer has hardware support for it.
-func aesgcmPreferred(ciphers []uint16) bool {
- for _, cID := range ciphers {
- if c := cipherSuiteByID(cID); c != nil {
- return aesgcmCiphers[cID]
- }
- if c := cipherSuiteTLS13ByID(cID); c != nil {
- return aesgcmCiphers[cID]
- }
- }
- return false
-}
-
-func cipherRC4(key, iv []byte, isRead bool) interface{} {
- cipher, _ := rc4.NewCipher(key)
- return cipher
-}
-
-func cipher3DES(key, iv []byte, isRead bool) interface{} {
- block, _ := des.NewTripleDESCipher(key)
- if isRead {
- return cipher.NewCBCDecrypter(block, iv)
- }
- return cipher.NewCBCEncrypter(block, iv)
-}
-
-func cipherAES(key, iv []byte, isRead bool) interface{} {
- block, _ := aes.NewCipher(key)
- if isRead {
- return cipher.NewCBCDecrypter(block, iv)
- }
- return cipher.NewCBCEncrypter(block, iv)
-}
-
-// macSHA1 returns a SHA-1 based constant time MAC.
-func macSHA1(key []byte) hash.Hash {
- return hmac.New(newConstantTimeHash(sha1.New), key)
-}
-
-// macSHA256 returns a SHA-256 based MAC. This is only supported in TLS 1.2 and
-// is currently only used in disabled-by-default cipher suites.
-func macSHA256(key []byte) hash.Hash {
- return hmac.New(sha256.New, key)
-}
-
-type aead interface {
- cipher.AEAD
-
- // explicitNonceLen returns the number of bytes of explicit nonce
- // included in each record. This is eight for older AEADs and
- // zero for modern ones.
- explicitNonceLen() int
-}
-
-const (
- aeadNonceLength = 12
- noncePrefixLength = 4
-)
-
-// prefixNonceAEAD wraps an AEAD and prefixes a fixed portion of the nonce to
-// each call.
-type prefixNonceAEAD struct {
- // nonce contains the fixed part of the nonce in the first four bytes.
- nonce [aeadNonceLength]byte
- aead cipher.AEAD
-}
-
-func (f *prefixNonceAEAD) NonceSize() int { return aeadNonceLength - noncePrefixLength }
-func (f *prefixNonceAEAD) Overhead() int { return f.aead.Overhead() }
-func (f *prefixNonceAEAD) explicitNonceLen() int { return f.NonceSize() }
-
-func (f *prefixNonceAEAD) Seal(out, nonce, plaintext, additionalData []byte) []byte {
- copy(f.nonce[4:], nonce)
- return f.aead.Seal(out, f.nonce[:], plaintext, additionalData)
-}
-
-func (f *prefixNonceAEAD) Open(out, nonce, ciphertext, additionalData []byte) ([]byte, error) {
- copy(f.nonce[4:], nonce)
- return f.aead.Open(out, f.nonce[:], ciphertext, additionalData)
-}
-
-// xoredNonceAEAD wraps an AEAD by XORing in a fixed pattern to the nonce
-// before each call.
-type xorNonceAEAD struct {
- nonceMask [aeadNonceLength]byte
- aead cipher.AEAD
-}
-
-func (f *xorNonceAEAD) NonceSize() int { return 8 } // 64-bit sequence number
-func (f *xorNonceAEAD) Overhead() int { return f.aead.Overhead() }
-func (f *xorNonceAEAD) explicitNonceLen() int { return 0 }
-
-func (f *xorNonceAEAD) Seal(out, nonce, plaintext, additionalData []byte) []byte {
- for i, b := range nonce {
- f.nonceMask[4+i] ^= b
- }
- result := f.aead.Seal(out, f.nonceMask[:], plaintext, additionalData)
- for i, b := range nonce {
- f.nonceMask[4+i] ^= b
- }
-
- return result
-}
-
-func (f *xorNonceAEAD) Open(out, nonce, ciphertext, additionalData []byte) ([]byte, error) {
- for i, b := range nonce {
- f.nonceMask[4+i] ^= b
- }
- result, err := f.aead.Open(out, f.nonceMask[:], ciphertext, additionalData)
- for i, b := range nonce {
- f.nonceMask[4+i] ^= b
- }
-
- return result, err
-}
-
-func aeadAESGCM(key, noncePrefix []byte) aead {
- if len(noncePrefix) != noncePrefixLength {
- panic("tls: internal error: wrong nonce length")
- }
- aes, err := aes.NewCipher(key)
- if err != nil {
- panic(err)
- }
- aead, err := cipher.NewGCM(aes)
- if err != nil {
- panic(err)
- }
-
- ret := &prefixNonceAEAD{aead: aead}
- copy(ret.nonce[:], noncePrefix)
- return ret
-}
-
-// AEADAESGCMTLS13 creates a new AES-GCM AEAD for TLS 1.3
-func AEADAESGCMTLS13(key, fixedNonce []byte) cipher.AEAD {
- return aeadAESGCMTLS13(key, fixedNonce)
-}
-
-func aeadAESGCMTLS13(key, nonceMask []byte) aead {
- if len(nonceMask) != aeadNonceLength {
- panic("tls: internal error: wrong nonce length")
- }
- aes, err := aes.NewCipher(key)
- if err != nil {
- panic(err)
- }
- aead, err := cipher.NewGCM(aes)
- if err != nil {
- panic(err)
- }
-
- ret := &xorNonceAEAD{aead: aead}
- copy(ret.nonceMask[:], nonceMask)
- return ret
-}
-
-func aeadChaCha20Poly1305(key, nonceMask []byte) aead {
- if len(nonceMask) != aeadNonceLength {
- panic("tls: internal error: wrong nonce length")
- }
- aead, err := chacha20poly1305.New(key)
- if err != nil {
- panic(err)
- }
-
- ret := &xorNonceAEAD{aead: aead}
- copy(ret.nonceMask[:], nonceMask)
- return ret
-}
-
-type constantTimeHash interface {
- hash.Hash
- ConstantTimeSum(b []byte) []byte
-}
-
-// cthWrapper wraps any hash.Hash that implements ConstantTimeSum, and replaces
-// with that all calls to Sum. It's used to obtain a ConstantTimeSum-based HMAC.
-type cthWrapper struct {
- h constantTimeHash
-}
-
-func (c *cthWrapper) Size() int { return c.h.Size() }
-func (c *cthWrapper) BlockSize() int { return c.h.BlockSize() }
-func (c *cthWrapper) Reset() { c.h.Reset() }
-func (c *cthWrapper) Write(p []byte) (int, error) { return c.h.Write(p) }
-func (c *cthWrapper) Sum(b []byte) []byte { return c.h.ConstantTimeSum(b) }
-
-func newConstantTimeHash(h func() hash.Hash) func() hash.Hash {
- return func() hash.Hash {
- return &cthWrapper{h().(constantTimeHash)}
- }
-}
-
-// tls10MAC implements the TLS 1.0 MAC function. RFC 2246, Section 6.2.3.
-func tls10MAC(h hash.Hash, out, seq, header, data, extra []byte) []byte {
- h.Reset()
- h.Write(seq)
- h.Write(header)
- h.Write(data)
- res := h.Sum(out)
- if extra != nil {
- h.Write(extra)
- }
- return res
-}
-
-func rsaKA(version uint16) keyAgreement {
- return rsaKeyAgreement{}
-}
-
-func ecdheECDSAKA(version uint16) keyAgreement {
- return &ecdheKeyAgreement{
- isRSA: false,
- version: version,
- }
-}
-
-func ecdheRSAKA(version uint16) keyAgreement {
- return &ecdheKeyAgreement{
- isRSA: true,
- version: version,
- }
-}
-
-// mutualCipherSuite returns a cipherSuite given a list of supported
-// ciphersuites and the id requested by the peer.
-func mutualCipherSuite(have []uint16, want uint16) *cipherSuite {
- for _, id := range have {
- if id == want {
- return cipherSuiteByID(id)
- }
- }
- return nil
-}
-
-func cipherSuiteByID(id uint16) *cipherSuite {
- for _, cipherSuite := range cipherSuites {
- if cipherSuite.id == id {
- return cipherSuite
- }
- }
- return nil
-}
-
-func mutualCipherSuiteTLS13(have []uint16, want uint16) *cipherSuiteTLS13 {
- for _, id := range have {
- if id == want {
- return cipherSuiteTLS13ByID(id)
- }
- }
- return nil
-}
-
-func cipherSuiteTLS13ByID(id uint16) *cipherSuiteTLS13 {
- for _, cipherSuite := range cipherSuitesTLS13 {
- if cipherSuite.id == id {
- return cipherSuite
- }
- }
- return nil
-}
-
-// A list of cipher suite IDs that are, or have been, implemented by this
-// package.
-//
-// See https://www.iana.org/assignments/tls-parameters/tls-parameters.xml
-const (
- // TLS 1.0 - 1.2 cipher suites.
- TLS_RSA_WITH_RC4_128_SHA uint16 = 0x0005
- TLS_RSA_WITH_3DES_EDE_CBC_SHA uint16 = 0x000a
- TLS_RSA_WITH_AES_128_CBC_SHA uint16 = 0x002f
- TLS_RSA_WITH_AES_256_CBC_SHA uint16 = 0x0035
- TLS_RSA_WITH_AES_128_CBC_SHA256 uint16 = 0x003c
- TLS_RSA_WITH_AES_128_GCM_SHA256 uint16 = 0x009c
- TLS_RSA_WITH_AES_256_GCM_SHA384 uint16 = 0x009d
- TLS_ECDHE_ECDSA_WITH_RC4_128_SHA uint16 = 0xc007
- TLS_ECDHE_ECDSA_WITH_AES_128_CBC_SHA uint16 = 0xc009
- TLS_ECDHE_ECDSA_WITH_AES_256_CBC_SHA uint16 = 0xc00a
- TLS_ECDHE_RSA_WITH_RC4_128_SHA uint16 = 0xc011
- TLS_ECDHE_RSA_WITH_3DES_EDE_CBC_SHA uint16 = 0xc012
- TLS_ECDHE_RSA_WITH_AES_128_CBC_SHA uint16 = 0xc013
- TLS_ECDHE_RSA_WITH_AES_256_CBC_SHA uint16 = 0xc014
- TLS_ECDHE_ECDSA_WITH_AES_128_CBC_SHA256 uint16 = 0xc023
- TLS_ECDHE_RSA_WITH_AES_128_CBC_SHA256 uint16 = 0xc027
- TLS_ECDHE_RSA_WITH_AES_128_GCM_SHA256 uint16 = 0xc02f
- TLS_ECDHE_ECDSA_WITH_AES_128_GCM_SHA256 uint16 = 0xc02b
- TLS_ECDHE_RSA_WITH_AES_256_GCM_SHA384 uint16 = 0xc030
- TLS_ECDHE_ECDSA_WITH_AES_256_GCM_SHA384 uint16 = 0xc02c
- TLS_ECDHE_RSA_WITH_CHACHA20_POLY1305_SHA256 uint16 = 0xcca8
- TLS_ECDHE_ECDSA_WITH_CHACHA20_POLY1305_SHA256 uint16 = 0xcca9
-
- // TLS 1.3 cipher suites.
- TLS_AES_128_GCM_SHA256 uint16 = 0x1301
- TLS_AES_256_GCM_SHA384 uint16 = 0x1302
- TLS_CHACHA20_POLY1305_SHA256 uint16 = 0x1303
-
- // TLS_FALLBACK_SCSV isn't a standard cipher suite but an indicator
- // that the client is doing version fallback. See RFC 7507.
- TLS_FALLBACK_SCSV uint16 = 0x5600
-
- // Legacy names for the corresponding cipher suites with the correct _SHA256
- // suffix, retained for backward compatibility.
- TLS_ECDHE_RSA_WITH_CHACHA20_POLY1305 = TLS_ECDHE_RSA_WITH_CHACHA20_POLY1305_SHA256
- TLS_ECDHE_ECDSA_WITH_CHACHA20_POLY1305 = TLS_ECDHE_ECDSA_WITH_CHACHA20_POLY1305_SHA256
-)
diff --git a/vendor/github.com/marten-seemann/qtls-go1-17/common.go b/vendor/github.com/marten-seemann/qtls-go1-17/common.go
deleted file mode 100644
index 8a9b68048..000000000
--- a/vendor/github.com/marten-seemann/qtls-go1-17/common.go
+++ /dev/null
@@ -1,1485 +0,0 @@
-// Copyright 2009 The Go Authors. All rights reserved.
-// Use of this source code is governed by a BSD-style
-// license that can be found in the LICENSE file.
-
-package qtls
-
-import (
- "bytes"
- "container/list"
- "context"
- "crypto"
- "crypto/ecdsa"
- "crypto/ed25519"
- "crypto/elliptic"
- "crypto/rand"
- "crypto/rsa"
- "crypto/sha512"
- "crypto/tls"
- "crypto/x509"
- "errors"
- "fmt"
- "io"
- "net"
- "strings"
- "sync"
- "time"
-)
-
-const (
- VersionTLS10 = 0x0301
- VersionTLS11 = 0x0302
- VersionTLS12 = 0x0303
- VersionTLS13 = 0x0304
-
- // Deprecated: SSLv3 is cryptographically broken, and is no longer
- // supported by this package. See golang.org/issue/32716.
- VersionSSL30 = 0x0300
-)
-
-const (
- maxPlaintext = 16384 // maximum plaintext payload length
- maxCiphertext = 16384 + 2048 // maximum ciphertext payload length
- maxCiphertextTLS13 = 16384 + 256 // maximum ciphertext length in TLS 1.3
- recordHeaderLen = 5 // record header length
- maxHandshake = 65536 // maximum handshake we support (protocol max is 16 MB)
- maxUselessRecords = 16 // maximum number of consecutive non-advancing records
-)
-
-// TLS record types.
-type recordType uint8
-
-const (
- recordTypeChangeCipherSpec recordType = 20
- recordTypeAlert recordType = 21
- recordTypeHandshake recordType = 22
- recordTypeApplicationData recordType = 23
-)
-
-// TLS handshake message types.
-const (
- typeHelloRequest uint8 = 0
- typeClientHello uint8 = 1
- typeServerHello uint8 = 2
- typeNewSessionTicket uint8 = 4
- typeEndOfEarlyData uint8 = 5
- typeEncryptedExtensions uint8 = 8
- typeCertificate uint8 = 11
- typeServerKeyExchange uint8 = 12
- typeCertificateRequest uint8 = 13
- typeServerHelloDone uint8 = 14
- typeCertificateVerify uint8 = 15
- typeClientKeyExchange uint8 = 16
- typeFinished uint8 = 20
- typeCertificateStatus uint8 = 22
- typeKeyUpdate uint8 = 24
- typeNextProtocol uint8 = 67 // Not IANA assigned
- typeMessageHash uint8 = 254 // synthetic message
-)
-
-// TLS compression types.
-const (
- compressionNone uint8 = 0
-)
-
-type Extension struct {
- Type uint16
- Data []byte
-}
-
-// TLS extension numbers
-const (
- extensionServerName uint16 = 0
- extensionStatusRequest uint16 = 5
- extensionSupportedCurves uint16 = 10 // supported_groups in TLS 1.3, see RFC 8446, Section 4.2.7
- extensionSupportedPoints uint16 = 11
- extensionSignatureAlgorithms uint16 = 13
- extensionALPN uint16 = 16
- extensionSCT uint16 = 18
- extensionSessionTicket uint16 = 35
- extensionPreSharedKey uint16 = 41
- extensionEarlyData uint16 = 42
- extensionSupportedVersions uint16 = 43
- extensionCookie uint16 = 44
- extensionPSKModes uint16 = 45
- extensionCertificateAuthorities uint16 = 47
- extensionSignatureAlgorithmsCert uint16 = 50
- extensionKeyShare uint16 = 51
- extensionRenegotiationInfo uint16 = 0xff01
-)
-
-// TLS signaling cipher suite values
-const (
- scsvRenegotiation uint16 = 0x00ff
-)
-
-type EncryptionLevel uint8
-
-const (
- EncryptionHandshake EncryptionLevel = iota
- Encryption0RTT
- EncryptionApplication
-)
-
-// CurveID is a tls.CurveID
-type CurveID = tls.CurveID
-
-const (
- CurveP256 CurveID = 23
- CurveP384 CurveID = 24
- CurveP521 CurveID = 25
- X25519 CurveID = 29
-)
-
-// TLS 1.3 Key Share. See RFC 8446, Section 4.2.8.
-type keyShare struct {
- group CurveID
- data []byte
-}
-
-// TLS 1.3 PSK Key Exchange Modes. See RFC 8446, Section 4.2.9.
-const (
- pskModePlain uint8 = 0
- pskModeDHE uint8 = 1
-)
-
-// TLS 1.3 PSK Identity. Can be a Session Ticket, or a reference to a saved
-// session. See RFC 8446, Section 4.2.11.
-type pskIdentity struct {
- label []byte
- obfuscatedTicketAge uint32
-}
-
-// TLS Elliptic Curve Point Formats
-// https://www.iana.org/assignments/tls-parameters/tls-parameters.xml#tls-parameters-9
-const (
- pointFormatUncompressed uint8 = 0
-)
-
-// TLS CertificateStatusType (RFC 3546)
-const (
- statusTypeOCSP uint8 = 1
-)
-
-// Certificate types (for certificateRequestMsg)
-const (
- certTypeRSASign = 1
- certTypeECDSASign = 64 // ECDSA or EdDSA keys, see RFC 8422, Section 3.
-)
-
-// Signature algorithms (for internal signaling use). Starting at 225 to avoid overlap with
-// TLS 1.2 codepoints (RFC 5246, Appendix A.4.1), with which these have nothing to do.
-const (
- signaturePKCS1v15 uint8 = iota + 225
- signatureRSAPSS
- signatureECDSA
- signatureEd25519
-)
-
-// directSigning is a standard Hash value that signals that no pre-hashing
-// should be performed, and that the input should be signed directly. It is the
-// hash function associated with the Ed25519 signature scheme.
-var directSigning crypto.Hash = 0
-
-// supportedSignatureAlgorithms contains the signature and hash algorithms that
-// the code advertises as supported in a TLS 1.2+ ClientHello and in a TLS 1.2+
-// CertificateRequest. The two fields are merged to match with TLS 1.3.
-// Note that in TLS 1.2, the ECDSA algorithms are not constrained to P-256, etc.
-var supportedSignatureAlgorithms = []SignatureScheme{
- PSSWithSHA256,
- ECDSAWithP256AndSHA256,
- Ed25519,
- PSSWithSHA384,
- PSSWithSHA512,
- PKCS1WithSHA256,
- PKCS1WithSHA384,
- PKCS1WithSHA512,
- ECDSAWithP384AndSHA384,
- ECDSAWithP521AndSHA512,
- PKCS1WithSHA1,
- ECDSAWithSHA1,
-}
-
-// helloRetryRequestRandom is set as the Random value of a ServerHello
-// to signal that the message is actually a HelloRetryRequest.
-var helloRetryRequestRandom = []byte{ // See RFC 8446, Section 4.1.3.
- 0xCF, 0x21, 0xAD, 0x74, 0xE5, 0x9A, 0x61, 0x11,
- 0xBE, 0x1D, 0x8C, 0x02, 0x1E, 0x65, 0xB8, 0x91,
- 0xC2, 0xA2, 0x11, 0x16, 0x7A, 0xBB, 0x8C, 0x5E,
- 0x07, 0x9E, 0x09, 0xE2, 0xC8, 0xA8, 0x33, 0x9C,
-}
-
-const (
- // downgradeCanaryTLS12 or downgradeCanaryTLS11 is embedded in the server
- // random as a downgrade protection if the server would be capable of
- // negotiating a higher version. See RFC 8446, Section 4.1.3.
- downgradeCanaryTLS12 = "DOWNGRD\x01"
- downgradeCanaryTLS11 = "DOWNGRD\x00"
-)
-
-// testingOnlyForceDowngradeCanary is set in tests to force the server side to
-// include downgrade canaries even if it's using its highers supported version.
-var testingOnlyForceDowngradeCanary bool
-
-type ConnectionState = tls.ConnectionState
-
-// ConnectionState records basic TLS details about the connection.
-type connectionState struct {
- // Version is the TLS version used by the connection (e.g. VersionTLS12).
- Version uint16
-
- // HandshakeComplete is true if the handshake has concluded.
- HandshakeComplete bool
-
- // DidResume is true if this connection was successfully resumed from a
- // previous session with a session ticket or similar mechanism.
- DidResume bool
-
- // CipherSuite is the cipher suite negotiated for the connection (e.g.
- // TLS_ECDHE_ECDSA_WITH_AES_128_GCM_SHA256, TLS_AES_128_GCM_SHA256).
- CipherSuite uint16
-
- // NegotiatedProtocol is the application protocol negotiated with ALPN.
- NegotiatedProtocol string
-
- // NegotiatedProtocolIsMutual used to indicate a mutual NPN negotiation.
- //
- // Deprecated: this value is always true.
- NegotiatedProtocolIsMutual bool
-
- // ServerName is the value of the Server Name Indication extension sent by
- // the client. It's available both on the server and on the client side.
- ServerName string
-
- // PeerCertificates are the parsed certificates sent by the peer, in the
- // order in which they were sent. The first element is the leaf certificate
- // that the connection is verified against.
- //
- // On the client side, it can't be empty. On the server side, it can be
- // empty if Config.ClientAuth is not RequireAnyClientCert or
- // RequireAndVerifyClientCert.
- PeerCertificates []*x509.Certificate
-
- // VerifiedChains is a list of one or more chains where the first element is
- // PeerCertificates[0] and the last element is from Config.RootCAs (on the
- // client side) or Config.ClientCAs (on the server side).
- //
- // On the client side, it's set if Config.InsecureSkipVerify is false. On
- // the server side, it's set if Config.ClientAuth is VerifyClientCertIfGiven
- // (and the peer provided a certificate) or RequireAndVerifyClientCert.
- VerifiedChains [][]*x509.Certificate
-
- // SignedCertificateTimestamps is a list of SCTs provided by the peer
- // through the TLS handshake for the leaf certificate, if any.
- SignedCertificateTimestamps [][]byte
-
- // OCSPResponse is a stapled Online Certificate Status Protocol (OCSP)
- // response provided by the peer for the leaf certificate, if any.
- OCSPResponse []byte
-
- // TLSUnique contains the "tls-unique" channel binding value (see RFC 5929,
- // Section 3). This value will be nil for TLS 1.3 connections and for all
- // resumed connections.
- //
- // Deprecated: there are conditions in which this value might not be unique
- // to a connection. See the Security Considerations sections of RFC 5705 and
- // RFC 7627, and https://mitls.org/pages/attacks/3SHAKE#channelbindings.
- TLSUnique []byte
-
- // ekm is a closure exposed via ExportKeyingMaterial.
- ekm func(label string, context []byte, length int) ([]byte, error)
-}
-
-type ConnectionStateWith0RTT struct {
- ConnectionState
-
- Used0RTT bool // true if 0-RTT was both offered and accepted
-}
-
-// ClientAuthType is tls.ClientAuthType
-type ClientAuthType = tls.ClientAuthType
-
-const (
- NoClientCert = tls.NoClientCert
- RequestClientCert = tls.RequestClientCert
- RequireAnyClientCert = tls.RequireAnyClientCert
- VerifyClientCertIfGiven = tls.VerifyClientCertIfGiven
- RequireAndVerifyClientCert = tls.RequireAndVerifyClientCert
-)
-
-// requiresClientCert reports whether the ClientAuthType requires a client
-// certificate to be provided.
-func requiresClientCert(c ClientAuthType) bool {
- switch c {
- case RequireAnyClientCert, RequireAndVerifyClientCert:
- return true
- default:
- return false
- }
-}
-
-// ClientSessionState contains the state needed by clients to resume TLS
-// sessions.
-type ClientSessionState = tls.ClientSessionState
-
-type clientSessionState struct {
- sessionTicket []uint8 // Encrypted ticket used for session resumption with server
- vers uint16 // TLS version negotiated for the session
- cipherSuite uint16 // Ciphersuite negotiated for the session
- masterSecret []byte // Full handshake MasterSecret, or TLS 1.3 resumption_master_secret
- serverCertificates []*x509.Certificate // Certificate chain presented by the server
- verifiedChains [][]*x509.Certificate // Certificate chains we built for verification
- receivedAt time.Time // When the session ticket was received from the server
- ocspResponse []byte // Stapled OCSP response presented by the server
- scts [][]byte // SCTs presented by the server
-
- // TLS 1.3 fields.
- nonce []byte // Ticket nonce sent by the server, to derive PSK
- useBy time.Time // Expiration of the ticket lifetime as set by the server
- ageAdd uint32 // Random obfuscation factor for sending the ticket age
-}
-
-// ClientSessionCache is a cache of ClientSessionState objects that can be used
-// by a client to resume a TLS session with a given server. ClientSessionCache
-// implementations should expect to be called concurrently from different
-// goroutines. Up to TLS 1.2, only ticket-based resumption is supported, not
-// SessionID-based resumption. In TLS 1.3 they were merged into PSK modes, which
-// are supported via this interface.
-//go:generate sh -c "mockgen -package qtls -destination mock_client_session_cache_test.go github.com/marten-seemann/qtls-go1-17 ClientSessionCache"
-type ClientSessionCache = tls.ClientSessionCache
-
-// SignatureScheme is a tls.SignatureScheme
-type SignatureScheme = tls.SignatureScheme
-
-const (
- // RSASSA-PKCS1-v1_5 algorithms.
- PKCS1WithSHA256 SignatureScheme = 0x0401
- PKCS1WithSHA384 SignatureScheme = 0x0501
- PKCS1WithSHA512 SignatureScheme = 0x0601
-
- // RSASSA-PSS algorithms with public key OID rsaEncryption.
- PSSWithSHA256 SignatureScheme = 0x0804
- PSSWithSHA384 SignatureScheme = 0x0805
- PSSWithSHA512 SignatureScheme = 0x0806
-
- // ECDSA algorithms. Only constrained to a specific curve in TLS 1.3.
- ECDSAWithP256AndSHA256 SignatureScheme = 0x0403
- ECDSAWithP384AndSHA384 SignatureScheme = 0x0503
- ECDSAWithP521AndSHA512 SignatureScheme = 0x0603
-
- // EdDSA algorithms.
- Ed25519 SignatureScheme = 0x0807
-
- // Legacy signature and hash algorithms for TLS 1.2.
- PKCS1WithSHA1 SignatureScheme = 0x0201
- ECDSAWithSHA1 SignatureScheme = 0x0203
-)
-
-// ClientHelloInfo contains information from a ClientHello message in order to
-// guide application logic in the GetCertificate and GetConfigForClient callbacks.
-type ClientHelloInfo = tls.ClientHelloInfo
-
-type clientHelloInfo struct {
- // CipherSuites lists the CipherSuites supported by the client (e.g.
- // TLS_AES_128_GCM_SHA256, TLS_ECDHE_ECDSA_WITH_AES_128_GCM_SHA256).
- CipherSuites []uint16
-
- // ServerName indicates the name of the server requested by the client
- // in order to support virtual hosting. ServerName is only set if the
- // client is using SNI (see RFC 4366, Section 3.1).
- ServerName string
-
- // SupportedCurves lists the elliptic curves supported by the client.
- // SupportedCurves is set only if the Supported Elliptic Curves
- // Extension is being used (see RFC 4492, Section 5.1.1).
- SupportedCurves []CurveID
-
- // SupportedPoints lists the point formats supported by the client.
- // SupportedPoints is set only if the Supported Point Formats Extension
- // is being used (see RFC 4492, Section 5.1.2).
- SupportedPoints []uint8
-
- // SignatureSchemes lists the signature and hash schemes that the client
- // is willing to verify. SignatureSchemes is set only if the Signature
- // Algorithms Extension is being used (see RFC 5246, Section 7.4.1.4.1).
- SignatureSchemes []SignatureScheme
-
- // SupportedProtos lists the application protocols supported by the client.
- // SupportedProtos is set only if the Application-Layer Protocol
- // Negotiation Extension is being used (see RFC 7301, Section 3.1).
- //
- // Servers can select a protocol by setting Config.NextProtos in a
- // GetConfigForClient return value.
- SupportedProtos []string
-
- // SupportedVersions lists the TLS versions supported by the client.
- // For TLS versions less than 1.3, this is extrapolated from the max
- // version advertised by the client, so values other than the greatest
- // might be rejected if used.
- SupportedVersions []uint16
-
- // Conn is the underlying net.Conn for the connection. Do not read
- // from, or write to, this connection; that will cause the TLS
- // connection to fail.
- Conn net.Conn
-
- // config is embedded by the GetCertificate or GetConfigForClient caller,
- // for use with SupportsCertificate.
- config *Config
-
- // ctx is the context of the handshake that is in progress.
- ctx context.Context
-}
-
-// Context returns the context of the handshake that is in progress.
-// This context is a child of the context passed to HandshakeContext,
-// if any, and is canceled when the handshake concludes.
-func (c *clientHelloInfo) Context() context.Context {
- return c.ctx
-}
-
-// CertificateRequestInfo contains information from a server's
-// CertificateRequest message, which is used to demand a certificate and proof
-// of control from a client.
-type CertificateRequestInfo = tls.CertificateRequestInfo
-
-type certificateRequestInfo struct {
- // AcceptableCAs contains zero or more, DER-encoded, X.501
- // Distinguished Names. These are the names of root or intermediate CAs
- // that the server wishes the returned certificate to be signed by. An
- // empty slice indicates that the server has no preference.
- AcceptableCAs [][]byte
-
- // SignatureSchemes lists the signature schemes that the server is
- // willing to verify.
- SignatureSchemes []SignatureScheme
-
- // Version is the TLS version that was negotiated for this connection.
- Version uint16
-
- // ctx is the context of the handshake that is in progress.
- ctx context.Context
-}
-
-// Context returns the context of the handshake that is in progress.
-// This context is a child of the context passed to HandshakeContext,
-// if any, and is canceled when the handshake concludes.
-func (c *certificateRequestInfo) Context() context.Context {
- return c.ctx
-}
-
-// RenegotiationSupport enumerates the different levels of support for TLS
-// renegotiation. TLS renegotiation is the act of performing subsequent
-// handshakes on a connection after the first. This significantly complicates
-// the state machine and has been the source of numerous, subtle security
-// issues. Initiating a renegotiation is not supported, but support for
-// accepting renegotiation requests may be enabled.
-//
-// Even when enabled, the server may not change its identity between handshakes
-// (i.e. the leaf certificate must be the same). Additionally, concurrent
-// handshake and application data flow is not permitted so renegotiation can
-// only be used with protocols that synchronise with the renegotiation, such as
-// HTTPS.
-//
-// Renegotiation is not defined in TLS 1.3.
-type RenegotiationSupport = tls.RenegotiationSupport
-
-const (
- // RenegotiateNever disables renegotiation.
- RenegotiateNever = tls.RenegotiateNever
-
- // RenegotiateOnceAsClient allows a remote server to request
- // renegotiation once per connection.
- RenegotiateOnceAsClient = tls.RenegotiateOnceAsClient
-
- // RenegotiateFreelyAsClient allows a remote server to repeatedly
- // request renegotiation.
- RenegotiateFreelyAsClient = tls.RenegotiateFreelyAsClient
-)
-
-// A Config structure is used to configure a TLS client or server.
-// After one has been passed to a TLS function it must not be
-// modified. A Config may be reused; the tls package will also not
-// modify it.
-type Config = tls.Config
-
-type config struct {
- // Rand provides the source of entropy for nonces and RSA blinding.
- // If Rand is nil, TLS uses the cryptographic random reader in package
- // crypto/rand.
- // The Reader must be safe for use by multiple goroutines.
- Rand io.Reader
-
- // Time returns the current time as the number of seconds since the epoch.
- // If Time is nil, TLS uses time.Now.
- Time func() time.Time
-
- // Certificates contains one or more certificate chains to present to the
- // other side of the connection. The first certificate compatible with the
- // peer's requirements is selected automatically.
- //
- // Server configurations must set one of Certificates, GetCertificate or
- // GetConfigForClient. Clients doing client-authentication may set either
- // Certificates or GetClientCertificate.
- //
- // Note: if there are multiple Certificates, and they don't have the
- // optional field Leaf set, certificate selection will incur a significant
- // per-handshake performance cost.
- Certificates []Certificate
-
- // NameToCertificate maps from a certificate name to an element of
- // Certificates. Note that a certificate name can be of the form
- // '*.example.com' and so doesn't have to be a domain name as such.
- //
- // Deprecated: NameToCertificate only allows associating a single
- // certificate with a given name. Leave this field nil to let the library
- // select the first compatible chain from Certificates.
- NameToCertificate map[string]*Certificate
-
- // GetCertificate returns a Certificate based on the given
- // ClientHelloInfo. It will only be called if the client supplies SNI
- // information or if Certificates is empty.
- //
- // If GetCertificate is nil or returns nil, then the certificate is
- // retrieved from NameToCertificate. If NameToCertificate is nil, the
- // best element of Certificates will be used.
- GetCertificate func(*ClientHelloInfo) (*Certificate, error)
-
- // GetClientCertificate, if not nil, is called when a server requests a
- // certificate from a client. If set, the contents of Certificates will
- // be ignored.
- //
- // If GetClientCertificate returns an error, the handshake will be
- // aborted and that error will be returned. Otherwise
- // GetClientCertificate must return a non-nil Certificate. If
- // Certificate.Certificate is empty then no certificate will be sent to
- // the server. If this is unacceptable to the server then it may abort
- // the handshake.
- //
- // GetClientCertificate may be called multiple times for the same
- // connection if renegotiation occurs or if TLS 1.3 is in use.
- GetClientCertificate func(*CertificateRequestInfo) (*Certificate, error)
-
- // GetConfigForClient, if not nil, is called after a ClientHello is
- // received from a client. It may return a non-nil Config in order to
- // change the Config that will be used to handle this connection. If
- // the returned Config is nil, the original Config will be used. The
- // Config returned by this callback may not be subsequently modified.
- //
- // If GetConfigForClient is nil, the Config passed to Server() will be
- // used for all connections.
- //
- // If SessionTicketKey was explicitly set on the returned Config, or if
- // SetSessionTicketKeys was called on the returned Config, those keys will
- // be used. Otherwise, the original Config keys will be used (and possibly
- // rotated if they are automatically managed).
- GetConfigForClient func(*ClientHelloInfo) (*Config, error)
-
- // VerifyPeerCertificate, if not nil, is called after normal
- // certificate verification by either a TLS client or server. It
- // receives the raw ASN.1 certificates provided by the peer and also
- // any verified chains that normal processing found. If it returns a
- // non-nil error, the handshake is aborted and that error results.
- //
- // If normal verification fails then the handshake will abort before
- // considering this callback. If normal verification is disabled by
- // setting InsecureSkipVerify, or (for a server) when ClientAuth is
- // RequestClientCert or RequireAnyClientCert, then this callback will
- // be considered but the verifiedChains argument will always be nil.
- VerifyPeerCertificate func(rawCerts [][]byte, verifiedChains [][]*x509.Certificate) error
-
- // VerifyConnection, if not nil, is called after normal certificate
- // verification and after VerifyPeerCertificate by either a TLS client
- // or server. If it returns a non-nil error, the handshake is aborted
- // and that error results.
- //
- // If normal verification fails then the handshake will abort before
- // considering this callback. This callback will run for all connections
- // regardless of InsecureSkipVerify or ClientAuth settings.
- VerifyConnection func(ConnectionState) error
-
- // RootCAs defines the set of root certificate authorities
- // that clients use when verifying server certificates.
- // If RootCAs is nil, TLS uses the host's root CA set.
- RootCAs *x509.CertPool
-
- // NextProtos is a list of supported application level protocols, in
- // order of preference. If both peers support ALPN, the selected
- // protocol will be one from this list, and the connection will fail
- // if there is no mutually supported protocol. If NextProtos is empty
- // or the peer doesn't support ALPN, the connection will succeed and
- // ConnectionState.NegotiatedProtocol will be empty.
- NextProtos []string
-
- // ServerName is used to verify the hostname on the returned
- // certificates unless InsecureSkipVerify is given. It is also included
- // in the client's handshake to support virtual hosting unless it is
- // an IP address.
- ServerName string
-
- // ClientAuth determines the server's policy for
- // TLS Client Authentication. The default is NoClientCert.
- ClientAuth ClientAuthType
-
- // ClientCAs defines the set of root certificate authorities
- // that servers use if required to verify a client certificate
- // by the policy in ClientAuth.
- ClientCAs *x509.CertPool
-
- // InsecureSkipVerify controls whether a client verifies the server's
- // certificate chain and host name. If InsecureSkipVerify is true, crypto/tls
- // accepts any certificate presented by the server and any host name in that
- // certificate. In this mode, TLS is susceptible to machine-in-the-middle
- // attacks unless custom verification is used. This should be used only for
- // testing or in combination with VerifyConnection or VerifyPeerCertificate.
- InsecureSkipVerify bool
-
- // CipherSuites is a list of enabled TLS 1.0–1.2 cipher suites. The order of
- // the list is ignored. Note that TLS 1.3 ciphersuites are not configurable.
- //
- // If CipherSuites is nil, a safe default list is used. The default cipher
- // suites might change over time.
- CipherSuites []uint16
-
- // PreferServerCipherSuites is a legacy field and has no effect.
- //
- // It used to control whether the server would follow the client's or the
- // server's preference. Servers now select the best mutually supported
- // cipher suite based on logic that takes into account inferred client
- // hardware, server hardware, and security.
- //
- // Deprected: PreferServerCipherSuites is ignored.
- PreferServerCipherSuites bool
-
- // SessionTicketsDisabled may be set to true to disable session ticket and
- // PSK (resumption) support. Note that on clients, session ticket support is
- // also disabled if ClientSessionCache is nil.
- SessionTicketsDisabled bool
-
- // SessionTicketKey is used by TLS servers to provide session resumption.
- // See RFC 5077 and the PSK mode of RFC 8446. If zero, it will be filled
- // with random data before the first server handshake.
- //
- // Deprecated: if this field is left at zero, session ticket keys will be
- // automatically rotated every day and dropped after seven days. For
- // customizing the rotation schedule or synchronizing servers that are
- // terminating connections for the same host, use SetSessionTicketKeys.
- SessionTicketKey [32]byte
-
- // ClientSessionCache is a cache of ClientSessionState entries for TLS
- // session resumption. It is only used by clients.
- ClientSessionCache ClientSessionCache
-
- // MinVersion contains the minimum TLS version that is acceptable.
- // If zero, TLS 1.0 is currently taken as the minimum.
- MinVersion uint16
-
- // MaxVersion contains the maximum TLS version that is acceptable.
- // If zero, the maximum version supported by this package is used,
- // which is currently TLS 1.3.
- MaxVersion uint16
-
- // CurvePreferences contains the elliptic curves that will be used in
- // an ECDHE handshake, in preference order. If empty, the default will
- // be used. The client will use the first preference as the type for
- // its key share in TLS 1.3. This may change in the future.
- CurvePreferences []CurveID
-
- // DynamicRecordSizingDisabled disables adaptive sizing of TLS records.
- // When true, the largest possible TLS record size is always used. When
- // false, the size of TLS records may be adjusted in an attempt to
- // improve latency.
- DynamicRecordSizingDisabled bool
-
- // Renegotiation controls what types of renegotiation are supported.
- // The default, none, is correct for the vast majority of applications.
- Renegotiation RenegotiationSupport
-
- // KeyLogWriter optionally specifies a destination for TLS master secrets
- // in NSS key log format that can be used to allow external programs
- // such as Wireshark to decrypt TLS connections.
- // See https://developer.mozilla.org/en-US/docs/Mozilla/Projects/NSS/Key_Log_Format.
- // Use of KeyLogWriter compromises security and should only be
- // used for debugging.
- KeyLogWriter io.Writer
-
- // mutex protects sessionTicketKeys and autoSessionTicketKeys.
- mutex sync.RWMutex
- // sessionTicketKeys contains zero or more ticket keys. If set, it means the
- // the keys were set with SessionTicketKey or SetSessionTicketKeys. The
- // first key is used for new tickets and any subsequent keys can be used to
- // decrypt old tickets. The slice contents are not protected by the mutex
- // and are immutable.
- sessionTicketKeys []ticketKey
- // autoSessionTicketKeys is like sessionTicketKeys but is owned by the
- // auto-rotation logic. See Config.ticketKeys.
- autoSessionTicketKeys []ticketKey
-}
-
-// A RecordLayer handles encrypting and decrypting of TLS messages.
-type RecordLayer interface {
- SetReadKey(encLevel EncryptionLevel, suite *CipherSuiteTLS13, trafficSecret []byte)
- SetWriteKey(encLevel EncryptionLevel, suite *CipherSuiteTLS13, trafficSecret []byte)
- ReadHandshakeMessage() ([]byte, error)
- WriteRecord([]byte) (int, error)
- SendAlert(uint8)
-}
-
-type ExtraConfig struct {
- // GetExtensions, if not nil, is called before a message that allows
- // sending of extensions is sent.
- // Currently only implemented for the ClientHello message (for the client)
- // and for the EncryptedExtensions message (for the server).
- // Only valid for TLS 1.3.
- GetExtensions func(handshakeMessageType uint8) []Extension
-
- // ReceivedExtensions, if not nil, is called when a message that allows the
- // inclusion of extensions is received.
- // It is called with an empty slice of extensions, if the message didn't
- // contain any extensions.
- // Currently only implemented for the ClientHello message (sent by the
- // client) and for the EncryptedExtensions message (sent by the server).
- // Only valid for TLS 1.3.
- ReceivedExtensions func(handshakeMessageType uint8, exts []Extension)
-
- // AlternativeRecordLayer is used by QUIC
- AlternativeRecordLayer RecordLayer
-
- // Enforce the selection of a supported application protocol.
- // Only works for TLS 1.3.
- // If enabled, client and server have to agree on an application protocol.
- // Otherwise, connection establishment fails.
- EnforceNextProtoSelection bool
-
- // If MaxEarlyData is greater than 0, the client will be allowed to send early
- // data when resuming a session.
- // Requires the AlternativeRecordLayer to be set.
- //
- // It has no meaning on the client.
- MaxEarlyData uint32
-
- // The Accept0RTT callback is called when the client offers 0-RTT.
- // The server then has to decide if it wants to accept or reject 0-RTT.
- // It is only used for servers.
- Accept0RTT func(appData []byte) bool
-
- // 0RTTRejected is called when the server rejectes 0-RTT.
- // It is only used for clients.
- Rejected0RTT func()
-
- // If set, the client will export the 0-RTT key when resuming a session that
- // allows sending of early data.
- // Requires the AlternativeRecordLayer to be set.
- //
- // It has no meaning to the server.
- Enable0RTT bool
-
- // Is called when the client saves a session ticket to the session ticket.
- // This gives the application the opportunity to save some data along with the ticket,
- // which can be restored when the session ticket is used.
- GetAppDataForSessionState func() []byte
-
- // Is called when the client uses a session ticket.
- // Restores the application data that was saved earlier on GetAppDataForSessionTicket.
- SetAppDataFromSessionState func([]byte)
-}
-
-// Clone clones.
-func (c *ExtraConfig) Clone() *ExtraConfig {
- return &ExtraConfig{
- GetExtensions: c.GetExtensions,
- ReceivedExtensions: c.ReceivedExtensions,
- AlternativeRecordLayer: c.AlternativeRecordLayer,
- EnforceNextProtoSelection: c.EnforceNextProtoSelection,
- MaxEarlyData: c.MaxEarlyData,
- Enable0RTT: c.Enable0RTT,
- Accept0RTT: c.Accept0RTT,
- Rejected0RTT: c.Rejected0RTT,
- GetAppDataForSessionState: c.GetAppDataForSessionState,
- SetAppDataFromSessionState: c.SetAppDataFromSessionState,
- }
-}
-
-func (c *ExtraConfig) usesAlternativeRecordLayer() bool {
- return c != nil && c.AlternativeRecordLayer != nil
-}
-
-const (
- // ticketKeyNameLen is the number of bytes of identifier that is prepended to
- // an encrypted session ticket in order to identify the key used to encrypt it.
- ticketKeyNameLen = 16
-
- // ticketKeyLifetime is how long a ticket key remains valid and can be used to
- // resume a client connection.
- ticketKeyLifetime = 7 * 24 * time.Hour // 7 days
-
- // ticketKeyRotation is how often the server should rotate the session ticket key
- // that is used for new tickets.
- ticketKeyRotation = 24 * time.Hour
-)
-
-// ticketKey is the internal representation of a session ticket key.
-type ticketKey struct {
- // keyName is an opaque byte string that serves to identify the session
- // ticket key. It's exposed as plaintext in every session ticket.
- keyName [ticketKeyNameLen]byte
- aesKey [16]byte
- hmacKey [16]byte
- // created is the time at which this ticket key was created. See Config.ticketKeys.
- created time.Time
-}
-
-// ticketKeyFromBytes converts from the external representation of a session
-// ticket key to a ticketKey. Externally, session ticket keys are 32 random
-// bytes and this function expands that into sufficient name and key material.
-func (c *config) ticketKeyFromBytes(b [32]byte) (key ticketKey) {
- hashed := sha512.Sum512(b[:])
- copy(key.keyName[:], hashed[:ticketKeyNameLen])
- copy(key.aesKey[:], hashed[ticketKeyNameLen:ticketKeyNameLen+16])
- copy(key.hmacKey[:], hashed[ticketKeyNameLen+16:ticketKeyNameLen+32])
- key.created = c.time()
- return key
-}
-
-// maxSessionTicketLifetime is the maximum allowed lifetime of a TLS 1.3 session
-// ticket, and the lifetime we set for tickets we send.
-const maxSessionTicketLifetime = 7 * 24 * time.Hour
-
-// Clone returns a shallow clone of c or nil if c is nil. It is safe to clone a Config that is
-// being used concurrently by a TLS client or server.
-func (c *config) Clone() *config {
- if c == nil {
- return nil
- }
- c.mutex.RLock()
- defer c.mutex.RUnlock()
- return &config{
- Rand: c.Rand,
- Time: c.Time,
- Certificates: c.Certificates,
- NameToCertificate: c.NameToCertificate,
- GetCertificate: c.GetCertificate,
- GetClientCertificate: c.GetClientCertificate,
- GetConfigForClient: c.GetConfigForClient,
- VerifyPeerCertificate: c.VerifyPeerCertificate,
- VerifyConnection: c.VerifyConnection,
- RootCAs: c.RootCAs,
- NextProtos: c.NextProtos,
- ServerName: c.ServerName,
- ClientAuth: c.ClientAuth,
- ClientCAs: c.ClientCAs,
- InsecureSkipVerify: c.InsecureSkipVerify,
- CipherSuites: c.CipherSuites,
- PreferServerCipherSuites: c.PreferServerCipherSuites,
- SessionTicketsDisabled: c.SessionTicketsDisabled,
- SessionTicketKey: c.SessionTicketKey,
- ClientSessionCache: c.ClientSessionCache,
- MinVersion: c.MinVersion,
- MaxVersion: c.MaxVersion,
- CurvePreferences: c.CurvePreferences,
- DynamicRecordSizingDisabled: c.DynamicRecordSizingDisabled,
- Renegotiation: c.Renegotiation,
- KeyLogWriter: c.KeyLogWriter,
- sessionTicketKeys: c.sessionTicketKeys,
- autoSessionTicketKeys: c.autoSessionTicketKeys,
- }
-}
-
-// deprecatedSessionTicketKey is set as the prefix of SessionTicketKey if it was
-// randomized for backwards compatibility but is not in use.
-var deprecatedSessionTicketKey = []byte("DEPRECATED")
-
-// initLegacySessionTicketKeyRLocked ensures the legacy SessionTicketKey field is
-// randomized if empty, and that sessionTicketKeys is populated from it otherwise.
-func (c *config) initLegacySessionTicketKeyRLocked() {
- // Don't write if SessionTicketKey is already defined as our deprecated string,
- // or if it is defined by the user but sessionTicketKeys is already set.
- if c.SessionTicketKey != [32]byte{} &&
- (bytes.HasPrefix(c.SessionTicketKey[:], deprecatedSessionTicketKey) || len(c.sessionTicketKeys) > 0) {
- return
- }
-
- // We need to write some data, so get an exclusive lock and re-check any conditions.
- c.mutex.RUnlock()
- defer c.mutex.RLock()
- c.mutex.Lock()
- defer c.mutex.Unlock()
- if c.SessionTicketKey == [32]byte{} {
- if _, err := io.ReadFull(c.rand(), c.SessionTicketKey[:]); err != nil {
- panic(fmt.Sprintf("tls: unable to generate random session ticket key: %v", err))
- }
- // Write the deprecated prefix at the beginning so we know we created
- // it. This key with the DEPRECATED prefix isn't used as an actual
- // session ticket key, and is only randomized in case the application
- // reuses it for some reason.
- copy(c.SessionTicketKey[:], deprecatedSessionTicketKey)
- } else if !bytes.HasPrefix(c.SessionTicketKey[:], deprecatedSessionTicketKey) && len(c.sessionTicketKeys) == 0 {
- c.sessionTicketKeys = []ticketKey{c.ticketKeyFromBytes(c.SessionTicketKey)}
- }
-
-}
-
-// ticketKeys returns the ticketKeys for this connection.
-// If configForClient has explicitly set keys, those will
-// be returned. Otherwise, the keys on c will be used and
-// may be rotated if auto-managed.
-// During rotation, any expired session ticket keys are deleted from
-// c.sessionTicketKeys. If the session ticket key that is currently
-// encrypting tickets (ie. the first ticketKey in c.sessionTicketKeys)
-// is not fresh, then a new session ticket key will be
-// created and prepended to c.sessionTicketKeys.
-func (c *config) ticketKeys(configForClient *config) []ticketKey {
- // If the ConfigForClient callback returned a Config with explicitly set
- // keys, use those, otherwise just use the original Config.
- if configForClient != nil {
- configForClient.mutex.RLock()
- if configForClient.SessionTicketsDisabled {
- return nil
- }
- configForClient.initLegacySessionTicketKeyRLocked()
- if len(configForClient.sessionTicketKeys) != 0 {
- ret := configForClient.sessionTicketKeys
- configForClient.mutex.RUnlock()
- return ret
- }
- configForClient.mutex.RUnlock()
- }
-
- c.mutex.RLock()
- defer c.mutex.RUnlock()
- if c.SessionTicketsDisabled {
- return nil
- }
- c.initLegacySessionTicketKeyRLocked()
- if len(c.sessionTicketKeys) != 0 {
- return c.sessionTicketKeys
- }
- // Fast path for the common case where the key is fresh enough.
- if len(c.autoSessionTicketKeys) > 0 && c.time().Sub(c.autoSessionTicketKeys[0].created) < ticketKeyRotation {
- return c.autoSessionTicketKeys
- }
-
- // autoSessionTicketKeys are managed by auto-rotation.
- c.mutex.RUnlock()
- defer c.mutex.RLock()
- c.mutex.Lock()
- defer c.mutex.Unlock()
- // Re-check the condition in case it changed since obtaining the new lock.
- if len(c.autoSessionTicketKeys) == 0 || c.time().Sub(c.autoSessionTicketKeys[0].created) >= ticketKeyRotation {
- var newKey [32]byte
- if _, err := io.ReadFull(c.rand(), newKey[:]); err != nil {
- panic(fmt.Sprintf("unable to generate random session ticket key: %v", err))
- }
- valid := make([]ticketKey, 0, len(c.autoSessionTicketKeys)+1)
- valid = append(valid, c.ticketKeyFromBytes(newKey))
- for _, k := range c.autoSessionTicketKeys {
- // While rotating the current key, also remove any expired ones.
- if c.time().Sub(k.created) < ticketKeyLifetime {
- valid = append(valid, k)
- }
- }
- c.autoSessionTicketKeys = valid
- }
- return c.autoSessionTicketKeys
-}
-
-// SetSessionTicketKeys updates the session ticket keys for a server.
-//
-// The first key will be used when creating new tickets, while all keys can be
-// used for decrypting tickets. It is safe to call this function while the
-// server is running in order to rotate the session ticket keys. The function
-// will panic if keys is empty.
-//
-// Calling this function will turn off automatic session ticket key rotation.
-//
-// If multiple servers are terminating connections for the same host they should
-// all have the same session ticket keys. If the session ticket keys leaks,
-// previously recorded and future TLS connections using those keys might be
-// compromised.
-func (c *config) SetSessionTicketKeys(keys [][32]byte) {
- if len(keys) == 0 {
- panic("tls: keys must have at least one key")
- }
-
- newKeys := make([]ticketKey, len(keys))
- for i, bytes := range keys {
- newKeys[i] = c.ticketKeyFromBytes(bytes)
- }
-
- c.mutex.Lock()
- c.sessionTicketKeys = newKeys
- c.mutex.Unlock()
-}
-
-func (c *config) rand() io.Reader {
- r := c.Rand
- if r == nil {
- return rand.Reader
- }
- return r
-}
-
-func (c *config) time() time.Time {
- t := c.Time
- if t == nil {
- t = time.Now
- }
- return t()
-}
-
-func (c *config) cipherSuites() []uint16 {
- if c.CipherSuites != nil {
- return c.CipherSuites
- }
- return defaultCipherSuites
-}
-
-var supportedVersions = []uint16{
- VersionTLS13,
- VersionTLS12,
- VersionTLS11,
- VersionTLS10,
-}
-
-func (c *config) supportedVersions() []uint16 {
- versions := make([]uint16, 0, len(supportedVersions))
- for _, v := range supportedVersions {
- if c != nil && c.MinVersion != 0 && v < c.MinVersion {
- continue
- }
- if c != nil && c.MaxVersion != 0 && v > c.MaxVersion {
- continue
- }
- versions = append(versions, v)
- }
- return versions
-}
-
-func (c *config) maxSupportedVersion() uint16 {
- supportedVersions := c.supportedVersions()
- if len(supportedVersions) == 0 {
- return 0
- }
- return supportedVersions[0]
-}
-
-// supportedVersionsFromMax returns a list of supported versions derived from a
-// legacy maximum version value. Note that only versions supported by this
-// library are returned. Any newer peer will use supportedVersions anyway.
-func supportedVersionsFromMax(maxVersion uint16) []uint16 {
- versions := make([]uint16, 0, len(supportedVersions))
- for _, v := range supportedVersions {
- if v > maxVersion {
- continue
- }
- versions = append(versions, v)
- }
- return versions
-}
-
-var defaultCurvePreferences = []CurveID{X25519, CurveP256, CurveP384, CurveP521}
-
-func (c *config) curvePreferences() []CurveID {
- if c == nil || len(c.CurvePreferences) == 0 {
- return defaultCurvePreferences
- }
- return c.CurvePreferences
-}
-
-func (c *config) supportsCurve(curve CurveID) bool {
- for _, cc := range c.curvePreferences() {
- if cc == curve {
- return true
- }
- }
- return false
-}
-
-// mutualVersion returns the protocol version to use given the advertised
-// versions of the peer. Priority is given to the peer preference order.
-func (c *config) mutualVersion(peerVersions []uint16) (uint16, bool) {
- supportedVersions := c.supportedVersions()
- for _, peerVersion := range peerVersions {
- for _, v := range supportedVersions {
- if v == peerVersion {
- return v, true
- }
- }
- }
- return 0, false
-}
-
-var errNoCertificates = errors.New("tls: no certificates configured")
-
-// getCertificate returns the best certificate for the given ClientHelloInfo,
-// defaulting to the first element of c.Certificates.
-func (c *config) getCertificate(clientHello *ClientHelloInfo) (*Certificate, error) {
- if c.GetCertificate != nil &&
- (len(c.Certificates) == 0 || len(clientHello.ServerName) > 0) {
- cert, err := c.GetCertificate(clientHello)
- if cert != nil || err != nil {
- return cert, err
- }
- }
-
- if len(c.Certificates) == 0 {
- return nil, errNoCertificates
- }
-
- if len(c.Certificates) == 1 {
- // There's only one choice, so no point doing any work.
- return &c.Certificates[0], nil
- }
-
- if c.NameToCertificate != nil {
- name := strings.ToLower(clientHello.ServerName)
- if cert, ok := c.NameToCertificate[name]; ok {
- return cert, nil
- }
- if len(name) > 0 {
- labels := strings.Split(name, ".")
- labels[0] = "*"
- wildcardName := strings.Join(labels, ".")
- if cert, ok := c.NameToCertificate[wildcardName]; ok {
- return cert, nil
- }
- }
- }
-
- for _, cert := range c.Certificates {
- if err := clientHello.SupportsCertificate(&cert); err == nil {
- return &cert, nil
- }
- }
-
- // If nothing matches, return the first certificate.
- return &c.Certificates[0], nil
-}
-
-// SupportsCertificate returns nil if the provided certificate is supported by
-// the client that sent the ClientHello. Otherwise, it returns an error
-// describing the reason for the incompatibility.
-//
-// If this ClientHelloInfo was passed to a GetConfigForClient or GetCertificate
-// callback, this method will take into account the associated Config. Note that
-// if GetConfigForClient returns a different Config, the change can't be
-// accounted for by this method.
-//
-// This function will call x509.ParseCertificate unless c.Leaf is set, which can
-// incur a significant performance cost.
-func (chi *clientHelloInfo) SupportsCertificate(c *Certificate) error {
- // Note we don't currently support certificate_authorities nor
- // signature_algorithms_cert, and don't check the algorithms of the
- // signatures on the chain (which anyway are a SHOULD, see RFC 8446,
- // Section 4.4.2.2).
-
- config := chi.config
- if config == nil {
- config = &Config{}
- }
- conf := fromConfig(config)
- vers, ok := conf.mutualVersion(chi.SupportedVersions)
- if !ok {
- return errors.New("no mutually supported protocol versions")
- }
-
- // If the client specified the name they are trying to connect to, the
- // certificate needs to be valid for it.
- if chi.ServerName != "" {
- x509Cert, err := leafCertificate(c)
- if err != nil {
- return fmt.Errorf("failed to parse certificate: %w", err)
- }
- if err := x509Cert.VerifyHostname(chi.ServerName); err != nil {
- return fmt.Errorf("certificate is not valid for requested server name: %w", err)
- }
- }
-
- // supportsRSAFallback returns nil if the certificate and connection support
- // the static RSA key exchange, and unsupported otherwise. The logic for
- // supporting static RSA is completely disjoint from the logic for
- // supporting signed key exchanges, so we just check it as a fallback.
- supportsRSAFallback := func(unsupported error) error {
- // TLS 1.3 dropped support for the static RSA key exchange.
- if vers == VersionTLS13 {
- return unsupported
- }
- // The static RSA key exchange works by decrypting a challenge with the
- // RSA private key, not by signing, so check the PrivateKey implements
- // crypto.Decrypter, like *rsa.PrivateKey does.
- if priv, ok := c.PrivateKey.(crypto.Decrypter); ok {
- if _, ok := priv.Public().(*rsa.PublicKey); !ok {
- return unsupported
- }
- } else {
- return unsupported
- }
- // Finally, there needs to be a mutual cipher suite that uses the static
- // RSA key exchange instead of ECDHE.
- rsaCipherSuite := selectCipherSuite(chi.CipherSuites, conf.cipherSuites(), func(c *cipherSuite) bool {
- if c.flags&suiteECDHE != 0 {
- return false
- }
- if vers < VersionTLS12 && c.flags&suiteTLS12 != 0 {
- return false
- }
- return true
- })
- if rsaCipherSuite == nil {
- return unsupported
- }
- return nil
- }
-
- // If the client sent the signature_algorithms extension, ensure it supports
- // schemes we can use with this certificate and TLS version.
- if len(chi.SignatureSchemes) > 0 {
- if _, err := selectSignatureScheme(vers, c, chi.SignatureSchemes); err != nil {
- return supportsRSAFallback(err)
- }
- }
-
- // In TLS 1.3 we are done because supported_groups is only relevant to the
- // ECDHE computation, point format negotiation is removed, cipher suites are
- // only relevant to the AEAD choice, and static RSA does not exist.
- if vers == VersionTLS13 {
- return nil
- }
-
- // The only signed key exchange we support is ECDHE.
- if !supportsECDHE(conf, chi.SupportedCurves, chi.SupportedPoints) {
- return supportsRSAFallback(errors.New("client doesn't support ECDHE, can only use legacy RSA key exchange"))
- }
-
- var ecdsaCipherSuite bool
- if priv, ok := c.PrivateKey.(crypto.Signer); ok {
- switch pub := priv.Public().(type) {
- case *ecdsa.PublicKey:
- var curve CurveID
- switch pub.Curve {
- case elliptic.P256():
- curve = CurveP256
- case elliptic.P384():
- curve = CurveP384
- case elliptic.P521():
- curve = CurveP521
- default:
- return supportsRSAFallback(unsupportedCertificateError(c))
- }
- var curveOk bool
- for _, c := range chi.SupportedCurves {
- if c == curve && conf.supportsCurve(c) {
- curveOk = true
- break
- }
- }
- if !curveOk {
- return errors.New("client doesn't support certificate curve")
- }
- ecdsaCipherSuite = true
- case ed25519.PublicKey:
- if vers < VersionTLS12 || len(chi.SignatureSchemes) == 0 {
- return errors.New("connection doesn't support Ed25519")
- }
- ecdsaCipherSuite = true
- case *rsa.PublicKey:
- default:
- return supportsRSAFallback(unsupportedCertificateError(c))
- }
- } else {
- return supportsRSAFallback(unsupportedCertificateError(c))
- }
-
- // Make sure that there is a mutually supported cipher suite that works with
- // this certificate. Cipher suite selection will then apply the logic in
- // reverse to pick it. See also serverHandshakeState.cipherSuiteOk.
- cipherSuite := selectCipherSuite(chi.CipherSuites, conf.cipherSuites(), func(c *cipherSuite) bool {
- if c.flags&suiteECDHE == 0 {
- return false
- }
- if c.flags&suiteECSign != 0 {
- if !ecdsaCipherSuite {
- return false
- }
- } else {
- if ecdsaCipherSuite {
- return false
- }
- }
- if vers < VersionTLS12 && c.flags&suiteTLS12 != 0 {
- return false
- }
- return true
- })
- if cipherSuite == nil {
- return supportsRSAFallback(errors.New("client doesn't support any cipher suites compatible with the certificate"))
- }
-
- return nil
-}
-
-// BuildNameToCertificate parses c.Certificates and builds c.NameToCertificate
-// from the CommonName and SubjectAlternateName fields of each of the leaf
-// certificates.
-//
-// Deprecated: NameToCertificate only allows associating a single certificate
-// with a given name. Leave that field nil to let the library select the first
-// compatible chain from Certificates.
-func (c *config) BuildNameToCertificate() {
- c.NameToCertificate = make(map[string]*Certificate)
- for i := range c.Certificates {
- cert := &c.Certificates[i]
- x509Cert, err := leafCertificate(cert)
- if err != nil {
- continue
- }
- // If SANs are *not* present, some clients will consider the certificate
- // valid for the name in the Common Name.
- if x509Cert.Subject.CommonName != "" && len(x509Cert.DNSNames) == 0 {
- c.NameToCertificate[x509Cert.Subject.CommonName] = cert
- }
- for _, san := range x509Cert.DNSNames {
- c.NameToCertificate[san] = cert
- }
- }
-}
-
-const (
- keyLogLabelTLS12 = "CLIENT_RANDOM"
- keyLogLabelEarlyTraffic = "CLIENT_EARLY_TRAFFIC_SECRET"
- keyLogLabelClientHandshake = "CLIENT_HANDSHAKE_TRAFFIC_SECRET"
- keyLogLabelServerHandshake = "SERVER_HANDSHAKE_TRAFFIC_SECRET"
- keyLogLabelClientTraffic = "CLIENT_TRAFFIC_SECRET_0"
- keyLogLabelServerTraffic = "SERVER_TRAFFIC_SECRET_0"
-)
-
-func (c *config) writeKeyLog(label string, clientRandom, secret []byte) error {
- if c.KeyLogWriter == nil {
- return nil
- }
-
- logLine := []byte(fmt.Sprintf("%s %x %x\n", label, clientRandom, secret))
-
- writerMutex.Lock()
- _, err := c.KeyLogWriter.Write(logLine)
- writerMutex.Unlock()
-
- return err
-}
-
-// writerMutex protects all KeyLogWriters globally. It is rarely enabled,
-// and is only for debugging, so a global mutex saves space.
-var writerMutex sync.Mutex
-
-// A Certificate is a chain of one or more certificates, leaf first.
-type Certificate = tls.Certificate
-
-// leaf returns the parsed leaf certificate, either from c.Leaf or by parsing
-// the corresponding c.Certificate[0].
-func leafCertificate(c *Certificate) (*x509.Certificate, error) {
- if c.Leaf != nil {
- return c.Leaf, nil
- }
- return x509.ParseCertificate(c.Certificate[0])
-}
-
-type handshakeMessage interface {
- marshal() []byte
- unmarshal([]byte) bool
-}
-
-// lruSessionCache is a ClientSessionCache implementation that uses an LRU
-// caching strategy.
-type lruSessionCache struct {
- sync.Mutex
-
- m map[string]*list.Element
- q *list.List
- capacity int
-}
-
-type lruSessionCacheEntry struct {
- sessionKey string
- state *ClientSessionState
-}
-
-// NewLRUClientSessionCache returns a ClientSessionCache with the given
-// capacity that uses an LRU strategy. If capacity is < 1, a default capacity
-// is used instead.
-func NewLRUClientSessionCache(capacity int) ClientSessionCache {
- const defaultSessionCacheCapacity = 64
-
- if capacity < 1 {
- capacity = defaultSessionCacheCapacity
- }
- return &lruSessionCache{
- m: make(map[string]*list.Element),
- q: list.New(),
- capacity: capacity,
- }
-}
-
-// Put adds the provided (sessionKey, cs) pair to the cache. If cs is nil, the entry
-// corresponding to sessionKey is removed from the cache instead.
-func (c *lruSessionCache) Put(sessionKey string, cs *ClientSessionState) {
- c.Lock()
- defer c.Unlock()
-
- if elem, ok := c.m[sessionKey]; ok {
- if cs == nil {
- c.q.Remove(elem)
- delete(c.m, sessionKey)
- } else {
- entry := elem.Value.(*lruSessionCacheEntry)
- entry.state = cs
- c.q.MoveToFront(elem)
- }
- return
- }
-
- if c.q.Len() < c.capacity {
- entry := &lruSessionCacheEntry{sessionKey, cs}
- c.m[sessionKey] = c.q.PushFront(entry)
- return
- }
-
- elem := c.q.Back()
- entry := elem.Value.(*lruSessionCacheEntry)
- delete(c.m, entry.sessionKey)
- entry.sessionKey = sessionKey
- entry.state = cs
- c.q.MoveToFront(elem)
- c.m[sessionKey] = elem
-}
-
-// Get returns the ClientSessionState value associated with a given key. It
-// returns (nil, false) if no value is found.
-func (c *lruSessionCache) Get(sessionKey string) (*ClientSessionState, bool) {
- c.Lock()
- defer c.Unlock()
-
- if elem, ok := c.m[sessionKey]; ok {
- c.q.MoveToFront(elem)
- return elem.Value.(*lruSessionCacheEntry).state, true
- }
- return nil, false
-}
-
-var emptyConfig Config
-
-func defaultConfig() *Config {
- return &emptyConfig
-}
-
-func unexpectedMessageError(wanted, got interface{}) error {
- return fmt.Errorf("tls: received unexpected handshake message of type %T when waiting for %T", got, wanted)
-}
-
-func isSupportedSignatureAlgorithm(sigAlg SignatureScheme, supportedSignatureAlgorithms []SignatureScheme) bool {
- for _, s := range supportedSignatureAlgorithms {
- if s == sigAlg {
- return true
- }
- }
- return false
-}
diff --git a/vendor/github.com/marten-seemann/qtls-go1-17/conn.go b/vendor/github.com/marten-seemann/qtls-go1-17/conn.go
deleted file mode 100644
index 70fad4652..000000000
--- a/vendor/github.com/marten-seemann/qtls-go1-17/conn.go
+++ /dev/null
@@ -1,1601 +0,0 @@
-// Copyright 2010 The Go Authors. All rights reserved.
-// Use of this source code is governed by a BSD-style
-// license that can be found in the LICENSE file.
-
-// TLS low level connection and record layer
-
-package qtls
-
-import (
- "bytes"
- "context"
- "crypto/cipher"
- "crypto/subtle"
- "crypto/x509"
- "errors"
- "fmt"
- "hash"
- "io"
- "net"
- "sync"
- "sync/atomic"
- "time"
-)
-
-// A Conn represents a secured connection.
-// It implements the net.Conn interface.
-type Conn struct {
- // constant
- conn net.Conn
- isClient bool
- handshakeFn func(context.Context) error // (*Conn).clientHandshake or serverHandshake
-
- // handshakeStatus is 1 if the connection is currently transferring
- // application data (i.e. is not currently processing a handshake).
- // handshakeStatus == 1 implies handshakeErr == nil.
- // This field is only to be accessed with sync/atomic.
- handshakeStatus uint32
- // constant after handshake; protected by handshakeMutex
- handshakeMutex sync.Mutex
- handshakeErr error // error resulting from handshake
- vers uint16 // TLS version
- haveVers bool // version has been negotiated
- config *config // configuration passed to constructor
- // handshakes counts the number of handshakes performed on the
- // connection so far. If renegotiation is disabled then this is either
- // zero or one.
- extraConfig *ExtraConfig
-
- handshakes int
- didResume bool // whether this connection was a session resumption
- cipherSuite uint16
- ocspResponse []byte // stapled OCSP response
- scts [][]byte // signed certificate timestamps from server
- peerCertificates []*x509.Certificate
- // verifiedChains contains the certificate chains that we built, as
- // opposed to the ones presented by the server.
- verifiedChains [][]*x509.Certificate
- // serverName contains the server name indicated by the client, if any.
- serverName string
- // secureRenegotiation is true if the server echoed the secure
- // renegotiation extension. (This is meaningless as a server because
- // renegotiation is not supported in that case.)
- secureRenegotiation bool
- // ekm is a closure for exporting keying material.
- ekm func(label string, context []byte, length int) ([]byte, error)
- // For the client:
- // resumptionSecret is the resumption_master_secret for handling
- // NewSessionTicket messages. nil if config.SessionTicketsDisabled.
- // For the server:
- // resumptionSecret is the resumption_master_secret for generating
- // NewSessionTicket messages. Only used when the alternative record
- // layer is set. nil if config.SessionTicketsDisabled.
- resumptionSecret []byte
-
- // ticketKeys is the set of active session ticket keys for this
- // connection. The first one is used to encrypt new tickets and
- // all are tried to decrypt tickets.
- ticketKeys []ticketKey
-
- // clientFinishedIsFirst is true if the client sent the first Finished
- // message during the most recent handshake. This is recorded because
- // the first transmitted Finished message is the tls-unique
- // channel-binding value.
- clientFinishedIsFirst bool
-
- // closeNotifyErr is any error from sending the alertCloseNotify record.
- closeNotifyErr error
- // closeNotifySent is true if the Conn attempted to send an
- // alertCloseNotify record.
- closeNotifySent bool
-
- // clientFinished and serverFinished contain the Finished message sent
- // by the client or server in the most recent handshake. This is
- // retained to support the renegotiation extension and tls-unique
- // channel-binding.
- clientFinished [12]byte
- serverFinished [12]byte
-
- // clientProtocol is the negotiated ALPN protocol.
- clientProtocol string
-
- // input/output
- in, out halfConn
- rawInput bytes.Buffer // raw input, starting with a record header
- input bytes.Reader // application data waiting to be read, from rawInput.Next
- hand bytes.Buffer // handshake data waiting to be read
- buffering bool // whether records are buffered in sendBuf
- sendBuf []byte // a buffer of records waiting to be sent
-
- // bytesSent counts the bytes of application data sent.
- // packetsSent counts packets.
- bytesSent int64
- packetsSent int64
-
- // retryCount counts the number of consecutive non-advancing records
- // received by Conn.readRecord. That is, records that neither advance the
- // handshake, nor deliver application data. Protected by in.Mutex.
- retryCount int
-
- // activeCall is an atomic int32; the low bit is whether Close has
- // been called. the rest of the bits are the number of goroutines
- // in Conn.Write.
- activeCall int32
-
- used0RTT bool
-
- tmp [16]byte
-}
-
-// Access to net.Conn methods.
-// Cannot just embed net.Conn because that would
-// export the struct field too.
-
-// LocalAddr returns the local network address.
-func (c *Conn) LocalAddr() net.Addr {
- return c.conn.LocalAddr()
-}
-
-// RemoteAddr returns the remote network address.
-func (c *Conn) RemoteAddr() net.Addr {
- return c.conn.RemoteAddr()
-}
-
-// SetDeadline sets the read and write deadlines associated with the connection.
-// A zero value for t means Read and Write will not time out.
-// After a Write has timed out, the TLS state is corrupt and all future writes will return the same error.
-func (c *Conn) SetDeadline(t time.Time) error {
- return c.conn.SetDeadline(t)
-}
-
-// SetReadDeadline sets the read deadline on the underlying connection.
-// A zero value for t means Read will not time out.
-func (c *Conn) SetReadDeadline(t time.Time) error {
- return c.conn.SetReadDeadline(t)
-}
-
-// SetWriteDeadline sets the write deadline on the underlying connection.
-// A zero value for t means Write will not time out.
-// After a Write has timed out, the TLS state is corrupt and all future writes will return the same error.
-func (c *Conn) SetWriteDeadline(t time.Time) error {
- return c.conn.SetWriteDeadline(t)
-}
-
-// A halfConn represents one direction of the record layer
-// connection, either sending or receiving.
-type halfConn struct {
- sync.Mutex
-
- err error // first permanent error
- version uint16 // protocol version
- cipher interface{} // cipher algorithm
- mac hash.Hash
- seq [8]byte // 64-bit sequence number
-
- scratchBuf [13]byte // to avoid allocs; interface method args escape
-
- nextCipher interface{} // next encryption state
- nextMac hash.Hash // next MAC algorithm
-
- trafficSecret []byte // current TLS 1.3 traffic secret
-
- setKeyCallback func(encLevel EncryptionLevel, suite *CipherSuiteTLS13, trafficSecret []byte)
-}
-
-type permanentError struct {
- err net.Error
-}
-
-func (e *permanentError) Error() string { return e.err.Error() }
-func (e *permanentError) Unwrap() error { return e.err }
-func (e *permanentError) Timeout() bool { return e.err.Timeout() }
-func (e *permanentError) Temporary() bool { return false }
-
-func (hc *halfConn) setErrorLocked(err error) error {
- if e, ok := err.(net.Error); ok {
- hc.err = &permanentError{err: e}
- } else {
- hc.err = err
- }
- return hc.err
-}
-
-// prepareCipherSpec sets the encryption and MAC states
-// that a subsequent changeCipherSpec will use.
-func (hc *halfConn) prepareCipherSpec(version uint16, cipher interface{}, mac hash.Hash) {
- hc.version = version
- hc.nextCipher = cipher
- hc.nextMac = mac
-}
-
-// changeCipherSpec changes the encryption and MAC states
-// to the ones previously passed to prepareCipherSpec.
-func (hc *halfConn) changeCipherSpec() error {
- if hc.nextCipher == nil || hc.version == VersionTLS13 {
- return alertInternalError
- }
- hc.cipher = hc.nextCipher
- hc.mac = hc.nextMac
- hc.nextCipher = nil
- hc.nextMac = nil
- for i := range hc.seq {
- hc.seq[i] = 0
- }
- return nil
-}
-
-func (hc *halfConn) exportKey(encLevel EncryptionLevel, suite *cipherSuiteTLS13, trafficSecret []byte) {
- if hc.setKeyCallback != nil {
- s := &CipherSuiteTLS13{
- ID: suite.id,
- KeyLen: suite.keyLen,
- Hash: suite.hash,
- AEAD: func(key, fixedNonce []byte) cipher.AEAD { return suite.aead(key, fixedNonce) },
- }
- hc.setKeyCallback(encLevel, s, trafficSecret)
- }
-}
-
-func (hc *halfConn) setTrafficSecret(suite *cipherSuiteTLS13, secret []byte) {
- hc.trafficSecret = secret
- key, iv := suite.trafficKey(secret)
- hc.cipher = suite.aead(key, iv)
- for i := range hc.seq {
- hc.seq[i] = 0
- }
-}
-
-// incSeq increments the sequence number.
-func (hc *halfConn) incSeq() {
- for i := 7; i >= 0; i-- {
- hc.seq[i]++
- if hc.seq[i] != 0 {
- return
- }
- }
-
- // Not allowed to let sequence number wrap.
- // Instead, must renegotiate before it does.
- // Not likely enough to bother.
- panic("TLS: sequence number wraparound")
-}
-
-// explicitNonceLen returns the number of bytes of explicit nonce or IV included
-// in each record. Explicit nonces are present only in CBC modes after TLS 1.0
-// and in certain AEAD modes in TLS 1.2.
-func (hc *halfConn) explicitNonceLen() int {
- if hc.cipher == nil {
- return 0
- }
-
- switch c := hc.cipher.(type) {
- case cipher.Stream:
- return 0
- case aead:
- return c.explicitNonceLen()
- case cbcMode:
- // TLS 1.1 introduced a per-record explicit IV to fix the BEAST attack.
- if hc.version >= VersionTLS11 {
- return c.BlockSize()
- }
- return 0
- default:
- panic("unknown cipher type")
- }
-}
-
-// extractPadding returns, in constant time, the length of the padding to remove
-// from the end of payload. It also returns a byte which is equal to 255 if the
-// padding was valid and 0 otherwise. See RFC 2246, Section 6.2.3.2.
-func extractPadding(payload []byte) (toRemove int, good byte) {
- if len(payload) < 1 {
- return 0, 0
- }
-
- paddingLen := payload[len(payload)-1]
- t := uint(len(payload)-1) - uint(paddingLen)
- // if len(payload) >= (paddingLen - 1) then the MSB of t is zero
- good = byte(int32(^t) >> 31)
-
- // The maximum possible padding length plus the actual length field
- toCheck := 256
- // The length of the padded data is public, so we can use an if here
- if toCheck > len(payload) {
- toCheck = len(payload)
- }
-
- for i := 0; i < toCheck; i++ {
- t := uint(paddingLen) - uint(i)
- // if i <= paddingLen then the MSB of t is zero
- mask := byte(int32(^t) >> 31)
- b := payload[len(payload)-1-i]
- good &^= mask&paddingLen ^ mask&b
- }
-
- // We AND together the bits of good and replicate the result across
- // all the bits.
- good &= good << 4
- good &= good << 2
- good &= good << 1
- good = uint8(int8(good) >> 7)
-
- // Zero the padding length on error. This ensures any unchecked bytes
- // are included in the MAC. Otherwise, an attacker that could
- // distinguish MAC failures from padding failures could mount an attack
- // similar to POODLE in SSL 3.0: given a good ciphertext that uses a
- // full block's worth of padding, replace the final block with another
- // block. If the MAC check passed but the padding check failed, the
- // last byte of that block decrypted to the block size.
- //
- // See also macAndPaddingGood logic below.
- paddingLen &= good
-
- toRemove = int(paddingLen) + 1
- return
-}
-
-func roundUp(a, b int) int {
- return a + (b-a%b)%b
-}
-
-// cbcMode is an interface for block ciphers using cipher block chaining.
-type cbcMode interface {
- cipher.BlockMode
- SetIV([]byte)
-}
-
-// decrypt authenticates and decrypts the record if protection is active at
-// this stage. The returned plaintext might overlap with the input.
-func (hc *halfConn) decrypt(record []byte) ([]byte, recordType, error) {
- var plaintext []byte
- typ := recordType(record[0])
- payload := record[recordHeaderLen:]
-
- // In TLS 1.3, change_cipher_spec messages are to be ignored without being
- // decrypted. See RFC 8446, Appendix D.4.
- if hc.version == VersionTLS13 && typ == recordTypeChangeCipherSpec {
- return payload, typ, nil
- }
-
- paddingGood := byte(255)
- paddingLen := 0
-
- explicitNonceLen := hc.explicitNonceLen()
-
- if hc.cipher != nil {
- switch c := hc.cipher.(type) {
- case cipher.Stream:
- c.XORKeyStream(payload, payload)
- case aead:
- if len(payload) < explicitNonceLen {
- return nil, 0, alertBadRecordMAC
- }
- nonce := payload[:explicitNonceLen]
- if len(nonce) == 0 {
- nonce = hc.seq[:]
- }
- payload = payload[explicitNonceLen:]
-
- var additionalData []byte
- if hc.version == VersionTLS13 {
- additionalData = record[:recordHeaderLen]
- } else {
- additionalData = append(hc.scratchBuf[:0], hc.seq[:]...)
- additionalData = append(additionalData, record[:3]...)
- n := len(payload) - c.Overhead()
- additionalData = append(additionalData, byte(n>>8), byte(n))
- }
-
- var err error
- plaintext, err = c.Open(payload[:0], nonce, payload, additionalData)
- if err != nil {
- return nil, 0, alertBadRecordMAC
- }
- case cbcMode:
- blockSize := c.BlockSize()
- minPayload := explicitNonceLen + roundUp(hc.mac.Size()+1, blockSize)
- if len(payload)%blockSize != 0 || len(payload) < minPayload {
- return nil, 0, alertBadRecordMAC
- }
-
- if explicitNonceLen > 0 {
- c.SetIV(payload[:explicitNonceLen])
- payload = payload[explicitNonceLen:]
- }
- c.CryptBlocks(payload, payload)
-
- // In a limited attempt to protect against CBC padding oracles like
- // Lucky13, the data past paddingLen (which is secret) is passed to
- // the MAC function as extra data, to be fed into the HMAC after
- // computing the digest. This makes the MAC roughly constant time as
- // long as the digest computation is constant time and does not
- // affect the subsequent write, modulo cache effects.
- paddingLen, paddingGood = extractPadding(payload)
- default:
- panic("unknown cipher type")
- }
-
- if hc.version == VersionTLS13 {
- if typ != recordTypeApplicationData {
- return nil, 0, alertUnexpectedMessage
- }
- if len(plaintext) > maxPlaintext+1 {
- return nil, 0, alertRecordOverflow
- }
- // Remove padding and find the ContentType scanning from the end.
- for i := len(plaintext) - 1; i >= 0; i-- {
- if plaintext[i] != 0 {
- typ = recordType(plaintext[i])
- plaintext = plaintext[:i]
- break
- }
- if i == 0 {
- return nil, 0, alertUnexpectedMessage
- }
- }
- }
- } else {
- plaintext = payload
- }
-
- if hc.mac != nil {
- macSize := hc.mac.Size()
- if len(payload) < macSize {
- return nil, 0, alertBadRecordMAC
- }
-
- n := len(payload) - macSize - paddingLen
- n = subtle.ConstantTimeSelect(int(uint32(n)>>31), 0, n) // if n < 0 { n = 0 }
- record[3] = byte(n >> 8)
- record[4] = byte(n)
- remoteMAC := payload[n : n+macSize]
- localMAC := tls10MAC(hc.mac, hc.scratchBuf[:0], hc.seq[:], record[:recordHeaderLen], payload[:n], payload[n+macSize:])
-
- // This is equivalent to checking the MACs and paddingGood
- // separately, but in constant-time to prevent distinguishing
- // padding failures from MAC failures. Depending on what value
- // of paddingLen was returned on bad padding, distinguishing
- // bad MAC from bad padding can lead to an attack.
- //
- // See also the logic at the end of extractPadding.
- macAndPaddingGood := subtle.ConstantTimeCompare(localMAC, remoteMAC) & int(paddingGood)
- if macAndPaddingGood != 1 {
- return nil, 0, alertBadRecordMAC
- }
-
- plaintext = payload[:n]
- }
-
- hc.incSeq()
- return plaintext, typ, nil
-}
-
-func (c *Conn) setAlternativeRecordLayer() {
- if c.extraConfig != nil && c.extraConfig.AlternativeRecordLayer != nil {
- c.in.setKeyCallback = c.extraConfig.AlternativeRecordLayer.SetReadKey
- c.out.setKeyCallback = c.extraConfig.AlternativeRecordLayer.SetWriteKey
- }
-}
-
-// sliceForAppend extends the input slice by n bytes. head is the full extended
-// slice, while tail is the appended part. If the original slice has sufficient
-// capacity no allocation is performed.
-func sliceForAppend(in []byte, n int) (head, tail []byte) {
- if total := len(in) + n; cap(in) >= total {
- head = in[:total]
- } else {
- head = make([]byte, total)
- copy(head, in)
- }
- tail = head[len(in):]
- return
-}
-
-// encrypt encrypts payload, adding the appropriate nonce and/or MAC, and
-// appends it to record, which must already contain the record header.
-func (hc *halfConn) encrypt(record, payload []byte, rand io.Reader) ([]byte, error) {
- if hc.cipher == nil {
- return append(record, payload...), nil
- }
-
- var explicitNonce []byte
- if explicitNonceLen := hc.explicitNonceLen(); explicitNonceLen > 0 {
- record, explicitNonce = sliceForAppend(record, explicitNonceLen)
- if _, isCBC := hc.cipher.(cbcMode); !isCBC && explicitNonceLen < 16 {
- // The AES-GCM construction in TLS has an explicit nonce so that the
- // nonce can be random. However, the nonce is only 8 bytes which is
- // too small for a secure, random nonce. Therefore we use the
- // sequence number as the nonce. The 3DES-CBC construction also has
- // an 8 bytes nonce but its nonces must be unpredictable (see RFC
- // 5246, Appendix F.3), forcing us to use randomness. That's not
- // 3DES' biggest problem anyway because the birthday bound on block
- // collision is reached first due to its similarly small block size
- // (see the Sweet32 attack).
- copy(explicitNonce, hc.seq[:])
- } else {
- if _, err := io.ReadFull(rand, explicitNonce); err != nil {
- return nil, err
- }
- }
- }
-
- var dst []byte
- switch c := hc.cipher.(type) {
- case cipher.Stream:
- mac := tls10MAC(hc.mac, hc.scratchBuf[:0], hc.seq[:], record[:recordHeaderLen], payload, nil)
- record, dst = sliceForAppend(record, len(payload)+len(mac))
- c.XORKeyStream(dst[:len(payload)], payload)
- c.XORKeyStream(dst[len(payload):], mac)
- case aead:
- nonce := explicitNonce
- if len(nonce) == 0 {
- nonce = hc.seq[:]
- }
-
- if hc.version == VersionTLS13 {
- record = append(record, payload...)
-
- // Encrypt the actual ContentType and replace the plaintext one.
- record = append(record, record[0])
- record[0] = byte(recordTypeApplicationData)
-
- n := len(payload) + 1 + c.Overhead()
- record[3] = byte(n >> 8)
- record[4] = byte(n)
-
- record = c.Seal(record[:recordHeaderLen],
- nonce, record[recordHeaderLen:], record[:recordHeaderLen])
- } else {
- additionalData := append(hc.scratchBuf[:0], hc.seq[:]...)
- additionalData = append(additionalData, record[:recordHeaderLen]...)
- record = c.Seal(record, nonce, payload, additionalData)
- }
- case cbcMode:
- mac := tls10MAC(hc.mac, hc.scratchBuf[:0], hc.seq[:], record[:recordHeaderLen], payload, nil)
- blockSize := c.BlockSize()
- plaintextLen := len(payload) + len(mac)
- paddingLen := blockSize - plaintextLen%blockSize
- record, dst = sliceForAppend(record, plaintextLen+paddingLen)
- copy(dst, payload)
- copy(dst[len(payload):], mac)
- for i := plaintextLen; i < len(dst); i++ {
- dst[i] = byte(paddingLen - 1)
- }
- if len(explicitNonce) > 0 {
- c.SetIV(explicitNonce)
- }
- c.CryptBlocks(dst, dst)
- default:
- panic("unknown cipher type")
- }
-
- // Update length to include nonce, MAC and any block padding needed.
- n := len(record) - recordHeaderLen
- record[3] = byte(n >> 8)
- record[4] = byte(n)
- hc.incSeq()
-
- return record, nil
-}
-
-// RecordHeaderError is returned when a TLS record header is invalid.
-type RecordHeaderError struct {
- // Msg contains a human readable string that describes the error.
- Msg string
- // RecordHeader contains the five bytes of TLS record header that
- // triggered the error.
- RecordHeader [5]byte
- // Conn provides the underlying net.Conn in the case that a client
- // sent an initial handshake that didn't look like TLS.
- // It is nil if there's already been a handshake or a TLS alert has
- // been written to the connection.
- Conn net.Conn
-}
-
-func (e RecordHeaderError) Error() string { return "tls: " + e.Msg }
-
-func (c *Conn) newRecordHeaderError(conn net.Conn, msg string) (err RecordHeaderError) {
- err.Msg = msg
- err.Conn = conn
- copy(err.RecordHeader[:], c.rawInput.Bytes())
- return err
-}
-
-func (c *Conn) readRecord() error {
- return c.readRecordOrCCS(false)
-}
-
-func (c *Conn) readChangeCipherSpec() error {
- return c.readRecordOrCCS(true)
-}
-
-// readRecordOrCCS reads one or more TLS records from the connection and
-// updates the record layer state. Some invariants:
-// * c.in must be locked
-// * c.input must be empty
-// During the handshake one and only one of the following will happen:
-// - c.hand grows
-// - c.in.changeCipherSpec is called
-// - an error is returned
-// After the handshake one and only one of the following will happen:
-// - c.hand grows
-// - c.input is set
-// - an error is returned
-func (c *Conn) readRecordOrCCS(expectChangeCipherSpec bool) error {
- if c.in.err != nil {
- return c.in.err
- }
- handshakeComplete := c.handshakeComplete()
-
- // This function modifies c.rawInput, which owns the c.input memory.
- if c.input.Len() != 0 {
- return c.in.setErrorLocked(errors.New("tls: internal error: attempted to read record with pending application data"))
- }
- c.input.Reset(nil)
-
- // Read header, payload.
- if err := c.readFromUntil(c.conn, recordHeaderLen); err != nil {
- // RFC 8446, Section 6.1 suggests that EOF without an alertCloseNotify
- // is an error, but popular web sites seem to do this, so we accept it
- // if and only if at the record boundary.
- if err == io.ErrUnexpectedEOF && c.rawInput.Len() == 0 {
- err = io.EOF
- }
- if e, ok := err.(net.Error); !ok || !e.Temporary() {
- c.in.setErrorLocked(err)
- }
- return err
- }
- hdr := c.rawInput.Bytes()[:recordHeaderLen]
- typ := recordType(hdr[0])
-
- // No valid TLS record has a type of 0x80, however SSLv2 handshakes
- // start with a uint16 length where the MSB is set and the first record
- // is always < 256 bytes long. Therefore typ == 0x80 strongly suggests
- // an SSLv2 client.
- if !handshakeComplete && typ == 0x80 {
- c.sendAlert(alertProtocolVersion)
- return c.in.setErrorLocked(c.newRecordHeaderError(nil, "unsupported SSLv2 handshake received"))
- }
-
- vers := uint16(hdr[1])<<8 | uint16(hdr[2])
- n := int(hdr[3])<<8 | int(hdr[4])
- if c.haveVers && c.vers != VersionTLS13 && vers != c.vers {
- c.sendAlert(alertProtocolVersion)
- msg := fmt.Sprintf("received record with version %x when expecting version %x", vers, c.vers)
- return c.in.setErrorLocked(c.newRecordHeaderError(nil, msg))
- }
- if !c.haveVers {
- // First message, be extra suspicious: this might not be a TLS
- // client. Bail out before reading a full 'body', if possible.
- // The current max version is 3.3 so if the version is >= 16.0,
- // it's probably not real.
- if (typ != recordTypeAlert && typ != recordTypeHandshake) || vers >= 0x1000 {
- return c.in.setErrorLocked(c.newRecordHeaderError(c.conn, "first record does not look like a TLS handshake"))
- }
- }
- if c.vers == VersionTLS13 && n > maxCiphertextTLS13 || n > maxCiphertext {
- c.sendAlert(alertRecordOverflow)
- msg := fmt.Sprintf("oversized record received with length %d", n)
- return c.in.setErrorLocked(c.newRecordHeaderError(nil, msg))
- }
- if err := c.readFromUntil(c.conn, recordHeaderLen+n); err != nil {
- if e, ok := err.(net.Error); !ok || !e.Temporary() {
- c.in.setErrorLocked(err)
- }
- return err
- }
-
- // Process message.
- record := c.rawInput.Next(recordHeaderLen + n)
- data, typ, err := c.in.decrypt(record)
- if err != nil {
- return c.in.setErrorLocked(c.sendAlert(err.(alert)))
- }
- if len(data) > maxPlaintext {
- return c.in.setErrorLocked(c.sendAlert(alertRecordOverflow))
- }
-
- // Application Data messages are always protected.
- if c.in.cipher == nil && typ == recordTypeApplicationData {
- return c.in.setErrorLocked(c.sendAlert(alertUnexpectedMessage))
- }
-
- if typ != recordTypeAlert && typ != recordTypeChangeCipherSpec && len(data) > 0 {
- // This is a state-advancing message: reset the retry count.
- c.retryCount = 0
- }
-
- // Handshake messages MUST NOT be interleaved with other record types in TLS 1.3.
- if c.vers == VersionTLS13 && typ != recordTypeHandshake && c.hand.Len() > 0 {
- return c.in.setErrorLocked(c.sendAlert(alertUnexpectedMessage))
- }
-
- switch typ {
- default:
- return c.in.setErrorLocked(c.sendAlert(alertUnexpectedMessage))
-
- case recordTypeAlert:
- if len(data) != 2 {
- return c.in.setErrorLocked(c.sendAlert(alertUnexpectedMessage))
- }
- if alert(data[1]) == alertCloseNotify {
- return c.in.setErrorLocked(io.EOF)
- }
- if c.vers == VersionTLS13 {
- return c.in.setErrorLocked(&net.OpError{Op: "remote error", Err: alert(data[1])})
- }
- switch data[0] {
- case alertLevelWarning:
- // Drop the record on the floor and retry.
- return c.retryReadRecord(expectChangeCipherSpec)
- case alertLevelError:
- return c.in.setErrorLocked(&net.OpError{Op: "remote error", Err: alert(data[1])})
- default:
- return c.in.setErrorLocked(c.sendAlert(alertUnexpectedMessage))
- }
-
- case recordTypeChangeCipherSpec:
- if len(data) != 1 || data[0] != 1 {
- return c.in.setErrorLocked(c.sendAlert(alertDecodeError))
- }
- // Handshake messages are not allowed to fragment across the CCS.
- if c.hand.Len() > 0 {
- return c.in.setErrorLocked(c.sendAlert(alertUnexpectedMessage))
- }
- // In TLS 1.3, change_cipher_spec records are ignored until the
- // Finished. See RFC 8446, Appendix D.4. Note that according to Section
- // 5, a server can send a ChangeCipherSpec before its ServerHello, when
- // c.vers is still unset. That's not useful though and suspicious if the
- // server then selects a lower protocol version, so don't allow that.
- if c.vers == VersionTLS13 {
- return c.retryReadRecord(expectChangeCipherSpec)
- }
- if !expectChangeCipherSpec {
- return c.in.setErrorLocked(c.sendAlert(alertUnexpectedMessage))
- }
- if err := c.in.changeCipherSpec(); err != nil {
- return c.in.setErrorLocked(c.sendAlert(err.(alert)))
- }
-
- case recordTypeApplicationData:
- if !handshakeComplete || expectChangeCipherSpec {
- return c.in.setErrorLocked(c.sendAlert(alertUnexpectedMessage))
- }
- // Some OpenSSL servers send empty records in order to randomize the
- // CBC IV. Ignore a limited number of empty records.
- if len(data) == 0 {
- return c.retryReadRecord(expectChangeCipherSpec)
- }
- // Note that data is owned by c.rawInput, following the Next call above,
- // to avoid copying the plaintext. This is safe because c.rawInput is
- // not read from or written to until c.input is drained.
- c.input.Reset(data)
-
- case recordTypeHandshake:
- if len(data) == 0 || expectChangeCipherSpec {
- return c.in.setErrorLocked(c.sendAlert(alertUnexpectedMessage))
- }
- c.hand.Write(data)
- }
-
- return nil
-}
-
-// retryReadRecord recurses into readRecordOrCCS to drop a non-advancing record, like
-// a warning alert, empty application_data, or a change_cipher_spec in TLS 1.3.
-func (c *Conn) retryReadRecord(expectChangeCipherSpec bool) error {
- c.retryCount++
- if c.retryCount > maxUselessRecords {
- c.sendAlert(alertUnexpectedMessage)
- return c.in.setErrorLocked(errors.New("tls: too many ignored records"))
- }
- return c.readRecordOrCCS(expectChangeCipherSpec)
-}
-
-// atLeastReader reads from R, stopping with EOF once at least N bytes have been
-// read. It is different from an io.LimitedReader in that it doesn't cut short
-// the last Read call, and in that it considers an early EOF an error.
-type atLeastReader struct {
- R io.Reader
- N int64
-}
-
-func (r *atLeastReader) Read(p []byte) (int, error) {
- if r.N <= 0 {
- return 0, io.EOF
- }
- n, err := r.R.Read(p)
- r.N -= int64(n) // won't underflow unless len(p) >= n > 9223372036854775809
- if r.N > 0 && err == io.EOF {
- return n, io.ErrUnexpectedEOF
- }
- if r.N <= 0 && err == nil {
- return n, io.EOF
- }
- return n, err
-}
-
-// readFromUntil reads from r into c.rawInput until c.rawInput contains
-// at least n bytes or else returns an error.
-func (c *Conn) readFromUntil(r io.Reader, n int) error {
- if c.rawInput.Len() >= n {
- return nil
- }
- needs := n - c.rawInput.Len()
- // There might be extra input waiting on the wire. Make a best effort
- // attempt to fetch it so that it can be used in (*Conn).Read to
- // "predict" closeNotify alerts.
- c.rawInput.Grow(needs + bytes.MinRead)
- _, err := c.rawInput.ReadFrom(&atLeastReader{r, int64(needs)})
- return err
-}
-
-// sendAlert sends a TLS alert message.
-func (c *Conn) sendAlertLocked(err alert) error {
- switch err {
- case alertNoRenegotiation, alertCloseNotify:
- c.tmp[0] = alertLevelWarning
- default:
- c.tmp[0] = alertLevelError
- }
- c.tmp[1] = byte(err)
-
- _, writeErr := c.writeRecordLocked(recordTypeAlert, c.tmp[0:2])
- if err == alertCloseNotify {
- // closeNotify is a special case in that it isn't an error.
- return writeErr
- }
-
- return c.out.setErrorLocked(&net.OpError{Op: "local error", Err: err})
-}
-
-// sendAlert sends a TLS alert message.
-func (c *Conn) sendAlert(err alert) error {
- if c.extraConfig != nil && c.extraConfig.AlternativeRecordLayer != nil {
- c.extraConfig.AlternativeRecordLayer.SendAlert(uint8(err))
- return &net.OpError{Op: "local error", Err: err}
- }
-
- c.out.Lock()
- defer c.out.Unlock()
- return c.sendAlertLocked(err)
-}
-
-const (
- // tcpMSSEstimate is a conservative estimate of the TCP maximum segment
- // size (MSS). A constant is used, rather than querying the kernel for
- // the actual MSS, to avoid complexity. The value here is the IPv6
- // minimum MTU (1280 bytes) minus the overhead of an IPv6 header (40
- // bytes) and a TCP header with timestamps (32 bytes).
- tcpMSSEstimate = 1208
-
- // recordSizeBoostThreshold is the number of bytes of application data
- // sent after which the TLS record size will be increased to the
- // maximum.
- recordSizeBoostThreshold = 128 * 1024
-)
-
-// maxPayloadSizeForWrite returns the maximum TLS payload size to use for the
-// next application data record. There is the following trade-off:
-//
-// - For latency-sensitive applications, such as web browsing, each TLS
-// record should fit in one TCP segment.
-// - For throughput-sensitive applications, such as large file transfers,
-// larger TLS records better amortize framing and encryption overheads.
-//
-// A simple heuristic that works well in practice is to use small records for
-// the first 1MB of data, then use larger records for subsequent data, and
-// reset back to smaller records after the connection becomes idle. See "High
-// Performance Web Networking", Chapter 4, or:
-// https://www.igvita.com/2013/10/24/optimizing-tls-record-size-and-buffering-latency/
-//
-// In the interests of simplicity and determinism, this code does not attempt
-// to reset the record size once the connection is idle, however.
-func (c *Conn) maxPayloadSizeForWrite(typ recordType) int {
- if c.config.DynamicRecordSizingDisabled || typ != recordTypeApplicationData {
- return maxPlaintext
- }
-
- if c.bytesSent >= recordSizeBoostThreshold {
- return maxPlaintext
- }
-
- // Subtract TLS overheads to get the maximum payload size.
- payloadBytes := tcpMSSEstimate - recordHeaderLen - c.out.explicitNonceLen()
- if c.out.cipher != nil {
- switch ciph := c.out.cipher.(type) {
- case cipher.Stream:
- payloadBytes -= c.out.mac.Size()
- case cipher.AEAD:
- payloadBytes -= ciph.Overhead()
- case cbcMode:
- blockSize := ciph.BlockSize()
- // The payload must fit in a multiple of blockSize, with
- // room for at least one padding byte.
- payloadBytes = (payloadBytes & ^(blockSize - 1)) - 1
- // The MAC is appended before padding so affects the
- // payload size directly.
- payloadBytes -= c.out.mac.Size()
- default:
- panic("unknown cipher type")
- }
- }
- if c.vers == VersionTLS13 {
- payloadBytes-- // encrypted ContentType
- }
-
- // Allow packet growth in arithmetic progression up to max.
- pkt := c.packetsSent
- c.packetsSent++
- if pkt > 1000 {
- return maxPlaintext // avoid overflow in multiply below
- }
-
- n := payloadBytes * int(pkt+1)
- if n > maxPlaintext {
- n = maxPlaintext
- }
- return n
-}
-
-func (c *Conn) write(data []byte) (int, error) {
- if c.buffering {
- c.sendBuf = append(c.sendBuf, data...)
- return len(data), nil
- }
-
- n, err := c.conn.Write(data)
- c.bytesSent += int64(n)
- return n, err
-}
-
-func (c *Conn) flush() (int, error) {
- if len(c.sendBuf) == 0 {
- return 0, nil
- }
-
- n, err := c.conn.Write(c.sendBuf)
- c.bytesSent += int64(n)
- c.sendBuf = nil
- c.buffering = false
- return n, err
-}
-
-// outBufPool pools the record-sized scratch buffers used by writeRecordLocked.
-var outBufPool = sync.Pool{
- New: func() interface{} {
- return new([]byte)
- },
-}
-
-// writeRecordLocked writes a TLS record with the given type and payload to the
-// connection and updates the record layer state.
-func (c *Conn) writeRecordLocked(typ recordType, data []byte) (int, error) {
- outBufPtr := outBufPool.Get().(*[]byte)
- outBuf := *outBufPtr
- defer func() {
- // You might be tempted to simplify this by just passing &outBuf to Put,
- // but that would make the local copy of the outBuf slice header escape
- // to the heap, causing an allocation. Instead, we keep around the
- // pointer to the slice header returned by Get, which is already on the
- // heap, and overwrite and return that.
- *outBufPtr = outBuf
- outBufPool.Put(outBufPtr)
- }()
-
- var n int
- for len(data) > 0 {
- m := len(data)
- if maxPayload := c.maxPayloadSizeForWrite(typ); m > maxPayload {
- m = maxPayload
- }
-
- _, outBuf = sliceForAppend(outBuf[:0], recordHeaderLen)
- outBuf[0] = byte(typ)
- vers := c.vers
- if vers == 0 {
- // Some TLS servers fail if the record version is
- // greater than TLS 1.0 for the initial ClientHello.
- vers = VersionTLS10
- } else if vers == VersionTLS13 {
- // TLS 1.3 froze the record layer version to 1.2.
- // See RFC 8446, Section 5.1.
- vers = VersionTLS12
- }
- outBuf[1] = byte(vers >> 8)
- outBuf[2] = byte(vers)
- outBuf[3] = byte(m >> 8)
- outBuf[4] = byte(m)
-
- var err error
- outBuf, err = c.out.encrypt(outBuf, data[:m], c.config.rand())
- if err != nil {
- return n, err
- }
- if _, err := c.write(outBuf); err != nil {
- return n, err
- }
- n += m
- data = data[m:]
- }
-
- if typ == recordTypeChangeCipherSpec && c.vers != VersionTLS13 {
- if err := c.out.changeCipherSpec(); err != nil {
- return n, c.sendAlertLocked(err.(alert))
- }
- }
-
- return n, nil
-}
-
-// writeRecord writes a TLS record with the given type and payload to the
-// connection and updates the record layer state.
-func (c *Conn) writeRecord(typ recordType, data []byte) (int, error) {
- if c.extraConfig != nil && c.extraConfig.AlternativeRecordLayer != nil {
- if typ == recordTypeChangeCipherSpec {
- return len(data), nil
- }
- return c.extraConfig.AlternativeRecordLayer.WriteRecord(data)
- }
-
- c.out.Lock()
- defer c.out.Unlock()
-
- return c.writeRecordLocked(typ, data)
-}
-
-// readHandshake reads the next handshake message from
-// the record layer.
-func (c *Conn) readHandshake() (interface{}, error) {
- var data []byte
- if c.extraConfig != nil && c.extraConfig.AlternativeRecordLayer != nil {
- var err error
- data, err = c.extraConfig.AlternativeRecordLayer.ReadHandshakeMessage()
- if err != nil {
- return nil, err
- }
- } else {
- for c.hand.Len() < 4 {
- if err := c.readRecord(); err != nil {
- return nil, err
- }
- }
-
- data = c.hand.Bytes()
- n := int(data[1])<<16 | int(data[2])<<8 | int(data[3])
- if n > maxHandshake {
- c.sendAlertLocked(alertInternalError)
- return nil, c.in.setErrorLocked(fmt.Errorf("tls: handshake message of length %d bytes exceeds maximum of %d bytes", n, maxHandshake))
- }
- for c.hand.Len() < 4+n {
- if err := c.readRecord(); err != nil {
- return nil, err
- }
- }
- data = c.hand.Next(4 + n)
- }
- var m handshakeMessage
- switch data[0] {
- case typeHelloRequest:
- m = new(helloRequestMsg)
- case typeClientHello:
- m = new(clientHelloMsg)
- case typeServerHello:
- m = new(serverHelloMsg)
- case typeNewSessionTicket:
- if c.vers == VersionTLS13 {
- m = new(newSessionTicketMsgTLS13)
- } else {
- m = new(newSessionTicketMsg)
- }
- case typeCertificate:
- if c.vers == VersionTLS13 {
- m = new(certificateMsgTLS13)
- } else {
- m = new(certificateMsg)
- }
- case typeCertificateRequest:
- if c.vers == VersionTLS13 {
- m = new(certificateRequestMsgTLS13)
- } else {
- m = &certificateRequestMsg{
- hasSignatureAlgorithm: c.vers >= VersionTLS12,
- }
- }
- case typeCertificateStatus:
- m = new(certificateStatusMsg)
- case typeServerKeyExchange:
- m = new(serverKeyExchangeMsg)
- case typeServerHelloDone:
- m = new(serverHelloDoneMsg)
- case typeClientKeyExchange:
- m = new(clientKeyExchangeMsg)
- case typeCertificateVerify:
- m = &certificateVerifyMsg{
- hasSignatureAlgorithm: c.vers >= VersionTLS12,
- }
- case typeFinished:
- m = new(finishedMsg)
- case typeEncryptedExtensions:
- m = new(encryptedExtensionsMsg)
- case typeEndOfEarlyData:
- m = new(endOfEarlyDataMsg)
- case typeKeyUpdate:
- m = new(keyUpdateMsg)
- default:
- return nil, c.in.setErrorLocked(c.sendAlert(alertUnexpectedMessage))
- }
-
- // The handshake message unmarshalers
- // expect to be able to keep references to data,
- // so pass in a fresh copy that won't be overwritten.
- data = append([]byte(nil), data...)
-
- if !m.unmarshal(data) {
- return nil, c.in.setErrorLocked(c.sendAlert(alertUnexpectedMessage))
- }
- return m, nil
-}
-
-var (
- errShutdown = errors.New("tls: protocol is shutdown")
-)
-
-// Write writes data to the connection.
-//
-// As Write calls Handshake, in order to prevent indefinite blocking a deadline
-// must be set for both Read and Write before Write is called when the handshake
-// has not yet completed. See SetDeadline, SetReadDeadline, and
-// SetWriteDeadline.
-func (c *Conn) Write(b []byte) (int, error) {
- // interlock with Close below
- for {
- x := atomic.LoadInt32(&c.activeCall)
- if x&1 != 0 {
- return 0, net.ErrClosed
- }
- if atomic.CompareAndSwapInt32(&c.activeCall, x, x+2) {
- break
- }
- }
- defer atomic.AddInt32(&c.activeCall, -2)
-
- if err := c.Handshake(); err != nil {
- return 0, err
- }
-
- c.out.Lock()
- defer c.out.Unlock()
-
- if err := c.out.err; err != nil {
- return 0, err
- }
-
- if !c.handshakeComplete() {
- return 0, alertInternalError
- }
-
- if c.closeNotifySent {
- return 0, errShutdown
- }
-
- // TLS 1.0 is susceptible to a chosen-plaintext
- // attack when using block mode ciphers due to predictable IVs.
- // This can be prevented by splitting each Application Data
- // record into two records, effectively randomizing the IV.
- //
- // https://www.openssl.org/~bodo/tls-cbc.txt
- // https://bugzilla.mozilla.org/show_bug.cgi?id=665814
- // https://www.imperialviolet.org/2012/01/15/beastfollowup.html
-
- var m int
- if len(b) > 1 && c.vers == VersionTLS10 {
- if _, ok := c.out.cipher.(cipher.BlockMode); ok {
- n, err := c.writeRecordLocked(recordTypeApplicationData, b[:1])
- if err != nil {
- return n, c.out.setErrorLocked(err)
- }
- m, b = 1, b[1:]
- }
- }
-
- n, err := c.writeRecordLocked(recordTypeApplicationData, b)
- return n + m, c.out.setErrorLocked(err)
-}
-
-// handleRenegotiation processes a HelloRequest handshake message.
-func (c *Conn) handleRenegotiation() error {
- if c.vers == VersionTLS13 {
- return errors.New("tls: internal error: unexpected renegotiation")
- }
-
- msg, err := c.readHandshake()
- if err != nil {
- return err
- }
-
- helloReq, ok := msg.(*helloRequestMsg)
- if !ok {
- c.sendAlert(alertUnexpectedMessage)
- return unexpectedMessageError(helloReq, msg)
- }
-
- if !c.isClient {
- return c.sendAlert(alertNoRenegotiation)
- }
-
- switch c.config.Renegotiation {
- case RenegotiateNever:
- return c.sendAlert(alertNoRenegotiation)
- case RenegotiateOnceAsClient:
- if c.handshakes > 1 {
- return c.sendAlert(alertNoRenegotiation)
- }
- case RenegotiateFreelyAsClient:
- // Ok.
- default:
- c.sendAlert(alertInternalError)
- return errors.New("tls: unknown Renegotiation value")
- }
-
- c.handshakeMutex.Lock()
- defer c.handshakeMutex.Unlock()
-
- atomic.StoreUint32(&c.handshakeStatus, 0)
- if c.handshakeErr = c.clientHandshake(context.Background()); c.handshakeErr == nil {
- c.handshakes++
- }
- return c.handshakeErr
-}
-
-func (c *Conn) HandlePostHandshakeMessage() error {
- return c.handlePostHandshakeMessage()
-}
-
-// handlePostHandshakeMessage processes a handshake message arrived after the
-// handshake is complete. Up to TLS 1.2, it indicates the start of a renegotiation.
-func (c *Conn) handlePostHandshakeMessage() error {
- if c.vers != VersionTLS13 {
- return c.handleRenegotiation()
- }
-
- msg, err := c.readHandshake()
- if err != nil {
- return err
- }
-
- c.retryCount++
- if c.retryCount > maxUselessRecords {
- c.sendAlert(alertUnexpectedMessage)
- return c.in.setErrorLocked(errors.New("tls: too many non-advancing records"))
- }
-
- switch msg := msg.(type) {
- case *newSessionTicketMsgTLS13:
- return c.handleNewSessionTicket(msg)
- case *keyUpdateMsg:
- return c.handleKeyUpdate(msg)
- default:
- c.sendAlert(alertUnexpectedMessage)
- return fmt.Errorf("tls: received unexpected handshake message of type %T", msg)
- }
-}
-
-func (c *Conn) handleKeyUpdate(keyUpdate *keyUpdateMsg) error {
- cipherSuite := cipherSuiteTLS13ByID(c.cipherSuite)
- if cipherSuite == nil {
- return c.in.setErrorLocked(c.sendAlert(alertInternalError))
- }
-
- newSecret := cipherSuite.nextTrafficSecret(c.in.trafficSecret)
- c.in.setTrafficSecret(cipherSuite, newSecret)
-
- if keyUpdate.updateRequested {
- c.out.Lock()
- defer c.out.Unlock()
-
- msg := &keyUpdateMsg{}
- _, err := c.writeRecordLocked(recordTypeHandshake, msg.marshal())
- if err != nil {
- // Surface the error at the next write.
- c.out.setErrorLocked(err)
- return nil
- }
-
- newSecret := cipherSuite.nextTrafficSecret(c.out.trafficSecret)
- c.out.setTrafficSecret(cipherSuite, newSecret)
- }
-
- return nil
-}
-
-// Read reads data from the connection.
-//
-// As Read calls Handshake, in order to prevent indefinite blocking a deadline
-// must be set for both Read and Write before Read is called when the handshake
-// has not yet completed. See SetDeadline, SetReadDeadline, and
-// SetWriteDeadline.
-func (c *Conn) Read(b []byte) (int, error) {
- if err := c.Handshake(); err != nil {
- return 0, err
- }
- if len(b) == 0 {
- // Put this after Handshake, in case people were calling
- // Read(nil) for the side effect of the Handshake.
- return 0, nil
- }
-
- c.in.Lock()
- defer c.in.Unlock()
-
- for c.input.Len() == 0 {
- if err := c.readRecord(); err != nil {
- return 0, err
- }
- for c.hand.Len() > 0 {
- if err := c.handlePostHandshakeMessage(); err != nil {
- return 0, err
- }
- }
- }
-
- n, _ := c.input.Read(b)
-
- // If a close-notify alert is waiting, read it so that we can return (n,
- // EOF) instead of (n, nil), to signal to the HTTP response reading
- // goroutine that the connection is now closed. This eliminates a race
- // where the HTTP response reading goroutine would otherwise not observe
- // the EOF until its next read, by which time a client goroutine might
- // have already tried to reuse the HTTP connection for a new request.
- // See https://golang.org/cl/76400046 and https://golang.org/issue/3514
- if n != 0 && c.input.Len() == 0 && c.rawInput.Len() > 0 &&
- recordType(c.rawInput.Bytes()[0]) == recordTypeAlert {
- if err := c.readRecord(); err != nil {
- return n, err // will be io.EOF on closeNotify
- }
- }
-
- return n, nil
-}
-
-// Close closes the connection.
-func (c *Conn) Close() error {
- // Interlock with Conn.Write above.
- var x int32
- for {
- x = atomic.LoadInt32(&c.activeCall)
- if x&1 != 0 {
- return net.ErrClosed
- }
- if atomic.CompareAndSwapInt32(&c.activeCall, x, x|1) {
- break
- }
- }
- if x != 0 {
- // io.Writer and io.Closer should not be used concurrently.
- // If Close is called while a Write is currently in-flight,
- // interpret that as a sign that this Close is really just
- // being used to break the Write and/or clean up resources and
- // avoid sending the alertCloseNotify, which may block
- // waiting on handshakeMutex or the c.out mutex.
- return c.conn.Close()
- }
-
- var alertErr error
- if c.handshakeComplete() {
- if err := c.closeNotify(); err != nil {
- alertErr = fmt.Errorf("tls: failed to send closeNotify alert (but connection was closed anyway): %w", err)
- }
- }
-
- if err := c.conn.Close(); err != nil {
- return err
- }
- return alertErr
-}
-
-var errEarlyCloseWrite = errors.New("tls: CloseWrite called before handshake complete")
-
-// CloseWrite shuts down the writing side of the connection. It should only be
-// called once the handshake has completed and does not call CloseWrite on the
-// underlying connection. Most callers should just use Close.
-func (c *Conn) CloseWrite() error {
- if !c.handshakeComplete() {
- return errEarlyCloseWrite
- }
-
- return c.closeNotify()
-}
-
-func (c *Conn) closeNotify() error {
- c.out.Lock()
- defer c.out.Unlock()
-
- if !c.closeNotifySent {
- // Set a Write Deadline to prevent possibly blocking forever.
- c.SetWriteDeadline(time.Now().Add(time.Second * 5))
- c.closeNotifyErr = c.sendAlertLocked(alertCloseNotify)
- c.closeNotifySent = true
- // Any subsequent writes will fail.
- c.SetWriteDeadline(time.Now())
- }
- return c.closeNotifyErr
-}
-
-// Handshake runs the client or server handshake
-// protocol if it has not yet been run.
-//
-// Most uses of this package need not call Handshake explicitly: the
-// first Read or Write will call it automatically.
-//
-// For control over canceling or setting a timeout on a handshake, use
-// HandshakeContext or the Dialer's DialContext method instead.
-func (c *Conn) Handshake() error {
- return c.HandshakeContext(context.Background())
-}
-
-// HandshakeContext runs the client or server handshake
-// protocol if it has not yet been run.
-//
-// The provided Context must be non-nil. If the context is canceled before
-// the handshake is complete, the handshake is interrupted and an error is returned.
-// Once the handshake has completed, cancellation of the context will not affect the
-// connection.
-//
-// Most uses of this package need not call HandshakeContext explicitly: the
-// first Read or Write will call it automatically.
-func (c *Conn) HandshakeContext(ctx context.Context) error {
- // Delegate to unexported method for named return
- // without confusing documented signature.
- return c.handshakeContext(ctx)
-}
-
-func (c *Conn) handshakeContext(ctx context.Context) (ret error) {
- // Fast sync/atomic-based exit if there is no handshake in flight and the
- // last one succeeded without an error. Avoids the expensive context setup
- // and mutex for most Read and Write calls.
- if c.handshakeComplete() {
- return nil
- }
-
- handshakeCtx, cancel := context.WithCancel(ctx)
- // Note: defer this before starting the "interrupter" goroutine
- // so that we can tell the difference between the input being canceled and
- // this cancellation. In the former case, we need to close the connection.
- defer cancel()
-
- // Start the "interrupter" goroutine, if this context might be canceled.
- // (The background context cannot).
- //
- // The interrupter goroutine waits for the input context to be done and
- // closes the connection if this happens before the function returns.
- if ctx.Done() != nil {
- done := make(chan struct{})
- interruptRes := make(chan error, 1)
- defer func() {
- close(done)
- if ctxErr := <-interruptRes; ctxErr != nil {
- // Return context error to user.
- ret = ctxErr
- }
- }()
- go func() {
- select {
- case <-handshakeCtx.Done():
- // Close the connection, discarding the error
- _ = c.conn.Close()
- interruptRes <- handshakeCtx.Err()
- case <-done:
- interruptRes <- nil
- }
- }()
- }
-
- c.handshakeMutex.Lock()
- defer c.handshakeMutex.Unlock()
-
- if err := c.handshakeErr; err != nil {
- return err
- }
- if c.handshakeComplete() {
- return nil
- }
-
- c.in.Lock()
- defer c.in.Unlock()
-
- c.handshakeErr = c.handshakeFn(handshakeCtx)
- if c.handshakeErr == nil {
- c.handshakes++
- } else {
- // If an error occurred during the handshake try to flush the
- // alert that might be left in the buffer.
- c.flush()
- }
-
- if c.handshakeErr == nil && !c.handshakeComplete() {
- c.handshakeErr = errors.New("tls: internal error: handshake should have had a result")
- }
- if c.handshakeErr != nil && c.handshakeComplete() {
- panic("tls: internal error: handshake returned an error but is marked successful")
- }
-
- return c.handshakeErr
-}
-
-// ConnectionState returns basic TLS details about the connection.
-func (c *Conn) ConnectionState() ConnectionState {
- c.handshakeMutex.Lock()
- defer c.handshakeMutex.Unlock()
- return c.connectionStateLocked()
-}
-
-// ConnectionStateWith0RTT returns basic TLS details (incl. 0-RTT status) about the connection.
-func (c *Conn) ConnectionStateWith0RTT() ConnectionStateWith0RTT {
- c.handshakeMutex.Lock()
- defer c.handshakeMutex.Unlock()
- return ConnectionStateWith0RTT{
- ConnectionState: c.connectionStateLocked(),
- Used0RTT: c.used0RTT,
- }
-}
-
-func (c *Conn) connectionStateLocked() ConnectionState {
- var state connectionState
- state.HandshakeComplete = c.handshakeComplete()
- state.Version = c.vers
- state.NegotiatedProtocol = c.clientProtocol
- state.DidResume = c.didResume
- state.NegotiatedProtocolIsMutual = true
- state.ServerName = c.serverName
- state.CipherSuite = c.cipherSuite
- state.PeerCertificates = c.peerCertificates
- state.VerifiedChains = c.verifiedChains
- state.SignedCertificateTimestamps = c.scts
- state.OCSPResponse = c.ocspResponse
- if !c.didResume && c.vers != VersionTLS13 {
- if c.clientFinishedIsFirst {
- state.TLSUnique = c.clientFinished[:]
- } else {
- state.TLSUnique = c.serverFinished[:]
- }
- }
- if c.config.Renegotiation != RenegotiateNever {
- state.ekm = noExportedKeyingMaterial
- } else {
- state.ekm = c.ekm
- }
- return toConnectionState(state)
-}
-
-// OCSPResponse returns the stapled OCSP response from the TLS server, if
-// any. (Only valid for client connections.)
-func (c *Conn) OCSPResponse() []byte {
- c.handshakeMutex.Lock()
- defer c.handshakeMutex.Unlock()
-
- return c.ocspResponse
-}
-
-// VerifyHostname checks that the peer certificate chain is valid for
-// connecting to host. If so, it returns nil; if not, it returns an error
-// describing the problem.
-func (c *Conn) VerifyHostname(host string) error {
- c.handshakeMutex.Lock()
- defer c.handshakeMutex.Unlock()
- if !c.isClient {
- return errors.New("tls: VerifyHostname called on TLS server connection")
- }
- if !c.handshakeComplete() {
- return errors.New("tls: handshake has not yet been performed")
- }
- if len(c.verifiedChains) == 0 {
- return errors.New("tls: handshake did not verify certificate chain")
- }
- return c.peerCertificates[0].VerifyHostname(host)
-}
-
-func (c *Conn) handshakeComplete() bool {
- return atomic.LoadUint32(&c.handshakeStatus) == 1
-}
diff --git a/vendor/github.com/marten-seemann/qtls-go1-17/handshake_client.go b/vendor/github.com/marten-seemann/qtls-go1-17/handshake_client.go
deleted file mode 100644
index ac5d0b4c2..000000000
--- a/vendor/github.com/marten-seemann/qtls-go1-17/handshake_client.go
+++ /dev/null
@@ -1,1111 +0,0 @@
-// Copyright 2009 The Go Authors. All rights reserved.
-// Use of this source code is governed by a BSD-style
-// license that can be found in the LICENSE file.
-
-package qtls
-
-import (
- "bytes"
- "context"
- "crypto"
- "crypto/ecdsa"
- "crypto/ed25519"
- "crypto/rsa"
- "crypto/subtle"
- "crypto/x509"
- "errors"
- "fmt"
- "hash"
- "io"
- "net"
- "strings"
- "sync/atomic"
- "time"
-
- "golang.org/x/crypto/cryptobyte"
-)
-
-const clientSessionStateVersion = 1
-
-type clientHandshakeState struct {
- c *Conn
- ctx context.Context
- serverHello *serverHelloMsg
- hello *clientHelloMsg
- suite *cipherSuite
- finishedHash finishedHash
- masterSecret []byte
- session *clientSessionState
-}
-
-func (c *Conn) makeClientHello() (*clientHelloMsg, ecdheParameters, error) {
- config := c.config
- if len(config.ServerName) == 0 && !config.InsecureSkipVerify {
- return nil, nil, errors.New("tls: either ServerName or InsecureSkipVerify must be specified in the tls.Config")
- }
-
- nextProtosLength := 0
- for _, proto := range config.NextProtos {
- if l := len(proto); l == 0 || l > 255 {
- return nil, nil, errors.New("tls: invalid NextProtos value")
- } else {
- nextProtosLength += 1 + l
- }
- }
- if nextProtosLength > 0xffff {
- return nil, nil, errors.New("tls: NextProtos values too large")
- }
-
- var supportedVersions []uint16
- var clientHelloVersion uint16
- if c.extraConfig.usesAlternativeRecordLayer() {
- if config.maxSupportedVersion() < VersionTLS13 {
- return nil, nil, errors.New("tls: MaxVersion prevents QUIC from using TLS 1.3")
- }
- // Only offer TLS 1.3 when QUIC is used.
- supportedVersions = []uint16{VersionTLS13}
- clientHelloVersion = VersionTLS13
- } else {
- supportedVersions = config.supportedVersions()
- if len(supportedVersions) == 0 {
- return nil, nil, errors.New("tls: no supported versions satisfy MinVersion and MaxVersion")
- }
- clientHelloVersion = config.maxSupportedVersion()
- }
-
- // The version at the beginning of the ClientHello was capped at TLS 1.2
- // for compatibility reasons. The supported_versions extension is used
- // to negotiate versions now. See RFC 8446, Section 4.2.1.
- if clientHelloVersion > VersionTLS12 {
- clientHelloVersion = VersionTLS12
- }
-
- hello := &clientHelloMsg{
- vers: clientHelloVersion,
- compressionMethods: []uint8{compressionNone},
- random: make([]byte, 32),
- ocspStapling: true,
- scts: true,
- serverName: hostnameInSNI(config.ServerName),
- supportedCurves: config.curvePreferences(),
- supportedPoints: []uint8{pointFormatUncompressed},
- secureRenegotiationSupported: true,
- alpnProtocols: config.NextProtos,
- supportedVersions: supportedVersions,
- }
-
- if c.handshakes > 0 {
- hello.secureRenegotiation = c.clientFinished[:]
- }
-
- preferenceOrder := cipherSuitesPreferenceOrder
- if !hasAESGCMHardwareSupport {
- preferenceOrder = cipherSuitesPreferenceOrderNoAES
- }
- configCipherSuites := config.cipherSuites()
- hello.cipherSuites = make([]uint16, 0, len(configCipherSuites))
-
- for _, suiteId := range preferenceOrder {
- suite := mutualCipherSuite(configCipherSuites, suiteId)
- if suite == nil {
- continue
- }
- // Don't advertise TLS 1.2-only cipher suites unless
- // we're attempting TLS 1.2.
- if hello.vers < VersionTLS12 && suite.flags&suiteTLS12 != 0 {
- continue
- }
- hello.cipherSuites = append(hello.cipherSuites, suiteId)
- }
-
- _, err := io.ReadFull(config.rand(), hello.random)
- if err != nil {
- return nil, nil, errors.New("tls: short read from Rand: " + err.Error())
- }
-
- // A random session ID is used to detect when the server accepted a ticket
- // and is resuming a session (see RFC 5077). In TLS 1.3, it's always set as
- // a compatibility measure (see RFC 8446, Section 4.1.2).
- if c.extraConfig == nil || c.extraConfig.AlternativeRecordLayer == nil {
- hello.sessionId = make([]byte, 32)
- if _, err := io.ReadFull(config.rand(), hello.sessionId); err != nil {
- return nil, nil, errors.New("tls: short read from Rand: " + err.Error())
- }
- }
-
- if hello.vers >= VersionTLS12 {
- hello.supportedSignatureAlgorithms = supportedSignatureAlgorithms
- }
-
- var params ecdheParameters
- if hello.supportedVersions[0] == VersionTLS13 {
- var suites []uint16
- for _, suiteID := range configCipherSuites {
- for _, suite := range cipherSuitesTLS13 {
- if suite.id == suiteID {
- suites = append(suites, suiteID)
- }
- }
- }
- if len(suites) > 0 {
- hello.cipherSuites = suites
- } else {
- if hasAESGCMHardwareSupport {
- hello.cipherSuites = append(hello.cipherSuites, defaultCipherSuitesTLS13...)
- } else {
- hello.cipherSuites = append(hello.cipherSuites, defaultCipherSuitesTLS13NoAES...)
- }
- }
-
- curveID := config.curvePreferences()[0]
- if _, ok := curveForCurveID(curveID); curveID != X25519 && !ok {
- return nil, nil, errors.New("tls: CurvePreferences includes unsupported curve")
- }
- params, err = generateECDHEParameters(config.rand(), curveID)
- if err != nil {
- return nil, nil, err
- }
- hello.keyShares = []keyShare{{group: curveID, data: params.PublicKey()}}
- }
-
- if hello.supportedVersions[0] == VersionTLS13 && c.extraConfig != nil && c.extraConfig.GetExtensions != nil {
- hello.additionalExtensions = c.extraConfig.GetExtensions(typeClientHello)
- }
-
- return hello, params, nil
-}
-
-func (c *Conn) clientHandshake(ctx context.Context) (err error) {
- if c.config == nil {
- c.config = fromConfig(defaultConfig())
- }
- c.setAlternativeRecordLayer()
-
- // This may be a renegotiation handshake, in which case some fields
- // need to be reset.
- c.didResume = false
-
- hello, ecdheParams, err := c.makeClientHello()
- if err != nil {
- return err
- }
- c.serverName = hello.serverName
-
- cacheKey, session, earlySecret, binderKey := c.loadSession(hello)
- if cacheKey != "" && session != nil {
- var deletedTicket bool
- if session.vers == VersionTLS13 && hello.earlyData && c.extraConfig != nil && c.extraConfig.Enable0RTT {
- // don't reuse a session ticket that enabled 0-RTT
- c.config.ClientSessionCache.Put(cacheKey, nil)
- deletedTicket = true
-
- if suite := cipherSuiteTLS13ByID(session.cipherSuite); suite != nil {
- h := suite.hash.New()
- h.Write(hello.marshal())
- clientEarlySecret := suite.deriveSecret(earlySecret, "c e traffic", h)
- c.out.exportKey(Encryption0RTT, suite, clientEarlySecret)
- if err := c.config.writeKeyLog(keyLogLabelEarlyTraffic, hello.random, clientEarlySecret); err != nil {
- c.sendAlert(alertInternalError)
- return err
- }
- }
- }
- if !deletedTicket {
- defer func() {
- // If we got a handshake failure when resuming a session, throw away
- // the session ticket. See RFC 5077, Section 3.2.
- //
- // RFC 8446 makes no mention of dropping tickets on failure, but it
- // does require servers to abort on invalid binders, so we need to
- // delete tickets to recover from a corrupted PSK.
- if err != nil {
- c.config.ClientSessionCache.Put(cacheKey, nil)
- }
- }()
- }
- }
-
- if _, err := c.writeRecord(recordTypeHandshake, hello.marshal()); err != nil {
- return err
- }
-
- msg, err := c.readHandshake()
- if err != nil {
- return err
- }
-
- serverHello, ok := msg.(*serverHelloMsg)
- if !ok {
- c.sendAlert(alertUnexpectedMessage)
- return unexpectedMessageError(serverHello, msg)
- }
-
- if err := c.pickTLSVersion(serverHello); err != nil {
- return err
- }
-
- // If we are negotiating a protocol version that's lower than what we
- // support, check for the server downgrade canaries.
- // See RFC 8446, Section 4.1.3.
- maxVers := c.config.maxSupportedVersion()
- tls12Downgrade := string(serverHello.random[24:]) == downgradeCanaryTLS12
- tls11Downgrade := string(serverHello.random[24:]) == downgradeCanaryTLS11
- if maxVers == VersionTLS13 && c.vers <= VersionTLS12 && (tls12Downgrade || tls11Downgrade) ||
- maxVers == VersionTLS12 && c.vers <= VersionTLS11 && tls11Downgrade {
- c.sendAlert(alertIllegalParameter)
- return errors.New("tls: downgrade attempt detected, possibly due to a MitM attack or a broken middlebox")
- }
-
- if c.vers == VersionTLS13 {
- hs := &clientHandshakeStateTLS13{
- c: c,
- ctx: ctx,
- serverHello: serverHello,
- hello: hello,
- ecdheParams: ecdheParams,
- session: session,
- earlySecret: earlySecret,
- binderKey: binderKey,
- }
-
- // In TLS 1.3, session tickets are delivered after the handshake.
- return hs.handshake()
- }
-
- hs := &clientHandshakeState{
- c: c,
- ctx: ctx,
- serverHello: serverHello,
- hello: hello,
- session: session,
- }
-
- if err := hs.handshake(); err != nil {
- return err
- }
-
- // If we had a successful handshake and hs.session is different from
- // the one already cached - cache a new one.
- if cacheKey != "" && hs.session != nil && session != hs.session {
- c.config.ClientSessionCache.Put(cacheKey, toClientSessionState(hs.session))
- }
-
- return nil
-}
-
-// extract the app data saved in the session.nonce,
-// and set the session.nonce to the actual nonce value
-func (c *Conn) decodeSessionState(session *clientSessionState) (uint32 /* max early data */, []byte /* app data */, bool /* ok */) {
- s := cryptobyte.String(session.nonce)
- var version uint16
- if !s.ReadUint16(&version) {
- return 0, nil, false
- }
- if version != clientSessionStateVersion {
- return 0, nil, false
- }
- var maxEarlyData uint32
- if !s.ReadUint32(&maxEarlyData) {
- return 0, nil, false
- }
- var appData []byte
- if !readUint16LengthPrefixed(&s, &appData) {
- return 0, nil, false
- }
- var nonce []byte
- if !readUint16LengthPrefixed(&s, &nonce) {
- return 0, nil, false
- }
- session.nonce = nonce
- return maxEarlyData, appData, true
-}
-
-func (c *Conn) loadSession(hello *clientHelloMsg) (cacheKey string,
- session *clientSessionState, earlySecret, binderKey []byte) {
- if c.config.SessionTicketsDisabled || c.config.ClientSessionCache == nil {
- return "", nil, nil, nil
- }
-
- hello.ticketSupported = true
-
- if hello.supportedVersions[0] == VersionTLS13 {
- // Require DHE on resumption as it guarantees forward secrecy against
- // compromise of the session ticket key. See RFC 8446, Section 4.2.9.
- hello.pskModes = []uint8{pskModeDHE}
- }
-
- // Session resumption is not allowed if renegotiating because
- // renegotiation is primarily used to allow a client to send a client
- // certificate, which would be skipped if session resumption occurred.
- if c.handshakes != 0 {
- return "", nil, nil, nil
- }
-
- // Try to resume a previously negotiated TLS session, if available.
- cacheKey = clientSessionCacheKey(c.conn.RemoteAddr(), c.config)
- sess, ok := c.config.ClientSessionCache.Get(cacheKey)
- if !ok || sess == nil {
- return cacheKey, nil, nil, nil
- }
- session = fromClientSessionState(sess)
-
- var appData []byte
- var maxEarlyData uint32
- if session.vers == VersionTLS13 {
- var ok bool
- maxEarlyData, appData, ok = c.decodeSessionState(session)
- if !ok { // delete it, if parsing failed
- c.config.ClientSessionCache.Put(cacheKey, nil)
- return cacheKey, nil, nil, nil
- }
- }
-
- // Check that version used for the previous session is still valid.
- versOk := false
- for _, v := range hello.supportedVersions {
- if v == session.vers {
- versOk = true
- break
- }
- }
- if !versOk {
- return cacheKey, nil, nil, nil
- }
-
- // Check that the cached server certificate is not expired, and that it's
- // valid for the ServerName. This should be ensured by the cache key, but
- // protect the application from a faulty ClientSessionCache implementation.
- if !c.config.InsecureSkipVerify {
- if len(session.verifiedChains) == 0 {
- // The original connection had InsecureSkipVerify, while this doesn't.
- return cacheKey, nil, nil, nil
- }
- serverCert := session.serverCertificates[0]
- if c.config.time().After(serverCert.NotAfter) {
- // Expired certificate, delete the entry.
- c.config.ClientSessionCache.Put(cacheKey, nil)
- return cacheKey, nil, nil, nil
- }
- if err := serverCert.VerifyHostname(c.config.ServerName); err != nil {
- return cacheKey, nil, nil, nil
- }
- }
-
- if session.vers != VersionTLS13 {
- // In TLS 1.2 the cipher suite must match the resumed session. Ensure we
- // are still offering it.
- if mutualCipherSuite(hello.cipherSuites, session.cipherSuite) == nil {
- return cacheKey, nil, nil, nil
- }
-
- hello.sessionTicket = session.sessionTicket
- return
- }
-
- // Check that the session ticket is not expired.
- if c.config.time().After(session.useBy) {
- c.config.ClientSessionCache.Put(cacheKey, nil)
- return cacheKey, nil, nil, nil
- }
-
- // In TLS 1.3 the KDF hash must match the resumed session. Ensure we
- // offer at least one cipher suite with that hash.
- cipherSuite := cipherSuiteTLS13ByID(session.cipherSuite)
- if cipherSuite == nil {
- return cacheKey, nil, nil, nil
- }
- cipherSuiteOk := false
- for _, offeredID := range hello.cipherSuites {
- offeredSuite := cipherSuiteTLS13ByID(offeredID)
- if offeredSuite != nil && offeredSuite.hash == cipherSuite.hash {
- cipherSuiteOk = true
- break
- }
- }
- if !cipherSuiteOk {
- return cacheKey, nil, nil, nil
- }
-
- // Set the pre_shared_key extension. See RFC 8446, Section 4.2.11.1.
- ticketAge := uint32(c.config.time().Sub(session.receivedAt) / time.Millisecond)
- identity := pskIdentity{
- label: session.sessionTicket,
- obfuscatedTicketAge: ticketAge + session.ageAdd,
- }
- hello.pskIdentities = []pskIdentity{identity}
- hello.pskBinders = [][]byte{make([]byte, cipherSuite.hash.Size())}
-
- // Compute the PSK binders. See RFC 8446, Section 4.2.11.2.
- psk := cipherSuite.expandLabel(session.masterSecret, "resumption",
- session.nonce, cipherSuite.hash.Size())
- earlySecret = cipherSuite.extract(psk, nil)
- binderKey = cipherSuite.deriveSecret(earlySecret, resumptionBinderLabel, nil)
- if c.extraConfig != nil {
- hello.earlyData = c.extraConfig.Enable0RTT && maxEarlyData > 0
- }
- transcript := cipherSuite.hash.New()
- transcript.Write(hello.marshalWithoutBinders())
- pskBinders := [][]byte{cipherSuite.finishedHash(binderKey, transcript)}
- hello.updateBinders(pskBinders)
-
- if session.vers == VersionTLS13 && c.extraConfig != nil && c.extraConfig.SetAppDataFromSessionState != nil {
- c.extraConfig.SetAppDataFromSessionState(appData)
- }
- return
-}
-
-func (c *Conn) pickTLSVersion(serverHello *serverHelloMsg) error {
- peerVersion := serverHello.vers
- if serverHello.supportedVersion != 0 {
- peerVersion = serverHello.supportedVersion
- }
-
- vers, ok := c.config.mutualVersion([]uint16{peerVersion})
- if !ok {
- c.sendAlert(alertProtocolVersion)
- return fmt.Errorf("tls: server selected unsupported protocol version %x", peerVersion)
- }
-
- c.vers = vers
- c.haveVers = true
- c.in.version = vers
- c.out.version = vers
-
- return nil
-}
-
-// Does the handshake, either a full one or resumes old session. Requires hs.c,
-// hs.hello, hs.serverHello, and, optionally, hs.session to be set.
-func (hs *clientHandshakeState) handshake() error {
- c := hs.c
-
- isResume, err := hs.processServerHello()
- if err != nil {
- return err
- }
-
- hs.finishedHash = newFinishedHash(c.vers, hs.suite)
-
- // No signatures of the handshake are needed in a resumption.
- // Otherwise, in a full handshake, if we don't have any certificates
- // configured then we will never send a CertificateVerify message and
- // thus no signatures are needed in that case either.
- if isResume || (len(c.config.Certificates) == 0 && c.config.GetClientCertificate == nil) {
- hs.finishedHash.discardHandshakeBuffer()
- }
-
- hs.finishedHash.Write(hs.hello.marshal())
- hs.finishedHash.Write(hs.serverHello.marshal())
-
- c.buffering = true
- c.didResume = isResume
- if isResume {
- if err := hs.establishKeys(); err != nil {
- return err
- }
- if err := hs.readSessionTicket(); err != nil {
- return err
- }
- if err := hs.readFinished(c.serverFinished[:]); err != nil {
- return err
- }
- c.clientFinishedIsFirst = false
- // Make sure the connection is still being verified whether or not this
- // is a resumption. Resumptions currently don't reverify certificates so
- // they don't call verifyServerCertificate. See Issue 31641.
- if c.config.VerifyConnection != nil {
- if err := c.config.VerifyConnection(c.connectionStateLocked()); err != nil {
- c.sendAlert(alertBadCertificate)
- return err
- }
- }
- if err := hs.sendFinished(c.clientFinished[:]); err != nil {
- return err
- }
- if _, err := c.flush(); err != nil {
- return err
- }
- } else {
- if err := hs.doFullHandshake(); err != nil {
- return err
- }
- if err := hs.establishKeys(); err != nil {
- return err
- }
- if err := hs.sendFinished(c.clientFinished[:]); err != nil {
- return err
- }
- if _, err := c.flush(); err != nil {
- return err
- }
- c.clientFinishedIsFirst = true
- if err := hs.readSessionTicket(); err != nil {
- return err
- }
- if err := hs.readFinished(c.serverFinished[:]); err != nil {
- return err
- }
- }
-
- c.ekm = ekmFromMasterSecret(c.vers, hs.suite, hs.masterSecret, hs.hello.random, hs.serverHello.random)
- atomic.StoreUint32(&c.handshakeStatus, 1)
-
- return nil
-}
-
-func (hs *clientHandshakeState) pickCipherSuite() error {
- if hs.suite = mutualCipherSuite(hs.hello.cipherSuites, hs.serverHello.cipherSuite); hs.suite == nil {
- hs.c.sendAlert(alertHandshakeFailure)
- return errors.New("tls: server chose an unconfigured cipher suite")
- }
-
- hs.c.cipherSuite = hs.suite.id
- return nil
-}
-
-func (hs *clientHandshakeState) doFullHandshake() error {
- c := hs.c
-
- msg, err := c.readHandshake()
- if err != nil {
- return err
- }
- certMsg, ok := msg.(*certificateMsg)
- if !ok || len(certMsg.certificates) == 0 {
- c.sendAlert(alertUnexpectedMessage)
- return unexpectedMessageError(certMsg, msg)
- }
- hs.finishedHash.Write(certMsg.marshal())
-
- msg, err = c.readHandshake()
- if err != nil {
- return err
- }
-
- cs, ok := msg.(*certificateStatusMsg)
- if ok {
- // RFC4366 on Certificate Status Request:
- // The server MAY return a "certificate_status" message.
-
- if !hs.serverHello.ocspStapling {
- // If a server returns a "CertificateStatus" message, then the
- // server MUST have included an extension of type "status_request"
- // with empty "extension_data" in the extended server hello.
-
- c.sendAlert(alertUnexpectedMessage)
- return errors.New("tls: received unexpected CertificateStatus message")
- }
- hs.finishedHash.Write(cs.marshal())
-
- c.ocspResponse = cs.response
-
- msg, err = c.readHandshake()
- if err != nil {
- return err
- }
- }
-
- if c.handshakes == 0 {
- // If this is the first handshake on a connection, process and
- // (optionally) verify the server's certificates.
- if err := c.verifyServerCertificate(certMsg.certificates); err != nil {
- return err
- }
- } else {
- // This is a renegotiation handshake. We require that the
- // server's identity (i.e. leaf certificate) is unchanged and
- // thus any previous trust decision is still valid.
- //
- // See https://mitls.org/pages/attacks/3SHAKE for the
- // motivation behind this requirement.
- if !bytes.Equal(c.peerCertificates[0].Raw, certMsg.certificates[0]) {
- c.sendAlert(alertBadCertificate)
- return errors.New("tls: server's identity changed during renegotiation")
- }
- }
-
- keyAgreement := hs.suite.ka(c.vers)
-
- skx, ok := msg.(*serverKeyExchangeMsg)
- if ok {
- hs.finishedHash.Write(skx.marshal())
- err = keyAgreement.processServerKeyExchange(c.config, hs.hello, hs.serverHello, c.peerCertificates[0], skx)
- if err != nil {
- c.sendAlert(alertUnexpectedMessage)
- return err
- }
-
- msg, err = c.readHandshake()
- if err != nil {
- return err
- }
- }
-
- var chainToSend *Certificate
- var certRequested bool
- certReq, ok := msg.(*certificateRequestMsg)
- if ok {
- certRequested = true
- hs.finishedHash.Write(certReq.marshal())
-
- cri := certificateRequestInfoFromMsg(hs.ctx, c.vers, certReq)
- if chainToSend, err = c.getClientCertificate(cri); err != nil {
- c.sendAlert(alertInternalError)
- return err
- }
-
- msg, err = c.readHandshake()
- if err != nil {
- return err
- }
- }
-
- shd, ok := msg.(*serverHelloDoneMsg)
- if !ok {
- c.sendAlert(alertUnexpectedMessage)
- return unexpectedMessageError(shd, msg)
- }
- hs.finishedHash.Write(shd.marshal())
-
- // If the server requested a certificate then we have to send a
- // Certificate message, even if it's empty because we don't have a
- // certificate to send.
- if certRequested {
- certMsg = new(certificateMsg)
- certMsg.certificates = chainToSend.Certificate
- hs.finishedHash.Write(certMsg.marshal())
- if _, err := c.writeRecord(recordTypeHandshake, certMsg.marshal()); err != nil {
- return err
- }
- }
-
- preMasterSecret, ckx, err := keyAgreement.generateClientKeyExchange(c.config, hs.hello, c.peerCertificates[0])
- if err != nil {
- c.sendAlert(alertInternalError)
- return err
- }
- if ckx != nil {
- hs.finishedHash.Write(ckx.marshal())
- if _, err := c.writeRecord(recordTypeHandshake, ckx.marshal()); err != nil {
- return err
- }
- }
-
- if chainToSend != nil && len(chainToSend.Certificate) > 0 {
- certVerify := &certificateVerifyMsg{}
-
- key, ok := chainToSend.PrivateKey.(crypto.Signer)
- if !ok {
- c.sendAlert(alertInternalError)
- return fmt.Errorf("tls: client certificate private key of type %T does not implement crypto.Signer", chainToSend.PrivateKey)
- }
-
- var sigType uint8
- var sigHash crypto.Hash
- if c.vers >= VersionTLS12 {
- signatureAlgorithm, err := selectSignatureScheme(c.vers, chainToSend, certReq.supportedSignatureAlgorithms)
- if err != nil {
- c.sendAlert(alertIllegalParameter)
- return err
- }
- sigType, sigHash, err = typeAndHashFromSignatureScheme(signatureAlgorithm)
- if err != nil {
- return c.sendAlert(alertInternalError)
- }
- certVerify.hasSignatureAlgorithm = true
- certVerify.signatureAlgorithm = signatureAlgorithm
- } else {
- sigType, sigHash, err = legacyTypeAndHashFromPublicKey(key.Public())
- if err != nil {
- c.sendAlert(alertIllegalParameter)
- return err
- }
- }
-
- signed := hs.finishedHash.hashForClientCertificate(sigType, sigHash, hs.masterSecret)
- signOpts := crypto.SignerOpts(sigHash)
- if sigType == signatureRSAPSS {
- signOpts = &rsa.PSSOptions{SaltLength: rsa.PSSSaltLengthEqualsHash, Hash: sigHash}
- }
- certVerify.signature, err = key.Sign(c.config.rand(), signed, signOpts)
- if err != nil {
- c.sendAlert(alertInternalError)
- return err
- }
-
- hs.finishedHash.Write(certVerify.marshal())
- if _, err := c.writeRecord(recordTypeHandshake, certVerify.marshal()); err != nil {
- return err
- }
- }
-
- hs.masterSecret = masterFromPreMasterSecret(c.vers, hs.suite, preMasterSecret, hs.hello.random, hs.serverHello.random)
- if err := c.config.writeKeyLog(keyLogLabelTLS12, hs.hello.random, hs.masterSecret); err != nil {
- c.sendAlert(alertInternalError)
- return errors.New("tls: failed to write to key log: " + err.Error())
- }
-
- hs.finishedHash.discardHandshakeBuffer()
-
- return nil
-}
-
-func (hs *clientHandshakeState) establishKeys() error {
- c := hs.c
-
- clientMAC, serverMAC, clientKey, serverKey, clientIV, serverIV :=
- keysFromMasterSecret(c.vers, hs.suite, hs.masterSecret, hs.hello.random, hs.serverHello.random, hs.suite.macLen, hs.suite.keyLen, hs.suite.ivLen)
- var clientCipher, serverCipher interface{}
- var clientHash, serverHash hash.Hash
- if hs.suite.cipher != nil {
- clientCipher = hs.suite.cipher(clientKey, clientIV, false /* not for reading */)
- clientHash = hs.suite.mac(clientMAC)
- serverCipher = hs.suite.cipher(serverKey, serverIV, true /* for reading */)
- serverHash = hs.suite.mac(serverMAC)
- } else {
- clientCipher = hs.suite.aead(clientKey, clientIV)
- serverCipher = hs.suite.aead(serverKey, serverIV)
- }
-
- c.in.prepareCipherSpec(c.vers, serverCipher, serverHash)
- c.out.prepareCipherSpec(c.vers, clientCipher, clientHash)
- return nil
-}
-
-func (hs *clientHandshakeState) serverResumedSession() bool {
- // If the server responded with the same sessionId then it means the
- // sessionTicket is being used to resume a TLS session.
- return hs.session != nil && hs.hello.sessionId != nil &&
- bytes.Equal(hs.serverHello.sessionId, hs.hello.sessionId)
-}
-
-func (hs *clientHandshakeState) processServerHello() (bool, error) {
- c := hs.c
-
- if err := hs.pickCipherSuite(); err != nil {
- return false, err
- }
-
- if hs.serverHello.compressionMethod != compressionNone {
- c.sendAlert(alertUnexpectedMessage)
- return false, errors.New("tls: server selected unsupported compression format")
- }
-
- if c.handshakes == 0 && hs.serverHello.secureRenegotiationSupported {
- c.secureRenegotiation = true
- if len(hs.serverHello.secureRenegotiation) != 0 {
- c.sendAlert(alertHandshakeFailure)
- return false, errors.New("tls: initial handshake had non-empty renegotiation extension")
- }
- }
-
- if c.handshakes > 0 && c.secureRenegotiation {
- var expectedSecureRenegotiation [24]byte
- copy(expectedSecureRenegotiation[:], c.clientFinished[:])
- copy(expectedSecureRenegotiation[12:], c.serverFinished[:])
- if !bytes.Equal(hs.serverHello.secureRenegotiation, expectedSecureRenegotiation[:]) {
- c.sendAlert(alertHandshakeFailure)
- return false, errors.New("tls: incorrect renegotiation extension contents")
- }
- }
-
- if err := checkALPN(hs.hello.alpnProtocols, hs.serverHello.alpnProtocol); err != nil {
- c.sendAlert(alertUnsupportedExtension)
- return false, err
- }
- c.clientProtocol = hs.serverHello.alpnProtocol
-
- c.scts = hs.serverHello.scts
-
- if !hs.serverResumedSession() {
- return false, nil
- }
-
- if hs.session.vers != c.vers {
- c.sendAlert(alertHandshakeFailure)
- return false, errors.New("tls: server resumed a session with a different version")
- }
-
- if hs.session.cipherSuite != hs.suite.id {
- c.sendAlert(alertHandshakeFailure)
- return false, errors.New("tls: server resumed a session with a different cipher suite")
- }
-
- // Restore masterSecret, peerCerts, and ocspResponse from previous state
- hs.masterSecret = hs.session.masterSecret
- c.peerCertificates = hs.session.serverCertificates
- c.verifiedChains = hs.session.verifiedChains
- c.ocspResponse = hs.session.ocspResponse
- // Let the ServerHello SCTs override the session SCTs from the original
- // connection, if any are provided
- if len(c.scts) == 0 && len(hs.session.scts) != 0 {
- c.scts = hs.session.scts
- }
-
- return true, nil
-}
-
-// checkALPN ensure that the server's choice of ALPN protocol is compatible with
-// the protocols that we advertised in the Client Hello.
-func checkALPN(clientProtos []string, serverProto string) error {
- if serverProto == "" {
- return nil
- }
- if len(clientProtos) == 0 {
- return errors.New("tls: server advertised unrequested ALPN extension")
- }
- for _, proto := range clientProtos {
- if proto == serverProto {
- return nil
- }
- }
- return errors.New("tls: server selected unadvertised ALPN protocol")
-}
-
-func (hs *clientHandshakeState) readFinished(out []byte) error {
- c := hs.c
-
- if err := c.readChangeCipherSpec(); err != nil {
- return err
- }
-
- msg, err := c.readHandshake()
- if err != nil {
- return err
- }
- serverFinished, ok := msg.(*finishedMsg)
- if !ok {
- c.sendAlert(alertUnexpectedMessage)
- return unexpectedMessageError(serverFinished, msg)
- }
-
- verify := hs.finishedHash.serverSum(hs.masterSecret)
- if len(verify) != len(serverFinished.verifyData) ||
- subtle.ConstantTimeCompare(verify, serverFinished.verifyData) != 1 {
- c.sendAlert(alertHandshakeFailure)
- return errors.New("tls: server's Finished message was incorrect")
- }
- hs.finishedHash.Write(serverFinished.marshal())
- copy(out, verify)
- return nil
-}
-
-func (hs *clientHandshakeState) readSessionTicket() error {
- if !hs.serverHello.ticketSupported {
- return nil
- }
-
- c := hs.c
- msg, err := c.readHandshake()
- if err != nil {
- return err
- }
- sessionTicketMsg, ok := msg.(*newSessionTicketMsg)
- if !ok {
- c.sendAlert(alertUnexpectedMessage)
- return unexpectedMessageError(sessionTicketMsg, msg)
- }
- hs.finishedHash.Write(sessionTicketMsg.marshal())
-
- hs.session = &clientSessionState{
- sessionTicket: sessionTicketMsg.ticket,
- vers: c.vers,
- cipherSuite: hs.suite.id,
- masterSecret: hs.masterSecret,
- serverCertificates: c.peerCertificates,
- verifiedChains: c.verifiedChains,
- receivedAt: c.config.time(),
- ocspResponse: c.ocspResponse,
- scts: c.scts,
- }
-
- return nil
-}
-
-func (hs *clientHandshakeState) sendFinished(out []byte) error {
- c := hs.c
-
- if _, err := c.writeRecord(recordTypeChangeCipherSpec, []byte{1}); err != nil {
- return err
- }
-
- finished := new(finishedMsg)
- finished.verifyData = hs.finishedHash.clientSum(hs.masterSecret)
- hs.finishedHash.Write(finished.marshal())
- if _, err := c.writeRecord(recordTypeHandshake, finished.marshal()); err != nil {
- return err
- }
- copy(out, finished.verifyData)
- return nil
-}
-
-// verifyServerCertificate parses and verifies the provided chain, setting
-// c.verifiedChains and c.peerCertificates or sending the appropriate alert.
-func (c *Conn) verifyServerCertificate(certificates [][]byte) error {
- certs := make([]*x509.Certificate, len(certificates))
- for i, asn1Data := range certificates {
- cert, err := x509.ParseCertificate(asn1Data)
- if err != nil {
- c.sendAlert(alertBadCertificate)
- return errors.New("tls: failed to parse certificate from server: " + err.Error())
- }
- certs[i] = cert
- }
-
- if !c.config.InsecureSkipVerify {
- opts := x509.VerifyOptions{
- Roots: c.config.RootCAs,
- CurrentTime: c.config.time(),
- DNSName: c.config.ServerName,
- Intermediates: x509.NewCertPool(),
- }
- for _, cert := range certs[1:] {
- opts.Intermediates.AddCert(cert)
- }
- var err error
- c.verifiedChains, err = certs[0].Verify(opts)
- if err != nil {
- c.sendAlert(alertBadCertificate)
- return err
- }
- }
-
- switch certs[0].PublicKey.(type) {
- case *rsa.PublicKey, *ecdsa.PublicKey, ed25519.PublicKey:
- break
- default:
- c.sendAlert(alertUnsupportedCertificate)
- return fmt.Errorf("tls: server's certificate contains an unsupported type of public key: %T", certs[0].PublicKey)
- }
-
- c.peerCertificates = certs
-
- if c.config.VerifyPeerCertificate != nil {
- if err := c.config.VerifyPeerCertificate(certificates, c.verifiedChains); err != nil {
- c.sendAlert(alertBadCertificate)
- return err
- }
- }
-
- if c.config.VerifyConnection != nil {
- if err := c.config.VerifyConnection(c.connectionStateLocked()); err != nil {
- c.sendAlert(alertBadCertificate)
- return err
- }
- }
-
- return nil
-}
-
-// certificateRequestInfoFromMsg generates a CertificateRequestInfo from a TLS
-// <= 1.2 CertificateRequest, making an effort to fill in missing information.
-func certificateRequestInfoFromMsg(ctx context.Context, vers uint16, certReq *certificateRequestMsg) *CertificateRequestInfo {
- cri := &certificateRequestInfo{
- AcceptableCAs: certReq.certificateAuthorities,
- Version: vers,
- ctx: ctx,
- }
-
- var rsaAvail, ecAvail bool
- for _, certType := range certReq.certificateTypes {
- switch certType {
- case certTypeRSASign:
- rsaAvail = true
- case certTypeECDSASign:
- ecAvail = true
- }
- }
-
- if !certReq.hasSignatureAlgorithm {
- // Prior to TLS 1.2, signature schemes did not exist. In this case we
- // make up a list based on the acceptable certificate types, to help
- // GetClientCertificate and SupportsCertificate select the right certificate.
- // The hash part of the SignatureScheme is a lie here, because
- // TLS 1.0 and 1.1 always use MD5+SHA1 for RSA and SHA1 for ECDSA.
- switch {
- case rsaAvail && ecAvail:
- cri.SignatureSchemes = []SignatureScheme{
- ECDSAWithP256AndSHA256, ECDSAWithP384AndSHA384, ECDSAWithP521AndSHA512,
- PKCS1WithSHA256, PKCS1WithSHA384, PKCS1WithSHA512, PKCS1WithSHA1,
- }
- case rsaAvail:
- cri.SignatureSchemes = []SignatureScheme{
- PKCS1WithSHA256, PKCS1WithSHA384, PKCS1WithSHA512, PKCS1WithSHA1,
- }
- case ecAvail:
- cri.SignatureSchemes = []SignatureScheme{
- ECDSAWithP256AndSHA256, ECDSAWithP384AndSHA384, ECDSAWithP521AndSHA512,
- }
- }
- return toCertificateRequestInfo(cri)
- }
-
- // Filter the signature schemes based on the certificate types.
- // See RFC 5246, Section 7.4.4 (where it calls this "somewhat complicated").
- cri.SignatureSchemes = make([]SignatureScheme, 0, len(certReq.supportedSignatureAlgorithms))
- for _, sigScheme := range certReq.supportedSignatureAlgorithms {
- sigType, _, err := typeAndHashFromSignatureScheme(sigScheme)
- if err != nil {
- continue
- }
- switch sigType {
- case signatureECDSA, signatureEd25519:
- if ecAvail {
- cri.SignatureSchemes = append(cri.SignatureSchemes, sigScheme)
- }
- case signatureRSAPSS, signaturePKCS1v15:
- if rsaAvail {
- cri.SignatureSchemes = append(cri.SignatureSchemes, sigScheme)
- }
- }
- }
-
- return toCertificateRequestInfo(cri)
-}
-
-func (c *Conn) getClientCertificate(cri *CertificateRequestInfo) (*Certificate, error) {
- if c.config.GetClientCertificate != nil {
- return c.config.GetClientCertificate(cri)
- }
-
- for _, chain := range c.config.Certificates {
- if err := cri.SupportsCertificate(&chain); err != nil {
- continue
- }
- return &chain, nil
- }
-
- // No acceptable certificate found. Don't send a certificate.
- return new(Certificate), nil
-}
-
-const clientSessionCacheKeyPrefix = "qtls-"
-
-// clientSessionCacheKey returns a key used to cache sessionTickets that could
-// be used to resume previously negotiated TLS sessions with a server.
-func clientSessionCacheKey(serverAddr net.Addr, config *config) string {
- if len(config.ServerName) > 0 {
- return clientSessionCacheKeyPrefix + config.ServerName
- }
- return clientSessionCacheKeyPrefix + serverAddr.String()
-}
-
-// hostnameInSNI converts name into an appropriate hostname for SNI.
-// Literal IP addresses and absolute FQDNs are not permitted as SNI values.
-// See RFC 6066, Section 3.
-func hostnameInSNI(name string) string {
- host := name
- if len(host) > 0 && host[0] == '[' && host[len(host)-1] == ']' {
- host = host[1 : len(host)-1]
- }
- if i := strings.LastIndex(host, "%"); i > 0 {
- host = host[:i]
- }
- if net.ParseIP(host) != nil {
- return ""
- }
- for len(name) > 0 && name[len(name)-1] == '.' {
- name = name[:len(name)-1]
- }
- return name
-}
diff --git a/vendor/github.com/marten-seemann/qtls-go1-17/handshake_messages.go b/vendor/github.com/marten-seemann/qtls-go1-17/handshake_messages.go
deleted file mode 100644
index 1ab757626..000000000
--- a/vendor/github.com/marten-seemann/qtls-go1-17/handshake_messages.go
+++ /dev/null
@@ -1,1832 +0,0 @@
-// Copyright 2009 The Go Authors. All rights reserved.
-// Use of this source code is governed by a BSD-style
-// license that can be found in the LICENSE file.
-
-package qtls
-
-import (
- "fmt"
- "strings"
-
- "golang.org/x/crypto/cryptobyte"
-)
-
-// The marshalingFunction type is an adapter to allow the use of ordinary
-// functions as cryptobyte.MarshalingValue.
-type marshalingFunction func(b *cryptobyte.Builder) error
-
-func (f marshalingFunction) Marshal(b *cryptobyte.Builder) error {
- return f(b)
-}
-
-// addBytesWithLength appends a sequence of bytes to the cryptobyte.Builder. If
-// the length of the sequence is not the value specified, it produces an error.
-func addBytesWithLength(b *cryptobyte.Builder, v []byte, n int) {
- b.AddValue(marshalingFunction(func(b *cryptobyte.Builder) error {
- if len(v) != n {
- return fmt.Errorf("invalid value length: expected %d, got %d", n, len(v))
- }
- b.AddBytes(v)
- return nil
- }))
-}
-
-// addUint64 appends a big-endian, 64-bit value to the cryptobyte.Builder.
-func addUint64(b *cryptobyte.Builder, v uint64) {
- b.AddUint32(uint32(v >> 32))
- b.AddUint32(uint32(v))
-}
-
-// readUint64 decodes a big-endian, 64-bit value into out and advances over it.
-// It reports whether the read was successful.
-func readUint64(s *cryptobyte.String, out *uint64) bool {
- var hi, lo uint32
- if !s.ReadUint32(&hi) || !s.ReadUint32(&lo) {
- return false
- }
- *out = uint64(hi)<<32 | uint64(lo)
- return true
-}
-
-// readUint8LengthPrefixed acts like s.ReadUint8LengthPrefixed, but targets a
-// []byte instead of a cryptobyte.String.
-func readUint8LengthPrefixed(s *cryptobyte.String, out *[]byte) bool {
- return s.ReadUint8LengthPrefixed((*cryptobyte.String)(out))
-}
-
-// readUint16LengthPrefixed acts like s.ReadUint16LengthPrefixed, but targets a
-// []byte instead of a cryptobyte.String.
-func readUint16LengthPrefixed(s *cryptobyte.String, out *[]byte) bool {
- return s.ReadUint16LengthPrefixed((*cryptobyte.String)(out))
-}
-
-// readUint24LengthPrefixed acts like s.ReadUint24LengthPrefixed, but targets a
-// []byte instead of a cryptobyte.String.
-func readUint24LengthPrefixed(s *cryptobyte.String, out *[]byte) bool {
- return s.ReadUint24LengthPrefixed((*cryptobyte.String)(out))
-}
-
-type clientHelloMsg struct {
- raw []byte
- vers uint16
- random []byte
- sessionId []byte
- cipherSuites []uint16
- compressionMethods []uint8
- serverName string
- ocspStapling bool
- supportedCurves []CurveID
- supportedPoints []uint8
- ticketSupported bool
- sessionTicket []uint8
- supportedSignatureAlgorithms []SignatureScheme
- supportedSignatureAlgorithmsCert []SignatureScheme
- secureRenegotiationSupported bool
- secureRenegotiation []byte
- alpnProtocols []string
- scts bool
- supportedVersions []uint16
- cookie []byte
- keyShares []keyShare
- earlyData bool
- pskModes []uint8
- pskIdentities []pskIdentity
- pskBinders [][]byte
- additionalExtensions []Extension
-}
-
-func (m *clientHelloMsg) marshal() []byte {
- if m.raw != nil {
- return m.raw
- }
-
- var b cryptobyte.Builder
- b.AddUint8(typeClientHello)
- b.AddUint24LengthPrefixed(func(b *cryptobyte.Builder) {
- b.AddUint16(m.vers)
- addBytesWithLength(b, m.random, 32)
- b.AddUint8LengthPrefixed(func(b *cryptobyte.Builder) {
- b.AddBytes(m.sessionId)
- })
- b.AddUint16LengthPrefixed(func(b *cryptobyte.Builder) {
- for _, suite := range m.cipherSuites {
- b.AddUint16(suite)
- }
- })
- b.AddUint8LengthPrefixed(func(b *cryptobyte.Builder) {
- b.AddBytes(m.compressionMethods)
- })
-
- // If extensions aren't present, omit them.
- var extensionsPresent bool
- bWithoutExtensions := *b
-
- b.AddUint16LengthPrefixed(func(b *cryptobyte.Builder) {
- if len(m.serverName) > 0 {
- // RFC 6066, Section 3
- b.AddUint16(extensionServerName)
- b.AddUint16LengthPrefixed(func(b *cryptobyte.Builder) {
- b.AddUint16LengthPrefixed(func(b *cryptobyte.Builder) {
- b.AddUint8(0) // name_type = host_name
- b.AddUint16LengthPrefixed(func(b *cryptobyte.Builder) {
- b.AddBytes([]byte(m.serverName))
- })
- })
- })
- }
- if m.ocspStapling {
- // RFC 4366, Section 3.6
- b.AddUint16(extensionStatusRequest)
- b.AddUint16LengthPrefixed(func(b *cryptobyte.Builder) {
- b.AddUint8(1) // status_type = ocsp
- b.AddUint16(0) // empty responder_id_list
- b.AddUint16(0) // empty request_extensions
- })
- }
- if len(m.supportedCurves) > 0 {
- // RFC 4492, sections 5.1.1 and RFC 8446, Section 4.2.7
- b.AddUint16(extensionSupportedCurves)
- b.AddUint16LengthPrefixed(func(b *cryptobyte.Builder) {
- b.AddUint16LengthPrefixed(func(b *cryptobyte.Builder) {
- for _, curve := range m.supportedCurves {
- b.AddUint16(uint16(curve))
- }
- })
- })
- }
- if len(m.supportedPoints) > 0 {
- // RFC 4492, Section 5.1.2
- b.AddUint16(extensionSupportedPoints)
- b.AddUint16LengthPrefixed(func(b *cryptobyte.Builder) {
- b.AddUint8LengthPrefixed(func(b *cryptobyte.Builder) {
- b.AddBytes(m.supportedPoints)
- })
- })
- }
- if m.ticketSupported {
- // RFC 5077, Section 3.2
- b.AddUint16(extensionSessionTicket)
- b.AddUint16LengthPrefixed(func(b *cryptobyte.Builder) {
- b.AddBytes(m.sessionTicket)
- })
- }
- if len(m.supportedSignatureAlgorithms) > 0 {
- // RFC 5246, Section 7.4.1.4.1
- b.AddUint16(extensionSignatureAlgorithms)
- b.AddUint16LengthPrefixed(func(b *cryptobyte.Builder) {
- b.AddUint16LengthPrefixed(func(b *cryptobyte.Builder) {
- for _, sigAlgo := range m.supportedSignatureAlgorithms {
- b.AddUint16(uint16(sigAlgo))
- }
- })
- })
- }
- if len(m.supportedSignatureAlgorithmsCert) > 0 {
- // RFC 8446, Section 4.2.3
- b.AddUint16(extensionSignatureAlgorithmsCert)
- b.AddUint16LengthPrefixed(func(b *cryptobyte.Builder) {
- b.AddUint16LengthPrefixed(func(b *cryptobyte.Builder) {
- for _, sigAlgo := range m.supportedSignatureAlgorithmsCert {
- b.AddUint16(uint16(sigAlgo))
- }
- })
- })
- }
- if m.secureRenegotiationSupported {
- // RFC 5746, Section 3.2
- b.AddUint16(extensionRenegotiationInfo)
- b.AddUint16LengthPrefixed(func(b *cryptobyte.Builder) {
- b.AddUint8LengthPrefixed(func(b *cryptobyte.Builder) {
- b.AddBytes(m.secureRenegotiation)
- })
- })
- }
- if len(m.alpnProtocols) > 0 {
- // RFC 7301, Section 3.1
- b.AddUint16(extensionALPN)
- b.AddUint16LengthPrefixed(func(b *cryptobyte.Builder) {
- b.AddUint16LengthPrefixed(func(b *cryptobyte.Builder) {
- for _, proto := range m.alpnProtocols {
- b.AddUint8LengthPrefixed(func(b *cryptobyte.Builder) {
- b.AddBytes([]byte(proto))
- })
- }
- })
- })
- }
- if m.scts {
- // RFC 6962, Section 3.3.1
- b.AddUint16(extensionSCT)
- b.AddUint16(0) // empty extension_data
- }
- if len(m.supportedVersions) > 0 {
- // RFC 8446, Section 4.2.1
- b.AddUint16(extensionSupportedVersions)
- b.AddUint16LengthPrefixed(func(b *cryptobyte.Builder) {
- b.AddUint8LengthPrefixed(func(b *cryptobyte.Builder) {
- for _, vers := range m.supportedVersions {
- b.AddUint16(vers)
- }
- })
- })
- }
- if len(m.cookie) > 0 {
- // RFC 8446, Section 4.2.2
- b.AddUint16(extensionCookie)
- b.AddUint16LengthPrefixed(func(b *cryptobyte.Builder) {
- b.AddUint16LengthPrefixed(func(b *cryptobyte.Builder) {
- b.AddBytes(m.cookie)
- })
- })
- }
- if len(m.keyShares) > 0 {
- // RFC 8446, Section 4.2.8
- b.AddUint16(extensionKeyShare)
- b.AddUint16LengthPrefixed(func(b *cryptobyte.Builder) {
- b.AddUint16LengthPrefixed(func(b *cryptobyte.Builder) {
- for _, ks := range m.keyShares {
- b.AddUint16(uint16(ks.group))
- b.AddUint16LengthPrefixed(func(b *cryptobyte.Builder) {
- b.AddBytes(ks.data)
- })
- }
- })
- })
- }
- if m.earlyData {
- // RFC 8446, Section 4.2.10
- b.AddUint16(extensionEarlyData)
- b.AddUint16(0) // empty extension_data
- }
- if len(m.pskModes) > 0 {
- // RFC 8446, Section 4.2.9
- b.AddUint16(extensionPSKModes)
- b.AddUint16LengthPrefixed(func(b *cryptobyte.Builder) {
- b.AddUint8LengthPrefixed(func(b *cryptobyte.Builder) {
- b.AddBytes(m.pskModes)
- })
- })
- }
- for _, ext := range m.additionalExtensions {
- b.AddUint16(ext.Type)
- b.AddUint16LengthPrefixed(func(b *cryptobyte.Builder) {
- b.AddBytes(ext.Data)
- })
- }
- if len(m.pskIdentities) > 0 { // pre_shared_key must be the last extension
- // RFC 8446, Section 4.2.11
- b.AddUint16(extensionPreSharedKey)
- b.AddUint16LengthPrefixed(func(b *cryptobyte.Builder) {
- b.AddUint16LengthPrefixed(func(b *cryptobyte.Builder) {
- for _, psk := range m.pskIdentities {
- b.AddUint16LengthPrefixed(func(b *cryptobyte.Builder) {
- b.AddBytes(psk.label)
- })
- b.AddUint32(psk.obfuscatedTicketAge)
- }
- })
- b.AddUint16LengthPrefixed(func(b *cryptobyte.Builder) {
- for _, binder := range m.pskBinders {
- b.AddUint8LengthPrefixed(func(b *cryptobyte.Builder) {
- b.AddBytes(binder)
- })
- }
- })
- })
- }
-
- extensionsPresent = len(b.BytesOrPanic()) > 2
- })
-
- if !extensionsPresent {
- *b = bWithoutExtensions
- }
- })
-
- m.raw = b.BytesOrPanic()
- return m.raw
-}
-
-// marshalWithoutBinders returns the ClientHello through the
-// PreSharedKeyExtension.identities field, according to RFC 8446, Section
-// 4.2.11.2. Note that m.pskBinders must be set to slices of the correct length.
-func (m *clientHelloMsg) marshalWithoutBinders() []byte {
- bindersLen := 2 // uint16 length prefix
- for _, binder := range m.pskBinders {
- bindersLen += 1 // uint8 length prefix
- bindersLen += len(binder)
- }
-
- fullMessage := m.marshal()
- return fullMessage[:len(fullMessage)-bindersLen]
-}
-
-// updateBinders updates the m.pskBinders field, if necessary updating the
-// cached marshaled representation. The supplied binders must have the same
-// length as the current m.pskBinders.
-func (m *clientHelloMsg) updateBinders(pskBinders [][]byte) {
- if len(pskBinders) != len(m.pskBinders) {
- panic("tls: internal error: pskBinders length mismatch")
- }
- for i := range m.pskBinders {
- if len(pskBinders[i]) != len(m.pskBinders[i]) {
- panic("tls: internal error: pskBinders length mismatch")
- }
- }
- m.pskBinders = pskBinders
- if m.raw != nil {
- lenWithoutBinders := len(m.marshalWithoutBinders())
- // TODO(filippo): replace with NewFixedBuilder once CL 148882 is imported.
- b := cryptobyte.NewBuilder(m.raw[:lenWithoutBinders])
- b.AddUint16LengthPrefixed(func(b *cryptobyte.Builder) {
- for _, binder := range m.pskBinders {
- b.AddUint8LengthPrefixed(func(b *cryptobyte.Builder) {
- b.AddBytes(binder)
- })
- }
- })
- if len(b.BytesOrPanic()) != len(m.raw) {
- panic("tls: internal error: failed to update binders")
- }
- }
-}
-
-func (m *clientHelloMsg) unmarshal(data []byte) bool {
- *m = clientHelloMsg{raw: data}
- s := cryptobyte.String(data)
-
- if !s.Skip(4) || // message type and uint24 length field
- !s.ReadUint16(&m.vers) || !s.ReadBytes(&m.random, 32) ||
- !readUint8LengthPrefixed(&s, &m.sessionId) {
- return false
- }
-
- var cipherSuites cryptobyte.String
- if !s.ReadUint16LengthPrefixed(&cipherSuites) {
- return false
- }
- m.cipherSuites = []uint16{}
- m.secureRenegotiationSupported = false
- for !cipherSuites.Empty() {
- var suite uint16
- if !cipherSuites.ReadUint16(&suite) {
- return false
- }
- if suite == scsvRenegotiation {
- m.secureRenegotiationSupported = true
- }
- m.cipherSuites = append(m.cipherSuites, suite)
- }
-
- if !readUint8LengthPrefixed(&s, &m.compressionMethods) {
- return false
- }
-
- if s.Empty() {
- // ClientHello is optionally followed by extension data
- return true
- }
-
- var extensions cryptobyte.String
- if !s.ReadUint16LengthPrefixed(&extensions) || !s.Empty() {
- return false
- }
-
- for !extensions.Empty() {
- var ext uint16
- var extData cryptobyte.String
- if !extensions.ReadUint16(&ext) ||
- !extensions.ReadUint16LengthPrefixed(&extData) {
- return false
- }
-
- switch ext {
- case extensionServerName:
- // RFC 6066, Section 3
- var nameList cryptobyte.String
- if !extData.ReadUint16LengthPrefixed(&nameList) || nameList.Empty() {
- return false
- }
- for !nameList.Empty() {
- var nameType uint8
- var serverName cryptobyte.String
- if !nameList.ReadUint8(&nameType) ||
- !nameList.ReadUint16LengthPrefixed(&serverName) ||
- serverName.Empty() {
- return false
- }
- if nameType != 0 {
- continue
- }
- if len(m.serverName) != 0 {
- // Multiple names of the same name_type are prohibited.
- return false
- }
- m.serverName = string(serverName)
- // An SNI value may not include a trailing dot.
- if strings.HasSuffix(m.serverName, ".") {
- return false
- }
- }
- case extensionStatusRequest:
- // RFC 4366, Section 3.6
- var statusType uint8
- var ignored cryptobyte.String
- if !extData.ReadUint8(&statusType) ||
- !extData.ReadUint16LengthPrefixed(&ignored) ||
- !extData.ReadUint16LengthPrefixed(&ignored) {
- return false
- }
- m.ocspStapling = statusType == statusTypeOCSP
- case extensionSupportedCurves:
- // RFC 4492, sections 5.1.1 and RFC 8446, Section 4.2.7
- var curves cryptobyte.String
- if !extData.ReadUint16LengthPrefixed(&curves) || curves.Empty() {
- return false
- }
- for !curves.Empty() {
- var curve uint16
- if !curves.ReadUint16(&curve) {
- return false
- }
- m.supportedCurves = append(m.supportedCurves, CurveID(curve))
- }
- case extensionSupportedPoints:
- // RFC 4492, Section 5.1.2
- if !readUint8LengthPrefixed(&extData, &m.supportedPoints) ||
- len(m.supportedPoints) == 0 {
- return false
- }
- case extensionSessionTicket:
- // RFC 5077, Section 3.2
- m.ticketSupported = true
- extData.ReadBytes(&m.sessionTicket, len(extData))
- case extensionSignatureAlgorithms:
- // RFC 5246, Section 7.4.1.4.1
- var sigAndAlgs cryptobyte.String
- if !extData.ReadUint16LengthPrefixed(&sigAndAlgs) || sigAndAlgs.Empty() {
- return false
- }
- for !sigAndAlgs.Empty() {
- var sigAndAlg uint16
- if !sigAndAlgs.ReadUint16(&sigAndAlg) {
- return false
- }
- m.supportedSignatureAlgorithms = append(
- m.supportedSignatureAlgorithms, SignatureScheme(sigAndAlg))
- }
- case extensionSignatureAlgorithmsCert:
- // RFC 8446, Section 4.2.3
- var sigAndAlgs cryptobyte.String
- if !extData.ReadUint16LengthPrefixed(&sigAndAlgs) || sigAndAlgs.Empty() {
- return false
- }
- for !sigAndAlgs.Empty() {
- var sigAndAlg uint16
- if !sigAndAlgs.ReadUint16(&sigAndAlg) {
- return false
- }
- m.supportedSignatureAlgorithmsCert = append(
- m.supportedSignatureAlgorithmsCert, SignatureScheme(sigAndAlg))
- }
- case extensionRenegotiationInfo:
- // RFC 5746, Section 3.2
- if !readUint8LengthPrefixed(&extData, &m.secureRenegotiation) {
- return false
- }
- m.secureRenegotiationSupported = true
- case extensionALPN:
- // RFC 7301, Section 3.1
- var protoList cryptobyte.String
- if !extData.ReadUint16LengthPrefixed(&protoList) || protoList.Empty() {
- return false
- }
- for !protoList.Empty() {
- var proto cryptobyte.String
- if !protoList.ReadUint8LengthPrefixed(&proto) || proto.Empty() {
- return false
- }
- m.alpnProtocols = append(m.alpnProtocols, string(proto))
- }
- case extensionSCT:
- // RFC 6962, Section 3.3.1
- m.scts = true
- case extensionSupportedVersions:
- // RFC 8446, Section 4.2.1
- var versList cryptobyte.String
- if !extData.ReadUint8LengthPrefixed(&versList) || versList.Empty() {
- return false
- }
- for !versList.Empty() {
- var vers uint16
- if !versList.ReadUint16(&vers) {
- return false
- }
- m.supportedVersions = append(m.supportedVersions, vers)
- }
- case extensionCookie:
- // RFC 8446, Section 4.2.2
- if !readUint16LengthPrefixed(&extData, &m.cookie) ||
- len(m.cookie) == 0 {
- return false
- }
- case extensionKeyShare:
- // RFC 8446, Section 4.2.8
- var clientShares cryptobyte.String
- if !extData.ReadUint16LengthPrefixed(&clientShares) {
- return false
- }
- for !clientShares.Empty() {
- var ks keyShare
- if !clientShares.ReadUint16((*uint16)(&ks.group)) ||
- !readUint16LengthPrefixed(&clientShares, &ks.data) ||
- len(ks.data) == 0 {
- return false
- }
- m.keyShares = append(m.keyShares, ks)
- }
- case extensionEarlyData:
- // RFC 8446, Section 4.2.10
- m.earlyData = true
- case extensionPSKModes:
- // RFC 8446, Section 4.2.9
- if !readUint8LengthPrefixed(&extData, &m.pskModes) {
- return false
- }
- case extensionPreSharedKey:
- // RFC 8446, Section 4.2.11
- if !extensions.Empty() {
- return false // pre_shared_key must be the last extension
- }
- var identities cryptobyte.String
- if !extData.ReadUint16LengthPrefixed(&identities) || identities.Empty() {
- return false
- }
- for !identities.Empty() {
- var psk pskIdentity
- if !readUint16LengthPrefixed(&identities, &psk.label) ||
- !identities.ReadUint32(&psk.obfuscatedTicketAge) ||
- len(psk.label) == 0 {
- return false
- }
- m.pskIdentities = append(m.pskIdentities, psk)
- }
- var binders cryptobyte.String
- if !extData.ReadUint16LengthPrefixed(&binders) || binders.Empty() {
- return false
- }
- for !binders.Empty() {
- var binder []byte
- if !readUint8LengthPrefixed(&binders, &binder) ||
- len(binder) == 0 {
- return false
- }
- m.pskBinders = append(m.pskBinders, binder)
- }
- default:
- m.additionalExtensions = append(m.additionalExtensions, Extension{Type: ext, Data: extData})
- continue
- }
-
- if !extData.Empty() {
- return false
- }
- }
-
- return true
-}
-
-type serverHelloMsg struct {
- raw []byte
- vers uint16
- random []byte
- sessionId []byte
- cipherSuite uint16
- compressionMethod uint8
- ocspStapling bool
- ticketSupported bool
- secureRenegotiationSupported bool
- secureRenegotiation []byte
- alpnProtocol string
- scts [][]byte
- supportedVersion uint16
- serverShare keyShare
- selectedIdentityPresent bool
- selectedIdentity uint16
- supportedPoints []uint8
-
- // HelloRetryRequest extensions
- cookie []byte
- selectedGroup CurveID
-}
-
-func (m *serverHelloMsg) marshal() []byte {
- if m.raw != nil {
- return m.raw
- }
-
- var b cryptobyte.Builder
- b.AddUint8(typeServerHello)
- b.AddUint24LengthPrefixed(func(b *cryptobyte.Builder) {
- b.AddUint16(m.vers)
- addBytesWithLength(b, m.random, 32)
- b.AddUint8LengthPrefixed(func(b *cryptobyte.Builder) {
- b.AddBytes(m.sessionId)
- })
- b.AddUint16(m.cipherSuite)
- b.AddUint8(m.compressionMethod)
-
- // If extensions aren't present, omit them.
- var extensionsPresent bool
- bWithoutExtensions := *b
-
- b.AddUint16LengthPrefixed(func(b *cryptobyte.Builder) {
- if m.ocspStapling {
- b.AddUint16(extensionStatusRequest)
- b.AddUint16(0) // empty extension_data
- }
- if m.ticketSupported {
- b.AddUint16(extensionSessionTicket)
- b.AddUint16(0) // empty extension_data
- }
- if m.secureRenegotiationSupported {
- b.AddUint16(extensionRenegotiationInfo)
- b.AddUint16LengthPrefixed(func(b *cryptobyte.Builder) {
- b.AddUint8LengthPrefixed(func(b *cryptobyte.Builder) {
- b.AddBytes(m.secureRenegotiation)
- })
- })
- }
- if len(m.alpnProtocol) > 0 {
- b.AddUint16(extensionALPN)
- b.AddUint16LengthPrefixed(func(b *cryptobyte.Builder) {
- b.AddUint16LengthPrefixed(func(b *cryptobyte.Builder) {
- b.AddUint8LengthPrefixed(func(b *cryptobyte.Builder) {
- b.AddBytes([]byte(m.alpnProtocol))
- })
- })
- })
- }
- if len(m.scts) > 0 {
- b.AddUint16(extensionSCT)
- b.AddUint16LengthPrefixed(func(b *cryptobyte.Builder) {
- b.AddUint16LengthPrefixed(func(b *cryptobyte.Builder) {
- for _, sct := range m.scts {
- b.AddUint16LengthPrefixed(func(b *cryptobyte.Builder) {
- b.AddBytes(sct)
- })
- }
- })
- })
- }
- if m.supportedVersion != 0 {
- b.AddUint16(extensionSupportedVersions)
- b.AddUint16LengthPrefixed(func(b *cryptobyte.Builder) {
- b.AddUint16(m.supportedVersion)
- })
- }
- if m.serverShare.group != 0 {
- b.AddUint16(extensionKeyShare)
- b.AddUint16LengthPrefixed(func(b *cryptobyte.Builder) {
- b.AddUint16(uint16(m.serverShare.group))
- b.AddUint16LengthPrefixed(func(b *cryptobyte.Builder) {
- b.AddBytes(m.serverShare.data)
- })
- })
- }
- if m.selectedIdentityPresent {
- b.AddUint16(extensionPreSharedKey)
- b.AddUint16LengthPrefixed(func(b *cryptobyte.Builder) {
- b.AddUint16(m.selectedIdentity)
- })
- }
-
- if len(m.cookie) > 0 {
- b.AddUint16(extensionCookie)
- b.AddUint16LengthPrefixed(func(b *cryptobyte.Builder) {
- b.AddUint16LengthPrefixed(func(b *cryptobyte.Builder) {
- b.AddBytes(m.cookie)
- })
- })
- }
- if m.selectedGroup != 0 {
- b.AddUint16(extensionKeyShare)
- b.AddUint16LengthPrefixed(func(b *cryptobyte.Builder) {
- b.AddUint16(uint16(m.selectedGroup))
- })
- }
- if len(m.supportedPoints) > 0 {
- b.AddUint16(extensionSupportedPoints)
- b.AddUint16LengthPrefixed(func(b *cryptobyte.Builder) {
- b.AddUint8LengthPrefixed(func(b *cryptobyte.Builder) {
- b.AddBytes(m.supportedPoints)
- })
- })
- }
-
- extensionsPresent = len(b.BytesOrPanic()) > 2
- })
-
- if !extensionsPresent {
- *b = bWithoutExtensions
- }
- })
-
- m.raw = b.BytesOrPanic()
- return m.raw
-}
-
-func (m *serverHelloMsg) unmarshal(data []byte) bool {
- *m = serverHelloMsg{raw: data}
- s := cryptobyte.String(data)
-
- if !s.Skip(4) || // message type and uint24 length field
- !s.ReadUint16(&m.vers) || !s.ReadBytes(&m.random, 32) ||
- !readUint8LengthPrefixed(&s, &m.sessionId) ||
- !s.ReadUint16(&m.cipherSuite) ||
- !s.ReadUint8(&m.compressionMethod) {
- return false
- }
-
- if s.Empty() {
- // ServerHello is optionally followed by extension data
- return true
- }
-
- var extensions cryptobyte.String
- if !s.ReadUint16LengthPrefixed(&extensions) || !s.Empty() {
- return false
- }
-
- for !extensions.Empty() {
- var extension uint16
- var extData cryptobyte.String
- if !extensions.ReadUint16(&extension) ||
- !extensions.ReadUint16LengthPrefixed(&extData) {
- return false
- }
-
- switch extension {
- case extensionStatusRequest:
- m.ocspStapling = true
- case extensionSessionTicket:
- m.ticketSupported = true
- case extensionRenegotiationInfo:
- if !readUint8LengthPrefixed(&extData, &m.secureRenegotiation) {
- return false
- }
- m.secureRenegotiationSupported = true
- case extensionALPN:
- var protoList cryptobyte.String
- if !extData.ReadUint16LengthPrefixed(&protoList) || protoList.Empty() {
- return false
- }
- var proto cryptobyte.String
- if !protoList.ReadUint8LengthPrefixed(&proto) ||
- proto.Empty() || !protoList.Empty() {
- return false
- }
- m.alpnProtocol = string(proto)
- case extensionSCT:
- var sctList cryptobyte.String
- if !extData.ReadUint16LengthPrefixed(&sctList) || sctList.Empty() {
- return false
- }
- for !sctList.Empty() {
- var sct []byte
- if !readUint16LengthPrefixed(&sctList, &sct) ||
- len(sct) == 0 {
- return false
- }
- m.scts = append(m.scts, sct)
- }
- case extensionSupportedVersions:
- if !extData.ReadUint16(&m.supportedVersion) {
- return false
- }
- case extensionCookie:
- if !readUint16LengthPrefixed(&extData, &m.cookie) ||
- len(m.cookie) == 0 {
- return false
- }
- case extensionKeyShare:
- // This extension has different formats in SH and HRR, accept either
- // and let the handshake logic decide. See RFC 8446, Section 4.2.8.
- if len(extData) == 2 {
- if !extData.ReadUint16((*uint16)(&m.selectedGroup)) {
- return false
- }
- } else {
- if !extData.ReadUint16((*uint16)(&m.serverShare.group)) ||
- !readUint16LengthPrefixed(&extData, &m.serverShare.data) {
- return false
- }
- }
- case extensionPreSharedKey:
- m.selectedIdentityPresent = true
- if !extData.ReadUint16(&m.selectedIdentity) {
- return false
- }
- case extensionSupportedPoints:
- // RFC 4492, Section 5.1.2
- if !readUint8LengthPrefixed(&extData, &m.supportedPoints) ||
- len(m.supportedPoints) == 0 {
- return false
- }
- default:
- // Ignore unknown extensions.
- continue
- }
-
- if !extData.Empty() {
- return false
- }
- }
-
- return true
-}
-
-type encryptedExtensionsMsg struct {
- raw []byte
- alpnProtocol string
- earlyData bool
-
- additionalExtensions []Extension
-}
-
-func (m *encryptedExtensionsMsg) marshal() []byte {
- if m.raw != nil {
- return m.raw
- }
-
- var b cryptobyte.Builder
- b.AddUint8(typeEncryptedExtensions)
- b.AddUint24LengthPrefixed(func(b *cryptobyte.Builder) {
- b.AddUint16LengthPrefixed(func(b *cryptobyte.Builder) {
- if len(m.alpnProtocol) > 0 {
- b.AddUint16(extensionALPN)
- b.AddUint16LengthPrefixed(func(b *cryptobyte.Builder) {
- b.AddUint16LengthPrefixed(func(b *cryptobyte.Builder) {
- b.AddUint8LengthPrefixed(func(b *cryptobyte.Builder) {
- b.AddBytes([]byte(m.alpnProtocol))
- })
- })
- })
- }
- if m.earlyData {
- // RFC 8446, Section 4.2.10
- b.AddUint16(extensionEarlyData)
- b.AddUint16(0) // empty extension_data
- }
- for _, ext := range m.additionalExtensions {
- b.AddUint16(ext.Type)
- b.AddUint16LengthPrefixed(func(b *cryptobyte.Builder) {
- b.AddBytes(ext.Data)
- })
- }
- })
- })
-
- m.raw = b.BytesOrPanic()
- return m.raw
-}
-
-func (m *encryptedExtensionsMsg) unmarshal(data []byte) bool {
- *m = encryptedExtensionsMsg{raw: data}
- s := cryptobyte.String(data)
-
- var extensions cryptobyte.String
- if !s.Skip(4) || // message type and uint24 length field
- !s.ReadUint16LengthPrefixed(&extensions) || !s.Empty() {
- return false
- }
-
- for !extensions.Empty() {
- var ext uint16
- var extData cryptobyte.String
- if !extensions.ReadUint16(&ext) ||
- !extensions.ReadUint16LengthPrefixed(&extData) {
- return false
- }
-
- switch ext {
- case extensionALPN:
- var protoList cryptobyte.String
- if !extData.ReadUint16LengthPrefixed(&protoList) || protoList.Empty() {
- return false
- }
- var proto cryptobyte.String
- if !protoList.ReadUint8LengthPrefixed(&proto) ||
- proto.Empty() || !protoList.Empty() {
- return false
- }
- m.alpnProtocol = string(proto)
- case extensionEarlyData:
- m.earlyData = true
- default:
- m.additionalExtensions = append(m.additionalExtensions, Extension{Type: ext, Data: extData})
- continue
- }
-
- if !extData.Empty() {
- return false
- }
- }
-
- return true
-}
-
-type endOfEarlyDataMsg struct{}
-
-func (m *endOfEarlyDataMsg) marshal() []byte {
- x := make([]byte, 4)
- x[0] = typeEndOfEarlyData
- return x
-}
-
-func (m *endOfEarlyDataMsg) unmarshal(data []byte) bool {
- return len(data) == 4
-}
-
-type keyUpdateMsg struct {
- raw []byte
- updateRequested bool
-}
-
-func (m *keyUpdateMsg) marshal() []byte {
- if m.raw != nil {
- return m.raw
- }
-
- var b cryptobyte.Builder
- b.AddUint8(typeKeyUpdate)
- b.AddUint24LengthPrefixed(func(b *cryptobyte.Builder) {
- if m.updateRequested {
- b.AddUint8(1)
- } else {
- b.AddUint8(0)
- }
- })
-
- m.raw = b.BytesOrPanic()
- return m.raw
-}
-
-func (m *keyUpdateMsg) unmarshal(data []byte) bool {
- m.raw = data
- s := cryptobyte.String(data)
-
- var updateRequested uint8
- if !s.Skip(4) || // message type and uint24 length field
- !s.ReadUint8(&updateRequested) || !s.Empty() {
- return false
- }
- switch updateRequested {
- case 0:
- m.updateRequested = false
- case 1:
- m.updateRequested = true
- default:
- return false
- }
- return true
-}
-
-type newSessionTicketMsgTLS13 struct {
- raw []byte
- lifetime uint32
- ageAdd uint32
- nonce []byte
- label []byte
- maxEarlyData uint32
-}
-
-func (m *newSessionTicketMsgTLS13) marshal() []byte {
- if m.raw != nil {
- return m.raw
- }
-
- var b cryptobyte.Builder
- b.AddUint8(typeNewSessionTicket)
- b.AddUint24LengthPrefixed(func(b *cryptobyte.Builder) {
- b.AddUint32(m.lifetime)
- b.AddUint32(m.ageAdd)
- b.AddUint8LengthPrefixed(func(b *cryptobyte.Builder) {
- b.AddBytes(m.nonce)
- })
- b.AddUint16LengthPrefixed(func(b *cryptobyte.Builder) {
- b.AddBytes(m.label)
- })
-
- b.AddUint16LengthPrefixed(func(b *cryptobyte.Builder) {
- if m.maxEarlyData > 0 {
- b.AddUint16(extensionEarlyData)
- b.AddUint16LengthPrefixed(func(b *cryptobyte.Builder) {
- b.AddUint32(m.maxEarlyData)
- })
- }
- })
- })
-
- m.raw = b.BytesOrPanic()
- return m.raw
-}
-
-func (m *newSessionTicketMsgTLS13) unmarshal(data []byte) bool {
- *m = newSessionTicketMsgTLS13{raw: data}
- s := cryptobyte.String(data)
-
- var extensions cryptobyte.String
- if !s.Skip(4) || // message type and uint24 length field
- !s.ReadUint32(&m.lifetime) ||
- !s.ReadUint32(&m.ageAdd) ||
- !readUint8LengthPrefixed(&s, &m.nonce) ||
- !readUint16LengthPrefixed(&s, &m.label) ||
- !s.ReadUint16LengthPrefixed(&extensions) ||
- !s.Empty() {
- return false
- }
-
- for !extensions.Empty() {
- var extension uint16
- var extData cryptobyte.String
- if !extensions.ReadUint16(&extension) ||
- !extensions.ReadUint16LengthPrefixed(&extData) {
- return false
- }
-
- switch extension {
- case extensionEarlyData:
- if !extData.ReadUint32(&m.maxEarlyData) {
- return false
- }
- default:
- // Ignore unknown extensions.
- continue
- }
-
- if !extData.Empty() {
- return false
- }
- }
-
- return true
-}
-
-type certificateRequestMsgTLS13 struct {
- raw []byte
- ocspStapling bool
- scts bool
- supportedSignatureAlgorithms []SignatureScheme
- supportedSignatureAlgorithmsCert []SignatureScheme
- certificateAuthorities [][]byte
-}
-
-func (m *certificateRequestMsgTLS13) marshal() []byte {
- if m.raw != nil {
- return m.raw
- }
-
- var b cryptobyte.Builder
- b.AddUint8(typeCertificateRequest)
- b.AddUint24LengthPrefixed(func(b *cryptobyte.Builder) {
- // certificate_request_context (SHALL be zero length unless used for
- // post-handshake authentication)
- b.AddUint8(0)
-
- b.AddUint16LengthPrefixed(func(b *cryptobyte.Builder) {
- if m.ocspStapling {
- b.AddUint16(extensionStatusRequest)
- b.AddUint16(0) // empty extension_data
- }
- if m.scts {
- // RFC 8446, Section 4.4.2.1 makes no mention of
- // signed_certificate_timestamp in CertificateRequest, but
- // "Extensions in the Certificate message from the client MUST
- // correspond to extensions in the CertificateRequest message
- // from the server." and it appears in the table in Section 4.2.
- b.AddUint16(extensionSCT)
- b.AddUint16(0) // empty extension_data
- }
- if len(m.supportedSignatureAlgorithms) > 0 {
- b.AddUint16(extensionSignatureAlgorithms)
- b.AddUint16LengthPrefixed(func(b *cryptobyte.Builder) {
- b.AddUint16LengthPrefixed(func(b *cryptobyte.Builder) {
- for _, sigAlgo := range m.supportedSignatureAlgorithms {
- b.AddUint16(uint16(sigAlgo))
- }
- })
- })
- }
- if len(m.supportedSignatureAlgorithmsCert) > 0 {
- b.AddUint16(extensionSignatureAlgorithmsCert)
- b.AddUint16LengthPrefixed(func(b *cryptobyte.Builder) {
- b.AddUint16LengthPrefixed(func(b *cryptobyte.Builder) {
- for _, sigAlgo := range m.supportedSignatureAlgorithmsCert {
- b.AddUint16(uint16(sigAlgo))
- }
- })
- })
- }
- if len(m.certificateAuthorities) > 0 {
- b.AddUint16(extensionCertificateAuthorities)
- b.AddUint16LengthPrefixed(func(b *cryptobyte.Builder) {
- b.AddUint16LengthPrefixed(func(b *cryptobyte.Builder) {
- for _, ca := range m.certificateAuthorities {
- b.AddUint16LengthPrefixed(func(b *cryptobyte.Builder) {
- b.AddBytes(ca)
- })
- }
- })
- })
- }
- })
- })
-
- m.raw = b.BytesOrPanic()
- return m.raw
-}
-
-func (m *certificateRequestMsgTLS13) unmarshal(data []byte) bool {
- *m = certificateRequestMsgTLS13{raw: data}
- s := cryptobyte.String(data)
-
- var context, extensions cryptobyte.String
- if !s.Skip(4) || // message type and uint24 length field
- !s.ReadUint8LengthPrefixed(&context) || !context.Empty() ||
- !s.ReadUint16LengthPrefixed(&extensions) ||
- !s.Empty() {
- return false
- }
-
- for !extensions.Empty() {
- var extension uint16
- var extData cryptobyte.String
- if !extensions.ReadUint16(&extension) ||
- !extensions.ReadUint16LengthPrefixed(&extData) {
- return false
- }
-
- switch extension {
- case extensionStatusRequest:
- m.ocspStapling = true
- case extensionSCT:
- m.scts = true
- case extensionSignatureAlgorithms:
- var sigAndAlgs cryptobyte.String
- if !extData.ReadUint16LengthPrefixed(&sigAndAlgs) || sigAndAlgs.Empty() {
- return false
- }
- for !sigAndAlgs.Empty() {
- var sigAndAlg uint16
- if !sigAndAlgs.ReadUint16(&sigAndAlg) {
- return false
- }
- m.supportedSignatureAlgorithms = append(
- m.supportedSignatureAlgorithms, SignatureScheme(sigAndAlg))
- }
- case extensionSignatureAlgorithmsCert:
- var sigAndAlgs cryptobyte.String
- if !extData.ReadUint16LengthPrefixed(&sigAndAlgs) || sigAndAlgs.Empty() {
- return false
- }
- for !sigAndAlgs.Empty() {
- var sigAndAlg uint16
- if !sigAndAlgs.ReadUint16(&sigAndAlg) {
- return false
- }
- m.supportedSignatureAlgorithmsCert = append(
- m.supportedSignatureAlgorithmsCert, SignatureScheme(sigAndAlg))
- }
- case extensionCertificateAuthorities:
- var auths cryptobyte.String
- if !extData.ReadUint16LengthPrefixed(&auths) || auths.Empty() {
- return false
- }
- for !auths.Empty() {
- var ca []byte
- if !readUint16LengthPrefixed(&auths, &ca) || len(ca) == 0 {
- return false
- }
- m.certificateAuthorities = append(m.certificateAuthorities, ca)
- }
- default:
- // Ignore unknown extensions.
- continue
- }
-
- if !extData.Empty() {
- return false
- }
- }
-
- return true
-}
-
-type certificateMsg struct {
- raw []byte
- certificates [][]byte
-}
-
-func (m *certificateMsg) marshal() (x []byte) {
- if m.raw != nil {
- return m.raw
- }
-
- var i int
- for _, slice := range m.certificates {
- i += len(slice)
- }
-
- length := 3 + 3*len(m.certificates) + i
- x = make([]byte, 4+length)
- x[0] = typeCertificate
- x[1] = uint8(length >> 16)
- x[2] = uint8(length >> 8)
- x[3] = uint8(length)
-
- certificateOctets := length - 3
- x[4] = uint8(certificateOctets >> 16)
- x[5] = uint8(certificateOctets >> 8)
- x[6] = uint8(certificateOctets)
-
- y := x[7:]
- for _, slice := range m.certificates {
- y[0] = uint8(len(slice) >> 16)
- y[1] = uint8(len(slice) >> 8)
- y[2] = uint8(len(slice))
- copy(y[3:], slice)
- y = y[3+len(slice):]
- }
-
- m.raw = x
- return
-}
-
-func (m *certificateMsg) unmarshal(data []byte) bool {
- if len(data) < 7 {
- return false
- }
-
- m.raw = data
- certsLen := uint32(data[4])<<16 | uint32(data[5])<<8 | uint32(data[6])
- if uint32(len(data)) != certsLen+7 {
- return false
- }
-
- numCerts := 0
- d := data[7:]
- for certsLen > 0 {
- if len(d) < 4 {
- return false
- }
- certLen := uint32(d[0])<<16 | uint32(d[1])<<8 | uint32(d[2])
- if uint32(len(d)) < 3+certLen {
- return false
- }
- d = d[3+certLen:]
- certsLen -= 3 + certLen
- numCerts++
- }
-
- m.certificates = make([][]byte, numCerts)
- d = data[7:]
- for i := 0; i < numCerts; i++ {
- certLen := uint32(d[0])<<16 | uint32(d[1])<<8 | uint32(d[2])
- m.certificates[i] = d[3 : 3+certLen]
- d = d[3+certLen:]
- }
-
- return true
-}
-
-type certificateMsgTLS13 struct {
- raw []byte
- certificate Certificate
- ocspStapling bool
- scts bool
-}
-
-func (m *certificateMsgTLS13) marshal() []byte {
- if m.raw != nil {
- return m.raw
- }
-
- var b cryptobyte.Builder
- b.AddUint8(typeCertificate)
- b.AddUint24LengthPrefixed(func(b *cryptobyte.Builder) {
- b.AddUint8(0) // certificate_request_context
-
- certificate := m.certificate
- if !m.ocspStapling {
- certificate.OCSPStaple = nil
- }
- if !m.scts {
- certificate.SignedCertificateTimestamps = nil
- }
- marshalCertificate(b, certificate)
- })
-
- m.raw = b.BytesOrPanic()
- return m.raw
-}
-
-func marshalCertificate(b *cryptobyte.Builder, certificate Certificate) {
- b.AddUint24LengthPrefixed(func(b *cryptobyte.Builder) {
- for i, cert := range certificate.Certificate {
- b.AddUint24LengthPrefixed(func(b *cryptobyte.Builder) {
- b.AddBytes(cert)
- })
- b.AddUint16LengthPrefixed(func(b *cryptobyte.Builder) {
- if i > 0 {
- // This library only supports OCSP and SCT for leaf certificates.
- return
- }
- if certificate.OCSPStaple != nil {
- b.AddUint16(extensionStatusRequest)
- b.AddUint16LengthPrefixed(func(b *cryptobyte.Builder) {
- b.AddUint8(statusTypeOCSP)
- b.AddUint24LengthPrefixed(func(b *cryptobyte.Builder) {
- b.AddBytes(certificate.OCSPStaple)
- })
- })
- }
- if certificate.SignedCertificateTimestamps != nil {
- b.AddUint16(extensionSCT)
- b.AddUint16LengthPrefixed(func(b *cryptobyte.Builder) {
- b.AddUint16LengthPrefixed(func(b *cryptobyte.Builder) {
- for _, sct := range certificate.SignedCertificateTimestamps {
- b.AddUint16LengthPrefixed(func(b *cryptobyte.Builder) {
- b.AddBytes(sct)
- })
- }
- })
- })
- }
- })
- }
- })
-}
-
-func (m *certificateMsgTLS13) unmarshal(data []byte) bool {
- *m = certificateMsgTLS13{raw: data}
- s := cryptobyte.String(data)
-
- var context cryptobyte.String
- if !s.Skip(4) || // message type and uint24 length field
- !s.ReadUint8LengthPrefixed(&context) || !context.Empty() ||
- !unmarshalCertificate(&s, &m.certificate) ||
- !s.Empty() {
- return false
- }
-
- m.scts = m.certificate.SignedCertificateTimestamps != nil
- m.ocspStapling = m.certificate.OCSPStaple != nil
-
- return true
-}
-
-func unmarshalCertificate(s *cryptobyte.String, certificate *Certificate) bool {
- var certList cryptobyte.String
- if !s.ReadUint24LengthPrefixed(&certList) {
- return false
- }
- for !certList.Empty() {
- var cert []byte
- var extensions cryptobyte.String
- if !readUint24LengthPrefixed(&certList, &cert) ||
- !certList.ReadUint16LengthPrefixed(&extensions) {
- return false
- }
- certificate.Certificate = append(certificate.Certificate, cert)
- for !extensions.Empty() {
- var extension uint16
- var extData cryptobyte.String
- if !extensions.ReadUint16(&extension) ||
- !extensions.ReadUint16LengthPrefixed(&extData) {
- return false
- }
- if len(certificate.Certificate) > 1 {
- // This library only supports OCSP and SCT for leaf certificates.
- continue
- }
-
- switch extension {
- case extensionStatusRequest:
- var statusType uint8
- if !extData.ReadUint8(&statusType) || statusType != statusTypeOCSP ||
- !readUint24LengthPrefixed(&extData, &certificate.OCSPStaple) ||
- len(certificate.OCSPStaple) == 0 {
- return false
- }
- case extensionSCT:
- var sctList cryptobyte.String
- if !extData.ReadUint16LengthPrefixed(&sctList) || sctList.Empty() {
- return false
- }
- for !sctList.Empty() {
- var sct []byte
- if !readUint16LengthPrefixed(&sctList, &sct) ||
- len(sct) == 0 {
- return false
- }
- certificate.SignedCertificateTimestamps = append(
- certificate.SignedCertificateTimestamps, sct)
- }
- default:
- // Ignore unknown extensions.
- continue
- }
-
- if !extData.Empty() {
- return false
- }
- }
- }
- return true
-}
-
-type serverKeyExchangeMsg struct {
- raw []byte
- key []byte
-}
-
-func (m *serverKeyExchangeMsg) marshal() []byte {
- if m.raw != nil {
- return m.raw
- }
- length := len(m.key)
- x := make([]byte, length+4)
- x[0] = typeServerKeyExchange
- x[1] = uint8(length >> 16)
- x[2] = uint8(length >> 8)
- x[3] = uint8(length)
- copy(x[4:], m.key)
-
- m.raw = x
- return x
-}
-
-func (m *serverKeyExchangeMsg) unmarshal(data []byte) bool {
- m.raw = data
- if len(data) < 4 {
- return false
- }
- m.key = data[4:]
- return true
-}
-
-type certificateStatusMsg struct {
- raw []byte
- response []byte
-}
-
-func (m *certificateStatusMsg) marshal() []byte {
- if m.raw != nil {
- return m.raw
- }
-
- var b cryptobyte.Builder
- b.AddUint8(typeCertificateStatus)
- b.AddUint24LengthPrefixed(func(b *cryptobyte.Builder) {
- b.AddUint8(statusTypeOCSP)
- b.AddUint24LengthPrefixed(func(b *cryptobyte.Builder) {
- b.AddBytes(m.response)
- })
- })
-
- m.raw = b.BytesOrPanic()
- return m.raw
-}
-
-func (m *certificateStatusMsg) unmarshal(data []byte) bool {
- m.raw = data
- s := cryptobyte.String(data)
-
- var statusType uint8
- if !s.Skip(4) || // message type and uint24 length field
- !s.ReadUint8(&statusType) || statusType != statusTypeOCSP ||
- !readUint24LengthPrefixed(&s, &m.response) ||
- len(m.response) == 0 || !s.Empty() {
- return false
- }
- return true
-}
-
-type serverHelloDoneMsg struct{}
-
-func (m *serverHelloDoneMsg) marshal() []byte {
- x := make([]byte, 4)
- x[0] = typeServerHelloDone
- return x
-}
-
-func (m *serverHelloDoneMsg) unmarshal(data []byte) bool {
- return len(data) == 4
-}
-
-type clientKeyExchangeMsg struct {
- raw []byte
- ciphertext []byte
-}
-
-func (m *clientKeyExchangeMsg) marshal() []byte {
- if m.raw != nil {
- return m.raw
- }
- length := len(m.ciphertext)
- x := make([]byte, length+4)
- x[0] = typeClientKeyExchange
- x[1] = uint8(length >> 16)
- x[2] = uint8(length >> 8)
- x[3] = uint8(length)
- copy(x[4:], m.ciphertext)
-
- m.raw = x
- return x
-}
-
-func (m *clientKeyExchangeMsg) unmarshal(data []byte) bool {
- m.raw = data
- if len(data) < 4 {
- return false
- }
- l := int(data[1])<<16 | int(data[2])<<8 | int(data[3])
- if l != len(data)-4 {
- return false
- }
- m.ciphertext = data[4:]
- return true
-}
-
-type finishedMsg struct {
- raw []byte
- verifyData []byte
-}
-
-func (m *finishedMsg) marshal() []byte {
- if m.raw != nil {
- return m.raw
- }
-
- var b cryptobyte.Builder
- b.AddUint8(typeFinished)
- b.AddUint24LengthPrefixed(func(b *cryptobyte.Builder) {
- b.AddBytes(m.verifyData)
- })
-
- m.raw = b.BytesOrPanic()
- return m.raw
-}
-
-func (m *finishedMsg) unmarshal(data []byte) bool {
- m.raw = data
- s := cryptobyte.String(data)
- return s.Skip(1) &&
- readUint24LengthPrefixed(&s, &m.verifyData) &&
- s.Empty()
-}
-
-type certificateRequestMsg struct {
- raw []byte
- // hasSignatureAlgorithm indicates whether this message includes a list of
- // supported signature algorithms. This change was introduced with TLS 1.2.
- hasSignatureAlgorithm bool
-
- certificateTypes []byte
- supportedSignatureAlgorithms []SignatureScheme
- certificateAuthorities [][]byte
-}
-
-func (m *certificateRequestMsg) marshal() (x []byte) {
- if m.raw != nil {
- return m.raw
- }
-
- // See RFC 4346, Section 7.4.4.
- length := 1 + len(m.certificateTypes) + 2
- casLength := 0
- for _, ca := range m.certificateAuthorities {
- casLength += 2 + len(ca)
- }
- length += casLength
-
- if m.hasSignatureAlgorithm {
- length += 2 + 2*len(m.supportedSignatureAlgorithms)
- }
-
- x = make([]byte, 4+length)
- x[0] = typeCertificateRequest
- x[1] = uint8(length >> 16)
- x[2] = uint8(length >> 8)
- x[3] = uint8(length)
-
- x[4] = uint8(len(m.certificateTypes))
-
- copy(x[5:], m.certificateTypes)
- y := x[5+len(m.certificateTypes):]
-
- if m.hasSignatureAlgorithm {
- n := len(m.supportedSignatureAlgorithms) * 2
- y[0] = uint8(n >> 8)
- y[1] = uint8(n)
- y = y[2:]
- for _, sigAlgo := range m.supportedSignatureAlgorithms {
- y[0] = uint8(sigAlgo >> 8)
- y[1] = uint8(sigAlgo)
- y = y[2:]
- }
- }
-
- y[0] = uint8(casLength >> 8)
- y[1] = uint8(casLength)
- y = y[2:]
- for _, ca := range m.certificateAuthorities {
- y[0] = uint8(len(ca) >> 8)
- y[1] = uint8(len(ca))
- y = y[2:]
- copy(y, ca)
- y = y[len(ca):]
- }
-
- m.raw = x
- return
-}
-
-func (m *certificateRequestMsg) unmarshal(data []byte) bool {
- m.raw = data
-
- if len(data) < 5 {
- return false
- }
-
- length := uint32(data[1])<<16 | uint32(data[2])<<8 | uint32(data[3])
- if uint32(len(data))-4 != length {
- return false
- }
-
- numCertTypes := int(data[4])
- data = data[5:]
- if numCertTypes == 0 || len(data) <= numCertTypes {
- return false
- }
-
- m.certificateTypes = make([]byte, numCertTypes)
- if copy(m.certificateTypes, data) != numCertTypes {
- return false
- }
-
- data = data[numCertTypes:]
-
- if m.hasSignatureAlgorithm {
- if len(data) < 2 {
- return false
- }
- sigAndHashLen := uint16(data[0])<<8 | uint16(data[1])
- data = data[2:]
- if sigAndHashLen&1 != 0 {
- return false
- }
- if len(data) < int(sigAndHashLen) {
- return false
- }
- numSigAlgos := sigAndHashLen / 2
- m.supportedSignatureAlgorithms = make([]SignatureScheme, numSigAlgos)
- for i := range m.supportedSignatureAlgorithms {
- m.supportedSignatureAlgorithms[i] = SignatureScheme(data[0])<<8 | SignatureScheme(data[1])
- data = data[2:]
- }
- }
-
- if len(data) < 2 {
- return false
- }
- casLength := uint16(data[0])<<8 | uint16(data[1])
- data = data[2:]
- if len(data) < int(casLength) {
- return false
- }
- cas := make([]byte, casLength)
- copy(cas, data)
- data = data[casLength:]
-
- m.certificateAuthorities = nil
- for len(cas) > 0 {
- if len(cas) < 2 {
- return false
- }
- caLen := uint16(cas[0])<<8 | uint16(cas[1])
- cas = cas[2:]
-
- if len(cas) < int(caLen) {
- return false
- }
-
- m.certificateAuthorities = append(m.certificateAuthorities, cas[:caLen])
- cas = cas[caLen:]
- }
-
- return len(data) == 0
-}
-
-type certificateVerifyMsg struct {
- raw []byte
- hasSignatureAlgorithm bool // format change introduced in TLS 1.2
- signatureAlgorithm SignatureScheme
- signature []byte
-}
-
-func (m *certificateVerifyMsg) marshal() (x []byte) {
- if m.raw != nil {
- return m.raw
- }
-
- var b cryptobyte.Builder
- b.AddUint8(typeCertificateVerify)
- b.AddUint24LengthPrefixed(func(b *cryptobyte.Builder) {
- if m.hasSignatureAlgorithm {
- b.AddUint16(uint16(m.signatureAlgorithm))
- }
- b.AddUint16LengthPrefixed(func(b *cryptobyte.Builder) {
- b.AddBytes(m.signature)
- })
- })
-
- m.raw = b.BytesOrPanic()
- return m.raw
-}
-
-func (m *certificateVerifyMsg) unmarshal(data []byte) bool {
- m.raw = data
- s := cryptobyte.String(data)
-
- if !s.Skip(4) { // message type and uint24 length field
- return false
- }
- if m.hasSignatureAlgorithm {
- if !s.ReadUint16((*uint16)(&m.signatureAlgorithm)) {
- return false
- }
- }
- return readUint16LengthPrefixed(&s, &m.signature) && s.Empty()
-}
-
-type newSessionTicketMsg struct {
- raw []byte
- ticket []byte
-}
-
-func (m *newSessionTicketMsg) marshal() (x []byte) {
- if m.raw != nil {
- return m.raw
- }
-
- // See RFC 5077, Section 3.3.
- ticketLen := len(m.ticket)
- length := 2 + 4 + ticketLen
- x = make([]byte, 4+length)
- x[0] = typeNewSessionTicket
- x[1] = uint8(length >> 16)
- x[2] = uint8(length >> 8)
- x[3] = uint8(length)
- x[8] = uint8(ticketLen >> 8)
- x[9] = uint8(ticketLen)
- copy(x[10:], m.ticket)
-
- m.raw = x
-
- return
-}
-
-func (m *newSessionTicketMsg) unmarshal(data []byte) bool {
- m.raw = data
-
- if len(data) < 10 {
- return false
- }
-
- length := uint32(data[1])<<16 | uint32(data[2])<<8 | uint32(data[3])
- if uint32(len(data))-4 != length {
- return false
- }
-
- ticketLen := int(data[8])<<8 + int(data[9])
- if len(data)-10 != ticketLen {
- return false
- }
-
- m.ticket = data[10:]
-
- return true
-}
-
-type helloRequestMsg struct {
-}
-
-func (*helloRequestMsg) marshal() []byte {
- return []byte{typeHelloRequest, 0, 0, 0}
-}
-
-func (*helloRequestMsg) unmarshal(data []byte) bool {
- return len(data) == 4
-}
diff --git a/vendor/github.com/marten-seemann/qtls-go1-17/handshake_server.go b/vendor/github.com/marten-seemann/qtls-go1-17/handshake_server.go
deleted file mode 100644
index 7d3557d72..000000000
--- a/vendor/github.com/marten-seemann/qtls-go1-17/handshake_server.go
+++ /dev/null
@@ -1,905 +0,0 @@
-// Copyright 2009 The Go Authors. All rights reserved.
-// Use of this source code is governed by a BSD-style
-// license that can be found in the LICENSE file.
-
-package qtls
-
-import (
- "context"
- "crypto"
- "crypto/ecdsa"
- "crypto/ed25519"
- "crypto/rsa"
- "crypto/subtle"
- "crypto/x509"
- "errors"
- "fmt"
- "hash"
- "io"
- "sync/atomic"
- "time"
-)
-
-// serverHandshakeState contains details of a server handshake in progress.
-// It's discarded once the handshake has completed.
-type serverHandshakeState struct {
- c *Conn
- ctx context.Context
- clientHello *clientHelloMsg
- hello *serverHelloMsg
- suite *cipherSuite
- ecdheOk bool
- ecSignOk bool
- rsaDecryptOk bool
- rsaSignOk bool
- sessionState *sessionState
- finishedHash finishedHash
- masterSecret []byte
- cert *Certificate
-}
-
-// serverHandshake performs a TLS handshake as a server.
-func (c *Conn) serverHandshake(ctx context.Context) error {
- c.setAlternativeRecordLayer()
-
- clientHello, err := c.readClientHello(ctx)
- if err != nil {
- return err
- }
-
- if c.vers == VersionTLS13 {
- hs := serverHandshakeStateTLS13{
- c: c,
- ctx: ctx,
- clientHello: clientHello,
- }
- return hs.handshake()
- } else if c.extraConfig.usesAlternativeRecordLayer() {
- // This should already have been caught by the check that the ClientHello doesn't
- // offer any (supported) versions older than TLS 1.3.
- // Check again to make sure we can't be tricked into using an older version.
- c.sendAlert(alertProtocolVersion)
- return errors.New("tls: negotiated TLS < 1.3 when using QUIC")
- }
-
- hs := serverHandshakeState{
- c: c,
- ctx: ctx,
- clientHello: clientHello,
- }
- return hs.handshake()
-}
-
-func (hs *serverHandshakeState) handshake() error {
- c := hs.c
-
- if err := hs.processClientHello(); err != nil {
- return err
- }
-
- // For an overview of TLS handshaking, see RFC 5246, Section 7.3.
- c.buffering = true
- if hs.checkForResumption() {
- // The client has included a session ticket and so we do an abbreviated handshake.
- c.didResume = true
- if err := hs.doResumeHandshake(); err != nil {
- return err
- }
- if err := hs.establishKeys(); err != nil {
- return err
- }
- if err := hs.sendSessionTicket(); err != nil {
- return err
- }
- if err := hs.sendFinished(c.serverFinished[:]); err != nil {
- return err
- }
- if _, err := c.flush(); err != nil {
- return err
- }
- c.clientFinishedIsFirst = false
- if err := hs.readFinished(nil); err != nil {
- return err
- }
- } else {
- // The client didn't include a session ticket, or it wasn't
- // valid so we do a full handshake.
- if err := hs.pickCipherSuite(); err != nil {
- return err
- }
- if err := hs.doFullHandshake(); err != nil {
- return err
- }
- if err := hs.establishKeys(); err != nil {
- return err
- }
- if err := hs.readFinished(c.clientFinished[:]); err != nil {
- return err
- }
- c.clientFinishedIsFirst = true
- c.buffering = true
- if err := hs.sendSessionTicket(); err != nil {
- return err
- }
- if err := hs.sendFinished(nil); err != nil {
- return err
- }
- if _, err := c.flush(); err != nil {
- return err
- }
- }
-
- c.ekm = ekmFromMasterSecret(c.vers, hs.suite, hs.masterSecret, hs.clientHello.random, hs.hello.random)
- atomic.StoreUint32(&c.handshakeStatus, 1)
-
- return nil
-}
-
-// readClientHello reads a ClientHello message and selects the protocol version.
-func (c *Conn) readClientHello(ctx context.Context) (*clientHelloMsg, error) {
- msg, err := c.readHandshake()
- if err != nil {
- return nil, err
- }
- clientHello, ok := msg.(*clientHelloMsg)
- if !ok {
- c.sendAlert(alertUnexpectedMessage)
- return nil, unexpectedMessageError(clientHello, msg)
- }
-
- var configForClient *config
- originalConfig := c.config
- if c.config.GetConfigForClient != nil {
- chi := newClientHelloInfo(ctx, c, clientHello)
- if cfc, err := c.config.GetConfigForClient(chi); err != nil {
- c.sendAlert(alertInternalError)
- return nil, err
- } else if cfc != nil {
- configForClient = fromConfig(cfc)
- c.config = configForClient
- }
- }
- c.ticketKeys = originalConfig.ticketKeys(configForClient)
-
- clientVersions := clientHello.supportedVersions
- if len(clientHello.supportedVersions) == 0 {
- clientVersions = supportedVersionsFromMax(clientHello.vers)
- }
- if c.extraConfig.usesAlternativeRecordLayer() {
- // In QUIC, the client MUST NOT offer any old TLS versions.
- // Here, we can only check that none of the other supported versions of this library
- // (TLS 1.0 - TLS 1.2) is offered. We don't check for any SSL versions here.
- for _, ver := range clientVersions {
- if ver == VersionTLS13 {
- continue
- }
- for _, v := range supportedVersions {
- if ver == v {
- c.sendAlert(alertProtocolVersion)
- return nil, fmt.Errorf("tls: client offered old TLS version %#x", ver)
- }
- }
- }
- // Make the config we're using allows us to use TLS 1.3.
- if c.config.maxSupportedVersion() < VersionTLS13 {
- c.sendAlert(alertInternalError)
- return nil, errors.New("tls: MaxVersion prevents QUIC from using TLS 1.3")
- }
- }
- c.vers, ok = c.config.mutualVersion(clientVersions)
- if !ok {
- c.sendAlert(alertProtocolVersion)
- return nil, fmt.Errorf("tls: client offered only unsupported versions: %x", clientVersions)
- }
- c.haveVers = true
- c.in.version = c.vers
- c.out.version = c.vers
-
- return clientHello, nil
-}
-
-func (hs *serverHandshakeState) processClientHello() error {
- c := hs.c
-
- hs.hello = new(serverHelloMsg)
- hs.hello.vers = c.vers
-
- foundCompression := false
- // We only support null compression, so check that the client offered it.
- for _, compression := range hs.clientHello.compressionMethods {
- if compression == compressionNone {
- foundCompression = true
- break
- }
- }
-
- if !foundCompression {
- c.sendAlert(alertHandshakeFailure)
- return errors.New("tls: client does not support uncompressed connections")
- }
-
- hs.hello.random = make([]byte, 32)
- serverRandom := hs.hello.random
- // Downgrade protection canaries. See RFC 8446, Section 4.1.3.
- maxVers := c.config.maxSupportedVersion()
- if maxVers >= VersionTLS12 && c.vers < maxVers || testingOnlyForceDowngradeCanary {
- if c.vers == VersionTLS12 {
- copy(serverRandom[24:], downgradeCanaryTLS12)
- } else {
- copy(serverRandom[24:], downgradeCanaryTLS11)
- }
- serverRandom = serverRandom[:24]
- }
- _, err := io.ReadFull(c.config.rand(), serverRandom)
- if err != nil {
- c.sendAlert(alertInternalError)
- return err
- }
-
- if len(hs.clientHello.secureRenegotiation) != 0 {
- c.sendAlert(alertHandshakeFailure)
- return errors.New("tls: initial handshake had non-empty renegotiation extension")
- }
-
- hs.hello.secureRenegotiationSupported = hs.clientHello.secureRenegotiationSupported
- hs.hello.compressionMethod = compressionNone
- if len(hs.clientHello.serverName) > 0 {
- c.serverName = hs.clientHello.serverName
- }
-
- selectedProto, err := negotiateALPN(c.config.NextProtos, hs.clientHello.alpnProtocols)
- if err != nil {
- c.sendAlert(alertNoApplicationProtocol)
- return err
- }
- hs.hello.alpnProtocol = selectedProto
- c.clientProtocol = selectedProto
-
- hs.cert, err = c.config.getCertificate(newClientHelloInfo(hs.ctx, c, hs.clientHello))
- if err != nil {
- if err == errNoCertificates {
- c.sendAlert(alertUnrecognizedName)
- } else {
- c.sendAlert(alertInternalError)
- }
- return err
- }
- if hs.clientHello.scts {
- hs.hello.scts = hs.cert.SignedCertificateTimestamps
- }
-
- hs.ecdheOk = supportsECDHE(c.config, hs.clientHello.supportedCurves, hs.clientHello.supportedPoints)
-
- if hs.ecdheOk {
- // Although omitting the ec_point_formats extension is permitted, some
- // old OpenSSL version will refuse to handshake if not present.
- //
- // Per RFC 4492, section 5.1.2, implementations MUST support the
- // uncompressed point format. See golang.org/issue/31943.
- hs.hello.supportedPoints = []uint8{pointFormatUncompressed}
- }
-
- if priv, ok := hs.cert.PrivateKey.(crypto.Signer); ok {
- switch priv.Public().(type) {
- case *ecdsa.PublicKey:
- hs.ecSignOk = true
- case ed25519.PublicKey:
- hs.ecSignOk = true
- case *rsa.PublicKey:
- hs.rsaSignOk = true
- default:
- c.sendAlert(alertInternalError)
- return fmt.Errorf("tls: unsupported signing key type (%T)", priv.Public())
- }
- }
- if priv, ok := hs.cert.PrivateKey.(crypto.Decrypter); ok {
- switch priv.Public().(type) {
- case *rsa.PublicKey:
- hs.rsaDecryptOk = true
- default:
- c.sendAlert(alertInternalError)
- return fmt.Errorf("tls: unsupported decryption key type (%T)", priv.Public())
- }
- }
-
- return nil
-}
-
-// negotiateALPN picks a shared ALPN protocol that both sides support in server
-// preference order. If ALPN is not configured or the peer doesn't support it,
-// it returns "" and no error.
-func negotiateALPN(serverProtos, clientProtos []string) (string, error) {
- if len(serverProtos) == 0 || len(clientProtos) == 0 {
- return "", nil
- }
- var http11fallback bool
- for _, s := range serverProtos {
- for _, c := range clientProtos {
- if s == c {
- return s, nil
- }
- if s == "h2" && c == "http/1.1" {
- http11fallback = true
- }
- }
- }
- // As a special case, let http/1.1 clients connect to h2 servers as if they
- // didn't support ALPN. We used not to enforce protocol overlap, so over
- // time a number of HTTP servers were configured with only "h2", but
- // expected to accept connections from "http/1.1" clients. See Issue 46310.
- if http11fallback {
- return "", nil
- }
- return "", fmt.Errorf("tls: client requested unsupported application protocols (%s)", clientProtos)
-}
-
-// supportsECDHE returns whether ECDHE key exchanges can be used with this
-// pre-TLS 1.3 client.
-func supportsECDHE(c *config, supportedCurves []CurveID, supportedPoints []uint8) bool {
- supportsCurve := false
- for _, curve := range supportedCurves {
- if c.supportsCurve(curve) {
- supportsCurve = true
- break
- }
- }
-
- supportsPointFormat := false
- for _, pointFormat := range supportedPoints {
- if pointFormat == pointFormatUncompressed {
- supportsPointFormat = true
- break
- }
- }
-
- return supportsCurve && supportsPointFormat
-}
-
-func (hs *serverHandshakeState) pickCipherSuite() error {
- c := hs.c
-
- preferenceOrder := cipherSuitesPreferenceOrder
- if !hasAESGCMHardwareSupport || !aesgcmPreferred(hs.clientHello.cipherSuites) {
- preferenceOrder = cipherSuitesPreferenceOrderNoAES
- }
-
- configCipherSuites := c.config.cipherSuites()
- preferenceList := make([]uint16, 0, len(configCipherSuites))
- for _, suiteID := range preferenceOrder {
- for _, id := range configCipherSuites {
- if id == suiteID {
- preferenceList = append(preferenceList, id)
- break
- }
- }
- }
-
- hs.suite = selectCipherSuite(preferenceList, hs.clientHello.cipherSuites, hs.cipherSuiteOk)
- if hs.suite == nil {
- c.sendAlert(alertHandshakeFailure)
- return errors.New("tls: no cipher suite supported by both client and server")
- }
- c.cipherSuite = hs.suite.id
-
- for _, id := range hs.clientHello.cipherSuites {
- if id == TLS_FALLBACK_SCSV {
- // The client is doing a fallback connection. See RFC 7507.
- if hs.clientHello.vers < c.config.maxSupportedVersion() {
- c.sendAlert(alertInappropriateFallback)
- return errors.New("tls: client using inappropriate protocol fallback")
- }
- break
- }
- }
-
- return nil
-}
-
-func (hs *serverHandshakeState) cipherSuiteOk(c *cipherSuite) bool {
- if c.flags&suiteECDHE != 0 {
- if !hs.ecdheOk {
- return false
- }
- if c.flags&suiteECSign != 0 {
- if !hs.ecSignOk {
- return false
- }
- } else if !hs.rsaSignOk {
- return false
- }
- } else if !hs.rsaDecryptOk {
- return false
- }
- if hs.c.vers < VersionTLS12 && c.flags&suiteTLS12 != 0 {
- return false
- }
- return true
-}
-
-// checkForResumption reports whether we should perform resumption on this connection.
-func (hs *serverHandshakeState) checkForResumption() bool {
- c := hs.c
-
- if c.config.SessionTicketsDisabled {
- return false
- }
-
- plaintext, usedOldKey := c.decryptTicket(hs.clientHello.sessionTicket)
- if plaintext == nil {
- return false
- }
- hs.sessionState = &sessionState{usedOldKey: usedOldKey}
- ok := hs.sessionState.unmarshal(plaintext)
- if !ok {
- return false
- }
-
- createdAt := time.Unix(int64(hs.sessionState.createdAt), 0)
- if c.config.time().Sub(createdAt) > maxSessionTicketLifetime {
- return false
- }
-
- // Never resume a session for a different TLS version.
- if c.vers != hs.sessionState.vers {
- return false
- }
-
- cipherSuiteOk := false
- // Check that the client is still offering the ciphersuite in the session.
- for _, id := range hs.clientHello.cipherSuites {
- if id == hs.sessionState.cipherSuite {
- cipherSuiteOk = true
- break
- }
- }
- if !cipherSuiteOk {
- return false
- }
-
- // Check that we also support the ciphersuite from the session.
- hs.suite = selectCipherSuite([]uint16{hs.sessionState.cipherSuite},
- c.config.cipherSuites(), hs.cipherSuiteOk)
- if hs.suite == nil {
- return false
- }
-
- sessionHasClientCerts := len(hs.sessionState.certificates) != 0
- needClientCerts := requiresClientCert(c.config.ClientAuth)
- if needClientCerts && !sessionHasClientCerts {
- return false
- }
- if sessionHasClientCerts && c.config.ClientAuth == NoClientCert {
- return false
- }
-
- return true
-}
-
-func (hs *serverHandshakeState) doResumeHandshake() error {
- c := hs.c
-
- hs.hello.cipherSuite = hs.suite.id
- c.cipherSuite = hs.suite.id
- // We echo the client's session ID in the ServerHello to let it know
- // that we're doing a resumption.
- hs.hello.sessionId = hs.clientHello.sessionId
- hs.hello.ticketSupported = hs.sessionState.usedOldKey
- hs.finishedHash = newFinishedHash(c.vers, hs.suite)
- hs.finishedHash.discardHandshakeBuffer()
- hs.finishedHash.Write(hs.clientHello.marshal())
- hs.finishedHash.Write(hs.hello.marshal())
- if _, err := c.writeRecord(recordTypeHandshake, hs.hello.marshal()); err != nil {
- return err
- }
-
- if err := c.processCertsFromClient(Certificate{
- Certificate: hs.sessionState.certificates,
- }); err != nil {
- return err
- }
-
- if c.config.VerifyConnection != nil {
- if err := c.config.VerifyConnection(c.connectionStateLocked()); err != nil {
- c.sendAlert(alertBadCertificate)
- return err
- }
- }
-
- hs.masterSecret = hs.sessionState.masterSecret
-
- return nil
-}
-
-func (hs *serverHandshakeState) doFullHandshake() error {
- c := hs.c
-
- if hs.clientHello.ocspStapling && len(hs.cert.OCSPStaple) > 0 {
- hs.hello.ocspStapling = true
- }
-
- hs.hello.ticketSupported = hs.clientHello.ticketSupported && !c.config.SessionTicketsDisabled
- hs.hello.cipherSuite = hs.suite.id
-
- hs.finishedHash = newFinishedHash(hs.c.vers, hs.suite)
- if c.config.ClientAuth == NoClientCert {
- // No need to keep a full record of the handshake if client
- // certificates won't be used.
- hs.finishedHash.discardHandshakeBuffer()
- }
- hs.finishedHash.Write(hs.clientHello.marshal())
- hs.finishedHash.Write(hs.hello.marshal())
- if _, err := c.writeRecord(recordTypeHandshake, hs.hello.marshal()); err != nil {
- return err
- }
-
- certMsg := new(certificateMsg)
- certMsg.certificates = hs.cert.Certificate
- hs.finishedHash.Write(certMsg.marshal())
- if _, err := c.writeRecord(recordTypeHandshake, certMsg.marshal()); err != nil {
- return err
- }
-
- if hs.hello.ocspStapling {
- certStatus := new(certificateStatusMsg)
- certStatus.response = hs.cert.OCSPStaple
- hs.finishedHash.Write(certStatus.marshal())
- if _, err := c.writeRecord(recordTypeHandshake, certStatus.marshal()); err != nil {
- return err
- }
- }
-
- keyAgreement := hs.suite.ka(c.vers)
- skx, err := keyAgreement.generateServerKeyExchange(c.config, hs.cert, hs.clientHello, hs.hello)
- if err != nil {
- c.sendAlert(alertHandshakeFailure)
- return err
- }
- if skx != nil {
- hs.finishedHash.Write(skx.marshal())
- if _, err := c.writeRecord(recordTypeHandshake, skx.marshal()); err != nil {
- return err
- }
- }
-
- var certReq *certificateRequestMsg
- if c.config.ClientAuth >= RequestClientCert {
- // Request a client certificate
- certReq = new(certificateRequestMsg)
- certReq.certificateTypes = []byte{
- byte(certTypeRSASign),
- byte(certTypeECDSASign),
- }
- if c.vers >= VersionTLS12 {
- certReq.hasSignatureAlgorithm = true
- certReq.supportedSignatureAlgorithms = supportedSignatureAlgorithms
- }
-
- // An empty list of certificateAuthorities signals to
- // the client that it may send any certificate in response
- // to our request. When we know the CAs we trust, then
- // we can send them down, so that the client can choose
- // an appropriate certificate to give to us.
- if c.config.ClientCAs != nil {
- certReq.certificateAuthorities = c.config.ClientCAs.Subjects()
- }
- hs.finishedHash.Write(certReq.marshal())
- if _, err := c.writeRecord(recordTypeHandshake, certReq.marshal()); err != nil {
- return err
- }
- }
-
- helloDone := new(serverHelloDoneMsg)
- hs.finishedHash.Write(helloDone.marshal())
- if _, err := c.writeRecord(recordTypeHandshake, helloDone.marshal()); err != nil {
- return err
- }
-
- if _, err := c.flush(); err != nil {
- return err
- }
-
- var pub crypto.PublicKey // public key for client auth, if any
-
- msg, err := c.readHandshake()
- if err != nil {
- return err
- }
-
- // If we requested a client certificate, then the client must send a
- // certificate message, even if it's empty.
- if c.config.ClientAuth >= RequestClientCert {
- certMsg, ok := msg.(*certificateMsg)
- if !ok {
- c.sendAlert(alertUnexpectedMessage)
- return unexpectedMessageError(certMsg, msg)
- }
- hs.finishedHash.Write(certMsg.marshal())
-
- if err := c.processCertsFromClient(Certificate{
- Certificate: certMsg.certificates,
- }); err != nil {
- return err
- }
- if len(certMsg.certificates) != 0 {
- pub = c.peerCertificates[0].PublicKey
- }
-
- msg, err = c.readHandshake()
- if err != nil {
- return err
- }
- }
- if c.config.VerifyConnection != nil {
- if err := c.config.VerifyConnection(c.connectionStateLocked()); err != nil {
- c.sendAlert(alertBadCertificate)
- return err
- }
- }
-
- // Get client key exchange
- ckx, ok := msg.(*clientKeyExchangeMsg)
- if !ok {
- c.sendAlert(alertUnexpectedMessage)
- return unexpectedMessageError(ckx, msg)
- }
- hs.finishedHash.Write(ckx.marshal())
-
- preMasterSecret, err := keyAgreement.processClientKeyExchange(c.config, hs.cert, ckx, c.vers)
- if err != nil {
- c.sendAlert(alertHandshakeFailure)
- return err
- }
- hs.masterSecret = masterFromPreMasterSecret(c.vers, hs.suite, preMasterSecret, hs.clientHello.random, hs.hello.random)
- if err := c.config.writeKeyLog(keyLogLabelTLS12, hs.clientHello.random, hs.masterSecret); err != nil {
- c.sendAlert(alertInternalError)
- return err
- }
-
- // If we received a client cert in response to our certificate request message,
- // the client will send us a certificateVerifyMsg immediately after the
- // clientKeyExchangeMsg. This message is a digest of all preceding
- // handshake-layer messages that is signed using the private key corresponding
- // to the client's certificate. This allows us to verify that the client is in
- // possession of the private key of the certificate.
- if len(c.peerCertificates) > 0 {
- msg, err = c.readHandshake()
- if err != nil {
- return err
- }
- certVerify, ok := msg.(*certificateVerifyMsg)
- if !ok {
- c.sendAlert(alertUnexpectedMessage)
- return unexpectedMessageError(certVerify, msg)
- }
-
- var sigType uint8
- var sigHash crypto.Hash
- if c.vers >= VersionTLS12 {
- if !isSupportedSignatureAlgorithm(certVerify.signatureAlgorithm, certReq.supportedSignatureAlgorithms) {
- c.sendAlert(alertIllegalParameter)
- return errors.New("tls: client certificate used with invalid signature algorithm")
- }
- sigType, sigHash, err = typeAndHashFromSignatureScheme(certVerify.signatureAlgorithm)
- if err != nil {
- return c.sendAlert(alertInternalError)
- }
- } else {
- sigType, sigHash, err = legacyTypeAndHashFromPublicKey(pub)
- if err != nil {
- c.sendAlert(alertIllegalParameter)
- return err
- }
- }
-
- signed := hs.finishedHash.hashForClientCertificate(sigType, sigHash, hs.masterSecret)
- if err := verifyHandshakeSignature(sigType, pub, sigHash, signed, certVerify.signature); err != nil {
- c.sendAlert(alertDecryptError)
- return errors.New("tls: invalid signature by the client certificate: " + err.Error())
- }
-
- hs.finishedHash.Write(certVerify.marshal())
- }
-
- hs.finishedHash.discardHandshakeBuffer()
-
- return nil
-}
-
-func (hs *serverHandshakeState) establishKeys() error {
- c := hs.c
-
- clientMAC, serverMAC, clientKey, serverKey, clientIV, serverIV :=
- keysFromMasterSecret(c.vers, hs.suite, hs.masterSecret, hs.clientHello.random, hs.hello.random, hs.suite.macLen, hs.suite.keyLen, hs.suite.ivLen)
-
- var clientCipher, serverCipher interface{}
- var clientHash, serverHash hash.Hash
-
- if hs.suite.aead == nil {
- clientCipher = hs.suite.cipher(clientKey, clientIV, true /* for reading */)
- clientHash = hs.suite.mac(clientMAC)
- serverCipher = hs.suite.cipher(serverKey, serverIV, false /* not for reading */)
- serverHash = hs.suite.mac(serverMAC)
- } else {
- clientCipher = hs.suite.aead(clientKey, clientIV)
- serverCipher = hs.suite.aead(serverKey, serverIV)
- }
-
- c.in.prepareCipherSpec(c.vers, clientCipher, clientHash)
- c.out.prepareCipherSpec(c.vers, serverCipher, serverHash)
-
- return nil
-}
-
-func (hs *serverHandshakeState) readFinished(out []byte) error {
- c := hs.c
-
- if err := c.readChangeCipherSpec(); err != nil {
- return err
- }
-
- msg, err := c.readHandshake()
- if err != nil {
- return err
- }
- clientFinished, ok := msg.(*finishedMsg)
- if !ok {
- c.sendAlert(alertUnexpectedMessage)
- return unexpectedMessageError(clientFinished, msg)
- }
-
- verify := hs.finishedHash.clientSum(hs.masterSecret)
- if len(verify) != len(clientFinished.verifyData) ||
- subtle.ConstantTimeCompare(verify, clientFinished.verifyData) != 1 {
- c.sendAlert(alertHandshakeFailure)
- return errors.New("tls: client's Finished message is incorrect")
- }
-
- hs.finishedHash.Write(clientFinished.marshal())
- copy(out, verify)
- return nil
-}
-
-func (hs *serverHandshakeState) sendSessionTicket() error {
- // ticketSupported is set in a resumption handshake if the
- // ticket from the client was encrypted with an old session
- // ticket key and thus a refreshed ticket should be sent.
- if !hs.hello.ticketSupported {
- return nil
- }
-
- c := hs.c
- m := new(newSessionTicketMsg)
-
- createdAt := uint64(c.config.time().Unix())
- if hs.sessionState != nil {
- // If this is re-wrapping an old key, then keep
- // the original time it was created.
- createdAt = hs.sessionState.createdAt
- }
-
- var certsFromClient [][]byte
- for _, cert := range c.peerCertificates {
- certsFromClient = append(certsFromClient, cert.Raw)
- }
- state := sessionState{
- vers: c.vers,
- cipherSuite: hs.suite.id,
- createdAt: createdAt,
- masterSecret: hs.masterSecret,
- certificates: certsFromClient,
- }
- var err error
- m.ticket, err = c.encryptTicket(state.marshal())
- if err != nil {
- return err
- }
-
- hs.finishedHash.Write(m.marshal())
- if _, err := c.writeRecord(recordTypeHandshake, m.marshal()); err != nil {
- return err
- }
-
- return nil
-}
-
-func (hs *serverHandshakeState) sendFinished(out []byte) error {
- c := hs.c
-
- if _, err := c.writeRecord(recordTypeChangeCipherSpec, []byte{1}); err != nil {
- return err
- }
-
- finished := new(finishedMsg)
- finished.verifyData = hs.finishedHash.serverSum(hs.masterSecret)
- hs.finishedHash.Write(finished.marshal())
- if _, err := c.writeRecord(recordTypeHandshake, finished.marshal()); err != nil {
- return err
- }
-
- copy(out, finished.verifyData)
-
- return nil
-}
-
-// processCertsFromClient takes a chain of client certificates either from a
-// Certificates message or from a sessionState and verifies them. It returns
-// the public key of the leaf certificate.
-func (c *Conn) processCertsFromClient(certificate Certificate) error {
- certificates := certificate.Certificate
- certs := make([]*x509.Certificate, len(certificates))
- var err error
- for i, asn1Data := range certificates {
- if certs[i], err = x509.ParseCertificate(asn1Data); err != nil {
- c.sendAlert(alertBadCertificate)
- return errors.New("tls: failed to parse client certificate: " + err.Error())
- }
- }
-
- if len(certs) == 0 && requiresClientCert(c.config.ClientAuth) {
- c.sendAlert(alertBadCertificate)
- return errors.New("tls: client didn't provide a certificate")
- }
-
- if c.config.ClientAuth >= VerifyClientCertIfGiven && len(certs) > 0 {
- opts := x509.VerifyOptions{
- Roots: c.config.ClientCAs,
- CurrentTime: c.config.time(),
- Intermediates: x509.NewCertPool(),
- KeyUsages: []x509.ExtKeyUsage{x509.ExtKeyUsageClientAuth},
- }
-
- for _, cert := range certs[1:] {
- opts.Intermediates.AddCert(cert)
- }
-
- chains, err := certs[0].Verify(opts)
- if err != nil {
- c.sendAlert(alertBadCertificate)
- return errors.New("tls: failed to verify client certificate: " + err.Error())
- }
-
- c.verifiedChains = chains
- }
-
- c.peerCertificates = certs
- c.ocspResponse = certificate.OCSPStaple
- c.scts = certificate.SignedCertificateTimestamps
-
- if len(certs) > 0 {
- switch certs[0].PublicKey.(type) {
- case *ecdsa.PublicKey, *rsa.PublicKey, ed25519.PublicKey:
- default:
- c.sendAlert(alertUnsupportedCertificate)
- return fmt.Errorf("tls: client certificate contains an unsupported public key of type %T", certs[0].PublicKey)
- }
- }
-
- if c.config.VerifyPeerCertificate != nil {
- if err := c.config.VerifyPeerCertificate(certificates, c.verifiedChains); err != nil {
- c.sendAlert(alertBadCertificate)
- return err
- }
- }
-
- return nil
-}
-
-func newClientHelloInfo(ctx context.Context, c *Conn, clientHello *clientHelloMsg) *ClientHelloInfo {
- supportedVersions := clientHello.supportedVersions
- if len(clientHello.supportedVersions) == 0 {
- supportedVersions = supportedVersionsFromMax(clientHello.vers)
- }
-
- return toClientHelloInfo(&clientHelloInfo{
- CipherSuites: clientHello.cipherSuites,
- ServerName: clientHello.serverName,
- SupportedCurves: clientHello.supportedCurves,
- SupportedPoints: clientHello.supportedPoints,
- SignatureSchemes: clientHello.supportedSignatureAlgorithms,
- SupportedProtos: clientHello.alpnProtocols,
- SupportedVersions: supportedVersions,
- Conn: c.conn,
- config: toConfig(c.config),
- ctx: ctx,
- })
-}
diff --git a/vendor/github.com/marten-seemann/qtls-go1-17/handshake_server_tls13.go b/vendor/github.com/marten-seemann/qtls-go1-17/handshake_server_tls13.go
deleted file mode 100644
index 0c200605e..000000000
--- a/vendor/github.com/marten-seemann/qtls-go1-17/handshake_server_tls13.go
+++ /dev/null
@@ -1,896 +0,0 @@
-// Copyright 2018 The Go Authors. All rights reserved.
-// Use of this source code is governed by a BSD-style
-// license that can be found in the LICENSE file.
-
-package qtls
-
-import (
- "bytes"
- "context"
- "crypto"
- "crypto/hmac"
- "crypto/rsa"
- "errors"
- "hash"
- "io"
- "sync/atomic"
- "time"
-)
-
-// maxClientPSKIdentities is the number of client PSK identities the server will
-// attempt to validate. It will ignore the rest not to let cheap ClientHello
-// messages cause too much work in session ticket decryption attempts.
-const maxClientPSKIdentities = 5
-
-type serverHandshakeStateTLS13 struct {
- c *Conn
- ctx context.Context
- clientHello *clientHelloMsg
- hello *serverHelloMsg
- alpnNegotiationErr error
- encryptedExtensions *encryptedExtensionsMsg
- sentDummyCCS bool
- usingPSK bool
- suite *cipherSuiteTLS13
- cert *Certificate
- sigAlg SignatureScheme
- earlySecret []byte
- sharedKey []byte
- handshakeSecret []byte
- masterSecret []byte
- trafficSecret []byte // client_application_traffic_secret_0
- transcript hash.Hash
- clientFinished []byte
-}
-
-func (hs *serverHandshakeStateTLS13) handshake() error {
- c := hs.c
-
- // For an overview of the TLS 1.3 handshake, see RFC 8446, Section 2.
- if err := hs.processClientHello(); err != nil {
- return err
- }
- if err := hs.checkForResumption(); err != nil {
- return err
- }
- if err := hs.pickCertificate(); err != nil {
- return err
- }
- c.buffering = true
- if err := hs.sendServerParameters(); err != nil {
- return err
- }
- if err := hs.sendServerCertificate(); err != nil {
- return err
- }
- if err := hs.sendServerFinished(); err != nil {
- return err
- }
- // Note that at this point we could start sending application data without
- // waiting for the client's second flight, but the application might not
- // expect the lack of replay protection of the ClientHello parameters.
- if _, err := c.flush(); err != nil {
- return err
- }
- if err := hs.readClientCertificate(); err != nil {
- return err
- }
- if err := hs.readClientFinished(); err != nil {
- return err
- }
-
- atomic.StoreUint32(&c.handshakeStatus, 1)
-
- return nil
-}
-
-func (hs *serverHandshakeStateTLS13) processClientHello() error {
- c := hs.c
-
- hs.hello = new(serverHelloMsg)
- hs.encryptedExtensions = new(encryptedExtensionsMsg)
-
- // TLS 1.3 froze the ServerHello.legacy_version field, and uses
- // supported_versions instead. See RFC 8446, sections 4.1.3 and 4.2.1.
- hs.hello.vers = VersionTLS12
- hs.hello.supportedVersion = c.vers
-
- if len(hs.clientHello.supportedVersions) == 0 {
- c.sendAlert(alertIllegalParameter)
- return errors.New("tls: client used the legacy version field to negotiate TLS 1.3")
- }
-
- // Abort if the client is doing a fallback and landing lower than what we
- // support. See RFC 7507, which however does not specify the interaction
- // with supported_versions. The only difference is that with
- // supported_versions a client has a chance to attempt a [TLS 1.2, TLS 1.4]
- // handshake in case TLS 1.3 is broken but 1.2 is not. Alas, in that case,
- // it will have to drop the TLS_FALLBACK_SCSV protection if it falls back to
- // TLS 1.2, because a TLS 1.3 server would abort here. The situation before
- // supported_versions was not better because there was just no way to do a
- // TLS 1.4 handshake without risking the server selecting TLS 1.3.
- for _, id := range hs.clientHello.cipherSuites {
- if id == TLS_FALLBACK_SCSV {
- // Use c.vers instead of max(supported_versions) because an attacker
- // could defeat this by adding an arbitrary high version otherwise.
- if c.vers < c.config.maxSupportedVersion() {
- c.sendAlert(alertInappropriateFallback)
- return errors.New("tls: client using inappropriate protocol fallback")
- }
- break
- }
- }
-
- if len(hs.clientHello.compressionMethods) != 1 ||
- hs.clientHello.compressionMethods[0] != compressionNone {
- c.sendAlert(alertIllegalParameter)
- return errors.New("tls: TLS 1.3 client supports illegal compression methods")
- }
-
- hs.hello.random = make([]byte, 32)
- if _, err := io.ReadFull(c.config.rand(), hs.hello.random); err != nil {
- c.sendAlert(alertInternalError)
- return err
- }
-
- if len(hs.clientHello.secureRenegotiation) != 0 {
- c.sendAlert(alertHandshakeFailure)
- return errors.New("tls: initial handshake had non-empty renegotiation extension")
- }
-
- hs.hello.sessionId = hs.clientHello.sessionId
- hs.hello.compressionMethod = compressionNone
-
- if hs.suite == nil {
- var preferenceList []uint16
- for _, suiteID := range c.config.CipherSuites {
- for _, suite := range cipherSuitesTLS13 {
- if suite.id == suiteID {
- preferenceList = append(preferenceList, suiteID)
- break
- }
- }
- }
- if len(preferenceList) == 0 {
- preferenceList = defaultCipherSuitesTLS13
- if !hasAESGCMHardwareSupport || !aesgcmPreferred(hs.clientHello.cipherSuites) {
- preferenceList = defaultCipherSuitesTLS13NoAES
- }
- }
- for _, suiteID := range preferenceList {
- hs.suite = mutualCipherSuiteTLS13(hs.clientHello.cipherSuites, suiteID)
- if hs.suite != nil {
- break
- }
- }
- }
- if hs.suite == nil {
- c.sendAlert(alertHandshakeFailure)
- return errors.New("tls: no cipher suite supported by both client and server")
- }
- c.cipherSuite = hs.suite.id
- hs.hello.cipherSuite = hs.suite.id
- hs.transcript = hs.suite.hash.New()
-
- // Pick the ECDHE group in server preference order, but give priority to
- // groups with a key share, to avoid a HelloRetryRequest round-trip.
- var selectedGroup CurveID
- var clientKeyShare *keyShare
-GroupSelection:
- for _, preferredGroup := range c.config.curvePreferences() {
- for _, ks := range hs.clientHello.keyShares {
- if ks.group == preferredGroup {
- selectedGroup = ks.group
- clientKeyShare = &ks
- break GroupSelection
- }
- }
- if selectedGroup != 0 {
- continue
- }
- for _, group := range hs.clientHello.supportedCurves {
- if group == preferredGroup {
- selectedGroup = group
- break
- }
- }
- }
- if selectedGroup == 0 {
- c.sendAlert(alertHandshakeFailure)
- return errors.New("tls: no ECDHE curve supported by both client and server")
- }
- if clientKeyShare == nil {
- if err := hs.doHelloRetryRequest(selectedGroup); err != nil {
- return err
- }
- clientKeyShare = &hs.clientHello.keyShares[0]
- }
-
- if _, ok := curveForCurveID(selectedGroup); selectedGroup != X25519 && !ok {
- c.sendAlert(alertInternalError)
- return errors.New("tls: CurvePreferences includes unsupported curve")
- }
- params, err := generateECDHEParameters(c.config.rand(), selectedGroup)
- if err != nil {
- c.sendAlert(alertInternalError)
- return err
- }
- hs.hello.serverShare = keyShare{group: selectedGroup, data: params.PublicKey()}
- hs.sharedKey = params.SharedKey(clientKeyShare.data)
- if hs.sharedKey == nil {
- c.sendAlert(alertIllegalParameter)
- return errors.New("tls: invalid client key share")
- }
-
- c.serverName = hs.clientHello.serverName
-
- if c.extraConfig != nil && c.extraConfig.ReceivedExtensions != nil {
- c.extraConfig.ReceivedExtensions(typeClientHello, hs.clientHello.additionalExtensions)
- }
-
- selectedProto, err := negotiateALPN(c.config.NextProtos, hs.clientHello.alpnProtocols)
- if err != nil {
- hs.alpnNegotiationErr = err
- }
- hs.encryptedExtensions.alpnProtocol = selectedProto
- c.clientProtocol = selectedProto
-
- return nil
-}
-
-func (hs *serverHandshakeStateTLS13) checkForResumption() error {
- c := hs.c
-
- if c.config.SessionTicketsDisabled {
- return nil
- }
-
- modeOK := false
- for _, mode := range hs.clientHello.pskModes {
- if mode == pskModeDHE {
- modeOK = true
- break
- }
- }
- if !modeOK {
- return nil
- }
-
- if len(hs.clientHello.pskIdentities) != len(hs.clientHello.pskBinders) {
- c.sendAlert(alertIllegalParameter)
- return errors.New("tls: invalid or missing PSK binders")
- }
- if len(hs.clientHello.pskIdentities) == 0 {
- return nil
- }
-
- for i, identity := range hs.clientHello.pskIdentities {
- if i >= maxClientPSKIdentities {
- break
- }
-
- plaintext, _ := c.decryptTicket(identity.label)
- if plaintext == nil {
- continue
- }
- sessionState := new(sessionStateTLS13)
- if ok := sessionState.unmarshal(plaintext); !ok {
- continue
- }
-
- if hs.clientHello.earlyData {
- if sessionState.maxEarlyData == 0 {
- c.sendAlert(alertUnsupportedExtension)
- return errors.New("tls: client sent unexpected early data")
- }
-
- if hs.alpnNegotiationErr == nil && sessionState.alpn == c.clientProtocol &&
- c.extraConfig != nil && c.extraConfig.MaxEarlyData > 0 &&
- c.extraConfig.Accept0RTT != nil && c.extraConfig.Accept0RTT(sessionState.appData) {
- hs.encryptedExtensions.earlyData = true
- c.used0RTT = true
- }
- }
-
- createdAt := time.Unix(int64(sessionState.createdAt), 0)
- if c.config.time().Sub(createdAt) > maxSessionTicketLifetime {
- continue
- }
-
- // We don't check the obfuscated ticket age because it's affected by
- // clock skew and it's only a freshness signal useful for shrinking the
- // window for replay attacks, which don't affect us as we don't do 0-RTT.
-
- pskSuite := cipherSuiteTLS13ByID(sessionState.cipherSuite)
- if pskSuite == nil || pskSuite.hash != hs.suite.hash {
- continue
- }
-
- // PSK connections don't re-establish client certificates, but carry
- // them over in the session ticket. Ensure the presence of client certs
- // in the ticket is consistent with the configured requirements.
- sessionHasClientCerts := len(sessionState.certificate.Certificate) != 0
- needClientCerts := requiresClientCert(c.config.ClientAuth)
- if needClientCerts && !sessionHasClientCerts {
- continue
- }
- if sessionHasClientCerts && c.config.ClientAuth == NoClientCert {
- continue
- }
-
- psk := hs.suite.expandLabel(sessionState.resumptionSecret, "resumption",
- nil, hs.suite.hash.Size())
- hs.earlySecret = hs.suite.extract(psk, nil)
- binderKey := hs.suite.deriveSecret(hs.earlySecret, resumptionBinderLabel, nil)
- // Clone the transcript in case a HelloRetryRequest was recorded.
- transcript := cloneHash(hs.transcript, hs.suite.hash)
- if transcript == nil {
- c.sendAlert(alertInternalError)
- return errors.New("tls: internal error: failed to clone hash")
- }
- transcript.Write(hs.clientHello.marshalWithoutBinders())
- pskBinder := hs.suite.finishedHash(binderKey, transcript)
- if !hmac.Equal(hs.clientHello.pskBinders[i], pskBinder) {
- c.sendAlert(alertDecryptError)
- return errors.New("tls: invalid PSK binder")
- }
-
- c.didResume = true
- if err := c.processCertsFromClient(sessionState.certificate); err != nil {
- return err
- }
-
- h := cloneHash(hs.transcript, hs.suite.hash)
- h.Write(hs.clientHello.marshal())
- if hs.encryptedExtensions.earlyData {
- clientEarlySecret := hs.suite.deriveSecret(hs.earlySecret, "c e traffic", h)
- c.in.exportKey(Encryption0RTT, hs.suite, clientEarlySecret)
- if err := c.config.writeKeyLog(keyLogLabelEarlyTraffic, hs.clientHello.random, clientEarlySecret); err != nil {
- c.sendAlert(alertInternalError)
- return err
- }
- }
-
- hs.hello.selectedIdentityPresent = true
- hs.hello.selectedIdentity = uint16(i)
- hs.usingPSK = true
- return nil
- }
-
- return nil
-}
-
-// cloneHash uses the encoding.BinaryMarshaler and encoding.BinaryUnmarshaler
-// interfaces implemented by standard library hashes to clone the state of in
-// to a new instance of h. It returns nil if the operation fails.
-func cloneHash(in hash.Hash, h crypto.Hash) hash.Hash {
- // Recreate the interface to avoid importing encoding.
- type binaryMarshaler interface {
- MarshalBinary() (data []byte, err error)
- UnmarshalBinary(data []byte) error
- }
- marshaler, ok := in.(binaryMarshaler)
- if !ok {
- return nil
- }
- state, err := marshaler.MarshalBinary()
- if err != nil {
- return nil
- }
- out := h.New()
- unmarshaler, ok := out.(binaryMarshaler)
- if !ok {
- return nil
- }
- if err := unmarshaler.UnmarshalBinary(state); err != nil {
- return nil
- }
- return out
-}
-
-func (hs *serverHandshakeStateTLS13) pickCertificate() error {
- c := hs.c
-
- // Only one of PSK and certificates are used at a time.
- if hs.usingPSK {
- return nil
- }
-
- // signature_algorithms is required in TLS 1.3. See RFC 8446, Section 4.2.3.
- if len(hs.clientHello.supportedSignatureAlgorithms) == 0 {
- return c.sendAlert(alertMissingExtension)
- }
-
- certificate, err := c.config.getCertificate(newClientHelloInfo(hs.ctx, c, hs.clientHello))
- if err != nil {
- if err == errNoCertificates {
- c.sendAlert(alertUnrecognizedName)
- } else {
- c.sendAlert(alertInternalError)
- }
- return err
- }
- hs.sigAlg, err = selectSignatureScheme(c.vers, certificate, hs.clientHello.supportedSignatureAlgorithms)
- if err != nil {
- // getCertificate returned a certificate that is unsupported or
- // incompatible with the client's signature algorithms.
- c.sendAlert(alertHandshakeFailure)
- return err
- }
- hs.cert = certificate
-
- return nil
-}
-
-// sendDummyChangeCipherSpec sends a ChangeCipherSpec record for compatibility
-// with middleboxes that didn't implement TLS correctly. See RFC 8446, Appendix D.4.
-func (hs *serverHandshakeStateTLS13) sendDummyChangeCipherSpec() error {
- if hs.sentDummyCCS {
- return nil
- }
- hs.sentDummyCCS = true
-
- _, err := hs.c.writeRecord(recordTypeChangeCipherSpec, []byte{1})
- return err
-}
-
-func (hs *serverHandshakeStateTLS13) doHelloRetryRequest(selectedGroup CurveID) error {
- c := hs.c
-
- // The first ClientHello gets double-hashed into the transcript upon a
- // HelloRetryRequest. See RFC 8446, Section 4.4.1.
- hs.transcript.Write(hs.clientHello.marshal())
- chHash := hs.transcript.Sum(nil)
- hs.transcript.Reset()
- hs.transcript.Write([]byte{typeMessageHash, 0, 0, uint8(len(chHash))})
- hs.transcript.Write(chHash)
-
- helloRetryRequest := &serverHelloMsg{
- vers: hs.hello.vers,
- random: helloRetryRequestRandom,
- sessionId: hs.hello.sessionId,
- cipherSuite: hs.hello.cipherSuite,
- compressionMethod: hs.hello.compressionMethod,
- supportedVersion: hs.hello.supportedVersion,
- selectedGroup: selectedGroup,
- }
-
- hs.transcript.Write(helloRetryRequest.marshal())
- if _, err := c.writeRecord(recordTypeHandshake, helloRetryRequest.marshal()); err != nil {
- return err
- }
-
- if err := hs.sendDummyChangeCipherSpec(); err != nil {
- return err
- }
-
- msg, err := c.readHandshake()
- if err != nil {
- return err
- }
-
- clientHello, ok := msg.(*clientHelloMsg)
- if !ok {
- c.sendAlert(alertUnexpectedMessage)
- return unexpectedMessageError(clientHello, msg)
- }
-
- if len(clientHello.keyShares) != 1 || clientHello.keyShares[0].group != selectedGroup {
- c.sendAlert(alertIllegalParameter)
- return errors.New("tls: client sent invalid key share in second ClientHello")
- }
-
- if clientHello.earlyData {
- c.sendAlert(alertIllegalParameter)
- return errors.New("tls: client indicated early data in second ClientHello")
- }
-
- if illegalClientHelloChange(clientHello, hs.clientHello) {
- c.sendAlert(alertIllegalParameter)
- return errors.New("tls: client illegally modified second ClientHello")
- }
-
- if clientHello.earlyData {
- c.sendAlert(alertIllegalParameter)
- return errors.New("tls: client offered 0-RTT data in second ClientHello")
- }
-
- hs.clientHello = clientHello
- return nil
-}
-
-// illegalClientHelloChange reports whether the two ClientHello messages are
-// different, with the exception of the changes allowed before and after a
-// HelloRetryRequest. See RFC 8446, Section 4.1.2.
-func illegalClientHelloChange(ch, ch1 *clientHelloMsg) bool {
- if len(ch.supportedVersions) != len(ch1.supportedVersions) ||
- len(ch.cipherSuites) != len(ch1.cipherSuites) ||
- len(ch.supportedCurves) != len(ch1.supportedCurves) ||
- len(ch.supportedSignatureAlgorithms) != len(ch1.supportedSignatureAlgorithms) ||
- len(ch.supportedSignatureAlgorithmsCert) != len(ch1.supportedSignatureAlgorithmsCert) ||
- len(ch.alpnProtocols) != len(ch1.alpnProtocols) {
- return true
- }
- for i := range ch.supportedVersions {
- if ch.supportedVersions[i] != ch1.supportedVersions[i] {
- return true
- }
- }
- for i := range ch.cipherSuites {
- if ch.cipherSuites[i] != ch1.cipherSuites[i] {
- return true
- }
- }
- for i := range ch.supportedCurves {
- if ch.supportedCurves[i] != ch1.supportedCurves[i] {
- return true
- }
- }
- for i := range ch.supportedSignatureAlgorithms {
- if ch.supportedSignatureAlgorithms[i] != ch1.supportedSignatureAlgorithms[i] {
- return true
- }
- }
- for i := range ch.supportedSignatureAlgorithmsCert {
- if ch.supportedSignatureAlgorithmsCert[i] != ch1.supportedSignatureAlgorithmsCert[i] {
- return true
- }
- }
- for i := range ch.alpnProtocols {
- if ch.alpnProtocols[i] != ch1.alpnProtocols[i] {
- return true
- }
- }
- return ch.vers != ch1.vers ||
- !bytes.Equal(ch.random, ch1.random) ||
- !bytes.Equal(ch.sessionId, ch1.sessionId) ||
- !bytes.Equal(ch.compressionMethods, ch1.compressionMethods) ||
- ch.serverName != ch1.serverName ||
- ch.ocspStapling != ch1.ocspStapling ||
- !bytes.Equal(ch.supportedPoints, ch1.supportedPoints) ||
- ch.ticketSupported != ch1.ticketSupported ||
- !bytes.Equal(ch.sessionTicket, ch1.sessionTicket) ||
- ch.secureRenegotiationSupported != ch1.secureRenegotiationSupported ||
- !bytes.Equal(ch.secureRenegotiation, ch1.secureRenegotiation) ||
- ch.scts != ch1.scts ||
- !bytes.Equal(ch.cookie, ch1.cookie) ||
- !bytes.Equal(ch.pskModes, ch1.pskModes)
-}
-
-func (hs *serverHandshakeStateTLS13) sendServerParameters() error {
- c := hs.c
-
- hs.transcript.Write(hs.clientHello.marshal())
- hs.transcript.Write(hs.hello.marshal())
- if _, err := c.writeRecord(recordTypeHandshake, hs.hello.marshal()); err != nil {
- return err
- }
-
- if err := hs.sendDummyChangeCipherSpec(); err != nil {
- return err
- }
-
- earlySecret := hs.earlySecret
- if earlySecret == nil {
- earlySecret = hs.suite.extract(nil, nil)
- }
- hs.handshakeSecret = hs.suite.extract(hs.sharedKey,
- hs.suite.deriveSecret(earlySecret, "derived", nil))
-
- clientSecret := hs.suite.deriveSecret(hs.handshakeSecret,
- clientHandshakeTrafficLabel, hs.transcript)
- c.in.exportKey(EncryptionHandshake, hs.suite, clientSecret)
- c.in.setTrafficSecret(hs.suite, clientSecret)
- serverSecret := hs.suite.deriveSecret(hs.handshakeSecret,
- serverHandshakeTrafficLabel, hs.transcript)
- c.out.exportKey(EncryptionHandshake, hs.suite, serverSecret)
- c.out.setTrafficSecret(hs.suite, serverSecret)
-
- err := c.config.writeKeyLog(keyLogLabelClientHandshake, hs.clientHello.random, clientSecret)
- if err != nil {
- c.sendAlert(alertInternalError)
- return err
- }
- err = c.config.writeKeyLog(keyLogLabelServerHandshake, hs.clientHello.random, serverSecret)
- if err != nil {
- c.sendAlert(alertInternalError)
- return err
- }
-
- if hs.alpnNegotiationErr != nil {
- c.sendAlert(alertNoApplicationProtocol)
- return hs.alpnNegotiationErr
- }
- if hs.c.extraConfig != nil && hs.c.extraConfig.GetExtensions != nil {
- hs.encryptedExtensions.additionalExtensions = hs.c.extraConfig.GetExtensions(typeEncryptedExtensions)
- }
-
- hs.transcript.Write(hs.encryptedExtensions.marshal())
- if _, err := c.writeRecord(recordTypeHandshake, hs.encryptedExtensions.marshal()); err != nil {
- return err
- }
-
- return nil
-}
-
-func (hs *serverHandshakeStateTLS13) requestClientCert() bool {
- return hs.c.config.ClientAuth >= RequestClientCert && !hs.usingPSK
-}
-
-func (hs *serverHandshakeStateTLS13) sendServerCertificate() error {
- c := hs.c
-
- // Only one of PSK and certificates are used at a time.
- if hs.usingPSK {
- return nil
- }
-
- if hs.requestClientCert() {
- // Request a client certificate
- certReq := new(certificateRequestMsgTLS13)
- certReq.ocspStapling = true
- certReq.scts = true
- certReq.supportedSignatureAlgorithms = supportedSignatureAlgorithms
- if c.config.ClientCAs != nil {
- certReq.certificateAuthorities = c.config.ClientCAs.Subjects()
- }
-
- hs.transcript.Write(certReq.marshal())
- if _, err := c.writeRecord(recordTypeHandshake, certReq.marshal()); err != nil {
- return err
- }
- }
-
- certMsg := new(certificateMsgTLS13)
-
- certMsg.certificate = *hs.cert
- certMsg.scts = hs.clientHello.scts && len(hs.cert.SignedCertificateTimestamps) > 0
- certMsg.ocspStapling = hs.clientHello.ocspStapling && len(hs.cert.OCSPStaple) > 0
-
- hs.transcript.Write(certMsg.marshal())
- if _, err := c.writeRecord(recordTypeHandshake, certMsg.marshal()); err != nil {
- return err
- }
-
- certVerifyMsg := new(certificateVerifyMsg)
- certVerifyMsg.hasSignatureAlgorithm = true
- certVerifyMsg.signatureAlgorithm = hs.sigAlg
-
- sigType, sigHash, err := typeAndHashFromSignatureScheme(hs.sigAlg)
- if err != nil {
- return c.sendAlert(alertInternalError)
- }
-
- signed := signedMessage(sigHash, serverSignatureContext, hs.transcript)
- signOpts := crypto.SignerOpts(sigHash)
- if sigType == signatureRSAPSS {
- signOpts = &rsa.PSSOptions{SaltLength: rsa.PSSSaltLengthEqualsHash, Hash: sigHash}
- }
- sig, err := hs.cert.PrivateKey.(crypto.Signer).Sign(c.config.rand(), signed, signOpts)
- if err != nil {
- public := hs.cert.PrivateKey.(crypto.Signer).Public()
- if rsaKey, ok := public.(*rsa.PublicKey); ok && sigType == signatureRSAPSS &&
- rsaKey.N.BitLen()/8 < sigHash.Size()*2+2 { // key too small for RSA-PSS
- c.sendAlert(alertHandshakeFailure)
- } else {
- c.sendAlert(alertInternalError)
- }
- return errors.New("tls: failed to sign handshake: " + err.Error())
- }
- certVerifyMsg.signature = sig
-
- hs.transcript.Write(certVerifyMsg.marshal())
- if _, err := c.writeRecord(recordTypeHandshake, certVerifyMsg.marshal()); err != nil {
- return err
- }
-
- return nil
-}
-
-func (hs *serverHandshakeStateTLS13) sendServerFinished() error {
- c := hs.c
-
- finished := &finishedMsg{
- verifyData: hs.suite.finishedHash(c.out.trafficSecret, hs.transcript),
- }
-
- hs.transcript.Write(finished.marshal())
- if _, err := c.writeRecord(recordTypeHandshake, finished.marshal()); err != nil {
- return err
- }
-
- // Derive secrets that take context through the server Finished.
-
- hs.masterSecret = hs.suite.extract(nil,
- hs.suite.deriveSecret(hs.handshakeSecret, "derived", nil))
-
- hs.trafficSecret = hs.suite.deriveSecret(hs.masterSecret,
- clientApplicationTrafficLabel, hs.transcript)
- serverSecret := hs.suite.deriveSecret(hs.masterSecret,
- serverApplicationTrafficLabel, hs.transcript)
- c.out.exportKey(EncryptionApplication, hs.suite, serverSecret)
- c.out.setTrafficSecret(hs.suite, serverSecret)
-
- err := c.config.writeKeyLog(keyLogLabelClientTraffic, hs.clientHello.random, hs.trafficSecret)
- if err != nil {
- c.sendAlert(alertInternalError)
- return err
- }
- err = c.config.writeKeyLog(keyLogLabelServerTraffic, hs.clientHello.random, serverSecret)
- if err != nil {
- c.sendAlert(alertInternalError)
- return err
- }
-
- c.ekm = hs.suite.exportKeyingMaterial(hs.masterSecret, hs.transcript)
-
- // If we did not request client certificates, at this point we can
- // precompute the client finished and roll the transcript forward to send
- // session tickets in our first flight.
- if !hs.requestClientCert() {
- if err := hs.sendSessionTickets(); err != nil {
- return err
- }
- }
-
- return nil
-}
-
-func (hs *serverHandshakeStateTLS13) shouldSendSessionTickets() bool {
- if hs.c.config.SessionTicketsDisabled {
- return false
- }
-
- // Don't send tickets the client wouldn't use. See RFC 8446, Section 4.2.9.
- for _, pskMode := range hs.clientHello.pskModes {
- if pskMode == pskModeDHE {
- return true
- }
- }
- return false
-}
-
-func (hs *serverHandshakeStateTLS13) sendSessionTickets() error {
- c := hs.c
-
- hs.clientFinished = hs.suite.finishedHash(c.in.trafficSecret, hs.transcript)
- finishedMsg := &finishedMsg{
- verifyData: hs.clientFinished,
- }
- hs.transcript.Write(finishedMsg.marshal())
-
- if !hs.shouldSendSessionTickets() {
- return nil
- }
-
- c.resumptionSecret = hs.suite.deriveSecret(hs.masterSecret,
- resumptionLabel, hs.transcript)
-
- // Don't send session tickets when the alternative record layer is set.
- // Instead, save the resumption secret on the Conn.
- // Session tickets can then be generated by calling Conn.GetSessionTicket().
- if hs.c.extraConfig != nil && hs.c.extraConfig.AlternativeRecordLayer != nil {
- return nil
- }
-
- m, err := hs.c.getSessionTicketMsg(nil)
- if err != nil {
- return err
- }
-
- if _, err := c.writeRecord(recordTypeHandshake, m.marshal()); err != nil {
- return err
- }
-
- return nil
-}
-
-func (hs *serverHandshakeStateTLS13) readClientCertificate() error {
- c := hs.c
-
- if !hs.requestClientCert() {
- // Make sure the connection is still being verified whether or not
- // the server requested a client certificate.
- if c.config.VerifyConnection != nil {
- if err := c.config.VerifyConnection(c.connectionStateLocked()); err != nil {
- c.sendAlert(alertBadCertificate)
- return err
- }
- }
- return nil
- }
-
- // If we requested a client certificate, then the client must send a
- // certificate message. If it's empty, no CertificateVerify is sent.
-
- msg, err := c.readHandshake()
- if err != nil {
- return err
- }
-
- certMsg, ok := msg.(*certificateMsgTLS13)
- if !ok {
- c.sendAlert(alertUnexpectedMessage)
- return unexpectedMessageError(certMsg, msg)
- }
- hs.transcript.Write(certMsg.marshal())
-
- if err := c.processCertsFromClient(certMsg.certificate); err != nil {
- return err
- }
-
- if c.config.VerifyConnection != nil {
- if err := c.config.VerifyConnection(c.connectionStateLocked()); err != nil {
- c.sendAlert(alertBadCertificate)
- return err
- }
- }
-
- if len(certMsg.certificate.Certificate) != 0 {
- msg, err = c.readHandshake()
- if err != nil {
- return err
- }
-
- certVerify, ok := msg.(*certificateVerifyMsg)
- if !ok {
- c.sendAlert(alertUnexpectedMessage)
- return unexpectedMessageError(certVerify, msg)
- }
-
- // See RFC 8446, Section 4.4.3.
- if !isSupportedSignatureAlgorithm(certVerify.signatureAlgorithm, supportedSignatureAlgorithms) {
- c.sendAlert(alertIllegalParameter)
- return errors.New("tls: client certificate used with invalid signature algorithm")
- }
- sigType, sigHash, err := typeAndHashFromSignatureScheme(certVerify.signatureAlgorithm)
- if err != nil {
- return c.sendAlert(alertInternalError)
- }
- if sigType == signaturePKCS1v15 || sigHash == crypto.SHA1 {
- c.sendAlert(alertIllegalParameter)
- return errors.New("tls: client certificate used with invalid signature algorithm")
- }
- signed := signedMessage(sigHash, clientSignatureContext, hs.transcript)
- if err := verifyHandshakeSignature(sigType, c.peerCertificates[0].PublicKey,
- sigHash, signed, certVerify.signature); err != nil {
- c.sendAlert(alertDecryptError)
- return errors.New("tls: invalid signature by the client certificate: " + err.Error())
- }
-
- hs.transcript.Write(certVerify.marshal())
- }
-
- // If we waited until the client certificates to send session tickets, we
- // are ready to do it now.
- if err := hs.sendSessionTickets(); err != nil {
- return err
- }
-
- return nil
-}
-
-func (hs *serverHandshakeStateTLS13) readClientFinished() error {
- c := hs.c
-
- msg, err := c.readHandshake()
- if err != nil {
- return err
- }
-
- finished, ok := msg.(*finishedMsg)
- if !ok {
- c.sendAlert(alertUnexpectedMessage)
- return unexpectedMessageError(finished, msg)
- }
-
- if !hmac.Equal(hs.clientFinished, finished.verifyData) {
- c.sendAlert(alertDecryptError)
- return errors.New("tls: invalid client finished hash")
- }
-
- c.in.exportKey(EncryptionApplication, hs.suite, hs.trafficSecret)
- c.in.setTrafficSecret(hs.suite, hs.trafficSecret)
-
- return nil
-}
diff --git a/vendor/github.com/marten-seemann/qtls-go1-17/prf.go b/vendor/github.com/marten-seemann/qtls-go1-17/prf.go
deleted file mode 100644
index 9eb0221a0..000000000
--- a/vendor/github.com/marten-seemann/qtls-go1-17/prf.go
+++ /dev/null
@@ -1,283 +0,0 @@
-// Copyright 2009 The Go Authors. All rights reserved.
-// Use of this source code is governed by a BSD-style
-// license that can be found in the LICENSE file.
-
-package qtls
-
-import (
- "crypto"
- "crypto/hmac"
- "crypto/md5"
- "crypto/sha1"
- "crypto/sha256"
- "crypto/sha512"
- "errors"
- "fmt"
- "hash"
-)
-
-// Split a premaster secret in two as specified in RFC 4346, Section 5.
-func splitPreMasterSecret(secret []byte) (s1, s2 []byte) {
- s1 = secret[0 : (len(secret)+1)/2]
- s2 = secret[len(secret)/2:]
- return
-}
-
-// pHash implements the P_hash function, as defined in RFC 4346, Section 5.
-func pHash(result, secret, seed []byte, hash func() hash.Hash) {
- h := hmac.New(hash, secret)
- h.Write(seed)
- a := h.Sum(nil)
-
- j := 0
- for j < len(result) {
- h.Reset()
- h.Write(a)
- h.Write(seed)
- b := h.Sum(nil)
- copy(result[j:], b)
- j += len(b)
-
- h.Reset()
- h.Write(a)
- a = h.Sum(nil)
- }
-}
-
-// prf10 implements the TLS 1.0 pseudo-random function, as defined in RFC 2246, Section 5.
-func prf10(result, secret, label, seed []byte) {
- hashSHA1 := sha1.New
- hashMD5 := md5.New
-
- labelAndSeed := make([]byte, len(label)+len(seed))
- copy(labelAndSeed, label)
- copy(labelAndSeed[len(label):], seed)
-
- s1, s2 := splitPreMasterSecret(secret)
- pHash(result, s1, labelAndSeed, hashMD5)
- result2 := make([]byte, len(result))
- pHash(result2, s2, labelAndSeed, hashSHA1)
-
- for i, b := range result2 {
- result[i] ^= b
- }
-}
-
-// prf12 implements the TLS 1.2 pseudo-random function, as defined in RFC 5246, Section 5.
-func prf12(hashFunc func() hash.Hash) func(result, secret, label, seed []byte) {
- return func(result, secret, label, seed []byte) {
- labelAndSeed := make([]byte, len(label)+len(seed))
- copy(labelAndSeed, label)
- copy(labelAndSeed[len(label):], seed)
-
- pHash(result, secret, labelAndSeed, hashFunc)
- }
-}
-
-const (
- masterSecretLength = 48 // Length of a master secret in TLS 1.1.
- finishedVerifyLength = 12 // Length of verify_data in a Finished message.
-)
-
-var masterSecretLabel = []byte("master secret")
-var keyExpansionLabel = []byte("key expansion")
-var clientFinishedLabel = []byte("client finished")
-var serverFinishedLabel = []byte("server finished")
-
-func prfAndHashForVersion(version uint16, suite *cipherSuite) (func(result, secret, label, seed []byte), crypto.Hash) {
- switch version {
- case VersionTLS10, VersionTLS11:
- return prf10, crypto.Hash(0)
- case VersionTLS12:
- if suite.flags&suiteSHA384 != 0 {
- return prf12(sha512.New384), crypto.SHA384
- }
- return prf12(sha256.New), crypto.SHA256
- default:
- panic("unknown version")
- }
-}
-
-func prfForVersion(version uint16, suite *cipherSuite) func(result, secret, label, seed []byte) {
- prf, _ := prfAndHashForVersion(version, suite)
- return prf
-}
-
-// masterFromPreMasterSecret generates the master secret from the pre-master
-// secret. See RFC 5246, Section 8.1.
-func masterFromPreMasterSecret(version uint16, suite *cipherSuite, preMasterSecret, clientRandom, serverRandom []byte) []byte {
- seed := make([]byte, 0, len(clientRandom)+len(serverRandom))
- seed = append(seed, clientRandom...)
- seed = append(seed, serverRandom...)
-
- masterSecret := make([]byte, masterSecretLength)
- prfForVersion(version, suite)(masterSecret, preMasterSecret, masterSecretLabel, seed)
- return masterSecret
-}
-
-// keysFromMasterSecret generates the connection keys from the master
-// secret, given the lengths of the MAC key, cipher key and IV, as defined in
-// RFC 2246, Section 6.3.
-func keysFromMasterSecret(version uint16, suite *cipherSuite, masterSecret, clientRandom, serverRandom []byte, macLen, keyLen, ivLen int) (clientMAC, serverMAC, clientKey, serverKey, clientIV, serverIV []byte) {
- seed := make([]byte, 0, len(serverRandom)+len(clientRandom))
- seed = append(seed, serverRandom...)
- seed = append(seed, clientRandom...)
-
- n := 2*macLen + 2*keyLen + 2*ivLen
- keyMaterial := make([]byte, n)
- prfForVersion(version, suite)(keyMaterial, masterSecret, keyExpansionLabel, seed)
- clientMAC = keyMaterial[:macLen]
- keyMaterial = keyMaterial[macLen:]
- serverMAC = keyMaterial[:macLen]
- keyMaterial = keyMaterial[macLen:]
- clientKey = keyMaterial[:keyLen]
- keyMaterial = keyMaterial[keyLen:]
- serverKey = keyMaterial[:keyLen]
- keyMaterial = keyMaterial[keyLen:]
- clientIV = keyMaterial[:ivLen]
- keyMaterial = keyMaterial[ivLen:]
- serverIV = keyMaterial[:ivLen]
- return
-}
-
-func newFinishedHash(version uint16, cipherSuite *cipherSuite) finishedHash {
- var buffer []byte
- if version >= VersionTLS12 {
- buffer = []byte{}
- }
-
- prf, hash := prfAndHashForVersion(version, cipherSuite)
- if hash != 0 {
- return finishedHash{hash.New(), hash.New(), nil, nil, buffer, version, prf}
- }
-
- return finishedHash{sha1.New(), sha1.New(), md5.New(), md5.New(), buffer, version, prf}
-}
-
-// A finishedHash calculates the hash of a set of handshake messages suitable
-// for including in a Finished message.
-type finishedHash struct {
- client hash.Hash
- server hash.Hash
-
- // Prior to TLS 1.2, an additional MD5 hash is required.
- clientMD5 hash.Hash
- serverMD5 hash.Hash
-
- // In TLS 1.2, a full buffer is sadly required.
- buffer []byte
-
- version uint16
- prf func(result, secret, label, seed []byte)
-}
-
-func (h *finishedHash) Write(msg []byte) (n int, err error) {
- h.client.Write(msg)
- h.server.Write(msg)
-
- if h.version < VersionTLS12 {
- h.clientMD5.Write(msg)
- h.serverMD5.Write(msg)
- }
-
- if h.buffer != nil {
- h.buffer = append(h.buffer, msg...)
- }
-
- return len(msg), nil
-}
-
-func (h finishedHash) Sum() []byte {
- if h.version >= VersionTLS12 {
- return h.client.Sum(nil)
- }
-
- out := make([]byte, 0, md5.Size+sha1.Size)
- out = h.clientMD5.Sum(out)
- return h.client.Sum(out)
-}
-
-// clientSum returns the contents of the verify_data member of a client's
-// Finished message.
-func (h finishedHash) clientSum(masterSecret []byte) []byte {
- out := make([]byte, finishedVerifyLength)
- h.prf(out, masterSecret, clientFinishedLabel, h.Sum())
- return out
-}
-
-// serverSum returns the contents of the verify_data member of a server's
-// Finished message.
-func (h finishedHash) serverSum(masterSecret []byte) []byte {
- out := make([]byte, finishedVerifyLength)
- h.prf(out, masterSecret, serverFinishedLabel, h.Sum())
- return out
-}
-
-// hashForClientCertificate returns the handshake messages so far, pre-hashed if
-// necessary, suitable for signing by a TLS client certificate.
-func (h finishedHash) hashForClientCertificate(sigType uint8, hashAlg crypto.Hash, masterSecret []byte) []byte {
- if (h.version >= VersionTLS12 || sigType == signatureEd25519) && h.buffer == nil {
- panic("tls: handshake hash for a client certificate requested after discarding the handshake buffer")
- }
-
- if sigType == signatureEd25519 {
- return h.buffer
- }
-
- if h.version >= VersionTLS12 {
- hash := hashAlg.New()
- hash.Write(h.buffer)
- return hash.Sum(nil)
- }
-
- if sigType == signatureECDSA {
- return h.server.Sum(nil)
- }
-
- return h.Sum()
-}
-
-// discardHandshakeBuffer is called when there is no more need to
-// buffer the entirety of the handshake messages.
-func (h *finishedHash) discardHandshakeBuffer() {
- h.buffer = nil
-}
-
-// noExportedKeyingMaterial is used as a value of
-// ConnectionState.ekm when renegotiation is enabled and thus
-// we wish to fail all key-material export requests.
-func noExportedKeyingMaterial(label string, context []byte, length int) ([]byte, error) {
- return nil, errors.New("crypto/tls: ExportKeyingMaterial is unavailable when renegotiation is enabled")
-}
-
-// ekmFromMasterSecret generates exported keying material as defined in RFC 5705.
-func ekmFromMasterSecret(version uint16, suite *cipherSuite, masterSecret, clientRandom, serverRandom []byte) func(string, []byte, int) ([]byte, error) {
- return func(label string, context []byte, length int) ([]byte, error) {
- switch label {
- case "client finished", "server finished", "master secret", "key expansion":
- // These values are reserved and may not be used.
- return nil, fmt.Errorf("crypto/tls: reserved ExportKeyingMaterial label: %s", label)
- }
-
- seedLen := len(serverRandom) + len(clientRandom)
- if context != nil {
- seedLen += 2 + len(context)
- }
- seed := make([]byte, 0, seedLen)
-
- seed = append(seed, clientRandom...)
- seed = append(seed, serverRandom...)
-
- if context != nil {
- if len(context) >= 1<<16 {
- return nil, fmt.Errorf("crypto/tls: ExportKeyingMaterial context too long")
- }
- seed = append(seed, byte(len(context)>>8), byte(len(context)))
- seed = append(seed, context...)
- }
-
- keyMaterial := make([]byte, length)
- prfForVersion(version, suite)(keyMaterial, masterSecret, []byte(label), seed)
- return keyMaterial, nil
- }
-}
diff --git a/vendor/github.com/marten-seemann/qtls-go1-18/README.md b/vendor/github.com/marten-seemann/qtls-go1-18/README.md
deleted file mode 100644
index 3e9022127..000000000
--- a/vendor/github.com/marten-seemann/qtls-go1-18/README.md
+++ /dev/null
@@ -1,6 +0,0 @@
-# qtls
-
-[![Go Reference](https://pkg.go.dev/badge/github.com/marten-seemann/qtls-go1-17.svg)](https://pkg.go.dev/github.com/marten-seemann/qtls-go1-17)
-[![.github/workflows/go-test.yml](https://github.com/marten-seemann/qtls-go1-17/actions/workflows/go-test.yml/badge.svg)](https://github.com/marten-seemann/qtls-go1-17/actions/workflows/go-test.yml)
-
-This repository contains a modified version of the standard library's TLS implementation, modified for the QUIC protocol. It is used by [quic-go](https://github.com/lucas-clemente/quic-go).
diff --git a/vendor/github.com/marten-seemann/qtls-go1-18/alert.go b/vendor/github.com/marten-seemann/qtls-go1-18/alert.go
deleted file mode 100644
index 3feac79be..000000000
--- a/vendor/github.com/marten-seemann/qtls-go1-18/alert.go
+++ /dev/null
@@ -1,102 +0,0 @@
-// Copyright 2009 The Go Authors. All rights reserved.
-// Use of this source code is governed by a BSD-style
-// license that can be found in the LICENSE file.
-
-package qtls
-
-import "strconv"
-
-type alert uint8
-
-// Alert is a TLS alert
-type Alert = alert
-
-const (
- // alert level
- alertLevelWarning = 1
- alertLevelError = 2
-)
-
-const (
- alertCloseNotify alert = 0
- alertUnexpectedMessage alert = 10
- alertBadRecordMAC alert = 20
- alertDecryptionFailed alert = 21
- alertRecordOverflow alert = 22
- alertDecompressionFailure alert = 30
- alertHandshakeFailure alert = 40
- alertBadCertificate alert = 42
- alertUnsupportedCertificate alert = 43
- alertCertificateRevoked alert = 44
- alertCertificateExpired alert = 45
- alertCertificateUnknown alert = 46
- alertIllegalParameter alert = 47
- alertUnknownCA alert = 48
- alertAccessDenied alert = 49
- alertDecodeError alert = 50
- alertDecryptError alert = 51
- alertExportRestriction alert = 60
- alertProtocolVersion alert = 70
- alertInsufficientSecurity alert = 71
- alertInternalError alert = 80
- alertInappropriateFallback alert = 86
- alertUserCanceled alert = 90
- alertNoRenegotiation alert = 100
- alertMissingExtension alert = 109
- alertUnsupportedExtension alert = 110
- alertCertificateUnobtainable alert = 111
- alertUnrecognizedName alert = 112
- alertBadCertificateStatusResponse alert = 113
- alertBadCertificateHashValue alert = 114
- alertUnknownPSKIdentity alert = 115
- alertCertificateRequired alert = 116
- alertNoApplicationProtocol alert = 120
-)
-
-var alertText = map[alert]string{
- alertCloseNotify: "close notify",
- alertUnexpectedMessage: "unexpected message",
- alertBadRecordMAC: "bad record MAC",
- alertDecryptionFailed: "decryption failed",
- alertRecordOverflow: "record overflow",
- alertDecompressionFailure: "decompression failure",
- alertHandshakeFailure: "handshake failure",
- alertBadCertificate: "bad certificate",
- alertUnsupportedCertificate: "unsupported certificate",
- alertCertificateRevoked: "revoked certificate",
- alertCertificateExpired: "expired certificate",
- alertCertificateUnknown: "unknown certificate",
- alertIllegalParameter: "illegal parameter",
- alertUnknownCA: "unknown certificate authority",
- alertAccessDenied: "access denied",
- alertDecodeError: "error decoding message",
- alertDecryptError: "error decrypting message",
- alertExportRestriction: "export restriction",
- alertProtocolVersion: "protocol version not supported",
- alertInsufficientSecurity: "insufficient security level",
- alertInternalError: "internal error",
- alertInappropriateFallback: "inappropriate fallback",
- alertUserCanceled: "user canceled",
- alertNoRenegotiation: "no renegotiation",
- alertMissingExtension: "missing extension",
- alertUnsupportedExtension: "unsupported extension",
- alertCertificateUnobtainable: "certificate unobtainable",
- alertUnrecognizedName: "unrecognized name",
- alertBadCertificateStatusResponse: "bad certificate status response",
- alertBadCertificateHashValue: "bad certificate hash value",
- alertUnknownPSKIdentity: "unknown PSK identity",
- alertCertificateRequired: "certificate required",
- alertNoApplicationProtocol: "no application protocol",
-}
-
-func (e alert) String() string {
- s, ok := alertText[e]
- if ok {
- return "tls: " + s
- }
- return "tls: alert(" + strconv.Itoa(int(e)) + ")"
-}
-
-func (e alert) Error() string {
- return e.String()
-}
diff --git a/vendor/github.com/marten-seemann/qtls-go1-18/auth.go b/vendor/github.com/marten-seemann/qtls-go1-18/auth.go
deleted file mode 100644
index 1ef675fd3..000000000
--- a/vendor/github.com/marten-seemann/qtls-go1-18/auth.go
+++ /dev/null
@@ -1,289 +0,0 @@
-// Copyright 2017 The Go Authors. All rights reserved.
-// Use of this source code is governed by a BSD-style
-// license that can be found in the LICENSE file.
-
-package qtls
-
-import (
- "bytes"
- "crypto"
- "crypto/ecdsa"
- "crypto/ed25519"
- "crypto/elliptic"
- "crypto/rsa"
- "errors"
- "fmt"
- "hash"
- "io"
-)
-
-// verifyHandshakeSignature verifies a signature against pre-hashed
-// (if required) handshake contents.
-func verifyHandshakeSignature(sigType uint8, pubkey crypto.PublicKey, hashFunc crypto.Hash, signed, sig []byte) error {
- switch sigType {
- case signatureECDSA:
- pubKey, ok := pubkey.(*ecdsa.PublicKey)
- if !ok {
- return fmt.Errorf("expected an ECDSA public key, got %T", pubkey)
- }
- if !ecdsa.VerifyASN1(pubKey, signed, sig) {
- return errors.New("ECDSA verification failure")
- }
- case signatureEd25519:
- pubKey, ok := pubkey.(ed25519.PublicKey)
- if !ok {
- return fmt.Errorf("expected an Ed25519 public key, got %T", pubkey)
- }
- if !ed25519.Verify(pubKey, signed, sig) {
- return errors.New("Ed25519 verification failure")
- }
- case signaturePKCS1v15:
- pubKey, ok := pubkey.(*rsa.PublicKey)
- if !ok {
- return fmt.Errorf("expected an RSA public key, got %T", pubkey)
- }
- if err := rsa.VerifyPKCS1v15(pubKey, hashFunc, signed, sig); err != nil {
- return err
- }
- case signatureRSAPSS:
- pubKey, ok := pubkey.(*rsa.PublicKey)
- if !ok {
- return fmt.Errorf("expected an RSA public key, got %T", pubkey)
- }
- signOpts := &rsa.PSSOptions{SaltLength: rsa.PSSSaltLengthEqualsHash}
- if err := rsa.VerifyPSS(pubKey, hashFunc, signed, sig, signOpts); err != nil {
- return err
- }
- default:
- return errors.New("internal error: unknown signature type")
- }
- return nil
-}
-
-const (
- serverSignatureContext = "TLS 1.3, server CertificateVerify\x00"
- clientSignatureContext = "TLS 1.3, client CertificateVerify\x00"
-)
-
-var signaturePadding = []byte{
- 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20,
- 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20,
- 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20,
- 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20,
- 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20,
- 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20,
- 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20,
- 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20,
-}
-
-// signedMessage returns the pre-hashed (if necessary) message to be signed by
-// certificate keys in TLS 1.3. See RFC 8446, Section 4.4.3.
-func signedMessage(sigHash crypto.Hash, context string, transcript hash.Hash) []byte {
- if sigHash == directSigning {
- b := &bytes.Buffer{}
- b.Write(signaturePadding)
- io.WriteString(b, context)
- b.Write(transcript.Sum(nil))
- return b.Bytes()
- }
- h := sigHash.New()
- h.Write(signaturePadding)
- io.WriteString(h, context)
- h.Write(transcript.Sum(nil))
- return h.Sum(nil)
-}
-
-// typeAndHashFromSignatureScheme returns the corresponding signature type and
-// crypto.Hash for a given TLS SignatureScheme.
-func typeAndHashFromSignatureScheme(signatureAlgorithm SignatureScheme) (sigType uint8, hash crypto.Hash, err error) {
- switch signatureAlgorithm {
- case PKCS1WithSHA1, PKCS1WithSHA256, PKCS1WithSHA384, PKCS1WithSHA512:
- sigType = signaturePKCS1v15
- case PSSWithSHA256, PSSWithSHA384, PSSWithSHA512:
- sigType = signatureRSAPSS
- case ECDSAWithSHA1, ECDSAWithP256AndSHA256, ECDSAWithP384AndSHA384, ECDSAWithP521AndSHA512:
- sigType = signatureECDSA
- case Ed25519:
- sigType = signatureEd25519
- default:
- return 0, 0, fmt.Errorf("unsupported signature algorithm: %v", signatureAlgorithm)
- }
- switch signatureAlgorithm {
- case PKCS1WithSHA1, ECDSAWithSHA1:
- hash = crypto.SHA1
- case PKCS1WithSHA256, PSSWithSHA256, ECDSAWithP256AndSHA256:
- hash = crypto.SHA256
- case PKCS1WithSHA384, PSSWithSHA384, ECDSAWithP384AndSHA384:
- hash = crypto.SHA384
- case PKCS1WithSHA512, PSSWithSHA512, ECDSAWithP521AndSHA512:
- hash = crypto.SHA512
- case Ed25519:
- hash = directSigning
- default:
- return 0, 0, fmt.Errorf("unsupported signature algorithm: %v", signatureAlgorithm)
- }
- return sigType, hash, nil
-}
-
-// legacyTypeAndHashFromPublicKey returns the fixed signature type and crypto.Hash for
-// a given public key used with TLS 1.0 and 1.1, before the introduction of
-// signature algorithm negotiation.
-func legacyTypeAndHashFromPublicKey(pub crypto.PublicKey) (sigType uint8, hash crypto.Hash, err error) {
- switch pub.(type) {
- case *rsa.PublicKey:
- return signaturePKCS1v15, crypto.MD5SHA1, nil
- case *ecdsa.PublicKey:
- return signatureECDSA, crypto.SHA1, nil
- case ed25519.PublicKey:
- // RFC 8422 specifies support for Ed25519 in TLS 1.0 and 1.1,
- // but it requires holding on to a handshake transcript to do a
- // full signature, and not even OpenSSL bothers with the
- // complexity, so we can't even test it properly.
- return 0, 0, fmt.Errorf("tls: Ed25519 public keys are not supported before TLS 1.2")
- default:
- return 0, 0, fmt.Errorf("tls: unsupported public key: %T", pub)
- }
-}
-
-var rsaSignatureSchemes = []struct {
- scheme SignatureScheme
- minModulusBytes int
- maxVersion uint16
-}{
- // RSA-PSS is used with PSSSaltLengthEqualsHash, and requires
- // emLen >= hLen + sLen + 2
- {PSSWithSHA256, crypto.SHA256.Size()*2 + 2, VersionTLS13},
- {PSSWithSHA384, crypto.SHA384.Size()*2 + 2, VersionTLS13},
- {PSSWithSHA512, crypto.SHA512.Size()*2 + 2, VersionTLS13},
- // PKCS #1 v1.5 uses prefixes from hashPrefixes in crypto/rsa, and requires
- // emLen >= len(prefix) + hLen + 11
- // TLS 1.3 dropped support for PKCS #1 v1.5 in favor of RSA-PSS.
- {PKCS1WithSHA256, 19 + crypto.SHA256.Size() + 11, VersionTLS12},
- {PKCS1WithSHA384, 19 + crypto.SHA384.Size() + 11, VersionTLS12},
- {PKCS1WithSHA512, 19 + crypto.SHA512.Size() + 11, VersionTLS12},
- {PKCS1WithSHA1, 15 + crypto.SHA1.Size() + 11, VersionTLS12},
-}
-
-// signatureSchemesForCertificate returns the list of supported SignatureSchemes
-// for a given certificate, based on the public key and the protocol version,
-// and optionally filtered by its explicit SupportedSignatureAlgorithms.
-//
-// This function must be kept in sync with supportedSignatureAlgorithms.
-func signatureSchemesForCertificate(version uint16, cert *Certificate) []SignatureScheme {
- priv, ok := cert.PrivateKey.(crypto.Signer)
- if !ok {
- return nil
- }
-
- var sigAlgs []SignatureScheme
- switch pub := priv.Public().(type) {
- case *ecdsa.PublicKey:
- if version != VersionTLS13 {
- // In TLS 1.2 and earlier, ECDSA algorithms are not
- // constrained to a single curve.
- sigAlgs = []SignatureScheme{
- ECDSAWithP256AndSHA256,
- ECDSAWithP384AndSHA384,
- ECDSAWithP521AndSHA512,
- ECDSAWithSHA1,
- }
- break
- }
- switch pub.Curve {
- case elliptic.P256():
- sigAlgs = []SignatureScheme{ECDSAWithP256AndSHA256}
- case elliptic.P384():
- sigAlgs = []SignatureScheme{ECDSAWithP384AndSHA384}
- case elliptic.P521():
- sigAlgs = []SignatureScheme{ECDSAWithP521AndSHA512}
- default:
- return nil
- }
- case *rsa.PublicKey:
- size := pub.Size()
- sigAlgs = make([]SignatureScheme, 0, len(rsaSignatureSchemes))
- for _, candidate := range rsaSignatureSchemes {
- if size >= candidate.minModulusBytes && version <= candidate.maxVersion {
- sigAlgs = append(sigAlgs, candidate.scheme)
- }
- }
- case ed25519.PublicKey:
- sigAlgs = []SignatureScheme{Ed25519}
- default:
- return nil
- }
-
- if cert.SupportedSignatureAlgorithms != nil {
- var filteredSigAlgs []SignatureScheme
- for _, sigAlg := range sigAlgs {
- if isSupportedSignatureAlgorithm(sigAlg, cert.SupportedSignatureAlgorithms) {
- filteredSigAlgs = append(filteredSigAlgs, sigAlg)
- }
- }
- return filteredSigAlgs
- }
- return sigAlgs
-}
-
-// selectSignatureScheme picks a SignatureScheme from the peer's preference list
-// that works with the selected certificate. It's only called for protocol
-// versions that support signature algorithms, so TLS 1.2 and 1.3.
-func selectSignatureScheme(vers uint16, c *Certificate, peerAlgs []SignatureScheme) (SignatureScheme, error) {
- supportedAlgs := signatureSchemesForCertificate(vers, c)
- if len(supportedAlgs) == 0 {
- return 0, unsupportedCertificateError(c)
- }
- if len(peerAlgs) == 0 && vers == VersionTLS12 {
- // For TLS 1.2, if the client didn't send signature_algorithms then we
- // can assume that it supports SHA1. See RFC 5246, Section 7.4.1.4.1.
- peerAlgs = []SignatureScheme{PKCS1WithSHA1, ECDSAWithSHA1}
- }
- // Pick signature scheme in the peer's preference order, as our
- // preference order is not configurable.
- for _, preferredAlg := range peerAlgs {
- if isSupportedSignatureAlgorithm(preferredAlg, supportedAlgs) {
- return preferredAlg, nil
- }
- }
- return 0, errors.New("tls: peer doesn't support any of the certificate's signature algorithms")
-}
-
-// unsupportedCertificateError returns a helpful error for certificates with
-// an unsupported private key.
-func unsupportedCertificateError(cert *Certificate) error {
- switch cert.PrivateKey.(type) {
- case rsa.PrivateKey, ecdsa.PrivateKey:
- return fmt.Errorf("tls: unsupported certificate: private key is %T, expected *%T",
- cert.PrivateKey, cert.PrivateKey)
- case *ed25519.PrivateKey:
- return fmt.Errorf("tls: unsupported certificate: private key is *ed25519.PrivateKey, expected ed25519.PrivateKey")
- }
-
- signer, ok := cert.PrivateKey.(crypto.Signer)
- if !ok {
- return fmt.Errorf("tls: certificate private key (%T) does not implement crypto.Signer",
- cert.PrivateKey)
- }
-
- switch pub := signer.Public().(type) {
- case *ecdsa.PublicKey:
- switch pub.Curve {
- case elliptic.P256():
- case elliptic.P384():
- case elliptic.P521():
- default:
- return fmt.Errorf("tls: unsupported certificate curve (%s)", pub.Curve.Params().Name)
- }
- case *rsa.PublicKey:
- return fmt.Errorf("tls: certificate RSA key size too small for supported signature algorithms")
- case ed25519.PublicKey:
- default:
- return fmt.Errorf("tls: unsupported certificate key (%T)", pub)
- }
-
- if cert.SupportedSignatureAlgorithms != nil {
- return fmt.Errorf("tls: peer doesn't support the certificate custom signature algorithms")
- }
-
- return fmt.Errorf("tls: internal error: unsupported key (%T)", cert.PrivateKey)
-}
diff --git a/vendor/github.com/marten-seemann/qtls-go1-18/handshake_client_tls13.go b/vendor/github.com/marten-seemann/qtls-go1-18/handshake_client_tls13.go
deleted file mode 100644
index 0de59fc1e..000000000
--- a/vendor/github.com/marten-seemann/qtls-go1-18/handshake_client_tls13.go
+++ /dev/null
@@ -1,732 +0,0 @@
-// Copyright 2018 The Go Authors. All rights reserved.
-// Use of this source code is governed by a BSD-style
-// license that can be found in the LICENSE file.
-
-package qtls
-
-import (
- "bytes"
- "context"
- "crypto"
- "crypto/hmac"
- "crypto/rsa"
- "encoding/binary"
- "errors"
- "hash"
- "sync/atomic"
- "time"
-
- "golang.org/x/crypto/cryptobyte"
-)
-
-type clientHandshakeStateTLS13 struct {
- c *Conn
- ctx context.Context
- serverHello *serverHelloMsg
- hello *clientHelloMsg
- ecdheParams ecdheParameters
-
- session *clientSessionState
- earlySecret []byte
- binderKey []byte
-
- certReq *certificateRequestMsgTLS13
- usingPSK bool
- sentDummyCCS bool
- suite *cipherSuiteTLS13
- transcript hash.Hash
- masterSecret []byte
- trafficSecret []byte // client_application_traffic_secret_0
-}
-
-// handshake requires hs.c, hs.hello, hs.serverHello, hs.ecdheParams, and,
-// optionally, hs.session, hs.earlySecret and hs.binderKey to be set.
-func (hs *clientHandshakeStateTLS13) handshake() error {
- c := hs.c
-
- // The server must not select TLS 1.3 in a renegotiation. See RFC 8446,
- // sections 4.1.2 and 4.1.3.
- if c.handshakes > 0 {
- c.sendAlert(alertProtocolVersion)
- return errors.New("tls: server selected TLS 1.3 in a renegotiation")
- }
-
- // Consistency check on the presence of a keyShare and its parameters.
- if hs.ecdheParams == nil || len(hs.hello.keyShares) != 1 {
- return c.sendAlert(alertInternalError)
- }
-
- if err := hs.checkServerHelloOrHRR(); err != nil {
- return err
- }
-
- hs.transcript = hs.suite.hash.New()
- hs.transcript.Write(hs.hello.marshal())
-
- if bytes.Equal(hs.serverHello.random, helloRetryRequestRandom) {
- if err := hs.sendDummyChangeCipherSpec(); err != nil {
- return err
- }
- if err := hs.processHelloRetryRequest(); err != nil {
- return err
- }
- }
-
- hs.transcript.Write(hs.serverHello.marshal())
-
- c.buffering = true
- if err := hs.processServerHello(); err != nil {
- return err
- }
- if err := hs.sendDummyChangeCipherSpec(); err != nil {
- return err
- }
- if err := hs.establishHandshakeKeys(); err != nil {
- return err
- }
- if err := hs.readServerParameters(); err != nil {
- return err
- }
- if err := hs.readServerCertificate(); err != nil {
- return err
- }
- if err := hs.readServerFinished(); err != nil {
- return err
- }
- if err := hs.sendClientCertificate(); err != nil {
- return err
- }
- if err := hs.sendClientFinished(); err != nil {
- return err
- }
- if _, err := c.flush(); err != nil {
- return err
- }
-
- atomic.StoreUint32(&c.handshakeStatus, 1)
-
- return nil
-}
-
-// checkServerHelloOrHRR does validity checks that apply to both ServerHello and
-// HelloRetryRequest messages. It sets hs.suite.
-func (hs *clientHandshakeStateTLS13) checkServerHelloOrHRR() error {
- c := hs.c
-
- if hs.serverHello.supportedVersion == 0 {
- c.sendAlert(alertMissingExtension)
- return errors.New("tls: server selected TLS 1.3 using the legacy version field")
- }
-
- if hs.serverHello.supportedVersion != VersionTLS13 {
- c.sendAlert(alertIllegalParameter)
- return errors.New("tls: server selected an invalid version after a HelloRetryRequest")
- }
-
- if hs.serverHello.vers != VersionTLS12 {
- c.sendAlert(alertIllegalParameter)
- return errors.New("tls: server sent an incorrect legacy version")
- }
-
- if hs.serverHello.ocspStapling ||
- hs.serverHello.ticketSupported ||
- hs.serverHello.secureRenegotiationSupported ||
- len(hs.serverHello.secureRenegotiation) != 0 ||
- len(hs.serverHello.alpnProtocol) != 0 ||
- len(hs.serverHello.scts) != 0 {
- c.sendAlert(alertUnsupportedExtension)
- return errors.New("tls: server sent a ServerHello extension forbidden in TLS 1.3")
- }
-
- if !bytes.Equal(hs.hello.sessionId, hs.serverHello.sessionId) {
- c.sendAlert(alertIllegalParameter)
- return errors.New("tls: server did not echo the legacy session ID")
- }
-
- if hs.serverHello.compressionMethod != compressionNone {
- c.sendAlert(alertIllegalParameter)
- return errors.New("tls: server selected unsupported compression format")
- }
-
- selectedSuite := mutualCipherSuiteTLS13(hs.hello.cipherSuites, hs.serverHello.cipherSuite)
- if hs.suite != nil && selectedSuite != hs.suite {
- c.sendAlert(alertIllegalParameter)
- return errors.New("tls: server changed cipher suite after a HelloRetryRequest")
- }
- if selectedSuite == nil {
- c.sendAlert(alertIllegalParameter)
- return errors.New("tls: server chose an unconfigured cipher suite")
- }
- hs.suite = selectedSuite
- c.cipherSuite = hs.suite.id
-
- return nil
-}
-
-// sendDummyChangeCipherSpec sends a ChangeCipherSpec record for compatibility
-// with middleboxes that didn't implement TLS correctly. See RFC 8446, Appendix D.4.
-func (hs *clientHandshakeStateTLS13) sendDummyChangeCipherSpec() error {
- if hs.sentDummyCCS {
- return nil
- }
- hs.sentDummyCCS = true
-
- _, err := hs.c.writeRecord(recordTypeChangeCipherSpec, []byte{1})
- return err
-}
-
-// processHelloRetryRequest handles the HRR in hs.serverHello, modifies and
-// resends hs.hello, and reads the new ServerHello into hs.serverHello.
-func (hs *clientHandshakeStateTLS13) processHelloRetryRequest() error {
- c := hs.c
-
- // The first ClientHello gets double-hashed into the transcript upon a
- // HelloRetryRequest. (The idea is that the server might offload transcript
- // storage to the client in the cookie.) See RFC 8446, Section 4.4.1.
- chHash := hs.transcript.Sum(nil)
- hs.transcript.Reset()
- hs.transcript.Write([]byte{typeMessageHash, 0, 0, uint8(len(chHash))})
- hs.transcript.Write(chHash)
- hs.transcript.Write(hs.serverHello.marshal())
-
- // The only HelloRetryRequest extensions we support are key_share and
- // cookie, and clients must abort the handshake if the HRR would not result
- // in any change in the ClientHello.
- if hs.serverHello.selectedGroup == 0 && hs.serverHello.cookie == nil {
- c.sendAlert(alertIllegalParameter)
- return errors.New("tls: server sent an unnecessary HelloRetryRequest message")
- }
-
- if hs.serverHello.cookie != nil {
- hs.hello.cookie = hs.serverHello.cookie
- }
-
- if hs.serverHello.serverShare.group != 0 {
- c.sendAlert(alertDecodeError)
- return errors.New("tls: received malformed key_share extension")
- }
-
- // If the server sent a key_share extension selecting a group, ensure it's
- // a group we advertised but did not send a key share for, and send a key
- // share for it this time.
- if curveID := hs.serverHello.selectedGroup; curveID != 0 {
- curveOK := false
- for _, id := range hs.hello.supportedCurves {
- if id == curveID {
- curveOK = true
- break
- }
- }
- if !curveOK {
- c.sendAlert(alertIllegalParameter)
- return errors.New("tls: server selected unsupported group")
- }
- if hs.ecdheParams.CurveID() == curveID {
- c.sendAlert(alertIllegalParameter)
- return errors.New("tls: server sent an unnecessary HelloRetryRequest key_share")
- }
- if _, ok := curveForCurveID(curveID); curveID != X25519 && !ok {
- c.sendAlert(alertInternalError)
- return errors.New("tls: CurvePreferences includes unsupported curve")
- }
- params, err := generateECDHEParameters(c.config.rand(), curveID)
- if err != nil {
- c.sendAlert(alertInternalError)
- return err
- }
- hs.ecdheParams = params
- hs.hello.keyShares = []keyShare{{group: curveID, data: params.PublicKey()}}
- }
-
- hs.hello.raw = nil
- if len(hs.hello.pskIdentities) > 0 {
- pskSuite := cipherSuiteTLS13ByID(hs.session.cipherSuite)
- if pskSuite == nil {
- return c.sendAlert(alertInternalError)
- }
- if pskSuite.hash == hs.suite.hash {
- // Update binders and obfuscated_ticket_age.
- ticketAge := uint32(c.config.time().Sub(hs.session.receivedAt) / time.Millisecond)
- hs.hello.pskIdentities[0].obfuscatedTicketAge = ticketAge + hs.session.ageAdd
-
- transcript := hs.suite.hash.New()
- transcript.Write([]byte{typeMessageHash, 0, 0, uint8(len(chHash))})
- transcript.Write(chHash)
- transcript.Write(hs.serverHello.marshal())
- transcript.Write(hs.hello.marshalWithoutBinders())
- pskBinders := [][]byte{hs.suite.finishedHash(hs.binderKey, transcript)}
- hs.hello.updateBinders(pskBinders)
- } else {
- // Server selected a cipher suite incompatible with the PSK.
- hs.hello.pskIdentities = nil
- hs.hello.pskBinders = nil
- }
- }
-
- if hs.hello.earlyData && c.extraConfig != nil && c.extraConfig.Rejected0RTT != nil {
- c.extraConfig.Rejected0RTT()
- }
- hs.hello.earlyData = false // disable 0-RTT
-
- hs.transcript.Write(hs.hello.marshal())
- if _, err := c.writeRecord(recordTypeHandshake, hs.hello.marshal()); err != nil {
- return err
- }
-
- msg, err := c.readHandshake()
- if err != nil {
- return err
- }
-
- serverHello, ok := msg.(*serverHelloMsg)
- if !ok {
- c.sendAlert(alertUnexpectedMessage)
- return unexpectedMessageError(serverHello, msg)
- }
- hs.serverHello = serverHello
-
- if err := hs.checkServerHelloOrHRR(); err != nil {
- return err
- }
-
- return nil
-}
-
-func (hs *clientHandshakeStateTLS13) processServerHello() error {
- c := hs.c
-
- if bytes.Equal(hs.serverHello.random, helloRetryRequestRandom) {
- c.sendAlert(alertUnexpectedMessage)
- return errors.New("tls: server sent two HelloRetryRequest messages")
- }
-
- if len(hs.serverHello.cookie) != 0 {
- c.sendAlert(alertUnsupportedExtension)
- return errors.New("tls: server sent a cookie in a normal ServerHello")
- }
-
- if hs.serverHello.selectedGroup != 0 {
- c.sendAlert(alertDecodeError)
- return errors.New("tls: malformed key_share extension")
- }
-
- if hs.serverHello.serverShare.group == 0 {
- c.sendAlert(alertIllegalParameter)
- return errors.New("tls: server did not send a key share")
- }
- if hs.serverHello.serverShare.group != hs.ecdheParams.CurveID() {
- c.sendAlert(alertIllegalParameter)
- return errors.New("tls: server selected unsupported group")
- }
-
- if !hs.serverHello.selectedIdentityPresent {
- return nil
- }
-
- if int(hs.serverHello.selectedIdentity) >= len(hs.hello.pskIdentities) {
- c.sendAlert(alertIllegalParameter)
- return errors.New("tls: server selected an invalid PSK")
- }
-
- if len(hs.hello.pskIdentities) != 1 || hs.session == nil {
- return c.sendAlert(alertInternalError)
- }
- pskSuite := cipherSuiteTLS13ByID(hs.session.cipherSuite)
- if pskSuite == nil {
- return c.sendAlert(alertInternalError)
- }
- if pskSuite.hash != hs.suite.hash {
- c.sendAlert(alertIllegalParameter)
- return errors.New("tls: server selected an invalid PSK and cipher suite pair")
- }
-
- hs.usingPSK = true
- c.didResume = true
- c.peerCertificates = hs.session.serverCertificates
- c.verifiedChains = hs.session.verifiedChains
- c.ocspResponse = hs.session.ocspResponse
- c.scts = hs.session.scts
- return nil
-}
-
-func (hs *clientHandshakeStateTLS13) establishHandshakeKeys() error {
- c := hs.c
-
- sharedKey := hs.ecdheParams.SharedKey(hs.serverHello.serverShare.data)
- if sharedKey == nil {
- c.sendAlert(alertIllegalParameter)
- return errors.New("tls: invalid server key share")
- }
-
- earlySecret := hs.earlySecret
- if !hs.usingPSK {
- earlySecret = hs.suite.extract(nil, nil)
- }
- handshakeSecret := hs.suite.extract(sharedKey,
- hs.suite.deriveSecret(earlySecret, "derived", nil))
-
- clientSecret := hs.suite.deriveSecret(handshakeSecret,
- clientHandshakeTrafficLabel, hs.transcript)
- c.out.exportKey(EncryptionHandshake, hs.suite, clientSecret)
- c.out.setTrafficSecret(hs.suite, clientSecret)
- serverSecret := hs.suite.deriveSecret(handshakeSecret,
- serverHandshakeTrafficLabel, hs.transcript)
- c.in.exportKey(EncryptionHandshake, hs.suite, serverSecret)
- c.in.setTrafficSecret(hs.suite, serverSecret)
-
- err := c.config.writeKeyLog(keyLogLabelClientHandshake, hs.hello.random, clientSecret)
- if err != nil {
- c.sendAlert(alertInternalError)
- return err
- }
- err = c.config.writeKeyLog(keyLogLabelServerHandshake, hs.hello.random, serverSecret)
- if err != nil {
- c.sendAlert(alertInternalError)
- return err
- }
-
- hs.masterSecret = hs.suite.extract(nil,
- hs.suite.deriveSecret(handshakeSecret, "derived", nil))
-
- return nil
-}
-
-func (hs *clientHandshakeStateTLS13) readServerParameters() error {
- c := hs.c
-
- msg, err := c.readHandshake()
- if err != nil {
- return err
- }
-
- encryptedExtensions, ok := msg.(*encryptedExtensionsMsg)
- if !ok {
- c.sendAlert(alertUnexpectedMessage)
- return unexpectedMessageError(encryptedExtensions, msg)
- }
- // Notify the caller if 0-RTT was rejected.
- if !encryptedExtensions.earlyData && hs.hello.earlyData && c.extraConfig != nil && c.extraConfig.Rejected0RTT != nil {
- c.extraConfig.Rejected0RTT()
- }
- c.used0RTT = encryptedExtensions.earlyData
- if hs.c.extraConfig != nil && hs.c.extraConfig.ReceivedExtensions != nil {
- hs.c.extraConfig.ReceivedExtensions(typeEncryptedExtensions, encryptedExtensions.additionalExtensions)
- }
- hs.transcript.Write(encryptedExtensions.marshal())
-
- if err := checkALPN(hs.hello.alpnProtocols, encryptedExtensions.alpnProtocol); err != nil {
- c.sendAlert(alertUnsupportedExtension)
- return err
- }
- c.clientProtocol = encryptedExtensions.alpnProtocol
-
- if c.extraConfig != nil && c.extraConfig.EnforceNextProtoSelection {
- if len(encryptedExtensions.alpnProtocol) == 0 {
- // the server didn't select an ALPN
- c.sendAlert(alertNoApplicationProtocol)
- return errors.New("ALPN negotiation failed. Server didn't offer any protocols")
- }
- }
- return nil
-}
-
-func (hs *clientHandshakeStateTLS13) readServerCertificate() error {
- c := hs.c
-
- // Either a PSK or a certificate is always used, but not both.
- // See RFC 8446, Section 4.1.1.
- if hs.usingPSK {
- // Make sure the connection is still being verified whether or not this
- // is a resumption. Resumptions currently don't reverify certificates so
- // they don't call verifyServerCertificate. See Issue 31641.
- if c.config.VerifyConnection != nil {
- if err := c.config.VerifyConnection(c.connectionStateLocked()); err != nil {
- c.sendAlert(alertBadCertificate)
- return err
- }
- }
- return nil
- }
-
- msg, err := c.readHandshake()
- if err != nil {
- return err
- }
-
- certReq, ok := msg.(*certificateRequestMsgTLS13)
- if ok {
- hs.transcript.Write(certReq.marshal())
-
- hs.certReq = certReq
-
- msg, err = c.readHandshake()
- if err != nil {
- return err
- }
- }
-
- certMsg, ok := msg.(*certificateMsgTLS13)
- if !ok {
- c.sendAlert(alertUnexpectedMessage)
- return unexpectedMessageError(certMsg, msg)
- }
- if len(certMsg.certificate.Certificate) == 0 {
- c.sendAlert(alertDecodeError)
- return errors.New("tls: received empty certificates message")
- }
- hs.transcript.Write(certMsg.marshal())
-
- c.scts = certMsg.certificate.SignedCertificateTimestamps
- c.ocspResponse = certMsg.certificate.OCSPStaple
-
- if err := c.verifyServerCertificate(certMsg.certificate.Certificate); err != nil {
- return err
- }
-
- msg, err = c.readHandshake()
- if err != nil {
- return err
- }
-
- certVerify, ok := msg.(*certificateVerifyMsg)
- if !ok {
- c.sendAlert(alertUnexpectedMessage)
- return unexpectedMessageError(certVerify, msg)
- }
-
- // See RFC 8446, Section 4.4.3.
- if !isSupportedSignatureAlgorithm(certVerify.signatureAlgorithm, supportedSignatureAlgorithms) {
- c.sendAlert(alertIllegalParameter)
- return errors.New("tls: certificate used with invalid signature algorithm")
- }
- sigType, sigHash, err := typeAndHashFromSignatureScheme(certVerify.signatureAlgorithm)
- if err != nil {
- return c.sendAlert(alertInternalError)
- }
- if sigType == signaturePKCS1v15 || sigHash == crypto.SHA1 {
- c.sendAlert(alertIllegalParameter)
- return errors.New("tls: certificate used with invalid signature algorithm")
- }
- signed := signedMessage(sigHash, serverSignatureContext, hs.transcript)
- if err := verifyHandshakeSignature(sigType, c.peerCertificates[0].PublicKey,
- sigHash, signed, certVerify.signature); err != nil {
- c.sendAlert(alertDecryptError)
- return errors.New("tls: invalid signature by the server certificate: " + err.Error())
- }
-
- hs.transcript.Write(certVerify.marshal())
-
- return nil
-}
-
-func (hs *clientHandshakeStateTLS13) readServerFinished() error {
- c := hs.c
-
- msg, err := c.readHandshake()
- if err != nil {
- return err
- }
-
- finished, ok := msg.(*finishedMsg)
- if !ok {
- c.sendAlert(alertUnexpectedMessage)
- return unexpectedMessageError(finished, msg)
- }
-
- expectedMAC := hs.suite.finishedHash(c.in.trafficSecret, hs.transcript)
- if !hmac.Equal(expectedMAC, finished.verifyData) {
- c.sendAlert(alertDecryptError)
- return errors.New("tls: invalid server finished hash")
- }
-
- hs.transcript.Write(finished.marshal())
-
- // Derive secrets that take context through the server Finished.
-
- hs.trafficSecret = hs.suite.deriveSecret(hs.masterSecret,
- clientApplicationTrafficLabel, hs.transcript)
- serverSecret := hs.suite.deriveSecret(hs.masterSecret,
- serverApplicationTrafficLabel, hs.transcript)
- c.in.exportKey(EncryptionApplication, hs.suite, serverSecret)
- c.in.setTrafficSecret(hs.suite, serverSecret)
-
- err = c.config.writeKeyLog(keyLogLabelClientTraffic, hs.hello.random, hs.trafficSecret)
- if err != nil {
- c.sendAlert(alertInternalError)
- return err
- }
- err = c.config.writeKeyLog(keyLogLabelServerTraffic, hs.hello.random, serverSecret)
- if err != nil {
- c.sendAlert(alertInternalError)
- return err
- }
-
- c.ekm = hs.suite.exportKeyingMaterial(hs.masterSecret, hs.transcript)
-
- return nil
-}
-
-func (hs *clientHandshakeStateTLS13) sendClientCertificate() error {
- c := hs.c
-
- if hs.certReq == nil {
- return nil
- }
-
- cert, err := c.getClientCertificate(toCertificateRequestInfo(&certificateRequestInfo{
- AcceptableCAs: hs.certReq.certificateAuthorities,
- SignatureSchemes: hs.certReq.supportedSignatureAlgorithms,
- Version: c.vers,
- ctx: hs.ctx,
- }))
- if err != nil {
- return err
- }
-
- certMsg := new(certificateMsgTLS13)
-
- certMsg.certificate = *cert
- certMsg.scts = hs.certReq.scts && len(cert.SignedCertificateTimestamps) > 0
- certMsg.ocspStapling = hs.certReq.ocspStapling && len(cert.OCSPStaple) > 0
-
- hs.transcript.Write(certMsg.marshal())
- if _, err := c.writeRecord(recordTypeHandshake, certMsg.marshal()); err != nil {
- return err
- }
-
- // If we sent an empty certificate message, skip the CertificateVerify.
- if len(cert.Certificate) == 0 {
- return nil
- }
-
- certVerifyMsg := new(certificateVerifyMsg)
- certVerifyMsg.hasSignatureAlgorithm = true
-
- certVerifyMsg.signatureAlgorithm, err = selectSignatureScheme(c.vers, cert, hs.certReq.supportedSignatureAlgorithms)
- if err != nil {
- // getClientCertificate returned a certificate incompatible with the
- // CertificateRequestInfo supported signature algorithms.
- c.sendAlert(alertHandshakeFailure)
- return err
- }
-
- sigType, sigHash, err := typeAndHashFromSignatureScheme(certVerifyMsg.signatureAlgorithm)
- if err != nil {
- return c.sendAlert(alertInternalError)
- }
-
- signed := signedMessage(sigHash, clientSignatureContext, hs.transcript)
- signOpts := crypto.SignerOpts(sigHash)
- if sigType == signatureRSAPSS {
- signOpts = &rsa.PSSOptions{SaltLength: rsa.PSSSaltLengthEqualsHash, Hash: sigHash}
- }
- sig, err := cert.PrivateKey.(crypto.Signer).Sign(c.config.rand(), signed, signOpts)
- if err != nil {
- c.sendAlert(alertInternalError)
- return errors.New("tls: failed to sign handshake: " + err.Error())
- }
- certVerifyMsg.signature = sig
-
- hs.transcript.Write(certVerifyMsg.marshal())
- if _, err := c.writeRecord(recordTypeHandshake, certVerifyMsg.marshal()); err != nil {
- return err
- }
-
- return nil
-}
-
-func (hs *clientHandshakeStateTLS13) sendClientFinished() error {
- c := hs.c
-
- finished := &finishedMsg{
- verifyData: hs.suite.finishedHash(c.out.trafficSecret, hs.transcript),
- }
-
- hs.transcript.Write(finished.marshal())
- if _, err := c.writeRecord(recordTypeHandshake, finished.marshal()); err != nil {
- return err
- }
-
- c.out.exportKey(EncryptionApplication, hs.suite, hs.trafficSecret)
- c.out.setTrafficSecret(hs.suite, hs.trafficSecret)
-
- if !c.config.SessionTicketsDisabled && c.config.ClientSessionCache != nil {
- c.resumptionSecret = hs.suite.deriveSecret(hs.masterSecret,
- resumptionLabel, hs.transcript)
- }
-
- return nil
-}
-
-func (c *Conn) handleNewSessionTicket(msg *newSessionTicketMsgTLS13) error {
- if !c.isClient {
- c.sendAlert(alertUnexpectedMessage)
- return errors.New("tls: received new session ticket from a client")
- }
-
- if c.config.SessionTicketsDisabled || c.config.ClientSessionCache == nil {
- return nil
- }
-
- // See RFC 8446, Section 4.6.1.
- if msg.lifetime == 0 {
- return nil
- }
- lifetime := time.Duration(msg.lifetime) * time.Second
- if lifetime > maxSessionTicketLifetime {
- c.sendAlert(alertIllegalParameter)
- return errors.New("tls: received a session ticket with invalid lifetime")
- }
-
- cipherSuite := cipherSuiteTLS13ByID(c.cipherSuite)
- if cipherSuite == nil || c.resumptionSecret == nil {
- return c.sendAlert(alertInternalError)
- }
-
- // We need to save the max_early_data_size that the server sent us, in order
- // to decide if we're going to try 0-RTT with this ticket.
- // However, at the same time, the qtls.ClientSessionTicket needs to be equal to
- // the tls.ClientSessionTicket, so we can't just add a new field to the struct.
- // We therefore abuse the nonce field (which is a byte slice)
- nonceWithEarlyData := make([]byte, len(msg.nonce)+4)
- binary.BigEndian.PutUint32(nonceWithEarlyData, msg.maxEarlyData)
- copy(nonceWithEarlyData[4:], msg.nonce)
-
- var appData []byte
- if c.extraConfig != nil && c.extraConfig.GetAppDataForSessionState != nil {
- appData = c.extraConfig.GetAppDataForSessionState()
- }
- var b cryptobyte.Builder
- b.AddUint16(clientSessionStateVersion) // revision
- b.AddUint32(msg.maxEarlyData)
- b.AddUint16LengthPrefixed(func(b *cryptobyte.Builder) {
- b.AddBytes(appData)
- })
- b.AddUint16LengthPrefixed(func(b *cryptobyte.Builder) {
- b.AddBytes(msg.nonce)
- })
-
- // Save the resumption_master_secret and nonce instead of deriving the PSK
- // to do the least amount of work on NewSessionTicket messages before we
- // know if the ticket will be used. Forward secrecy of resumed connections
- // is guaranteed by the requirement for pskModeDHE.
- session := &clientSessionState{
- sessionTicket: msg.label,
- vers: c.vers,
- cipherSuite: c.cipherSuite,
- masterSecret: c.resumptionSecret,
- serverCertificates: c.peerCertificates,
- verifiedChains: c.verifiedChains,
- receivedAt: c.config.time(),
- nonce: b.BytesOrPanic(),
- useBy: c.config.time().Add(lifetime),
- ageAdd: msg.ageAdd,
- ocspResponse: c.ocspResponse,
- scts: c.scts,
- }
-
- cacheKey := clientSessionCacheKey(c.conn.RemoteAddr(), c.config)
- c.config.ClientSessionCache.Put(cacheKey, toClientSessionState(session))
-
- return nil
-}
diff --git a/vendor/github.com/marten-seemann/qtls-go1-18/key_schedule.go b/vendor/github.com/marten-seemann/qtls-go1-18/key_schedule.go
deleted file mode 100644
index da13904a6..000000000
--- a/vendor/github.com/marten-seemann/qtls-go1-18/key_schedule.go
+++ /dev/null
@@ -1,199 +0,0 @@
-// Copyright 2018 The Go Authors. All rights reserved.
-// Use of this source code is governed by a BSD-style
-// license that can be found in the LICENSE file.
-
-package qtls
-
-import (
- "crypto/elliptic"
- "crypto/hmac"
- "errors"
- "hash"
- "io"
- "math/big"
-
- "golang.org/x/crypto/cryptobyte"
- "golang.org/x/crypto/curve25519"
- "golang.org/x/crypto/hkdf"
-)
-
-// This file contains the functions necessary to compute the TLS 1.3 key
-// schedule. See RFC 8446, Section 7.
-
-const (
- resumptionBinderLabel = "res binder"
- clientHandshakeTrafficLabel = "c hs traffic"
- serverHandshakeTrafficLabel = "s hs traffic"
- clientApplicationTrafficLabel = "c ap traffic"
- serverApplicationTrafficLabel = "s ap traffic"
- exporterLabel = "exp master"
- resumptionLabel = "res master"
- trafficUpdateLabel = "traffic upd"
-)
-
-// expandLabel implements HKDF-Expand-Label from RFC 8446, Section 7.1.
-func (c *cipherSuiteTLS13) expandLabel(secret []byte, label string, context []byte, length int) []byte {
- var hkdfLabel cryptobyte.Builder
- hkdfLabel.AddUint16(uint16(length))
- hkdfLabel.AddUint8LengthPrefixed(func(b *cryptobyte.Builder) {
- b.AddBytes([]byte("tls13 "))
- b.AddBytes([]byte(label))
- })
- hkdfLabel.AddUint8LengthPrefixed(func(b *cryptobyte.Builder) {
- b.AddBytes(context)
- })
- out := make([]byte, length)
- n, err := hkdf.Expand(c.hash.New, secret, hkdfLabel.BytesOrPanic()).Read(out)
- if err != nil || n != length {
- panic("tls: HKDF-Expand-Label invocation failed unexpectedly")
- }
- return out
-}
-
-// deriveSecret implements Derive-Secret from RFC 8446, Section 7.1.
-func (c *cipherSuiteTLS13) deriveSecret(secret []byte, label string, transcript hash.Hash) []byte {
- if transcript == nil {
- transcript = c.hash.New()
- }
- return c.expandLabel(secret, label, transcript.Sum(nil), c.hash.Size())
-}
-
-// extract implements HKDF-Extract with the cipher suite hash.
-func (c *cipherSuiteTLS13) extract(newSecret, currentSecret []byte) []byte {
- if newSecret == nil {
- newSecret = make([]byte, c.hash.Size())
- }
- return hkdf.Extract(c.hash.New, newSecret, currentSecret)
-}
-
-// nextTrafficSecret generates the next traffic secret, given the current one,
-// according to RFC 8446, Section 7.2.
-func (c *cipherSuiteTLS13) nextTrafficSecret(trafficSecret []byte) []byte {
- return c.expandLabel(trafficSecret, trafficUpdateLabel, nil, c.hash.Size())
-}
-
-// trafficKey generates traffic keys according to RFC 8446, Section 7.3.
-func (c *cipherSuiteTLS13) trafficKey(trafficSecret []byte) (key, iv []byte) {
- key = c.expandLabel(trafficSecret, "key", nil, c.keyLen)
- iv = c.expandLabel(trafficSecret, "iv", nil, aeadNonceLength)
- return
-}
-
-// finishedHash generates the Finished verify_data or PskBinderEntry according
-// to RFC 8446, Section 4.4.4. See sections 4.4 and 4.2.11.2 for the baseKey
-// selection.
-func (c *cipherSuiteTLS13) finishedHash(baseKey []byte, transcript hash.Hash) []byte {
- finishedKey := c.expandLabel(baseKey, "finished", nil, c.hash.Size())
- verifyData := hmac.New(c.hash.New, finishedKey)
- verifyData.Write(transcript.Sum(nil))
- return verifyData.Sum(nil)
-}
-
-// exportKeyingMaterial implements RFC5705 exporters for TLS 1.3 according to
-// RFC 8446, Section 7.5.
-func (c *cipherSuiteTLS13) exportKeyingMaterial(masterSecret []byte, transcript hash.Hash) func(string, []byte, int) ([]byte, error) {
- expMasterSecret := c.deriveSecret(masterSecret, exporterLabel, transcript)
- return func(label string, context []byte, length int) ([]byte, error) {
- secret := c.deriveSecret(expMasterSecret, label, nil)
- h := c.hash.New()
- h.Write(context)
- return c.expandLabel(secret, "exporter", h.Sum(nil), length), nil
- }
-}
-
-// ecdheParameters implements Diffie-Hellman with either NIST curves or X25519,
-// according to RFC 8446, Section 4.2.8.2.
-type ecdheParameters interface {
- CurveID() CurveID
- PublicKey() []byte
- SharedKey(peerPublicKey []byte) []byte
-}
-
-func generateECDHEParameters(rand io.Reader, curveID CurveID) (ecdheParameters, error) {
- if curveID == X25519 {
- privateKey := make([]byte, curve25519.ScalarSize)
- if _, err := io.ReadFull(rand, privateKey); err != nil {
- return nil, err
- }
- publicKey, err := curve25519.X25519(privateKey, curve25519.Basepoint)
- if err != nil {
- return nil, err
- }
- return &x25519Parameters{privateKey: privateKey, publicKey: publicKey}, nil
- }
-
- curve, ok := curveForCurveID(curveID)
- if !ok {
- return nil, errors.New("tls: internal error: unsupported curve")
- }
-
- p := &nistParameters{curveID: curveID}
- var err error
- p.privateKey, p.x, p.y, err = elliptic.GenerateKey(curve, rand)
- if err != nil {
- return nil, err
- }
- return p, nil
-}
-
-func curveForCurveID(id CurveID) (elliptic.Curve, bool) {
- switch id {
- case CurveP256:
- return elliptic.P256(), true
- case CurveP384:
- return elliptic.P384(), true
- case CurveP521:
- return elliptic.P521(), true
- default:
- return nil, false
- }
-}
-
-type nistParameters struct {
- privateKey []byte
- x, y *big.Int // public key
- curveID CurveID
-}
-
-func (p *nistParameters) CurveID() CurveID {
- return p.curveID
-}
-
-func (p *nistParameters) PublicKey() []byte {
- curve, _ := curveForCurveID(p.curveID)
- return elliptic.Marshal(curve, p.x, p.y)
-}
-
-func (p *nistParameters) SharedKey(peerPublicKey []byte) []byte {
- curve, _ := curveForCurveID(p.curveID)
- // Unmarshal also checks whether the given point is on the curve.
- x, y := elliptic.Unmarshal(curve, peerPublicKey)
- if x == nil {
- return nil
- }
-
- xShared, _ := curve.ScalarMult(x, y, p.privateKey)
- sharedKey := make([]byte, (curve.Params().BitSize+7)/8)
- return xShared.FillBytes(sharedKey)
-}
-
-type x25519Parameters struct {
- privateKey []byte
- publicKey []byte
-}
-
-func (p *x25519Parameters) CurveID() CurveID {
- return X25519
-}
-
-func (p *x25519Parameters) PublicKey() []byte {
- return p.publicKey[:]
-}
-
-func (p *x25519Parameters) SharedKey(peerPublicKey []byte) []byte {
- sharedKey, err := curve25519.X25519(p.privateKey, peerPublicKey)
- if err != nil {
- return nil
- }
- return sharedKey
-}
diff --git a/vendor/github.com/marten-seemann/qtls-go1-18/unsafe.go b/vendor/github.com/marten-seemann/qtls-go1-18/unsafe.go
deleted file mode 100644
index 55fa01b3d..000000000
--- a/vendor/github.com/marten-seemann/qtls-go1-18/unsafe.go
+++ /dev/null
@@ -1,96 +0,0 @@
-package qtls
-
-import (
- "crypto/tls"
- "reflect"
- "unsafe"
-)
-
-func init() {
- if !structsEqual(&tls.ConnectionState{}, &connectionState{}) {
- panic("qtls.ConnectionState doesn't match")
- }
- if !structsEqual(&tls.ClientSessionState{}, &clientSessionState{}) {
- panic("qtls.ClientSessionState doesn't match")
- }
- if !structsEqual(&tls.CertificateRequestInfo{}, &certificateRequestInfo{}) {
- panic("qtls.CertificateRequestInfo doesn't match")
- }
- if !structsEqual(&tls.Config{}, &config{}) {
- panic("qtls.Config doesn't match")
- }
- if !structsEqual(&tls.ClientHelloInfo{}, &clientHelloInfo{}) {
- panic("qtls.ClientHelloInfo doesn't match")
- }
-}
-
-func toConnectionState(c connectionState) ConnectionState {
- return *(*ConnectionState)(unsafe.Pointer(&c))
-}
-
-func toClientSessionState(s *clientSessionState) *ClientSessionState {
- return (*ClientSessionState)(unsafe.Pointer(s))
-}
-
-func fromClientSessionState(s *ClientSessionState) *clientSessionState {
- return (*clientSessionState)(unsafe.Pointer(s))
-}
-
-func toCertificateRequestInfo(i *certificateRequestInfo) *CertificateRequestInfo {
- return (*CertificateRequestInfo)(unsafe.Pointer(i))
-}
-
-func toConfig(c *config) *Config {
- return (*Config)(unsafe.Pointer(c))
-}
-
-func fromConfig(c *Config) *config {
- return (*config)(unsafe.Pointer(c))
-}
-
-func toClientHelloInfo(chi *clientHelloInfo) *ClientHelloInfo {
- return (*ClientHelloInfo)(unsafe.Pointer(chi))
-}
-
-func structsEqual(a, b interface{}) bool {
- return compare(reflect.ValueOf(a), reflect.ValueOf(b))
-}
-
-func compare(a, b reflect.Value) bool {
- sa := a.Elem()
- sb := b.Elem()
- if sa.NumField() != sb.NumField() {
- return false
- }
- for i := 0; i < sa.NumField(); i++ {
- fa := sa.Type().Field(i)
- fb := sb.Type().Field(i)
- if !reflect.DeepEqual(fa.Index, fb.Index) || fa.Name != fb.Name || fa.Anonymous != fb.Anonymous || fa.Offset != fb.Offset || !reflect.DeepEqual(fa.Type, fb.Type) {
- if fa.Type.Kind() != fb.Type.Kind() {
- return false
- }
- if fa.Type.Kind() == reflect.Slice {
- if !compareStruct(fa.Type.Elem(), fb.Type.Elem()) {
- return false
- }
- continue
- }
- return false
- }
- }
- return true
-}
-
-func compareStruct(a, b reflect.Type) bool {
- if a.NumField() != b.NumField() {
- return false
- }
- for i := 0; i < a.NumField(); i++ {
- fa := a.Field(i)
- fb := b.Field(i)
- if !reflect.DeepEqual(fa.Index, fb.Index) || fa.Name != fb.Name || fa.Anonymous != fb.Anonymous || fa.Offset != fb.Offset || !reflect.DeepEqual(fa.Type, fb.Type) {
- return false
- }
- }
- return true
-}
diff --git a/vendor/github.com/marten-seemann/qtls-go1-19/README.md b/vendor/github.com/marten-seemann/qtls-go1-19/README.md
deleted file mode 100644
index 3e9022127..000000000
--- a/vendor/github.com/marten-seemann/qtls-go1-19/README.md
+++ /dev/null
@@ -1,6 +0,0 @@
-# qtls
-
-[![Go Reference](https://pkg.go.dev/badge/github.com/marten-seemann/qtls-go1-17.svg)](https://pkg.go.dev/github.com/marten-seemann/qtls-go1-17)
-[![.github/workflows/go-test.yml](https://github.com/marten-seemann/qtls-go1-17/actions/workflows/go-test.yml/badge.svg)](https://github.com/marten-seemann/qtls-go1-17/actions/workflows/go-test.yml)
-
-This repository contains a modified version of the standard library's TLS implementation, modified for the QUIC protocol. It is used by [quic-go](https://github.com/lucas-clemente/quic-go).
diff --git a/vendor/github.com/marten-seemann/qtls-go1-19/alert.go b/vendor/github.com/marten-seemann/qtls-go1-19/alert.go
deleted file mode 100644
index 3feac79be..000000000
--- a/vendor/github.com/marten-seemann/qtls-go1-19/alert.go
+++ /dev/null
@@ -1,102 +0,0 @@
-// Copyright 2009 The Go Authors. All rights reserved.
-// Use of this source code is governed by a BSD-style
-// license that can be found in the LICENSE file.
-
-package qtls
-
-import "strconv"
-
-type alert uint8
-
-// Alert is a TLS alert
-type Alert = alert
-
-const (
- // alert level
- alertLevelWarning = 1
- alertLevelError = 2
-)
-
-const (
- alertCloseNotify alert = 0
- alertUnexpectedMessage alert = 10
- alertBadRecordMAC alert = 20
- alertDecryptionFailed alert = 21
- alertRecordOverflow alert = 22
- alertDecompressionFailure alert = 30
- alertHandshakeFailure alert = 40
- alertBadCertificate alert = 42
- alertUnsupportedCertificate alert = 43
- alertCertificateRevoked alert = 44
- alertCertificateExpired alert = 45
- alertCertificateUnknown alert = 46
- alertIllegalParameter alert = 47
- alertUnknownCA alert = 48
- alertAccessDenied alert = 49
- alertDecodeError alert = 50
- alertDecryptError alert = 51
- alertExportRestriction alert = 60
- alertProtocolVersion alert = 70
- alertInsufficientSecurity alert = 71
- alertInternalError alert = 80
- alertInappropriateFallback alert = 86
- alertUserCanceled alert = 90
- alertNoRenegotiation alert = 100
- alertMissingExtension alert = 109
- alertUnsupportedExtension alert = 110
- alertCertificateUnobtainable alert = 111
- alertUnrecognizedName alert = 112
- alertBadCertificateStatusResponse alert = 113
- alertBadCertificateHashValue alert = 114
- alertUnknownPSKIdentity alert = 115
- alertCertificateRequired alert = 116
- alertNoApplicationProtocol alert = 120
-)
-
-var alertText = map[alert]string{
- alertCloseNotify: "close notify",
- alertUnexpectedMessage: "unexpected message",
- alertBadRecordMAC: "bad record MAC",
- alertDecryptionFailed: "decryption failed",
- alertRecordOverflow: "record overflow",
- alertDecompressionFailure: "decompression failure",
- alertHandshakeFailure: "handshake failure",
- alertBadCertificate: "bad certificate",
- alertUnsupportedCertificate: "unsupported certificate",
- alertCertificateRevoked: "revoked certificate",
- alertCertificateExpired: "expired certificate",
- alertCertificateUnknown: "unknown certificate",
- alertIllegalParameter: "illegal parameter",
- alertUnknownCA: "unknown certificate authority",
- alertAccessDenied: "access denied",
- alertDecodeError: "error decoding message",
- alertDecryptError: "error decrypting message",
- alertExportRestriction: "export restriction",
- alertProtocolVersion: "protocol version not supported",
- alertInsufficientSecurity: "insufficient security level",
- alertInternalError: "internal error",
- alertInappropriateFallback: "inappropriate fallback",
- alertUserCanceled: "user canceled",
- alertNoRenegotiation: "no renegotiation",
- alertMissingExtension: "missing extension",
- alertUnsupportedExtension: "unsupported extension",
- alertCertificateUnobtainable: "certificate unobtainable",
- alertUnrecognizedName: "unrecognized name",
- alertBadCertificateStatusResponse: "bad certificate status response",
- alertBadCertificateHashValue: "bad certificate hash value",
- alertUnknownPSKIdentity: "unknown PSK identity",
- alertCertificateRequired: "certificate required",
- alertNoApplicationProtocol: "no application protocol",
-}
-
-func (e alert) String() string {
- s, ok := alertText[e]
- if ok {
- return "tls: " + s
- }
- return "tls: alert(" + strconv.Itoa(int(e)) + ")"
-}
-
-func (e alert) Error() string {
- return e.String()
-}
diff --git a/vendor/github.com/marten-seemann/qtls-go1-19/cpu.go b/vendor/github.com/marten-seemann/qtls-go1-19/cpu.go
deleted file mode 100644
index 121945087..000000000
--- a/vendor/github.com/marten-seemann/qtls-go1-19/cpu.go
+++ /dev/null
@@ -1,22 +0,0 @@
-//go:build !js
-// +build !js
-
-package qtls
-
-import (
- "runtime"
-
- "golang.org/x/sys/cpu"
-)
-
-var (
- hasGCMAsmAMD64 = cpu.X86.HasAES && cpu.X86.HasPCLMULQDQ
- hasGCMAsmARM64 = cpu.ARM64.HasAES && cpu.ARM64.HasPMULL
- // Keep in sync with crypto/aes/cipher_s390x.go.
- hasGCMAsmS390X = cpu.S390X.HasAES && cpu.S390X.HasAESCBC && cpu.S390X.HasAESCTR &&
- (cpu.S390X.HasGHASH || cpu.S390X.HasAESGCM)
-
- hasAESGCMHardwareSupport = runtime.GOARCH == "amd64" && hasGCMAsmAMD64 ||
- runtime.GOARCH == "arm64" && hasGCMAsmARM64 ||
- runtime.GOARCH == "s390x" && hasGCMAsmS390X
-)
diff --git a/vendor/github.com/marten-seemann/qtls-go1-19/cpu_other.go b/vendor/github.com/marten-seemann/qtls-go1-19/cpu_other.go
deleted file mode 100644
index 33f7d2194..000000000
--- a/vendor/github.com/marten-seemann/qtls-go1-19/cpu_other.go
+++ /dev/null
@@ -1,12 +0,0 @@
-//go:build js
-// +build js
-
-package qtls
-
-var (
- hasGCMAsmAMD64 = false
- hasGCMAsmARM64 = false
- hasGCMAsmS390X = false
-
- hasAESGCMHardwareSupport = false
-)
diff --git a/vendor/github.com/marten-seemann/qtls-go1-19/key_agreement.go b/vendor/github.com/marten-seemann/qtls-go1-19/key_agreement.go
deleted file mode 100644
index 453a8dcf0..000000000
--- a/vendor/github.com/marten-seemann/qtls-go1-19/key_agreement.go
+++ /dev/null
@@ -1,357 +0,0 @@
-// Copyright 2010 The Go Authors. All rights reserved.
-// Use of this source code is governed by a BSD-style
-// license that can be found in the LICENSE file.
-
-package qtls
-
-import (
- "crypto"
- "crypto/md5"
- "crypto/rsa"
- "crypto/sha1"
- "crypto/x509"
- "errors"
- "fmt"
- "io"
-)
-
-// a keyAgreement implements the client and server side of a TLS key agreement
-// protocol by generating and processing key exchange messages.
-type keyAgreement interface {
- // On the server side, the first two methods are called in order.
-
- // In the case that the key agreement protocol doesn't use a
- // ServerKeyExchange message, generateServerKeyExchange can return nil,
- // nil.
- generateServerKeyExchange(*config, *Certificate, *clientHelloMsg, *serverHelloMsg) (*serverKeyExchangeMsg, error)
- processClientKeyExchange(*config, *Certificate, *clientKeyExchangeMsg, uint16) ([]byte, error)
-
- // On the client side, the next two methods are called in order.
-
- // This method may not be called if the server doesn't send a
- // ServerKeyExchange message.
- processServerKeyExchange(*config, *clientHelloMsg, *serverHelloMsg, *x509.Certificate, *serverKeyExchangeMsg) error
- generateClientKeyExchange(*config, *clientHelloMsg, *x509.Certificate) ([]byte, *clientKeyExchangeMsg, error)
-}
-
-var errClientKeyExchange = errors.New("tls: invalid ClientKeyExchange message")
-var errServerKeyExchange = errors.New("tls: invalid ServerKeyExchange message")
-
-// rsaKeyAgreement implements the standard TLS key agreement where the client
-// encrypts the pre-master secret to the server's public key.
-type rsaKeyAgreement struct{}
-
-func (ka rsaKeyAgreement) generateServerKeyExchange(config *config, cert *Certificate, clientHello *clientHelloMsg, hello *serverHelloMsg) (*serverKeyExchangeMsg, error) {
- return nil, nil
-}
-
-func (ka rsaKeyAgreement) processClientKeyExchange(config *config, cert *Certificate, ckx *clientKeyExchangeMsg, version uint16) ([]byte, error) {
- if len(ckx.ciphertext) < 2 {
- return nil, errClientKeyExchange
- }
- ciphertextLen := int(ckx.ciphertext[0])<<8 | int(ckx.ciphertext[1])
- if ciphertextLen != len(ckx.ciphertext)-2 {
- return nil, errClientKeyExchange
- }
- ciphertext := ckx.ciphertext[2:]
-
- priv, ok := cert.PrivateKey.(crypto.Decrypter)
- if !ok {
- return nil, errors.New("tls: certificate private key does not implement crypto.Decrypter")
- }
- // Perform constant time RSA PKCS #1 v1.5 decryption
- preMasterSecret, err := priv.Decrypt(config.rand(), ciphertext, &rsa.PKCS1v15DecryptOptions{SessionKeyLen: 48})
- if err != nil {
- return nil, err
- }
- // We don't check the version number in the premaster secret. For one,
- // by checking it, we would leak information about the validity of the
- // encrypted pre-master secret. Secondly, it provides only a small
- // benefit against a downgrade attack and some implementations send the
- // wrong version anyway. See the discussion at the end of section
- // 7.4.7.1 of RFC 4346.
- return preMasterSecret, nil
-}
-
-func (ka rsaKeyAgreement) processServerKeyExchange(config *config, clientHello *clientHelloMsg, serverHello *serverHelloMsg, cert *x509.Certificate, skx *serverKeyExchangeMsg) error {
- return errors.New("tls: unexpected ServerKeyExchange")
-}
-
-func (ka rsaKeyAgreement) generateClientKeyExchange(config *config, clientHello *clientHelloMsg, cert *x509.Certificate) ([]byte, *clientKeyExchangeMsg, error) {
- preMasterSecret := make([]byte, 48)
- preMasterSecret[0] = byte(clientHello.vers >> 8)
- preMasterSecret[1] = byte(clientHello.vers)
- _, err := io.ReadFull(config.rand(), preMasterSecret[2:])
- if err != nil {
- return nil, nil, err
- }
-
- rsaKey, ok := cert.PublicKey.(*rsa.PublicKey)
- if !ok {
- return nil, nil, errors.New("tls: server certificate contains incorrect key type for selected ciphersuite")
- }
- encrypted, err := rsa.EncryptPKCS1v15(config.rand(), rsaKey, preMasterSecret)
- if err != nil {
- return nil, nil, err
- }
- ckx := new(clientKeyExchangeMsg)
- ckx.ciphertext = make([]byte, len(encrypted)+2)
- ckx.ciphertext[0] = byte(len(encrypted) >> 8)
- ckx.ciphertext[1] = byte(len(encrypted))
- copy(ckx.ciphertext[2:], encrypted)
- return preMasterSecret, ckx, nil
-}
-
-// sha1Hash calculates a SHA1 hash over the given byte slices.
-func sha1Hash(slices [][]byte) []byte {
- hsha1 := sha1.New()
- for _, slice := range slices {
- hsha1.Write(slice)
- }
- return hsha1.Sum(nil)
-}
-
-// md5SHA1Hash implements TLS 1.0's hybrid hash function which consists of the
-// concatenation of an MD5 and SHA1 hash.
-func md5SHA1Hash(slices [][]byte) []byte {
- md5sha1 := make([]byte, md5.Size+sha1.Size)
- hmd5 := md5.New()
- for _, slice := range slices {
- hmd5.Write(slice)
- }
- copy(md5sha1, hmd5.Sum(nil))
- copy(md5sha1[md5.Size:], sha1Hash(slices))
- return md5sha1
-}
-
-// hashForServerKeyExchange hashes the given slices and returns their digest
-// using the given hash function (for >= TLS 1.2) or using a default based on
-// the sigType (for earlier TLS versions). For Ed25519 signatures, which don't
-// do pre-hashing, it returns the concatenation of the slices.
-func hashForServerKeyExchange(sigType uint8, hashFunc crypto.Hash, version uint16, slices ...[]byte) []byte {
- if sigType == signatureEd25519 {
- var signed []byte
- for _, slice := range slices {
- signed = append(signed, slice...)
- }
- return signed
- }
- if version >= VersionTLS12 {
- h := hashFunc.New()
- for _, slice := range slices {
- h.Write(slice)
- }
- digest := h.Sum(nil)
- return digest
- }
- if sigType == signatureECDSA {
- return sha1Hash(slices)
- }
- return md5SHA1Hash(slices)
-}
-
-// ecdheKeyAgreement implements a TLS key agreement where the server
-// generates an ephemeral EC public/private key pair and signs it. The
-// pre-master secret is then calculated using ECDH. The signature may
-// be ECDSA, Ed25519 or RSA.
-type ecdheKeyAgreement struct {
- version uint16
- isRSA bool
- params ecdheParameters
-
- // ckx and preMasterSecret are generated in processServerKeyExchange
- // and returned in generateClientKeyExchange.
- ckx *clientKeyExchangeMsg
- preMasterSecret []byte
-}
-
-func (ka *ecdheKeyAgreement) generateServerKeyExchange(config *config, cert *Certificate, clientHello *clientHelloMsg, hello *serverHelloMsg) (*serverKeyExchangeMsg, error) {
- var curveID CurveID
- for _, c := range clientHello.supportedCurves {
- if config.supportsCurve(c) {
- curveID = c
- break
- }
- }
-
- if curveID == 0 {
- return nil, errors.New("tls: no supported elliptic curves offered")
- }
- if _, ok := curveForCurveID(curveID); curveID != X25519 && !ok {
- return nil, errors.New("tls: CurvePreferences includes unsupported curve")
- }
-
- params, err := generateECDHEParameters(config.rand(), curveID)
- if err != nil {
- return nil, err
- }
- ka.params = params
-
- // See RFC 4492, Section 5.4.
- ecdhePublic := params.PublicKey()
- serverECDHEParams := make([]byte, 1+2+1+len(ecdhePublic))
- serverECDHEParams[0] = 3 // named curve
- serverECDHEParams[1] = byte(curveID >> 8)
- serverECDHEParams[2] = byte(curveID)
- serverECDHEParams[3] = byte(len(ecdhePublic))
- copy(serverECDHEParams[4:], ecdhePublic)
-
- priv, ok := cert.PrivateKey.(crypto.Signer)
- if !ok {
- return nil, fmt.Errorf("tls: certificate private key of type %T does not implement crypto.Signer", cert.PrivateKey)
- }
-
- var signatureAlgorithm SignatureScheme
- var sigType uint8
- var sigHash crypto.Hash
- if ka.version >= VersionTLS12 {
- signatureAlgorithm, err = selectSignatureScheme(ka.version, cert, clientHello.supportedSignatureAlgorithms)
- if err != nil {
- return nil, err
- }
- sigType, sigHash, err = typeAndHashFromSignatureScheme(signatureAlgorithm)
- if err != nil {
- return nil, err
- }
- } else {
- sigType, sigHash, err = legacyTypeAndHashFromPublicKey(priv.Public())
- if err != nil {
- return nil, err
- }
- }
- if (sigType == signaturePKCS1v15 || sigType == signatureRSAPSS) != ka.isRSA {
- return nil, errors.New("tls: certificate cannot be used with the selected cipher suite")
- }
-
- signed := hashForServerKeyExchange(sigType, sigHash, ka.version, clientHello.random, hello.random, serverECDHEParams)
-
- signOpts := crypto.SignerOpts(sigHash)
- if sigType == signatureRSAPSS {
- signOpts = &rsa.PSSOptions{SaltLength: rsa.PSSSaltLengthEqualsHash, Hash: sigHash}
- }
- sig, err := priv.Sign(config.rand(), signed, signOpts)
- if err != nil {
- return nil, errors.New("tls: failed to sign ECDHE parameters: " + err.Error())
- }
-
- skx := new(serverKeyExchangeMsg)
- sigAndHashLen := 0
- if ka.version >= VersionTLS12 {
- sigAndHashLen = 2
- }
- skx.key = make([]byte, len(serverECDHEParams)+sigAndHashLen+2+len(sig))
- copy(skx.key, serverECDHEParams)
- k := skx.key[len(serverECDHEParams):]
- if ka.version >= VersionTLS12 {
- k[0] = byte(signatureAlgorithm >> 8)
- k[1] = byte(signatureAlgorithm)
- k = k[2:]
- }
- k[0] = byte(len(sig) >> 8)
- k[1] = byte(len(sig))
- copy(k[2:], sig)
-
- return skx, nil
-}
-
-func (ka *ecdheKeyAgreement) processClientKeyExchange(config *config, cert *Certificate, ckx *clientKeyExchangeMsg, version uint16) ([]byte, error) {
- if len(ckx.ciphertext) == 0 || int(ckx.ciphertext[0]) != len(ckx.ciphertext)-1 {
- return nil, errClientKeyExchange
- }
-
- preMasterSecret := ka.params.SharedKey(ckx.ciphertext[1:])
- if preMasterSecret == nil {
- return nil, errClientKeyExchange
- }
-
- return preMasterSecret, nil
-}
-
-func (ka *ecdheKeyAgreement) processServerKeyExchange(config *config, clientHello *clientHelloMsg, serverHello *serverHelloMsg, cert *x509.Certificate, skx *serverKeyExchangeMsg) error {
- if len(skx.key) < 4 {
- return errServerKeyExchange
- }
- if skx.key[0] != 3 { // named curve
- return errors.New("tls: server selected unsupported curve")
- }
- curveID := CurveID(skx.key[1])<<8 | CurveID(skx.key[2])
-
- publicLen := int(skx.key[3])
- if publicLen+4 > len(skx.key) {
- return errServerKeyExchange
- }
- serverECDHEParams := skx.key[:4+publicLen]
- publicKey := serverECDHEParams[4:]
-
- sig := skx.key[4+publicLen:]
- if len(sig) < 2 {
- return errServerKeyExchange
- }
-
- if _, ok := curveForCurveID(curveID); curveID != X25519 && !ok {
- return errors.New("tls: server selected unsupported curve")
- }
-
- params, err := generateECDHEParameters(config.rand(), curveID)
- if err != nil {
- return err
- }
- ka.params = params
-
- ka.preMasterSecret = params.SharedKey(publicKey)
- if ka.preMasterSecret == nil {
- return errServerKeyExchange
- }
-
- ourPublicKey := params.PublicKey()
- ka.ckx = new(clientKeyExchangeMsg)
- ka.ckx.ciphertext = make([]byte, 1+len(ourPublicKey))
- ka.ckx.ciphertext[0] = byte(len(ourPublicKey))
- copy(ka.ckx.ciphertext[1:], ourPublicKey)
-
- var sigType uint8
- var sigHash crypto.Hash
- if ka.version >= VersionTLS12 {
- signatureAlgorithm := SignatureScheme(sig[0])<<8 | SignatureScheme(sig[1])
- sig = sig[2:]
- if len(sig) < 2 {
- return errServerKeyExchange
- }
-
- if !isSupportedSignatureAlgorithm(signatureAlgorithm, clientHello.supportedSignatureAlgorithms) {
- return errors.New("tls: certificate used with invalid signature algorithm")
- }
- sigType, sigHash, err = typeAndHashFromSignatureScheme(signatureAlgorithm)
- if err != nil {
- return err
- }
- } else {
- sigType, sigHash, err = legacyTypeAndHashFromPublicKey(cert.PublicKey)
- if err != nil {
- return err
- }
- }
- if (sigType == signaturePKCS1v15 || sigType == signatureRSAPSS) != ka.isRSA {
- return errServerKeyExchange
- }
-
- sigLen := int(sig[0])<<8 | int(sig[1])
- if sigLen+2 != len(sig) {
- return errServerKeyExchange
- }
- sig = sig[2:]
-
- signed := hashForServerKeyExchange(sigType, sigHash, ka.version, clientHello.random, serverHello.random, serverECDHEParams)
- if err := verifyHandshakeSignature(sigType, cert.PublicKey, sigHash, signed, sig); err != nil {
- return errors.New("tls: invalid signature by the server certificate: " + err.Error())
- }
- return nil
-}
-
-func (ka *ecdheKeyAgreement) generateClientKeyExchange(config *config, clientHello *clientHelloMsg, cert *x509.Certificate) ([]byte, *clientKeyExchangeMsg, error) {
- if ka.ckx == nil {
- return nil, nil, errors.New("tls: missing ServerKeyExchange message")
- }
-
- return ka.preMasterSecret, ka.ckx, nil
-}
diff --git a/vendor/github.com/marten-seemann/qtls-go1-19/key_schedule.go b/vendor/github.com/marten-seemann/qtls-go1-19/key_schedule.go
deleted file mode 100644
index da13904a6..000000000
--- a/vendor/github.com/marten-seemann/qtls-go1-19/key_schedule.go
+++ /dev/null
@@ -1,199 +0,0 @@
-// Copyright 2018 The Go Authors. All rights reserved.
-// Use of this source code is governed by a BSD-style
-// license that can be found in the LICENSE file.
-
-package qtls
-
-import (
- "crypto/elliptic"
- "crypto/hmac"
- "errors"
- "hash"
- "io"
- "math/big"
-
- "golang.org/x/crypto/cryptobyte"
- "golang.org/x/crypto/curve25519"
- "golang.org/x/crypto/hkdf"
-)
-
-// This file contains the functions necessary to compute the TLS 1.3 key
-// schedule. See RFC 8446, Section 7.
-
-const (
- resumptionBinderLabel = "res binder"
- clientHandshakeTrafficLabel = "c hs traffic"
- serverHandshakeTrafficLabel = "s hs traffic"
- clientApplicationTrafficLabel = "c ap traffic"
- serverApplicationTrafficLabel = "s ap traffic"
- exporterLabel = "exp master"
- resumptionLabel = "res master"
- trafficUpdateLabel = "traffic upd"
-)
-
-// expandLabel implements HKDF-Expand-Label from RFC 8446, Section 7.1.
-func (c *cipherSuiteTLS13) expandLabel(secret []byte, label string, context []byte, length int) []byte {
- var hkdfLabel cryptobyte.Builder
- hkdfLabel.AddUint16(uint16(length))
- hkdfLabel.AddUint8LengthPrefixed(func(b *cryptobyte.Builder) {
- b.AddBytes([]byte("tls13 "))
- b.AddBytes([]byte(label))
- })
- hkdfLabel.AddUint8LengthPrefixed(func(b *cryptobyte.Builder) {
- b.AddBytes(context)
- })
- out := make([]byte, length)
- n, err := hkdf.Expand(c.hash.New, secret, hkdfLabel.BytesOrPanic()).Read(out)
- if err != nil || n != length {
- panic("tls: HKDF-Expand-Label invocation failed unexpectedly")
- }
- return out
-}
-
-// deriveSecret implements Derive-Secret from RFC 8446, Section 7.1.
-func (c *cipherSuiteTLS13) deriveSecret(secret []byte, label string, transcript hash.Hash) []byte {
- if transcript == nil {
- transcript = c.hash.New()
- }
- return c.expandLabel(secret, label, transcript.Sum(nil), c.hash.Size())
-}
-
-// extract implements HKDF-Extract with the cipher suite hash.
-func (c *cipherSuiteTLS13) extract(newSecret, currentSecret []byte) []byte {
- if newSecret == nil {
- newSecret = make([]byte, c.hash.Size())
- }
- return hkdf.Extract(c.hash.New, newSecret, currentSecret)
-}
-
-// nextTrafficSecret generates the next traffic secret, given the current one,
-// according to RFC 8446, Section 7.2.
-func (c *cipherSuiteTLS13) nextTrafficSecret(trafficSecret []byte) []byte {
- return c.expandLabel(trafficSecret, trafficUpdateLabel, nil, c.hash.Size())
-}
-
-// trafficKey generates traffic keys according to RFC 8446, Section 7.3.
-func (c *cipherSuiteTLS13) trafficKey(trafficSecret []byte) (key, iv []byte) {
- key = c.expandLabel(trafficSecret, "key", nil, c.keyLen)
- iv = c.expandLabel(trafficSecret, "iv", nil, aeadNonceLength)
- return
-}
-
-// finishedHash generates the Finished verify_data or PskBinderEntry according
-// to RFC 8446, Section 4.4.4. See sections 4.4 and 4.2.11.2 for the baseKey
-// selection.
-func (c *cipherSuiteTLS13) finishedHash(baseKey []byte, transcript hash.Hash) []byte {
- finishedKey := c.expandLabel(baseKey, "finished", nil, c.hash.Size())
- verifyData := hmac.New(c.hash.New, finishedKey)
- verifyData.Write(transcript.Sum(nil))
- return verifyData.Sum(nil)
-}
-
-// exportKeyingMaterial implements RFC5705 exporters for TLS 1.3 according to
-// RFC 8446, Section 7.5.
-func (c *cipherSuiteTLS13) exportKeyingMaterial(masterSecret []byte, transcript hash.Hash) func(string, []byte, int) ([]byte, error) {
- expMasterSecret := c.deriveSecret(masterSecret, exporterLabel, transcript)
- return func(label string, context []byte, length int) ([]byte, error) {
- secret := c.deriveSecret(expMasterSecret, label, nil)
- h := c.hash.New()
- h.Write(context)
- return c.expandLabel(secret, "exporter", h.Sum(nil), length), nil
- }
-}
-
-// ecdheParameters implements Diffie-Hellman with either NIST curves or X25519,
-// according to RFC 8446, Section 4.2.8.2.
-type ecdheParameters interface {
- CurveID() CurveID
- PublicKey() []byte
- SharedKey(peerPublicKey []byte) []byte
-}
-
-func generateECDHEParameters(rand io.Reader, curveID CurveID) (ecdheParameters, error) {
- if curveID == X25519 {
- privateKey := make([]byte, curve25519.ScalarSize)
- if _, err := io.ReadFull(rand, privateKey); err != nil {
- return nil, err
- }
- publicKey, err := curve25519.X25519(privateKey, curve25519.Basepoint)
- if err != nil {
- return nil, err
- }
- return &x25519Parameters{privateKey: privateKey, publicKey: publicKey}, nil
- }
-
- curve, ok := curveForCurveID(curveID)
- if !ok {
- return nil, errors.New("tls: internal error: unsupported curve")
- }
-
- p := &nistParameters{curveID: curveID}
- var err error
- p.privateKey, p.x, p.y, err = elliptic.GenerateKey(curve, rand)
- if err != nil {
- return nil, err
- }
- return p, nil
-}
-
-func curveForCurveID(id CurveID) (elliptic.Curve, bool) {
- switch id {
- case CurveP256:
- return elliptic.P256(), true
- case CurveP384:
- return elliptic.P384(), true
- case CurveP521:
- return elliptic.P521(), true
- default:
- return nil, false
- }
-}
-
-type nistParameters struct {
- privateKey []byte
- x, y *big.Int // public key
- curveID CurveID
-}
-
-func (p *nistParameters) CurveID() CurveID {
- return p.curveID
-}
-
-func (p *nistParameters) PublicKey() []byte {
- curve, _ := curveForCurveID(p.curveID)
- return elliptic.Marshal(curve, p.x, p.y)
-}
-
-func (p *nistParameters) SharedKey(peerPublicKey []byte) []byte {
- curve, _ := curveForCurveID(p.curveID)
- // Unmarshal also checks whether the given point is on the curve.
- x, y := elliptic.Unmarshal(curve, peerPublicKey)
- if x == nil {
- return nil
- }
-
- xShared, _ := curve.ScalarMult(x, y, p.privateKey)
- sharedKey := make([]byte, (curve.Params().BitSize+7)/8)
- return xShared.FillBytes(sharedKey)
-}
-
-type x25519Parameters struct {
- privateKey []byte
- publicKey []byte
-}
-
-func (p *x25519Parameters) CurveID() CurveID {
- return X25519
-}
-
-func (p *x25519Parameters) PublicKey() []byte {
- return p.publicKey[:]
-}
-
-func (p *x25519Parameters) SharedKey(peerPublicKey []byte) []byte {
- sharedKey, err := curve25519.X25519(p.privateKey, peerPublicKey)
- if err != nil {
- return nil
- }
- return sharedKey
-}
diff --git a/vendor/github.com/marten-seemann/qtls-go1-19/prf.go b/vendor/github.com/marten-seemann/qtls-go1-19/prf.go
deleted file mode 100644
index 9eb0221a0..000000000
--- a/vendor/github.com/marten-seemann/qtls-go1-19/prf.go
+++ /dev/null
@@ -1,283 +0,0 @@
-// Copyright 2009 The Go Authors. All rights reserved.
-// Use of this source code is governed by a BSD-style
-// license that can be found in the LICENSE file.
-
-package qtls
-
-import (
- "crypto"
- "crypto/hmac"
- "crypto/md5"
- "crypto/sha1"
- "crypto/sha256"
- "crypto/sha512"
- "errors"
- "fmt"
- "hash"
-)
-
-// Split a premaster secret in two as specified in RFC 4346, Section 5.
-func splitPreMasterSecret(secret []byte) (s1, s2 []byte) {
- s1 = secret[0 : (len(secret)+1)/2]
- s2 = secret[len(secret)/2:]
- return
-}
-
-// pHash implements the P_hash function, as defined in RFC 4346, Section 5.
-func pHash(result, secret, seed []byte, hash func() hash.Hash) {
- h := hmac.New(hash, secret)
- h.Write(seed)
- a := h.Sum(nil)
-
- j := 0
- for j < len(result) {
- h.Reset()
- h.Write(a)
- h.Write(seed)
- b := h.Sum(nil)
- copy(result[j:], b)
- j += len(b)
-
- h.Reset()
- h.Write(a)
- a = h.Sum(nil)
- }
-}
-
-// prf10 implements the TLS 1.0 pseudo-random function, as defined in RFC 2246, Section 5.
-func prf10(result, secret, label, seed []byte) {
- hashSHA1 := sha1.New
- hashMD5 := md5.New
-
- labelAndSeed := make([]byte, len(label)+len(seed))
- copy(labelAndSeed, label)
- copy(labelAndSeed[len(label):], seed)
-
- s1, s2 := splitPreMasterSecret(secret)
- pHash(result, s1, labelAndSeed, hashMD5)
- result2 := make([]byte, len(result))
- pHash(result2, s2, labelAndSeed, hashSHA1)
-
- for i, b := range result2 {
- result[i] ^= b
- }
-}
-
-// prf12 implements the TLS 1.2 pseudo-random function, as defined in RFC 5246, Section 5.
-func prf12(hashFunc func() hash.Hash) func(result, secret, label, seed []byte) {
- return func(result, secret, label, seed []byte) {
- labelAndSeed := make([]byte, len(label)+len(seed))
- copy(labelAndSeed, label)
- copy(labelAndSeed[len(label):], seed)
-
- pHash(result, secret, labelAndSeed, hashFunc)
- }
-}
-
-const (
- masterSecretLength = 48 // Length of a master secret in TLS 1.1.
- finishedVerifyLength = 12 // Length of verify_data in a Finished message.
-)
-
-var masterSecretLabel = []byte("master secret")
-var keyExpansionLabel = []byte("key expansion")
-var clientFinishedLabel = []byte("client finished")
-var serverFinishedLabel = []byte("server finished")
-
-func prfAndHashForVersion(version uint16, suite *cipherSuite) (func(result, secret, label, seed []byte), crypto.Hash) {
- switch version {
- case VersionTLS10, VersionTLS11:
- return prf10, crypto.Hash(0)
- case VersionTLS12:
- if suite.flags&suiteSHA384 != 0 {
- return prf12(sha512.New384), crypto.SHA384
- }
- return prf12(sha256.New), crypto.SHA256
- default:
- panic("unknown version")
- }
-}
-
-func prfForVersion(version uint16, suite *cipherSuite) func(result, secret, label, seed []byte) {
- prf, _ := prfAndHashForVersion(version, suite)
- return prf
-}
-
-// masterFromPreMasterSecret generates the master secret from the pre-master
-// secret. See RFC 5246, Section 8.1.
-func masterFromPreMasterSecret(version uint16, suite *cipherSuite, preMasterSecret, clientRandom, serverRandom []byte) []byte {
- seed := make([]byte, 0, len(clientRandom)+len(serverRandom))
- seed = append(seed, clientRandom...)
- seed = append(seed, serverRandom...)
-
- masterSecret := make([]byte, masterSecretLength)
- prfForVersion(version, suite)(masterSecret, preMasterSecret, masterSecretLabel, seed)
- return masterSecret
-}
-
-// keysFromMasterSecret generates the connection keys from the master
-// secret, given the lengths of the MAC key, cipher key and IV, as defined in
-// RFC 2246, Section 6.3.
-func keysFromMasterSecret(version uint16, suite *cipherSuite, masterSecret, clientRandom, serverRandom []byte, macLen, keyLen, ivLen int) (clientMAC, serverMAC, clientKey, serverKey, clientIV, serverIV []byte) {
- seed := make([]byte, 0, len(serverRandom)+len(clientRandom))
- seed = append(seed, serverRandom...)
- seed = append(seed, clientRandom...)
-
- n := 2*macLen + 2*keyLen + 2*ivLen
- keyMaterial := make([]byte, n)
- prfForVersion(version, suite)(keyMaterial, masterSecret, keyExpansionLabel, seed)
- clientMAC = keyMaterial[:macLen]
- keyMaterial = keyMaterial[macLen:]
- serverMAC = keyMaterial[:macLen]
- keyMaterial = keyMaterial[macLen:]
- clientKey = keyMaterial[:keyLen]
- keyMaterial = keyMaterial[keyLen:]
- serverKey = keyMaterial[:keyLen]
- keyMaterial = keyMaterial[keyLen:]
- clientIV = keyMaterial[:ivLen]
- keyMaterial = keyMaterial[ivLen:]
- serverIV = keyMaterial[:ivLen]
- return
-}
-
-func newFinishedHash(version uint16, cipherSuite *cipherSuite) finishedHash {
- var buffer []byte
- if version >= VersionTLS12 {
- buffer = []byte{}
- }
-
- prf, hash := prfAndHashForVersion(version, cipherSuite)
- if hash != 0 {
- return finishedHash{hash.New(), hash.New(), nil, nil, buffer, version, prf}
- }
-
- return finishedHash{sha1.New(), sha1.New(), md5.New(), md5.New(), buffer, version, prf}
-}
-
-// A finishedHash calculates the hash of a set of handshake messages suitable
-// for including in a Finished message.
-type finishedHash struct {
- client hash.Hash
- server hash.Hash
-
- // Prior to TLS 1.2, an additional MD5 hash is required.
- clientMD5 hash.Hash
- serverMD5 hash.Hash
-
- // In TLS 1.2, a full buffer is sadly required.
- buffer []byte
-
- version uint16
- prf func(result, secret, label, seed []byte)
-}
-
-func (h *finishedHash) Write(msg []byte) (n int, err error) {
- h.client.Write(msg)
- h.server.Write(msg)
-
- if h.version < VersionTLS12 {
- h.clientMD5.Write(msg)
- h.serverMD5.Write(msg)
- }
-
- if h.buffer != nil {
- h.buffer = append(h.buffer, msg...)
- }
-
- return len(msg), nil
-}
-
-func (h finishedHash) Sum() []byte {
- if h.version >= VersionTLS12 {
- return h.client.Sum(nil)
- }
-
- out := make([]byte, 0, md5.Size+sha1.Size)
- out = h.clientMD5.Sum(out)
- return h.client.Sum(out)
-}
-
-// clientSum returns the contents of the verify_data member of a client's
-// Finished message.
-func (h finishedHash) clientSum(masterSecret []byte) []byte {
- out := make([]byte, finishedVerifyLength)
- h.prf(out, masterSecret, clientFinishedLabel, h.Sum())
- return out
-}
-
-// serverSum returns the contents of the verify_data member of a server's
-// Finished message.
-func (h finishedHash) serverSum(masterSecret []byte) []byte {
- out := make([]byte, finishedVerifyLength)
- h.prf(out, masterSecret, serverFinishedLabel, h.Sum())
- return out
-}
-
-// hashForClientCertificate returns the handshake messages so far, pre-hashed if
-// necessary, suitable for signing by a TLS client certificate.
-func (h finishedHash) hashForClientCertificate(sigType uint8, hashAlg crypto.Hash, masterSecret []byte) []byte {
- if (h.version >= VersionTLS12 || sigType == signatureEd25519) && h.buffer == nil {
- panic("tls: handshake hash for a client certificate requested after discarding the handshake buffer")
- }
-
- if sigType == signatureEd25519 {
- return h.buffer
- }
-
- if h.version >= VersionTLS12 {
- hash := hashAlg.New()
- hash.Write(h.buffer)
- return hash.Sum(nil)
- }
-
- if sigType == signatureECDSA {
- return h.server.Sum(nil)
- }
-
- return h.Sum()
-}
-
-// discardHandshakeBuffer is called when there is no more need to
-// buffer the entirety of the handshake messages.
-func (h *finishedHash) discardHandshakeBuffer() {
- h.buffer = nil
-}
-
-// noExportedKeyingMaterial is used as a value of
-// ConnectionState.ekm when renegotiation is enabled and thus
-// we wish to fail all key-material export requests.
-func noExportedKeyingMaterial(label string, context []byte, length int) ([]byte, error) {
- return nil, errors.New("crypto/tls: ExportKeyingMaterial is unavailable when renegotiation is enabled")
-}
-
-// ekmFromMasterSecret generates exported keying material as defined in RFC 5705.
-func ekmFromMasterSecret(version uint16, suite *cipherSuite, masterSecret, clientRandom, serverRandom []byte) func(string, []byte, int) ([]byte, error) {
- return func(label string, context []byte, length int) ([]byte, error) {
- switch label {
- case "client finished", "server finished", "master secret", "key expansion":
- // These values are reserved and may not be used.
- return nil, fmt.Errorf("crypto/tls: reserved ExportKeyingMaterial label: %s", label)
- }
-
- seedLen := len(serverRandom) + len(clientRandom)
- if context != nil {
- seedLen += 2 + len(context)
- }
- seed := make([]byte, 0, seedLen)
-
- seed = append(seed, clientRandom...)
- seed = append(seed, serverRandom...)
-
- if context != nil {
- if len(context) >= 1<<16 {
- return nil, fmt.Errorf("crypto/tls: ExportKeyingMaterial context too long")
- }
- seed = append(seed, byte(len(context)>>8), byte(len(context)))
- seed = append(seed, context...)
- }
-
- keyMaterial := make([]byte, length)
- prfForVersion(version, suite)(keyMaterial, masterSecret, []byte(label), seed)
- return keyMaterial, nil
- }
-}
diff --git a/vendor/github.com/marten-seemann/qtls-go1-19/ticket.go b/vendor/github.com/marten-seemann/qtls-go1-19/ticket.go
deleted file mode 100644
index 81e8a52ea..000000000
--- a/vendor/github.com/marten-seemann/qtls-go1-19/ticket.go
+++ /dev/null
@@ -1,274 +0,0 @@
-// Copyright 2012 The Go Authors. All rights reserved.
-// Use of this source code is governed by a BSD-style
-// license that can be found in the LICENSE file.
-
-package qtls
-
-import (
- "bytes"
- "crypto/aes"
- "crypto/cipher"
- "crypto/hmac"
- "crypto/sha256"
- "crypto/subtle"
- "encoding/binary"
- "errors"
- "io"
- "time"
-
- "golang.org/x/crypto/cryptobyte"
-)
-
-// sessionState contains the information that is serialized into a session
-// ticket in order to later resume a connection.
-type sessionState struct {
- vers uint16
- cipherSuite uint16
- createdAt uint64
- masterSecret []byte // opaque master_secret<1..2^16-1>;
- // struct { opaque certificate<1..2^24-1> } Certificate;
- certificates [][]byte // Certificate certificate_list<0..2^24-1>;
-
- // usedOldKey is true if the ticket from which this session came from
- // was encrypted with an older key and thus should be refreshed.
- usedOldKey bool
-}
-
-func (m *sessionState) marshal() []byte {
- var b cryptobyte.Builder
- b.AddUint16(m.vers)
- b.AddUint16(m.cipherSuite)
- addUint64(&b, m.createdAt)
- b.AddUint16LengthPrefixed(func(b *cryptobyte.Builder) {
- b.AddBytes(m.masterSecret)
- })
- b.AddUint24LengthPrefixed(func(b *cryptobyte.Builder) {
- for _, cert := range m.certificates {
- b.AddUint24LengthPrefixed(func(b *cryptobyte.Builder) {
- b.AddBytes(cert)
- })
- }
- })
- return b.BytesOrPanic()
-}
-
-func (m *sessionState) unmarshal(data []byte) bool {
- *m = sessionState{usedOldKey: m.usedOldKey}
- s := cryptobyte.String(data)
- if ok := s.ReadUint16(&m.vers) &&
- s.ReadUint16(&m.cipherSuite) &&
- readUint64(&s, &m.createdAt) &&
- readUint16LengthPrefixed(&s, &m.masterSecret) &&
- len(m.masterSecret) != 0; !ok {
- return false
- }
- var certList cryptobyte.String
- if !s.ReadUint24LengthPrefixed(&certList) {
- return false
- }
- for !certList.Empty() {
- var cert []byte
- if !readUint24LengthPrefixed(&certList, &cert) {
- return false
- }
- m.certificates = append(m.certificates, cert)
- }
- return s.Empty()
-}
-
-// sessionStateTLS13 is the content of a TLS 1.3 session ticket. Its first
-// version (revision = 0) doesn't carry any of the information needed for 0-RTT
-// validation and the nonce is always empty.
-// version (revision = 1) carries the max_early_data_size sent in the ticket.
-// version (revision = 2) carries the ALPN sent in the ticket.
-type sessionStateTLS13 struct {
- // uint8 version = 0x0304;
- // uint8 revision = 2;
- cipherSuite uint16
- createdAt uint64
- resumptionSecret []byte // opaque resumption_master_secret<1..2^8-1>;
- certificate Certificate // CertificateEntry certificate_list<0..2^24-1>;
- maxEarlyData uint32
- alpn string
-
- appData []byte
-}
-
-func (m *sessionStateTLS13) marshal() []byte {
- var b cryptobyte.Builder
- b.AddUint16(VersionTLS13)
- b.AddUint8(2) // revision
- b.AddUint16(m.cipherSuite)
- addUint64(&b, m.createdAt)
- b.AddUint8LengthPrefixed(func(b *cryptobyte.Builder) {
- b.AddBytes(m.resumptionSecret)
- })
- marshalCertificate(&b, m.certificate)
- b.AddUint32(m.maxEarlyData)
- b.AddUint8LengthPrefixed(func(b *cryptobyte.Builder) {
- b.AddBytes([]byte(m.alpn))
- })
- b.AddUint16LengthPrefixed(func(b *cryptobyte.Builder) {
- b.AddBytes(m.appData)
- })
- return b.BytesOrPanic()
-}
-
-func (m *sessionStateTLS13) unmarshal(data []byte) bool {
- *m = sessionStateTLS13{}
- s := cryptobyte.String(data)
- var version uint16
- var revision uint8
- var alpn []byte
- ret := s.ReadUint16(&version) &&
- version == VersionTLS13 &&
- s.ReadUint8(&revision) &&
- revision == 2 &&
- s.ReadUint16(&m.cipherSuite) &&
- readUint64(&s, &m.createdAt) &&
- readUint8LengthPrefixed(&s, &m.resumptionSecret) &&
- len(m.resumptionSecret) != 0 &&
- unmarshalCertificate(&s, &m.certificate) &&
- s.ReadUint32(&m.maxEarlyData) &&
- readUint8LengthPrefixed(&s, &alpn) &&
- readUint16LengthPrefixed(&s, &m.appData) &&
- s.Empty()
- m.alpn = string(alpn)
- return ret
-}
-
-func (c *Conn) encryptTicket(state []byte) ([]byte, error) {
- if len(c.ticketKeys) == 0 {
- return nil, errors.New("tls: internal error: session ticket keys unavailable")
- }
-
- encrypted := make([]byte, ticketKeyNameLen+aes.BlockSize+len(state)+sha256.Size)
- keyName := encrypted[:ticketKeyNameLen]
- iv := encrypted[ticketKeyNameLen : ticketKeyNameLen+aes.BlockSize]
- macBytes := encrypted[len(encrypted)-sha256.Size:]
-
- if _, err := io.ReadFull(c.config.rand(), iv); err != nil {
- return nil, err
- }
- key := c.ticketKeys[0]
- copy(keyName, key.keyName[:])
- block, err := aes.NewCipher(key.aesKey[:])
- if err != nil {
- return nil, errors.New("tls: failed to create cipher while encrypting ticket: " + err.Error())
- }
- cipher.NewCTR(block, iv).XORKeyStream(encrypted[ticketKeyNameLen+aes.BlockSize:], state)
-
- mac := hmac.New(sha256.New, key.hmacKey[:])
- mac.Write(encrypted[:len(encrypted)-sha256.Size])
- mac.Sum(macBytes[:0])
-
- return encrypted, nil
-}
-
-func (c *Conn) decryptTicket(encrypted []byte) (plaintext []byte, usedOldKey bool) {
- if len(encrypted) < ticketKeyNameLen+aes.BlockSize+sha256.Size {
- return nil, false
- }
-
- keyName := encrypted[:ticketKeyNameLen]
- iv := encrypted[ticketKeyNameLen : ticketKeyNameLen+aes.BlockSize]
- macBytes := encrypted[len(encrypted)-sha256.Size:]
- ciphertext := encrypted[ticketKeyNameLen+aes.BlockSize : len(encrypted)-sha256.Size]
-
- keyIndex := -1
- for i, candidateKey := range c.ticketKeys {
- if bytes.Equal(keyName, candidateKey.keyName[:]) {
- keyIndex = i
- break
- }
- }
- if keyIndex == -1 {
- return nil, false
- }
- key := &c.ticketKeys[keyIndex]
-
- mac := hmac.New(sha256.New, key.hmacKey[:])
- mac.Write(encrypted[:len(encrypted)-sha256.Size])
- expected := mac.Sum(nil)
-
- if subtle.ConstantTimeCompare(macBytes, expected) != 1 {
- return nil, false
- }
-
- block, err := aes.NewCipher(key.aesKey[:])
- if err != nil {
- return nil, false
- }
- plaintext = make([]byte, len(ciphertext))
- cipher.NewCTR(block, iv).XORKeyStream(plaintext, ciphertext)
-
- return plaintext, keyIndex > 0
-}
-
-func (c *Conn) getSessionTicketMsg(appData []byte) (*newSessionTicketMsgTLS13, error) {
- m := new(newSessionTicketMsgTLS13)
-
- var certsFromClient [][]byte
- for _, cert := range c.peerCertificates {
- certsFromClient = append(certsFromClient, cert.Raw)
- }
- state := sessionStateTLS13{
- cipherSuite: c.cipherSuite,
- createdAt: uint64(c.config.time().Unix()),
- resumptionSecret: c.resumptionSecret,
- certificate: Certificate{
- Certificate: certsFromClient,
- OCSPStaple: c.ocspResponse,
- SignedCertificateTimestamps: c.scts,
- },
- appData: appData,
- alpn: c.clientProtocol,
- }
- if c.extraConfig != nil {
- state.maxEarlyData = c.extraConfig.MaxEarlyData
- }
- var err error
- m.label, err = c.encryptTicket(state.marshal())
- if err != nil {
- return nil, err
- }
- m.lifetime = uint32(maxSessionTicketLifetime / time.Second)
-
- // ticket_age_add is a random 32-bit value. See RFC 8446, section 4.6.1
- // The value is not stored anywhere; we never need to check the ticket age
- // because 0-RTT is not supported.
- ageAdd := make([]byte, 4)
- _, err = c.config.rand().Read(ageAdd)
- if err != nil {
- return nil, err
- }
- m.ageAdd = binary.LittleEndian.Uint32(ageAdd)
-
- // ticket_nonce, which must be unique per connection, is always left at
- // zero because we only ever send one ticket per connection.
-
- if c.extraConfig != nil {
- m.maxEarlyData = c.extraConfig.MaxEarlyData
- }
- return m, nil
-}
-
-// GetSessionTicket generates a new session ticket.
-// It should only be called after the handshake completes.
-// It can only be used for servers, and only if the alternative record layer is set.
-// The ticket may be nil if config.SessionTicketsDisabled is set,
-// or if the client isn't able to receive session tickets.
-func (c *Conn) GetSessionTicket(appData []byte) ([]byte, error) {
- if c.isClient || !c.handshakeComplete() || c.extraConfig == nil || c.extraConfig.AlternativeRecordLayer == nil {
- return nil, errors.New("GetSessionTicket is only valid for servers after completion of the handshake, and if an alternative record layer is set.")
- }
- if c.config.SessionTicketsDisabled {
- return nil, nil
- }
-
- m, err := c.getSessionTicketMsg(appData)
- if err != nil {
- return nil, err
- }
- return m.marshal(), nil
-}
diff --git a/vendor/github.com/marten-seemann/qtls-go1-19/tls.go b/vendor/github.com/marten-seemann/qtls-go1-19/tls.go
deleted file mode 100644
index 42207c235..000000000
--- a/vendor/github.com/marten-seemann/qtls-go1-19/tls.go
+++ /dev/null
@@ -1,362 +0,0 @@
-// Copyright 2009 The Go Authors. All rights reserved.
-// Use of this source code is governed by a BSD-style
-// license that can be found in the LICENSE file.
-
-// package qtls partially implements TLS 1.2, as specified in RFC 5246,
-// and TLS 1.3, as specified in RFC 8446.
-package qtls
-
-// BUG(agl): The crypto/tls package only implements some countermeasures
-// against Lucky13 attacks on CBC-mode encryption, and only on SHA1
-// variants. See http://www.isg.rhul.ac.uk/tls/TLStiming.pdf and
-// https://www.imperialviolet.org/2013/02/04/luckythirteen.html.
-
-import (
- "bytes"
- "context"
- "crypto"
- "crypto/ecdsa"
- "crypto/ed25519"
- "crypto/rsa"
- "crypto/x509"
- "encoding/pem"
- "errors"
- "fmt"
- "net"
- "os"
- "strings"
-)
-
-// Server returns a new TLS server side connection
-// using conn as the underlying transport.
-// The configuration config must be non-nil and must include
-// at least one certificate or else set GetCertificate.
-func Server(conn net.Conn, config *Config, extraConfig *ExtraConfig) *Conn {
- c := &Conn{
- conn: conn,
- config: fromConfig(config),
- extraConfig: extraConfig,
- }
- c.handshakeFn = c.serverHandshake
- return c
-}
-
-// Client returns a new TLS client side connection
-// using conn as the underlying transport.
-// The config cannot be nil: users must set either ServerName or
-// InsecureSkipVerify in the config.
-func Client(conn net.Conn, config *Config, extraConfig *ExtraConfig) *Conn {
- c := &Conn{
- conn: conn,
- config: fromConfig(config),
- extraConfig: extraConfig,
- isClient: true,
- }
- c.handshakeFn = c.clientHandshake
- return c
-}
-
-// A listener implements a network listener (net.Listener) for TLS connections.
-type listener struct {
- net.Listener
- config *Config
- extraConfig *ExtraConfig
-}
-
-// Accept waits for and returns the next incoming TLS connection.
-// The returned connection is of type *Conn.
-func (l *listener) Accept() (net.Conn, error) {
- c, err := l.Listener.Accept()
- if err != nil {
- return nil, err
- }
- return Server(c, l.config, l.extraConfig), nil
-}
-
-// NewListener creates a Listener which accepts connections from an inner
-// Listener and wraps each connection with Server.
-// The configuration config must be non-nil and must include
-// at least one certificate or else set GetCertificate.
-func NewListener(inner net.Listener, config *Config, extraConfig *ExtraConfig) net.Listener {
- l := new(listener)
- l.Listener = inner
- l.config = config
- l.extraConfig = extraConfig
- return l
-}
-
-// Listen creates a TLS listener accepting connections on the
-// given network address using net.Listen.
-// The configuration config must be non-nil and must include
-// at least one certificate or else set GetCertificate.
-func Listen(network, laddr string, config *Config, extraConfig *ExtraConfig) (net.Listener, error) {
- if config == nil || len(config.Certificates) == 0 &&
- config.GetCertificate == nil && config.GetConfigForClient == nil {
- return nil, errors.New("tls: neither Certificates, GetCertificate, nor GetConfigForClient set in Config")
- }
- l, err := net.Listen(network, laddr)
- if err != nil {
- return nil, err
- }
- return NewListener(l, config, extraConfig), nil
-}
-
-type timeoutError struct{}
-
-func (timeoutError) Error() string { return "tls: DialWithDialer timed out" }
-func (timeoutError) Timeout() bool { return true }
-func (timeoutError) Temporary() bool { return true }
-
-// DialWithDialer connects to the given network address using dialer.Dial and
-// then initiates a TLS handshake, returning the resulting TLS connection. Any
-// timeout or deadline given in the dialer apply to connection and TLS
-// handshake as a whole.
-//
-// DialWithDialer interprets a nil configuration as equivalent to the zero
-// configuration; see the documentation of Config for the defaults.
-//
-// DialWithDialer uses context.Background internally; to specify the context,
-// use Dialer.DialContext with NetDialer set to the desired dialer.
-func DialWithDialer(dialer *net.Dialer, network, addr string, config *Config, extraConfig *ExtraConfig) (*Conn, error) {
- return dial(context.Background(), dialer, network, addr, config, extraConfig)
-}
-
-func dial(ctx context.Context, netDialer *net.Dialer, network, addr string, config *Config, extraConfig *ExtraConfig) (*Conn, error) {
- if netDialer.Timeout != 0 {
- var cancel context.CancelFunc
- ctx, cancel = context.WithTimeout(ctx, netDialer.Timeout)
- defer cancel()
- }
-
- if !netDialer.Deadline.IsZero() {
- var cancel context.CancelFunc
- ctx, cancel = context.WithDeadline(ctx, netDialer.Deadline)
- defer cancel()
- }
-
- rawConn, err := netDialer.DialContext(ctx, network, addr)
- if err != nil {
- return nil, err
- }
-
- colonPos := strings.LastIndex(addr, ":")
- if colonPos == -1 {
- colonPos = len(addr)
- }
- hostname := addr[:colonPos]
-
- if config == nil {
- config = defaultConfig()
- }
- // If no ServerName is set, infer the ServerName
- // from the hostname we're connecting to.
- if config.ServerName == "" {
- // Make a copy to avoid polluting argument or default.
- c := config.Clone()
- c.ServerName = hostname
- config = c
- }
-
- conn := Client(rawConn, config, extraConfig)
- if err := conn.HandshakeContext(ctx); err != nil {
- rawConn.Close()
- return nil, err
- }
- return conn, nil
-}
-
-// Dial connects to the given network address using net.Dial
-// and then initiates a TLS handshake, returning the resulting
-// TLS connection.
-// Dial interprets a nil configuration as equivalent to
-// the zero configuration; see the documentation of Config
-// for the defaults.
-func Dial(network, addr string, config *Config, extraConfig *ExtraConfig) (*Conn, error) {
- return DialWithDialer(new(net.Dialer), network, addr, config, extraConfig)
-}
-
-// Dialer dials TLS connections given a configuration and a Dialer for the
-// underlying connection.
-type Dialer struct {
- // NetDialer is the optional dialer to use for the TLS connections'
- // underlying TCP connections.
- // A nil NetDialer is equivalent to the net.Dialer zero value.
- NetDialer *net.Dialer
-
- // Config is the TLS configuration to use for new connections.
- // A nil configuration is equivalent to the zero
- // configuration; see the documentation of Config for the
- // defaults.
- Config *Config
-
- ExtraConfig *ExtraConfig
-}
-
-// Dial connects to the given network address and initiates a TLS
-// handshake, returning the resulting TLS connection.
-//
-// The returned Conn, if any, will always be of type *Conn.
-//
-// Dial uses context.Background internally; to specify the context,
-// use DialContext.
-func (d *Dialer) Dial(network, addr string) (net.Conn, error) {
- return d.DialContext(context.Background(), network, addr)
-}
-
-func (d *Dialer) netDialer() *net.Dialer {
- if d.NetDialer != nil {
- return d.NetDialer
- }
- return new(net.Dialer)
-}
-
-// DialContext connects to the given network address and initiates a TLS
-// handshake, returning the resulting TLS connection.
-//
-// The provided Context must be non-nil. If the context expires before
-// the connection is complete, an error is returned. Once successfully
-// connected, any expiration of the context will not affect the
-// connection.
-//
-// The returned Conn, if any, will always be of type *Conn.
-func (d *Dialer) DialContext(ctx context.Context, network, addr string) (net.Conn, error) {
- c, err := dial(ctx, d.netDialer(), network, addr, d.Config, d.ExtraConfig)
- if err != nil {
- // Don't return c (a typed nil) in an interface.
- return nil, err
- }
- return c, nil
-}
-
-// LoadX509KeyPair reads and parses a public/private key pair from a pair
-// of files. The files must contain PEM encoded data. The certificate file
-// may contain intermediate certificates following the leaf certificate to
-// form a certificate chain. On successful return, Certificate.Leaf will
-// be nil because the parsed form of the certificate is not retained.
-func LoadX509KeyPair(certFile, keyFile string) (Certificate, error) {
- certPEMBlock, err := os.ReadFile(certFile)
- if err != nil {
- return Certificate{}, err
- }
- keyPEMBlock, err := os.ReadFile(keyFile)
- if err != nil {
- return Certificate{}, err
- }
- return X509KeyPair(certPEMBlock, keyPEMBlock)
-}
-
-// X509KeyPair parses a public/private key pair from a pair of
-// PEM encoded data. On successful return, Certificate.Leaf will be nil because
-// the parsed form of the certificate is not retained.
-func X509KeyPair(certPEMBlock, keyPEMBlock []byte) (Certificate, error) {
- fail := func(err error) (Certificate, error) { return Certificate{}, err }
-
- var cert Certificate
- var skippedBlockTypes []string
- for {
- var certDERBlock *pem.Block
- certDERBlock, certPEMBlock = pem.Decode(certPEMBlock)
- if certDERBlock == nil {
- break
- }
- if certDERBlock.Type == "CERTIFICATE" {
- cert.Certificate = append(cert.Certificate, certDERBlock.Bytes)
- } else {
- skippedBlockTypes = append(skippedBlockTypes, certDERBlock.Type)
- }
- }
-
- if len(cert.Certificate) == 0 {
- if len(skippedBlockTypes) == 0 {
- return fail(errors.New("tls: failed to find any PEM data in certificate input"))
- }
- if len(skippedBlockTypes) == 1 && strings.HasSuffix(skippedBlockTypes[0], "PRIVATE KEY") {
- return fail(errors.New("tls: failed to find certificate PEM data in certificate input, but did find a private key; PEM inputs may have been switched"))
- }
- return fail(fmt.Errorf("tls: failed to find \"CERTIFICATE\" PEM block in certificate input after skipping PEM blocks of the following types: %v", skippedBlockTypes))
- }
-
- skippedBlockTypes = skippedBlockTypes[:0]
- var keyDERBlock *pem.Block
- for {
- keyDERBlock, keyPEMBlock = pem.Decode(keyPEMBlock)
- if keyDERBlock == nil {
- if len(skippedBlockTypes) == 0 {
- return fail(errors.New("tls: failed to find any PEM data in key input"))
- }
- if len(skippedBlockTypes) == 1 && skippedBlockTypes[0] == "CERTIFICATE" {
- return fail(errors.New("tls: found a certificate rather than a key in the PEM for the private key"))
- }
- return fail(fmt.Errorf("tls: failed to find PEM block with type ending in \"PRIVATE KEY\" in key input after skipping PEM blocks of the following types: %v", skippedBlockTypes))
- }
- if keyDERBlock.Type == "PRIVATE KEY" || strings.HasSuffix(keyDERBlock.Type, " PRIVATE KEY") {
- break
- }
- skippedBlockTypes = append(skippedBlockTypes, keyDERBlock.Type)
- }
-
- // We don't need to parse the public key for TLS, but we so do anyway
- // to check that it looks sane and matches the private key.
- x509Cert, err := x509.ParseCertificate(cert.Certificate[0])
- if err != nil {
- return fail(err)
- }
-
- cert.PrivateKey, err = parsePrivateKey(keyDERBlock.Bytes)
- if err != nil {
- return fail(err)
- }
-
- switch pub := x509Cert.PublicKey.(type) {
- case *rsa.PublicKey:
- priv, ok := cert.PrivateKey.(*rsa.PrivateKey)
- if !ok {
- return fail(errors.New("tls: private key type does not match public key type"))
- }
- if pub.N.Cmp(priv.N) != 0 {
- return fail(errors.New("tls: private key does not match public key"))
- }
- case *ecdsa.PublicKey:
- priv, ok := cert.PrivateKey.(*ecdsa.PrivateKey)
- if !ok {
- return fail(errors.New("tls: private key type does not match public key type"))
- }
- if pub.X.Cmp(priv.X) != 0 || pub.Y.Cmp(priv.Y) != 0 {
- return fail(errors.New("tls: private key does not match public key"))
- }
- case ed25519.PublicKey:
- priv, ok := cert.PrivateKey.(ed25519.PrivateKey)
- if !ok {
- return fail(errors.New("tls: private key type does not match public key type"))
- }
- if !bytes.Equal(priv.Public().(ed25519.PublicKey), pub) {
- return fail(errors.New("tls: private key does not match public key"))
- }
- default:
- return fail(errors.New("tls: unknown public key algorithm"))
- }
-
- return cert, nil
-}
-
-// Attempt to parse the given private key DER block. OpenSSL 0.9.8 generates
-// PKCS #1 private keys by default, while OpenSSL 1.0.0 generates PKCS #8 keys.
-// OpenSSL ecparam generates SEC1 EC private keys for ECDSA. We try all three.
-func parsePrivateKey(der []byte) (crypto.PrivateKey, error) {
- if key, err := x509.ParsePKCS1PrivateKey(der); err == nil {
- return key, nil
- }
- if key, err := x509.ParsePKCS8PrivateKey(der); err == nil {
- switch key := key.(type) {
- case *rsa.PrivateKey, *ecdsa.PrivateKey, ed25519.PrivateKey:
- return key, nil
- default:
- return nil, errors.New("tls: found unknown private key type in PKCS#8 wrapping")
- }
- }
- if key, err := x509.ParseECPrivateKey(der); err == nil {
- return key, nil
- }
-
- return nil, errors.New("tls: failed to parse private key")
-}
diff --git a/vendor/github.com/marten-seemann/qtls-go1-19/unsafe.go b/vendor/github.com/marten-seemann/qtls-go1-19/unsafe.go
deleted file mode 100644
index 55fa01b3d..000000000
--- a/vendor/github.com/marten-seemann/qtls-go1-19/unsafe.go
+++ /dev/null
@@ -1,96 +0,0 @@
-package qtls
-
-import (
- "crypto/tls"
- "reflect"
- "unsafe"
-)
-
-func init() {
- if !structsEqual(&tls.ConnectionState{}, &connectionState{}) {
- panic("qtls.ConnectionState doesn't match")
- }
- if !structsEqual(&tls.ClientSessionState{}, &clientSessionState{}) {
- panic("qtls.ClientSessionState doesn't match")
- }
- if !structsEqual(&tls.CertificateRequestInfo{}, &certificateRequestInfo{}) {
- panic("qtls.CertificateRequestInfo doesn't match")
- }
- if !structsEqual(&tls.Config{}, &config{}) {
- panic("qtls.Config doesn't match")
- }
- if !structsEqual(&tls.ClientHelloInfo{}, &clientHelloInfo{}) {
- panic("qtls.ClientHelloInfo doesn't match")
- }
-}
-
-func toConnectionState(c connectionState) ConnectionState {
- return *(*ConnectionState)(unsafe.Pointer(&c))
-}
-
-func toClientSessionState(s *clientSessionState) *ClientSessionState {
- return (*ClientSessionState)(unsafe.Pointer(s))
-}
-
-func fromClientSessionState(s *ClientSessionState) *clientSessionState {
- return (*clientSessionState)(unsafe.Pointer(s))
-}
-
-func toCertificateRequestInfo(i *certificateRequestInfo) *CertificateRequestInfo {
- return (*CertificateRequestInfo)(unsafe.Pointer(i))
-}
-
-func toConfig(c *config) *Config {
- return (*Config)(unsafe.Pointer(c))
-}
-
-func fromConfig(c *Config) *config {
- return (*config)(unsafe.Pointer(c))
-}
-
-func toClientHelloInfo(chi *clientHelloInfo) *ClientHelloInfo {
- return (*ClientHelloInfo)(unsafe.Pointer(chi))
-}
-
-func structsEqual(a, b interface{}) bool {
- return compare(reflect.ValueOf(a), reflect.ValueOf(b))
-}
-
-func compare(a, b reflect.Value) bool {
- sa := a.Elem()
- sb := b.Elem()
- if sa.NumField() != sb.NumField() {
- return false
- }
- for i := 0; i < sa.NumField(); i++ {
- fa := sa.Type().Field(i)
- fb := sb.Type().Field(i)
- if !reflect.DeepEqual(fa.Index, fb.Index) || fa.Name != fb.Name || fa.Anonymous != fb.Anonymous || fa.Offset != fb.Offset || !reflect.DeepEqual(fa.Type, fb.Type) {
- if fa.Type.Kind() != fb.Type.Kind() {
- return false
- }
- if fa.Type.Kind() == reflect.Slice {
- if !compareStruct(fa.Type.Elem(), fb.Type.Elem()) {
- return false
- }
- continue
- }
- return false
- }
- }
- return true
-}
-
-func compareStruct(a, b reflect.Type) bool {
- if a.NumField() != b.NumField() {
- return false
- }
- for i := 0; i < a.NumField(); i++ {
- fa := a.Field(i)
- fb := b.Field(i)
- if !reflect.DeepEqual(fa.Index, fb.Index) || fa.Name != fb.Name || fa.Anonymous != fb.Anonymous || fa.Offset != fb.Offset || !reflect.DeepEqual(fa.Type, fb.Type) {
- return false
- }
- }
- return true
-}
diff --git a/vendor/github.com/mattn/go-isatty/isatty_bsd.go b/vendor/github.com/mattn/go-isatty/isatty_bsd.go
index 39bbcf00f..d569c0c94 100644
--- a/vendor/github.com/mattn/go-isatty/isatty_bsd.go
+++ b/vendor/github.com/mattn/go-isatty/isatty_bsd.go
@@ -1,5 +1,5 @@
-//go:build (darwin || freebsd || openbsd || netbsd || dragonfly) && !appengine
-// +build darwin freebsd openbsd netbsd dragonfly
+//go:build (darwin || freebsd || openbsd || netbsd || dragonfly || hurd) && !appengine
+// +build darwin freebsd openbsd netbsd dragonfly hurd
// +build !appengine
package isatty
diff --git a/vendor/github.com/mattn/go-pointer/README.md b/vendor/github.com/mattn/go-pointer/README.md
deleted file mode 100644
index c74eee22a..000000000
--- a/vendor/github.com/mattn/go-pointer/README.md
+++ /dev/null
@@ -1,29 +0,0 @@
-# go-pointer
-
-Utility for cgo
-
-## Usage
-
-https://github.com/golang/proposal/blob/master/design/12416-cgo-pointers.md
-
-In go 1.6, cgo argument can't be passed Go pointer.
-
-```
-var s string
-C.pass_pointer(pointer.Save(&s))
-v := *(pointer.Restore(C.get_from_pointer()).(*string))
-```
-
-## Installation
-
-```
-go get github.com/mattn/go-pointer
-```
-
-## License
-
-MIT
-
-## Author
-
-Yasuhiro Matsumoto (a.k.a mattn)
diff --git a/vendor/github.com/mattn/go-pointer/doc.go b/vendor/github.com/mattn/go-pointer/doc.go
deleted file mode 100644
index c27bd8c05..000000000
--- a/vendor/github.com/mattn/go-pointer/doc.go
+++ /dev/null
@@ -1 +0,0 @@
-package pointer
diff --git a/vendor/github.com/mattn/go-pointer/pointer.go b/vendor/github.com/mattn/go-pointer/pointer.go
deleted file mode 100644
index 08a985339..000000000
--- a/vendor/github.com/mattn/go-pointer/pointer.go
+++ /dev/null
@@ -1,57 +0,0 @@
-package pointer
-
-// #include
-import "C"
-import (
- "sync"
- "unsafe"
-)
-
-var (
- mutex sync.RWMutex
- store = map[unsafe.Pointer]interface{}{}
-)
-
-func Save(v interface{}) unsafe.Pointer {
- if v == nil {
- return nil
- }
-
- // Generate real fake C pointer.
- // This pointer will not store any data, but will bi used for indexing purposes.
- // Since Go doest allow to cast dangling pointer to unsafe.Pointer, we do rally allocate one byte.
- // Why we need indexing, because Go doest allow C code to store pointers to Go data.
- var ptr unsafe.Pointer = C.malloc(C.size_t(1))
- if ptr == nil {
- panic("can't allocate 'cgo-pointer hack index pointer': ptr == nil")
- }
-
- mutex.Lock()
- store[ptr] = v
- mutex.Unlock()
-
- return ptr
-}
-
-func Restore(ptr unsafe.Pointer) (v interface{}) {
- if ptr == nil {
- return nil
- }
-
- mutex.RLock()
- v = store[ptr]
- mutex.RUnlock()
- return
-}
-
-func Unref(ptr unsafe.Pointer) {
- if ptr == nil {
- return
- }
-
- mutex.Lock()
- delete(store, ptr)
- mutex.Unlock()
-
- C.free(ptr)
-}
diff --git a/vendor/github.com/miekg/dns/LICENSE b/vendor/github.com/miekg/dns/LICENSE
index 55f12ab77..852ab9ced 100644
--- a/vendor/github.com/miekg/dns/LICENSE
+++ b/vendor/github.com/miekg/dns/LICENSE
@@ -1,30 +1,29 @@
-Copyright (c) 2009 The Go Authors. All rights reserved.
+BSD 3-Clause License
+
+Copyright (c) 2009, The Go Authors. Extensions copyright (c) 2011, Miek Gieben.
+All rights reserved.
Redistribution and use in source and binary forms, with or without
-modification, are permitted provided that the following conditions are
-met:
+modification, are permitted provided that the following conditions are met:
- * Redistributions of source code must retain the above copyright
-notice, this list of conditions and the following disclaimer.
- * Redistributions in binary form must reproduce the above
-copyright notice, this list of conditions and the following disclaimer
-in the documentation and/or other materials provided with the
-distribution.
- * Neither the name of Google Inc. nor the names of its
-contributors may be used to endorse or promote products derived from
-this software without specific prior written permission.
+1. Redistributions of source code must retain the above copyright notice, this
+ list of conditions and the following disclaimer.
-THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
-"AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
-LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
-A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
-OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
-SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
-LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
-DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
-THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
-(INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
-OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+2. Redistributions in binary form must reproduce the above copyright notice,
+ this list of conditions and the following disclaimer in the documentation
+ and/or other materials provided with the distribution.
-As this is fork of the official Go code the same license applies.
-Extensions of the original work are copyright (c) 2011 Miek Gieben
+3. Neither the name of the copyright holder nor the names of its
+ contributors may be used to endorse or promote products derived from
+ this software without specific prior written permission.
+
+THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
+AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
+DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE
+FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
+DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR
+SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER
+CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY,
+OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
diff --git a/vendor/github.com/miekg/dns/README.md b/vendor/github.com/miekg/dns/README.md
index 5a799d88f..06bea9fab 100644
--- a/vendor/github.com/miekg/dns/README.md
+++ b/vendor/github.com/miekg/dns/README.md
@@ -77,6 +77,10 @@ A not-so-up-to-date-list-that-may-be-actually-current:
* https://ping.sx/dig
* https://fleetdeck.io/
* https://github.com/markdingo/autoreverse
+* https://github.com/slackhq/nebula
+* https://addr.tools/
+* https://dnscheck.tools/
+* https://github.com/egbakou/domainverifier
Send pull request if you want to be listed here.
@@ -140,6 +144,7 @@ Example programs can be found in the `github.com/miekg/exdns` repository.
* 340{1,2,3} - NAPTR record
* 3445 - Limiting the scope of (DNS)KEY
* 3597 - Unknown RRs
+* 4025 - A Method for Storing IPsec Keying Material in DNS
* 403{3,4,5} - DNSSEC + validation functions
* 4255 - SSHFP record
* 4343 - Case insensitivity
@@ -175,6 +180,7 @@ Example programs can be found in the `github.com/miekg/exdns` repository.
* 8080 - EdDSA for DNSSEC
* 8499 - DNS Terminology
* 8659 - DNS Certification Authority Authorization (CAA) Resource Record
+* 8777 - DNS Reverse IP Automatic Multicast Tunneling (AMT) Discovery
* 8914 - Extended DNS Errors
* 8976 - Message Digest for DNS Zones (ZONEMD RR)
diff --git a/vendor/github.com/miekg/dns/acceptfunc.go b/vendor/github.com/miekg/dns/acceptfunc.go
index ac479db95..ab2812e33 100644
--- a/vendor/github.com/miekg/dns/acceptfunc.go
+++ b/vendor/github.com/miekg/dns/acceptfunc.go
@@ -19,7 +19,6 @@ type MsgAcceptFunc func(dh Header) MsgAcceptAction
// * has more than 0 RRs in the Authority section
//
// * has more than 2 RRs in the Additional section
-//
var DefaultMsgAcceptFunc MsgAcceptFunc = defaultMsgAcceptFunc
// MsgAcceptAction represents the action to be taken.
diff --git a/vendor/github.com/miekg/dns/client.go b/vendor/github.com/miekg/dns/client.go
index 9aa658530..2cdd49af1 100644
--- a/vendor/github.com/miekg/dns/client.go
+++ b/vendor/github.com/miekg/dns/client.go
@@ -6,7 +6,6 @@ import (
"context"
"crypto/tls"
"encoding/binary"
- "fmt"
"io"
"net"
"strings"
@@ -56,14 +55,20 @@ type Client struct {
// Timeout is a cumulative timeout for dial, write and read, defaults to 0 (disabled) - overrides DialTimeout, ReadTimeout,
// WriteTimeout when non-zero. Can be overridden with net.Dialer.Timeout (see Client.ExchangeWithDialer and
// Client.Dialer) or context.Context.Deadline (see ExchangeContext)
- Timeout time.Duration
- DialTimeout time.Duration // net.DialTimeout, defaults to 2 seconds, or net.Dialer.Timeout if expiring earlier - overridden by Timeout when that value is non-zero
- ReadTimeout time.Duration // net.Conn.SetReadTimeout value for connections, defaults to 2 seconds - overridden by Timeout when that value is non-zero
- WriteTimeout time.Duration // net.Conn.SetWriteTimeout value for connections, defaults to 2 seconds - overridden by Timeout when that value is non-zero
- TsigSecret map[string]string // secret(s) for Tsig map[], zonename must be in canonical form (lowercase, fqdn, see RFC 4034 Section 6.2)
- TsigProvider TsigProvider // An implementation of the TsigProvider interface. If defined it replaces TsigSecret and is used for all TSIG operations.
- SingleInflight bool // if true suppress multiple outstanding queries for the same Qname, Qtype and Qclass
- group singleflight
+ Timeout time.Duration
+ DialTimeout time.Duration // net.DialTimeout, defaults to 2 seconds, or net.Dialer.Timeout if expiring earlier - overridden by Timeout when that value is non-zero
+ ReadTimeout time.Duration // net.Conn.SetReadTimeout value for connections, defaults to 2 seconds - overridden by Timeout when that value is non-zero
+ WriteTimeout time.Duration // net.Conn.SetWriteTimeout value for connections, defaults to 2 seconds - overridden by Timeout when that value is non-zero
+ TsigSecret map[string]string // secret(s) for Tsig map[], zonename must be in canonical form (lowercase, fqdn, see RFC 4034 Section 6.2)
+ TsigProvider TsigProvider // An implementation of the TsigProvider interface. If defined it replaces TsigSecret and is used for all TSIG operations.
+
+ // SingleInflight previously serialised multiple concurrent queries for the
+ // same Qname, Qtype and Qclass to ensure only one would be in flight at a
+ // time.
+ //
+ // Deprecated: This is a no-op. Callers should implement their own in flight
+ // query caching if needed. See github.com/miekg/dns/issues/1449.
+ SingleInflight bool
}
// Exchange performs a synchronous UDP query. It sends the message m to the address
@@ -106,7 +111,6 @@ func (c *Client) Dial(address string) (conn *Conn, err error) {
}
// DialContext connects to the address on the named network, with a context.Context.
-// For TLS over TCP (DoT) the context isn't used yet. This will be enabled when Go 1.18 is released.
func (c *Client) DialContext(ctx context.Context, address string) (conn *Conn, err error) {
// create a new dialer with the appropriate timeout
var d net.Dialer
@@ -127,15 +131,11 @@ func (c *Client) DialContext(ctx context.Context, address string) (conn *Conn, e
if useTLS {
network = strings.TrimSuffix(network, "-tls")
- // TODO(miekg): Enable after Go 1.18 is released, to be able to support two prev. releases.
- /*
- tlsDialer := tls.Dialer{
- NetDialer: &d,
- Config: c.TLSConfig,
- }
- conn.Conn, err = tlsDialer.DialContext(ctx, network, address)
- */
- conn.Conn, err = tls.DialWithDialer(&d, network, address, c.TLSConfig)
+ tlsDialer := tls.Dialer{
+ NetDialer: &d,
+ Config: c.TLSConfig,
+ }
+ conn.Conn, err = tlsDialer.DialContext(ctx, network, address)
} else {
conn.Conn, err = d.DialContext(ctx, network, address)
}
@@ -185,31 +185,12 @@ func (c *Client) Exchange(m *Msg, address string) (r *Msg, rtt time.Duration, er
// that entails when using "tcp" and especially "tcp-tls" clients.
//
// When the singleflight is set for this client the context is _not_ forwarded to the (shared) exchange, to
-// prevent one cancelation from canceling all outstanding requests.
+// prevent one cancellation from canceling all outstanding requests.
func (c *Client) ExchangeWithConn(m *Msg, conn *Conn) (r *Msg, rtt time.Duration, err error) {
return c.exchangeWithConnContext(context.Background(), m, conn)
}
-func (c *Client) exchangeWithConnContext(ctx context.Context, m *Msg, conn *Conn) (r *Msg, rtt time.Duration, err error) {
- if !c.SingleInflight {
- return c.exchangeContext(ctx, m, conn)
- }
-
- q := m.Question[0]
- key := fmt.Sprintf("%s:%d:%d", q.Name, q.Qtype, q.Qclass)
- r, rtt, err, shared := c.group.Do(key, func() (*Msg, time.Duration, error) {
- // When we're doing singleflight we don't want one context cancelation, cancel _all_ outstanding queries.
- // Hence we ignore the context and use Background().
- return c.exchangeContext(context.Background(), m, conn)
- })
- if r != nil && shared {
- r = r.Copy()
- }
-
- return r, rtt, err
-}
-
-func (c *Client) exchangeContext(ctx context.Context, m *Msg, co *Conn) (r *Msg, rtt time.Duration, err error) {
+func (c *Client) exchangeWithConnContext(ctx context.Context, m *Msg, co *Conn) (r *Msg, rtt time.Duration, err error) {
opt := m.IsEdns0()
// If EDNS0 is used use that for size.
if opt != nil && opt.UDPSize() >= MinMsgSize {
@@ -431,7 +412,6 @@ func ExchangeContext(ctx context.Context, m *Msg, a string) (r *Msg, err error)
// co.WriteMsg(m)
// in, _ := co.ReadMsg()
// co.Close()
-//
func ExchangeConn(c net.Conn, m *Msg) (r *Msg, err error) {
println("dns: ExchangeConn: this function is deprecated")
co := new(Conn)
diff --git a/vendor/github.com/miekg/dns/clientconfig.go b/vendor/github.com/miekg/dns/clientconfig.go
index e11b630df..d00ac62fb 100644
--- a/vendor/github.com/miekg/dns/clientconfig.go
+++ b/vendor/github.com/miekg/dns/clientconfig.go
@@ -68,7 +68,7 @@ func ClientConfigFromReader(resolvconf io.Reader) (*ClientConfig, error) {
}
case "search": // set search path to given servers
- c.Search = append([]string(nil), f[1:]...)
+ c.Search = cloneSlice(f[1:])
case "options": // magic options
for _, s := range f[1:] {
diff --git a/vendor/github.com/miekg/dns/defaults.go b/vendor/github.com/miekg/dns/defaults.go
index f2cdbf430..c1558b79c 100644
--- a/vendor/github.com/miekg/dns/defaults.go
+++ b/vendor/github.com/miekg/dns/defaults.go
@@ -208,7 +208,7 @@ func IsDomainName(s string) (labels int, ok bool) {
}
// check for \DDD
- if i+3 < len(s) && isDigit(s[i+1]) && isDigit(s[i+2]) && isDigit(s[i+3]) {
+ if isDDD(s[i+1:]) {
i += 3
begin += 3
} else {
@@ -272,18 +272,24 @@ func IsMsg(buf []byte) error {
// IsFqdn checks if a domain name is fully qualified.
func IsFqdn(s string) bool {
- s2 := strings.TrimSuffix(s, ".")
- if s == s2 {
+ // Check for (and remove) a trailing dot, returning if there isn't one.
+ if s == "" || s[len(s)-1] != '.' {
return false
}
+ s = s[:len(s)-1]
- i := strings.LastIndexFunc(s2, func(r rune) bool {
+ // If we don't have an escape sequence before the final dot, we know it's
+ // fully qualified and can return here.
+ if s == "" || s[len(s)-1] != '\\' {
+ return true
+ }
+
+ // Otherwise we have to check if the dot is escaped or not by checking if
+ // there are an odd or even number of escape sequences before the dot.
+ i := strings.LastIndexFunc(s, func(r rune) bool {
return r != '\\'
})
-
- // Test whether we have an even number of escape sequences before
- // the dot or none.
- return (len(s2)-i)%2 != 0
+ return (len(s)-i)%2 != 0
}
// IsRRset checks if a set of RRs is a valid RRset as defined by RFC 2181.
diff --git a/vendor/github.com/miekg/dns/dnssec.go b/vendor/github.com/miekg/dns/dnssec.go
index ea01aa81f..1be87eae6 100644
--- a/vendor/github.com/miekg/dns/dnssec.go
+++ b/vendor/github.com/miekg/dns/dnssec.go
@@ -128,10 +128,6 @@ type dnskeyWireFmt struct {
/* Nothing is left out */
}
-func divRoundUp(a, b int) int {
- return (a + b - 1) / b
-}
-
// KeyTag calculates the keytag (or key-id) of the DNSKEY.
func (k *DNSKEY) KeyTag() uint16 {
if k == nil {
@@ -417,11 +413,11 @@ func (rr *RRSIG) Verify(k *DNSKEY, rrset []RR) error {
return err
}
- sigbuf := rr.sigBuf() // Get the binary signature data
- if rr.Algorithm == PRIVATEDNS { // PRIVATEOID
- // TODO(miek)
- // remove the domain name and assume its ours?
- }
+ sigbuf := rr.sigBuf() // Get the binary signature data
+ // TODO(miek)
+ // remove the domain name and assume its ours?
+ // if rr.Algorithm == PRIVATEDNS { // PRIVATEOID
+ // }
h, cryptohash, err := hashFromAlgorithm(rr.Algorithm)
if err != nil {
diff --git a/vendor/github.com/miekg/dns/doc.go b/vendor/github.com/miekg/dns/doc.go
index f00f5722c..586ab6917 100644
--- a/vendor/github.com/miekg/dns/doc.go
+++ b/vendor/github.com/miekg/dns/doc.go
@@ -13,28 +13,28 @@ names in a message will result in a packing failure.
Resource records are native types. They are not stored in wire format. Basic
usage pattern for creating a new resource record:
- r := new(dns.MX)
- r.Hdr = dns.RR_Header{Name: "miek.nl.", Rrtype: dns.TypeMX, Class: dns.ClassINET, Ttl: 3600}
- r.Preference = 10
- r.Mx = "mx.miek.nl."
+ r := new(dns.MX)
+ r.Hdr = dns.RR_Header{Name: "miek.nl.", Rrtype: dns.TypeMX, Class: dns.ClassINET, Ttl: 3600}
+ r.Preference = 10
+ r.Mx = "mx.miek.nl."
Or directly from a string:
- mx, err := dns.NewRR("miek.nl. 3600 IN MX 10 mx.miek.nl.")
+ mx, err := dns.NewRR("miek.nl. 3600 IN MX 10 mx.miek.nl.")
Or when the default origin (.) and TTL (3600) and class (IN) suit you:
- mx, err := dns.NewRR("miek.nl MX 10 mx.miek.nl")
+ mx, err := dns.NewRR("miek.nl MX 10 mx.miek.nl")
Or even:
- mx, err := dns.NewRR("$ORIGIN nl.\nmiek 1H IN MX 10 mx.miek")
+ mx, err := dns.NewRR("$ORIGIN nl.\nmiek 1H IN MX 10 mx.miek")
In the DNS messages are exchanged, these messages contain resource records
(sets). Use pattern for creating a message:
- m := new(dns.Msg)
- m.SetQuestion("miek.nl.", dns.TypeMX)
+ m := new(dns.Msg)
+ m.SetQuestion("miek.nl.", dns.TypeMX)
Or when not certain if the domain name is fully qualified:
@@ -45,17 +45,17 @@ records for the miek.nl. zone.
The following is slightly more verbose, but more flexible:
- m1 := new(dns.Msg)
- m1.Id = dns.Id()
- m1.RecursionDesired = true
- m1.Question = make([]dns.Question, 1)
- m1.Question[0] = dns.Question{"miek.nl.", dns.TypeMX, dns.ClassINET}
+ m1 := new(dns.Msg)
+ m1.Id = dns.Id()
+ m1.RecursionDesired = true
+ m1.Question = make([]dns.Question, 1)
+ m1.Question[0] = dns.Question{"miek.nl.", dns.TypeMX, dns.ClassINET}
After creating a message it can be sent. Basic use pattern for synchronous
querying the DNS at a server configured on 127.0.0.1 and port 53:
- c := new(dns.Client)
- in, rtt, err := c.Exchange(m1, "127.0.0.1:53")
+ c := new(dns.Client)
+ in, rtt, err := c.Exchange(m1, "127.0.0.1:53")
Suppressing multiple outstanding queries (with the same question, type and
class) is as easy as setting:
@@ -72,7 +72,7 @@ and port to use for the connection:
Port: 12345,
Zone: "",
}
- c.Dialer := &net.Dialer{
+ c.Dialer = &net.Dialer{
Timeout: 200 * time.Millisecond,
LocalAddr: &laddr,
}
@@ -96,7 +96,7 @@ the Answer section:
// do something with t.Txt
}
-Domain Name and TXT Character String Representations
+# Domain Name and TXT Character String Representations
Both domain names and TXT character strings are converted to presentation form
both when unpacked and when converted to strings.
@@ -108,7 +108,7 @@ be escaped. Bytes below 32 and above 127 will be converted to \DDD form.
For domain names, in addition to the above rules brackets, periods, spaces,
semicolons and the at symbol are escaped.
-DNSSEC
+# DNSSEC
DNSSEC (DNS Security Extension) adds a layer of security to the DNS. It uses
public key cryptography to sign resource records. The public keys are stored in
@@ -117,12 +117,12 @@ DNSKEY records and the signatures in RRSIG records.
Requesting DNSSEC information for a zone is done by adding the DO (DNSSEC OK)
bit to a request.
- m := new(dns.Msg)
- m.SetEdns0(4096, true)
+ m := new(dns.Msg)
+ m.SetEdns0(4096, true)
Signature generation, signature verification and key generation are all supported.
-DYNAMIC UPDATES
+# DYNAMIC UPDATES
Dynamic updates reuses the DNS message format, but renames three of the
sections. Question is Zone, Answer is Prerequisite, Authority is Update, only
@@ -133,30 +133,30 @@ certain resource records or names in a zone to specify if resource records
should be added or removed. The table from RFC 2136 supplemented with the Go
DNS function shows which functions exist to specify the prerequisites.
- 3.2.4 - Table Of Metavalues Used In Prerequisite Section
+ 3.2.4 - Table Of Metavalues Used In Prerequisite Section
- CLASS TYPE RDATA Meaning Function
- --------------------------------------------------------------
- ANY ANY empty Name is in use dns.NameUsed
- ANY rrset empty RRset exists (value indep) dns.RRsetUsed
- NONE ANY empty Name is not in use dns.NameNotUsed
- NONE rrset empty RRset does not exist dns.RRsetNotUsed
- zone rrset rr RRset exists (value dep) dns.Used
+ CLASS TYPE RDATA Meaning Function
+ --------------------------------------------------------------
+ ANY ANY empty Name is in use dns.NameUsed
+ ANY rrset empty RRset exists (value indep) dns.RRsetUsed
+ NONE ANY empty Name is not in use dns.NameNotUsed
+ NONE rrset empty RRset does not exist dns.RRsetNotUsed
+ zone rrset rr RRset exists (value dep) dns.Used
The prerequisite section can also be left empty. If you have decided on the
prerequisites you can tell what RRs should be added or deleted. The next table
shows the options you have and what functions to call.
- 3.4.2.6 - Table Of Metavalues Used In Update Section
+ 3.4.2.6 - Table Of Metavalues Used In Update Section
- CLASS TYPE RDATA Meaning Function
- ---------------------------------------------------------------
- ANY ANY empty Delete all RRsets from name dns.RemoveName
- ANY rrset empty Delete an RRset dns.RemoveRRset
- NONE rrset rr Delete an RR from RRset dns.Remove
- zone rrset rr Add to an RRset dns.Insert
+ CLASS TYPE RDATA Meaning Function
+ ---------------------------------------------------------------
+ ANY ANY empty Delete all RRsets from name dns.RemoveName
+ ANY rrset empty Delete an RRset dns.RemoveRRset
+ NONE rrset rr Delete an RR from RRset dns.Remove
+ zone rrset rr Add to an RRset dns.Insert
-TRANSACTION SIGNATURE
+# TRANSACTION SIGNATURE
An TSIG or transaction signature adds a HMAC TSIG record to each message sent.
The supported algorithms include: HmacSHA1, HmacSHA256 and HmacSHA512.
@@ -239,7 +239,7 @@ Basic use pattern validating and replying to a message that has TSIG set.
w.WriteMsg(m)
}
-PRIVATE RRS
+# PRIVATE RRS
RFC 6895 sets aside a range of type codes for private use. This range is 65,280
- 65,534 (0xFF00 - 0xFFFE). When experimenting with new Resource Records these
@@ -248,7 +248,7 @@ can be used, before requesting an official type code from IANA.
See https://miek.nl/2014/september/21/idn-and-private-rr-in-go-dns/ for more
information.
-EDNS0
+# EDNS0
EDNS0 is an extension mechanism for the DNS defined in RFC 2671 and updated by
RFC 6891. It defines a new RR type, the OPT RR, which is then completely
@@ -279,9 +279,9 @@ SIG(0)
From RFC 2931:
- SIG(0) provides protection for DNS transactions and requests ....
- ... protection for glue records, DNS requests, protection for message headers
- on requests and responses, and protection of the overall integrity of a response.
+ SIG(0) provides protection for DNS transactions and requests ....
+ ... protection for glue records, DNS requests, protection for message headers
+ on requests and responses, and protection of the overall integrity of a response.
It works like TSIG, except that SIG(0) uses public key cryptography, instead of
the shared secret approach in TSIG. Supported algorithms: ECDSAP256SHA256,
diff --git a/vendor/github.com/miekg/dns/edns.go b/vendor/github.com/miekg/dns/edns.go
index 14568c2e9..b5bdac816 100644
--- a/vendor/github.com/miekg/dns/edns.go
+++ b/vendor/github.com/miekg/dns/edns.go
@@ -78,7 +78,10 @@ func (rr *OPT) String() string {
if rr.Do() {
s += "flags: do; "
} else {
- s += "flags: ; "
+ s += "flags:; "
+ }
+ if rr.Hdr.Ttl&0x7FFF != 0 {
+ s += fmt.Sprintf("MBZ: 0x%04x, ", rr.Hdr.Ttl&0x7FFF)
}
s += "udp: " + strconv.Itoa(int(rr.UDPSize()))
@@ -98,6 +101,8 @@ func (rr *OPT) String() string {
s += "\n; SUBNET: " + o.String()
case *EDNS0_COOKIE:
s += "\n; COOKIE: " + o.String()
+ case *EDNS0_EXPIRE:
+ s += "\n; EXPIRE: " + o.String()
case *EDNS0_TCP_KEEPALIVE:
s += "\n; KEEPALIVE: " + o.String()
case *EDNS0_UL:
@@ -258,7 +263,7 @@ func (e *EDNS0_NSID) copy() EDNS0 { return &EDNS0_NSID{e.Code, e.Nsid}
// o.Hdr.Name = "."
// o.Hdr.Rrtype = dns.TypeOPT
// e := new(dns.EDNS0_SUBNET)
-// e.Code = dns.EDNS0SUBNET
+// e.Code = dns.EDNS0SUBNET // by default this is filled in through unpacking OPT packets (unpackDataOpt)
// e.Family = 1 // 1 for IPv4 source address, 2 for IPv6
// e.SourceNetmask = 32 // 32 for IPV4, 128 for IPv6
// e.SourceScope = 0
@@ -515,8 +520,8 @@ type EDNS0_DAU struct {
// Option implements the EDNS0 interface.
func (e *EDNS0_DAU) Option() uint16 { return EDNS0DAU }
-func (e *EDNS0_DAU) pack() ([]byte, error) { return e.AlgCode, nil }
-func (e *EDNS0_DAU) unpack(b []byte) error { e.AlgCode = b; return nil }
+func (e *EDNS0_DAU) pack() ([]byte, error) { return cloneSlice(e.AlgCode), nil }
+func (e *EDNS0_DAU) unpack(b []byte) error { e.AlgCode = cloneSlice(b); return nil }
func (e *EDNS0_DAU) String() string {
s := ""
@@ -539,8 +544,8 @@ type EDNS0_DHU struct {
// Option implements the EDNS0 interface.
func (e *EDNS0_DHU) Option() uint16 { return EDNS0DHU }
-func (e *EDNS0_DHU) pack() ([]byte, error) { return e.AlgCode, nil }
-func (e *EDNS0_DHU) unpack(b []byte) error { e.AlgCode = b; return nil }
+func (e *EDNS0_DHU) pack() ([]byte, error) { return cloneSlice(e.AlgCode), nil }
+func (e *EDNS0_DHU) unpack(b []byte) error { e.AlgCode = cloneSlice(b); return nil }
func (e *EDNS0_DHU) String() string {
s := ""
@@ -563,8 +568,8 @@ type EDNS0_N3U struct {
// Option implements the EDNS0 interface.
func (e *EDNS0_N3U) Option() uint16 { return EDNS0N3U }
-func (e *EDNS0_N3U) pack() ([]byte, error) { return e.AlgCode, nil }
-func (e *EDNS0_N3U) unpack(b []byte) error { e.AlgCode = b; return nil }
+func (e *EDNS0_N3U) pack() ([]byte, error) { return cloneSlice(e.AlgCode), nil }
+func (e *EDNS0_N3U) unpack(b []byte) error { e.AlgCode = cloneSlice(b); return nil }
func (e *EDNS0_N3U) String() string {
// Re-use the hash map
@@ -641,30 +646,21 @@ type EDNS0_LOCAL struct {
// Option implements the EDNS0 interface.
func (e *EDNS0_LOCAL) Option() uint16 { return e.Code }
+
func (e *EDNS0_LOCAL) String() string {
return strconv.FormatInt(int64(e.Code), 10) + ":0x" + hex.EncodeToString(e.Data)
}
+
func (e *EDNS0_LOCAL) copy() EDNS0 {
- b := make([]byte, len(e.Data))
- copy(b, e.Data)
- return &EDNS0_LOCAL{e.Code, b}
+ return &EDNS0_LOCAL{e.Code, cloneSlice(e.Data)}
}
func (e *EDNS0_LOCAL) pack() ([]byte, error) {
- b := make([]byte, len(e.Data))
- copied := copy(b, e.Data)
- if copied != len(e.Data) {
- return nil, ErrBuf
- }
- return b, nil
+ return cloneSlice(e.Data), nil
}
func (e *EDNS0_LOCAL) unpack(b []byte) error {
- e.Data = make([]byte, len(b))
- copied := copy(e.Data, b)
- if copied != len(b) {
- return ErrBuf
- }
+ e.Data = cloneSlice(b)
return nil
}
@@ -727,14 +723,10 @@ type EDNS0_PADDING struct {
// Option implements the EDNS0 interface.
func (e *EDNS0_PADDING) Option() uint16 { return EDNS0PADDING }
-func (e *EDNS0_PADDING) pack() ([]byte, error) { return e.Padding, nil }
-func (e *EDNS0_PADDING) unpack(b []byte) error { e.Padding = b; return nil }
+func (e *EDNS0_PADDING) pack() ([]byte, error) { return cloneSlice(e.Padding), nil }
+func (e *EDNS0_PADDING) unpack(b []byte) error { e.Padding = cloneSlice(b); return nil }
func (e *EDNS0_PADDING) String() string { return fmt.Sprintf("%0X", e.Padding) }
-func (e *EDNS0_PADDING) copy() EDNS0 {
- b := make([]byte, len(e.Padding))
- copy(b, e.Padding)
- return &EDNS0_PADDING{b}
-}
+func (e *EDNS0_PADDING) copy() EDNS0 { return &EDNS0_PADDING{cloneSlice(e.Padding)} }
// Extended DNS Error Codes (RFC 8914).
const (
@@ -821,7 +813,7 @@ func (e *EDNS0_EDE) String() string {
func (e *EDNS0_EDE) pack() ([]byte, error) {
b := make([]byte, 2+len(e.ExtraText))
binary.BigEndian.PutUint16(b[0:], e.InfoCode)
- copy(b[2:], []byte(e.ExtraText))
+ copy(b[2:], e.ExtraText)
return b, nil
}
diff --git a/vendor/github.com/miekg/dns/fuzz.go b/vendor/github.com/miekg/dns/fuzz.go
index 57410acda..505ae4308 100644
--- a/vendor/github.com/miekg/dns/fuzz.go
+++ b/vendor/github.com/miekg/dns/fuzz.go
@@ -1,3 +1,4 @@
+//go:build fuzz
// +build fuzz
package dns
diff --git a/vendor/github.com/miekg/dns/labels.go b/vendor/github.com/miekg/dns/labels.go
index f9faacfeb..cd498d2e9 100644
--- a/vendor/github.com/miekg/dns/labels.go
+++ b/vendor/github.com/miekg/dns/labels.go
@@ -122,7 +122,7 @@ func Split(s string) []int {
}
// NextLabel returns the index of the start of the next label in the
-// string s starting at offset.
+// string s starting at offset. A negative offset will cause a panic.
// The bool end is true when the end of the string has been reached.
// Also see PrevLabel.
func NextLabel(s string, offset int) (i int, end bool) {
diff --git a/vendor/github.com/miekg/dns/listen_no_reuseport.go b/vendor/github.com/miekg/dns/listen_no_reuseport.go
index b9201417a..6ed50f86b 100644
--- a/vendor/github.com/miekg/dns/listen_no_reuseport.go
+++ b/vendor/github.com/miekg/dns/listen_no_reuseport.go
@@ -1,4 +1,5 @@
-// +build !go1.11 !aix,!darwin,!dragonfly,!freebsd,!linux,!netbsd,!openbsd
+//go:build !aix && !darwin && !dragonfly && !freebsd && !linux && !netbsd && !openbsd
+// +build !aix,!darwin,!dragonfly,!freebsd,!linux,!netbsd,!openbsd
package dns
diff --git a/vendor/github.com/miekg/dns/listen_reuseport.go b/vendor/github.com/miekg/dns/listen_reuseport.go
index fad195cfe..89bac9034 100644
--- a/vendor/github.com/miekg/dns/listen_reuseport.go
+++ b/vendor/github.com/miekg/dns/listen_reuseport.go
@@ -1,4 +1,4 @@
-// +build go1.11
+//go:build aix || darwin || dragonfly || freebsd || linux || netbsd || openbsd
// +build aix darwin dragonfly freebsd linux netbsd openbsd
package dns
diff --git a/vendor/github.com/miekg/dns/msg.go b/vendor/github.com/miekg/dns/msg.go
index 89ebb64ab..d5049a4f9 100644
--- a/vendor/github.com/miekg/dns/msg.go
+++ b/vendor/github.com/miekg/dns/msg.go
@@ -252,7 +252,7 @@ loop:
}
// check for \DDD
- if i+3 < ls && isDigit(bs[i+1]) && isDigit(bs[i+2]) && isDigit(bs[i+3]) {
+ if isDDD(bs[i+1:]) {
bs[i] = dddToByte(bs[i+1:])
copy(bs[i+1:ls-3], bs[i+4:])
ls -= 3
@@ -448,7 +448,7 @@ Loop:
return string(s), off1, nil
}
-func packTxt(txt []string, msg []byte, offset int, tmp []byte) (int, error) {
+func packTxt(txt []string, msg []byte, offset int) (int, error) {
if len(txt) == 0 {
if offset >= len(msg) {
return offset, ErrBuf
@@ -458,10 +458,7 @@ func packTxt(txt []string, msg []byte, offset int, tmp []byte) (int, error) {
}
var err error
for _, s := range txt {
- if len(s) > len(tmp) {
- return offset, ErrBuf
- }
- offset, err = packTxtString(s, msg, offset, tmp)
+ offset, err = packTxtString(s, msg, offset)
if err != nil {
return offset, err
}
@@ -469,32 +466,30 @@ func packTxt(txt []string, msg []byte, offset int, tmp []byte) (int, error) {
return offset, nil
}
-func packTxtString(s string, msg []byte, offset int, tmp []byte) (int, error) {
+func packTxtString(s string, msg []byte, offset int) (int, error) {
lenByteOffset := offset
- if offset >= len(msg) || len(s) > len(tmp) {
+ if offset >= len(msg) || len(s) > 256*4+1 /* If all \DDD */ {
return offset, ErrBuf
}
offset++
- bs := tmp[:len(s)]
- copy(bs, s)
- for i := 0; i < len(bs); i++ {
+ for i := 0; i < len(s); i++ {
if len(msg) <= offset {
return offset, ErrBuf
}
- if bs[i] == '\\' {
+ if s[i] == '\\' {
i++
- if i == len(bs) {
+ if i == len(s) {
break
}
// check for \DDD
- if i+2 < len(bs) && isDigit(bs[i]) && isDigit(bs[i+1]) && isDigit(bs[i+2]) {
- msg[offset] = dddToByte(bs[i:])
+ if isDDD(s[i:]) {
+ msg[offset] = dddToByte(s[i:])
i += 2
} else {
- msg[offset] = bs[i]
+ msg[offset] = s[i]
}
} else {
- msg[offset] = bs[i]
+ msg[offset] = s[i]
}
offset++
}
@@ -522,7 +517,7 @@ func packOctetString(s string, msg []byte, offset int, tmp []byte) (int, error)
break
}
// check for \DDD
- if i+2 < len(bs) && isDigit(bs[i]) && isDigit(bs[i+1]) && isDigit(bs[i+2]) {
+ if isDDD(bs[i:]) {
msg[offset] = dddToByte(bs[i:])
i += 2
} else {
@@ -551,12 +546,11 @@ func unpackTxt(msg []byte, off0 int) (ss []string, off int, err error) {
// Helpers for dealing with escaped bytes
func isDigit(b byte) bool { return b >= '0' && b <= '9' }
-func dddToByte(s []byte) byte {
- _ = s[2] // bounds check hint to compiler; see golang.org/issue/14808
- return byte((s[0]-'0')*100 + (s[1]-'0')*10 + (s[2] - '0'))
+func isDDD[T ~[]byte | ~string](s T) bool {
+ return len(s) >= 3 && isDigit(s[0]) && isDigit(s[1]) && isDigit(s[2])
}
-func dddStringToByte(s string) byte {
+func dddToByte[T ~[]byte | ~string](s T) byte {
_ = s[2] // bounds check hint to compiler; see golang.org/issue/14808
return byte((s[0]-'0')*100 + (s[1]-'0')*10 + (s[2] - '0'))
}
@@ -680,9 +674,9 @@ func unpackRRslice(l int, msg []byte, off int) (dst1 []RR, off1 int, err error)
// Convert a MsgHdr to a string, with dig-like headers:
//
-//;; opcode: QUERY, status: NOERROR, id: 48404
+// ;; opcode: QUERY, status: NOERROR, id: 48404
//
-//;; flags: qr aa rd ra;
+// ;; flags: qr aa rd ra;
func (h *MsgHdr) String() string {
if h == nil {
return " MsgHdr"
@@ -866,7 +860,7 @@ func (dns *Msg) unpack(dh Header, msg []byte, off int) (err error) {
// The header counts might have been wrong so we need to update it
dh.Nscount = uint16(len(dns.Ns))
if err == nil {
- dns.Extra, off, err = unpackRRslice(int(dh.Arcount), msg, off)
+ dns.Extra, _, err = unpackRRslice(int(dh.Arcount), msg, off)
}
// The header counts might have been wrong so we need to update it
dh.Arcount = uint16(len(dns.Extra))
@@ -876,11 +870,11 @@ func (dns *Msg) unpack(dh Header, msg []byte, off int) (err error) {
dns.Rcode |= opt.ExtendedRcode()
}
- if off != len(msg) {
- // TODO(miek) make this an error?
- // use PackOpt to let people tell how detailed the error reporting should be?
- // println("dns: extra bytes in dns packet", off, "<", len(msg))
- }
+ // TODO(miek) make this an error?
+ // use PackOpt to let people tell how detailed the error reporting should be?
+ // if off != len(msg) {
+ // // println("dns: extra bytes in dns packet", off, "<", len(msg))
+ // }
return err
}
@@ -1024,7 +1018,7 @@ func escapedNameLen(s string) int {
continue
}
- if i+3 < len(s) && isDigit(s[i+1]) && isDigit(s[i+2]) && isDigit(s[i+3]) {
+ if isDDD(s[i+1:]) {
nameLen -= 3
i += 3
} else {
@@ -1065,8 +1059,8 @@ func (dns *Msg) CopyTo(r1 *Msg) *Msg {
r1.Compress = dns.Compress
if len(dns.Question) > 0 {
- r1.Question = make([]Question, len(dns.Question))
- copy(r1.Question, dns.Question) // TODO(miek): Question is an immutable value, ok to do a shallow-copy
+ // TODO(miek): Question is an immutable value, ok to do a shallow-copy
+ r1.Question = cloneSlice(dns.Question)
}
rrArr := make([]RR, len(dns.Answer)+len(dns.Ns)+len(dns.Extra))
diff --git a/vendor/github.com/miekg/dns/msg_helpers.go b/vendor/github.com/miekg/dns/msg_helpers.go
index ea2035cd2..8582fc0ad 100644
--- a/vendor/github.com/miekg/dns/msg_helpers.go
+++ b/vendor/github.com/miekg/dns/msg_helpers.go
@@ -299,8 +299,7 @@ func unpackString(msg []byte, off int) (string, int, error) {
}
func packString(s string, msg []byte, off int) (int, error) {
- txtTmp := make([]byte, 256*4+1)
- off, err := packTxtString(s, msg, off, txtTmp)
+ off, err := packTxtString(s, msg, off)
if err != nil {
return len(msg), err
}
@@ -402,8 +401,7 @@ func unpackStringTxt(msg []byte, off int) ([]string, int, error) {
}
func packStringTxt(s []string, msg []byte, off int) (int, error) {
- txtTmp := make([]byte, 256*4+1) // If the whole string consists out of \DDD we need this many.
- off, err := packTxt(s, msg, off, txtTmp)
+ off, err := packTxt(s, msg, off)
if err != nil {
return len(msg), err
}
@@ -625,7 +623,7 @@ func unpackDataSVCB(msg []byte, off int) ([]SVCBKeyValue, int, error) {
}
func packDataSVCB(pairs []SVCBKeyValue, msg []byte, off int) (int, error) {
- pairs = append([]SVCBKeyValue(nil), pairs...)
+ pairs = cloneSlice(pairs)
sort.Slice(pairs, func(i, j int) bool {
return pairs[i].Key() < pairs[j].Key()
})
@@ -810,3 +808,37 @@ func unpackDataAplPrefix(msg []byte, off int) (APLPrefix, int, error) {
Network: ipnet,
}, off, nil
}
+
+func unpackIPSECGateway(msg []byte, off int, gatewayType uint8) (net.IP, string, int, error) {
+ var retAddr net.IP
+ var retString string
+ var err error
+
+ switch gatewayType {
+ case IPSECGatewayNone: // do nothing
+ case IPSECGatewayIPv4:
+ retAddr, off, err = unpackDataA(msg, off)
+ case IPSECGatewayIPv6:
+ retAddr, off, err = unpackDataAAAA(msg, off)
+ case IPSECGatewayHost:
+ retString, off, err = UnpackDomainName(msg, off)
+ }
+
+ return retAddr, retString, off, err
+}
+
+func packIPSECGateway(gatewayAddr net.IP, gatewayString string, msg []byte, off int, gatewayType uint8, compression compressionMap, compress bool) (int, error) {
+ var err error
+
+ switch gatewayType {
+ case IPSECGatewayNone: // do nothing
+ case IPSECGatewayIPv4:
+ off, err = packDataA(gatewayAddr, msg, off)
+ case IPSECGatewayIPv6:
+ off, err = packDataAAAA(gatewayAddr, msg, off)
+ case IPSECGatewayHost:
+ off, err = packDomainName(gatewayString, msg, off, compression, compress)
+ }
+
+ return off, err
+}
diff --git a/vendor/github.com/miekg/dns/scan.go b/vendor/github.com/miekg/dns/scan.go
index 57be98827..3083c3e5f 100644
--- a/vendor/github.com/miekg/dns/scan.go
+++ b/vendor/github.com/miekg/dns/scan.go
@@ -10,13 +10,13 @@ import (
"strings"
)
-const maxTok = 2048 // Largest token we can return.
+const maxTok = 512 // Token buffer start size, and growth size amount.
// The maximum depth of $INCLUDE directives supported by the
// ZoneParser API.
const maxIncludeDepth = 7
-// Tokinize a RFC 1035 zone file. The tokenizer will normalize it:
+// Tokenize a RFC 1035 zone file. The tokenizer will normalize it:
// * Add ownernames if they are left blank;
// * Suppress sequences of spaces;
// * Make each RR fit on one line (_NEWLINE is send as last)
@@ -765,8 +765,8 @@ func (zl *zlexer) Next() (lex, bool) {
}
var (
- str [maxTok]byte // Hold string text
- com [maxTok]byte // Hold comment text
+ str = make([]byte, maxTok) // Hold string text
+ com = make([]byte, maxTok) // Hold comment text
stri int // Offset in str (0 means empty)
comi int // Offset in com (0 means empty)
@@ -785,14 +785,12 @@ func (zl *zlexer) Next() (lex, bool) {
l.line, l.column = zl.line, zl.column
if stri >= len(str) {
- l.token = "token length insufficient for parsing"
- l.err = true
- return *l, true
+ // if buffer length is insufficient, increase it.
+ str = append(str[:], make([]byte, maxTok)...)
}
if comi >= len(com) {
- l.token = "comment length insufficient for parsing"
- l.err = true
- return *l, true
+ // if buffer length is insufficient, increase it.
+ com = append(com[:], make([]byte, maxTok)...)
}
switch x {
@@ -816,7 +814,7 @@ func (zl *zlexer) Next() (lex, bool) {
if stri == 0 {
// Space directly in the beginning, handled in the grammar
} else if zl.owner {
- // If we have a string and its the first, make it an owner
+ // If we have a string and it's the first, make it an owner
l.value = zOwner
l.token = string(str[:stri])
diff --git a/vendor/github.com/miekg/dns/scan_rr.go b/vendor/github.com/miekg/dns/scan_rr.go
index e398484da..d08c8e6a7 100644
--- a/vendor/github.com/miekg/dns/scan_rr.go
+++ b/vendor/github.com/miekg/dns/scan_rr.go
@@ -3,6 +3,7 @@ package dns
import (
"bytes"
"encoding/base64"
+ "errors"
"net"
"strconv"
"strings"
@@ -903,11 +904,18 @@ func (rr *RRSIG) parse(c *zlexer, o string) *ParseError {
c.Next() // zBlank
l, _ = c.Next()
- i, e := strconv.ParseUint(l.token, 10, 8)
- if e != nil || l.err {
+ if l.err {
return &ParseError{"", "bad RRSIG Algorithm", l}
}
- rr.Algorithm = uint8(i)
+ i, e := strconv.ParseUint(l.token, 10, 8)
+ rr.Algorithm = uint8(i) // if 0 we'll check the mnemonic in the if
+ if e != nil {
+ v, ok := StringToAlgorithm[l.token]
+ if !ok {
+ return &ParseError{"", "bad RRSIG Algorithm", l}
+ }
+ rr.Algorithm = v
+ }
c.Next() // zBlank
l, _ = c.Next()
@@ -1216,6 +1224,117 @@ func (rr *DS) parse(c *zlexer, o string) *ParseError { return rr.parseDS(c,
func (rr *DLV) parse(c *zlexer, o string) *ParseError { return rr.parseDS(c, o, "DLV") }
func (rr *CDS) parse(c *zlexer, o string) *ParseError { return rr.parseDS(c, o, "CDS") }
+func (rr *IPSECKEY) parse(c *zlexer, o string) *ParseError {
+ l, _ := c.Next()
+ num, err := strconv.ParseUint(l.token, 10, 8)
+ if err != nil || l.err {
+ return &ParseError{"", "bad IPSECKEY value", l}
+ }
+ rr.Precedence = uint8(num)
+ c.Next() // zBlank
+
+ l, _ = c.Next()
+ num, err = strconv.ParseUint(l.token, 10, 8)
+ if err != nil || l.err {
+ return &ParseError{"", "bad IPSECKEY value", l}
+ }
+ rr.GatewayType = uint8(num)
+ c.Next() // zBlank
+
+ l, _ = c.Next()
+ num, err = strconv.ParseUint(l.token, 10, 8)
+ if err != nil || l.err {
+ return &ParseError{"", "bad IPSECKEY value", l}
+ }
+ rr.Algorithm = uint8(num)
+ c.Next() // zBlank
+
+ l, _ = c.Next()
+ if l.err {
+ return &ParseError{"", "bad IPSECKEY gateway", l}
+ }
+
+ rr.GatewayAddr, rr.GatewayHost, err = parseAddrHostUnion(l.token, o, rr.GatewayType)
+ if err != nil {
+ return &ParseError{"", "IPSECKEY " + err.Error(), l}
+ }
+
+ c.Next() // zBlank
+
+ s, pErr := endingToString(c, "bad IPSECKEY PublicKey")
+ if pErr != nil {
+ return pErr
+ }
+ rr.PublicKey = s
+ return slurpRemainder(c)
+}
+
+func (rr *AMTRELAY) parse(c *zlexer, o string) *ParseError {
+ l, _ := c.Next()
+ num, err := strconv.ParseUint(l.token, 10, 8)
+ if err != nil || l.err {
+ return &ParseError{"", "bad AMTRELAY value", l}
+ }
+ rr.Precedence = uint8(num)
+ c.Next() // zBlank
+
+ l, _ = c.Next()
+ if l.err || !(l.token == "0" || l.token == "1") {
+ return &ParseError{"", "bad discovery value", l}
+ }
+ if l.token == "1" {
+ rr.GatewayType = 0x80
+ }
+
+ c.Next() // zBlank
+
+ l, _ = c.Next()
+ num, err = strconv.ParseUint(l.token, 10, 8)
+ if err != nil || l.err {
+ return &ParseError{"", "bad AMTRELAY value", l}
+ }
+ rr.GatewayType |= uint8(num)
+ c.Next() // zBlank
+
+ l, _ = c.Next()
+ if l.err {
+ return &ParseError{"", "bad AMTRELAY gateway", l}
+ }
+
+ rr.GatewayAddr, rr.GatewayHost, err = parseAddrHostUnion(l.token, o, rr.GatewayType&0x7f)
+ if err != nil {
+ return &ParseError{"", "AMTRELAY " + err.Error(), l}
+ }
+
+ return slurpRemainder(c)
+}
+
+// same constants and parsing between IPSECKEY and AMTRELAY
+func parseAddrHostUnion(token, o string, gatewayType uint8) (addr net.IP, host string, err error) {
+ switch gatewayType {
+ case IPSECGatewayNone:
+ if token != "." {
+ return addr, host, errors.New("gateway type none with gateway set")
+ }
+ case IPSECGatewayIPv4, IPSECGatewayIPv6:
+ addr = net.ParseIP(token)
+ if addr == nil {
+ return addr, host, errors.New("gateway IP invalid")
+ }
+ if (addr.To4() == nil) == (gatewayType == IPSECGatewayIPv4) {
+ return addr, host, errors.New("gateway IP family mismatch")
+ }
+ case IPSECGatewayHost:
+ var ok bool
+ host, ok = toAbsoluteName(token, o)
+ if !ok {
+ return addr, host, errors.New("invalid gateway host")
+ }
+ }
+
+ return addr, host, nil
+}
+
func (rr *RKEY) parse(c *zlexer, o string) *ParseError {
l, _ := c.Next()
i, e := strconv.ParseUint(l.token, 10, 16)
diff --git a/vendor/github.com/miekg/dns/server.go b/vendor/github.com/miekg/dns/server.go
index 4e5a9aa8a..64e388546 100644
--- a/vendor/github.com/miekg/dns/server.go
+++ b/vendor/github.com/miekg/dns/server.go
@@ -18,7 +18,7 @@ import (
const maxTCPQueries = 128
// aLongTimeAgo is a non-zero time, far in the past, used for
-// immediate cancelation of network operations.
+// immediate cancellation of network operations.
var aLongTimeAgo = time.Unix(1, 0)
// Handler is implemented by any value that implements ServeDNS.
@@ -224,7 +224,7 @@ type Server struct {
// Maximum number of TCP queries before we close the socket. Default is maxTCPQueries (unlimited if -1).
MaxTCPQueries int
// Whether to set the SO_REUSEPORT socket option, allowing multiple listeners to be bound to a single address.
- // It is only supported on go1.11+ and when using ListenAndServe.
+ // It is only supported on certain GOOSes and when using ListenAndServe.
ReusePort bool
// AcceptMsgFunc will check the incoming message and will reject it early in the process.
// By default DefaultMsgAcceptFunc will be used.
diff --git a/vendor/github.com/miekg/dns/singleinflight.go b/vendor/github.com/miekg/dns/singleinflight.go
deleted file mode 100644
index febcc300f..000000000
--- a/vendor/github.com/miekg/dns/singleinflight.go
+++ /dev/null
@@ -1,61 +0,0 @@
-// Copyright 2013 The Go Authors. All rights reserved.
-// Use of this source code is governed by a BSD-style
-// license that can be found in the LICENSE file.
-
-// Adapted for dns package usage by Miek Gieben.
-
-package dns
-
-import "sync"
-import "time"
-
-// call is an in-flight or completed singleflight.Do call
-type call struct {
- wg sync.WaitGroup
- val *Msg
- rtt time.Duration
- err error
- dups int
-}
-
-// singleflight represents a class of work and forms a namespace in
-// which units of work can be executed with duplicate suppression.
-type singleflight struct {
- sync.Mutex // protects m
- m map[string]*call // lazily initialized
-
- dontDeleteForTesting bool // this is only to be used by TestConcurrentExchanges
-}
-
-// Do executes and returns the results of the given function, making
-// sure that only one execution is in-flight for a given key at a
-// time. If a duplicate comes in, the duplicate caller waits for the
-// original to complete and receives the same results.
-// The return value shared indicates whether v was given to multiple callers.
-func (g *singleflight) Do(key string, fn func() (*Msg, time.Duration, error)) (v *Msg, rtt time.Duration, err error, shared bool) {
- g.Lock()
- if g.m == nil {
- g.m = make(map[string]*call)
- }
- if c, ok := g.m[key]; ok {
- c.dups++
- g.Unlock()
- c.wg.Wait()
- return c.val, c.rtt, c.err, true
- }
- c := new(call)
- c.wg.Add(1)
- g.m[key] = c
- g.Unlock()
-
- c.val, c.rtt, c.err = fn()
- c.wg.Done()
-
- if !g.dontDeleteForTesting {
- g.Lock()
- delete(g.m, key)
- g.Unlock()
- }
-
- return c.val, c.rtt, c.err, c.dups > 0
-}
diff --git a/vendor/github.com/miekg/dns/svcb.go b/vendor/github.com/miekg/dns/svcb.go
index ea58710da..6d496d74d 100644
--- a/vendor/github.com/miekg/dns/svcb.go
+++ b/vendor/github.com/miekg/dns/svcb.go
@@ -289,7 +289,7 @@ func (s *SVCBMandatory) String() string {
}
func (s *SVCBMandatory) pack() ([]byte, error) {
- codes := append([]SVCBKey(nil), s.Code...)
+ codes := cloneSlice(s.Code)
sort.Slice(codes, func(i, j int) bool {
return codes[i] < codes[j]
})
@@ -328,9 +328,7 @@ func (s *SVCBMandatory) len() int {
}
func (s *SVCBMandatory) copy() SVCBKeyValue {
- return &SVCBMandatory{
- append([]SVCBKey(nil), s.Code...),
- }
+ return &SVCBMandatory{cloneSlice(s.Code)}
}
// SVCBAlpn pair is used to list supported connection protocols.
@@ -353,7 +351,7 @@ func (*SVCBAlpn) Key() SVCBKey { return SVCB_ALPN }
func (s *SVCBAlpn) String() string {
// An ALPN value is a comma-separated list of values, each of which can be
// an arbitrary binary value. In order to allow parsing, the comma and
- // backslash characters are themselves excaped.
+ // backslash characters are themselves escaped.
//
// However, this escaping is done in addition to the normal escaping which
// happens in zone files, meaning that these values must be
@@ -481,9 +479,7 @@ func (s *SVCBAlpn) len() int {
}
func (s *SVCBAlpn) copy() SVCBKeyValue {
- return &SVCBAlpn{
- append([]string(nil), s.Alpn...),
- }
+ return &SVCBAlpn{cloneSlice(s.Alpn)}
}
// SVCBNoDefaultAlpn pair signifies no support for default connection protocols.
@@ -563,15 +559,15 @@ func (s *SVCBPort) parse(b string) error {
// to the hinted IP address may be terminated and a new connection may be opened.
// Basic use pattern for creating an ipv4hint option:
//
-// h := new(dns.HTTPS)
-// h.Hdr = dns.RR_Header{Name: ".", Rrtype: dns.TypeHTTPS, Class: dns.ClassINET}
-// e := new(dns.SVCBIPv4Hint)
-// e.Hint = []net.IP{net.IPv4(1,1,1,1).To4()}
+// h := new(dns.HTTPS)
+// h.Hdr = dns.RR_Header{Name: ".", Rrtype: dns.TypeHTTPS, Class: dns.ClassINET}
+// e := new(dns.SVCBIPv4Hint)
+// e.Hint = []net.IP{net.IPv4(1,1,1,1).To4()}
//
-// Or
+// Or
//
-// e.Hint = []net.IP{net.ParseIP("1.1.1.1").To4()}
-// h.Value = append(h.Value, e)
+// e.Hint = []net.IP{net.ParseIP("1.1.1.1").To4()}
+// h.Value = append(h.Value, e)
type SVCBIPv4Hint struct {
Hint []net.IP
}
@@ -595,6 +591,7 @@ func (s *SVCBIPv4Hint) unpack(b []byte) error {
if len(b) == 0 || len(b)%4 != 0 {
return errors.New("dns: svcbipv4hint: ipv4 address byte array length is not a multiple of 4")
}
+ b = cloneSlice(b)
x := make([]net.IP, 0, len(b)/4)
for i := 0; i < len(b); i += 4 {
x = append(x, net.IP(b[i:i+4]))
@@ -635,12 +632,9 @@ func (s *SVCBIPv4Hint) parse(b string) error {
func (s *SVCBIPv4Hint) copy() SVCBKeyValue {
hint := make([]net.IP, len(s.Hint))
for i, ip := range s.Hint {
- hint[i] = copyIP(ip)
- }
-
- return &SVCBIPv4Hint{
- Hint: hint,
+ hint[i] = cloneSlice(ip)
}
+ return &SVCBIPv4Hint{Hint: hint}
}
// SVCBECHConfig pair contains the ECHConfig structure defined in draft-ietf-tls-esni [RFC xxxx].
@@ -660,19 +654,18 @@ func (s *SVCBECHConfig) String() string { return toBase64(s.ECH) }
func (s *SVCBECHConfig) len() int { return len(s.ECH) }
func (s *SVCBECHConfig) pack() ([]byte, error) {
- return append([]byte(nil), s.ECH...), nil
+ return cloneSlice(s.ECH), nil
}
func (s *SVCBECHConfig) copy() SVCBKeyValue {
- return &SVCBECHConfig{
- append([]byte(nil), s.ECH...),
- }
+ return &SVCBECHConfig{cloneSlice(s.ECH)}
}
func (s *SVCBECHConfig) unpack(b []byte) error {
- s.ECH = append([]byte(nil), b...)
+ s.ECH = cloneSlice(b)
return nil
}
+
func (s *SVCBECHConfig) parse(b string) error {
x, err := fromBase64([]byte(b))
if err != nil {
@@ -715,6 +708,7 @@ func (s *SVCBIPv6Hint) unpack(b []byte) error {
if len(b) == 0 || len(b)%16 != 0 {
return errors.New("dns: svcbipv6hint: ipv6 address byte array length not a multiple of 16")
}
+ b = cloneSlice(b)
x := make([]net.IP, 0, len(b)/16)
for i := 0; i < len(b); i += 16 {
ip := net.IP(b[i : i+16])
@@ -758,12 +752,9 @@ func (s *SVCBIPv6Hint) parse(b string) error {
func (s *SVCBIPv6Hint) copy() SVCBKeyValue {
hint := make([]net.IP, len(s.Hint))
for i, ip := range s.Hint {
- hint[i] = copyIP(ip)
- }
-
- return &SVCBIPv6Hint{
- Hint: hint,
+ hint[i] = cloneSlice(ip)
}
+ return &SVCBIPv6Hint{Hint: hint}
}
// SVCBDoHPath pair is used to indicate the URI template that the
@@ -831,11 +822,11 @@ type SVCBLocal struct {
func (s *SVCBLocal) Key() SVCBKey { return s.KeyCode }
func (s *SVCBLocal) String() string { return svcbParamToStr(s.Data) }
-func (s *SVCBLocal) pack() ([]byte, error) { return append([]byte(nil), s.Data...), nil }
+func (s *SVCBLocal) pack() ([]byte, error) { return cloneSlice(s.Data), nil }
func (s *SVCBLocal) len() int { return len(s.Data) }
func (s *SVCBLocal) unpack(b []byte) error {
- s.Data = append([]byte(nil), b...)
+ s.Data = cloneSlice(b)
return nil
}
@@ -849,9 +840,7 @@ func (s *SVCBLocal) parse(b string) error {
}
func (s *SVCBLocal) copy() SVCBKeyValue {
- return &SVCBLocal{s.KeyCode,
- append([]byte(nil), s.Data...),
- }
+ return &SVCBLocal{s.KeyCode, cloneSlice(s.Data)}
}
func (rr *SVCB) String() string {
@@ -867,8 +856,8 @@ func (rr *SVCB) String() string {
// areSVCBPairArraysEqual checks if SVCBKeyValue arrays are equal after sorting their
// copies. arrA and arrB have equal lengths, otherwise zduplicate.go wouldn't call this function.
func areSVCBPairArraysEqual(a []SVCBKeyValue, b []SVCBKeyValue) bool {
- a = append([]SVCBKeyValue(nil), a...)
- b = append([]SVCBKeyValue(nil), b...)
+ a = cloneSlice(a)
+ b = cloneSlice(b)
sort.Slice(a, func(i, j int) bool { return a[i].Key() < a[j].Key() })
sort.Slice(b, func(i, j int) bool { return b[i].Key() < b[j].Key() })
for i, e := range a {
diff --git a/vendor/github.com/miekg/dns/tools.go b/vendor/github.com/miekg/dns/tools.go
index d11182536..ccf8f6bfc 100644
--- a/vendor/github.com/miekg/dns/tools.go
+++ b/vendor/github.com/miekg/dns/tools.go
@@ -1,3 +1,4 @@
+//go:build tools
// +build tools
// We include our tool dependencies for `go generate` here to ensure they're
diff --git a/vendor/github.com/miekg/dns/types.go b/vendor/github.com/miekg/dns/types.go
index d9becb67c..03afeccda 100644
--- a/vendor/github.com/miekg/dns/types.go
+++ b/vendor/github.com/miekg/dns/types.go
@@ -65,6 +65,7 @@ const (
TypeAPL uint16 = 42
TypeDS uint16 = 43
TypeSSHFP uint16 = 44
+ TypeIPSECKEY uint16 = 45
TypeRRSIG uint16 = 46
TypeNSEC uint16 = 47
TypeDNSKEY uint16 = 48
@@ -98,6 +99,7 @@ const (
TypeURI uint16 = 256
TypeCAA uint16 = 257
TypeAVC uint16 = 258
+ TypeAMTRELAY uint16 = 260
TypeTKEY uint16 = 249
TypeTSIG uint16 = 250
@@ -159,6 +161,22 @@ const (
ZoneMDHashAlgSHA512 = 2
)
+// Used in IPSEC https://datatracker.ietf.org/doc/html/rfc4025#section-2.3
+const (
+ IPSECGatewayNone uint8 = iota
+ IPSECGatewayIPv4
+ IPSECGatewayIPv6
+ IPSECGatewayHost
+)
+
+// Used in AMTRELAY https://datatracker.ietf.org/doc/html/rfc8777#section-4.2.3
+const (
+ AMTRELAYNone = IPSECGatewayNone
+ AMTRELAYIPv4 = IPSECGatewayIPv4
+ AMTRELAYIPv6 = IPSECGatewayIPv6
+ AMTRELAYHost = IPSECGatewayHost
+)
+
// Header is the wire format for the DNS packet header.
type Header struct {
Id uint16
@@ -180,7 +198,7 @@ const (
_CD = 1 << 4 // checking disabled
)
-// Various constants used in the LOC RR. See RFC 1887.
+// Various constants used in the LOC RR. See RFC 1876.
const (
LOC_EQUATOR = 1 << 31 // RFC 1876, Section 2.
LOC_PRIMEMERIDIAN = 1 << 31 // RFC 1876, Section 2.
@@ -613,8 +631,8 @@ func nextByte(s string, offset int) (byte, int) {
return 0, 0
case 2, 3: // too short to be \ddd
default: // maybe \ddd
- if isDigit(s[offset+1]) && isDigit(s[offset+2]) && isDigit(s[offset+3]) {
- return dddStringToByte(s[offset+1:]), 4
+ if isDDD(s[offset+1:]) {
+ return dddToByte(s[offset+1:]), 4
}
}
// not \ddd, just an RFC 1035 "quoted" character
@@ -774,7 +792,10 @@ type LOC struct {
// cmToM takes a cm value expressed in RFC 1876 SIZE mantissa/exponent
// format and returns a string in m (two decimals for the cm).
-func cmToM(m, e uint8) string {
+func cmToM(x uint8) string {
+ m := x & 0xf0 >> 4
+ e := x & 0x0f
+
if e < 2 {
if e == 1 {
m *= 10
@@ -830,10 +851,9 @@ func (rr *LOC) String() string {
s += fmt.Sprintf("%.0fm ", alt)
}
- s += cmToM(rr.Size&0xf0>>4, rr.Size&0x0f) + "m "
- s += cmToM(rr.HorizPre&0xf0>>4, rr.HorizPre&0x0f) + "m "
- s += cmToM(rr.VertPre&0xf0>>4, rr.VertPre&0x0f) + "m"
-
+ s += cmToM(rr.Size) + "m "
+ s += cmToM(rr.HorizPre) + "m "
+ s += cmToM(rr.VertPre) + "m"
return s
}
@@ -994,6 +1014,69 @@ func (rr *DNSKEY) String() string {
" " + rr.PublicKey
}
+// IPSECKEY RR. See RFC 4025.
+type IPSECKEY struct {
+ Hdr RR_Header
+ Precedence uint8
+ GatewayType uint8
+ Algorithm uint8
+ GatewayAddr net.IP `dns:"-"` // packing/unpacking/parsing/etc handled together with GatewayHost
+ GatewayHost string `dns:"ipsechost"`
+ PublicKey string `dns:"base64"`
+}
+
+func (rr *IPSECKEY) String() string {
+ var gateway string
+ switch rr.GatewayType {
+ case IPSECGatewayIPv4, IPSECGatewayIPv6:
+ gateway = rr.GatewayAddr.String()
+ case IPSECGatewayHost:
+ gateway = rr.GatewayHost
+ case IPSECGatewayNone:
+ fallthrough
+ default:
+ gateway = "."
+ }
+
+ return rr.Hdr.String() + strconv.Itoa(int(rr.Precedence)) +
+ " " + strconv.Itoa(int(rr.GatewayType)) +
+ " " + strconv.Itoa(int(rr.Algorithm)) +
+ " " + gateway +
+ " " + rr.PublicKey
+}
+
+// AMTRELAY RR. See RFC 8777.
+type AMTRELAY struct {
+ Hdr RR_Header
+ Precedence uint8
+ GatewayType uint8 // discovery is packed in here at bit 0x80
+ GatewayAddr net.IP `dns:"-"` // packing/unpacking/parsing/etc handled together with GatewayHost
+ GatewayHost string `dns:"amtrelayhost"`
+}
+
+func (rr *AMTRELAY) String() string {
+ var gateway string
+ switch rr.GatewayType & 0x7f {
+ case AMTRELAYIPv4, AMTRELAYIPv6:
+ gateway = rr.GatewayAddr.String()
+ case AMTRELAYHost:
+ gateway = rr.GatewayHost
+ case AMTRELAYNone:
+ fallthrough
+ default:
+ gateway = "."
+ }
+ boolS := "0"
+ if rr.GatewayType&0x80 == 0x80 {
+ boolS = "1"
+ }
+
+ return rr.Hdr.String() + strconv.Itoa(int(rr.Precedence)) +
+ " " + boolS +
+ " " + strconv.Itoa(int(rr.GatewayType&0x7f)) +
+ " " + gateway
+}
+
// RKEY RR. See https://www.iana.org/assignments/dns-parameters/RKEY/rkey-completed-template.
type RKEY struct {
Hdr RR_Header
@@ -1450,7 +1533,7 @@ func (a *APLPrefix) str() string {
// equals reports whether two APL prefixes are identical.
func (a *APLPrefix) equals(b *APLPrefix) bool {
return a.Negation == b.Negation &&
- bytes.Equal(a.Network.IP, b.Network.IP) &&
+ a.Network.IP.Equal(b.Network.IP) &&
bytes.Equal(a.Network.Mask, b.Network.Mask)
}
@@ -1518,21 +1601,19 @@ func euiToString(eui uint64, bits int) (hex string) {
return
}
-// copyIP returns a copy of ip.
-func copyIP(ip net.IP) net.IP {
- p := make(net.IP, len(ip))
- copy(p, ip)
- return p
+// cloneSlice returns a shallow copy of s.
+func cloneSlice[E any, S ~[]E](s S) S {
+ if s == nil {
+ return nil
+ }
+ return append(S(nil), s...)
}
// copyNet returns a copy of a subnet.
func copyNet(n net.IPNet) net.IPNet {
- m := make(net.IPMask, len(n.Mask))
- copy(m, n.Mask)
-
return net.IPNet{
- IP: copyIP(n.IP),
- Mask: m,
+ IP: cloneSlice(n.IP),
+ Mask: cloneSlice(n.Mask),
}
}
diff --git a/vendor/github.com/miekg/dns/udp.go b/vendor/github.com/miekg/dns/udp.go
index a4826ee2f..c018ad43d 100644
--- a/vendor/github.com/miekg/dns/udp.go
+++ b/vendor/github.com/miekg/dns/udp.go
@@ -1,3 +1,4 @@
+//go:build !windows
// +build !windows
package dns
diff --git a/vendor/github.com/miekg/dns/udp_windows.go b/vendor/github.com/miekg/dns/udp_windows.go
index e7dd8ca31..a259b67e4 100644
--- a/vendor/github.com/miekg/dns/udp_windows.go
+++ b/vendor/github.com/miekg/dns/udp_windows.go
@@ -1,5 +1,9 @@
+//go:build windows
// +build windows
+// TODO(tmthrgd): Remove this Windows-specific code if go.dev/issue/7175 and
+// go.dev/issue/7174 are ever fixed.
+
package dns
import "net"
@@ -14,7 +18,6 @@ func (s *SessionUDP) RemoteAddr() net.Addr { return s.raddr }
// ReadFromSessionUDP acts just like net.UDPConn.ReadFrom(), but returns a session object instead of a
// net.UDPAddr.
-// TODO(fastest963): Once go1.10 is released, use ReadMsgUDP.
func ReadFromSessionUDP(conn *net.UDPConn, b []byte) (int, *SessionUDP, error) {
n, raddr, err := conn.ReadFrom(b)
if err != nil {
@@ -24,12 +27,9 @@ func ReadFromSessionUDP(conn *net.UDPConn, b []byte) (int, *SessionUDP, error) {
}
// WriteToSessionUDP acts just like net.UDPConn.WriteTo(), but uses a *SessionUDP instead of a net.Addr.
-// TODO(fastest963): Once go1.10 is released, use WriteMsgUDP.
func WriteToSessionUDP(conn *net.UDPConn, b []byte, session *SessionUDP) (int, error) {
return conn.WriteTo(b, session.raddr)
}
-// TODO(fastest963): Once go1.10 is released and we can use *MsgUDP methods
-// use the standard method in udp.go for these.
func setUDPSocketOptions(*net.UDPConn) error { return nil }
func parseDstFromOOB([]byte, net.IP) net.IP { return nil }
diff --git a/vendor/github.com/miekg/dns/version.go b/vendor/github.com/miekg/dns/version.go
index b1a872bd5..6094585d8 100644
--- a/vendor/github.com/miekg/dns/version.go
+++ b/vendor/github.com/miekg/dns/version.go
@@ -3,7 +3,7 @@ package dns
import "fmt"
// Version is current version of this library.
-var Version = v{1, 1, 50}
+var Version = v{1, 1, 54}
// v holds the version of this library.
type v struct {
diff --git a/vendor/github.com/miekg/dns/xfr.go b/vendor/github.com/miekg/dns/xfr.go
index 1917e91c8..0a831c880 100644
--- a/vendor/github.com/miekg/dns/xfr.go
+++ b/vendor/github.com/miekg/dns/xfr.go
@@ -44,7 +44,6 @@ func (t *Transfer) tsigProvider() TsigProvider {
// dnscon := &dns.Conn{Conn:con}
// transfer = &dns.Transfer{Conn: dnscon}
// channel, err := transfer.In(message, master)
-//
func (t *Transfer) In(q *Msg, a string) (env chan *Envelope, err error) {
switch q.Question[0].Qtype {
case TypeAXFR, TypeIXFR:
diff --git a/vendor/github.com/miekg/dns/zduplicate.go b/vendor/github.com/miekg/dns/zduplicate.go
index 9eb1dac29..450bbbc29 100644
--- a/vendor/github.com/miekg/dns/zduplicate.go
+++ b/vendor/github.com/miekg/dns/zduplicate.go
@@ -43,6 +43,32 @@ func (r1 *AFSDB) isDuplicate(_r2 RR) bool {
return true
}
+func (r1 *AMTRELAY) isDuplicate(_r2 RR) bool {
+ r2, ok := _r2.(*AMTRELAY)
+ if !ok {
+ return false
+ }
+ _ = r2
+ if r1.Precedence != r2.Precedence {
+ return false
+ }
+ if r1.GatewayType != r2.GatewayType {
+ return false
+ }
+ switch r1.GatewayType {
+ case IPSECGatewayIPv4, IPSECGatewayIPv6:
+ if !r1.GatewayAddr.Equal(r2.GatewayAddr) {
+ return false
+ }
+ case IPSECGatewayHost:
+ if !isDuplicateName(r1.GatewayHost, r2.GatewayHost) {
+ return false
+ }
+ }
+
+ return true
+}
+
func (r1 *ANY) isDuplicate(_r2 RR) bool {
r2, ok := _r2.(*ANY)
if !ok {
@@ -423,6 +449,38 @@ func (r1 *HTTPS) isDuplicate(_r2 RR) bool {
return true
}
+func (r1 *IPSECKEY) isDuplicate(_r2 RR) bool {
+ r2, ok := _r2.(*IPSECKEY)
+ if !ok {
+ return false
+ }
+ _ = r2
+ if r1.Precedence != r2.Precedence {
+ return false
+ }
+ if r1.GatewayType != r2.GatewayType {
+ return false
+ }
+ if r1.Algorithm != r2.Algorithm {
+ return false
+ }
+ switch r1.GatewayType {
+ case IPSECGatewayIPv4, IPSECGatewayIPv6:
+ if !r1.GatewayAddr.Equal(r2.GatewayAddr) {
+ return false
+ }
+ case IPSECGatewayHost:
+ if !isDuplicateName(r1.GatewayHost, r2.GatewayHost) {
+ return false
+ }
+ }
+
+ if r1.PublicKey != r2.PublicKey {
+ return false
+ }
+ return true
+}
+
func (r1 *KEY) isDuplicate(_r2 RR) bool {
r2, ok := _r2.(*KEY)
if !ok {
diff --git a/vendor/github.com/miekg/dns/zmsg.go b/vendor/github.com/miekg/dns/zmsg.go
index fc0822f98..3ea0eb423 100644
--- a/vendor/github.com/miekg/dns/zmsg.go
+++ b/vendor/github.com/miekg/dns/zmsg.go
@@ -32,6 +32,22 @@ func (rr *AFSDB) pack(msg []byte, off int, compression compressionMap, compress
return off, nil
}
+func (rr *AMTRELAY) pack(msg []byte, off int, compression compressionMap, compress bool) (off1 int, err error) {
+ off, err = packUint8(rr.Precedence, msg, off)
+ if err != nil {
+ return off, err
+ }
+ off, err = packUint8(rr.GatewayType, msg, off)
+ if err != nil {
+ return off, err
+ }
+ off, err = packIPSECGateway(rr.GatewayAddr, rr.GatewayHost, msg, off, rr.GatewayType, compression, false)
+ if err != nil {
+ return off, err
+ }
+ return off, nil
+}
+
func (rr *ANY) pack(msg []byte, off int, compression compressionMap, compress bool) (off1 int, err error) {
return off, nil
}
@@ -332,6 +348,30 @@ func (rr *HTTPS) pack(msg []byte, off int, compression compressionMap, compress
return off, nil
}
+func (rr *IPSECKEY) pack(msg []byte, off int, compression compressionMap, compress bool) (off1 int, err error) {
+ off, err = packUint8(rr.Precedence, msg, off)
+ if err != nil {
+ return off, err
+ }
+ off, err = packUint8(rr.GatewayType, msg, off)
+ if err != nil {
+ return off, err
+ }
+ off, err = packUint8(rr.Algorithm, msg, off)
+ if err != nil {
+ return off, err
+ }
+ off, err = packIPSECGateway(rr.GatewayAddr, rr.GatewayHost, msg, off, rr.GatewayType, compression, false)
+ if err != nil {
+ return off, err
+ }
+ off, err = packStringBase64(rr.PublicKey, msg, off)
+ if err != nil {
+ return off, err
+ }
+ return off, nil
+}
+
func (rr *KEY) pack(msg []byte, off int, compression compressionMap, compress bool) (off1 int, err error) {
off, err = packUint16(rr.Flags, msg, off)
if err != nil {
@@ -1180,6 +1220,34 @@ func (rr *AFSDB) unpack(msg []byte, off int) (off1 int, err error) {
return off, nil
}
+func (rr *AMTRELAY) unpack(msg []byte, off int) (off1 int, err error) {
+ rdStart := off
+ _ = rdStart
+
+ rr.Precedence, off, err = unpackUint8(msg, off)
+ if err != nil {
+ return off, err
+ }
+ if off == len(msg) {
+ return off, nil
+ }
+ rr.GatewayType, off, err = unpackUint8(msg, off)
+ if err != nil {
+ return off, err
+ }
+ if off == len(msg) {
+ return off, nil
+ }
+ if off == len(msg) {
+ return off, nil
+ }
+ rr.GatewayAddr, rr.GatewayHost, off, err = unpackIPSECGateway(msg, off, rr.GatewayType)
+ if err != nil {
+ return off, err
+ }
+ return off, nil
+}
+
func (rr *ANY) unpack(msg []byte, off int) (off1 int, err error) {
rdStart := off
_ = rdStart
@@ -1636,6 +1704,48 @@ func (rr *HTTPS) unpack(msg []byte, off int) (off1 int, err error) {
return off, nil
}
+func (rr *IPSECKEY) unpack(msg []byte, off int) (off1 int, err error) {
+ rdStart := off
+ _ = rdStart
+
+ rr.Precedence, off, err = unpackUint8(msg, off)
+ if err != nil {
+ return off, err
+ }
+ if off == len(msg) {
+ return off, nil
+ }
+ rr.GatewayType, off, err = unpackUint8(msg, off)
+ if err != nil {
+ return off, err
+ }
+ if off == len(msg) {
+ return off, nil
+ }
+ rr.Algorithm, off, err = unpackUint8(msg, off)
+ if err != nil {
+ return off, err
+ }
+ if off == len(msg) {
+ return off, nil
+ }
+ if off == len(msg) {
+ return off, nil
+ }
+ rr.GatewayAddr, rr.GatewayHost, off, err = unpackIPSECGateway(msg, off, rr.GatewayType)
+ if err != nil {
+ return off, err
+ }
+ if off == len(msg) {
+ return off, nil
+ }
+ rr.PublicKey, off, err = unpackStringBase64(msg, off, rdStart+int(rr.Hdr.Rdlength))
+ if err != nil {
+ return off, err
+ }
+ return off, nil
+}
+
func (rr *KEY) unpack(msg []byte, off int) (off1 int, err error) {
rdStart := off
_ = rdStart
diff --git a/vendor/github.com/miekg/dns/ztypes.go b/vendor/github.com/miekg/dns/ztypes.go
index 5d060cfee..1b6f43200 100644
--- a/vendor/github.com/miekg/dns/ztypes.go
+++ b/vendor/github.com/miekg/dns/ztypes.go
@@ -12,6 +12,7 @@ var TypeToRR = map[uint16]func() RR{
TypeA: func() RR { return new(A) },
TypeAAAA: func() RR { return new(AAAA) },
TypeAFSDB: func() RR { return new(AFSDB) },
+ TypeAMTRELAY: func() RR { return new(AMTRELAY) },
TypeANY: func() RR { return new(ANY) },
TypeAPL: func() RR { return new(APL) },
TypeAVC: func() RR { return new(AVC) },
@@ -34,6 +35,7 @@ var TypeToRR = map[uint16]func() RR{
TypeHINFO: func() RR { return new(HINFO) },
TypeHIP: func() RR { return new(HIP) },
TypeHTTPS: func() RR { return new(HTTPS) },
+ TypeIPSECKEY: func() RR { return new(IPSECKEY) },
TypeKEY: func() RR { return new(KEY) },
TypeKX: func() RR { return new(KX) },
TypeL32: func() RR { return new(L32) },
@@ -90,6 +92,7 @@ var TypeToString = map[uint16]string{
TypeA: "A",
TypeAAAA: "AAAA",
TypeAFSDB: "AFSDB",
+ TypeAMTRELAY: "AMTRELAY",
TypeANY: "ANY",
TypeAPL: "APL",
TypeATMA: "ATMA",
@@ -114,6 +117,7 @@ var TypeToString = map[uint16]string{
TypeHINFO: "HINFO",
TypeHIP: "HIP",
TypeHTTPS: "HTTPS",
+ TypeIPSECKEY: "IPSECKEY",
TypeISDN: "ISDN",
TypeIXFR: "IXFR",
TypeKEY: "KEY",
@@ -176,6 +180,7 @@ var TypeToString = map[uint16]string{
func (rr *A) Header() *RR_Header { return &rr.Hdr }
func (rr *AAAA) Header() *RR_Header { return &rr.Hdr }
func (rr *AFSDB) Header() *RR_Header { return &rr.Hdr }
+func (rr *AMTRELAY) Header() *RR_Header { return &rr.Hdr }
func (rr *ANY) Header() *RR_Header { return &rr.Hdr }
func (rr *APL) Header() *RR_Header { return &rr.Hdr }
func (rr *AVC) Header() *RR_Header { return &rr.Hdr }
@@ -198,6 +203,7 @@ func (rr *GPOS) Header() *RR_Header { return &rr.Hdr }
func (rr *HINFO) Header() *RR_Header { return &rr.Hdr }
func (rr *HIP) Header() *RR_Header { return &rr.Hdr }
func (rr *HTTPS) Header() *RR_Header { return &rr.Hdr }
+func (rr *IPSECKEY) Header() *RR_Header { return &rr.Hdr }
func (rr *KEY) Header() *RR_Header { return &rr.Hdr }
func (rr *KX) Header() *RR_Header { return &rr.Hdr }
func (rr *L32) Header() *RR_Header { return &rr.Hdr }
@@ -257,6 +263,7 @@ func (rr *A) len(off int, compression map[string]struct{}) int {
}
return l
}
+
func (rr *AAAA) len(off int, compression map[string]struct{}) int {
l := rr.Hdr.len(off, compression)
if len(rr.AAAA) != 0 {
@@ -264,16 +271,34 @@ func (rr *AAAA) len(off int, compression map[string]struct{}) int {
}
return l
}
+
func (rr *AFSDB) len(off int, compression map[string]struct{}) int {
l := rr.Hdr.len(off, compression)
l += 2 // Subtype
l += domainNameLen(rr.Hostname, off+l, compression, false)
return l
}
+
+func (rr *AMTRELAY) len(off int, compression map[string]struct{}) int {
+ l := rr.Hdr.len(off, compression)
+ l++ // Precedence
+ l++ // GatewayType
+ switch rr.GatewayType {
+ case AMTRELAYIPv4:
+ l += net.IPv4len
+ case AMTRELAYIPv6:
+ l += net.IPv6len
+ case AMTRELAYHost:
+ l += len(rr.GatewayHost) + 1
+ }
+ return l
+}
+
func (rr *ANY) len(off int, compression map[string]struct{}) int {
l := rr.Hdr.len(off, compression)
return l
}
+
func (rr *APL) len(off int, compression map[string]struct{}) int {
l := rr.Hdr.len(off, compression)
for _, x := range rr.Prefixes {
@@ -281,6 +306,7 @@ func (rr *APL) len(off int, compression map[string]struct{}) int {
}
return l
}
+
func (rr *AVC) len(off int, compression map[string]struct{}) int {
l := rr.Hdr.len(off, compression)
for _, x := range rr.Txt {
@@ -288,6 +314,7 @@ func (rr *AVC) len(off int, compression map[string]struct{}) int {
}
return l
}
+
func (rr *CAA) len(off int, compression map[string]struct{}) int {
l := rr.Hdr.len(off, compression)
l++ // Flag
@@ -295,6 +322,7 @@ func (rr *CAA) len(off int, compression map[string]struct{}) int {
l += len(rr.Value)
return l
}
+
func (rr *CERT) len(off int, compression map[string]struct{}) int {
l := rr.Hdr.len(off, compression)
l += 2 // Type
@@ -303,21 +331,25 @@ func (rr *CERT) len(off int, compression map[string]struct{}) int {
l += base64.StdEncoding.DecodedLen(len(rr.Certificate))
return l
}
+
func (rr *CNAME) len(off int, compression map[string]struct{}) int {
l := rr.Hdr.len(off, compression)
l += domainNameLen(rr.Target, off+l, compression, true)
return l
}
+
func (rr *DHCID) len(off int, compression map[string]struct{}) int {
l := rr.Hdr.len(off, compression)
l += base64.StdEncoding.DecodedLen(len(rr.Digest))
return l
}
+
func (rr *DNAME) len(off int, compression map[string]struct{}) int {
l := rr.Hdr.len(off, compression)
l += domainNameLen(rr.Target, off+l, compression, false)
return l
}
+
func (rr *DNSKEY) len(off int, compression map[string]struct{}) int {
l := rr.Hdr.len(off, compression)
l += 2 // Flags
@@ -326,6 +358,7 @@ func (rr *DNSKEY) len(off int, compression map[string]struct{}) int {
l += base64.StdEncoding.DecodedLen(len(rr.PublicKey))
return l
}
+
func (rr *DS) len(off int, compression map[string]struct{}) int {
l := rr.Hdr.len(off, compression)
l += 2 // KeyTag
@@ -334,26 +367,31 @@ func (rr *DS) len(off int, compression map[string]struct{}) int {
l += len(rr.Digest) / 2
return l
}
+
func (rr *EID) len(off int, compression map[string]struct{}) int {
l := rr.Hdr.len(off, compression)
l += len(rr.Endpoint) / 2
return l
}
+
func (rr *EUI48) len(off int, compression map[string]struct{}) int {
l := rr.Hdr.len(off, compression)
l += 6 // Address
return l
}
+
func (rr *EUI64) len(off int, compression map[string]struct{}) int {
l := rr.Hdr.len(off, compression)
l += 8 // Address
return l
}
+
func (rr *GID) len(off int, compression map[string]struct{}) int {
l := rr.Hdr.len(off, compression)
l += 4 // Gid
return l
}
+
func (rr *GPOS) len(off int, compression map[string]struct{}) int {
l := rr.Hdr.len(off, compression)
l += len(rr.Longitude) + 1
@@ -361,12 +399,14 @@ func (rr *GPOS) len(off int, compression map[string]struct{}) int {
l += len(rr.Altitude) + 1
return l
}
+
func (rr *HINFO) len(off int, compression map[string]struct{}) int {
l := rr.Hdr.len(off, compression)
l += len(rr.Cpu) + 1
l += len(rr.Os) + 1
return l
}
+
func (rr *HIP) len(off int, compression map[string]struct{}) int {
l := rr.Hdr.len(off, compression)
l++ // HitLength
@@ -379,12 +419,31 @@ func (rr *HIP) len(off int, compression map[string]struct{}) int {
}
return l
}
+
+func (rr *IPSECKEY) len(off int, compression map[string]struct{}) int {
+ l := rr.Hdr.len(off, compression)
+ l++ // Precedence
+ l++ // GatewayType
+ l++ // Algorithm
+ switch rr.GatewayType {
+ case IPSECGatewayIPv4:
+ l += net.IPv4len
+ case IPSECGatewayIPv6:
+ l += net.IPv6len
+ case IPSECGatewayHost:
+ l += len(rr.GatewayHost) + 1
+ }
+ l += base64.StdEncoding.DecodedLen(len(rr.PublicKey))
+ return l
+}
+
func (rr *KX) len(off int, compression map[string]struct{}) int {
l := rr.Hdr.len(off, compression)
l += 2 // Preference
l += domainNameLen(rr.Exchanger, off+l, compression, false)
return l
}
+
func (rr *L32) len(off int, compression map[string]struct{}) int {
l := rr.Hdr.len(off, compression)
l += 2 // Preference
@@ -393,12 +452,14 @@ func (rr *L32) len(off int, compression map[string]struct{}) int {
}
return l
}
+
func (rr *L64) len(off int, compression map[string]struct{}) int {
l := rr.Hdr.len(off, compression)
l += 2 // Preference
l += 8 // Locator64
return l
}
+
func (rr *LOC) len(off int, compression map[string]struct{}) int {
l := rr.Hdr.len(off, compression)
l++ // Version
@@ -410,49 +471,58 @@ func (rr *LOC) len(off int, compression map[string]struct{}) int {
l += 4 // Altitude
return l
}
+
func (rr *LP) len(off int, compression map[string]struct{}) int {
l := rr.Hdr.len(off, compression)
l += 2 // Preference
l += domainNameLen(rr.Fqdn, off+l, compression, false)
return l
}
+
func (rr *MB) len(off int, compression map[string]struct{}) int {
l := rr.Hdr.len(off, compression)
l += domainNameLen(rr.Mb, off+l, compression, true)
return l
}
+
func (rr *MD) len(off int, compression map[string]struct{}) int {
l := rr.Hdr.len(off, compression)
l += domainNameLen(rr.Md, off+l, compression, true)
return l
}
+
func (rr *MF) len(off int, compression map[string]struct{}) int {
l := rr.Hdr.len(off, compression)
l += domainNameLen(rr.Mf, off+l, compression, true)
return l
}
+
func (rr *MG) len(off int, compression map[string]struct{}) int {
l := rr.Hdr.len(off, compression)
l += domainNameLen(rr.Mg, off+l, compression, true)
return l
}
+
func (rr *MINFO) len(off int, compression map[string]struct{}) int {
l := rr.Hdr.len(off, compression)
l += domainNameLen(rr.Rmail, off+l, compression, true)
l += domainNameLen(rr.Email, off+l, compression, true)
return l
}
+
func (rr *MR) len(off int, compression map[string]struct{}) int {
l := rr.Hdr.len(off, compression)
l += domainNameLen(rr.Mr, off+l, compression, true)
return l
}
+
func (rr *MX) len(off int, compression map[string]struct{}) int {
l := rr.Hdr.len(off, compression)
l += 2 // Preference
l += domainNameLen(rr.Mx, off+l, compression, true)
return l
}
+
func (rr *NAPTR) len(off int, compression map[string]struct{}) int {
l := rr.Hdr.len(off, compression)
l += 2 // Order
@@ -463,17 +533,20 @@ func (rr *NAPTR) len(off int, compression map[string]struct{}) int {
l += domainNameLen(rr.Replacement, off+l, compression, false)
return l
}
+
func (rr *NID) len(off int, compression map[string]struct{}) int {
l := rr.Hdr.len(off, compression)
l += 2 // Preference
l += 8 // NodeID
return l
}
+
func (rr *NIMLOC) len(off int, compression map[string]struct{}) int {
l := rr.Hdr.len(off, compression)
l += len(rr.Locator) / 2
return l
}
+
func (rr *NINFO) len(off int, compression map[string]struct{}) int {
l := rr.Hdr.len(off, compression)
for _, x := range rr.ZSData {
@@ -481,16 +554,19 @@ func (rr *NINFO) len(off int, compression map[string]struct{}) int {
}
return l
}
+
func (rr *NS) len(off int, compression map[string]struct{}) int {
l := rr.Hdr.len(off, compression)
l += domainNameLen(rr.Ns, off+l, compression, true)
return l
}
+
func (rr *NSAPPTR) len(off int, compression map[string]struct{}) int {
l := rr.Hdr.len(off, compression)
l += domainNameLen(rr.Ptr, off+l, compression, false)
return l
}
+
func (rr *NSEC3PARAM) len(off int, compression map[string]struct{}) int {
l := rr.Hdr.len(off, compression)
l++ // Hash
@@ -500,21 +576,25 @@ func (rr *NSEC3PARAM) len(off int, compression map[string]struct{}) int {
l += len(rr.Salt) / 2
return l
}
+
func (rr *NULL) len(off int, compression map[string]struct{}) int {
l := rr.Hdr.len(off, compression)
l += len(rr.Data)
return l
}
+
func (rr *OPENPGPKEY) len(off int, compression map[string]struct{}) int {
l := rr.Hdr.len(off, compression)
l += base64.StdEncoding.DecodedLen(len(rr.PublicKey))
return l
}
+
func (rr *PTR) len(off int, compression map[string]struct{}) int {
l := rr.Hdr.len(off, compression)
l += domainNameLen(rr.Ptr, off+l, compression, true)
return l
}
+
func (rr *PX) len(off int, compression map[string]struct{}) int {
l := rr.Hdr.len(off, compression)
l += 2 // Preference
@@ -522,11 +602,13 @@ func (rr *PX) len(off int, compression map[string]struct{}) int {
l += domainNameLen(rr.Mapx400, off+l, compression, false)
return l
}
+
func (rr *RFC3597) len(off int, compression map[string]struct{}) int {
l := rr.Hdr.len(off, compression)
l += len(rr.Rdata) / 2
return l
}
+
func (rr *RKEY) len(off int, compression map[string]struct{}) int {
l := rr.Hdr.len(off, compression)
l += 2 // Flags
@@ -535,12 +617,14 @@ func (rr *RKEY) len(off int, compression map[string]struct{}) int {
l += base64.StdEncoding.DecodedLen(len(rr.PublicKey))
return l
}
+
func (rr *RP) len(off int, compression map[string]struct{}) int {
l := rr.Hdr.len(off, compression)
l += domainNameLen(rr.Mbox, off+l, compression, false)
l += domainNameLen(rr.Txt, off+l, compression, false)
return l
}
+
func (rr *RRSIG) len(off int, compression map[string]struct{}) int {
l := rr.Hdr.len(off, compression)
l += 2 // TypeCovered
@@ -554,12 +638,14 @@ func (rr *RRSIG) len(off int, compression map[string]struct{}) int {
l += base64.StdEncoding.DecodedLen(len(rr.Signature))
return l
}
+
func (rr *RT) len(off int, compression map[string]struct{}) int {
l := rr.Hdr.len(off, compression)
l += 2 // Preference
l += domainNameLen(rr.Host, off+l, compression, false)
return l
}
+
func (rr *SMIMEA) len(off int, compression map[string]struct{}) int {
l := rr.Hdr.len(off, compression)
l++ // Usage
@@ -568,6 +654,7 @@ func (rr *SMIMEA) len(off int, compression map[string]struct{}) int {
l += len(rr.Certificate) / 2
return l
}
+
func (rr *SOA) len(off int, compression map[string]struct{}) int {
l := rr.Hdr.len(off, compression)
l += domainNameLen(rr.Ns, off+l, compression, true)
@@ -579,6 +666,7 @@ func (rr *SOA) len(off int, compression map[string]struct{}) int {
l += 4 // Minttl
return l
}
+
func (rr *SPF) len(off int, compression map[string]struct{}) int {
l := rr.Hdr.len(off, compression)
for _, x := range rr.Txt {
@@ -586,6 +674,7 @@ func (rr *SPF) len(off int, compression map[string]struct{}) int {
}
return l
}
+
func (rr *SRV) len(off int, compression map[string]struct{}) int {
l := rr.Hdr.len(off, compression)
l += 2 // Priority
@@ -594,6 +683,7 @@ func (rr *SRV) len(off int, compression map[string]struct{}) int {
l += domainNameLen(rr.Target, off+l, compression, false)
return l
}
+
func (rr *SSHFP) len(off int, compression map[string]struct{}) int {
l := rr.Hdr.len(off, compression)
l++ // Algorithm
@@ -601,6 +691,7 @@ func (rr *SSHFP) len(off int, compression map[string]struct{}) int {
l += len(rr.FingerPrint) / 2
return l
}
+
func (rr *SVCB) len(off int, compression map[string]struct{}) int {
l := rr.Hdr.len(off, compression)
l += 2 // Priority
@@ -610,6 +701,7 @@ func (rr *SVCB) len(off int, compression map[string]struct{}) int {
}
return l
}
+
func (rr *TA) len(off int, compression map[string]struct{}) int {
l := rr.Hdr.len(off, compression)
l += 2 // KeyTag
@@ -618,12 +710,14 @@ func (rr *TA) len(off int, compression map[string]struct{}) int {
l += len(rr.Digest) / 2
return l
}
+
func (rr *TALINK) len(off int, compression map[string]struct{}) int {
l := rr.Hdr.len(off, compression)
l += domainNameLen(rr.PreviousName, off+l, compression, false)
l += domainNameLen(rr.NextName, off+l, compression, false)
return l
}
+
func (rr *TKEY) len(off int, compression map[string]struct{}) int {
l := rr.Hdr.len(off, compression)
l += domainNameLen(rr.Algorithm, off+l, compression, false)
@@ -637,6 +731,7 @@ func (rr *TKEY) len(off int, compression map[string]struct{}) int {
l += len(rr.OtherData) / 2
return l
}
+
func (rr *TLSA) len(off int, compression map[string]struct{}) int {
l := rr.Hdr.len(off, compression)
l++ // Usage
@@ -645,6 +740,7 @@ func (rr *TLSA) len(off int, compression map[string]struct{}) int {
l += len(rr.Certificate) / 2
return l
}
+
func (rr *TSIG) len(off int, compression map[string]struct{}) int {
l := rr.Hdr.len(off, compression)
l += domainNameLen(rr.Algorithm, off+l, compression, false)
@@ -658,6 +754,7 @@ func (rr *TSIG) len(off int, compression map[string]struct{}) int {
l += len(rr.OtherData) / 2
return l
}
+
func (rr *TXT) len(off int, compression map[string]struct{}) int {
l := rr.Hdr.len(off, compression)
for _, x := range rr.Txt {
@@ -665,16 +762,19 @@ func (rr *TXT) len(off int, compression map[string]struct{}) int {
}
return l
}
+
func (rr *UID) len(off int, compression map[string]struct{}) int {
l := rr.Hdr.len(off, compression)
l += 4 // Uid
return l
}
+
func (rr *UINFO) len(off int, compression map[string]struct{}) int {
l := rr.Hdr.len(off, compression)
l += len(rr.Uinfo) + 1
return l
}
+
func (rr *URI) len(off int, compression map[string]struct{}) int {
l := rr.Hdr.len(off, compression)
l += 2 // Priority
@@ -682,11 +782,13 @@ func (rr *URI) len(off int, compression map[string]struct{}) int {
l += len(rr.Target)
return l
}
+
func (rr *X25) len(off int, compression map[string]struct{}) int {
l := rr.Hdr.len(off, compression)
l += len(rr.PSDNAddress) + 1
return l
}
+
func (rr *ZONEMD) len(off int, compression map[string]struct{}) int {
l := rr.Hdr.len(off, compression)
l += 4 // Serial
@@ -698,17 +800,31 @@ func (rr *ZONEMD) len(off int, compression map[string]struct{}) int {
// copy() functions
func (rr *A) copy() RR {
- return &A{rr.Hdr, copyIP(rr.A)}
+ return &A{rr.Hdr, cloneSlice(rr.A)}
}
+
func (rr *AAAA) copy() RR {
- return &AAAA{rr.Hdr, copyIP(rr.AAAA)}
+ return &AAAA{rr.Hdr, cloneSlice(rr.AAAA)}
}
+
func (rr *AFSDB) copy() RR {
return &AFSDB{rr.Hdr, rr.Subtype, rr.Hostname}
}
+
+func (rr *AMTRELAY) copy() RR {
+ return &AMTRELAY{
+ rr.Hdr,
+ rr.Precedence,
+ rr.GatewayType,
+ cloneSlice(rr.GatewayAddr),
+ rr.GatewayHost,
+ }
+}
+
func (rr *ANY) copy() RR {
return &ANY{rr.Hdr}
}
+
func (rr *APL) copy() RR {
Prefixes := make([]APLPrefix, len(rr.Prefixes))
for i, e := range rr.Prefixes {
@@ -716,150 +832,270 @@ func (rr *APL) copy() RR {
}
return &APL{rr.Hdr, Prefixes}
}
+
func (rr *AVC) copy() RR {
- Txt := make([]string, len(rr.Txt))
- copy(Txt, rr.Txt)
- return &AVC{rr.Hdr, Txt}
+ return &AVC{rr.Hdr, cloneSlice(rr.Txt)}
}
+
func (rr *CAA) copy() RR {
- return &CAA{rr.Hdr, rr.Flag, rr.Tag, rr.Value}
+ return &CAA{
+ rr.Hdr,
+ rr.Flag,
+ rr.Tag,
+ rr.Value,
+ }
}
+
func (rr *CDNSKEY) copy() RR {
return &CDNSKEY{*rr.DNSKEY.copy().(*DNSKEY)}
}
+
func (rr *CDS) copy() RR {
return &CDS{*rr.DS.copy().(*DS)}
}
+
func (rr *CERT) copy() RR {
- return &CERT{rr.Hdr, rr.Type, rr.KeyTag, rr.Algorithm, rr.Certificate}
+ return &CERT{
+ rr.Hdr,
+ rr.Type,
+ rr.KeyTag,
+ rr.Algorithm,
+ rr.Certificate,
+ }
}
+
func (rr *CNAME) copy() RR {
return &CNAME{rr.Hdr, rr.Target}
}
+
func (rr *CSYNC) copy() RR {
- TypeBitMap := make([]uint16, len(rr.TypeBitMap))
- copy(TypeBitMap, rr.TypeBitMap)
- return &CSYNC{rr.Hdr, rr.Serial, rr.Flags, TypeBitMap}
+ return &CSYNC{
+ rr.Hdr,
+ rr.Serial,
+ rr.Flags,
+ cloneSlice(rr.TypeBitMap),
+ }
}
+
func (rr *DHCID) copy() RR {
return &DHCID{rr.Hdr, rr.Digest}
}
+
func (rr *DLV) copy() RR {
return &DLV{*rr.DS.copy().(*DS)}
}
+
func (rr *DNAME) copy() RR {
return &DNAME{rr.Hdr, rr.Target}
}
+
func (rr *DNSKEY) copy() RR {
- return &DNSKEY{rr.Hdr, rr.Flags, rr.Protocol, rr.Algorithm, rr.PublicKey}
+ return &DNSKEY{
+ rr.Hdr,
+ rr.Flags,
+ rr.Protocol,
+ rr.Algorithm,
+ rr.PublicKey,
+ }
}
+
func (rr *DS) copy() RR {
- return &DS{rr.Hdr, rr.KeyTag, rr.Algorithm, rr.DigestType, rr.Digest}
+ return &DS{
+ rr.Hdr,
+ rr.KeyTag,
+ rr.Algorithm,
+ rr.DigestType,
+ rr.Digest,
+ }
}
+
func (rr *EID) copy() RR {
return &EID{rr.Hdr, rr.Endpoint}
}
+
func (rr *EUI48) copy() RR {
return &EUI48{rr.Hdr, rr.Address}
}
+
func (rr *EUI64) copy() RR {
return &EUI64{rr.Hdr, rr.Address}
}
+
func (rr *GID) copy() RR {
return &GID{rr.Hdr, rr.Gid}
}
+
func (rr *GPOS) copy() RR {
- return &GPOS{rr.Hdr, rr.Longitude, rr.Latitude, rr.Altitude}
+ return &GPOS{
+ rr.Hdr,
+ rr.Longitude,
+ rr.Latitude,
+ rr.Altitude,
+ }
}
+
func (rr *HINFO) copy() RR {
return &HINFO{rr.Hdr, rr.Cpu, rr.Os}
}
+
func (rr *HIP) copy() RR {
- RendezvousServers := make([]string, len(rr.RendezvousServers))
- copy(RendezvousServers, rr.RendezvousServers)
- return &HIP{rr.Hdr, rr.HitLength, rr.PublicKeyAlgorithm, rr.PublicKeyLength, rr.Hit, rr.PublicKey, RendezvousServers}
+ return &HIP{
+ rr.Hdr,
+ rr.HitLength,
+ rr.PublicKeyAlgorithm,
+ rr.PublicKeyLength,
+ rr.Hit,
+ rr.PublicKey,
+ cloneSlice(rr.RendezvousServers),
+ }
}
+
func (rr *HTTPS) copy() RR {
return &HTTPS{*rr.SVCB.copy().(*SVCB)}
}
+
+func (rr *IPSECKEY) copy() RR {
+ return &IPSECKEY{
+ rr.Hdr,
+ rr.Precedence,
+ rr.GatewayType,
+ rr.Algorithm,
+ cloneSlice(rr.GatewayAddr),
+ rr.GatewayHost,
+ rr.PublicKey,
+ }
+}
+
func (rr *KEY) copy() RR {
return &KEY{*rr.DNSKEY.copy().(*DNSKEY)}
}
+
func (rr *KX) copy() RR {
return &KX{rr.Hdr, rr.Preference, rr.Exchanger}
}
+
func (rr *L32) copy() RR {
- return &L32{rr.Hdr, rr.Preference, copyIP(rr.Locator32)}
+ return &L32{rr.Hdr, rr.Preference, cloneSlice(rr.Locator32)}
}
+
func (rr *L64) copy() RR {
return &L64{rr.Hdr, rr.Preference, rr.Locator64}
}
+
func (rr *LOC) copy() RR {
- return &LOC{rr.Hdr, rr.Version, rr.Size, rr.HorizPre, rr.VertPre, rr.Latitude, rr.Longitude, rr.Altitude}
+ return &LOC{
+ rr.Hdr,
+ rr.Version,
+ rr.Size,
+ rr.HorizPre,
+ rr.VertPre,
+ rr.Latitude,
+ rr.Longitude,
+ rr.Altitude,
+ }
}
+
func (rr *LP) copy() RR {
return &LP{rr.Hdr, rr.Preference, rr.Fqdn}
}
+
func (rr *MB) copy() RR {
return &MB{rr.Hdr, rr.Mb}
}
+
func (rr *MD) copy() RR {
return &MD{rr.Hdr, rr.Md}
}
+
func (rr *MF) copy() RR {
return &MF{rr.Hdr, rr.Mf}
}
+
func (rr *MG) copy() RR {
return &MG{rr.Hdr, rr.Mg}
}
+
func (rr *MINFO) copy() RR {
return &MINFO{rr.Hdr, rr.Rmail, rr.Email}
}
+
func (rr *MR) copy() RR {
return &MR{rr.Hdr, rr.Mr}
}
+
func (rr *MX) copy() RR {
return &MX{rr.Hdr, rr.Preference, rr.Mx}
}
+
func (rr *NAPTR) copy() RR {
- return &NAPTR{rr.Hdr, rr.Order, rr.Preference, rr.Flags, rr.Service, rr.Regexp, rr.Replacement}
+ return &NAPTR{
+ rr.Hdr,
+ rr.Order,
+ rr.Preference,
+ rr.Flags,
+ rr.Service,
+ rr.Regexp,
+ rr.Replacement,
+ }
}
+
func (rr *NID) copy() RR {
return &NID{rr.Hdr, rr.Preference, rr.NodeID}
}
+
func (rr *NIMLOC) copy() RR {
return &NIMLOC{rr.Hdr, rr.Locator}
}
+
func (rr *NINFO) copy() RR {
- ZSData := make([]string, len(rr.ZSData))
- copy(ZSData, rr.ZSData)
- return &NINFO{rr.Hdr, ZSData}
+ return &NINFO{rr.Hdr, cloneSlice(rr.ZSData)}
}
+
func (rr *NS) copy() RR {
return &NS{rr.Hdr, rr.Ns}
}
+
func (rr *NSAPPTR) copy() RR {
return &NSAPPTR{rr.Hdr, rr.Ptr}
}
+
func (rr *NSEC) copy() RR {
- TypeBitMap := make([]uint16, len(rr.TypeBitMap))
- copy(TypeBitMap, rr.TypeBitMap)
- return &NSEC{rr.Hdr, rr.NextDomain, TypeBitMap}
+ return &NSEC{rr.Hdr, rr.NextDomain, cloneSlice(rr.TypeBitMap)}
}
+
func (rr *NSEC3) copy() RR {
- TypeBitMap := make([]uint16, len(rr.TypeBitMap))
- copy(TypeBitMap, rr.TypeBitMap)
- return &NSEC3{rr.Hdr, rr.Hash, rr.Flags, rr.Iterations, rr.SaltLength, rr.Salt, rr.HashLength, rr.NextDomain, TypeBitMap}
+ return &NSEC3{
+ rr.Hdr,
+ rr.Hash,
+ rr.Flags,
+ rr.Iterations,
+ rr.SaltLength,
+ rr.Salt,
+ rr.HashLength,
+ rr.NextDomain,
+ cloneSlice(rr.TypeBitMap),
+ }
}
+
func (rr *NSEC3PARAM) copy() RR {
- return &NSEC3PARAM{rr.Hdr, rr.Hash, rr.Flags, rr.Iterations, rr.SaltLength, rr.Salt}
+ return &NSEC3PARAM{
+ rr.Hdr,
+ rr.Hash,
+ rr.Flags,
+ rr.Iterations,
+ rr.SaltLength,
+ rr.Salt,
+ }
}
+
func (rr *NULL) copy() RR {
return &NULL{rr.Hdr, rr.Data}
}
+
func (rr *OPENPGPKEY) copy() RR {
return &OPENPGPKEY{rr.Hdr, rr.PublicKey}
}
+
func (rr *OPT) copy() RR {
Option := make([]EDNS0, len(rr.Option))
for i, e := range rr.Option {
@@ -867,86 +1103,205 @@ func (rr *OPT) copy() RR {
}
return &OPT{rr.Hdr, Option}
}
+
func (rr *PTR) copy() RR {
return &PTR{rr.Hdr, rr.Ptr}
}
+
func (rr *PX) copy() RR {
- return &PX{rr.Hdr, rr.Preference, rr.Map822, rr.Mapx400}
+ return &PX{
+ rr.Hdr,
+ rr.Preference,
+ rr.Map822,
+ rr.Mapx400,
+ }
}
+
func (rr *RFC3597) copy() RR {
return &RFC3597{rr.Hdr, rr.Rdata}
}
+
func (rr *RKEY) copy() RR {
- return &RKEY{rr.Hdr, rr.Flags, rr.Protocol, rr.Algorithm, rr.PublicKey}
+ return &RKEY{
+ rr.Hdr,
+ rr.Flags,
+ rr.Protocol,
+ rr.Algorithm,
+ rr.PublicKey,
+ }
}
+
func (rr *RP) copy() RR {
return &RP{rr.Hdr, rr.Mbox, rr.Txt}
}
+
func (rr *RRSIG) copy() RR {
- return &RRSIG{rr.Hdr, rr.TypeCovered, rr.Algorithm, rr.Labels, rr.OrigTtl, rr.Expiration, rr.Inception, rr.KeyTag, rr.SignerName, rr.Signature}
+ return &RRSIG{
+ rr.Hdr,
+ rr.TypeCovered,
+ rr.Algorithm,
+ rr.Labels,
+ rr.OrigTtl,
+ rr.Expiration,
+ rr.Inception,
+ rr.KeyTag,
+ rr.SignerName,
+ rr.Signature,
+ }
}
+
func (rr *RT) copy() RR {
return &RT{rr.Hdr, rr.Preference, rr.Host}
}
+
func (rr *SIG) copy() RR {
return &SIG{*rr.RRSIG.copy().(*RRSIG)}
}
+
func (rr *SMIMEA) copy() RR {
- return &SMIMEA{rr.Hdr, rr.Usage, rr.Selector, rr.MatchingType, rr.Certificate}
+ return &SMIMEA{
+ rr.Hdr,
+ rr.Usage,
+ rr.Selector,
+ rr.MatchingType,
+ rr.Certificate,
+ }
}
+
func (rr *SOA) copy() RR {
- return &SOA{rr.Hdr, rr.Ns, rr.Mbox, rr.Serial, rr.Refresh, rr.Retry, rr.Expire, rr.Minttl}
+ return &SOA{
+ rr.Hdr,
+ rr.Ns,
+ rr.Mbox,
+ rr.Serial,
+ rr.Refresh,
+ rr.Retry,
+ rr.Expire,
+ rr.Minttl,
+ }
}
+
func (rr *SPF) copy() RR {
- Txt := make([]string, len(rr.Txt))
- copy(Txt, rr.Txt)
- return &SPF{rr.Hdr, Txt}
+ return &SPF{rr.Hdr, cloneSlice(rr.Txt)}
}
+
func (rr *SRV) copy() RR {
- return &SRV{rr.Hdr, rr.Priority, rr.Weight, rr.Port, rr.Target}
+ return &SRV{
+ rr.Hdr,
+ rr.Priority,
+ rr.Weight,
+ rr.Port,
+ rr.Target,
+ }
}
+
func (rr *SSHFP) copy() RR {
- return &SSHFP{rr.Hdr, rr.Algorithm, rr.Type, rr.FingerPrint}
+ return &SSHFP{
+ rr.Hdr,
+ rr.Algorithm,
+ rr.Type,
+ rr.FingerPrint,
+ }
}
+
func (rr *SVCB) copy() RR {
Value := make([]SVCBKeyValue, len(rr.Value))
for i, e := range rr.Value {
Value[i] = e.copy()
}
- return &SVCB{rr.Hdr, rr.Priority, rr.Target, Value}
+ return &SVCB{
+ rr.Hdr,
+ rr.Priority,
+ rr.Target,
+ Value,
+ }
}
+
func (rr *TA) copy() RR {
- return &TA{rr.Hdr, rr.KeyTag, rr.Algorithm, rr.DigestType, rr.Digest}
+ return &TA{
+ rr.Hdr,
+ rr.KeyTag,
+ rr.Algorithm,
+ rr.DigestType,
+ rr.Digest,
+ }
}
+
func (rr *TALINK) copy() RR {
return &TALINK{rr.Hdr, rr.PreviousName, rr.NextName}
}
+
func (rr *TKEY) copy() RR {
- return &TKEY{rr.Hdr, rr.Algorithm, rr.Inception, rr.Expiration, rr.Mode, rr.Error, rr.KeySize, rr.Key, rr.OtherLen, rr.OtherData}
+ return &TKEY{
+ rr.Hdr,
+ rr.Algorithm,
+ rr.Inception,
+ rr.Expiration,
+ rr.Mode,
+ rr.Error,
+ rr.KeySize,
+ rr.Key,
+ rr.OtherLen,
+ rr.OtherData,
+ }
}
+
func (rr *TLSA) copy() RR {
- return &TLSA{rr.Hdr, rr.Usage, rr.Selector, rr.MatchingType, rr.Certificate}
+ return &TLSA{
+ rr.Hdr,
+ rr.Usage,
+ rr.Selector,
+ rr.MatchingType,
+ rr.Certificate,
+ }
}
+
func (rr *TSIG) copy() RR {
- return &TSIG{rr.Hdr, rr.Algorithm, rr.TimeSigned, rr.Fudge, rr.MACSize, rr.MAC, rr.OrigId, rr.Error, rr.OtherLen, rr.OtherData}
+ return &TSIG{
+ rr.Hdr,
+ rr.Algorithm,
+ rr.TimeSigned,
+ rr.Fudge,
+ rr.MACSize,
+ rr.MAC,
+ rr.OrigId,
+ rr.Error,
+ rr.OtherLen,
+ rr.OtherData,
+ }
}
+
func (rr *TXT) copy() RR {
- Txt := make([]string, len(rr.Txt))
- copy(Txt, rr.Txt)
- return &TXT{rr.Hdr, Txt}
+ return &TXT{rr.Hdr, cloneSlice(rr.Txt)}
}
+
func (rr *UID) copy() RR {
return &UID{rr.Hdr, rr.Uid}
}
+
func (rr *UINFO) copy() RR {
return &UINFO{rr.Hdr, rr.Uinfo}
}
+
func (rr *URI) copy() RR {
- return &URI{rr.Hdr, rr.Priority, rr.Weight, rr.Target}
+ return &URI{
+ rr.Hdr,
+ rr.Priority,
+ rr.Weight,
+ rr.Target,
+ }
}
+
func (rr *X25) copy() RR {
return &X25{rr.Hdr, rr.PSDNAddress}
}
+
func (rr *ZONEMD) copy() RR {
- return &ZONEMD{rr.Hdr, rr.Serial, rr.Scheme, rr.Hash, rr.Digest}
+ return &ZONEMD{
+ rr.Hdr,
+ rr.Serial,
+ rr.Scheme,
+ rr.Hash,
+ rr.Digest,
+ }
}
diff --git a/vendor/github.com/minio/sha256-simd/cpuid_other.go b/vendor/github.com/minio/sha256-simd/cpuid_other.go
index cd9fbf2d9..97af6a195 100644
--- a/vendor/github.com/minio/sha256-simd/cpuid_other.go
+++ b/vendor/github.com/minio/sha256-simd/cpuid_other.go
@@ -23,6 +23,11 @@ import (
"github.com/klauspost/cpuid/v2"
)
+var (
+ hasIntelSha = runtime.GOARCH == "amd64" && cpuid.CPU.Supports(cpuid.SHA, cpuid.SSSE3, cpuid.SSE4)
+ hasAvx512 = cpuid.CPU.Supports(cpuid.AVX512F, cpuid.AVX512DQ, cpuid.AVX512BW, cpuid.AVX512VL)
+)
+
func hasArmSha2() bool {
if cpuid.CPU.Has(cpuid.SHA2) {
return true
@@ -42,5 +47,4 @@ func hasArmSha2() bool {
return false
}
return bytes.Contains(cpuInfo, []byte(sha256Feature))
-
}
diff --git a/vendor/github.com/minio/sha256-simd/sha256.go b/vendor/github.com/minio/sha256-simd/sha256.go
index b137ead9f..f146bbdb5 100644
--- a/vendor/github.com/minio/sha256-simd/sha256.go
+++ b/vendor/github.com/minio/sha256-simd/sha256.go
@@ -19,10 +19,8 @@ package sha256
import (
"crypto/sha256"
"encoding/binary"
+ "errors"
"hash"
- "runtime"
-
- "github.com/klauspost/cpuid/v2"
)
// Size - The size of a SHA256 checksum in bytes.
@@ -68,42 +66,34 @@ func (d *digest) Reset() {
type blockfuncType int
const (
- blockfuncGeneric blockfuncType = iota
- blockfuncSha blockfuncType = iota
- blockfuncArm blockfuncType = iota
+ blockfuncStdlib blockfuncType = iota
+ blockfuncIntelSha
+ blockfuncArmSha2
+ blockfuncForceGeneric = -1
)
var blockfunc blockfuncType
func init() {
- blockfunc = blockfuncGeneric
switch {
- case hasSHAExtensions():
- blockfunc = blockfuncSha
+ case hasIntelSha:
+ blockfunc = blockfuncIntelSha
case hasArmSha2():
- blockfunc = blockfuncArm
- default:
- blockfunc = blockfuncGeneric
+ blockfunc = blockfuncArmSha2
}
}
-var avx512 = cpuid.CPU.Supports(cpuid.AVX512F, cpuid.AVX512DQ, cpuid.AVX512BW, cpuid.AVX512VL)
-
-// hasSHAExtensions return whether the cpu supports SHA extensions.
-func hasSHAExtensions() bool {
- return cpuid.CPU.Supports(cpuid.SHA, cpuid.SSSE3, cpuid.SSE4) && runtime.GOARCH == "amd64"
-}
-
// New returns a new hash.Hash computing the SHA256 checksum.
func New() hash.Hash {
- if blockfunc != blockfuncGeneric {
- d := new(digest)
- d.Reset()
- return d
+ if blockfunc == blockfuncStdlib {
+ // Fallback to the standard golang implementation
+ // if no features were found.
+ return sha256.New()
}
- // Fallback to the standard golang implementation
- // if no features were found.
- return sha256.New()
+
+ d := new(digest)
+ d.Reset()
+ return d
}
// Sum256 - single caller sha256 helper
@@ -272,11 +262,11 @@ func (d *digest) checkSum() (digest [Size]byte) {
}
func block(dig *digest, p []byte) {
- if blockfunc == blockfuncSha {
- blockShaGo(dig, p)
- } else if blockfunc == blockfuncArm {
- blockArmGo(dig, p)
- } else if blockfunc == blockfuncGeneric {
+ if blockfunc == blockfuncIntelSha {
+ blockIntelShaGo(dig, p)
+ } else if blockfunc == blockfuncArmSha2 {
+ blockArmSha2Go(dig, p)
+ } else {
blockGeneric(dig, p)
}
}
@@ -397,3 +387,82 @@ var _K = []uint32{
0xbef9a3f7,
0xc67178f2,
}
+
+const (
+ magic256 = "sha\x03"
+ marshaledSize = len(magic256) + 8*4 + chunk + 8
+)
+
+func (d *digest) MarshalBinary() ([]byte, error) {
+ b := make([]byte, 0, marshaledSize)
+ b = append(b, magic256...)
+ b = appendUint32(b, d.h[0])
+ b = appendUint32(b, d.h[1])
+ b = appendUint32(b, d.h[2])
+ b = appendUint32(b, d.h[3])
+ b = appendUint32(b, d.h[4])
+ b = appendUint32(b, d.h[5])
+ b = appendUint32(b, d.h[6])
+ b = appendUint32(b, d.h[7])
+ b = append(b, d.x[:d.nx]...)
+ b = b[:len(b)+len(d.x)-d.nx] // already zero
+ b = appendUint64(b, d.len)
+ return b, nil
+}
+
+func (d *digest) UnmarshalBinary(b []byte) error {
+ if len(b) < len(magic256) || string(b[:len(magic256)]) != magic256 {
+ return errors.New("crypto/sha256: invalid hash state identifier")
+ }
+ if len(b) != marshaledSize {
+ return errors.New("crypto/sha256: invalid hash state size")
+ }
+ b = b[len(magic256):]
+ b, d.h[0] = consumeUint32(b)
+ b, d.h[1] = consumeUint32(b)
+ b, d.h[2] = consumeUint32(b)
+ b, d.h[3] = consumeUint32(b)
+ b, d.h[4] = consumeUint32(b)
+ b, d.h[5] = consumeUint32(b)
+ b, d.h[6] = consumeUint32(b)
+ b, d.h[7] = consumeUint32(b)
+ b = b[copy(d.x[:], b):]
+ b, d.len = consumeUint64(b)
+ d.nx = int(d.len % chunk)
+ return nil
+}
+
+func appendUint32(b []byte, v uint32) []byte {
+ return append(b,
+ byte(v>>24),
+ byte(v>>16),
+ byte(v>>8),
+ byte(v),
+ )
+}
+
+func appendUint64(b []byte, v uint64) []byte {
+ return append(b,
+ byte(v>>56),
+ byte(v>>48),
+ byte(v>>40),
+ byte(v>>32),
+ byte(v>>24),
+ byte(v>>16),
+ byte(v>>8),
+ byte(v),
+ )
+}
+
+func consumeUint64(b []byte) ([]byte, uint64) {
+ _ = b[7]
+ x := uint64(b[7]) | uint64(b[6])<<8 | uint64(b[5])<<16 | uint64(b[4])<<24 |
+ uint64(b[3])<<32 | uint64(b[2])<<40 | uint64(b[1])<<48 | uint64(b[0])<<56
+ return b[8:], x
+}
+
+func consumeUint32(b []byte) ([]byte, uint32) {
+ _ = b[3]
+ x := uint32(b[3]) | uint32(b[2])<<8 | uint32(b[1])<<16 | uint32(b[0])<<24
+ return b[4:], x
+}
diff --git a/vendor/github.com/minio/sha256-simd/sha256blockAvx512_amd64.go b/vendor/github.com/minio/sha256-simd/sha256blockAvx512_amd64.go
index b7d7c1637..4b9473a4e 100644
--- a/vendor/github.com/minio/sha256-simd/sha256blockAvx512_amd64.go
+++ b/vendor/github.com/minio/sha256-simd/sha256blockAvx512_amd64.go
@@ -1,4 +1,5 @@
-//+build !noasm,!appengine,gc
+//go:build !noasm && !appengine && gc
+// +build !noasm,!appengine,gc
/*
* Minio Cloud Storage, (C) 2017 Minio, Inc.
diff --git a/vendor/github.com/minio/sha256-simd/sha256blockAvx512_amd64.s b/vendor/github.com/minio/sha256-simd/sha256blockAvx512_amd64.s
index 275bcacbc..cca534e46 100644
--- a/vendor/github.com/minio/sha256-simd/sha256blockAvx512_amd64.s
+++ b/vendor/github.com/minio/sha256-simd/sha256blockAvx512_amd64.s
@@ -1,4 +1,4 @@
-//+build !noasm,!appengine
+//+build !noasm,!appengine,gc
TEXT ·sha256X16Avx512(SB), 7, $0
MOVQ digests+0(FP), DI
diff --git a/vendor/github.com/minio/sha256-simd/sha256blockSha_amd64.go b/vendor/github.com/minio/sha256-simd/sha256blockSha_amd64.go
deleted file mode 100644
index bef949419..000000000
--- a/vendor/github.com/minio/sha256-simd/sha256blockSha_amd64.go
+++ /dev/null
@@ -1,6 +0,0 @@
-//+build !noasm,!appengine,gc
-
-package sha256
-
-//go:noescape
-func blockSha(h *[8]uint32, message []uint8)
diff --git a/vendor/github.com/minio/sha256-simd/sha256block_amd64.go b/vendor/github.com/minio/sha256-simd/sha256block_amd64.go
index 0c48d45f8..e536f54e1 100644
--- a/vendor/github.com/minio/sha256-simd/sha256block_amd64.go
+++ b/vendor/github.com/minio/sha256-simd/sha256block_amd64.go
@@ -1,4 +1,5 @@
-//+build !noasm,!appengine,gc
+//go:build !noasm && !appengine && gc
+// +build !noasm,!appengine,gc
/*
* Minio Cloud Storage, (C) 2016 Minio, Inc.
@@ -18,10 +19,13 @@
package sha256
-func blockArmGo(dig *digest, p []byte) {
- panic("blockArmGo called unexpectedly")
+func blockArmSha2Go(dig *digest, p []byte) {
+ panic("blockArmSha2Go called unexpectedly")
}
-func blockShaGo(dig *digest, p []byte) {
- blockSha(&dig.h, p)
+//go:noescape
+func blockIntelSha(h *[8]uint32, message []uint8)
+
+func blockIntelShaGo(dig *digest, p []byte) {
+ blockIntelSha(&dig.h, p)
}
diff --git a/vendor/github.com/minio/sha256-simd/sha256blockSha_amd64.s b/vendor/github.com/minio/sha256-simd/sha256block_amd64.s
similarity index 99%
rename from vendor/github.com/minio/sha256-simd/sha256blockSha_amd64.s
rename to vendor/github.com/minio/sha256-simd/sha256block_amd64.s
index 909fc0ef8..c98a1d8f0 100644
--- a/vendor/github.com/minio/sha256-simd/sha256blockSha_amd64.s
+++ b/vendor/github.com/minio/sha256-simd/sha256block_amd64.s
@@ -1,4 +1,4 @@
-//+build !noasm,!appengine
+//+build !noasm,!appengine,gc
// SHA intrinsic version of SHA256
@@ -106,7 +106,7 @@ GLOBL SHUF_MASK<>(SB), RODATA|NOPTR, $16
// X13 saved hash state // CDGH
// X15 data shuffle mask (constant)
-TEXT ·blockSha(SB), NOSPLIT, $0-32
+TEXT ·blockIntelSha(SB), NOSPLIT, $0-32
MOVQ h+0(FP), DX
MOVQ message_base+8(FP), SI
MOVQ message_len+16(FP), DI
diff --git a/vendor/github.com/minio/sha256-simd/sha256block_arm64.go b/vendor/github.com/minio/sha256-simd/sha256block_arm64.go
index 58ccf6eb5..d4369e24a 100644
--- a/vendor/github.com/minio/sha256-simd/sha256block_arm64.go
+++ b/vendor/github.com/minio/sha256-simd/sha256block_arm64.go
@@ -1,4 +1,5 @@
-//+build !noasm,!appengine,gc
+//go:build !noasm && !appengine && gc
+// +build !noasm,!appengine,gc
/*
* Minio Cloud Storage, (C) 2016 Minio, Inc.
@@ -18,18 +19,18 @@
package sha256
-func blockShaGo(dig *digest, p []byte) {
- panic("blockShaGoc called unexpectedly")
+func blockIntelShaGo(dig *digest, p []byte) {
+ panic("blockIntelShaGo called unexpectedly")
}
//go:noescape
-func blockArm(h []uint32, message []uint8)
+func blockArmSha2(h []uint32, message []uint8)
-func blockArmGo(dig *digest, p []byte) {
+func blockArmSha2Go(dig *digest, p []byte) {
h := []uint32{dig.h[0], dig.h[1], dig.h[2], dig.h[3], dig.h[4], dig.h[5], dig.h[6], dig.h[7]}
- blockArm(h[:], p[:])
+ blockArmSha2(h[:], p[:])
dig.h[0], dig.h[1], dig.h[2], dig.h[3], dig.h[4], dig.h[5], dig.h[6], dig.h[7] = h[0], h[1], h[2], h[3], h[4],
h[5], h[6], h[7]
diff --git a/vendor/github.com/minio/sha256-simd/sha256block_arm64.s b/vendor/github.com/minio/sha256-simd/sha256block_arm64.s
index c6ddb3717..7ab88b163 100644
--- a/vendor/github.com/minio/sha256-simd/sha256block_arm64.s
+++ b/vendor/github.com/minio/sha256-simd/sha256block_arm64.s
@@ -1,4 +1,4 @@
-//+build !noasm,!appengine
+//+build !noasm,!appengine,gc
// ARM64 version of SHA256
@@ -25,7 +25,7 @@
// their Plan9 equivalents
//
-TEXT ·blockArm(SB), 7, $0
+TEXT ·blockArmSha2(SB), 7, $0
MOVD h+0(FP), R0
MOVD message+24(FP), R1
MOVD message_len+32(FP), R2 // length of message
diff --git a/vendor/github.com/minio/sha256-simd/sha256block_other.go b/vendor/github.com/minio/sha256-simd/sha256block_other.go
index ec586c060..94d7eb0b4 100644
--- a/vendor/github.com/minio/sha256-simd/sha256block_other.go
+++ b/vendor/github.com/minio/sha256-simd/sha256block_other.go
@@ -1,4 +1,5 @@
-//+build appengine noasm !amd64,!arm64 !gc
+//go:build appengine || noasm || (!amd64 && !arm64) || !gc
+// +build appengine noasm !amd64,!arm64 !gc
/*
* Minio Cloud Storage, (C) 2019 Minio, Inc.
@@ -18,11 +19,11 @@
package sha256
-func blockShaGo(dig *digest, p []byte) {
- panic("blockShaGo called unexpectedly")
+func blockIntelShaGo(dig *digest, p []byte) {
+ panic("blockIntelShaGo called unexpectedly")
}
-func blockArmGo(dig *digest, p []byte) {
- panic("blockArmGo called unexpectedly")
+func blockArmSha2Go(dig *digest, p []byte) {
+ panic("blockArmSha2Go called unexpectedly")
}
diff --git a/vendor/github.com/multiformats/go-base32/base32.go b/vendor/github.com/multiformats/go-base32/base32.go
index 8c43b6f5d..de7fd7901 100644
--- a/vendor/github.com/multiformats/go-base32/base32.go
+++ b/vendor/github.com/multiformats/go-base32/base32.go
@@ -93,7 +93,7 @@ func (enc Encoding) WithPadding(padding rune) *Encoding {
// RFC 4648.
var StdEncoding = NewEncodingCI(encodeStd)
-// HexEncoding is the ``Extended Hex Alphabet'' defined in RFC 4648.
+// HexEncoding is the “Extended Hex Alphabet” defined in RFC 4648.
// It is typically used in DNS.
var HexEncoding = NewEncodingCI(encodeHex)
@@ -226,6 +226,7 @@ func (e *encoder) Write(p []byte) (n int, err error) {
}
// Trailing fringe.
+ //lint:ignore S1001 fixed-length 5-byte slice
for i := 0; i < len(p); i++ {
e.buf[i] = p[i]
}
diff --git a/vendor/github.com/libp2p/go-openssl/version.json b/vendor/github.com/multiformats/go-base32/version.json
similarity index 100%
rename from vendor/github.com/libp2p/go-openssl/version.json
rename to vendor/github.com/multiformats/go-base32/version.json
diff --git a/vendor/github.com/multiformats/go-base36/README.md b/vendor/github.com/multiformats/go-base36/README.md
index a92e27cb2..1f1ffccdf 100644
--- a/vendor/github.com/multiformats/go-base36/README.md
+++ b/vendor/github.com/multiformats/go-base36/README.md
@@ -7,7 +7,7 @@ This is an optimized codec for []byte <=> base36 string conversion
## Documentation
-https://pkg.go.dev/github.com/multicodec/go-base36
+https://pkg.go.dev/github.com/multiformats/go-base36
## Lead Maintainer
diff --git a/vendor/github.com/multiformats/go-base36/base36.go b/vendor/github.com/multiformats/go-base36/base36.go
index e4cb9316a..1792b4948 100644
--- a/vendor/github.com/multiformats/go-base36/base36.go
+++ b/vendor/github.com/multiformats/go-base36/base36.go
@@ -1,7 +1,5 @@
/*
-
Package base36 provides a reasonably fast implementation of a binary base36 codec.
-
*/
package base36
@@ -14,7 +12,7 @@ import (
const UcAlphabet = "0123456789ABCDEFGHIJKLMNOPQRSTUVWXYZ"
const LcAlphabet = "0123456789abcdefghijklmnopqrstuvwxyz"
-const maxDigitOrdinal = byte('z')
+const maxDigitOrdinal = 'z'
const maxDigitValueB36 = 35
var revAlphabet [maxDigitOrdinal + 1]byte
@@ -41,58 +39,49 @@ func EncodeToStringLc(b []byte) string { return encode(b, LcAlphabet) }
func encode(inBuf []byte, al string) string {
- // As a polar opposite to the base58 implementation, using a uint32 here is
- // significantly slower
- var carry uint64
-
- var encIdx, valIdx, zcnt, high int
-
- inSize := len(inBuf)
- for zcnt < inSize && inBuf[zcnt] == 0 {
+ bufsz := len(inBuf)
+ zcnt := 0
+ for zcnt < bufsz && inBuf[zcnt] == 0 {
zcnt++
}
- // Really this is log(256)/log(36) or 1.55, but integer math is easier
- // Use 2 as a constant and just overallocate
- encSize := (inSize - zcnt) * 2
+ // It is crucial to make this as short as possible, especially for
+ // the usual case of CIDs.
+ bufsz = zcnt +
+ // This is an integer simplification of
+ // ceil(log(256)/log(36))
+ (bufsz-zcnt)*277/179 + 1
- // Allocate one big buffer up front
- // Note: pools *DO NOT* help, the overhead of zeroing the val-half (see below)
+ // Note: pools *DO NOT* help, the overhead of zeroing
// kills any performance gain to be had
- outBuf := make([]byte, (zcnt + encSize*2))
+ out := make([]byte, bufsz)
- // use the second half for the temporary numeric buffer
- val := outBuf[encSize+zcnt:]
+ var idx, stopIdx int
+ var carry uint32
- high = encSize - 1
+ stopIdx = bufsz - 1
for _, b := range inBuf[zcnt:] {
- valIdx = encSize - 1
- for carry = uint64(b); valIdx > high || carry != 0; valIdx-- {
- carry += uint64((val[valIdx])) * 256
- val[valIdx] = byte(carry % 36)
+ idx = bufsz - 1
+ for carry = uint32(b); idx > stopIdx || carry != 0; idx-- {
+ carry += uint32((out[idx])) * 256
+ out[idx] = byte(carry % 36)
carry /= 36
}
- high = valIdx
- }
-
- // Reset the value index to the first significant value position
- for valIdx = 0; valIdx < encSize && val[valIdx] == 0; valIdx++ {
+ stopIdx = idx
}
- // Now write the known-length result to first half of buffer
- encSize += zcnt - valIdx
-
- for encIdx = 0; encIdx < zcnt; encIdx++ {
- outBuf[encIdx] = '0'
+ // Determine the additional "zero-gap" in the buffer (aside from zcnt)
+ for stopIdx = zcnt; stopIdx < bufsz && out[stopIdx] == 0; stopIdx++ {
}
- for encIdx < encSize {
- outBuf[encIdx] = al[val[valIdx]]
- encIdx++
- valIdx++
+ // Now encode the values with actual alphabet in-place
+ vBuf := out[stopIdx-zcnt:]
+ bufsz = len(vBuf)
+ for idx = 0; idx < bufsz; idx++ {
+ out[idx] = al[vBuf[idx]]
}
- return string(outBuf[:encSize])
+ return string(out[:bufsz])
}
// DecodeString takes a base36 encoded string and returns a slice of the decoded
@@ -103,30 +92,27 @@ func DecodeString(s string) ([]byte, error) {
return nil, fmt.Errorf("can not decode zero-length string")
}
- var zcnt int
-
- for i := 0; i < len(s) && s[i] == '0'; i++ {
+ zcnt := 0
+ for zcnt < len(s) && s[zcnt] == '0' {
zcnt++
}
- var t, c uint64
-
- outi := make([]uint32, (len(s)+3)/4)
- binu := make([]byte, (len(s)+3)*3)
+ // the 32bit algo stretches the result up to 2 times
+ binu := make([]byte, 2*(((len(s))*179/277)+1)) // no more than 84 bytes when len(s) <= 64
+ outi := make([]uint32, (len(s)+3)/4) // no more than 16 bytes when len(s) <= 64
for _, r := range s {
- if r > rune(maxDigitOrdinal) || revAlphabet[r] > maxDigitValueB36 {
+ if r > maxDigitOrdinal || revAlphabet[r] > maxDigitValueB36 {
return nil, fmt.Errorf("invalid base36 character (%q)", r)
}
- c = uint64(revAlphabet[r])
+ c := uint64(revAlphabet[r])
for j := len(outi) - 1; j >= 0; j-- {
- t = uint64(outi[j])*36 + c
+ t := uint64(outi[j])*36 + c
c = (t >> 32)
outi[j] = uint32(t & 0xFFFFFFFF)
}
-
}
mask := (uint(len(s)%4) * 8)
@@ -134,20 +120,24 @@ func DecodeString(s string) ([]byte, error) {
mask = 32
}
mask -= 8
- var j, cnt int
- for j, cnt = 0, 0; j < len(outi); j++ {
+
+ outidx := 0
+ for j := 0; j < len(outi); j++ {
for mask < 32 { // loop relies on uint overflow
- binu[cnt] = byte(outi[j] >> mask)
+ binu[outidx] = byte(outi[j] >> mask)
mask -= 8
- cnt++
+ outidx++
}
mask = 24
}
- for n := zcnt; n < len(binu); n++ {
- if binu[n] > 0 {
- return binu[n-zcnt : cnt], nil
+ // find the most significant byte post-decode, if any
+ for msb := zcnt; msb < outidx; msb++ {
+ if binu[msb] > 0 {
+ return binu[msb-zcnt : outidx : outidx], nil
}
}
- return binu[:cnt], nil
+
+ // it's all zeroes
+ return binu[:outidx:outidx], nil
}
diff --git a/vendor/github.com/multiformats/go-base36/version.json b/vendor/github.com/multiformats/go-base36/version.json
new file mode 100644
index 000000000..1437d5b73
--- /dev/null
+++ b/vendor/github.com/multiformats/go-base36/version.json
@@ -0,0 +1,3 @@
+{
+ "version": "v0.2.0"
+}
diff --git a/vendor/github.com/multiformats/go-multiaddr/doc.go b/vendor/github.com/multiformats/go-multiaddr/doc.go
index d8c37b265..b80f3ab2c 100644
--- a/vendor/github.com/multiformats/go-multiaddr/doc.go
+++ b/vendor/github.com/multiformats/go-multiaddr/doc.go
@@ -7,30 +7,29 @@ Learn more at https://github.com/multiformats/multiaddr
Basic Use:
- import (
- "bytes"
- "strings"
- ma "github.com/multiformats/go-multiaddr"
- )
+ import (
+ "bytes"
+ "strings"
+ ma "github.com/multiformats/go-multiaddr"
+ )
- // construct from a string (err signals parse failure)
- m1, err := ma.NewMultiaddr("/ip4/127.0.0.1/udp/1234")
+ // construct from a string (err signals parse failure)
+ m1, err := ma.NewMultiaddr("/ip4/127.0.0.1/udp/1234")
- // construct from bytes (err signals parse failure)
- m2, err := ma.NewMultiaddrBytes(m1.Bytes())
+ // construct from bytes (err signals parse failure)
+ m2, err := ma.NewMultiaddrBytes(m1.Bytes())
- // true
- strings.Equal(m1.String(), "/ip4/127.0.0.1/udp/1234")
- strings.Equal(m1.String(), m2.String())
- bytes.Equal(m1.Bytes(), m2.Bytes())
- m1.Equal(m2)
- m2.Equal(m1)
-
- // tunneling (en/decap)
- printer, _ := ma.NewMultiaddr("/ip4/192.168.0.13/tcp/80")
- proxy, _ := ma.NewMultiaddr("/ip4/10.20.30.40/tcp/443")
- printerOverProxy := proxy.Encapsulate(printer)
- proxyAgain := printerOverProxy.Decapsulate(printer)
+ // true
+ strings.Equal(m1.String(), "/ip4/127.0.0.1/udp/1234")
+ strings.Equal(m1.String(), m2.String())
+ bytes.Equal(m1.Bytes(), m2.Bytes())
+ m1.Equal(m2)
+ m2.Equal(m1)
+ // tunneling (en/decap)
+ printer, _ := ma.NewMultiaddr("/ip4/192.168.0.13/tcp/80")
+ proxy, _ := ma.NewMultiaddr("/ip4/10.20.30.40/tcp/443")
+ printerOverProxy := proxy.Encapsulate(printer)
+ proxyAgain := printerOverProxy.Decapsulate(printer)
*/
package multiaddr
diff --git a/vendor/github.com/multiformats/go-multiaddr/filter.go b/vendor/github.com/multiformats/go-multiaddr/filter.go
index cc13aeeb6..ba915da00 100644
--- a/vendor/github.com/multiformats/go-multiaddr/filter.go
+++ b/vendor/github.com/multiformats/go-multiaddr/filter.go
@@ -85,8 +85,9 @@ func (fs *Filters) RemoveLiteral(ipnet net.IPNet) (removed bool) {
// default is returned.
//
// TODO: currently, the last filter to match wins always, but it shouldn't be that way.
-// Instead, the highest-specific last filter should win; that way more specific filters
-// override more general ones.
+//
+// Instead, the highest-specific last filter should win; that way more specific filters
+// override more general ones.
func (fs *Filters) AddrBlocked(a Multiaddr) (deny bool) {
var (
netip net.IP
diff --git a/vendor/github.com/multiformats/go-multiaddr/interface.go b/vendor/github.com/multiformats/go-multiaddr/interface.go
index 82cc76401..699c54d1d 100644
--- a/vendor/github.com/multiformats/go-multiaddr/interface.go
+++ b/vendor/github.com/multiformats/go-multiaddr/interface.go
@@ -12,11 +12,10 @@ Learn more here: https://github.com/multiformats/multiaddr
Multiaddrs have both a binary and string representation.
- import ma "github.com/multiformats/go-multiaddr"
-
- addr, err := ma.NewMultiaddr("/ip4/1.2.3.4/tcp/80")
- // err non-nil when parsing failed.
+ import ma "github.com/multiformats/go-multiaddr"
+ addr, err := ma.NewMultiaddr("/ip4/1.2.3.4/tcp/80")
+ // err non-nil when parsing failed.
*/
type Multiaddr interface {
json.Marshaler
@@ -48,9 +47,11 @@ type Multiaddr interface {
//
Encapsulate(Multiaddr) Multiaddr
- // Decapsultate removes a Multiaddr wrapping. For example:
+ // Decapsulate removes a Multiaddr wrapping. For example:
//
- // /ip4/1.2.3.4/tcp/80 decapsulate /ip4/1.2.3.4 = /tcp/80
+ // /ip4/1.2.3.4/tcp/80 decapsulate /tcp/80 = /ip4/1.2.3.4
+ // /ip4/1.2.3.4/tcp/80 decapsulate /udp/80 = /ip4/1.2.3.4/tcp/80
+ // /ip4/1.2.3.4/tcp/80 decapsulate /ip4/1.2.3.4 = nil
//
Decapsulate(Multiaddr) Multiaddr
diff --git a/vendor/github.com/multiformats/go-multiaddr/net/net.go b/vendor/github.com/multiformats/go-multiaddr/net/net.go
index 16f1bbc53..10fcff700 100644
--- a/vendor/github.com/multiformats/go-multiaddr/net/net.go
+++ b/vendor/github.com/multiformats/go-multiaddr/net/net.go
@@ -81,11 +81,11 @@ func wrap(nconn net.Conn, laddr, raddr ma.Multiaddr) Conn {
// This function does it's best to avoid "hiding" methods exposed by the wrapped
// type. Guarantees:
//
-// * If the wrapped connection exposes the "half-open" closer methods
-// (CloseWrite, CloseRead), these will be available on the wrapped connection
-// via type assertions.
-// * If the wrapped connection is a UnixConn, IPConn, TCPConn, or UDPConn, all
-// methods on these wrapped connections will be available via type assertions.
+// - If the wrapped connection exposes the "half-open" closer methods
+// (CloseWrite, CloseRead), these will be available on the wrapped connection
+// via type assertions.
+// - If the wrapped connection is a UnixConn, IPConn, TCPConn, or UDPConn, all
+// methods on these wrapped connections will be available via type assertions.
func WrapNetConn(nconn net.Conn) (Conn, error) {
if nconn == nil {
return nil, fmt.Errorf("failed to convert nconn.LocalAddr: nil")
@@ -224,9 +224,9 @@ func (nla *netListenerAdapter) Accept() (net.Conn, error) {
// NetListener turns this Listener into a net.Listener.
//
-// * Connections returned from Accept implement multiaddr/net Conn.
-// * Calling WrapNetListener on the net.Listener returned by this function will
-// return the original (underlying) multiaddr/net Listener.
+// - Connections returned from Accept implement multiaddr/net Conn.
+// - Calling WrapNetListener on the net.Listener returned by this function will
+// return the original (underlying) multiaddr/net Listener.
func NetListener(l Listener) net.Listener {
return &netListenerAdapter{l}
}
diff --git a/vendor/github.com/multiformats/go-multiaddr/protocols.go b/vendor/github.com/multiformats/go-multiaddr/protocols.go
index 28b395995..b01e6cb8b 100644
--- a/vendor/github.com/multiformats/go-multiaddr/protocols.go
+++ b/vendor/github.com/multiformats/go-multiaddr/protocols.go
@@ -3,39 +3,43 @@ package multiaddr
// You **MUST** register your multicodecs with
// https://github.com/multiformats/multicodec before adding them here.
const (
- P_IP4 = 0x0004
- P_TCP = 0x0006
- P_DNS = 0x0035 // 4 or 6
- P_DNS4 = 0x0036
- P_DNS6 = 0x0037
- P_DNSADDR = 0x0038
- P_UDP = 0x0111
- P_DCCP = 0x0021
- P_IP6 = 0x0029
- P_IP6ZONE = 0x002A
- P_IPCIDR = 0x002B
- P_QUIC = 0x01CC
- P_WEBTRANSPORT = 0x01D1
- P_CERTHASH = 0x01D2
- P_SCTP = 0x0084
- P_CIRCUIT = 0x0122
- P_UDT = 0x012D
- P_UTP = 0x012E
- P_UNIX = 0x0190
- P_P2P = 0x01A5
- P_IPFS = 0x01A5 // alias for backwards compatibility
- P_HTTP = 0x01E0
- P_HTTPS = 0x01BB // deprecated alias for /tls/http
- P_ONION = 0x01BC // also for backwards compatibility
- P_ONION3 = 0x01BD
- P_GARLIC64 = 0x01BE
- P_GARLIC32 = 0x01BF
- P_P2P_WEBRTC_DIRECT = 0x0114
- P_TLS = 0x01c0
- P_NOISE = 0x01c6
- P_WS = 0x01DD
- P_WSS = 0x01DE // deprecated alias for /tls/ws
- P_PLAINTEXTV2 = 0x706c61
+ P_IP4 = 4
+ P_TCP = 6
+ P_DNS = 53 // 4 or 6
+ P_DNS4 = 54
+ P_DNS6 = 55
+ P_DNSADDR = 56
+ P_UDP = 273
+ P_DCCP = 33
+ P_IP6 = 41
+ P_IP6ZONE = 42
+ P_IPCIDR = 43
+ P_QUIC = 460
+ P_QUIC_V1 = 461
+ P_WEBTRANSPORT = 465
+ P_CERTHASH = 466
+ P_SCTP = 132
+ P_CIRCUIT = 290
+ P_UDT = 301
+ P_UTP = 302
+ P_UNIX = 400
+ P_P2P = 421
+ P_IPFS = P_P2P // alias for backwards compatibility
+ P_HTTP = 480
+ P_HTTPS = 443 // deprecated alias for /tls/http
+ P_ONION = 444 // also for backwards compatibility
+ P_ONION3 = 445
+ P_GARLIC64 = 446
+ P_GARLIC32 = 447
+ P_P2P_WEBRTC_DIRECT = 276 // Deprecated. use webrtc-direct instead
+ P_TLS = 448
+ P_SNI = 449
+ P_NOISE = 454
+ P_WS = 477
+ P_WSS = 478 // deprecated alias for /tls/ws
+ P_PLAINTEXTV2 = 7367777
+ P_WEBRTC_DIRECT = 280
+ P_WEBRTC = 281
)
var (
@@ -180,6 +184,11 @@ var (
Code: P_QUIC,
VCode: CodeToVarint(P_QUIC),
}
+ protoQUICV1 = Protocol{
+ Name: "quic-v1",
+ Code: P_QUIC_V1,
+ VCode: CodeToVarint(P_QUIC_V1),
+ }
protoWEBTRANSPORT = Protocol{
Name: "webtransport",
Code: P_WEBTRANSPORT,
@@ -227,6 +236,13 @@ var (
Code: P_TLS,
VCode: CodeToVarint(P_TLS),
}
+ protoSNI = Protocol{
+ Name: "sni",
+ Size: LengthPrefixedVarSize,
+ Code: P_SNI,
+ VCode: CodeToVarint(P_SNI),
+ Transcoder: TranscoderDns,
+ }
protoNOISE = Protocol{
Name: "noise",
Code: P_NOISE,
@@ -247,6 +263,16 @@ var (
Code: P_WSS,
VCode: CodeToVarint(P_WSS),
}
+ protoWebRTCDirect = Protocol{
+ Name: "webrtc-direct",
+ Code: P_WEBRTC_DIRECT,
+ VCode: CodeToVarint(P_WEBRTC_DIRECT),
+ }
+ protoWebRTC = Protocol{
+ Name: "webrtc",
+ Code: P_WEBRTC,
+ VCode: CodeToVarint(P_WEBRTC),
+ }
)
func init() {
@@ -271,6 +297,7 @@ func init() {
protoUTP,
protoUDT,
protoQUIC,
+ protoQUICV1,
protoWEBTRANSPORT,
protoCERTHASH,
protoHTTP,
@@ -279,10 +306,13 @@ func init() {
protoUNIX,
protoP2P_WEBRTC_DIRECT,
protoTLS,
+ protoSNI,
protoNOISE,
protoWS,
protoWSS,
protoPlaintextV2,
+ protoWebRTCDirect,
+ protoWebRTC,
} {
if err := AddProtocol(p); err != nil {
panic(err)
diff --git a/vendor/github.com/multiformats/go-multiaddr/version.json b/vendor/github.com/multiformats/go-multiaddr/version.json
index 42c14d1be..960b84e55 100644
--- a/vendor/github.com/multiformats/go-multiaddr/version.json
+++ b/vendor/github.com/multiformats/go-multiaddr/version.json
@@ -1,3 +1,3 @@
{
- "version": "v0.6.0"
+ "version": "v0.9.0"
}
diff --git a/vendor/github.com/multiformats/go-multibase/version.json b/vendor/github.com/multiformats/go-multibase/version.json
index 5e94b0fa6..1437d5b73 100644
--- a/vendor/github.com/multiformats/go-multibase/version.json
+++ b/vendor/github.com/multiformats/go-multibase/version.json
@@ -1,3 +1,3 @@
{
- "version": "v0.1.1"
+ "version": "v0.2.0"
}
diff --git a/vendor/github.com/multiformats/go-multicodec/README.md b/vendor/github.com/multiformats/go-multicodec/README.md
index d8f7c9557..cb42a52aa 100644
--- a/vendor/github.com/multiformats/go-multicodec/README.md
+++ b/vendor/github.com/multiformats/go-multicodec/README.md
@@ -7,8 +7,14 @@
## Table of Contents
- [Install](#install)
+- [Type](#type)
- [Usage](#usage)
+ - [Importing Code constant](#importing-code-constant)
+ - [Code from string](#code-from-string)
+ - [Code from uint64](#code-from-uint64)
- [Generator](#generator)
+ - [With old table.csv](#with-old-tablecsv)
+ - [With updated table.csv](#with-updated-tablecsv)
- [Maintainers](#maintainers)
- [Contribute](#contribute)
- [License](#license)
@@ -61,15 +67,31 @@ rawCode := multicodec.Code(0x55)
## Generator
+### With old table.csv
+
To generate the constants yourself:
- git clone https://github.com/multiformats/go-multicodec
- cd go-multicodec
- git submodule init && git submodule update
- go generate
+```console
+$ git clone https://github.com/multiformats/go-multicodec
+$ cd go-multicodec
+$ git submodule init && git submodule update
+$ go generate
+```
Note: You may need to install `stringer` via `go install golang.org/x/tools/cmd/stringer`.
+### With updated table.csv
+
+To generate the constants for the latest [table.csv](https://github.com/multiformats/multicodec/blob/master/table.csv):
+
+```console
+$ git clone https://github.com/multiformats/go-multicodec
+$ cd go-multicodec
+$ git submodule init
+$ git submodule update --remote # updates ./multicodec/table.csv to upstream version
+$ go generate
+```
+
## Maintainers
[@mvdan](https://github.com/mvdan).
diff --git a/vendor/github.com/multiformats/go-multicodec/code.go b/vendor/github.com/multiformats/go-multicodec/code.go
index ed6b1dd8f..6fc8ecc11 100644
--- a/vendor/github.com/multiformats/go-multicodec/code.go
+++ b/vendor/github.com/multiformats/go-multicodec/code.go
@@ -9,7 +9,7 @@ import (
//go:generate go run gen.go
//go:generate gofmt -w code_table.go
-//go:generate go run golang.org/x/tools/cmd/stringer@v0.1.10 -type=Code -linecomment
+//go:generate go run golang.org/x/tools/cmd/stringer@v0.5.0 -type=Code -linecomment
// Code describes an integer reserved in the multicodec table, defined at
// github.com/multiformats/multicodec.
diff --git a/vendor/github.com/multiformats/go-multicodec/code_string.go b/vendor/github.com/multiformats/go-multicodec/code_string.go
index ca8669848..8850be651 100644
--- a/vendor/github.com/multiformats/go-multicodec/code_string.go
+++ b/vendor/github.com/multiformats/go-multicodec/code_string.go
@@ -34,6 +34,7 @@ func _() {
_ = x[Murmur3_32-35]
_ = x[Ip6-41]
_ = x[Ip6zone-42]
+ _ = x[Ipcidr-43]
_ = x[Path-47]
_ = x[Multicodec-48]
_ = x[Multihash-49]
@@ -88,44 +89,53 @@ func _() {
_ = x[StellarTx-209]
_ = x[Md4-212]
_ = x[Md5-213]
- _ = x[Bmt-214]
_ = x[DecredBlock-224]
_ = x[DecredTx-225]
- _ = x[IpldNs-226]
- _ = x[IpfsNs-227]
- _ = x[SwarmNs-228]
- _ = x[IpnsNs-229]
+ _ = x[Ipld-226]
+ _ = x[Ipfs-227]
+ _ = x[Swarm-228]
+ _ = x[Ipns-229]
_ = x[Zeronet-230]
_ = x[Secp256k1Pub-231]
+ _ = x[Dnslink-232]
_ = x[Bls12_381G1Pub-234]
_ = x[Bls12_381G2Pub-235]
_ = x[X25519Pub-236]
_ = x[Ed25519Pub-237]
_ = x[Bls12_381G1g2Pub-238]
+ _ = x[Sr25519Pub-239]
_ = x[DashBlock-240]
_ = x[DashTx-241]
_ = x[SwarmManifest-250]
_ = x[SwarmFeed-251]
+ _ = x[Beeson-252]
_ = x[Udp-273]
_ = x[P2pWebrtcStar-275]
_ = x[P2pWebrtcDirect-276]
_ = x[P2pStardust-277]
+ _ = x[WebrtcDirect-280]
+ _ = x[Webrtc-281]
_ = x[P2pCircuit-290]
_ = x[DagJson-297]
_ = x[Udt-301]
_ = x[Utp-302]
+ _ = x[Crc32-306]
+ _ = x[Crc64Ecma-356]
_ = x[Unix-400]
_ = x[Thread-406]
_ = x[P2p-421]
- _ = x[Ipfs-421]
_ = x[Https-443]
_ = x[Onion-444]
_ = x[Onion3-445]
_ = x[Garlic64-446]
_ = x[Garlic32-447]
_ = x[Tls-448]
+ _ = x[Sni-449]
_ = x[Noise-454]
_ = x[Quic-460]
+ _ = x[QuicV1-461]
+ _ = x[Webtransport-465]
+ _ = x[Certhash-466]
_ = x[Ws-477]
_ = x[Wss-478]
_ = x[P2pWebsocketStar-479]
@@ -134,12 +144,16 @@ func _() {
_ = x[Json-512]
_ = x[Messagepack-513]
_ = x[Car-514]
+ _ = x[IpnsRecord-768]
_ = x[Libp2pPeerRecord-769]
_ = x[Libp2pRelayRsvp-770]
+ _ = x[Memorytransport-777]
_ = x[CarIndexSorted-1024]
_ = x[CarMultihashIndexSorted-1025]
_ = x[TransportBitswap-2304]
_ = x[TransportGraphsyncFilecoinv1-2320]
+ _ = x[TransportIpfsGatewayHttp-2336]
+ _ = x[Multidid-3357]
_ = x[Sha2_256Trunc254Padded-4114]
_ = x[Sha2_224-4115]
_ = x[Sha2_512_224-4116]
@@ -156,10 +170,18 @@ func _() {
_ = x[Ed448Pub-4611]
_ = x[X448Pub-4612]
_ = x[RsaPub-4613]
+ _ = x[Sm2Pub-4614]
_ = x[Ed25519Priv-4864]
_ = x[Secp256k1Priv-4865]
_ = x[X25519Priv-4866]
+ _ = x[Sr25519Priv-4867]
+ _ = x[RsaPriv-4869]
+ _ = x[P256Priv-4870]
+ _ = x[P384Priv-4871]
+ _ = x[P521Priv-4872]
_ = x[Kangarootwelve-7425]
+ _ = x[AesGcm256-8192]
+ _ = x[Silverpine-16194]
_ = x[Sm3_256-21325]
_ = x[Blake2b8-45569]
_ = x[Blake2b16-45570]
@@ -481,9 +503,25 @@ func _() {
_ = x[Skein1024_1008-46046]
_ = x[Skein1024_1016-46047]
_ = x[Skein1024_1024-46048]
+ _ = x[Xxh32-46049]
+ _ = x[Xxh64-46050]
+ _ = x[Xxh3_64-46051]
+ _ = x[Xxh3_128-46052]
_ = x[PoseidonBls12_381A2Fc1-46081]
_ = x[PoseidonBls12_381A2Fc1Sc-46082]
+ _ = x[Urdca2015Canon-46083]
+ _ = x[Ssz-46337]
+ _ = x[SszSha2_256Bmt-46338]
+ _ = x[JsonJcs-46593]
+ _ = x[Iscc-52225]
_ = x[ZeroxcertImprint256-52753]
+ _ = x[Varsig-53248]
+ _ = x[Es256k-53479]
+ _ = x[Bls12381G1Sig-53482]
+ _ = x[Bls12381G2Sig-53483]
+ _ = x[Eddsa-53485]
+ _ = x[Eip191-53649]
+ _ = x[Jwk_jcsPub-60241]
_ = x[FilCommitmentUnsealed-61697]
_ = x[FilCommitmentSealed-61698]
_ = x[Plaintextv2-7367777]
@@ -496,9 +534,14 @@ func _() {
_ = x[SkynetNs-11639056]
_ = x[ArweaveNs-11704592]
_ = x[SubspaceNs-11770128]
+ _ = x[KumandraNs-11835664]
+ _ = x[Es256-13636096]
+ _ = x[Es284-13636097]
+ _ = x[Es512-13636098]
+ _ = x[Rs256-13636101]
}
-const _Code_name = "identitycidv1cidv2cidv3ip4tcpsha1sha2-256sha2-512sha3-512sha3-384sha3-256sha3-224shake-128shake-256keccak-224keccak-256keccak-384keccak-512blake3sha2-384dccpmurmur3-x64-64murmur3-32ip6ip6zonepathmulticodecmultihashmultiaddrmultibasednsdns4dns6dnsaddrprotobufcborrawdbl-sha2-256rlpbencodedag-pbdag-cborlibp2p-keygit-rawtorrent-infotorrent-fileleofcoin-blockleofcoin-txleofcoin-prsctpdag-josedag-coseeth-blocketh-block-listeth-tx-trieeth-txeth-tx-receipt-trieeth-tx-receipteth-state-trieeth-account-snapshoteth-storage-trieeth-receipt-log-trieeth-reciept-logaes-128aes-192aes-256chacha-128chacha-256bitcoin-blockbitcoin-txbitcoin-witness-commitmentzcash-blockzcash-txcaip-50streamidstellar-blockstellar-txmd4md5bmtdecred-blockdecred-txipld-nsipfs-nsswarm-nsipns-nszeronetsecp256k1-pubbls12_381-g1-pubbls12_381-g2-pubx25519-pubed25519-pubbls12_381-g1g2-pubdash-blockdash-txswarm-manifestswarm-feedudpp2p-webrtc-starp2p-webrtc-directp2p-stardustp2p-circuitdag-jsonudtutpunixthreadp2phttpsoniononion3garlic64garlic32tlsnoisequicwswssp2p-websocket-starhttpswhid-1-snpjsonmessagepackcarlibp2p-peer-recordlibp2p-relay-rsvpcar-index-sortedcar-multihash-index-sortedtransport-bitswaptransport-graphsync-filecoinv1sha2-256-trunc254-paddedsha2-224sha2-512-224sha2-512-256murmur3-x64-128ripemd-128ripemd-160ripemd-256ripemd-320x11p256-pubp384-pubp521-pubed448-pubx448-pubrsa-pubed25519-privsecp256k1-privx25519-privkangarootwelvesm3-256blake2b-8blake2b-16blake2b-24blake2b-32blake2b-40blake2b-48blake2b-56blake2b-64blake2b-72blake2b-80blake2b-88blake2b-96blake2b-104blake2b-112blake2b-120blake2b-128blake2b-136blake2b-144blake2b-152blake2b-160blake2b-168blake2b-176blake2b-184blake2b-192blake2b-200blake2b-208blake2b-216blake2b-224blake2b-232blake2b-240blake2b-248blake2b-256blake2b-264blake2b-272blake2b-280blake2b-288blake2b-296blake2b-304blake2b-312blake2b-320blake2b-328blake2b-336blake2b-344blake2b-352blake2b-360blake2b-368blake2b-376blake2b-384blake2b-392blake2b-400blake2b-408blake2b-
416blake2b-424blake2b-432blake2b-440blake2b-448blake2b-456blake2b-464blake2b-472blake2b-480blake2b-488blake2b-496blake2b-504blake2b-512blake2s-8blake2s-16blake2s-24blake2s-32blake2s-40blake2s-48blake2s-56blake2s-64blake2s-72blake2s-80blake2s-88blake2s-96blake2s-104blake2s-112blake2s-120blake2s-128blake2s-136blake2s-144blake2s-152blake2s-160blake2s-168blake2s-176blake2s-184blake2s-192blake2s-200blake2s-208blake2s-216blake2s-224blake2s-232blake2s-240blake2s-248blake2s-256skein256-8skein256-16skein256-24skein256-32skein256-40skein256-48skein256-56skein256-64skein256-72skein256-80skein256-88skein256-96skein256-104skein256-112skein256-120skein256-128skein256-136skein256-144skein256-152skein256-160skein256-168skein256-176skein256-184skein256-192skein256-200skein256-208skein256-216skein256-224skein256-232skein256-240skein256-248skein256-256skein512-8skein512-16skein512-24skein512-32skein512-40skein512-48skein512-56skein512-64skein512-72skein512-80skein512-88skein512-96skein512-104skein512-112skein512-120skein512-128skein512-136skein512-144skein512-152skein512-160skein512-168skein512-176skein512-184skein512-192skein512-200skein512-208skein512-216skein512-224skein512-232skein512-240skein512-248skein512-256skein512-264skein512-272skein512-280skein512-288skein512-296skein512-304skein512-312skein512-320skein512-328skein512-336skein512-344skein512-352skein512-360skein512-368skein512-376skein512-384skein512-392skein512-400skein512-408skein512-416skein512-424skein512-432skein512-440skein512-448skein512-456skein512-464skein512-472skein512-480skein512-488skein512-496skein512-504skein512-512skein1024-8skein1024-16skein1024-24skein1024-32skein1024-40skein1024-48skein1024-56skein1024-64skein1024-72skein1024-80skein1024-88skein1024-96skein1024-104skein1024-112skein1024-120skein1024-128skein1024-136skein1024-144skein1024-152skein1024-160skein1024-168skein1024-176skein1024-184skein1024-192skein1024-200skein1024-208skein1024-216skein1024-224skein1024-232skein1024-240skein1024-248skein1024-
256skein1024-264skein1024-272skein1024-280skein1024-288skein1024-296skein1024-304skein1024-312skein1024-320skein1024-328skein1024-336skein1024-344skein1024-352skein1024-360skein1024-368skein1024-376skein1024-384skein1024-392skein1024-400skein1024-408skein1024-416skein1024-424skein1024-432skein1024-440skein1024-448skein1024-456skein1024-464skein1024-472skein1024-480skein1024-488skein1024-496skein1024-504skein1024-512skein1024-520skein1024-528skein1024-536skein1024-544skein1024-552skein1024-560skein1024-568skein1024-576skein1024-584skein1024-592skein1024-600skein1024-608skein1024-616skein1024-624skein1024-632skein1024-640skein1024-648skein1024-656skein1024-664skein1024-672skein1024-680skein1024-688skein1024-696skein1024-704skein1024-712skein1024-720skein1024-728skein1024-736skein1024-744skein1024-752skein1024-760skein1024-768skein1024-776skein1024-784skein1024-792skein1024-800skein1024-808skein1024-816skein1024-824skein1024-832skein1024-840skein1024-848skein1024-856skein1024-864skein1024-872skein1024-880skein1024-888skein1024-896skein1024-904skein1024-912skein1024-920skein1024-928skein1024-936skein1024-944skein1024-952skein1024-960skein1024-968skein1024-976skein1024-984skein1024-992skein1024-1000skein1024-1008skein1024-1016skein1024-1024poseidon-bls12_381-a2-fc1poseidon-bls12_381-a2-fc1-sczeroxcert-imprint-256fil-commitment-unsealedfil-commitment-sealedplaintextv2holochain-adr-v0holochain-adr-v1holochain-key-v0holochain-key-v1holochain-sig-v0holochain-sig-v1skynet-nsarweave-nssubspace-ns"
+const _Code_name = "identitycidv1cidv2cidv3ip4tcpsha1sha2-256sha2-512sha3-512sha3-384sha3-256sha3-224shake-128shake-256keccak-224keccak-256keccak-384keccak-512blake3sha2-384dccpmurmur3-x64-64murmur3-32ip6ip6zoneipcidrpathmulticodecmultihashmultiaddrmultibasednsdns4dns6dnsaddrprotobufcborrawdbl-sha2-256rlpbencodedag-pbdag-cborlibp2p-keygit-rawtorrent-infotorrent-fileleofcoin-blockleofcoin-txleofcoin-prsctpdag-josedag-coseeth-blocketh-block-listeth-tx-trieeth-txeth-tx-receipt-trieeth-tx-receipteth-state-trieeth-account-snapshoteth-storage-trieeth-receipt-log-trieeth-reciept-logaes-128aes-192aes-256chacha-128chacha-256bitcoin-blockbitcoin-txbitcoin-witness-commitmentzcash-blockzcash-txcaip-50streamidstellar-blockstellar-txmd4md5decred-blockdecred-txipldipfsswarmipnszeronetsecp256k1-pubdnslinkbls12_381-g1-pubbls12_381-g2-pubx25519-pubed25519-pubbls12_381-g1g2-pubsr25519-pubdash-blockdash-txswarm-manifestswarm-feedbeesonudpp2p-webrtc-starp2p-webrtc-directp2p-stardustwebrtc-directwebrtcp2p-circuitdag-jsonudtutpcrc32crc64-ecmaunixthreadp2phttpsoniononion3garlic64garlic32tlssninoisequicquic-v1webtransportcerthashwswssp2p-websocket-starhttpswhid-1-snpjsonmessagepackcaripns-recordlibp2p-peer-recordlibp2p-relay-rsvpmemorytransportcar-index-sortedcar-multihash-index-sortedtransport-bitswaptransport-graphsync-filecoinv1transport-ipfs-gateway-httpmultididsha2-256-trunc254-paddedsha2-224sha2-512-224sha2-512-256murmur3-x64-128ripemd-128ripemd-160ripemd-256ripemd-320x11p256-pubp384-pubp521-pubed448-pubx448-pubrsa-pubsm2-pubed25519-privsecp256k1-privx25519-privsr25519-privrsa-privp256-privp384-privp521-privkangarootwelveaes-gcm-256silverpinesm3-256blake2b-8blake2b-16blake2b-24blake2b-32blake2b-40blake2b-48blake2b-56blake2b-64blake2b-72blake2b-80blake2b-88blake2b-96blake2b-104blake2b-112blake2b-120blake2b-128blake2b-136blake2b-144blake2b-152blake2b-160blake2b-168blake2b-176blake2b-184blake2b-192blake2b-200blake2b-208blake2b-216blake2b-224blake2b-232blake2b-240blake2b-248blake2b-256bl
ake2b-264blake2b-272blake2b-280blake2b-288blake2b-296blake2b-304blake2b-312blake2b-320blake2b-328blake2b-336blake2b-344blake2b-352blake2b-360blake2b-368blake2b-376blake2b-384blake2b-392blake2b-400blake2b-408blake2b-416blake2b-424blake2b-432blake2b-440blake2b-448blake2b-456blake2b-464blake2b-472blake2b-480blake2b-488blake2b-496blake2b-504blake2b-512blake2s-8blake2s-16blake2s-24blake2s-32blake2s-40blake2s-48blake2s-56blake2s-64blake2s-72blake2s-80blake2s-88blake2s-96blake2s-104blake2s-112blake2s-120blake2s-128blake2s-136blake2s-144blake2s-152blake2s-160blake2s-168blake2s-176blake2s-184blake2s-192blake2s-200blake2s-208blake2s-216blake2s-224blake2s-232blake2s-240blake2s-248blake2s-256skein256-8skein256-16skein256-24skein256-32skein256-40skein256-48skein256-56skein256-64skein256-72skein256-80skein256-88skein256-96skein256-104skein256-112skein256-120skein256-128skein256-136skein256-144skein256-152skein256-160skein256-168skein256-176skein256-184skein256-192skein256-200skein256-208skein256-216skein256-224skein256-232skein256-240skein256-248skein256-256skein512-8skein512-16skein512-24skein512-32skein512-40skein512-48skein512-56skein512-64skein512-72skein512-80skein512-88skein512-96skein512-104skein512-112skein512-120skein512-128skein512-136skein512-144skein512-152skein512-160skein512-168skein512-176skein512-184skein512-192skein512-200skein512-208skein512-216skein512-224skein512-232skein512-240skein512-248skein512-256skein512-264skein512-272skein512-280skein512-288skein512-296skein512-304skein512-312skein512-320skein512-328skein512-336skein512-344skein512-352skein512-360skein512-368skein512-376skein512-384skein512-392skein512-400skein512-408skein512-416skein512-424skein512-432skein512-440skein512-448skein512-456skein512-464skein512-472skein512-480skein512-488skein512-496skein512-504skein512-512skein1024-8skein1024-16skein1024-24skein1024-32skein1024-40skein1024-48skein1024-56skein1024-64skein1024-72skein1024-80skein1024-88skein1024-96skein1024-104skein1024-112skein1024-120ske
in1024-128skein1024-136skein1024-144skein1024-152skein1024-160skein1024-168skein1024-176skein1024-184skein1024-192skein1024-200skein1024-208skein1024-216skein1024-224skein1024-232skein1024-240skein1024-248skein1024-256skein1024-264skein1024-272skein1024-280skein1024-288skein1024-296skein1024-304skein1024-312skein1024-320skein1024-328skein1024-336skein1024-344skein1024-352skein1024-360skein1024-368skein1024-376skein1024-384skein1024-392skein1024-400skein1024-408skein1024-416skein1024-424skein1024-432skein1024-440skein1024-448skein1024-456skein1024-464skein1024-472skein1024-480skein1024-488skein1024-496skein1024-504skein1024-512skein1024-520skein1024-528skein1024-536skein1024-544skein1024-552skein1024-560skein1024-568skein1024-576skein1024-584skein1024-592skein1024-600skein1024-608skein1024-616skein1024-624skein1024-632skein1024-640skein1024-648skein1024-656skein1024-664skein1024-672skein1024-680skein1024-688skein1024-696skein1024-704skein1024-712skein1024-720skein1024-728skein1024-736skein1024-744skein1024-752skein1024-760skein1024-768skein1024-776skein1024-784skein1024-792skein1024-800skein1024-808skein1024-816skein1024-824skein1024-832skein1024-840skein1024-848skein1024-856skein1024-864skein1024-872skein1024-880skein1024-888skein1024-896skein1024-904skein1024-912skein1024-920skein1024-928skein1024-936skein1024-944skein1024-952skein1024-960skein1024-968skein1024-976skein1024-984skein1024-992skein1024-1000skein1024-1008skein1024-1016skein1024-1024xxh-32xxh-64xxh3-64xxh3-128poseidon-bls12_381-a2-fc1poseidon-bls12_381-a2-fc1-scurdca-2015-canonsszssz-sha2-256-bmtjson-jcsiscczeroxcert-imprint-256varsiges256kbls-12381-g1-sigbls-12381-g2-sigeddsaeip-191jwk_jcs-pubfil-commitment-unsealedfil-commitment-sealedplaintextv2holochain-adr-v0holochain-adr-v1holochain-key-v0holochain-key-v1holochain-sig-v0holochain-sig-v1skynet-nsarweave-nssubspace-nskumandra-nses256es284es512rs256"
var _Code_map = map[Code]string{
0: _Code_name[0:8],
@@ -527,467 +570,511 @@ var _Code_map = map[Code]string{
35: _Code_name[171:181],
41: _Code_name[181:184],
42: _Code_name[184:191],
- 47: _Code_name[191:195],
- 48: _Code_name[195:205],
- 49: _Code_name[205:214],
- 50: _Code_name[214:223],
- 51: _Code_name[223:232],
- 53: _Code_name[232:235],
- 54: _Code_name[235:239],
- 55: _Code_name[239:243],
- 56: _Code_name[243:250],
- 80: _Code_name[250:258],
- 81: _Code_name[258:262],
- 85: _Code_name[262:265],
- 86: _Code_name[265:277],
- 96: _Code_name[277:280],
- 99: _Code_name[280:287],
- 112: _Code_name[287:293],
- 113: _Code_name[293:301],
- 114: _Code_name[301:311],
- 120: _Code_name[311:318],
- 123: _Code_name[318:330],
- 124: _Code_name[330:342],
- 129: _Code_name[342:356],
- 130: _Code_name[356:367],
- 131: _Code_name[367:378],
- 132: _Code_name[378:382],
- 133: _Code_name[382:390],
- 134: _Code_name[390:398],
- 144: _Code_name[398:407],
- 145: _Code_name[407:421],
- 146: _Code_name[421:432],
- 147: _Code_name[432:438],
- 148: _Code_name[438:457],
- 149: _Code_name[457:471],
- 150: _Code_name[471:485],
- 151: _Code_name[485:505],
- 152: _Code_name[505:521],
- 153: _Code_name[521:541],
- 154: _Code_name[541:556],
- 160: _Code_name[556:563],
- 161: _Code_name[563:570],
- 162: _Code_name[570:577],
- 163: _Code_name[577:587],
- 164: _Code_name[587:597],
- 176: _Code_name[597:610],
- 177: _Code_name[610:620],
- 178: _Code_name[620:646],
- 192: _Code_name[646:657],
- 193: _Code_name[657:665],
- 202: _Code_name[665:672],
- 206: _Code_name[672:680],
- 208: _Code_name[680:693],
- 209: _Code_name[693:703],
- 212: _Code_name[703:706],
- 213: _Code_name[706:709],
- 214: _Code_name[709:712],
- 224: _Code_name[712:724],
- 225: _Code_name[724:733],
- 226: _Code_name[733:740],
- 227: _Code_name[740:747],
- 228: _Code_name[747:755],
- 229: _Code_name[755:762],
- 230: _Code_name[762:769],
- 231: _Code_name[769:782],
- 234: _Code_name[782:798],
- 235: _Code_name[798:814],
- 236: _Code_name[814:824],
- 237: _Code_name[824:835],
- 238: _Code_name[835:853],
- 240: _Code_name[853:863],
- 241: _Code_name[863:870],
- 250: _Code_name[870:884],
- 251: _Code_name[884:894],
- 273: _Code_name[894:897],
- 275: _Code_name[897:912],
- 276: _Code_name[912:929],
- 277: _Code_name[929:941],
- 290: _Code_name[941:952],
- 297: _Code_name[952:960],
- 301: _Code_name[960:963],
- 302: _Code_name[963:966],
- 400: _Code_name[966:970],
- 406: _Code_name[970:976],
- 421: _Code_name[976:979],
- 443: _Code_name[979:984],
- 444: _Code_name[984:989],
- 445: _Code_name[989:995],
- 446: _Code_name[995:1003],
- 447: _Code_name[1003:1011],
- 448: _Code_name[1011:1014],
- 454: _Code_name[1014:1019],
- 460: _Code_name[1019:1023],
- 477: _Code_name[1023:1025],
- 478: _Code_name[1025:1028],
- 479: _Code_name[1028:1046],
- 480: _Code_name[1046:1050],
- 496: _Code_name[1050:1061],
- 512: _Code_name[1061:1065],
- 513: _Code_name[1065:1076],
- 514: _Code_name[1076:1079],
- 769: _Code_name[1079:1097],
- 770: _Code_name[1097:1114],
- 1024: _Code_name[1114:1130],
- 1025: _Code_name[1130:1156],
- 2304: _Code_name[1156:1173],
- 2320: _Code_name[1173:1203],
- 4114: _Code_name[1203:1227],
- 4115: _Code_name[1227:1235],
- 4116: _Code_name[1235:1247],
- 4117: _Code_name[1247:1259],
- 4130: _Code_name[1259:1274],
- 4178: _Code_name[1274:1284],
- 4179: _Code_name[1284:1294],
- 4180: _Code_name[1294:1304],
- 4181: _Code_name[1304:1314],
- 4352: _Code_name[1314:1317],
- 4608: _Code_name[1317:1325],
- 4609: _Code_name[1325:1333],
- 4610: _Code_name[1333:1341],
- 4611: _Code_name[1341:1350],
- 4612: _Code_name[1350:1358],
- 4613: _Code_name[1358:1365],
- 4864: _Code_name[1365:1377],
- 4865: _Code_name[1377:1391],
- 4866: _Code_name[1391:1402],
- 7425: _Code_name[1402:1416],
- 21325: _Code_name[1416:1423],
- 45569: _Code_name[1423:1432],
- 45570: _Code_name[1432:1442],
- 45571: _Code_name[1442:1452],
- 45572: _Code_name[1452:1462],
- 45573: _Code_name[1462:1472],
- 45574: _Code_name[1472:1482],
- 45575: _Code_name[1482:1492],
- 45576: _Code_name[1492:1502],
- 45577: _Code_name[1502:1512],
- 45578: _Code_name[1512:1522],
- 45579: _Code_name[1522:1532],
- 45580: _Code_name[1532:1542],
- 45581: _Code_name[1542:1553],
- 45582: _Code_name[1553:1564],
- 45583: _Code_name[1564:1575],
- 45584: _Code_name[1575:1586],
- 45585: _Code_name[1586:1597],
- 45586: _Code_name[1597:1608],
- 45587: _Code_name[1608:1619],
- 45588: _Code_name[1619:1630],
- 45589: _Code_name[1630:1641],
- 45590: _Code_name[1641:1652],
- 45591: _Code_name[1652:1663],
- 45592: _Code_name[1663:1674],
- 45593: _Code_name[1674:1685],
- 45594: _Code_name[1685:1696],
- 45595: _Code_name[1696:1707],
- 45596: _Code_name[1707:1718],
- 45597: _Code_name[1718:1729],
- 45598: _Code_name[1729:1740],
- 45599: _Code_name[1740:1751],
- 45600: _Code_name[1751:1762],
- 45601: _Code_name[1762:1773],
- 45602: _Code_name[1773:1784],
- 45603: _Code_name[1784:1795],
- 45604: _Code_name[1795:1806],
- 45605: _Code_name[1806:1817],
- 45606: _Code_name[1817:1828],
- 45607: _Code_name[1828:1839],
- 45608: _Code_name[1839:1850],
- 45609: _Code_name[1850:1861],
- 45610: _Code_name[1861:1872],
- 45611: _Code_name[1872:1883],
- 45612: _Code_name[1883:1894],
- 45613: _Code_name[1894:1905],
- 45614: _Code_name[1905:1916],
- 45615: _Code_name[1916:1927],
- 45616: _Code_name[1927:1938],
- 45617: _Code_name[1938:1949],
- 45618: _Code_name[1949:1960],
- 45619: _Code_name[1960:1971],
- 45620: _Code_name[1971:1982],
- 45621: _Code_name[1982:1993],
- 45622: _Code_name[1993:2004],
- 45623: _Code_name[2004:2015],
- 45624: _Code_name[2015:2026],
- 45625: _Code_name[2026:2037],
- 45626: _Code_name[2037:2048],
- 45627: _Code_name[2048:2059],
- 45628: _Code_name[2059:2070],
- 45629: _Code_name[2070:2081],
- 45630: _Code_name[2081:2092],
- 45631: _Code_name[2092:2103],
- 45632: _Code_name[2103:2114],
- 45633: _Code_name[2114:2123],
- 45634: _Code_name[2123:2133],
- 45635: _Code_name[2133:2143],
- 45636: _Code_name[2143:2153],
- 45637: _Code_name[2153:2163],
- 45638: _Code_name[2163:2173],
- 45639: _Code_name[2173:2183],
- 45640: _Code_name[2183:2193],
- 45641: _Code_name[2193:2203],
- 45642: _Code_name[2203:2213],
- 45643: _Code_name[2213:2223],
- 45644: _Code_name[2223:2233],
- 45645: _Code_name[2233:2244],
- 45646: _Code_name[2244:2255],
- 45647: _Code_name[2255:2266],
- 45648: _Code_name[2266:2277],
- 45649: _Code_name[2277:2288],
- 45650: _Code_name[2288:2299],
- 45651: _Code_name[2299:2310],
- 45652: _Code_name[2310:2321],
- 45653: _Code_name[2321:2332],
- 45654: _Code_name[2332:2343],
- 45655: _Code_name[2343:2354],
- 45656: _Code_name[2354:2365],
- 45657: _Code_name[2365:2376],
- 45658: _Code_name[2376:2387],
- 45659: _Code_name[2387:2398],
- 45660: _Code_name[2398:2409],
- 45661: _Code_name[2409:2420],
- 45662: _Code_name[2420:2431],
- 45663: _Code_name[2431:2442],
- 45664: _Code_name[2442:2453],
- 45825: _Code_name[2453:2463],
- 45826: _Code_name[2463:2474],
- 45827: _Code_name[2474:2485],
- 45828: _Code_name[2485:2496],
- 45829: _Code_name[2496:2507],
- 45830: _Code_name[2507:2518],
- 45831: _Code_name[2518:2529],
- 45832: _Code_name[2529:2540],
- 45833: _Code_name[2540:2551],
- 45834: _Code_name[2551:2562],
- 45835: _Code_name[2562:2573],
- 45836: _Code_name[2573:2584],
- 45837: _Code_name[2584:2596],
- 45838: _Code_name[2596:2608],
- 45839: _Code_name[2608:2620],
- 45840: _Code_name[2620:2632],
- 45841: _Code_name[2632:2644],
- 45842: _Code_name[2644:2656],
- 45843: _Code_name[2656:2668],
- 45844: _Code_name[2668:2680],
- 45845: _Code_name[2680:2692],
- 45846: _Code_name[2692:2704],
- 45847: _Code_name[2704:2716],
- 45848: _Code_name[2716:2728],
- 45849: _Code_name[2728:2740],
- 45850: _Code_name[2740:2752],
- 45851: _Code_name[2752:2764],
- 45852: _Code_name[2764:2776],
- 45853: _Code_name[2776:2788],
- 45854: _Code_name[2788:2800],
- 45855: _Code_name[2800:2812],
- 45856: _Code_name[2812:2824],
- 45857: _Code_name[2824:2834],
- 45858: _Code_name[2834:2845],
- 45859: _Code_name[2845:2856],
- 45860: _Code_name[2856:2867],
- 45861: _Code_name[2867:2878],
- 45862: _Code_name[2878:2889],
- 45863: _Code_name[2889:2900],
- 45864: _Code_name[2900:2911],
- 45865: _Code_name[2911:2922],
- 45866: _Code_name[2922:2933],
- 45867: _Code_name[2933:2944],
- 45868: _Code_name[2944:2955],
- 45869: _Code_name[2955:2967],
- 45870: _Code_name[2967:2979],
- 45871: _Code_name[2979:2991],
- 45872: _Code_name[2991:3003],
- 45873: _Code_name[3003:3015],
- 45874: _Code_name[3015:3027],
- 45875: _Code_name[3027:3039],
- 45876: _Code_name[3039:3051],
- 45877: _Code_name[3051:3063],
- 45878: _Code_name[3063:3075],
- 45879: _Code_name[3075:3087],
- 45880: _Code_name[3087:3099],
- 45881: _Code_name[3099:3111],
- 45882: _Code_name[3111:3123],
- 45883: _Code_name[3123:3135],
- 45884: _Code_name[3135:3147],
- 45885: _Code_name[3147:3159],
- 45886: _Code_name[3159:3171],
- 45887: _Code_name[3171:3183],
- 45888: _Code_name[3183:3195],
- 45889: _Code_name[3195:3207],
- 45890: _Code_name[3207:3219],
- 45891: _Code_name[3219:3231],
- 45892: _Code_name[3231:3243],
- 45893: _Code_name[3243:3255],
- 45894: _Code_name[3255:3267],
- 45895: _Code_name[3267:3279],
- 45896: _Code_name[3279:3291],
- 45897: _Code_name[3291:3303],
- 45898: _Code_name[3303:3315],
- 45899: _Code_name[3315:3327],
- 45900: _Code_name[3327:3339],
- 45901: _Code_name[3339:3351],
- 45902: _Code_name[3351:3363],
- 45903: _Code_name[3363:3375],
- 45904: _Code_name[3375:3387],
- 45905: _Code_name[3387:3399],
- 45906: _Code_name[3399:3411],
- 45907: _Code_name[3411:3423],
- 45908: _Code_name[3423:3435],
- 45909: _Code_name[3435:3447],
- 45910: _Code_name[3447:3459],
- 45911: _Code_name[3459:3471],
- 45912: _Code_name[3471:3483],
- 45913: _Code_name[3483:3495],
- 45914: _Code_name[3495:3507],
- 45915: _Code_name[3507:3519],
- 45916: _Code_name[3519:3531],
- 45917: _Code_name[3531:3543],
- 45918: _Code_name[3543:3555],
- 45919: _Code_name[3555:3567],
- 45920: _Code_name[3567:3579],
- 45921: _Code_name[3579:3590],
- 45922: _Code_name[3590:3602],
- 45923: _Code_name[3602:3614],
- 45924: _Code_name[3614:3626],
- 45925: _Code_name[3626:3638],
- 45926: _Code_name[3638:3650],
- 45927: _Code_name[3650:3662],
- 45928: _Code_name[3662:3674],
- 45929: _Code_name[3674:3686],
- 45930: _Code_name[3686:3698],
- 45931: _Code_name[3698:3710],
- 45932: _Code_name[3710:3722],
- 45933: _Code_name[3722:3735],
- 45934: _Code_name[3735:3748],
- 45935: _Code_name[3748:3761],
- 45936: _Code_name[3761:3774],
- 45937: _Code_name[3774:3787],
- 45938: _Code_name[3787:3800],
- 45939: _Code_name[3800:3813],
- 45940: _Code_name[3813:3826],
- 45941: _Code_name[3826:3839],
- 45942: _Code_name[3839:3852],
- 45943: _Code_name[3852:3865],
- 45944: _Code_name[3865:3878],
- 45945: _Code_name[3878:3891],
- 45946: _Code_name[3891:3904],
- 45947: _Code_name[3904:3917],
- 45948: _Code_name[3917:3930],
- 45949: _Code_name[3930:3943],
- 45950: _Code_name[3943:3956],
- 45951: _Code_name[3956:3969],
- 45952: _Code_name[3969:3982],
- 45953: _Code_name[3982:3995],
- 45954: _Code_name[3995:4008],
- 45955: _Code_name[4008:4021],
- 45956: _Code_name[4021:4034],
- 45957: _Code_name[4034:4047],
- 45958: _Code_name[4047:4060],
- 45959: _Code_name[4060:4073],
- 45960: _Code_name[4073:4086],
- 45961: _Code_name[4086:4099],
- 45962: _Code_name[4099:4112],
- 45963: _Code_name[4112:4125],
- 45964: _Code_name[4125:4138],
- 45965: _Code_name[4138:4151],
- 45966: _Code_name[4151:4164],
- 45967: _Code_name[4164:4177],
- 45968: _Code_name[4177:4190],
- 45969: _Code_name[4190:4203],
- 45970: _Code_name[4203:4216],
- 45971: _Code_name[4216:4229],
- 45972: _Code_name[4229:4242],
- 45973: _Code_name[4242:4255],
- 45974: _Code_name[4255:4268],
- 45975: _Code_name[4268:4281],
- 45976: _Code_name[4281:4294],
- 45977: _Code_name[4294:4307],
- 45978: _Code_name[4307:4320],
- 45979: _Code_name[4320:4333],
- 45980: _Code_name[4333:4346],
- 45981: _Code_name[4346:4359],
- 45982: _Code_name[4359:4372],
- 45983: _Code_name[4372:4385],
- 45984: _Code_name[4385:4398],
- 45985: _Code_name[4398:4411],
- 45986: _Code_name[4411:4424],
- 45987: _Code_name[4424:4437],
- 45988: _Code_name[4437:4450],
- 45989: _Code_name[4450:4463],
- 45990: _Code_name[4463:4476],
- 45991: _Code_name[4476:4489],
- 45992: _Code_name[4489:4502],
- 45993: _Code_name[4502:4515],
- 45994: _Code_name[4515:4528],
- 45995: _Code_name[4528:4541],
- 45996: _Code_name[4541:4554],
- 45997: _Code_name[4554:4567],
- 45998: _Code_name[4567:4580],
- 45999: _Code_name[4580:4593],
- 46000: _Code_name[4593:4606],
- 46001: _Code_name[4606:4619],
- 46002: _Code_name[4619:4632],
- 46003: _Code_name[4632:4645],
- 46004: _Code_name[4645:4658],
- 46005: _Code_name[4658:4671],
- 46006: _Code_name[4671:4684],
- 46007: _Code_name[4684:4697],
- 46008: _Code_name[4697:4710],
- 46009: _Code_name[4710:4723],
- 46010: _Code_name[4723:4736],
- 46011: _Code_name[4736:4749],
- 46012: _Code_name[4749:4762],
- 46013: _Code_name[4762:4775],
- 46014: _Code_name[4775:4788],
- 46015: _Code_name[4788:4801],
- 46016: _Code_name[4801:4814],
- 46017: _Code_name[4814:4827],
- 46018: _Code_name[4827:4840],
- 46019: _Code_name[4840:4853],
- 46020: _Code_name[4853:4866],
- 46021: _Code_name[4866:4879],
- 46022: _Code_name[4879:4892],
- 46023: _Code_name[4892:4905],
- 46024: _Code_name[4905:4918],
- 46025: _Code_name[4918:4931],
- 46026: _Code_name[4931:4944],
- 46027: _Code_name[4944:4957],
- 46028: _Code_name[4957:4970],
- 46029: _Code_name[4970:4983],
- 46030: _Code_name[4983:4996],
- 46031: _Code_name[4996:5009],
- 46032: _Code_name[5009:5022],
- 46033: _Code_name[5022:5035],
- 46034: _Code_name[5035:5048],
- 46035: _Code_name[5048:5061],
- 46036: _Code_name[5061:5074],
- 46037: _Code_name[5074:5087],
- 46038: _Code_name[5087:5100],
- 46039: _Code_name[5100:5113],
- 46040: _Code_name[5113:5126],
- 46041: _Code_name[5126:5139],
- 46042: _Code_name[5139:5152],
- 46043: _Code_name[5152:5165],
- 46044: _Code_name[5165:5178],
- 46045: _Code_name[5178:5192],
- 46046: _Code_name[5192:5206],
- 46047: _Code_name[5206:5220],
- 46048: _Code_name[5220:5234],
- 46081: _Code_name[5234:5259],
- 46082: _Code_name[5259:5287],
- 52753: _Code_name[5287:5308],
- 61697: _Code_name[5308:5331],
- 61698: _Code_name[5331:5352],
- 7367777: _Code_name[5352:5363],
- 8417572: _Code_name[5363:5379],
- 8483108: _Code_name[5379:5395],
- 9728292: _Code_name[5395:5411],
- 9793828: _Code_name[5411:5427],
- 10645796: _Code_name[5427:5443],
- 10711332: _Code_name[5443:5459],
- 11639056: _Code_name[5459:5468],
- 11704592: _Code_name[5468:5478],
- 11770128: _Code_name[5478:5489],
+ 43: _Code_name[191:197],
+ 47: _Code_name[197:201],
+ 48: _Code_name[201:211],
+ 49: _Code_name[211:220],
+ 50: _Code_name[220:229],
+ 51: _Code_name[229:238],
+ 53: _Code_name[238:241],
+ 54: _Code_name[241:245],
+ 55: _Code_name[245:249],
+ 56: _Code_name[249:256],
+ 80: _Code_name[256:264],
+ 81: _Code_name[264:268],
+ 85: _Code_name[268:271],
+ 86: _Code_name[271:283],
+ 96: _Code_name[283:286],
+ 99: _Code_name[286:293],
+ 112: _Code_name[293:299],
+ 113: _Code_name[299:307],
+ 114: _Code_name[307:317],
+ 120: _Code_name[317:324],
+ 123: _Code_name[324:336],
+ 124: _Code_name[336:348],
+ 129: _Code_name[348:362],
+ 130: _Code_name[362:373],
+ 131: _Code_name[373:384],
+ 132: _Code_name[384:388],
+ 133: _Code_name[388:396],
+ 134: _Code_name[396:404],
+ 144: _Code_name[404:413],
+ 145: _Code_name[413:427],
+ 146: _Code_name[427:438],
+ 147: _Code_name[438:444],
+ 148: _Code_name[444:463],
+ 149: _Code_name[463:477],
+ 150: _Code_name[477:491],
+ 151: _Code_name[491:511],
+ 152: _Code_name[511:527],
+ 153: _Code_name[527:547],
+ 154: _Code_name[547:562],
+ 160: _Code_name[562:569],
+ 161: _Code_name[569:576],
+ 162: _Code_name[576:583],
+ 163: _Code_name[583:593],
+ 164: _Code_name[593:603],
+ 176: _Code_name[603:616],
+ 177: _Code_name[616:626],
+ 178: _Code_name[626:652],
+ 192: _Code_name[652:663],
+ 193: _Code_name[663:671],
+ 202: _Code_name[671:678],
+ 206: _Code_name[678:686],
+ 208: _Code_name[686:699],
+ 209: _Code_name[699:709],
+ 212: _Code_name[709:712],
+ 213: _Code_name[712:715],
+ 224: _Code_name[715:727],
+ 225: _Code_name[727:736],
+ 226: _Code_name[736:740],
+ 227: _Code_name[740:744],
+ 228: _Code_name[744:749],
+ 229: _Code_name[749:753],
+ 230: _Code_name[753:760],
+ 231: _Code_name[760:773],
+ 232: _Code_name[773:780],
+ 234: _Code_name[780:796],
+ 235: _Code_name[796:812],
+ 236: _Code_name[812:822],
+ 237: _Code_name[822:833],
+ 238: _Code_name[833:851],
+ 239: _Code_name[851:862],
+ 240: _Code_name[862:872],
+ 241: _Code_name[872:879],
+ 250: _Code_name[879:893],
+ 251: _Code_name[893:903],
+ 252: _Code_name[903:909],
+ 273: _Code_name[909:912],
+ 275: _Code_name[912:927],
+ 276: _Code_name[927:944],
+ 277: _Code_name[944:956],
+ 280: _Code_name[956:969],
+ 281: _Code_name[969:975],
+ 290: _Code_name[975:986],
+ 297: _Code_name[986:994],
+ 301: _Code_name[994:997],
+ 302: _Code_name[997:1000],
+ 306: _Code_name[1000:1005],
+ 356: _Code_name[1005:1015],
+ 400: _Code_name[1015:1019],
+ 406: _Code_name[1019:1025],
+ 421: _Code_name[1025:1028],
+ 443: _Code_name[1028:1033],
+ 444: _Code_name[1033:1038],
+ 445: _Code_name[1038:1044],
+ 446: _Code_name[1044:1052],
+ 447: _Code_name[1052:1060],
+ 448: _Code_name[1060:1063],
+ 449: _Code_name[1063:1066],
+ 454: _Code_name[1066:1071],
+ 460: _Code_name[1071:1075],
+ 461: _Code_name[1075:1082],
+ 465: _Code_name[1082:1094],
+ 466: _Code_name[1094:1102],
+ 477: _Code_name[1102:1104],
+ 478: _Code_name[1104:1107],
+ 479: _Code_name[1107:1125],
+ 480: _Code_name[1125:1129],
+ 496: _Code_name[1129:1140],
+ 512: _Code_name[1140:1144],
+ 513: _Code_name[1144:1155],
+ 514: _Code_name[1155:1158],
+ 768: _Code_name[1158:1169],
+ 769: _Code_name[1169:1187],
+ 770: _Code_name[1187:1204],
+ 777: _Code_name[1204:1219],
+ 1024: _Code_name[1219:1235],
+ 1025: _Code_name[1235:1261],
+ 2304: _Code_name[1261:1278],
+ 2320: _Code_name[1278:1308],
+ 2336: _Code_name[1308:1335],
+ 3357: _Code_name[1335:1343],
+ 4114: _Code_name[1343:1367],
+ 4115: _Code_name[1367:1375],
+ 4116: _Code_name[1375:1387],
+ 4117: _Code_name[1387:1399],
+ 4130: _Code_name[1399:1414],
+ 4178: _Code_name[1414:1424],
+ 4179: _Code_name[1424:1434],
+ 4180: _Code_name[1434:1444],
+ 4181: _Code_name[1444:1454],
+ 4352: _Code_name[1454:1457],
+ 4608: _Code_name[1457:1465],
+ 4609: _Code_name[1465:1473],
+ 4610: _Code_name[1473:1481],
+ 4611: _Code_name[1481:1490],
+ 4612: _Code_name[1490:1498],
+ 4613: _Code_name[1498:1505],
+ 4614: _Code_name[1505:1512],
+ 4864: _Code_name[1512:1524],
+ 4865: _Code_name[1524:1538],
+ 4866: _Code_name[1538:1549],
+ 4867: _Code_name[1549:1561],
+ 4869: _Code_name[1561:1569],
+ 4870: _Code_name[1569:1578],
+ 4871: _Code_name[1578:1587],
+ 4872: _Code_name[1587:1596],
+ 7425: _Code_name[1596:1610],
+ 8192: _Code_name[1610:1621],
+ 16194: _Code_name[1621:1631],
+ 21325: _Code_name[1631:1638],
+ 45569: _Code_name[1638:1647],
+ 45570: _Code_name[1647:1657],
+ 45571: _Code_name[1657:1667],
+ 45572: _Code_name[1667:1677],
+ 45573: _Code_name[1677:1687],
+ 45574: _Code_name[1687:1697],
+ 45575: _Code_name[1697:1707],
+ 45576: _Code_name[1707:1717],
+ 45577: _Code_name[1717:1727],
+ 45578: _Code_name[1727:1737],
+ 45579: _Code_name[1737:1747],
+ 45580: _Code_name[1747:1757],
+ 45581: _Code_name[1757:1768],
+ 45582: _Code_name[1768:1779],
+ 45583: _Code_name[1779:1790],
+ 45584: _Code_name[1790:1801],
+ 45585: _Code_name[1801:1812],
+ 45586: _Code_name[1812:1823],
+ 45587: _Code_name[1823:1834],
+ 45588: _Code_name[1834:1845],
+ 45589: _Code_name[1845:1856],
+ 45590: _Code_name[1856:1867],
+ 45591: _Code_name[1867:1878],
+ 45592: _Code_name[1878:1889],
+ 45593: _Code_name[1889:1900],
+ 45594: _Code_name[1900:1911],
+ 45595: _Code_name[1911:1922],
+ 45596: _Code_name[1922:1933],
+ 45597: _Code_name[1933:1944],
+ 45598: _Code_name[1944:1955],
+ 45599: _Code_name[1955:1966],
+ 45600: _Code_name[1966:1977],
+ 45601: _Code_name[1977:1988],
+ 45602: _Code_name[1988:1999],
+ 45603: _Code_name[1999:2010],
+ 45604: _Code_name[2010:2021],
+ 45605: _Code_name[2021:2032],
+ 45606: _Code_name[2032:2043],
+ 45607: _Code_name[2043:2054],
+ 45608: _Code_name[2054:2065],
+ 45609: _Code_name[2065:2076],
+ 45610: _Code_name[2076:2087],
+ 45611: _Code_name[2087:2098],
+ 45612: _Code_name[2098:2109],
+ 45613: _Code_name[2109:2120],
+ 45614: _Code_name[2120:2131],
+ 45615: _Code_name[2131:2142],
+ 45616: _Code_name[2142:2153],
+ 45617: _Code_name[2153:2164],
+ 45618: _Code_name[2164:2175],
+ 45619: _Code_name[2175:2186],
+ 45620: _Code_name[2186:2197],
+ 45621: _Code_name[2197:2208],
+ 45622: _Code_name[2208:2219],
+ 45623: _Code_name[2219:2230],
+ 45624: _Code_name[2230:2241],
+ 45625: _Code_name[2241:2252],
+ 45626: _Code_name[2252:2263],
+ 45627: _Code_name[2263:2274],
+ 45628: _Code_name[2274:2285],
+ 45629: _Code_name[2285:2296],
+ 45630: _Code_name[2296:2307],
+ 45631: _Code_name[2307:2318],
+ 45632: _Code_name[2318:2329],
+ 45633: _Code_name[2329:2338],
+ 45634: _Code_name[2338:2348],
+ 45635: _Code_name[2348:2358],
+ 45636: _Code_name[2358:2368],
+ 45637: _Code_name[2368:2378],
+ 45638: _Code_name[2378:2388],
+ 45639: _Code_name[2388:2398],
+ 45640: _Code_name[2398:2408],
+ 45641: _Code_name[2408:2418],
+ 45642: _Code_name[2418:2428],
+ 45643: _Code_name[2428:2438],
+ 45644: _Code_name[2438:2448],
+ 45645: _Code_name[2448:2459],
+ 45646: _Code_name[2459:2470],
+ 45647: _Code_name[2470:2481],
+ 45648: _Code_name[2481:2492],
+ 45649: _Code_name[2492:2503],
+ 45650: _Code_name[2503:2514],
+ 45651: _Code_name[2514:2525],
+ 45652: _Code_name[2525:2536],
+ 45653: _Code_name[2536:2547],
+ 45654: _Code_name[2547:2558],
+ 45655: _Code_name[2558:2569],
+ 45656: _Code_name[2569:2580],
+ 45657: _Code_name[2580:2591],
+ 45658: _Code_name[2591:2602],
+ 45659: _Code_name[2602:2613],
+ 45660: _Code_name[2613:2624],
+ 45661: _Code_name[2624:2635],
+ 45662: _Code_name[2635:2646],
+ 45663: _Code_name[2646:2657],
+ 45664: _Code_name[2657:2668],
+ 45825: _Code_name[2668:2678],
+ 45826: _Code_name[2678:2689],
+ 45827: _Code_name[2689:2700],
+ 45828: _Code_name[2700:2711],
+ 45829: _Code_name[2711:2722],
+ 45830: _Code_name[2722:2733],
+ 45831: _Code_name[2733:2744],
+ 45832: _Code_name[2744:2755],
+ 45833: _Code_name[2755:2766],
+ 45834: _Code_name[2766:2777],
+ 45835: _Code_name[2777:2788],
+ 45836: _Code_name[2788:2799],
+ 45837: _Code_name[2799:2811],
+ 45838: _Code_name[2811:2823],
+ 45839: _Code_name[2823:2835],
+ 45840: _Code_name[2835:2847],
+ 45841: _Code_name[2847:2859],
+ 45842: _Code_name[2859:2871],
+ 45843: _Code_name[2871:2883],
+ 45844: _Code_name[2883:2895],
+ 45845: _Code_name[2895:2907],
+ 45846: _Code_name[2907:2919],
+ 45847: _Code_name[2919:2931],
+ 45848: _Code_name[2931:2943],
+ 45849: _Code_name[2943:2955],
+ 45850: _Code_name[2955:2967],
+ 45851: _Code_name[2967:2979],
+ 45852: _Code_name[2979:2991],
+ 45853: _Code_name[2991:3003],
+ 45854: _Code_name[3003:3015],
+ 45855: _Code_name[3015:3027],
+ 45856: _Code_name[3027:3039],
+ 45857: _Code_name[3039:3049],
+ 45858: _Code_name[3049:3060],
+ 45859: _Code_name[3060:3071],
+ 45860: _Code_name[3071:3082],
+ 45861: _Code_name[3082:3093],
+ 45862: _Code_name[3093:3104],
+ 45863: _Code_name[3104:3115],
+ 45864: _Code_name[3115:3126],
+ 45865: _Code_name[3126:3137],
+ 45866: _Code_name[3137:3148],
+ 45867: _Code_name[3148:3159],
+ 45868: _Code_name[3159:3170],
+ 45869: _Code_name[3170:3182],
+ 45870: _Code_name[3182:3194],
+ 45871: _Code_name[3194:3206],
+ 45872: _Code_name[3206:3218],
+ 45873: _Code_name[3218:3230],
+ 45874: _Code_name[3230:3242],
+ 45875: _Code_name[3242:3254],
+ 45876: _Code_name[3254:3266],
+ 45877: _Code_name[3266:3278],
+ 45878: _Code_name[3278:3290],
+ 45879: _Code_name[3290:3302],
+ 45880: _Code_name[3302:3314],
+ 45881: _Code_name[3314:3326],
+ 45882: _Code_name[3326:3338],
+ 45883: _Code_name[3338:3350],
+ 45884: _Code_name[3350:3362],
+ 45885: _Code_name[3362:3374],
+ 45886: _Code_name[3374:3386],
+ 45887: _Code_name[3386:3398],
+ 45888: _Code_name[3398:3410],
+ 45889: _Code_name[3410:3422],
+ 45890: _Code_name[3422:3434],
+ 45891: _Code_name[3434:3446],
+ 45892: _Code_name[3446:3458],
+ 45893: _Code_name[3458:3470],
+ 45894: _Code_name[3470:3482],
+ 45895: _Code_name[3482:3494],
+ 45896: _Code_name[3494:3506],
+ 45897: _Code_name[3506:3518],
+ 45898: _Code_name[3518:3530],
+ 45899: _Code_name[3530:3542],
+ 45900: _Code_name[3542:3554],
+ 45901: _Code_name[3554:3566],
+ 45902: _Code_name[3566:3578],
+ 45903: _Code_name[3578:3590],
+ 45904: _Code_name[3590:3602],
+ 45905: _Code_name[3602:3614],
+ 45906: _Code_name[3614:3626],
+ 45907: _Code_name[3626:3638],
+ 45908: _Code_name[3638:3650],
+ 45909: _Code_name[3650:3662],
+ 45910: _Code_name[3662:3674],
+ 45911: _Code_name[3674:3686],
+ 45912: _Code_name[3686:3698],
+ 45913: _Code_name[3698:3710],
+ 45914: _Code_name[3710:3722],
+ 45915: _Code_name[3722:3734],
+ 45916: _Code_name[3734:3746],
+ 45917: _Code_name[3746:3758],
+ 45918: _Code_name[3758:3770],
+ 45919: _Code_name[3770:3782],
+ 45920: _Code_name[3782:3794],
+ 45921: _Code_name[3794:3805],
+ 45922: _Code_name[3805:3817],
+ 45923: _Code_name[3817:3829],
+ 45924: _Code_name[3829:3841],
+ 45925: _Code_name[3841:3853],
+ 45926: _Code_name[3853:3865],
+ 45927: _Code_name[3865:3877],
+ 45928: _Code_name[3877:3889],
+ 45929: _Code_name[3889:3901],
+ 45930: _Code_name[3901:3913],
+ 45931: _Code_name[3913:3925],
+ 45932: _Code_name[3925:3937],
+ 45933: _Code_name[3937:3950],
+ 45934: _Code_name[3950:3963],
+ 45935: _Code_name[3963:3976],
+ 45936: _Code_name[3976:3989],
+ 45937: _Code_name[3989:4002],
+ 45938: _Code_name[4002:4015],
+ 45939: _Code_name[4015:4028],
+ 45940: _Code_name[4028:4041],
+ 45941: _Code_name[4041:4054],
+ 45942: _Code_name[4054:4067],
+ 45943: _Code_name[4067:4080],
+ 45944: _Code_name[4080:4093],
+ 45945: _Code_name[4093:4106],
+ 45946: _Code_name[4106:4119],
+ 45947: _Code_name[4119:4132],
+ 45948: _Code_name[4132:4145],
+ 45949: _Code_name[4145:4158],
+ 45950: _Code_name[4158:4171],
+ 45951: _Code_name[4171:4184],
+ 45952: _Code_name[4184:4197],
+ 45953: _Code_name[4197:4210],
+ 45954: _Code_name[4210:4223],
+ 45955: _Code_name[4223:4236],
+ 45956: _Code_name[4236:4249],
+ 45957: _Code_name[4249:4262],
+ 45958: _Code_name[4262:4275],
+ 45959: _Code_name[4275:4288],
+ 45960: _Code_name[4288:4301],
+ 45961: _Code_name[4301:4314],
+ 45962: _Code_name[4314:4327],
+ 45963: _Code_name[4327:4340],
+ 45964: _Code_name[4340:4353],
+ 45965: _Code_name[4353:4366],
+ 45966: _Code_name[4366:4379],
+ 45967: _Code_name[4379:4392],
+ 45968: _Code_name[4392:4405],
+ 45969: _Code_name[4405:4418],
+ 45970: _Code_name[4418:4431],
+ 45971: _Code_name[4431:4444],
+ 45972: _Code_name[4444:4457],
+ 45973: _Code_name[4457:4470],
+ 45974: _Code_name[4470:4483],
+ 45975: _Code_name[4483:4496],
+ 45976: _Code_name[4496:4509],
+ 45977: _Code_name[4509:4522],
+ 45978: _Code_name[4522:4535],
+ 45979: _Code_name[4535:4548],
+ 45980: _Code_name[4548:4561],
+ 45981: _Code_name[4561:4574],
+ 45982: _Code_name[4574:4587],
+ 45983: _Code_name[4587:4600],
+ 45984: _Code_name[4600:4613],
+ 45985: _Code_name[4613:4626],
+ 45986: _Code_name[4626:4639],
+ 45987: _Code_name[4639:4652],
+ 45988: _Code_name[4652:4665],
+ 45989: _Code_name[4665:4678],
+ 45990: _Code_name[4678:4691],
+ 45991: _Code_name[4691:4704],
+ 45992: _Code_name[4704:4717],
+ 45993: _Code_name[4717:4730],
+ 45994: _Code_name[4730:4743],
+ 45995: _Code_name[4743:4756],
+ 45996: _Code_name[4756:4769],
+ 45997: _Code_name[4769:4782],
+ 45998: _Code_name[4782:4795],
+ 45999: _Code_name[4795:4808],
+ 46000: _Code_name[4808:4821],
+ 46001: _Code_name[4821:4834],
+ 46002: _Code_name[4834:4847],
+ 46003: _Code_name[4847:4860],
+ 46004: _Code_name[4860:4873],
+ 46005: _Code_name[4873:4886],
+ 46006: _Code_name[4886:4899],
+ 46007: _Code_name[4899:4912],
+ 46008: _Code_name[4912:4925],
+ 46009: _Code_name[4925:4938],
+ 46010: _Code_name[4938:4951],
+ 46011: _Code_name[4951:4964],
+ 46012: _Code_name[4964:4977],
+ 46013: _Code_name[4977:4990],
+ 46014: _Code_name[4990:5003],
+ 46015: _Code_name[5003:5016],
+ 46016: _Code_name[5016:5029],
+ 46017: _Code_name[5029:5042],
+ 46018: _Code_name[5042:5055],
+ 46019: _Code_name[5055:5068],
+ 46020: _Code_name[5068:5081],
+ 46021: _Code_name[5081:5094],
+ 46022: _Code_name[5094:5107],
+ 46023: _Code_name[5107:5120],
+ 46024: _Code_name[5120:5133],
+ 46025: _Code_name[5133:5146],
+ 46026: _Code_name[5146:5159],
+ 46027: _Code_name[5159:5172],
+ 46028: _Code_name[5172:5185],
+ 46029: _Code_name[5185:5198],
+ 46030: _Code_name[5198:5211],
+ 46031: _Code_name[5211:5224],
+ 46032: _Code_name[5224:5237],
+ 46033: _Code_name[5237:5250],
+ 46034: _Code_name[5250:5263],
+ 46035: _Code_name[5263:5276],
+ 46036: _Code_name[5276:5289],
+ 46037: _Code_name[5289:5302],
+ 46038: _Code_name[5302:5315],
+ 46039: _Code_name[5315:5328],
+ 46040: _Code_name[5328:5341],
+ 46041: _Code_name[5341:5354],
+ 46042: _Code_name[5354:5367],
+ 46043: _Code_name[5367:5380],
+ 46044: _Code_name[5380:5393],
+ 46045: _Code_name[5393:5407],
+ 46046: _Code_name[5407:5421],
+ 46047: _Code_name[5421:5435],
+ 46048: _Code_name[5435:5449],
+ 46049: _Code_name[5449:5455],
+ 46050: _Code_name[5455:5461],
+ 46051: _Code_name[5461:5468],
+ 46052: _Code_name[5468:5476],
+ 46081: _Code_name[5476:5501],
+ 46082: _Code_name[5501:5529],
+ 46083: _Code_name[5529:5545],
+ 46337: _Code_name[5545:5548],
+ 46338: _Code_name[5548:5564],
+ 46593: _Code_name[5564:5572],
+ 52225: _Code_name[5572:5576],
+ 52753: _Code_name[5576:5597],
+ 53248: _Code_name[5597:5603],
+ 53479: _Code_name[5603:5609],
+ 53482: _Code_name[5609:5625],
+ 53483: _Code_name[5625:5641],
+ 53485: _Code_name[5641:5646],
+ 53649: _Code_name[5646:5653],
+ 60241: _Code_name[5653:5664],
+ 61697: _Code_name[5664:5687],
+ 61698: _Code_name[5687:5708],
+ 7367777: _Code_name[5708:5719],
+ 8417572: _Code_name[5719:5735],
+ 8483108: _Code_name[5735:5751],
+ 9728292: _Code_name[5751:5767],
+ 9793828: _Code_name[5767:5783],
+ 10645796: _Code_name[5783:5799],
+ 10711332: _Code_name[5799:5815],
+ 11639056: _Code_name[5815:5824],
+ 11704592: _Code_name[5824:5834],
+ 11770128: _Code_name[5834:5845],
+ 11835664: _Code_name[5845:5856],
+ 13636096: _Code_name[5856:5861],
+ 13636097: _Code_name[5861:5866],
+ 13636098: _Code_name[5866:5871],
+ 13636101: _Code_name[5871:5876],
}
func (i Code) String() string {
diff --git a/vendor/github.com/multiformats/go-multicodec/code_table.go b/vendor/github.com/multiformats/go-multicodec/code_table.go
index 0fb9ecbf8..b727a4e16 100644
--- a/vendor/github.com/multiformats/go-multicodec/code_table.go
+++ b/vendor/github.com/multiformats/go-multicodec/code_table.go
@@ -69,10 +69,10 @@ const (
// Dccp is a draft code tagged "multiaddr".
Dccp Code = 0x21 // dccp
- // Murmur3X64_64 is a permanent code tagged "multihash" and described by: The first 64-bits of a murmur3-x64-128 - used for UnixFS directory sharding..
+ // Murmur3X64_64 is a permanent code tagged "hash" and described by: The first 64-bits of a murmur3-x64-128 - used for UnixFS directory sharding..
Murmur3X64_64 Code = 0x22 // murmur3-x64-64
- // Murmur3_32 is a draft code tagged "multihash".
+ // Murmur3_32 is a draft code tagged "hash".
Murmur3_32 Code = 0x23 // murmur3-32
// Ip6 is a permanent code tagged "multiaddr".
@@ -81,6 +81,9 @@ const (
// Ip6zone is a draft code tagged "multiaddr".
Ip6zone Code = 0x2a // ip6zone
+ // Ipcidr is a draft code tagged "multiaddr" and described by: CIDR mask for IP addresses.
+ Ipcidr Code = 0x2b // ipcidr
+
// Path is a permanent code tagged "namespace" and described by: Namespace for string paths. Corresponds to `/` in ASCII..
Path Code = 0x2f // path
@@ -243,26 +246,23 @@ const (
// Md5 is a draft code tagged "multihash".
Md5 Code = 0xd5 // md5
- // Bmt is a draft code tagged "multihash" and described by: Binary Merkle Tree Hash.
- Bmt Code = 0xd6 // bmt
-
// DecredBlock is a draft code tagged "ipld" and described by: Decred Block.
DecredBlock Code = 0xe0 // decred-block
// DecredTx is a draft code tagged "ipld" and described by: Decred Tx.
DecredTx Code = 0xe1 // decred-tx
- // IpldNs is a draft code tagged "namespace" and described by: IPLD path.
- IpldNs Code = 0xe2 // ipld-ns
+ // Ipld is a draft code tagged "namespace" and described by: IPLD path.
+ Ipld Code = 0xe2 // ipld
- // IpfsNs is a draft code tagged "namespace" and described by: IPFS path.
- IpfsNs Code = 0xe3 // ipfs-ns
+ // Ipfs is a draft code tagged "namespace" and described by: IPFS path.
+ Ipfs Code = 0xe3 // ipfs
- // SwarmNs is a draft code tagged "namespace" and described by: Swarm path.
- SwarmNs Code = 0xe4 // swarm-ns
+ // Swarm is a draft code tagged "namespace" and described by: Swarm path.
+ Swarm Code = 0xe4 // swarm
- // IpnsNs is a draft code tagged "namespace" and described by: IPNS path.
- IpnsNs Code = 0xe5 // ipns-ns
+ // Ipns is a draft code tagged "namespace" and described by: IPNS path.
+ Ipns Code = 0xe5 // ipns
// Zeronet is a draft code tagged "namespace" and described by: ZeroNet site address.
Zeronet Code = 0xe6 // zeronet
@@ -270,6 +270,9 @@ const (
// Secp256k1Pub is a draft code tagged "key" and described by: Secp256k1 public key (compressed).
Secp256k1Pub Code = 0xe7 // secp256k1-pub
+ // Dnslink is a permanent code tagged "namespace" and described by: DNSLink path.
+ Dnslink Code = 0xe8 // dnslink
+
// Bls12_381G1Pub is a draft code tagged "key" and described by: BLS12-381 public key in the G1 field.
Bls12_381G1Pub Code = 0xea // bls12_381-g1-pub
@@ -285,6 +288,9 @@ const (
// Bls12_381G1g2Pub is a draft code tagged "key" and described by: BLS12-381 concatenated public keys in both the G1 and G2 fields.
Bls12_381G1g2Pub Code = 0xee // bls12_381-g1g2-pub
+ // Sr25519Pub is a draft code tagged "key" and described by: Sr25519 public key.
+ Sr25519Pub Code = 0xef // sr25519-pub
+
// DashBlock is a draft code tagged "ipld" and described by: Dash Block.
DashBlock Code = 0xf0 // dash-block
@@ -297,18 +303,27 @@ const (
// SwarmFeed is a draft code tagged "ipld" and described by: Swarm Feed.
SwarmFeed Code = 0xfb // swarm-feed
+ // Beeson is a draft code tagged "ipld" and described by: Swarm BeeSon.
+ Beeson Code = 0xfc // beeson
+
// Udp is a draft code tagged "multiaddr".
Udp Code = 0x0111 // udp
- // P2pWebrtcStar is a draft code tagged "multiaddr".
+ // P2pWebrtcStar is a deprecated code tagged "multiaddr" and described by: Use webrtc or webrtc-direct instead.
P2pWebrtcStar Code = 0x0113 // p2p-webrtc-star
- // P2pWebrtcDirect is a draft code tagged "multiaddr".
+ // P2pWebrtcDirect is a deprecated code tagged "multiaddr" and described by: Use webrtc or webrtc-direct instead.
P2pWebrtcDirect Code = 0x0114 // p2p-webrtc-direct
- // P2pStardust is a draft code tagged "multiaddr".
+ // P2pStardust is a deprecated code tagged "multiaddr".
P2pStardust Code = 0x0115 // p2p-stardust
+ // WebrtcDirect is a draft code tagged "multiaddr" and described by: ICE-lite webrtc transport with SDP munging during connection establishment and without use of a STUN server.
+ WebrtcDirect Code = 0x0118 // webrtc-direct
+
+ // Webrtc is a draft code tagged "multiaddr" and described by: webrtc transport where connection establishment is according to w3c spec.
+ Webrtc Code = 0x0119 // webrtc
+
// P2pCircuit is a permanent code tagged "multiaddr".
P2pCircuit Code = 0x0122 // p2p-circuit
@@ -321,6 +336,12 @@ const (
// Utp is a draft code tagged "multiaddr".
Utp Code = 0x012e // utp
+ // Crc32 is a draft code tagged "hash" and described by: CRC-32 non-cryptographic hash algorithm (IEEE 802.3).
+ Crc32 Code = 0x0132 // crc32
+
+ // Crc64Ecma is a draft code tagged "hash" and described by: CRC-64 non-cryptographic hash algorithm (ECMA-182 - Annex B).
+ Crc64Ecma Code = 0x0164 // crc64-ecma
+
// Unix is a permanent code tagged "multiaddr".
Unix Code = 0x0190 // unix
@@ -330,9 +351,6 @@ const (
// P2p is a permanent code tagged "multiaddr" and described by: libp2p.
P2p Code = 0x01a5 // p2p
- // Deprecated: Ipfs is a draft code tagged "multiaddr" and described by: libp2p (deprecated).
- Ipfs Code = 0x01a5 // ipfs
-
// Https is a draft code tagged "multiaddr".
Https Code = 0x01bb // https
@@ -351,12 +369,24 @@ const (
// Tls is a draft code tagged "multiaddr".
Tls Code = 0x01c0 // tls
+ // Sni is a draft code tagged "multiaddr" and described by: Server Name Indication RFC 6066 § 3.
+ Sni Code = 0x01c1 // sni
+
// Noise is a draft code tagged "multiaddr".
Noise Code = 0x01c6 // noise
// Quic is a permanent code tagged "multiaddr".
Quic Code = 0x01cc // quic
+ // QuicV1 is a permanent code tagged "multiaddr".
+ QuicV1 Code = 0x01cd // quic-v1
+
+ // Webtransport is a draft code tagged "multiaddr".
+ Webtransport Code = 0x01d1 // webtransport
+
+ // Certhash is a draft code tagged "multiaddr" and described by: TLS certificate's fingerprint as a multihash.
+ Certhash Code = 0x01d2 // certhash
+
// Ws is a permanent code tagged "multiaddr".
Ws Code = 0x01dd // ws
@@ -381,12 +411,18 @@ const (
// Car is a draft code tagged "serialization" and described by: Content Addressable aRchive (CAR).
Car Code = 0x0202 // car
+ // IpnsRecord is a permanent code tagged "serialization" and described by: Signed IPNS Record.
+ IpnsRecord Code = 0x0300 // ipns-record
+
// Libp2pPeerRecord is a permanent code tagged "libp2p" and described by: libp2p peer record type.
Libp2pPeerRecord Code = 0x0301 // libp2p-peer-record
// Libp2pRelayRsvp is a permanent code tagged "libp2p" and described by: libp2p relay reservation voucher.
Libp2pRelayRsvp Code = 0x0302 // libp2p-relay-rsvp
+ // Memorytransport is a permanent code tagged "libp2p" and described by: in memory transport for self-dialing and testing; arbitrary.
+ Memorytransport Code = 0x0309 // memorytransport
+
// CarIndexSorted is a draft code tagged "serialization" and described by: CARv2 IndexSorted index format.
CarIndexSorted Code = 0x0400 // car-index-sorted
@@ -399,6 +435,12 @@ const (
// TransportGraphsyncFilecoinv1 is a draft code tagged "transport" and described by: Filecoin graphsync datatransfer.
TransportGraphsyncFilecoinv1 Code = 0x0910 // transport-graphsync-filecoinv1
+ // TransportIpfsGatewayHttp is a draft code tagged "transport" and described by: HTTP IPFS Gateway trustless datatransfer.
+ TransportIpfsGatewayHttp Code = 0x0920 // transport-ipfs-gateway-http
+
+ // Multidid is a draft code tagged "multiformat" and described by: Compact encoding for Decentralized Identifers.
+ Multidid Code = 0x0d1d // multidid
+
// Sha2_256Trunc254Padded is a permanent code tagged "multihash" and described by: SHA2-256 with the two most significant bits from the last byte zeroed (as via a mask with 0b00111111) - used for proving trees as in Filecoin.
Sha2_256Trunc254Padded Code = 0x1012 // sha2-256-trunc254-padded
@@ -411,7 +453,7 @@ const (
// Sha2_512_256 is a permanent code tagged "multihash" and described by: aka SHA-512/256; as specified by FIPS 180-4..
Sha2_512_256 Code = 0x1015 // sha2-512-256
- // Murmur3X64_128 is a draft code tagged "multihash".
+ // Murmur3X64_128 is a draft code tagged "hash".
Murmur3X64_128 Code = 0x1022 // murmur3-x64-128
// Ripemd128 is a draft code tagged "multihash".
@@ -447,6 +489,9 @@ const (
// RsaPub is a draft code tagged "key" and described by: RSA public key. DER-encoded ASN.1 type RSAPublicKey according to IETF RFC 8017 (PKCS #1).
RsaPub Code = 0x1205 // rsa-pub
+ // Sm2Pub is a draft code tagged "key" and described by: SM2 public key (compressed).
+ Sm2Pub Code = 0x1206 // sm2-pub
+
// Ed25519Priv is a draft code tagged "key" and described by: Ed25519 private key.
Ed25519Priv Code = 0x1300 // ed25519-priv
@@ -456,9 +501,30 @@ const (
// X25519Priv is a draft code tagged "key" and described by: Curve25519 private key.
X25519Priv Code = 0x1302 // x25519-priv
+ // Sr25519Priv is a draft code tagged "key" and described by: Sr25519 private key.
+ Sr25519Priv Code = 0x1303 // sr25519-priv
+
+ // RsaPriv is a draft code tagged "key" and described by: RSA private key.
+ RsaPriv Code = 0x1305 // rsa-priv
+
+ // P256Priv is a draft code tagged "key" and described by: P-256 private key.
+ P256Priv Code = 0x1306 // p256-priv
+
+ // P384Priv is a draft code tagged "key" and described by: P-384 private key.
+ P384Priv Code = 0x1307 // p384-priv
+
+ // P521Priv is a draft code tagged "key" and described by: P-521 private key.
+ P521Priv Code = 0x1308 // p521-priv
+
// Kangarootwelve is a draft code tagged "multihash" and described by: KangarooTwelve is an extendable-output hash function based on Keccak-p.
Kangarootwelve Code = 0x1d01 // kangarootwelve
+ // AesGcm256 is a draft code tagged "encryption" and described by: AES Galois/Counter Mode with 256-bit key and 12-byte IV.
+ AesGcm256 Code = 0x2000 // aes-gcm-256
+
+ // Silverpine is a draft code tagged "multiaddr" and described by: Experimental QUIC over yggdrasil and ironwood routing protocol.
+ Silverpine Code = 0x3f42 // silverpine
+
// Sm3_256 is a draft code tagged "multihash".
Sm3_256 Code = 0x534d // sm3-256
@@ -1422,15 +1488,63 @@ const (
// Skein1024_1024 is a draft code tagged "multihash".
Skein1024_1024 Code = 0xb3e0 // skein1024-1024
+ // Xxh32 is a draft code tagged "hash" and described by: Extremely fast non-cryptographic hash algorithm.
+ Xxh32 Code = 0xb3e1 // xxh-32
+
+ // Xxh64 is a draft code tagged "hash" and described by: Extremely fast non-cryptographic hash algorithm.
+ Xxh64 Code = 0xb3e2 // xxh-64
+
+ // Xxh3_64 is a draft code tagged "hash" and described by: Extremely fast non-cryptographic hash algorithm.
+ Xxh3_64 Code = 0xb3e3 // xxh3-64
+
+ // Xxh3_128 is a draft code tagged "hash" and described by: Extremely fast non-cryptographic hash algorithm.
+ Xxh3_128 Code = 0xb3e4 // xxh3-128
+
// PoseidonBls12_381A2Fc1 is a permanent code tagged "multihash" and described by: Poseidon using BLS12-381 and arity of 2 with Filecoin parameters.
PoseidonBls12_381A2Fc1 Code = 0xb401 // poseidon-bls12_381-a2-fc1
// PoseidonBls12_381A2Fc1Sc is a draft code tagged "multihash" and described by: Poseidon using BLS12-381 and arity of 2 with Filecoin parameters - high-security variant.
PoseidonBls12_381A2Fc1Sc Code = 0xb402 // poseidon-bls12_381-a2-fc1-sc
+ // Urdca2015Canon is a draft code tagged "ipld" and described by: The result of canonicalizing an input according to URDCA-2015 and then expressing its hash value as a multihash value..
+ Urdca2015Canon Code = 0xb403 // urdca-2015-canon
+
+ // Ssz is a draft code tagged "serialization" and described by: SimpleSerialize (SSZ) serialization.
+ Ssz Code = 0xb501 // ssz
+
+ // SszSha2_256Bmt is a draft code tagged "multihash" and described by: SSZ Merkle tree root using SHA2-256 as the hashing function and SSZ serialization for the block binary.
+ SszSha2_256Bmt Code = 0xb502 // ssz-sha2-256-bmt
+
+ // JsonJcs is a draft code tagged "ipld" and described by: The result of canonicalizing an input according to JCS - JSON Canonicalisation Scheme (RFC 8785).
+ JsonJcs Code = 0xb601 // json-jcs
+
+ // Iscc is a draft code tagged "softhash" and described by: ISCC (International Standard Content Code) - similarity preserving hash.
+ Iscc Code = 0xcc01 // iscc
+
// ZeroxcertImprint256 is a draft code tagged "zeroxcert" and described by: 0xcert Asset Imprint (root hash).
ZeroxcertImprint256 Code = 0xce11 // zeroxcert-imprint-256
+ // Varsig is a draft code tagged "varsig" and described by: Namespace for all not yet standard signature algorithms.
+ Varsig Code = 0xd000 // varsig
+
+ // Es256k is a draft code tagged "varsig" and described by: ES256K Siganture Algorithm (secp256k1).
+ Es256k Code = 0xd0e7 // es256k
+
+ // Bls12381G1Sig is a draft code tagged "varsig" and described by: G1 signature for BLS-12381-G2.
+ Bls12381G1Sig Code = 0xd0ea // bls-12381-g1-sig
+
+ // Bls12381G2Sig is a draft code tagged "varsig" and described by: G2 signature for BLS-12381-G1.
+ Bls12381G2Sig Code = 0xd0eb // bls-12381-g2-sig
+
+ // Eddsa is a draft code tagged "varsig" and described by: Edwards-Curve Digital Signature Algorithm.
+ Eddsa Code = 0xd0ed // eddsa
+
+ // Eip191 is a draft code tagged "varsig" and described by: EIP-191 Ethereum Signed Data Standard.
+ Eip191 Code = 0xd191 // eip-191
+
+ // Jwk_jcsPub is a draft code tagged "key" and described by: JSON object containing only the required members of a JWK (RFC 7518 and RFC 7517) representing the public key. Serialisation based on JCS (RFC 8785).
+ Jwk_jcsPub Code = 0xeb51 // jwk_jcs-pub
+
// FilCommitmentUnsealed is a permanent code tagged "filecoin" and described by: Filecoin piece or sector data commitment merkle node/root (CommP & CommD).
FilCommitmentUnsealed Code = 0xf101 // fil-commitment-unsealed
@@ -1466,6 +1580,21 @@ const (
// SubspaceNs is a draft code tagged "namespace" and described by: Subspace Network Namespace.
SubspaceNs Code = 0xb39910 // subspace-ns
+
+ // KumandraNs is a draft code tagged "namespace" and described by: Kumandra Network Namespace.
+ KumandraNs Code = 0xb49910 // kumandra-ns
+
+ // Es256 is a draft code tagged "varsig" and described by: ES256 Signature Algorithm.
+ Es256 Code = 0xd01200 // es256
+
+ // Es284 is a draft code tagged "varsig" and described by: ES384 Signature Algorithm.
+ Es284 Code = 0xd01201 // es284
+
+ // Es512 is a draft code tagged "varsig" and described by: ES512 Signature Algorithm.
+ Es512 Code = 0xd01202 // es512
+
+ // Rs256 is a draft code tagged "varsig" and described by: RS256 Signature Algorithm.
+ Rs256 Code = 0xd01205 // rs256
)
var knownCodes = []Code{
@@ -1495,6 +1624,7 @@ var knownCodes = []Code{
Murmur3_32,
Ip6,
Ip6zone,
+ Ipcidr,
Path,
Multicodec,
Multihash,
@@ -1549,44 +1679,53 @@ var knownCodes = []Code{
StellarTx,
Md4,
Md5,
- Bmt,
DecredBlock,
DecredTx,
- IpldNs,
- IpfsNs,
- SwarmNs,
- IpnsNs,
+ Ipld,
+ Ipfs,
+ Swarm,
+ Ipns,
Zeronet,
Secp256k1Pub,
+ Dnslink,
Bls12_381G1Pub,
Bls12_381G2Pub,
X25519Pub,
Ed25519Pub,
Bls12_381G1g2Pub,
+ Sr25519Pub,
DashBlock,
DashTx,
SwarmManifest,
SwarmFeed,
+ Beeson,
Udp,
P2pWebrtcStar,
P2pWebrtcDirect,
P2pStardust,
+ WebrtcDirect,
+ Webrtc,
P2pCircuit,
DagJson,
Udt,
Utp,
+ Crc32,
+ Crc64Ecma,
Unix,
Thread,
P2p,
- Ipfs,
Https,
Onion,
Onion3,
Garlic64,
Garlic32,
Tls,
+ Sni,
Noise,
Quic,
+ QuicV1,
+ Webtransport,
+ Certhash,
Ws,
Wss,
P2pWebsocketStar,
@@ -1595,12 +1734,16 @@ var knownCodes = []Code{
Json,
Messagepack,
Car,
+ IpnsRecord,
Libp2pPeerRecord,
Libp2pRelayRsvp,
+ Memorytransport,
CarIndexSorted,
CarMultihashIndexSorted,
TransportBitswap,
TransportGraphsyncFilecoinv1,
+ TransportIpfsGatewayHttp,
+ Multidid,
Sha2_256Trunc254Padded,
Sha2_224,
Sha2_512_224,
@@ -1617,10 +1760,18 @@ var knownCodes = []Code{
Ed448Pub,
X448Pub,
RsaPub,
+ Sm2Pub,
Ed25519Priv,
Secp256k1Priv,
X25519Priv,
+ Sr25519Priv,
+ RsaPriv,
+ P256Priv,
+ P384Priv,
+ P521Priv,
Kangarootwelve,
+ AesGcm256,
+ Silverpine,
Sm3_256,
Blake2b8,
Blake2b16,
@@ -1942,9 +2093,25 @@ var knownCodes = []Code{
Skein1024_1008,
Skein1024_1016,
Skein1024_1024,
+ Xxh32,
+ Xxh64,
+ Xxh3_64,
+ Xxh3_128,
PoseidonBls12_381A2Fc1,
PoseidonBls12_381A2Fc1Sc,
+ Urdca2015Canon,
+ Ssz,
+ SszSha2_256Bmt,
+ JsonJcs,
+ Iscc,
ZeroxcertImprint256,
+ Varsig,
+ Es256k,
+ Bls12381G1Sig,
+ Bls12381G2Sig,
+ Eddsa,
+ Eip191,
+ Jwk_jcsPub,
FilCommitmentUnsealed,
FilCommitmentSealed,
Plaintextv2,
@@ -1957,6 +2124,11 @@ var knownCodes = []Code{
SkynetNs,
ArweaveNs,
SubspaceNs,
+ KumandraNs,
+ Es256,
+ Es284,
+ Es512,
+ Rs256,
}
func (c Code) Tag() string {
@@ -1966,10 +2138,24 @@ func (c Code) Tag() string {
Cidv3:
return "cid"
+ case AesGcm256:
+ return "encryption"
+
case FilCommitmentUnsealed,
FilCommitmentSealed:
return "filecoin"
+ case Murmur3X64_64,
+ Murmur3_32,
+ Crc32,
+ Crc64Ecma,
+ Murmur3X64_128,
+ Xxh32,
+ Xxh64,
+ Xxh3_64,
+ Xxh3_128:
+ return "hash"
+
case HolochainAdrV0,
HolochainAdrV1,
HolochainKeyV0,
@@ -2015,9 +2201,12 @@ func (c Code) Tag() string {
DashTx,
SwarmManifest,
SwarmFeed,
+ Beeson,
DagJson,
Swhid1Snp,
- Json:
+ Json,
+ Urdca2015Canon,
+ JsonJcs:
return "ipld"
case Aes128,
@@ -2031,19 +2220,28 @@ func (c Code) Tag() string {
X25519Pub,
Ed25519Pub,
Bls12_381G1g2Pub,
+ Sr25519Pub,
P256Pub,
P384Pub,
P521Pub,
Ed448Pub,
X448Pub,
RsaPub,
+ Sm2Pub,
Ed25519Priv,
Secp256k1Priv,
- X25519Priv:
+ X25519Priv,
+ Sr25519Priv,
+ RsaPriv,
+ P256Priv,
+ P384Priv,
+ P521Priv,
+ Jwk_jcsPub:
return "key"
case Libp2pPeerRecord,
- Libp2pRelayRsvp:
+ Libp2pRelayRsvp,
+ Memorytransport:
return "libp2p"
case Ip4,
@@ -2051,6 +2249,7 @@ func (c Code) Tag() string {
Dccp,
Ip6,
Ip6zone,
+ Ipcidr,
Dns,
Dns4,
Dns6,
@@ -2060,6 +2259,8 @@ func (c Code) Tag() string {
P2pWebrtcStar,
P2pWebrtcDirect,
P2pStardust,
+ WebrtcDirect,
+ Webrtc,
P2pCircuit,
Udt,
Utp,
@@ -2072,12 +2273,17 @@ func (c Code) Tag() string {
Garlic64,
Garlic32,
Tls,
+ Sni,
Noise,
Quic,
+ QuicV1,
+ Webtransport,
+ Certhash,
Ws,
Wss,
P2pWebsocketStar,
Http,
+ Silverpine,
Plaintextv2:
return "multiaddr"
@@ -2085,7 +2291,8 @@ func (c Code) Tag() string {
Multihash,
Multiaddr,
Multibase,
- Caip50:
+ Caip50,
+ Multidid:
return "multiformat"
case Identity,
@@ -2104,17 +2311,13 @@ func (c Code) Tag() string {
Keccak512,
Blake3,
Sha2_384,
- Murmur3X64_64,
- Murmur3_32,
DblSha2_256,
Md4,
Md5,
- Bmt,
Sha2_256Trunc254Padded,
Sha2_224,
Sha2_512_224,
Sha2_512_256,
- Murmur3X64_128,
Ripemd128,
Ripemd160,
Ripemd256,
@@ -2443,19 +2646,22 @@ func (c Code) Tag() string {
Skein1024_1016,
Skein1024_1024,
PoseidonBls12_381A2Fc1,
- PoseidonBls12_381A2Fc1Sc:
+ PoseidonBls12_381A2Fc1Sc,
+ SszSha2_256Bmt:
return "multihash"
case Path,
Streamid,
- IpldNs,
- IpfsNs,
- SwarmNs,
- IpnsNs,
+ Ipld,
+ Ipfs,
+ Swarm,
+ Ipns,
Zeronet,
+ Dnslink,
SkynetNs,
ArweaveNs,
- SubspaceNs:
+ SubspaceNs,
+ KumandraNs:
return "namespace"
case Protobuf,
@@ -2463,14 +2669,32 @@ func (c Code) Tag() string {
Bencode,
Messagepack,
Car,
+ IpnsRecord,
CarIndexSorted,
- CarMultihashIndexSorted:
+ CarMultihashIndexSorted,
+ Ssz:
return "serialization"
+ case Iscc:
+ return "softhash"
+
case TransportBitswap,
- TransportGraphsyncFilecoinv1:
+ TransportGraphsyncFilecoinv1,
+ TransportIpfsGatewayHttp:
return "transport"
+ case Varsig,
+ Es256k,
+ Bls12381G1Sig,
+ Bls12381G2Sig,
+ Eddsa,
+ Eip191,
+ Es256,
+ Es284,
+ Es512,
+ Rs256:
+ return "varsig"
+
case ZeroxcertImprint256:
return "zeroxcert"
default:
diff --git a/vendor/github.com/multiformats/go-multicodec/version.json b/vendor/github.com/multiformats/go-multicodec/version.json
index fc15ae013..960b84e55 100644
--- a/vendor/github.com/multiformats/go-multicodec/version.json
+++ b/vendor/github.com/multiformats/go-multicodec/version.json
@@ -1,3 +1,3 @@
{
- "version": "v0.5.0"
+ "version": "v0.9.0"
}
diff --git a/vendor/github.com/multiformats/go-multihash/register/all/multihash_all.go b/vendor/github.com/multiformats/go-multihash/register/all/multihash_all.go
index 9a623c51b..26eabf748 100644
--- a/vendor/github.com/multiformats/go-multihash/register/all/multihash_all.go
+++ b/vendor/github.com/multiformats/go-multihash/register/all/multihash_all.go
@@ -1,18 +1,18 @@
/*
- This package has no purpose except to perform registration of mulithashes.
+This package has no purpose except to perform registration of mulithashes.
- It is meant to be used as a side-effecting import, e.g.
+It is meant to be used as a side-effecting import, e.g.
- import (
- _ "github.com/multiformats/go-multihash/register/all"
- )
+ import (
+ _ "github.com/multiformats/go-multihash/register/all"
+ )
- This package registers many multihashes at once.
- Importing it will increase the size of your dependency tree significantly.
- It's recommended that you import this package if you're building some
- kind of data broker application, which may need to handle many different kinds of hashes;
- if you're building an application which you know only handles a specific hash,
- importing this package may bloat your builds unnecessarily.
+This package registers many multihashes at once.
+Importing it will increase the size of your dependency tree significantly.
+It's recommended that you import this package if you're building some
+kind of data broker application, which may need to handle many different kinds of hashes;
+if you're building an application which you know only handles a specific hash,
+importing this package may bloat your builds unnecessarily.
*/
package all
diff --git a/vendor/github.com/multiformats/go-multihash/register/blake2/multihash_blake2.go b/vendor/github.com/multiformats/go-multihash/register/blake2/multihash_blake2.go
index 6794df764..cda26253c 100644
--- a/vendor/github.com/multiformats/go-multihash/register/blake2/multihash_blake2.go
+++ b/vendor/github.com/multiformats/go-multihash/register/blake2/multihash_blake2.go
@@ -18,7 +18,7 @@ import (
"golang.org/x/crypto/blake2b"
"golang.org/x/crypto/blake2s"
- "github.com/multiformats/go-multihash/core"
+ multihash "github.com/multiformats/go-multihash/core"
)
const (
diff --git a/vendor/github.com/multiformats/go-multihash/register/blake3/multihash_blake3.go b/vendor/github.com/multiformats/go-multihash/register/blake3/multihash_blake3.go
index 143fb57d6..d9131b882 100644
--- a/vendor/github.com/multiformats/go-multihash/register/blake3/multihash_blake3.go
+++ b/vendor/github.com/multiformats/go-multihash/register/blake3/multihash_blake3.go
@@ -1,11 +1,11 @@
/*
- This package has no purpose except to register the blake3 hash function.
+This package has no purpose except to register the blake3 hash function.
- It is meant to be used as a side-effecting import, e.g.
+It is meant to be used as a side-effecting import, e.g.
- import (
- _ "github.com/multiformats/go-multihash/register/blake3"
- )
+ import (
+ _ "github.com/multiformats/go-multihash/register/blake3"
+ )
*/
package blake3
@@ -14,7 +14,7 @@ import (
"lukechampine.com/blake3"
- "github.com/multiformats/go-multihash/core"
+ multihash "github.com/multiformats/go-multihash/core"
)
const DefaultSize = 32
diff --git a/vendor/github.com/multiformats/go-multihash/register/miniosha256/multihash_miniosha256.go b/vendor/github.com/multiformats/go-multihash/register/miniosha256/multihash_miniosha256.go
deleted file mode 100644
index 66eccd569..000000000
--- a/vendor/github.com/multiformats/go-multihash/register/miniosha256/multihash_miniosha256.go
+++ /dev/null
@@ -1,23 +0,0 @@
-/*
- This package has no purpose except to perform registration of multihashes.
-
- It is meant to be used as a side-effecting import, e.g.
-
- import (
- _ "github.com/multiformats/go-multihash/register/miniosha256"
- )
-
- This package registers alternative implementations for sha2-256, using
- the github.com/minio/sha256-simd library.
-*/
-package miniosha256
-
-import (
- "github.com/minio/sha256-simd"
-
- "github.com/multiformats/go-multihash/core"
-)
-
-func init() {
- multihash.Register(multihash.SHA2_256, sha256.New)
-}
diff --git a/vendor/github.com/multiformats/go-multihash/register/miniosha256/post_go1.21.go b/vendor/github.com/multiformats/go-multihash/register/miniosha256/post_go1.21.go
new file mode 100644
index 000000000..0a3541115
--- /dev/null
+++ b/vendor/github.com/multiformats/go-multihash/register/miniosha256/post_go1.21.go
@@ -0,0 +1,22 @@
+//go:build go1.21
+
+// This package has no purpose except to perform registration of multihashes.
+//
+// It is meant to be used as a side-effecting import, e.g.
+//
+// import (
+// _ "github.com/multiformats/go-multihash/register/miniosha256"
+// )
+//
+// This package registers alternative implementations for sha2-256, using
+// the github.com/minio/sha256-simd library for go1.20 and bellow. Go 1.21 and
+// later fallback to [github.com/multiformats/go-multihash/register/sha256].
+//
+// Deprecated: please switch to [github.com/multiformats/go-multihash/register/sha256]
+// as of go1.21 the go std has a SHANI implementation that is just as fast. See https://go.dev/issue/50543.
+// This will be removed shortly after go1.22 is released.
+package miniosha256
+
+import (
+ _ "github.com/multiformats/go-multihash/register/sha256"
+)
diff --git a/vendor/github.com/multiformats/go-multihash/register/miniosha256/pre_go1_21.go b/vendor/github.com/multiformats/go-multihash/register/miniosha256/pre_go1_21.go
new file mode 100644
index 000000000..270861bab
--- /dev/null
+++ b/vendor/github.com/multiformats/go-multihash/register/miniosha256/pre_go1_21.go
@@ -0,0 +1,29 @@
+//go:build !go1.21
+
+// This package has no purpose except to perform registration of multihashes.
+//
+// It is meant to be used as a side-effecting import, e.g.
+//
+// import (
+// _ "github.com/multiformats/go-multihash/register/miniosha256"
+// )
+//
+// This package registers alternative implementations for sha2-256, using
+// the github.com/minio/sha256-simd library for go1.20 and bellow. Go 1.21 and
+// later fallback to [github.com/multiformats/go-multihash/register/sha256].
+//
+// Note if you are using go1.21 or above this package is deprecated in favor of
+// [github.com/multiformats/go-multihash/register/sha256] because as of go1.21
+// the go std has a SHANI implementation that is just as fast. See https://go.dev/issue/50543.
+// This will be removed shortly after go1.22 is released.
+package miniosha256
+
+import (
+ "github.com/minio/sha256-simd"
+
+ multihash "github.com/multiformats/go-multihash/core"
+)
+
+func init() {
+ multihash.Register(multihash.SHA2_256, sha256.New)
+}
diff --git a/vendor/github.com/multiformats/go-multihash/register/murmur3/multihash_murmur3.go b/vendor/github.com/multiformats/go-multihash/register/murmur3/multihash_murmur3.go
index cdf6d6943..15890e549 100644
--- a/vendor/github.com/multiformats/go-multihash/register/murmur3/multihash_murmur3.go
+++ b/vendor/github.com/multiformats/go-multihash/register/murmur3/multihash_murmur3.go
@@ -1,13 +1,13 @@
/*
- This package has no purpose except to perform registration of multihashes.
+This package has no purpose except to perform registration of multihashes.
- It is meant to be used as a side-effecting import, e.g.
+It is meant to be used as a side-effecting import, e.g.
- import (
- _ "github.com/multiformats/go-multihash/register/murmur3"
- )
+ import (
+ _ "github.com/multiformats/go-multihash/register/murmur3"
+ )
- This package registers multihashes for murmur3
+This package registers multihashes for murmur3
*/
package murmur3
diff --git a/vendor/github.com/multiformats/go-multihash/register/sha256/sha256.go b/vendor/github.com/multiformats/go-multihash/register/sha256/sha256.go
new file mode 100644
index 000000000..b5f30b2c6
--- /dev/null
+++ b/vendor/github.com/multiformats/go-multihash/register/sha256/sha256.go
@@ -0,0 +1,21 @@
+// This package has no purpose except to perform registration of multihashes.
+//
+// It is meant to be used as a side-effecting import, e.g.
+//
+// import (
+// _ "github.com/multiformats/go-multihash/register/sha256"
+// )
+//
+// This package an implementation of sha256 using the go std, this is recomanded
+// if you are using go1.21 or above.
+package sha256
+
+import (
+ "crypto/sha256"
+
+ multihash "github.com/multiformats/go-multihash/core"
+)
+
+func init() {
+ multihash.Register(multihash.SHA2_256, sha256.New)
+}
diff --git a/vendor/github.com/multiformats/go-multihash/register/sha3/multihash_sha3.go b/vendor/github.com/multiformats/go-multihash/register/sha3/multihash_sha3.go
index db70b2ba3..07e5c2ff0 100644
--- a/vendor/github.com/multiformats/go-multihash/register/sha3/multihash_sha3.go
+++ b/vendor/github.com/multiformats/go-multihash/register/sha3/multihash_sha3.go
@@ -1,15 +1,15 @@
/*
- This package has no purpose except to perform registration of multihashes.
+This package has no purpose except to perform registration of multihashes.
- It is meant to be used as a side-effecting import, e.g.
+It is meant to be used as a side-effecting import, e.g.
- import (
- _ "github.com/multiformats/go-multihash/register/sha3"
- )
+ import (
+ _ "github.com/multiformats/go-multihash/register/sha3"
+ )
- This package registers several multihashes for the sha3 family.
- This also includes some functions known as "shake" and "keccak",
- since they share much of their implementation and come in the same repos.
+This package registers several multihashes for the sha3 family.
+This also includes some functions known as "shake" and "keccak",
+since they share much of their implementation and come in the same repos.
*/
package sha3
@@ -18,7 +18,7 @@ import (
"golang.org/x/crypto/sha3"
- "github.com/multiformats/go-multihash/core"
+ multihash "github.com/multiformats/go-multihash/core"
)
func init() {
diff --git a/vendor/github.com/multiformats/go-multihash/sum.go b/vendor/github.com/multiformats/go-multihash/sum.go
index d40b5aabd..cf87bb4c0 100644
--- a/vendor/github.com/multiformats/go-multihash/sum.go
+++ b/vendor/github.com/multiformats/go-multihash/sum.go
@@ -24,7 +24,9 @@ func Sum(data []byte, code uint64, length int) (Multihash, error) {
}
// Feed data in.
- hasher.Write(data)
+ if _, err := hasher.Write(data); err != nil {
+ return nil, err
+ }
return encodeHash(hasher, code, length)
}
diff --git a/vendor/github.com/multiformats/go-multihash/version.json b/vendor/github.com/multiformats/go-multihash/version.json
index 88bcb867b..65b4a1520 100644
--- a/vendor/github.com/multiformats/go-multihash/version.json
+++ b/vendor/github.com/multiformats/go-multihash/version.json
@@ -1,3 +1,3 @@
{
- "version": "v0.2.1"
+ "version": "v0.2.2"
}
diff --git a/vendor/github.com/multiformats/go-multistream/.gitignore b/vendor/github.com/multiformats/go-multistream/.gitignore
deleted file mode 100644
index 29585fe79..000000000
--- a/vendor/github.com/multiformats/go-multistream/.gitignore
+++ /dev/null
@@ -1 +0,0 @@
-*-fuzz.zip
diff --git a/vendor/github.com/multiformats/go-multistream/README.md b/vendor/github.com/multiformats/go-multistream/README.md
index 9666d2045..f766b6939 100644
--- a/vendor/github.com/multiformats/go-multistream/README.md
+++ b/vendor/github.com/multiformats/go-multistream/README.md
@@ -53,7 +53,7 @@ import (
// "/cats" and "/dogs" and exposes it on a localhost:8765. It then opens connections
// to that port, selects the protocols and tests that the handlers are working.
func main() {
- mux := ms.NewMultistreamMuxer()
+ mux := ms.NewMultistreamMuxer[string]()
mux.AddHandler("/cats", func(proto string, rwc io.ReadWriteCloser) error {
fmt.Fprintln(rwc, proto, ": HELLO I LIKE CATS")
return rwc.Close()
diff --git a/vendor/github.com/multiformats/go-multistream/client.go b/vendor/github.com/multiformats/go-multistream/client.go
index 811e3b395..013dd5abc 100644
--- a/vendor/github.com/multiformats/go-multistream/client.go
+++ b/vendor/github.com/multiformats/go-multistream/client.go
@@ -13,9 +13,22 @@ import (
"strings"
)
-// ErrNotSupported is the error returned when the muxer does not support
-// the protocol specified for the handshake.
-var ErrNotSupported = errors.New("protocol not supported")
+// ErrNotSupported is the error returned when the muxer doesn't support
+// the protocols tried for the handshake.
+type ErrNotSupported[T StringLike] struct {
+
+ // Slice of protocols that were not supported by the muxer
+ Protos []T
+}
+
+func (e ErrNotSupported[T]) Error() string {
+ return fmt.Sprintf("protocols not supported: %v", e.Protos)
+}
+
+func (e ErrNotSupported[T]) Is(target error) bool {
+ _, ok := target.(ErrNotSupported[T])
+ return ok
+}
// ErrNoProtocols is the error returned when the no protocols have been
// specified.
@@ -31,7 +44,7 @@ const (
// to inform the muxer of the protocol that will be used to communicate
// on this ReadWriteCloser. It returns an error if, for example,
// the muxer does not know how to handle this protocol.
-func SelectProtoOrFail(proto string, rwc io.ReadWriteCloser) (err error) {
+func SelectProtoOrFail[T StringLike](proto T, rwc io.ReadWriteCloser) (err error) {
defer func() {
if rerr := recover(); rerr != nil {
fmt.Fprintf(os.Stderr, "caught panic: %s\n%s\n", rerr, debug.Stack())
@@ -66,7 +79,7 @@ func SelectProtoOrFail(proto string, rwc io.ReadWriteCloser) (err error) {
// SelectOneOf will perform handshakes with the protocols on the given slice
// until it finds one which is supported by the muxer.
-func SelectOneOf(protos []string, rwc io.ReadWriteCloser) (proto string, err error) {
+func SelectOneOf[T StringLike](protos []T, rwc io.ReadWriteCloser) (proto T, err error) {
defer func() {
if rerr := recover(); rerr != nil {
fmt.Fprintf(os.Stderr, "caught panic: %s\n%s\n", rerr, debug.Stack())
@@ -83,21 +96,25 @@ func SelectOneOf(protos []string, rwc io.ReadWriteCloser) (proto string, err err
// can continue negotiating the rest of the protocols normally.
//
// This saves us a round trip.
- switch err := SelectProtoOrFail(protos[0], rwc); err {
+ switch err := SelectProtoOrFail(protos[0], rwc); err.(type) {
case nil:
return protos[0], nil
- case ErrNotSupported: // try others
+ case ErrNotSupported[T]: // try others
default:
return "", err
}
- return selectProtosOrFail(protos[1:], rwc)
+ proto, err = selectProtosOrFail(protos[1:], rwc)
+ if _, ok := err.(ErrNotSupported[T]); ok {
+ return "", ErrNotSupported[T]{protos}
+ }
+ return proto, err
}
const simOpenProtocol = "/libp2p/simultaneous-connect"
// SelectWithSimopenOrFail performs protocol negotiation with the simultaneous open extension.
// The returned boolean indicator will be true if we should act as a server.
-func SelectWithSimopenOrFail(protos []string, rwc io.ReadWriteCloser) (proto string, isServer bool, err error) {
+func SelectWithSimopenOrFail[T StringLike](protos []T, rwc io.ReadWriteCloser) (proto T, isServer bool, err error) {
defer func() {
if rerr := recover(); rerr != nil {
fmt.Fprintf(os.Stderr, "caught panic: %s\n%s\n", rerr, debug.Stack())
@@ -125,7 +142,7 @@ func SelectWithSimopenOrFail(protos []string, rwc io.ReadWriteCloser) (proto str
return "", false, err
}
- tok, err := ReadNextToken(rwc)
+ tok, err := ReadNextToken[T](rwc)
if err != nil {
return "", false, err
}
@@ -146,13 +163,13 @@ func SelectWithSimopenOrFail(protos []string, rwc io.ReadWriteCloser) (proto str
}
return proto, false, nil
default:
- return "", false, errors.New("unexpected response: " + tok)
+ return "", false, fmt.Errorf("unexpected response: %s", tok)
}
}
-func clientOpen(protos []string, rwc io.ReadWriteCloser) (string, error) {
+func clientOpen[T StringLike](protos []T, rwc io.ReadWriteCloser) (T, error) {
// check to see if we selected the pipelined protocol
- tok, err := ReadNextToken(rwc)
+ tok, err := ReadNextToken[T](rwc)
if err != nil {
return "", err
}
@@ -161,27 +178,31 @@ func clientOpen(protos []string, rwc io.ReadWriteCloser) (string, error) {
case protos[0]:
return tok, nil
case "na":
- return selectProtosOrFail(protos[1:], rwc)
+ proto, err := selectProtosOrFail(protos[1:], rwc)
+ if _, ok := err.(ErrNotSupported[T]); ok {
+ return "", ErrNotSupported[T]{protos}
+ }
+ return proto, err
default:
- return "", errors.New("unexpected response: " + tok)
+ return "", fmt.Errorf("unexpected response: %s", tok)
}
}
-func selectProtosOrFail(protos []string, rwc io.ReadWriteCloser) (string, error) {
+func selectProtosOrFail[T StringLike](protos []T, rwc io.ReadWriteCloser) (T, error) {
for _, p := range protos {
err := trySelect(p, rwc)
- switch err {
+ switch err := err.(type) {
case nil:
return p, nil
- case ErrNotSupported:
+ case ErrNotSupported[T]:
default:
return "", err
}
}
- return "", ErrNotSupported
+ return "", ErrNotSupported[T]{protos}
}
-func simOpen(protos []string, rwc io.ReadWriteCloser) (string, bool, error) {
+func simOpen[T StringLike](protos []T, rwc io.ReadWriteCloser) (T, bool, error) {
randBytes := make([]byte, 8)
_, err := rand.Read(randBytes)
if err != nil {
@@ -198,17 +219,17 @@ func simOpen(protos []string, rwc io.ReadWriteCloser) (string, bool, error) {
// skip exactly one protocol
// see https://github.com/multiformats/go-multistream/pull/42#discussion_r558757135
- _, err = ReadNextToken(rwc)
+ _, err = ReadNextToken[T](rwc)
if err != nil {
return "", false, err
}
// read the tie breaker nonce
- tok, err := ReadNextToken(rwc)
+ tok, err := ReadNextToken[T](rwc)
if err != nil {
return "", false, err
}
- if !strings.HasPrefix(tok, tieBreakerPrefix) {
+ if !strings.HasPrefix(string(tok), tieBreakerPrefix) {
return "", false, errors.New("tie breaker nonce not sent with the correct prefix")
}
@@ -216,7 +237,7 @@ func simOpen(protos []string, rwc io.ReadWriteCloser) (string, bool, error) {
return "", false, err
}
- peerNonce, err := strconv.ParseUint(tok[len(tieBreakerPrefix):], 10, 64)
+ peerNonce, err := strconv.ParseUint(string(tok[len(tieBreakerPrefix):]), 10, 64)
if err != nil {
return "", false, err
}
@@ -228,7 +249,7 @@ func simOpen(protos []string, rwc io.ReadWriteCloser) (string, bool, error) {
}
iamserver = peerNonce > myNonce
- var proto string
+ var proto T
if iamserver {
proto, err = simOpenSelectServer(protos, rwc)
} else {
@@ -238,29 +259,28 @@ func simOpen(protos []string, rwc io.ReadWriteCloser) (string, bool, error) {
return proto, iamserver, err
}
-func simOpenSelectServer(protos []string, rwc io.ReadWriteCloser) (string, error) {
+func simOpenSelectServer[T StringLike](protos []T, rwc io.ReadWriteCloser) (T, error) {
werrCh := make(chan error, 1)
go func() {
err := delimWriteBuffered(rwc, []byte(responder))
werrCh <- err
}()
- tok, err := ReadNextToken(rwc)
+ tok, err := ReadNextToken[T](rwc)
if err != nil {
return "", err
}
if tok != initiator {
- return "", errors.New("unexpected response: " + tok)
+ return "", fmt.Errorf("unexpected response: %s", tok)
}
if err = <-werrCh; err != nil {
return "", err
}
-
for {
- tok, err = ReadNextToken(rwc)
+ tok, err = ReadNextToken[T](rwc)
if err == io.EOF {
- return "", ErrNotSupported
+ return "", ErrNotSupported[T]{protos}
}
if err != nil {
@@ -286,19 +306,19 @@ func simOpenSelectServer(protos []string, rwc io.ReadWriteCloser) (string, error
}
-func simOpenSelectClient(protos []string, rwc io.ReadWriteCloser) (string, error) {
+func simOpenSelectClient[T StringLike](protos []T, rwc io.ReadWriteCloser) (T, error) {
werrCh := make(chan error, 1)
go func() {
err := delimWriteBuffered(rwc, []byte(initiator))
werrCh <- err
}()
- tok, err := ReadNextToken(rwc)
+ tok, err := ReadNextToken[T](rwc)
if err != nil {
return "", err
}
if tok != responder {
- return "", errors.New("unexpected response: " + tok)
+ return "", fmt.Errorf("unexpected response: %s", tok)
}
if err = <-werrCh; err != nil {
return "", err
@@ -308,7 +328,7 @@ func simOpenSelectClient(protos []string, rwc io.ReadWriteCloser) (string, error
}
func readMultistreamHeader(r io.Reader) error {
- tok, err := ReadNextToken(r)
+ tok, err := ReadNextToken[string](r)
if err != nil {
return err
}
@@ -319,7 +339,7 @@ func readMultistreamHeader(r io.Reader) error {
return nil
}
-func trySelect(proto string, rwc io.ReadWriteCloser) error {
+func trySelect[T StringLike](proto T, rwc io.ReadWriteCloser) error {
err := delimWriteBuffered(rwc, []byte(proto))
if err != nil {
return err
@@ -327,8 +347,8 @@ func trySelect(proto string, rwc io.ReadWriteCloser) error {
return readProto(proto, rwc)
}
-func readProto(proto string, r io.Reader) error {
- tok, err := ReadNextToken(r)
+func readProto[T StringLike](proto T, r io.Reader) error {
+ tok, err := ReadNextToken[T](r)
if err != nil {
return err
}
@@ -337,8 +357,8 @@ func readProto(proto string, r io.Reader) error {
case proto:
return nil
case "na":
- return ErrNotSupported
+ return ErrNotSupported[T]{[]T{proto}}
default:
- return errors.New("unrecognized response: " + tok)
+ return fmt.Errorf("unrecognized response: %s", tok)
}
}
diff --git a/vendor/github.com/multiformats/go-multistream/lazyClient.go b/vendor/github.com/multiformats/go-multistream/lazyClient.go
index 76d79ffe6..6145eafc2 100644
--- a/vendor/github.com/multiformats/go-multistream/lazyClient.go
+++ b/vendor/github.com/multiformats/go-multistream/lazyClient.go
@@ -8,9 +8,9 @@ import (
// NewMSSelect returns a new Multistream which is able to perform
// protocol selection with a MultistreamMuxer.
-func NewMSSelect(c io.ReadWriteCloser, proto string) LazyConn {
- return &lazyClientConn{
- protos: []string{ProtocolID, proto},
+func NewMSSelect[T StringLike](c io.ReadWriteCloser, proto T) LazyConn {
+ return &lazyClientConn[T]{
+ protos: []T{ProtocolID, proto},
con: c,
}
}
@@ -18,9 +18,9 @@ func NewMSSelect(c io.ReadWriteCloser, proto string) LazyConn {
// NewMultistream returns a multistream for the given protocol. This will not
// perform any protocol selection. If you are using a MultistreamMuxer, use
// NewMSSelect.
-func NewMultistream(c io.ReadWriteCloser, proto string) LazyConn {
- return &lazyClientConn{
- protos: []string{proto},
+func NewMultistream[T StringLike](c io.ReadWriteCloser, proto T) LazyConn {
+ return &lazyClientConn[T]{
+ protos: []T{proto},
con: c,
}
}
@@ -31,7 +31,7 @@ func NewMultistream(c io.ReadWriteCloser, proto string) LazyConn {
// It *does not* block writes waiting for the other end to respond. Instead, it
// simply assumes the negotiation went successfully and starts writing data.
// See: https://github.com/multiformats/go-multistream/issues/20
-type lazyClientConn struct {
+type lazyClientConn[T StringLike] struct {
// Used to ensure we only trigger the write half of the handshake once.
rhandshakeOnce sync.Once
rerr error
@@ -41,7 +41,7 @@ type lazyClientConn struct {
werr error
// The sequence of protocols to negotiate.
- protos []string
+ protos []T
// The inner connection.
con io.ReadWriteCloser
@@ -53,7 +53,7 @@ type lazyClientConn struct {
// half of the handshake and then waits for the read half to complete.
//
// It returns an error if the read half of the handshake fails.
-func (l *lazyClientConn) Read(b []byte) (int, error) {
+func (l *lazyClientConn[T]) Read(b []byte) (int, error) {
l.rhandshakeOnce.Do(func() {
go l.whandshakeOnce.Do(l.doWriteHandshake)
l.doReadHandshake()
@@ -68,17 +68,17 @@ func (l *lazyClientConn) Read(b []byte) (int, error) {
return l.con.Read(b)
}
-func (l *lazyClientConn) doReadHandshake() {
+func (l *lazyClientConn[T]) doReadHandshake() {
for _, proto := range l.protos {
// read protocol
- tok, err := ReadNextToken(l.con)
+ tok, err := ReadNextToken[T](l.con)
if err != nil {
l.rerr = err
return
}
if tok == "na" {
- l.rerr = ErrNotSupported
+ l.rerr = ErrNotSupported[T]{[]T{proto}}
return
}
if tok != proto {
@@ -88,12 +88,12 @@ func (l *lazyClientConn) doReadHandshake() {
}
}
-func (l *lazyClientConn) doWriteHandshake() {
+func (l *lazyClientConn[T]) doWriteHandshake() {
l.doWriteHandshakeWithData(nil)
}
// Perform the write handshake but *also* write some extra data.
-func (l *lazyClientConn) doWriteHandshakeWithData(extra []byte) int {
+func (l *lazyClientConn[T]) doWriteHandshakeWithData(extra []byte) int {
buf := getWriter(l.con)
defer putWriter(buf)
@@ -122,7 +122,7 @@ func (l *lazyClientConn) doWriteHandshakeWithData(extra []byte) int {
//
// Write *also* ignores errors from the read half of the handshake (in case the
// stream is actually write only).
-func (l *lazyClientConn) Write(b []byte) (int, error) {
+func (l *lazyClientConn[T]) Write(b []byte) (int, error) {
n := 0
l.whandshakeOnce.Do(func() {
go l.rhandshakeOnce.Do(l.doReadHandshake)
@@ -137,7 +137,7 @@ func (l *lazyClientConn) Write(b []byte) (int, error) {
// Close closes the underlying io.ReadWriteCloser
//
// This does not flush anything.
-func (l *lazyClientConn) Close() error {
+func (l *lazyClientConn[T]) Close() error {
// As the client, we flush the handshake on close to cover an
// interesting edge-case where the server only speaks a single protocol
// and responds eagerly with that protocol before waiting for out
@@ -151,7 +151,7 @@ func (l *lazyClientConn) Close() error {
}
// Flush sends the handshake.
-func (l *lazyClientConn) Flush() error {
+func (l *lazyClientConn[T]) Flush() error {
l.whandshakeOnce.Do(func() {
go l.rhandshakeOnce.Do(l.doReadHandshake)
l.doWriteHandshake()
diff --git a/vendor/github.com/multiformats/go-multistream/multistream.go b/vendor/github.com/multiformats/go-multistream/multistream.go
index 9f3a1a4ca..17e1ef796 100644
--- a/vendor/github.com/multiformats/go-multistream/multistream.go
+++ b/vendor/github.com/multiformats/go-multistream/multistream.go
@@ -7,10 +7,9 @@ import (
"bufio"
"errors"
"fmt"
+ "io"
"os"
"runtime/debug"
-
- "io"
"sync"
"github.com/multiformats/go-varint"
@@ -29,29 +28,35 @@ var writerPool = sync.Pool{
},
}
+// StringLike is an interface that supports all types with underlying type
+// string
+type StringLike interface {
+ ~string
+}
+
// HandlerFunc is a user-provided function used by the MultistreamMuxer to
// handle a protocol/stream.
-type HandlerFunc = func(protocol string, rwc io.ReadWriteCloser) error
+type HandlerFunc[T StringLike] func(protocol T, rwc io.ReadWriteCloser) error
// Handler is a wrapper to HandlerFunc which attaches a name (protocol) and a
// match function which can optionally be used to select a handler by other
// means than the name.
-type Handler struct {
- MatchFunc func(string) bool
- Handle HandlerFunc
- AddName string
+type Handler[T StringLike] struct {
+ MatchFunc func(T) bool
+ Handle HandlerFunc[T]
+ AddName T
}
// MultistreamMuxer is a muxer for multistream. Depending on the stream
// protocol tag it will select the right handler and hand the stream off to it.
-type MultistreamMuxer struct {
+type MultistreamMuxer[T StringLike] struct {
handlerlock sync.RWMutex
- handlers []Handler
+ handlers []Handler[T]
}
// NewMultistreamMuxer creates a muxer.
-func NewMultistreamMuxer() *MultistreamMuxer {
- return new(MultistreamMuxer)
+func NewMultistreamMuxer[T StringLike]() *MultistreamMuxer[T] {
+ return new(MultistreamMuxer[T])
}
// LazyConn is the connection type returned by the lazy negotiation functions.
@@ -111,26 +116,26 @@ func delimWrite(w io.Writer, mes []byte) error {
return nil
}
-func fulltextMatch(s string) func(string) bool {
- return func(a string) bool {
+func fulltextMatch[T StringLike](s T) func(T) bool {
+ return func(a T) bool {
return a == s
}
}
// AddHandler attaches a new protocol handler to the muxer.
-func (msm *MultistreamMuxer) AddHandler(protocol string, handler HandlerFunc) {
+func (msm *MultistreamMuxer[T]) AddHandler(protocol T, handler HandlerFunc[T]) {
msm.AddHandlerWithFunc(protocol, fulltextMatch(protocol), handler)
}
// AddHandlerWithFunc attaches a new protocol handler to the muxer with a match.
// If the match function returns true for a given protocol tag, the protocol
// will be selected even if the handler name and protocol tags are different.
-func (msm *MultistreamMuxer) AddHandlerWithFunc(protocol string, match func(string) bool, handler HandlerFunc) {
+func (msm *MultistreamMuxer[T]) AddHandlerWithFunc(protocol T, match func(T) bool, handler HandlerFunc[T]) {
msm.handlerlock.Lock()
defer msm.handlerlock.Unlock()
msm.removeHandler(protocol)
- msm.handlers = append(msm.handlers, Handler{
+ msm.handlers = append(msm.handlers, Handler[T]{
MatchFunc: match,
Handle: handler,
AddName: protocol,
@@ -138,14 +143,14 @@ func (msm *MultistreamMuxer) AddHandlerWithFunc(protocol string, match func(stri
}
// RemoveHandler removes the handler with the given name from the muxer.
-func (msm *MultistreamMuxer) RemoveHandler(protocol string) {
+func (msm *MultistreamMuxer[T]) RemoveHandler(protocol T) {
msm.handlerlock.Lock()
defer msm.handlerlock.Unlock()
msm.removeHandler(protocol)
}
-func (msm *MultistreamMuxer) removeHandler(protocol string) {
+func (msm *MultistreamMuxer[T]) removeHandler(protocol T) {
for i, h := range msm.handlers {
if h.AddName == protocol {
msm.handlers = append(msm.handlers[:i], msm.handlers[i+1:]...)
@@ -155,11 +160,11 @@ func (msm *MultistreamMuxer) removeHandler(protocol string) {
}
// Protocols returns the list of handler-names added to this this muxer.
-func (msm *MultistreamMuxer) Protocols() []string {
+func (msm *MultistreamMuxer[T]) Protocols() []T {
msm.handlerlock.RLock()
defer msm.handlerlock.RUnlock()
- var out []string
+ var out []T
for _, h := range msm.handlers {
out = append(out, h.AddName)
}
@@ -171,7 +176,7 @@ func (msm *MultistreamMuxer) Protocols() []string {
// fails because of a ProtocolID mismatch.
var ErrIncorrectVersion = errors.New("client connected with incorrect version")
-func (msm *MultistreamMuxer) findHandler(proto string) *Handler {
+func (msm *MultistreamMuxer[T]) findHandler(proto T) *Handler[T] {
msm.handlerlock.RLock()
defer msm.handlerlock.RUnlock()
@@ -184,19 +189,9 @@ func (msm *MultistreamMuxer) findHandler(proto string) *Handler {
return nil
}
-// NegotiateLazy performs protocol selection and returns
-// a multistream, the protocol used, the handler and an error. It is lazy
-// because the write-handshake is performed on a subroutine, allowing this
-// to return before that handshake is completed.
-// Deprecated: use Negotiate instead.
-func (msm *MultistreamMuxer) NegotiateLazy(rwc io.ReadWriteCloser) (rwc_ io.ReadWriteCloser, proto string, handler HandlerFunc, err error) {
- proto, handler, err = msm.Negotiate(rwc)
- return rwc, proto, handler, err
-}
-
// Negotiate performs protocol selection and returns the protocol name and
// the matching handler function for it (or an error).
-func (msm *MultistreamMuxer) Negotiate(rwc io.ReadWriteCloser) (proto string, handler HandlerFunc, err error) {
+func (msm *MultistreamMuxer[T]) Negotiate(rwc io.ReadWriteCloser) (proto T, handler HandlerFunc[T], err error) {
defer func() {
if rerr := recover(); rerr != nil {
fmt.Fprintf(os.Stderr, "caught panic: %s\n%s\n", rerr, debug.Stack())
@@ -209,8 +204,7 @@ func (msm *MultistreamMuxer) Negotiate(rwc io.ReadWriteCloser) (proto string, ha
// other side has closed this rwc for writing. They may have sent us a
// message and closed. Future writers will get an error anyways.
_ = delimWriteBuffered(rwc, []byte(ProtocolID))
-
- line, err := ReadNextToken(rwc)
+ line, err := ReadNextToken[T](rwc)
if err != nil {
return "", nil, err
}
@@ -223,7 +217,7 @@ func (msm *MultistreamMuxer) Negotiate(rwc io.ReadWriteCloser) (proto string, ha
loop:
for {
// Now read and respond to commands until they send a valid protocol id
- tok, err := ReadNextToken(rwc)
+ tok, err := ReadNextToken[T](rwc)
if err != nil {
return "", nil, err
}
@@ -250,7 +244,7 @@ loop:
// Handle performs protocol negotiation on a ReadWriteCloser
// (i.e. a connection). It will find a matching handler for the
// incoming protocol and pass the ReadWriteCloser to it.
-func (msm *MultistreamMuxer) Handle(rwc io.ReadWriteCloser) error {
+func (msm *MultistreamMuxer[T]) Handle(rwc io.ReadWriteCloser) error {
p, h, err := msm.Negotiate(rwc)
if err != nil {
return err
@@ -260,13 +254,13 @@ func (msm *MultistreamMuxer) Handle(rwc io.ReadWriteCloser) error {
// ReadNextToken extracts a token from a Reader. It is used during
// protocol negotiation and returns a string.
-func ReadNextToken(r io.Reader) (string, error) {
+func ReadNextToken[T StringLike](r io.Reader) (T, error) {
tok, err := ReadNextTokenBytes(r)
if err != nil {
return "", err
}
- return string(tok), nil
+ return T(tok), nil
}
// ReadNextTokenBytes extracts a token from a Reader. It is used
diff --git a/vendor/github.com/multiformats/go-multistream/multistream_fuzz.go b/vendor/github.com/multiformats/go-multistream/multistream_fuzz.go
deleted file mode 100644
index 3d65ca600..000000000
--- a/vendor/github.com/multiformats/go-multistream/multistream_fuzz.go
+++ /dev/null
@@ -1,29 +0,0 @@
-//go:build gofuzz
-// +build gofuzz
-
-package multistream
-
-import "bytes"
-
-type rwc struct {
- *bytes.Reader
-}
-
-func (*rwc) Write(b []byte) (int, error) {
- return len(b), nil
-}
-
-func (*rwc) Close() error {
- return nil
-}
-
-func Fuzz(b []byte) int {
- readStream := bytes.NewReader(b)
- input := &rwc{readStream}
-
- mux := NewMultistreamMuxer()
- mux.AddHandler("/a", nil)
- mux.AddHandler("/b", nil)
- _ = mux.Handle(input)
- return 1
-}
diff --git a/vendor/github.com/multiformats/go-multistream/version.json b/vendor/github.com/multiformats/go-multistream/version.json
index 1f94dbba4..26a7d4785 100644
--- a/vendor/github.com/multiformats/go-multistream/version.json
+++ b/vendor/github.com/multiformats/go-multistream/version.json
@@ -1,3 +1,3 @@
{
- "version": "v0.3.3"
+ "version": "v0.4.1"
}
diff --git a/vendor/github.com/multiformats/go-varint/.travis.yml b/vendor/github.com/multiformats/go-varint/.travis.yml
deleted file mode 100644
index 248d09b67..000000000
--- a/vendor/github.com/multiformats/go-varint/.travis.yml
+++ /dev/null
@@ -1,30 +0,0 @@
-os:
- - linux
-
-language: go
-
-go:
- - 1.11.x
-
-env:
- global:
- - GOTFLAGS="-race"
- - GO111MODULE=on
- matrix:
- - BUILD_DEPTYPE=gomod
-
-
-# disable travis install
-install:
- - true
-
-script:
- - bash <(curl -s https://raw.githubusercontent.com/ipfs/ci-helpers/master/travis-ci/run-standard-tests.sh)
-
-cache:
- directories:
- - $GOPATH/pkg/mod
- - /home/travis/.cache/go-build
-
-notifications:
- email: false
diff --git a/vendor/github.com/multiformats/go-varint/varint.go b/vendor/github.com/multiformats/go-varint/varint.go
index 47340d9b2..f0e85d71f 100644
--- a/vendor/github.com/multiformats/go-varint/varint.go
+++ b/vendor/github.com/multiformats/go-varint/varint.go
@@ -76,10 +76,10 @@ func ReadUvarint(r io.ByteReader) (uint64, error) {
// released under the BSD License.
var x uint64
var s uint
- for i := 0; ; i++ {
+ for s = 0; ; s += 7 {
b, err := r.ReadByte()
if err != nil {
- if err == io.EOF && i != 0 {
+ if err == io.EOF && s != 0 {
// "eof" will look like a success.
// If we've read part of a value, this is not a
// success.
@@ -87,7 +87,7 @@ func ReadUvarint(r io.ByteReader) (uint64, error) {
}
return 0, err
}
- if (i == 8 && b >= 0x80) || i >= MaxLenUvarint63 {
+ if (s == 56 && b >= 0x80) || s >= (7*MaxLenUvarint63) {
// this is the 9th and last byte we're willing to read, but it
// signals there's more (1 in MSB).
// or this is the >= 10th byte, and for some reason we're still here.
@@ -100,7 +100,6 @@ func ReadUvarint(r io.ByteReader) (uint64, error) {
return x | uint64(b)< 0 {
- elapsed := now.Sub(b.Lastupdate)
-
- b.Fill -= float64(elapsed) / float64(b.LeakInterval)
- if b.Fill < 0 {
- b.Fill = 0
- }
- }
- b.Lastupdate = now
-}
-
-func (b *LeakyBucket) Pour(amount uint16) bool {
- b.updateFill()
-
- var newfill float64 = b.Fill + float64(amount)
-
- if newfill > float64(b.Size) {
- return false
- }
-
- b.Fill = newfill
-
- return true
-}
-
-// The time at which this bucket will be completely drained
-func (b *LeakyBucket) DrainedAt() time.Time {
- return b.Lastupdate.Add(time.Duration(b.Fill * float64(b.LeakInterval)))
-}
-
-// The duration until this bucket is completely drained
-func (b *LeakyBucket) TimeToDrain() time.Duration {
- return b.DrainedAt().Sub(b.Now())
-}
-
-func (b *LeakyBucket) TimeSinceLastUpdate() time.Duration {
- return b.Now().Sub(b.Lastupdate)
-}
-
-type LeakyBucketSer struct {
- Size uint16
- Fill float64
- LeakInterval time.Duration // time.Duration for 1 unit of size to leak
- Lastupdate time.Time
-}
-
-func (b *LeakyBucket) Serialise() *LeakyBucketSer {
- bucket := LeakyBucketSer{
- Size: b.Size,
- Fill: b.Fill,
- LeakInterval: b.LeakInterval,
- Lastupdate: b.Lastupdate,
- }
-
- return &bucket
-}
-
-func (b *LeakyBucketSer) DeSerialise() *LeakyBucket {
- bucket := LeakyBucket{
- Size: b.Size,
- Fill: b.Fill,
- LeakInterval: b.LeakInterval,
- Lastupdate: b.Lastupdate,
- Now: time.Now,
- }
-
- return &bucket
-}
diff --git a/vendor/github.com/nxadm/tail/ratelimiter/memory.go b/vendor/github.com/nxadm/tail/ratelimiter/memory.go
deleted file mode 100644
index bf3c2131b..000000000
--- a/vendor/github.com/nxadm/tail/ratelimiter/memory.go
+++ /dev/null
@@ -1,60 +0,0 @@
-package ratelimiter
-
-import (
- "errors"
- "time"
-)
-
-const (
- GC_SIZE int = 100
- GC_PERIOD time.Duration = 60 * time.Second
-)
-
-type Memory struct {
- store map[string]LeakyBucket
- lastGCCollected time.Time
-}
-
-func NewMemory() *Memory {
- m := new(Memory)
- m.store = make(map[string]LeakyBucket)
- m.lastGCCollected = time.Now()
- return m
-}
-
-func (m *Memory) GetBucketFor(key string) (*LeakyBucket, error) {
-
- bucket, ok := m.store[key]
- if !ok {
- return nil, errors.New("miss")
- }
-
- return &bucket, nil
-}
-
-func (m *Memory) SetBucketFor(key string, bucket LeakyBucket) error {
-
- if len(m.store) > GC_SIZE {
- m.GarbageCollect()
- }
-
- m.store[key] = bucket
-
- return nil
-}
-
-func (m *Memory) GarbageCollect() {
- now := time.Now()
-
- // rate limit GC to once per minute
- if now.Unix() >= m.lastGCCollected.Add(GC_PERIOD).Unix() {
- for key, bucket := range m.store {
- // if the bucket is drained, then GC
- if bucket.DrainedAt().Unix() < now.Unix() {
- delete(m.store, key)
- }
- }
-
- m.lastGCCollected = now
- }
-}
diff --git a/vendor/github.com/nxadm/tail/ratelimiter/storage.go b/vendor/github.com/nxadm/tail/ratelimiter/storage.go
deleted file mode 100644
index 89b2fe882..000000000
--- a/vendor/github.com/nxadm/tail/ratelimiter/storage.go
+++ /dev/null
@@ -1,6 +0,0 @@
-package ratelimiter
-
-type Storage interface {
- GetBucketFor(string) (*LeakyBucket, error)
- SetBucketFor(string, LeakyBucket) error
-}
diff --git a/vendor/github.com/nxadm/tail/tail.go b/vendor/github.com/nxadm/tail/tail.go
deleted file mode 100644
index 37ea4411e..000000000
--- a/vendor/github.com/nxadm/tail/tail.go
+++ /dev/null
@@ -1,455 +0,0 @@
-// Copyright (c) 2019 FOSS contributors of https://github.com/nxadm/tail
-// Copyright (c) 2015 HPE Software Inc. All rights reserved.
-// Copyright (c) 2013 ActiveState Software Inc. All rights reserved.
-
-//nxadm/tail provides a Go library that emulates the features of the BSD `tail`
-//program. The library comes with full support for truncation/move detection as
-//it is designed to work with log rotation tools. The library works on all
-//operating systems supported by Go, including POSIX systems like Linux and
-//*BSD, and MS Windows. Go 1.9 is the oldest compiler release supported.
-package tail
-
-import (
- "bufio"
- "errors"
- "fmt"
- "io"
- "io/ioutil"
- "log"
- "os"
- "strings"
- "sync"
- "time"
-
- "github.com/nxadm/tail/ratelimiter"
- "github.com/nxadm/tail/util"
- "github.com/nxadm/tail/watch"
- "gopkg.in/tomb.v1"
-)
-
-var (
- // ErrStop is returned when the tail of a file has been marked to be stopped.
- ErrStop = errors.New("tail should now stop")
-)
-
-type Line struct {
- Text string // The contents of the file
- Num int // The line number
- SeekInfo SeekInfo // SeekInfo
- Time time.Time // Present time
- Err error // Error from tail
-}
-
-// Deprecated: this function is no longer used internally and it has little of no
-// use in the API. As such, it will be removed from the API in a future major
-// release.
-//
-// NewLine returns a * pointer to a Line struct.
-func NewLine(text string, lineNum int) *Line {
- return &Line{text, lineNum, SeekInfo{}, time.Now(), nil}
-}
-
-// SeekInfo represents arguments to io.Seek. See: https://golang.org/pkg/io/#SectionReader.Seek
-type SeekInfo struct {
- Offset int64
- Whence int
-}
-
-type logger interface {
- Fatal(v ...interface{})
- Fatalf(format string, v ...interface{})
- Fatalln(v ...interface{})
- Panic(v ...interface{})
- Panicf(format string, v ...interface{})
- Panicln(v ...interface{})
- Print(v ...interface{})
- Printf(format string, v ...interface{})
- Println(v ...interface{})
-}
-
-// Config is used to specify how a file must be tailed.
-type Config struct {
- // File-specifc
- Location *SeekInfo // Tail from this location. If nil, start at the beginning of the file
- ReOpen bool // Reopen recreated files (tail -F)
- MustExist bool // Fail early if the file does not exist
- Poll bool // Poll for file changes instead of using the default inotify
- Pipe bool // The file is a named pipe (mkfifo)
-
- // Generic IO
- Follow bool // Continue looking for new lines (tail -f)
- MaxLineSize int // If non-zero, split longer lines into multiple lines
-
- // Optionally, use a ratelimiter (e.g. created by the ratelimiter/NewLeakyBucket function)
- RateLimiter *ratelimiter.LeakyBucket
-
- // Optionally use a Logger. When nil, the Logger is set to tail.DefaultLogger.
- // To disable logging, set it to tail.DiscardingLogger
- Logger logger
-}
-
-type Tail struct {
- Filename string // The filename
- Lines chan *Line // A consumable channel of *Line
- Config // Tail.Configuration
-
- file *os.File
- reader *bufio.Reader
- lineNum int
-
- watcher watch.FileWatcher
- changes *watch.FileChanges
-
- tomb.Tomb // provides: Done, Kill, Dying
-
- lk sync.Mutex
-}
-
-var (
- // DefaultLogger logs to os.Stderr and it is used when Config.Logger == nil
- DefaultLogger = log.New(os.Stderr, "", log.LstdFlags)
- // DiscardingLogger can be used to disable logging output
- DiscardingLogger = log.New(ioutil.Discard, "", 0)
-)
-
-// TailFile begins tailing the file. And returns a pointer to a Tail struct
-// and an error. An output stream is made available via the Tail.Lines
-// channel (e.g. to be looped and printed). To handle errors during tailing,
-// after finishing reading from the Lines channel, invoke the `Wait` or `Err`
-// method on the returned *Tail.
-func TailFile(filename string, config Config) (*Tail, error) {
- if config.ReOpen && !config.Follow {
- util.Fatal("cannot set ReOpen without Follow.")
- }
-
- t := &Tail{
- Filename: filename,
- Lines: make(chan *Line),
- Config: config,
- }
-
- // when Logger was not specified in config, use default logger
- if t.Logger == nil {
- t.Logger = DefaultLogger
- }
-
- if t.Poll {
- t.watcher = watch.NewPollingFileWatcher(filename)
- } else {
- t.watcher = watch.NewInotifyFileWatcher(filename)
- }
-
- if t.MustExist {
- var err error
- t.file, err = OpenFile(t.Filename)
- if err != nil {
- return nil, err
- }
- }
-
- go t.tailFileSync()
-
- return t, nil
-}
-
-// Tell returns the file's current position, like stdio's ftell() and an error.
-// Beware that this value may not be completely accurate because one line from
-// the chan(tail.Lines) may have been read already.
-func (tail *Tail) Tell() (offset int64, err error) {
- if tail.file == nil {
- return
- }
- offset, err = tail.file.Seek(0, io.SeekCurrent)
- if err != nil {
- return
- }
-
- tail.lk.Lock()
- defer tail.lk.Unlock()
- if tail.reader == nil {
- return
- }
-
- offset -= int64(tail.reader.Buffered())
- return
-}
-
-// Stop stops the tailing activity.
-func (tail *Tail) Stop() error {
- tail.Kill(nil)
- return tail.Wait()
-}
-
-// StopAtEOF stops tailing as soon as the end of the file is reached. The function
-// returns an error,
-func (tail *Tail) StopAtEOF() error {
- tail.Kill(errStopAtEOF)
- return tail.Wait()
-}
-
-var errStopAtEOF = errors.New("tail: stop at eof")
-
-func (tail *Tail) close() {
- close(tail.Lines)
- tail.closeFile()
-}
-
-func (tail *Tail) closeFile() {
- if tail.file != nil {
- tail.file.Close()
- tail.file = nil
- }
-}
-
-func (tail *Tail) reopen() error {
- tail.closeFile()
- tail.lineNum = 0
- for {
- var err error
- tail.file, err = OpenFile(tail.Filename)
- if err != nil {
- if os.IsNotExist(err) {
- tail.Logger.Printf("Waiting for %s to appear...", tail.Filename)
- if err := tail.watcher.BlockUntilExists(&tail.Tomb); err != nil {
- if err == tomb.ErrDying {
- return err
- }
- return fmt.Errorf("Failed to detect creation of %s: %s", tail.Filename, err)
- }
- continue
- }
- return fmt.Errorf("Unable to open file %s: %s", tail.Filename, err)
- }
- break
- }
- return nil
-}
-
-func (tail *Tail) readLine() (string, error) {
- tail.lk.Lock()
- line, err := tail.reader.ReadString('\n')
- tail.lk.Unlock()
- if err != nil {
- // Note ReadString "returns the data read before the error" in
- // case of an error, including EOF, so we return it as is. The
- // caller is expected to process it if err is EOF.
- return line, err
- }
-
- line = strings.TrimRight(line, "\n")
-
- return line, err
-}
-
-func (tail *Tail) tailFileSync() {
- defer tail.Done()
- defer tail.close()
-
- if !tail.MustExist {
- // deferred first open.
- err := tail.reopen()
- if err != nil {
- if err != tomb.ErrDying {
- tail.Kill(err)
- }
- return
- }
- }
-
- // Seek to requested location on first open of the file.
- if tail.Location != nil {
- _, err := tail.file.Seek(tail.Location.Offset, tail.Location.Whence)
- if err != nil {
- tail.Killf("Seek error on %s: %s", tail.Filename, err)
- return
- }
- }
-
- tail.openReader()
-
- // Read line by line.
- for {
- // do not seek in named pipes
- if !tail.Pipe {
- // grab the position in case we need to back up in the event of a half-line
- if _, err := tail.Tell(); err != nil {
- tail.Kill(err)
- return
- }
- }
-
- line, err := tail.readLine()
-
- // Process `line` even if err is EOF.
- if err == nil {
- cooloff := !tail.sendLine(line)
- if cooloff {
- // Wait a second before seeking till the end of
- // file when rate limit is reached.
- msg := ("Too much log activity; waiting a second before resuming tailing")
- offset, _ := tail.Tell()
- tail.Lines <- &Line{msg, tail.lineNum, SeekInfo{Offset: offset}, time.Now(), errors.New(msg)}
- select {
- case <-time.After(time.Second):
- case <-tail.Dying():
- return
- }
- if err := tail.seekEnd(); err != nil {
- tail.Kill(err)
- return
- }
- }
- } else if err == io.EOF {
- if !tail.Follow {
- if line != "" {
- tail.sendLine(line)
- }
- return
- }
-
- if tail.Follow && line != "" {
- tail.sendLine(line)
- if err := tail.seekEnd(); err != nil {
- tail.Kill(err)
- return
- }
- }
-
- // When EOF is reached, wait for more data to become
- // available. Wait strategy is based on the `tail.watcher`
- // implementation (inotify or polling).
- err := tail.waitForChanges()
- if err != nil {
- if err != ErrStop {
- tail.Kill(err)
- }
- return
- }
- } else {
- // non-EOF error
- tail.Killf("Error reading %s: %s", tail.Filename, err)
- return
- }
-
- select {
- case <-tail.Dying():
- if tail.Err() == errStopAtEOF {
- continue
- }
- return
- default:
- }
- }
-}
-
-// waitForChanges waits until the file has been appended, deleted,
-// moved or truncated. When moved or deleted - the file will be
-// reopened if ReOpen is true. Truncated files are always reopened.
-func (tail *Tail) waitForChanges() error {
- if tail.changes == nil {
- pos, err := tail.file.Seek(0, io.SeekCurrent)
- if err != nil {
- return err
- }
- tail.changes, err = tail.watcher.ChangeEvents(&tail.Tomb, pos)
- if err != nil {
- return err
- }
- }
-
- select {
- case <-tail.changes.Modified:
- return nil
- case <-tail.changes.Deleted:
- tail.changes = nil
- if tail.ReOpen {
- // XXX: we must not log from a library.
- tail.Logger.Printf("Re-opening moved/deleted file %s ...", tail.Filename)
- if err := tail.reopen(); err != nil {
- return err
- }
- tail.Logger.Printf("Successfully reopened %s", tail.Filename)
- tail.openReader()
- return nil
- }
- tail.Logger.Printf("Stopping tail as file no longer exists: %s", tail.Filename)
- return ErrStop
- case <-tail.changes.Truncated:
- // Always reopen truncated files (Follow is true)
- tail.Logger.Printf("Re-opening truncated file %s ...", tail.Filename)
- if err := tail.reopen(); err != nil {
- return err
- }
- tail.Logger.Printf("Successfully reopened truncated %s", tail.Filename)
- tail.openReader()
- return nil
- case <-tail.Dying():
- return ErrStop
- }
-}
-
-func (tail *Tail) openReader() {
- tail.lk.Lock()
- if tail.MaxLineSize > 0 {
- // add 2 to account for newline characters
- tail.reader = bufio.NewReaderSize(tail.file, tail.MaxLineSize+2)
- } else {
- tail.reader = bufio.NewReader(tail.file)
- }
- tail.lk.Unlock()
-}
-
-func (tail *Tail) seekEnd() error {
- return tail.seekTo(SeekInfo{Offset: 0, Whence: io.SeekEnd})
-}
-
-func (tail *Tail) seekTo(pos SeekInfo) error {
- _, err := tail.file.Seek(pos.Offset, pos.Whence)
- if err != nil {
- return fmt.Errorf("Seek error on %s: %s", tail.Filename, err)
- }
- // Reset the read buffer whenever the file is re-seek'ed
- tail.reader.Reset(tail.file)
- return nil
-}
-
-// sendLine sends the line(s) to Lines channel, splitting longer lines
-// if necessary. Return false if rate limit is reached.
-func (tail *Tail) sendLine(line string) bool {
- now := time.Now()
- lines := []string{line}
-
- // Split longer lines
- if tail.MaxLineSize > 0 && len(line) > tail.MaxLineSize {
- lines = util.PartitionString(line, tail.MaxLineSize)
- }
-
- for _, line := range lines {
- tail.lineNum++
- offset, _ := tail.Tell()
- select {
- case tail.Lines <- &Line{line, tail.lineNum, SeekInfo{Offset: offset}, now, nil}:
- case <-tail.Dying():
- return true
- }
- }
-
- if tail.Config.RateLimiter != nil {
- ok := tail.Config.RateLimiter.Pour(uint16(len(lines)))
- if !ok {
- tail.Logger.Printf("Leaky bucket full (%v); entering 1s cooloff period.",
- tail.Filename)
- return false
- }
- }
-
- return true
-}
-
-// Cleanup removes inotify watches added by the tail package. This function is
-// meant to be invoked from a process's exit handler. Linux kernel may not
-// automatically remove inotify watches after the process exits.
-// If you plan to re-read a file, don't call Cleanup in between.
-func (tail *Tail) Cleanup() {
- watch.Cleanup(tail.Filename)
-}
diff --git a/vendor/github.com/nxadm/tail/tail_posix.go b/vendor/github.com/nxadm/tail/tail_posix.go
deleted file mode 100644
index 23e071dea..000000000
--- a/vendor/github.com/nxadm/tail/tail_posix.go
+++ /dev/null
@@ -1,17 +0,0 @@
-// Copyright (c) 2019 FOSS contributors of https://github.com/nxadm/tail
-// +build !windows
-
-package tail
-
-import (
- "os"
-)
-
-// Deprecated: this function is only useful internally and, as such,
-// it will be removed from the API in a future major release.
-//
-// OpenFile proxies a os.Open call for a file so it can be correctly tailed
-// on POSIX and non-POSIX OSes like MS Windows.
-func OpenFile(name string) (file *os.File, err error) {
- return os.Open(name)
-}
diff --git a/vendor/github.com/nxadm/tail/tail_windows.go b/vendor/github.com/nxadm/tail/tail_windows.go
deleted file mode 100644
index da0d2f39c..000000000
--- a/vendor/github.com/nxadm/tail/tail_windows.go
+++ /dev/null
@@ -1,19 +0,0 @@
-// Copyright (c) 2019 FOSS contributors of https://github.com/nxadm/tail
-// +build windows
-
-package tail
-
-import (
- "os"
-
- "github.com/nxadm/tail/winfile"
-)
-
-// Deprecated: this function is only useful internally and, as such,
-// it will be removed from the API in a future major release.
-//
-// OpenFile proxies a os.Open call for a file so it can be correctly tailed
-// on POSIX and non-POSIX OSes like MS Windows.
-func OpenFile(name string) (file *os.File, err error) {
- return winfile.OpenFile(name, os.O_RDONLY, 0)
-}
diff --git a/vendor/github.com/nxadm/tail/util/util.go b/vendor/github.com/nxadm/tail/util/util.go
deleted file mode 100644
index b64caa212..000000000
--- a/vendor/github.com/nxadm/tail/util/util.go
+++ /dev/null
@@ -1,49 +0,0 @@
-// Copyright (c) 2019 FOSS contributors of https://github.com/nxadm/tail
-// Copyright (c) 2015 HPE Software Inc. All rights reserved.
-// Copyright (c) 2013 ActiveState Software Inc. All rights reserved.
-
-package util
-
-import (
- "fmt"
- "log"
- "os"
- "runtime/debug"
-)
-
-type Logger struct {
- *log.Logger
-}
-
-var LOGGER = &Logger{log.New(os.Stderr, "", log.LstdFlags)}
-
-// fatal is like panic except it displays only the current goroutine's stack.
-func Fatal(format string, v ...interface{}) {
- // https://github.com/nxadm/log/blob/master/log.go#L45
- LOGGER.Output(2, fmt.Sprintf("FATAL -- "+format, v...)+"\n"+string(debug.Stack()))
- os.Exit(1)
-}
-
-// partitionString partitions the string into chunks of given size,
-// with the last chunk of variable size.
-func PartitionString(s string, chunkSize int) []string {
- if chunkSize <= 0 {
- panic("invalid chunkSize")
- }
- length := len(s)
- chunks := 1 + length/chunkSize
- start := 0
- end := chunkSize
- parts := make([]string, 0, chunks)
- for {
- if end > length {
- end = length
- }
- parts = append(parts, s[start:end])
- if end == length {
- break
- }
- start, end = end, end+chunkSize
- }
- return parts
-}
diff --git a/vendor/github.com/nxadm/tail/watch/filechanges.go b/vendor/github.com/nxadm/tail/watch/filechanges.go
deleted file mode 100644
index 5b65f42ae..000000000
--- a/vendor/github.com/nxadm/tail/watch/filechanges.go
+++ /dev/null
@@ -1,37 +0,0 @@
-// Copyright (c) 2019 FOSS contributors of https://github.com/nxadm/tail
-package watch
-
-type FileChanges struct {
- Modified chan bool // Channel to get notified of modifications
- Truncated chan bool // Channel to get notified of truncations
- Deleted chan bool // Channel to get notified of deletions/renames
-}
-
-func NewFileChanges() *FileChanges {
- return &FileChanges{
- make(chan bool, 1), make(chan bool, 1), make(chan bool, 1)}
-}
-
-func (fc *FileChanges) NotifyModified() {
- sendOnlyIfEmpty(fc.Modified)
-}
-
-func (fc *FileChanges) NotifyTruncated() {
- sendOnlyIfEmpty(fc.Truncated)
-}
-
-func (fc *FileChanges) NotifyDeleted() {
- sendOnlyIfEmpty(fc.Deleted)
-}
-
-// sendOnlyIfEmpty sends on a bool channel only if the channel has no
-// backlog to be read by other goroutines. This concurrency pattern
-// can be used to notify other goroutines if and only if they are
-// looking for it (i.e., subsequent notifications can be compressed
-// into one).
-func sendOnlyIfEmpty(ch chan bool) {
- select {
- case ch <- true:
- default:
- }
-}
diff --git a/vendor/github.com/nxadm/tail/watch/inotify.go b/vendor/github.com/nxadm/tail/watch/inotify.go
deleted file mode 100644
index cbd11ad8d..000000000
--- a/vendor/github.com/nxadm/tail/watch/inotify.go
+++ /dev/null
@@ -1,136 +0,0 @@
-// Copyright (c) 2019 FOSS contributors of https://github.com/nxadm/tail
-// Copyright (c) 2015 HPE Software Inc. All rights reserved.
-// Copyright (c) 2013 ActiveState Software Inc. All rights reserved.
-
-package watch
-
-import (
- "fmt"
- "os"
- "path/filepath"
-
- "github.com/nxadm/tail/util"
-
- "github.com/fsnotify/fsnotify"
- "gopkg.in/tomb.v1"
-)
-
-// InotifyFileWatcher uses inotify to monitor file changes.
-type InotifyFileWatcher struct {
- Filename string
- Size int64
-}
-
-func NewInotifyFileWatcher(filename string) *InotifyFileWatcher {
- fw := &InotifyFileWatcher{filepath.Clean(filename), 0}
- return fw
-}
-
-func (fw *InotifyFileWatcher) BlockUntilExists(t *tomb.Tomb) error {
- err := WatchCreate(fw.Filename)
- if err != nil {
- return err
- }
- defer RemoveWatchCreate(fw.Filename)
-
- // Do a real check now as the file might have been created before
- // calling `WatchFlags` above.
- if _, err = os.Stat(fw.Filename); !os.IsNotExist(err) {
- // file exists, or stat returned an error.
- return err
- }
-
- events := Events(fw.Filename)
-
- for {
- select {
- case evt, ok := <-events:
- if !ok {
- return fmt.Errorf("inotify watcher has been closed")
- }
- evtName, err := filepath.Abs(evt.Name)
- if err != nil {
- return err
- }
- fwFilename, err := filepath.Abs(fw.Filename)
- if err != nil {
- return err
- }
- if evtName == fwFilename {
- return nil
- }
- case <-t.Dying():
- return tomb.ErrDying
- }
- }
- panic("unreachable")
-}
-
-func (fw *InotifyFileWatcher) ChangeEvents(t *tomb.Tomb, pos int64) (*FileChanges, error) {
- err := Watch(fw.Filename)
- if err != nil {
- return nil, err
- }
-
- changes := NewFileChanges()
- fw.Size = pos
-
- go func() {
-
- events := Events(fw.Filename)
-
- for {
- prevSize := fw.Size
-
- var evt fsnotify.Event
- var ok bool
-
- select {
- case evt, ok = <-events:
- if !ok {
- RemoveWatch(fw.Filename)
- return
- }
- case <-t.Dying():
- RemoveWatch(fw.Filename)
- return
- }
-
- switch {
- case evt.Op&fsnotify.Remove == fsnotify.Remove:
- fallthrough
-
- case evt.Op&fsnotify.Rename == fsnotify.Rename:
- RemoveWatch(fw.Filename)
- changes.NotifyDeleted()
- return
-
- //With an open fd, unlink(fd) - inotify returns IN_ATTRIB (==fsnotify.Chmod)
- case evt.Op&fsnotify.Chmod == fsnotify.Chmod:
- fallthrough
-
- case evt.Op&fsnotify.Write == fsnotify.Write:
- fi, err := os.Stat(fw.Filename)
- if err != nil {
- if os.IsNotExist(err) {
- RemoveWatch(fw.Filename)
- changes.NotifyDeleted()
- return
- }
- // XXX: report this error back to the user
- util.Fatal("Failed to stat file %v: %v", fw.Filename, err)
- }
- fw.Size = fi.Size()
-
- if prevSize > 0 && prevSize > fw.Size {
- changes.NotifyTruncated()
- } else {
- changes.NotifyModified()
- }
- prevSize = fw.Size
- }
- }
- }()
-
- return changes, nil
-}
diff --git a/vendor/github.com/nxadm/tail/watch/inotify_tracker.go b/vendor/github.com/nxadm/tail/watch/inotify_tracker.go
deleted file mode 100644
index cb9572a03..000000000
--- a/vendor/github.com/nxadm/tail/watch/inotify_tracker.go
+++ /dev/null
@@ -1,249 +0,0 @@
-// Copyright (c) 2019 FOSS contributors of https://github.com/nxadm/tail
-// Copyright (c) 2015 HPE Software Inc. All rights reserved.
-// Copyright (c) 2013 ActiveState Software Inc. All rights reserved.
-
-package watch
-
-import (
- "log"
- "os"
- "path/filepath"
- "sync"
- "syscall"
-
- "github.com/nxadm/tail/util"
-
- "github.com/fsnotify/fsnotify"
-)
-
-type InotifyTracker struct {
- mux sync.Mutex
- watcher *fsnotify.Watcher
- chans map[string]chan fsnotify.Event
- done map[string]chan bool
- watchNums map[string]int
- watch chan *watchInfo
- remove chan *watchInfo
- error chan error
-}
-
-type watchInfo struct {
- op fsnotify.Op
- fname string
-}
-
-func (this *watchInfo) isCreate() bool {
- return this.op == fsnotify.Create
-}
-
-var (
- // globally shared InotifyTracker; ensures only one fsnotify.Watcher is used
- shared *InotifyTracker
-
- // these are used to ensure the shared InotifyTracker is run exactly once
- once = sync.Once{}
- goRun = func() {
- shared = &InotifyTracker{
- mux: sync.Mutex{},
- chans: make(map[string]chan fsnotify.Event),
- done: make(map[string]chan bool),
- watchNums: make(map[string]int),
- watch: make(chan *watchInfo),
- remove: make(chan *watchInfo),
- error: make(chan error),
- }
- go shared.run()
- }
-
- logger = log.New(os.Stderr, "", log.LstdFlags)
-)
-
-// Watch signals the run goroutine to begin watching the input filename
-func Watch(fname string) error {
- return watch(&watchInfo{
- fname: fname,
- })
-}
-
-// Watch create signals the run goroutine to begin watching the input filename
-// if call the WatchCreate function, don't call the Cleanup, call the RemoveWatchCreate
-func WatchCreate(fname string) error {
- return watch(&watchInfo{
- op: fsnotify.Create,
- fname: fname,
- })
-}
-
-func watch(winfo *watchInfo) error {
- // start running the shared InotifyTracker if not already running
- once.Do(goRun)
-
- winfo.fname = filepath.Clean(winfo.fname)
- shared.watch <- winfo
- return <-shared.error
-}
-
-// RemoveWatch signals the run goroutine to remove the watch for the input filename
-func RemoveWatch(fname string) error {
- return remove(&watchInfo{
- fname: fname,
- })
-}
-
-// RemoveWatch create signals the run goroutine to remove the watch for the input filename
-func RemoveWatchCreate(fname string) error {
- return remove(&watchInfo{
- op: fsnotify.Create,
- fname: fname,
- })
-}
-
-func remove(winfo *watchInfo) error {
- // start running the shared InotifyTracker if not already running
- once.Do(goRun)
-
- winfo.fname = filepath.Clean(winfo.fname)
- shared.mux.Lock()
- done := shared.done[winfo.fname]
- if done != nil {
- delete(shared.done, winfo.fname)
- close(done)
- }
- shared.mux.Unlock()
-
- shared.remove <- winfo
- return <-shared.error
-}
-
-// Events returns a channel to which FileEvents corresponding to the input filename
-// will be sent. This channel will be closed when removeWatch is called on this
-// filename.
-func Events(fname string) <-chan fsnotify.Event {
- shared.mux.Lock()
- defer shared.mux.Unlock()
-
- return shared.chans[fname]
-}
-
-// Cleanup removes the watch for the input filename if necessary.
-func Cleanup(fname string) error {
- return RemoveWatch(fname)
-}
-
-// watchFlags calls fsnotify.WatchFlags for the input filename and flags, creating
-// a new Watcher if the previous Watcher was closed.
-func (shared *InotifyTracker) addWatch(winfo *watchInfo) error {
- shared.mux.Lock()
- defer shared.mux.Unlock()
-
- if shared.chans[winfo.fname] == nil {
- shared.chans[winfo.fname] = make(chan fsnotify.Event)
- }
- if shared.done[winfo.fname] == nil {
- shared.done[winfo.fname] = make(chan bool)
- }
-
- fname := winfo.fname
- if winfo.isCreate() {
- // Watch for new files to be created in the parent directory.
- fname = filepath.Dir(fname)
- }
-
- var err error
- // already in inotify watch
- if shared.watchNums[fname] == 0 {
- err = shared.watcher.Add(fname)
- }
- if err == nil {
- shared.watchNums[fname]++
- }
- return err
-}
-
-// removeWatch calls fsnotify.RemoveWatch for the input filename and closes the
-// corresponding events channel.
-func (shared *InotifyTracker) removeWatch(winfo *watchInfo) error {
- shared.mux.Lock()
-
- ch := shared.chans[winfo.fname]
- if ch != nil {
- delete(shared.chans, winfo.fname)
- close(ch)
- }
-
- fname := winfo.fname
- if winfo.isCreate() {
- // Watch for new files to be created in the parent directory.
- fname = filepath.Dir(fname)
- }
- shared.watchNums[fname]--
- watchNum := shared.watchNums[fname]
- if watchNum == 0 {
- delete(shared.watchNums, fname)
- }
- shared.mux.Unlock()
-
- var err error
- // If we were the last ones to watch this file, unsubscribe from inotify.
- // This needs to happen after releasing the lock because fsnotify waits
- // synchronously for the kernel to acknowledge the removal of the watch
- // for this file, which causes us to deadlock if we still held the lock.
- if watchNum == 0 {
- err = shared.watcher.Remove(fname)
- }
-
- return err
-}
-
-// sendEvent sends the input event to the appropriate Tail.
-func (shared *InotifyTracker) sendEvent(event fsnotify.Event) {
- name := filepath.Clean(event.Name)
-
- shared.mux.Lock()
- ch := shared.chans[name]
- done := shared.done[name]
- shared.mux.Unlock()
-
- if ch != nil && done != nil {
- select {
- case ch <- event:
- case <-done:
- }
- }
-}
-
-// run starts the goroutine in which the shared struct reads events from its
-// Watcher's Event channel and sends the events to the appropriate Tail.
-func (shared *InotifyTracker) run() {
- watcher, err := fsnotify.NewWatcher()
- if err != nil {
- util.Fatal("failed to create Watcher")
- }
- shared.watcher = watcher
-
- for {
- select {
- case winfo := <-shared.watch:
- shared.error <- shared.addWatch(winfo)
-
- case winfo := <-shared.remove:
- shared.error <- shared.removeWatch(winfo)
-
- case event, open := <-shared.watcher.Events:
- if !open {
- return
- }
- shared.sendEvent(event)
-
- case err, open := <-shared.watcher.Errors:
- if !open {
- return
- } else if err != nil {
- sysErr, ok := err.(*os.SyscallError)
- if !ok || sysErr.Err != syscall.EINTR {
- logger.Printf("Error in Watcher Error channel: %s", err)
- }
- }
- }
- }
-}
diff --git a/vendor/github.com/nxadm/tail/watch/polling.go b/vendor/github.com/nxadm/tail/watch/polling.go
deleted file mode 100644
index 74e10aa42..000000000
--- a/vendor/github.com/nxadm/tail/watch/polling.go
+++ /dev/null
@@ -1,119 +0,0 @@
-// Copyright (c) 2019 FOSS contributors of https://github.com/nxadm/tail
-// Copyright (c) 2015 HPE Software Inc. All rights reserved.
-// Copyright (c) 2013 ActiveState Software Inc. All rights reserved.
-
-package watch
-
-import (
- "os"
- "runtime"
- "time"
-
- "github.com/nxadm/tail/util"
- "gopkg.in/tomb.v1"
-)
-
-// PollingFileWatcher polls the file for changes.
-type PollingFileWatcher struct {
- Filename string
- Size int64
-}
-
-func NewPollingFileWatcher(filename string) *PollingFileWatcher {
- fw := &PollingFileWatcher{filename, 0}
- return fw
-}
-
-var POLL_DURATION time.Duration
-
-func (fw *PollingFileWatcher) BlockUntilExists(t *tomb.Tomb) error {
- for {
- if _, err := os.Stat(fw.Filename); err == nil {
- return nil
- } else if !os.IsNotExist(err) {
- return err
- }
- select {
- case <-time.After(POLL_DURATION):
- continue
- case <-t.Dying():
- return tomb.ErrDying
- }
- }
- panic("unreachable")
-}
-
-func (fw *PollingFileWatcher) ChangeEvents(t *tomb.Tomb, pos int64) (*FileChanges, error) {
- origFi, err := os.Stat(fw.Filename)
- if err != nil {
- return nil, err
- }
-
- changes := NewFileChanges()
- var prevModTime time.Time
-
- // XXX: use tomb.Tomb to cleanly manage these goroutines. replace
- // the fatal (below) with tomb's Kill.
-
- fw.Size = pos
-
- go func() {
- prevSize := fw.Size
- for {
- select {
- case <-t.Dying():
- return
- default:
- }
-
- time.Sleep(POLL_DURATION)
- fi, err := os.Stat(fw.Filename)
- if err != nil {
- // Windows cannot delete a file if a handle is still open (tail keeps one open)
- // so it gives access denied to anything trying to read it until all handles are released.
- if os.IsNotExist(err) || (runtime.GOOS == "windows" && os.IsPermission(err)) {
- // File does not exist (has been deleted).
- changes.NotifyDeleted()
- return
- }
-
- // XXX: report this error back to the user
- util.Fatal("Failed to stat file %v: %v", fw.Filename, err)
- }
-
- // File got moved/renamed?
- if !os.SameFile(origFi, fi) {
- changes.NotifyDeleted()
- return
- }
-
- // File got truncated?
- fw.Size = fi.Size()
- if prevSize > 0 && prevSize > fw.Size {
- changes.NotifyTruncated()
- prevSize = fw.Size
- continue
- }
- // File got bigger?
- if prevSize > 0 && prevSize < fw.Size {
- changes.NotifyModified()
- prevSize = fw.Size
- continue
- }
- prevSize = fw.Size
-
- // File was appended to (changed)?
- modTime := fi.ModTime()
- if modTime != prevModTime {
- prevModTime = modTime
- changes.NotifyModified()
- }
- }
- }()
-
- return changes, nil
-}
-
-func init() {
- POLL_DURATION = 250 * time.Millisecond
-}
diff --git a/vendor/github.com/nxadm/tail/watch/watch.go b/vendor/github.com/nxadm/tail/watch/watch.go
deleted file mode 100644
index 2b5112805..000000000
--- a/vendor/github.com/nxadm/tail/watch/watch.go
+++ /dev/null
@@ -1,21 +0,0 @@
-// Copyright (c) 2019 FOSS contributors of https://github.com/nxadm/tail
-// Copyright (c) 2015 HPE Software Inc. All rights reserved.
-// Copyright (c) 2013 ActiveState Software Inc. All rights reserved.
-
-package watch
-
-import "gopkg.in/tomb.v1"
-
-// FileWatcher monitors file-level events.
-type FileWatcher interface {
- // BlockUntilExists blocks until the file comes into existence.
- BlockUntilExists(*tomb.Tomb) error
-
- // ChangeEvents reports on changes to a file, be it modification,
- // deletion, renames or truncations. Returned FileChanges group of
- // channels will be closed, thus become unusable, after a deletion
- // or truncation event.
- // In order to properly report truncations, ChangeEvents requires
- // the caller to pass their current offset in the file.
- ChangeEvents(*tomb.Tomb, int64) (*FileChanges, error)
-}
diff --git a/vendor/github.com/nxadm/tail/winfile/winfile.go b/vendor/github.com/nxadm/tail/winfile/winfile.go
deleted file mode 100644
index 4562ac7c2..000000000
--- a/vendor/github.com/nxadm/tail/winfile/winfile.go
+++ /dev/null
@@ -1,93 +0,0 @@
-// Copyright (c) 2019 FOSS contributors of https://github.com/nxadm/tail
-// +build windows
-
-package winfile
-
-import (
- "os"
- "syscall"
- "unsafe"
-)
-
-// issue also described here
-//https://codereview.appspot.com/8203043/
-
-// https://github.com/jnwhiteh/golang/blob/master/src/pkg/syscall/syscall_windows.go#L218
-func Open(path string, mode int, perm uint32) (fd syscall.Handle, err error) {
- if len(path) == 0 {
- return syscall.InvalidHandle, syscall.ERROR_FILE_NOT_FOUND
- }
- pathp, err := syscall.UTF16PtrFromString(path)
- if err != nil {
- return syscall.InvalidHandle, err
- }
- var access uint32
- switch mode & (syscall.O_RDONLY | syscall.O_WRONLY | syscall.O_RDWR) {
- case syscall.O_RDONLY:
- access = syscall.GENERIC_READ
- case syscall.O_WRONLY:
- access = syscall.GENERIC_WRITE
- case syscall.O_RDWR:
- access = syscall.GENERIC_READ | syscall.GENERIC_WRITE
- }
- if mode&syscall.O_CREAT != 0 {
- access |= syscall.GENERIC_WRITE
- }
- if mode&syscall.O_APPEND != 0 {
- access &^= syscall.GENERIC_WRITE
- access |= syscall.FILE_APPEND_DATA
- }
- sharemode := uint32(syscall.FILE_SHARE_READ | syscall.FILE_SHARE_WRITE | syscall.FILE_SHARE_DELETE)
- var sa *syscall.SecurityAttributes
- if mode&syscall.O_CLOEXEC == 0 {
- sa = makeInheritSa()
- }
- var createmode uint32
- switch {
- case mode&(syscall.O_CREAT|syscall.O_EXCL) == (syscall.O_CREAT | syscall.O_EXCL):
- createmode = syscall.CREATE_NEW
- case mode&(syscall.O_CREAT|syscall.O_TRUNC) == (syscall.O_CREAT | syscall.O_TRUNC):
- createmode = syscall.CREATE_ALWAYS
- case mode&syscall.O_CREAT == syscall.O_CREAT:
- createmode = syscall.OPEN_ALWAYS
- case mode&syscall.O_TRUNC == syscall.O_TRUNC:
- createmode = syscall.TRUNCATE_EXISTING
- default:
- createmode = syscall.OPEN_EXISTING
- }
- h, e := syscall.CreateFile(pathp, access, sharemode, sa, createmode, syscall.FILE_ATTRIBUTE_NORMAL, 0)
- return h, e
-}
-
-// https://github.com/jnwhiteh/golang/blob/master/src/pkg/syscall/syscall_windows.go#L211
-func makeInheritSa() *syscall.SecurityAttributes {
- var sa syscall.SecurityAttributes
- sa.Length = uint32(unsafe.Sizeof(sa))
- sa.InheritHandle = 1
- return &sa
-}
-
-// https://github.com/jnwhiteh/golang/blob/master/src/pkg/os/file_windows.go#L133
-func OpenFile(name string, flag int, perm os.FileMode) (file *os.File, err error) {
- r, e := Open(name, flag|syscall.O_CLOEXEC, syscallMode(perm))
- if e != nil {
- return nil, e
- }
- return os.NewFile(uintptr(r), name), nil
-}
-
-// https://github.com/jnwhiteh/golang/blob/master/src/pkg/os/file_posix.go#L61
-func syscallMode(i os.FileMode) (o uint32) {
- o |= uint32(i.Perm())
- if i&os.ModeSetuid != 0 {
- o |= syscall.S_ISUID
- }
- if i&os.ModeSetgid != 0 {
- o |= syscall.S_ISGID
- }
- if i&os.ModeSticky != 0 {
- o |= syscall.S_ISVTX
- }
- // No mapping for Go's ModeTemporary (plan9 only).
- return
-}
diff --git a/vendor/github.com/onsi/ginkgo/.travis.yml b/vendor/github.com/onsi/ginkgo/.travis.yml
deleted file mode 100644
index ea0966d5b..000000000
--- a/vendor/github.com/onsi/ginkgo/.travis.yml
+++ /dev/null
@@ -1,24 +0,0 @@
-language: go
-go:
- - tip
- - 1.16.x
- - 1.15.x
-
-cache:
- directories:
- - $GOPATH/pkg/mod
-
-# allow internal package imports, necessary for forked repositories
-go_import_path: github.com/onsi/ginkgo
-
-install:
- - GO111MODULE="off" go get -v -t ./...
- - GO111MODULE="off" go get golang.org/x/tools/cmd/cover
- - GO111MODULE="off" go get github.com/onsi/gomega
- - GO111MODULE="off" go install github.com/onsi/ginkgo/ginkgo
- - export PATH=$GOPATH/bin:$PATH
-
-script:
- - GO111MODULE="on" go mod tidy && git diff --exit-code go.mod go.sum
- - go vet
- - ginkgo -r --randomizeAllSpecs --randomizeSuites --race --trace
diff --git a/vendor/github.com/onsi/ginkgo/CHANGELOG.md b/vendor/github.com/onsi/ginkgo/CHANGELOG.md
deleted file mode 100644
index a26bc530f..000000000
--- a/vendor/github.com/onsi/ginkgo/CHANGELOG.md
+++ /dev/null
@@ -1,393 +0,0 @@
-## 1.16.5
-
-Ginkgo 2.0 now has a Release Candidate. 1.16.5 advertises the existence of the RC.
-1.16.5 deprecates GinkgoParallelNode in favor of GinkgoParallelProcess
-
-You can silence the RC advertisement by setting an `ACK_GINKG_RC=true` environment variable or creating a file in your home directory called `.ack-ginkgo-rc`
-
-## 1.16.4
-
-### Fixes
-1.16.4 retracts 1.16.3. There are no code changes. The 1.16.3 tag was associated with the wrong commit and an attempt to change it after-the-fact has proven problematic. 1.16.4 retracts 1.16.3 in Ginkgo's go.mod and creates a new, correctly tagged, release.
-
-## 1.16.3
-
-### Features
-- Measure is now deprecated and emits a deprecation warning.
-
-## 1.16.2
-
-### Fixes
-- Deprecations can be suppressed by setting an `ACK_GINKGO_DEPRECATIONS=` environment variable.
-
-## 1.16.1
-
-### Fixes
-- Supress --stream deprecation warning on windows (#793)
-
-## 1.16.0
-
-### Features
-- Advertise Ginkgo 2.0. Introduce deprecations. [9ef1913]
- - Update README.md to advertise that Ginkgo 2.0 is coming.
- - Backport the 2.0 DeprecationTracker and start alerting users
- about upcoming deprecations.
-
-- Add slim-sprig template functions to bootstrap/generate (#775) [9162b86]
-
-### Fixes
-- Fix accidental reference to 1488 (#784) [9fb7fe4]
-
-## 1.15.2
-
-### Fixes
-- ignore blank `-focus` and `-skip` flags (#780) [e90a4a0]
-
-## 1.15.1
-
-### Fixes
-- reporters/junit: Use `system-out` element instead of `passed` (#769) [9eda305]
-
-## 1.15.0
-
-### Features
-- Adds 'outline' command to print the outline of specs/containers in a file (#754) [071c369] [6803cc3] [935b538] [06744e8] [0c40583]
-- Add support for using template to generate tests (#752) [efb9e69]
-- Add a Chinese Doc #755 (#756) [5207632]
-- cli: allow multiple -focus and -skip flags (#736) [9a782fb]
-
-### Fixes
-- Add _internal to filename of tests created with internal flag (#751) [43c12da]
-
-## 1.14.2
-
-### Fixes
-- correct handling windows backslash in import path (#721) [97f3d51]
-- Add additional methods to GinkgoT() to improve compatibility with the testing.TB interface [b5fe44d]
-
-## 1.14.1
-
-### Fixes
-- Discard exported method declaration when running ginkgo bootstrap (#558) [f4b0240]
-
-## 1.14.0
-
-### Features
-- Defer running top-level container nodes until RunSpecs is called [d44dedf]
-- [Document Ginkgo lifecycle](http://onsi.github.io/ginkgo/#understanding-ginkgos-lifecycle)
-- Add `extensions/globals` package (#692) [3295c8f] - this can be helpful in contexts where you are test-driving your test-generation code (see [#692](https://github.com/onsi/ginkgo/pull/692))
-- Print Skip reason in JUnit reporter if one was provided [820dfab]
-
-## 1.13.0
-
-### Features
-- Add a version of table.Entry that allows dumping the entry parameters. (#689) [21eaef2]
-
-### Fixes
-- Ensure integration tests pass in an environment sans GOPATH [606fba2]
-- Add books package (#568) [fc0e44e]
-- doc(readme): installation via "tools package" (#677) [83bb20e]
-- Solve the undefined: unix.Dup2 compile error on mips64le (#680) [0624f75]
-- Import package without dot (#687) [6321024]
-- Fix integration tests to stop require GOPATH (#686) [a912ec5]
-
-## 1.12.3
-
-### Fixes
-- Print correct code location of failing table test (#666) [c6d7afb]
-
-## 1.12.2
-
-### Fixes
-- Update dependencies [ea4a036]
-
-## 1.12.1
-
-### Fixes
-- Make unfocus ("blur") much faster (#674) [8b18061]
-- Fix typo (#673) [7fdcbe8]
-- Test against 1.14 and remove 1.12 [d5c2ad6]
-- Test if a coverprofile content is empty before checking its latest character (#670) [14d9fa2]
-- replace tail package with maintained one. this fixes go get errors (#667) [4ba33d4]
-- improve ginkgo performance - makes progress on #644 [a14f98e]
-- fix convert integration tests [1f8ba69]
-- fix typo succesful -> successful (#663) [1ea49cf]
-- Fix invalid link (#658) [b886136]
-- convert utility : Include comments from source (#657) [1077c6d]
-- Explain what BDD means [d79e7fb]
-- skip race detector test on unsupported platform (#642) [f8ab89d]
-- Use Dup2 from golang.org/x/sys/unix instead of syscallDup (#638) [5d53c55]
-- Fix missing newline in combined coverage file (#641) [6a07ea2]
-- check if a spec is run before returning SpecSummary (#645) [8850000]
-
-## 1.12.0
-
-### Features
-- Add module definition (#630) [78916ab]
-
-## 1.11.0
-
-### Features
-- Add syscall for riscv64 architecture [f66e896]
-- teamcity reporter: output location of test failure as well as test definition (#626) [9869142]
-- teamcity reporter: output newline after every service message (#625) [3cfa02d]
-- Add support for go module when running `generate` command (#578) [9c89e3f]
-
-## 1.10.3
-
-### Fixes
-- Set go_import_path in travis.yml to allow internal packages in forks (#607) [3b721db]
-- Add integration test [d90e0dc]
-- Fix coverage files combining [e5dde8c]
-- A new CLI option: -ginkgo.reportFile (#601) [034fd25]
-
-## 1.10.2
-
-### Fixes
-- speed up table entry generateIt() (#609) [5049dc5]
-- Fix. Write errors to stderr instead of stdout (#610) [7bb3091]
-
-## 1.10.1
-
-### Fixes
-- stack backtrace: fix skipping (#600) [2a4c0bd]
-
-## 1.10.0
-
-### Fixes
-- stack backtrace: fix alignment and skipping [66915d6]
-- fix typo in documentation [8f97b93]
-
-## 1.9.0
-
-### Features
-- Option to print output into report, when tests have passed [0545415]
-
-### Fixes
-- Fixed typos in comments [0ecbc58]
-- gofmt code [a7f8bfb]
-- Simplify code [7454d00]
-- Simplify concatenation, incrementation and function assignment [4825557]
-- Avoid unnecessary conversions [9d9403c]
-- JUnit: include more detailed information about panic [19cca4b]
-- Print help to stdout when the user asks for help [4cb7441]
-
-
-## 1.8.0
-
-### New Features
-- allow config of the vet flag for `go test` (#562) [3cd45fa]
-- Support projects using go modules [d56ee76]
-
-### Fixes and Minor Improvements
-- chore(godoc): fixes typos in Measurement funcs [dbaca8e]
-- Optimize focus to avoid allocations [f493786]
-- Ensure generated test file names are underscored [505cc35]
-
-## 1.7.0
-
-### New Features
-- Add JustAfterEach (#484) [0d4f080]
-
-### Fixes
-- Correctly round suite time in junit reporter [2445fc1]
-- Avoid using -i argument to go test for Golang 1.10+ [46bbc26]
-
-## 1.6.0
-
-### New Features
-- add --debug flag to emit node output to files (#499) [39febac]
-
-### Fixes
-- fix: for `go vet` to pass [69338ec]
-- docs: fix for contributing instructions [7004cb1]
-- consolidate and streamline contribution docs (#494) [d848015]
-- Make generated Junit file compatable with "Maven Surefire" (#488) [e51bee6]
-- all: gofmt [000d317]
-- Increase eventually timeout to 30s [c73579c]
-- Clarify asynchronous test behaviour [294d8f4]
-- Travis badge should only show master [26d2143]
-
-## 1.5.0 5/10/2018
-
-### New Features
-- Supports go v1.10 (#443, #446, #451) [e873237, 468e89e, e37dbfe, a37f4c0, c0b857d, bca5260, 4177ca8]
-- Add a When() synonym for Context() (#386) [747514b, 7484dad, 7354a07, dd826c8]
-- Re-add noisySkippings flag [652e15c]
-- Allow coverage to be displayed for focused specs (#367) [11459a8]
-- Handle -outputdir flag (#364) [228e3a8]
-- Handle -coverprofile flag (#355) [43392d5]
-
-### Fixes
-- When using custom reporters register the custom reporters *before* the default reporter. This allows users to see the output of any print statements in their customer reporters. (#365) [8382b23]
-- When running a test and calculating the coverage using the `-coverprofile` and `-outputdir` flags, Ginkgo fails with an error if the directory does not exist. This is due to an [issue in go 1.10](https://github.com/golang/go/issues/24588) (#446) [b36a6e0]
-- `unfocus` command ignores vendor folder (#459) [e5e551c, c556e43, a3b6351, 9a820dd]
-- Ignore packages whose tests are all ignored by go (#456) [7430ca7, 6d8be98]
-- Increase the threshold when checking time measuments (#455) [2f714bf, 68f622c]
-- Fix race condition in coverage tests (#423) [a5a8ff7, ab9c08b]
-- Add an extra new line after reporting spec run completion for test2json [874520d]
-- added name name field to junit reported testsuite [ae61c63]
-- Do not set the run time of a spec when the dryRun flag is used (#438) [457e2d9, ba8e856]
-- Process FWhen and FSpecify when unfocusing (#434) [9008c7b, ee65bd, df87dfe]
-- Synchronise the access to the state of specs to avoid race conditions (#430) [7d481bc, ae6829d]
-- Added Duration on GinkgoTestDescription (#383) [5f49dad, 528417e, 0747408, 329d7ed]
-- Fix Ginkgo stack trace on failure for Specify (#415) [b977ede, 65ca40e, 6c46eb8]
-- Update README with Go 1.6+, Golang -> Go (#409) [17f6b97, bc14b66, 20d1598]
-- Use fmt.Errorf instead of errors.New(fmt.Sprintf (#401) [a299f56, 44e2eaa]
-- Imports in generated code should follow conventions (#398) [0bec0b0, e8536d8]
-- Prevent data race error when Recording a benchmark value from multiple go routines (#390) [c0c4881, 7a241e9]
-- Replace GOPATH in Environment [4b883f0]
-
-
-## 1.4.0 7/16/2017
-
-- `ginkgo` now provides a hint if you accidentally forget to run `ginkgo bootstrap` to generate a `*_suite_test.go` file that actually invokes the Ginkgo test runner. [#345](https://github.com/onsi/ginkgo/pull/345)
-- thanks to improvements in `go test -c` `ginkgo` no longer needs to fix Go's compilation output to ensure compilation errors are expressed relative to the CWD. [#357]
-- `ginkgo watch -watchRegExp=...` allows you to specify a custom regular expression to watch. Only files matching the regular expression are watched for changes (the default is `\.go$`) [#356]
-- `ginkgo` now always emits compilation output. Previously, only failed compilation output was printed out. [#277]
-- `ginkgo -requireSuite` now fails the test run if there are `*_test.go` files but `go test` fails to detect any tests. Typically this means you forgot to run `ginkgo bootstrap` to generate a suite file. [#344]
-- `ginkgo -timeout=DURATION` allows you to adjust the timeout for the entire test suite (default is 24 hours) [#248]
-
-## 1.3.0 3/28/2017
-
-Improvements:
-
-- Significantly improved parallel test distribution. Now instead of pre-sharding test cases across workers (which can result in idle workers and poor test performance) Ginkgo uses a shared queue to keep all workers busy until all tests are complete. This improves test-time performance and consistency.
-- `Skip(message)` can be used to skip the current test.
-- Added `extensions/table` - a Ginkgo DSL for [Table Driven Tests](http://onsi.github.io/ginkgo/#table-driven-tests)
-- Add `GinkgoRandomSeed()` - shorthand for `config.GinkgoConfig.RandomSeed`
-- Support for retrying flaky tests with `--flakeAttempts`
-- `ginkgo ./...` now recurses as you'd expect
-- Added `Specify` a synonym for `It`
-- Support colorise on Windows
-- Broader support for various go compilation flags in the `ginkgo` CLI
-
-Bug Fixes:
-
-- Ginkgo tests now fail when you `panic(nil)` (#167)
-
-## 1.2.0 5/31/2015
-
-Improvements
-
-- `ginkgo -coverpkg` calls down to `go test -coverpkg` (#160)
-- `ginkgo -afterSuiteHook COMMAND` invokes the passed-in `COMMAND` after a test suite completes (#152)
-- Relaxed requirement for Go 1.4+. `ginkgo` now works with Go v1.3+ (#166)
-
-## 1.2.0-beta
-
-Ginkgo now requires Go 1.4+
-
-Improvements:
-
-- Call reporters in reverse order when announcing spec completion -- allows custom reporters to emit output before the default reporter does.
-- Improved focus behavior. Now, this:
-
- ```golang
- FDescribe("Some describe", func() {
- It("A", func() {})
-
- FIt("B", func() {})
- })
- ```
-
- will run `B` but *not* `A`. This tends to be a common usage pattern when in the thick of writing and debugging tests.
-- When `SIGINT` is received, Ginkgo will emit the contents of the `GinkgoWriter` before running the `AfterSuite`. Useful for debugging stuck tests.
-- When `--progress` is set, Ginkgo will write test progress (in particular, Ginkgo will say when it is about to run a BeforeEach, AfterEach, It, etc...) to the `GinkgoWriter`. This is useful for debugging stuck tests and tests that generate many logs.
-- Improved output when an error occurs in a setup or teardown block.
-- When `--dryRun` is set, Ginkgo will walk the spec tree and emit to its reporter *without* actually running anything. Best paired with `-v` to understand which specs will run in which order.
-- Add `By` to help document long `It`s. `By` simply writes to the `GinkgoWriter`.
-- Add support for precompiled tests:
- - `ginkgo build ` will now compile the package, producing a file named `package.test`
- - The compiled `package.test` file can be run directly. This runs the tests in series.
- - To run precompiled tests in parallel, you can run: `ginkgo -p package.test`
-- Support `bootstrap`ping and `generate`ing [Agouti](http://agouti.org) specs.
-- `ginkgo generate` and `ginkgo bootstrap` now honor the package name already defined in a given directory
-- The `ginkgo` CLI ignores `SIGQUIT`. Prevents its stack dump from interlacing with the underlying test suite's stack dump.
-- The `ginkgo` CLI now compiles tests into a temporary directory instead of the package directory. This necessitates upgrading to Go v1.4+.
-- `ginkgo -notify` now works on Linux
-
-Bug Fixes:
-
-- If --skipPackages is used and all packages are skipped, Ginkgo should exit 0.
-- Fix tempfile leak when running in parallel
-- Fix incorrect failure message when a panic occurs during a parallel test run
-- Fixed an issue where a pending test within a focused context (or a focused test within a pending context) would skip all other tests.
-- Be more consistent about handling SIGTERM as well as SIGINT
-- When interupted while concurrently compiling test suites in the background, Ginkgo now cleans up the compiled artifacts.
-- Fixed a long standing bug where `ginkgo -p` would hang if a process spawned by one of the Ginkgo parallel nodes does not exit. (Hooray!)
-
-## 1.1.0 (8/2/2014)
-
-No changes, just dropping the beta.
-
-## 1.1.0-beta (7/22/2014)
-New Features:
-
-- `ginkgo watch` now monitors packages *and their dependencies* for changes. The depth of the dependency tree can be modified with the `-depth` flag.
-- Test suites with a programmatic focus (`FIt`, `FDescribe`, etc...) exit with non-zero status code, even when they pass. This allows CI systems to detect accidental commits of focused test suites.
-- `ginkgo -p` runs the testsuite in parallel with an auto-detected number of nodes.
-- `ginkgo -tags=TAG_LIST` passes a list of tags down to the `go build` command.
-- `ginkgo --failFast` aborts the test suite after the first failure.
-- `ginkgo generate file_1 file_2` can take multiple file arguments.
-- Ginkgo now summarizes any spec failures that occurred at the end of the test run.
-- `ginkgo --randomizeSuites` will run tests *suites* in random order using the generated/passed-in seed.
-
-Improvements:
-
-- `ginkgo -skipPackage` now takes a comma-separated list of strings. If the *relative path* to a package matches one of the entries in the comma-separated list, that package is skipped.
-- `ginkgo --untilItFails` no longer recompiles between attempts.
-- Ginkgo now panics when a runnable node (`It`, `BeforeEach`, `JustBeforeEach`, `AfterEach`, `Measure`) is nested within another runnable node. This is always a mistake. Any test suites that panic because of this change should be fixed.
-
-Bug Fixes:
-
-- `ginkgo boostrap` and `ginkgo generate` no longer fail when dealing with `hyphen-separated-packages`.
-- parallel specs are now better distributed across nodes - fixed a crashing bug where (for example) distributing 11 tests across 7 nodes would panic
-
-## 1.0.0 (5/24/2014)
-New Features:
-
-- Add `GinkgoParallelNode()` - shorthand for `config.GinkgoConfig.ParallelNode`
-
-Improvements:
-
-- When compilation fails, the compilation output is rewritten to present a correct *relative* path. Allows ⌘-clicking in iTerm open the file in your text editor.
-- `--untilItFails` and `ginkgo watch` now generate new random seeds between test runs, unless a particular random seed is specified.
-
-Bug Fixes:
-
-- `-cover` now generates a correctly combined coverprofile when running with in parallel with multiple `-node`s.
-- Print out the contents of the `GinkgoWriter` when `BeforeSuite` or `AfterSuite` fail.
-- Fix all remaining race conditions in Ginkgo's test suite.
-
-## 1.0.0-beta (4/14/2014)
-Breaking changes:
-
-- `thirdparty/gomocktestreporter` is gone. Use `GinkgoT()` instead
-- Modified the Reporter interface
-- `watch` is now a subcommand, not a flag.
-
-DSL changes:
-
-- `BeforeSuite` and `AfterSuite` for setting up and tearing down test suites.
-- `AfterSuite` is triggered on interrupt (`^C`) as well as exit.
-- `SynchronizedBeforeSuite` and `SynchronizedAfterSuite` for setting up and tearing down singleton resources across parallel nodes.
-
-CLI changes:
-
-- `watch` is now a subcommand, not a flag
-- `--nodot` flag can be passed to `ginkgo generate` and `ginkgo bootstrap` to avoid dot imports. This explicitly imports all exported identifiers in Ginkgo and Gomega. Refreshing this list can be done by running `ginkgo nodot`
-- Additional arguments can be passed to specs. Pass them after the `--` separator
-- `--skipPackage` flag takes a regexp and ignores any packages with package names passing said regexp.
-- `--trace` flag prints out full stack traces when errors occur, not just the line at which the error occurs.
-
-Misc:
-
-- Start using semantic versioning
-- Start maintaining changelog
-
-Major refactor:
-
-- Pull out Ginkgo's internal to `internal`
-- Rename `example` everywhere to `spec`
-- Much more!
diff --git a/vendor/github.com/onsi/ginkgo/CONTRIBUTING.md b/vendor/github.com/onsi/ginkgo/CONTRIBUTING.md
deleted file mode 100644
index 908b95c2c..000000000
--- a/vendor/github.com/onsi/ginkgo/CONTRIBUTING.md
+++ /dev/null
@@ -1,33 +0,0 @@
-# Contributing to Ginkgo
-
-Your contributions to Ginkgo are essential for its long-term maintenance and improvement.
-
-- Please **open an issue first** - describe what problem you are trying to solve and give the community a forum for input and feedback ahead of investing time in writing code!
-- Ensure adequate test coverage:
- - When adding to the Ginkgo library, add unit and/or integration tests (under the `integration` folder).
- - When adding to the Ginkgo CLI, note that there are very few unit tests. Please add an integration test.
-- Update the documentation. Ginko uses `godoc` comments and documentation on the `gh-pages` branch.
- If relevant, please submit a docs PR to that branch alongside your code PR.
-
-Thanks for supporting Ginkgo!
-
-## Setup
-
-Fork the repo, then:
-
-```
-go get github.com/onsi/ginkgo
-go get github.com/onsi/gomega/...
-cd $GOPATH/src/github.com/onsi/ginkgo
-git remote add fork git@github.com:/ginkgo.git
-
-ginkgo -r -p # ensure tests are green
-go vet ./... # ensure linter is happy
-```
-
-## Making the PR
- - go to a new branch `git checkout -b my-feature`
- - make your changes
- - run tests and linter again (see above)
- - `git push fork`
- - open PR 🎉
diff --git a/vendor/github.com/onsi/ginkgo/README.md b/vendor/github.com/onsi/ginkgo/README.md
deleted file mode 100644
index a25ca5e03..000000000
--- a/vendor/github.com/onsi/ginkgo/README.md
+++ /dev/null
@@ -1,169 +0,0 @@
-![Ginkgo: A Go BDD Testing Framework](https://onsi.github.io/ginkgo/images/ginkgo.png)
-
-[![test](https://github.com/onsi/ginkgo/workflows/test/badge.svg?branch=master)](https://github.com/onsi/ginkgo/actions?query=workflow%3Atest+branch%3Amaster)
-
-Jump to the [docs](https://onsi.github.io/ginkgo/) | [中文文档](https://ke-chain.github.io/ginkgodoc) to learn more. To start rolling your Ginkgo tests *now* [keep reading](#set-me-up)!
-
-If you have a question, comment, bug report, feature request, etc. please open a GitHub issue, or visit the [Ginkgo Slack channel](https://app.slack.com/client/T029RQSE6/CQQ50BBNW).
-
-# Ginkgo 2.0 Release Candidate is available!
-
-An effort is underway to develop and deliver Ginkgo 2.0. The work is happening in the [ver2](https://github.com/onsi/ginkgo/tree/ver2) branch and a changelog and migration guide is being maintained on that branch [here](https://github.com/onsi/ginkgo/blob/ver2/docs/MIGRATING_TO_V2.md). Issue [#711](https://github.com/onsi/ginkgo/issues/711) is the central place for discussion.
-
-As described in the [changelog](https://github.com/onsi/ginkgo/blob/ver2/docs/MIGRATING_TO_V2.md) and [proposal](https://docs.google.com/document/d/1h28ZknXRsTLPNNiOjdHIO-F2toCzq4xoZDXbfYaBdoQ/edit#), Ginkgo 2.0 will clean up the Ginkgo codebase, deprecate and remove some v1 functionality, and add several new much-requested features. To help users get ready for the migration, Ginkgo v1 has started emitting deprecation warnings for features that will no longer be supported with links to documentation for how to migrate away from these features. If you have concerns or comments please chime in on [#711](https://github.com/onsi/ginkgo/issues/711).
-
-Please start exploring and using the V2 release! To get started follow the [Using the Release Candidate](https://github.com/onsi/ginkgo/blob/ver2/docs/MIGRATING_TO_V2.md#using-the-beta) directions in the migration guide.
-
-## TLDR
-Ginkgo builds on Go's `testing` package, allowing expressive [Behavior-Driven Development](https://en.wikipedia.org/wiki/Behavior-driven_development) ("BDD") style tests.
-It is typically (and optionally) paired with the [Gomega](https://github.com/onsi/gomega) matcher library.
-
-```go
-Describe("the strings package", func() {
- Context("strings.Contains()", func() {
- When("the string contains the substring in the middle", func() {
- It("returns `true`", func() {
- Expect(strings.Contains("Ginkgo is awesome", "is")).To(BeTrue())
- })
- })
- })
-})
-```
-
-## Feature List
-
-- Ginkgo uses Go's `testing` package and can live alongside your existing `testing` tests. It's easy to [bootstrap](https://onsi.github.io/ginkgo/#bootstrapping-a-suite) and start writing your [first tests](https://onsi.github.io/ginkgo/#adding-specs-to-a-suite)
-
-- Ginkgo allows you to write tests in Go using expressive [Behavior-Driven Development](https://en.wikipedia.org/wiki/Behavior-driven_development) ("BDD") style:
- - Nestable [`Describe`, `Context` and `When` container blocks](https://onsi.github.io/ginkgo/#organizing-specs-with-containers-describe-and-context)
- - [`BeforeEach` and `AfterEach` blocks](https://onsi.github.io/ginkgo/#extracting-common-setup-beforeeach) for setup and teardown
- - [`It` and `Specify` blocks](https://onsi.github.io/ginkgo/#individual-specs-it) that hold your assertions
- - [`JustBeforeEach` blocks](https://onsi.github.io/ginkgo/#separating-creation-and-configuration-justbeforeeach) that separate creation from configuration (also known as the subject action pattern).
- - [`BeforeSuite` and `AfterSuite` blocks](https://onsi.github.io/ginkgo/#global-setup-and-teardown-beforesuite-and-aftersuite) to prep for and cleanup after a suite.
-
-- A comprehensive test runner that lets you:
- - Mark specs as [pending](https://onsi.github.io/ginkgo/#pending-specs)
- - [Focus](https://onsi.github.io/ginkgo/#focused-specs) individual specs, and groups of specs, either programmatically or on the command line
- - Run your tests in [random order](https://onsi.github.io/ginkgo/#spec-permutation), and then reuse random seeds to replicate the same order.
- - Break up your test suite into parallel processes for straightforward [test parallelization](https://onsi.github.io/ginkgo/#parallel-specs)
-
-- `ginkgo`: a command line interface with plenty of handy command line arguments for [running your tests](https://onsi.github.io/ginkgo/#running-tests) and [generating](https://onsi.github.io/ginkgo/#generators) test files. Here are a few choice examples:
- - `ginkgo -nodes=N` runs your tests in `N` parallel processes and print out coherent output in realtime
- - `ginkgo -cover` runs your tests using Go's code coverage tool
- - `ginkgo convert` converts an XUnit-style `testing` package to a Ginkgo-style package
- - `ginkgo -focus="REGEXP"` and `ginkgo -skip="REGEXP"` allow you to specify a subset of tests to run via regular expression
- - `ginkgo -r` runs all tests suites under the current directory
- - `ginkgo -v` prints out identifying information for each tests just before it runs
-
- And much more: run `ginkgo help` for details!
-
- The `ginkgo` CLI is convenient, but purely optional -- Ginkgo works just fine with `go test`
-
-- `ginkgo watch` [watches](https://onsi.github.io/ginkgo/#watching-for-changes) packages *and their dependencies* for changes, then reruns tests. Run tests immediately as you develop!
-
-- Built-in support for testing [asynchronicity](https://onsi.github.io/ginkgo/#asynchronous-tests)
-
-- Built-in support for [benchmarking](https://onsi.github.io/ginkgo/#benchmark-tests) your code. Control the number of benchmark samples as you gather runtimes and other, arbitrary, bits of numerical information about your code.
-
-- [Completions for Sublime Text](https://github.com/onsi/ginkgo-sublime-completions): just use [Package Control](https://sublime.wbond.net/) to install `Ginkgo Completions`.
-
-- [Completions for VSCode](https://github.com/onsi/vscode-ginkgo): just use VSCode's extension installer to install `vscode-ginkgo`.
-
-- [Ginkgo tools for VSCode](https://marketplace.visualstudio.com/items?itemName=joselitofilho.ginkgotestexplorer): just use VSCode's extension installer to install `ginkgoTestExplorer`.
-
-- Straightforward support for third-party testing libraries such as [Gomock](https://code.google.com/p/gomock/) and [Testify](https://github.com/stretchr/testify). Check out the [docs](https://onsi.github.io/ginkgo/#third-party-integrations) for details.
-
-- A modular architecture that lets you easily:
- - Write [custom reporters](https://onsi.github.io/ginkgo/#writing-custom-reporters) (for example, Ginkgo comes with a [JUnit XML reporter](https://onsi.github.io/ginkgo/#generating-junit-xml-output) and a TeamCity reporter).
- - [Adapt an existing matcher library (or write your own!)](https://onsi.github.io/ginkgo/#using-other-matcher-libraries) to work with Ginkgo
-
-## [Gomega](https://github.com/onsi/gomega): Ginkgo's Preferred Matcher Library
-
-Ginkgo is best paired with Gomega. Learn more about Gomega [here](https://onsi.github.io/gomega/)
-
-## [Agouti](https://github.com/sclevine/agouti): A Go Acceptance Testing Framework
-
-Agouti allows you run WebDriver integration tests. Learn more about Agouti [here](https://agouti.org)
-
-## Getting Started
-
-You'll need the Go command-line tools. Follow the [installation instructions](https://golang.org/doc/install) if you don't have it installed.
-
-### Global installation
-To install the Ginkgo command line interface:
-```bash
-go get -u github.com/onsi/ginkgo/ginkgo
-```
-Note that this will install it to `$GOBIN`, which will need to be in the `$PATH` (or equivalent). Run `go help install` for more information.
-
-### Go module ["tools package"](https://github.com/golang/go/issues/25922):
-Create (or update) a file called `tools/tools.go` with the following contents:
-```go
-// +build tools
-
-package tools
-
-import (
- _ "github.com/onsi/ginkgo/ginkgo"
-)
-
-// This file imports packages that are used when running go generate, or used
-// during the development process but not otherwise depended on by built code.
-```
-The Ginkgo command can then be run via `go run github.com/onsi/ginkgo/ginkgo`.
-This approach allows the version of Ginkgo to be maintained under source control for reproducible results,
-and is well suited to automated test pipelines.
-
-### Bootstrapping
-```bash
-cd path/to/package/you/want/to/test
-
-ginkgo bootstrap # set up a new ginkgo suite
-ginkgo generate # will create a sample test file. edit this file and add your tests then...
-
-go test # to run your tests
-
-ginkgo # also runs your tests
-
-```
-
-## I'm new to Go: What are my testing options?
-
-Of course, I heartily recommend [Ginkgo](https://github.com/onsi/ginkgo) and [Gomega](https://github.com/onsi/gomega). Both packages are seeing heavy, daily, production use on a number of projects and boast a mature and comprehensive feature-set.
-
-With that said, it's great to know what your options are :)
-
-### What Go gives you out of the box
-
-Testing is a first class citizen in Go, however Go's built-in testing primitives are somewhat limited: The [testing](https://golang.org/pkg/testing) package provides basic XUnit style tests and no assertion library.
-
-### Matcher libraries for Go's XUnit style tests
-
-A number of matcher libraries have been written to augment Go's built-in XUnit style tests. Here are two that have gained traction:
-
-- [testify](https://github.com/stretchr/testify)
-- [gocheck](https://labix.org/gocheck)
-
-You can also use Ginkgo's matcher library [Gomega](https://github.com/onsi/gomega) in [XUnit style tests](https://onsi.github.io/gomega/#using-gomega-with-golangs-xunitstyle-tests)
-
-### BDD style testing frameworks
-
-There are a handful of BDD-style testing frameworks written for Go. Here are a few:
-
-- [Ginkgo](https://github.com/onsi/ginkgo) ;)
-- [GoConvey](https://github.com/smartystreets/goconvey)
-- [Goblin](https://github.com/franela/goblin)
-- [Mao](https://github.com/azer/mao)
-- [Zen](https://github.com/pranavraja/zen)
-
-Finally, @shageman has [put together](https://github.com/shageman/gotestit) a comprehensive comparison of Go testing libraries.
-
-Go explore!
-
-## License
-
-Ginkgo is MIT-Licensed
-
-## Contributing
-
-See [CONTRIBUTING.md](CONTRIBUTING.md)
diff --git a/vendor/github.com/onsi/ginkgo/config/config.go b/vendor/github.com/onsi/ginkgo/config/config.go
deleted file mode 100644
index 3130c7789..000000000
--- a/vendor/github.com/onsi/ginkgo/config/config.go
+++ /dev/null
@@ -1,232 +0,0 @@
-/*
-Ginkgo accepts a number of configuration options.
-
-These are documented [here](http://onsi.github.io/ginkgo/#the-ginkgo-cli)
-
-You can also learn more via
-
- ginkgo help
-
-or (I kid you not):
-
- go test -asdf
-*/
-package config
-
-import (
- "flag"
- "time"
-
- "fmt"
-)
-
-const VERSION = "1.16.5"
-
-type GinkgoConfigType struct {
- RandomSeed int64
- RandomizeAllSpecs bool
- RegexScansFilePath bool
- FocusStrings []string
- SkipStrings []string
- SkipMeasurements bool
- FailOnPending bool
- FailFast bool
- FlakeAttempts int
- EmitSpecProgress bool
- DryRun bool
- DebugParallel bool
-
- ParallelNode int
- ParallelTotal int
- SyncHost string
- StreamHost string
-}
-
-var GinkgoConfig = GinkgoConfigType{}
-
-type DefaultReporterConfigType struct {
- NoColor bool
- SlowSpecThreshold float64
- NoisyPendings bool
- NoisySkippings bool
- Succinct bool
- Verbose bool
- FullTrace bool
- ReportPassed bool
- ReportFile string
-}
-
-var DefaultReporterConfig = DefaultReporterConfigType{}
-
-func processPrefix(prefix string) string {
- if prefix != "" {
- prefix += "."
- }
- return prefix
-}
-
-type flagFunc func(string)
-
-func (f flagFunc) String() string { return "" }
-func (f flagFunc) Set(s string) error { f(s); return nil }
-
-func Flags(flagSet *flag.FlagSet, prefix string, includeParallelFlags bool) {
- prefix = processPrefix(prefix)
- flagSet.Int64Var(&(GinkgoConfig.RandomSeed), prefix+"seed", time.Now().Unix(), "The seed used to randomize the spec suite.")
- flagSet.BoolVar(&(GinkgoConfig.RandomizeAllSpecs), prefix+"randomizeAllSpecs", false, "If set, ginkgo will randomize all specs together. By default, ginkgo only randomizes the top level Describe, Context and When groups.")
- flagSet.BoolVar(&(GinkgoConfig.SkipMeasurements), prefix+"skipMeasurements", false, "If set, ginkgo will skip any measurement specs.")
- flagSet.BoolVar(&(GinkgoConfig.FailOnPending), prefix+"failOnPending", false, "If set, ginkgo will mark the test suite as failed if any specs are pending.")
- flagSet.BoolVar(&(GinkgoConfig.FailFast), prefix+"failFast", false, "If set, ginkgo will stop running a test suite after a failure occurs.")
-
- flagSet.BoolVar(&(GinkgoConfig.DryRun), prefix+"dryRun", false, "If set, ginkgo will walk the test hierarchy without actually running anything. Best paired with -v.")
-
- flagSet.Var(flagFunc(flagFocus), prefix+"focus", "If set, ginkgo will only run specs that match this regular expression. Can be specified multiple times, values are ORed.")
- flagSet.Var(flagFunc(flagSkip), prefix+"skip", "If set, ginkgo will only run specs that do not match this regular expression. Can be specified multiple times, values are ORed.")
-
- flagSet.BoolVar(&(GinkgoConfig.RegexScansFilePath), prefix+"regexScansFilePath", false, "If set, ginkgo regex matching also will look at the file path (code location).")
-
- flagSet.IntVar(&(GinkgoConfig.FlakeAttempts), prefix+"flakeAttempts", 1, "Make up to this many attempts to run each spec. Please note that if any of the attempts succeed, the suite will not be failed. But any failures will still be recorded.")
-
- flagSet.BoolVar(&(GinkgoConfig.EmitSpecProgress), prefix+"progress", false, "If set, ginkgo will emit progress information as each spec runs to the GinkgoWriter.")
-
- flagSet.BoolVar(&(GinkgoConfig.DebugParallel), prefix+"debug", false, "If set, ginkgo will emit node output to files when running in parallel.")
-
- if includeParallelFlags {
- flagSet.IntVar(&(GinkgoConfig.ParallelNode), prefix+"parallel.node", 1, "This worker node's (one-indexed) node number. For running specs in parallel.")
- flagSet.IntVar(&(GinkgoConfig.ParallelTotal), prefix+"parallel.total", 1, "The total number of worker nodes. For running specs in parallel.")
- flagSet.StringVar(&(GinkgoConfig.SyncHost), prefix+"parallel.synchost", "", "The address for the server that will synchronize the running nodes.")
- flagSet.StringVar(&(GinkgoConfig.StreamHost), prefix+"parallel.streamhost", "", "The address for the server that the running nodes should stream data to.")
- }
-
- flagSet.BoolVar(&(DefaultReporterConfig.NoColor), prefix+"noColor", false, "If set, suppress color output in default reporter.")
- flagSet.Float64Var(&(DefaultReporterConfig.SlowSpecThreshold), prefix+"slowSpecThreshold", 5.0, "(in seconds) Specs that take longer to run than this threshold are flagged as slow by the default reporter.")
- flagSet.BoolVar(&(DefaultReporterConfig.NoisyPendings), prefix+"noisyPendings", true, "If set, default reporter will shout about pending tests.")
- flagSet.BoolVar(&(DefaultReporterConfig.NoisySkippings), prefix+"noisySkippings", true, "If set, default reporter will shout about skipping tests.")
- flagSet.BoolVar(&(DefaultReporterConfig.Verbose), prefix+"v", false, "If set, default reporter print out all specs as they begin.")
- flagSet.BoolVar(&(DefaultReporterConfig.Succinct), prefix+"succinct", false, "If set, default reporter prints out a very succinct report")
- flagSet.BoolVar(&(DefaultReporterConfig.FullTrace), prefix+"trace", false, "If set, default reporter prints out the full stack trace when a failure occurs")
- flagSet.BoolVar(&(DefaultReporterConfig.ReportPassed), prefix+"reportPassed", false, "If set, default reporter prints out captured output of passed tests.")
- flagSet.StringVar(&(DefaultReporterConfig.ReportFile), prefix+"reportFile", "", "Override the default reporter output file path.")
-
-}
-
-func BuildFlagArgs(prefix string, ginkgo GinkgoConfigType, reporter DefaultReporterConfigType) []string {
- prefix = processPrefix(prefix)
- result := make([]string, 0)
-
- if ginkgo.RandomSeed > 0 {
- result = append(result, fmt.Sprintf("--%sseed=%d", prefix, ginkgo.RandomSeed))
- }
-
- if ginkgo.RandomizeAllSpecs {
- result = append(result, fmt.Sprintf("--%srandomizeAllSpecs", prefix))
- }
-
- if ginkgo.SkipMeasurements {
- result = append(result, fmt.Sprintf("--%sskipMeasurements", prefix))
- }
-
- if ginkgo.FailOnPending {
- result = append(result, fmt.Sprintf("--%sfailOnPending", prefix))
- }
-
- if ginkgo.FailFast {
- result = append(result, fmt.Sprintf("--%sfailFast", prefix))
- }
-
- if ginkgo.DryRun {
- result = append(result, fmt.Sprintf("--%sdryRun", prefix))
- }
-
- for _, s := range ginkgo.FocusStrings {
- result = append(result, fmt.Sprintf("--%sfocus=%s", prefix, s))
- }
-
- for _, s := range ginkgo.SkipStrings {
- result = append(result, fmt.Sprintf("--%sskip=%s", prefix, s))
- }
-
- if ginkgo.FlakeAttempts > 1 {
- result = append(result, fmt.Sprintf("--%sflakeAttempts=%d", prefix, ginkgo.FlakeAttempts))
- }
-
- if ginkgo.EmitSpecProgress {
- result = append(result, fmt.Sprintf("--%sprogress", prefix))
- }
-
- if ginkgo.DebugParallel {
- result = append(result, fmt.Sprintf("--%sdebug", prefix))
- }
-
- if ginkgo.ParallelNode != 0 {
- result = append(result, fmt.Sprintf("--%sparallel.node=%d", prefix, ginkgo.ParallelNode))
- }
-
- if ginkgo.ParallelTotal != 0 {
- result = append(result, fmt.Sprintf("--%sparallel.total=%d", prefix, ginkgo.ParallelTotal))
- }
-
- if ginkgo.StreamHost != "" {
- result = append(result, fmt.Sprintf("--%sparallel.streamhost=%s", prefix, ginkgo.StreamHost))
- }
-
- if ginkgo.SyncHost != "" {
- result = append(result, fmt.Sprintf("--%sparallel.synchost=%s", prefix, ginkgo.SyncHost))
- }
-
- if ginkgo.RegexScansFilePath {
- result = append(result, fmt.Sprintf("--%sregexScansFilePath", prefix))
- }
-
- if reporter.NoColor {
- result = append(result, fmt.Sprintf("--%snoColor", prefix))
- }
-
- if reporter.SlowSpecThreshold > 0 {
- result = append(result, fmt.Sprintf("--%sslowSpecThreshold=%.5f", prefix, reporter.SlowSpecThreshold))
- }
-
- if !reporter.NoisyPendings {
- result = append(result, fmt.Sprintf("--%snoisyPendings=false", prefix))
- }
-
- if !reporter.NoisySkippings {
- result = append(result, fmt.Sprintf("--%snoisySkippings=false", prefix))
- }
-
- if reporter.Verbose {
- result = append(result, fmt.Sprintf("--%sv", prefix))
- }
-
- if reporter.Succinct {
- result = append(result, fmt.Sprintf("--%ssuccinct", prefix))
- }
-
- if reporter.FullTrace {
- result = append(result, fmt.Sprintf("--%strace", prefix))
- }
-
- if reporter.ReportPassed {
- result = append(result, fmt.Sprintf("--%sreportPassed", prefix))
- }
-
- if reporter.ReportFile != "" {
- result = append(result, fmt.Sprintf("--%sreportFile=%s", prefix, reporter.ReportFile))
- }
-
- return result
-}
-
-// flagFocus implements the -focus flag.
-func flagFocus(arg string) {
- if arg != "" {
- GinkgoConfig.FocusStrings = append(GinkgoConfig.FocusStrings, arg)
- }
-}
-
-// flagSkip implements the -skip flag.
-func flagSkip(arg string) {
- if arg != "" {
- GinkgoConfig.SkipStrings = append(GinkgoConfig.SkipStrings, arg)
- }
-}
diff --git a/vendor/github.com/onsi/ginkgo/ginkgo/bootstrap_command.go b/vendor/github.com/onsi/ginkgo/ginkgo/bootstrap_command.go
deleted file mode 100644
index ea10e9796..000000000
--- a/vendor/github.com/onsi/ginkgo/ginkgo/bootstrap_command.go
+++ /dev/null
@@ -1,201 +0,0 @@
-package main
-
-import (
- "bytes"
- "flag"
- "fmt"
- "io/ioutil"
- "os"
- "path/filepath"
- "strings"
- "text/template"
-
- "go/build"
-
- sprig "github.com/go-task/slim-sprig"
- "github.com/onsi/ginkgo/ginkgo/nodot"
-)
-
-func BuildBootstrapCommand() *Command {
- var (
- agouti, noDot, internal bool
- customBootstrapFile string
- )
- flagSet := flag.NewFlagSet("bootstrap", flag.ExitOnError)
- flagSet.BoolVar(&agouti, "agouti", false, "If set, bootstrap will generate a bootstrap file for writing Agouti tests")
- flagSet.BoolVar(&noDot, "nodot", false, "If set, bootstrap will generate a bootstrap file that does not . import ginkgo and gomega")
- flagSet.BoolVar(&internal, "internal", false, "If set, generate will generate a test file that uses the regular package name")
- flagSet.StringVar(&customBootstrapFile, "template", "", "If specified, generate will use the contents of the file passed as the bootstrap template")
-
- return &Command{
- Name: "bootstrap",
- FlagSet: flagSet,
- UsageCommand: "ginkgo bootstrap ",
- Usage: []string{
- "Bootstrap a test suite for the current package",
- "Accepts the following flags:",
- },
- Command: func(args []string, additionalArgs []string) {
- generateBootstrap(agouti, noDot, internal, customBootstrapFile)
- emitRCAdvertisement()
- },
- }
-}
-
-var bootstrapText = `package {{.Package}}
-
-import (
- "testing"
-
- {{.GinkgoImport}}
- {{.GomegaImport}}
-)
-
-func Test{{.FormattedName}}(t *testing.T) {
- RegisterFailHandler(Fail)
- RunSpecs(t, "{{.FormattedName}} Suite")
-}
-`
-
-var agoutiBootstrapText = `package {{.Package}}
-
-import (
- "testing"
-
- {{.GinkgoImport}}
- {{.GomegaImport}}
- "github.com/sclevine/agouti"
-)
-
-func Test{{.FormattedName}}(t *testing.T) {
- RegisterFailHandler(Fail)
- RunSpecs(t, "{{.FormattedName}} Suite")
-}
-
-var agoutiDriver *agouti.WebDriver
-
-var _ = BeforeSuite(func() {
- // Choose a WebDriver:
-
- agoutiDriver = agouti.PhantomJS()
- // agoutiDriver = agouti.Selenium()
- // agoutiDriver = agouti.ChromeDriver()
-
- Expect(agoutiDriver.Start()).To(Succeed())
-})
-
-var _ = AfterSuite(func() {
- Expect(agoutiDriver.Stop()).To(Succeed())
-})
-`
-
-type bootstrapData struct {
- Package string
- FormattedName string
- GinkgoImport string
- GomegaImport string
-}
-
-func getPackageAndFormattedName() (string, string, string) {
- path, err := os.Getwd()
- if err != nil {
- complainAndQuit("Could not get current working directory: \n" + err.Error())
- }
-
- dirName := strings.Replace(filepath.Base(path), "-", "_", -1)
- dirName = strings.Replace(dirName, " ", "_", -1)
-
- pkg, err := build.ImportDir(path, 0)
- packageName := pkg.Name
- if err != nil {
- packageName = dirName
- }
-
- formattedName := prettifyPackageName(filepath.Base(path))
- return packageName, dirName, formattedName
-}
-
-func prettifyPackageName(name string) string {
- name = strings.Replace(name, "-", " ", -1)
- name = strings.Replace(name, "_", " ", -1)
- name = strings.Title(name)
- name = strings.Replace(name, " ", "", -1)
- return name
-}
-
-func determinePackageName(name string, internal bool) string {
- if internal {
- return name
- }
-
- return name + "_test"
-}
-
-func fileExists(path string) bool {
- _, err := os.Stat(path)
- return err == nil
-}
-
-func generateBootstrap(agouti, noDot, internal bool, customBootstrapFile string) {
- packageName, bootstrapFilePrefix, formattedName := getPackageAndFormattedName()
- data := bootstrapData{
- Package: determinePackageName(packageName, internal),
- FormattedName: formattedName,
- GinkgoImport: `. "github.com/onsi/ginkgo"`,
- GomegaImport: `. "github.com/onsi/gomega"`,
- }
-
- if noDot {
- data.GinkgoImport = `"github.com/onsi/ginkgo"`
- data.GomegaImport = `"github.com/onsi/gomega"`
- }
-
- targetFile := fmt.Sprintf("%s_suite_test.go", bootstrapFilePrefix)
- if fileExists(targetFile) {
- fmt.Printf("%s already exists.\n\n", targetFile)
- os.Exit(1)
- } else {
- fmt.Printf("Generating ginkgo test suite bootstrap for %s in:\n\t%s\n", packageName, targetFile)
- }
-
- f, err := os.Create(targetFile)
- if err != nil {
- complainAndQuit("Could not create file: " + err.Error())
- panic(err.Error())
- }
- defer f.Close()
-
- var templateText string
- if customBootstrapFile != "" {
- tpl, err := ioutil.ReadFile(customBootstrapFile)
- if err != nil {
- panic(err.Error())
- }
- templateText = string(tpl)
- } else if agouti {
- templateText = agoutiBootstrapText
- } else {
- templateText = bootstrapText
- }
-
- bootstrapTemplate, err := template.New("bootstrap").Funcs(sprig.TxtFuncMap()).Parse(templateText)
- if err != nil {
- panic(err.Error())
- }
-
- buf := &bytes.Buffer{}
- bootstrapTemplate.Execute(buf, data)
-
- if noDot {
- contents, err := nodot.ApplyNoDot(buf.Bytes())
- if err != nil {
- complainAndQuit("Failed to import nodot declarations: " + err.Error())
- }
- fmt.Println("To update the nodot declarations in the future, switch to this directory and run:\n\tginkgo nodot")
- buf = bytes.NewBuffer(contents)
- }
-
- buf.WriteTo(f)
-
- goFmt(targetFile)
-}
diff --git a/vendor/github.com/onsi/ginkgo/ginkgo/build_command.go b/vendor/github.com/onsi/ginkgo/ginkgo/build_command.go
deleted file mode 100644
index 2fddef0f7..000000000
--- a/vendor/github.com/onsi/ginkgo/ginkgo/build_command.go
+++ /dev/null
@@ -1,66 +0,0 @@
-package main
-
-import (
- "flag"
- "fmt"
- "os"
- "path/filepath"
-
- "github.com/onsi/ginkgo/ginkgo/interrupthandler"
- "github.com/onsi/ginkgo/ginkgo/testrunner"
-)
-
-func BuildBuildCommand() *Command {
- commandFlags := NewBuildCommandFlags(flag.NewFlagSet("build", flag.ExitOnError))
- interruptHandler := interrupthandler.NewInterruptHandler()
- builder := &SpecBuilder{
- commandFlags: commandFlags,
- interruptHandler: interruptHandler,
- }
-
- return &Command{
- Name: "build",
- FlagSet: commandFlags.FlagSet,
- UsageCommand: "ginkgo build ",
- Usage: []string{
- "Build the passed in (or the package in the current directory if left blank).",
- "Accepts the following flags:",
- },
- Command: builder.BuildSpecs,
- }
-}
-
-type SpecBuilder struct {
- commandFlags *RunWatchAndBuildCommandFlags
- interruptHandler *interrupthandler.InterruptHandler
-}
-
-func (r *SpecBuilder) BuildSpecs(args []string, additionalArgs []string) {
- r.commandFlags.computeNodes()
-
- suites, _ := findSuites(args, r.commandFlags.Recurse, r.commandFlags.SkipPackage, false)
-
- if len(suites) == 0 {
- complainAndQuit("Found no test suites")
- }
-
- passed := true
- for _, suite := range suites {
- runner := testrunner.New(suite, 1, false, 0, r.commandFlags.GoOpts, nil)
- fmt.Printf("Compiling %s...\n", suite.PackageName)
-
- path, _ := filepath.Abs(filepath.Join(suite.Path, fmt.Sprintf("%s.test", suite.PackageName)))
- err := runner.CompileTo(path)
- if err != nil {
- fmt.Println(err.Error())
- passed = false
- } else {
- fmt.Printf(" compiled %s.test\n", suite.PackageName)
- }
- }
-
- if passed {
- os.Exit(0)
- }
- os.Exit(1)
-}
diff --git a/vendor/github.com/onsi/ginkgo/ginkgo/convert/ginkgo_ast_nodes.go b/vendor/github.com/onsi/ginkgo/ginkgo/convert/ginkgo_ast_nodes.go
deleted file mode 100644
index 02e2b3b32..000000000
--- a/vendor/github.com/onsi/ginkgo/ginkgo/convert/ginkgo_ast_nodes.go
+++ /dev/null
@@ -1,123 +0,0 @@
-package convert
-
-import (
- "fmt"
- "go/ast"
- "strings"
- "unicode"
-)
-
-/*
- * Creates a func init() node
- */
-func createVarUnderscoreBlock() *ast.ValueSpec {
- valueSpec := &ast.ValueSpec{}
- object := &ast.Object{Kind: 4, Name: "_", Decl: valueSpec, Data: 0}
- ident := &ast.Ident{Name: "_", Obj: object}
- valueSpec.Names = append(valueSpec.Names, ident)
- return valueSpec
-}
-
-/*
- * Creates a Describe("Testing with ginkgo", func() { }) node
- */
-func createDescribeBlock() *ast.CallExpr {
- blockStatement := &ast.BlockStmt{List: []ast.Stmt{}}
-
- fieldList := &ast.FieldList{}
- funcType := &ast.FuncType{Params: fieldList}
- funcLit := &ast.FuncLit{Type: funcType, Body: blockStatement}
- basicLit := &ast.BasicLit{Kind: 9, Value: "\"Testing with Ginkgo\""}
- describeIdent := &ast.Ident{Name: "Describe"}
- return &ast.CallExpr{Fun: describeIdent, Args: []ast.Expr{basicLit, funcLit}}
-}
-
-/*
- * Convenience function to return the name of the *testing.T param
- * for a Test function that will be rewritten. This is useful because
- * we will want to replace the usage of this named *testing.T inside the
- * body of the function with a GinktoT.
- */
-func namedTestingTArg(node *ast.FuncDecl) string {
- return node.Type.Params.List[0].Names[0].Name // *exhale*
-}
-
-/*
- * Convenience function to return the block statement node for a Describe statement
- */
-func blockStatementFromDescribe(desc *ast.CallExpr) *ast.BlockStmt {
- var funcLit *ast.FuncLit
- var found = false
-
- for _, node := range desc.Args {
- switch node := node.(type) {
- case *ast.FuncLit:
- found = true
- funcLit = node
- break
- }
- }
-
- if !found {
- panic("Error finding ast.FuncLit inside describe statement. Somebody done goofed.")
- }
-
- return funcLit.Body
-}
-
-/* convenience function for creating an It("TestNameHere")
- * with all the body of the test function inside the anonymous
- * func passed to It()
- */
-func createItStatementForTestFunc(testFunc *ast.FuncDecl) *ast.ExprStmt {
- blockStatement := &ast.BlockStmt{List: testFunc.Body.List}
- fieldList := &ast.FieldList{}
- funcType := &ast.FuncType{Params: fieldList}
- funcLit := &ast.FuncLit{Type: funcType, Body: blockStatement}
-
- testName := rewriteTestName(testFunc.Name.Name)
- basicLit := &ast.BasicLit{Kind: 9, Value: fmt.Sprintf("\"%s\"", testName)}
- itBlockIdent := &ast.Ident{Name: "It"}
- callExpr := &ast.CallExpr{Fun: itBlockIdent, Args: []ast.Expr{basicLit, funcLit}}
- return &ast.ExprStmt{X: callExpr}
-}
-
-/*
-* rewrite test names to be human readable
-* eg: rewrites "TestSomethingAmazing" as "something amazing"
- */
-func rewriteTestName(testName string) string {
- nameComponents := []string{}
- currentString := ""
- indexOfTest := strings.Index(testName, "Test")
- if indexOfTest != 0 {
- return testName
- }
-
- testName = strings.Replace(testName, "Test", "", 1)
- first, rest := testName[0], testName[1:]
- testName = string(unicode.ToLower(rune(first))) + rest
-
- for _, rune := range testName {
- if unicode.IsUpper(rune) {
- nameComponents = append(nameComponents, currentString)
- currentString = string(unicode.ToLower(rune))
- } else {
- currentString += string(rune)
- }
- }
-
- return strings.Join(append(nameComponents, currentString), " ")
-}
-
-func newGinkgoTFromIdent(ident *ast.Ident) *ast.CallExpr {
- return &ast.CallExpr{
- Lparen: ident.NamePos + 1,
- Rparen: ident.NamePos + 2,
- Fun: &ast.Ident{Name: "GinkgoT"},
- }
-}
-
-func newGinkgoTInterface() *ast.Ident {
- return &ast.Ident{Name: "GinkgoTInterface"}
-}
diff --git a/vendor/github.com/onsi/ginkgo/ginkgo/convert/import.go b/vendor/github.com/onsi/ginkgo/ginkgo/convert/import.go
deleted file mode 100644
index 06c6ec94c..000000000
--- a/vendor/github.com/onsi/ginkgo/ginkgo/convert/import.go
+++ /dev/null
@@ -1,90 +0,0 @@
-package convert
-
-import (
- "fmt"
- "go/ast"
-)
-
-/*
- * Given the root node of an AST, returns the node containing the
- * import statements for the file.
- */
-func importsForRootNode(rootNode *ast.File) (imports *ast.GenDecl, err error) {
- for _, declaration := range rootNode.Decls {
- decl, ok := declaration.(*ast.GenDecl)
- if !ok || len(decl.Specs) == 0 {
- continue
- }
-
- _, ok = decl.Specs[0].(*ast.ImportSpec)
- if ok {
- imports = decl
- return
- }
- }
-
- err = fmt.Errorf("Could not find imports for root node:\n\t%#v\n", rootNode)
- return
-}
-
-/*
- * Removes "testing" import, if present
- */
-func removeTestingImport(rootNode *ast.File) {
- importDecl, err := importsForRootNode(rootNode)
- if err != nil {
- panic(err.Error())
- }
-
- var index int
- for i, importSpec := range importDecl.Specs {
- importSpec := importSpec.(*ast.ImportSpec)
- if importSpec.Path.Value == "\"testing\"" {
- index = i
- break
- }
- }
-
- importDecl.Specs = append(importDecl.Specs[:index], importDecl.Specs[index+1:]...)
-}
-
-/*
- * Adds import statements for onsi/ginkgo, if missing
- */
-func addGinkgoImports(rootNode *ast.File) {
- importDecl, err := importsForRootNode(rootNode)
- if err != nil {
- panic(err.Error())
- }
-
- if len(importDecl.Specs) == 0 {
- // TODO: might need to create a import decl here
- panic("unimplemented : expected to find an imports block")
- }
-
- needsGinkgo := true
- for _, importSpec := range importDecl.Specs {
- importSpec, ok := importSpec.(*ast.ImportSpec)
- if !ok {
- continue
- }
-
- if importSpec.Path.Value == "\"github.com/onsi/ginkgo\"" {
- needsGinkgo = false
- }
- }
-
- if needsGinkgo {
- importDecl.Specs = append(importDecl.Specs, createImport(".", "\"github.com/onsi/ginkgo\""))
- }
-}
-
-/*
- * convenience function to create an import statement
- */
-func createImport(name, path string) *ast.ImportSpec {
- return &ast.ImportSpec{
- Name: &ast.Ident{Name: name},
- Path: &ast.BasicLit{Kind: 9, Value: path},
- }
-}
diff --git a/vendor/github.com/onsi/ginkgo/ginkgo/convert/package_rewriter.go b/vendor/github.com/onsi/ginkgo/ginkgo/convert/package_rewriter.go
deleted file mode 100644
index 363e52fe2..000000000
--- a/vendor/github.com/onsi/ginkgo/ginkgo/convert/package_rewriter.go
+++ /dev/null
@@ -1,128 +0,0 @@
-package convert
-
-import (
- "fmt"
- "go/build"
- "io/ioutil"
- "os"
- "os/exec"
- "path/filepath"
- "regexp"
-)
-
-/*
- * RewritePackage takes a name (eg: my-package/tools), finds its test files using
- * Go's build package, and then rewrites them. A ginkgo test suite file will
- * also be added for this package, and all of its child packages.
- */
-func RewritePackage(packageName string) {
- pkg, err := packageWithName(packageName)
- if err != nil {
- panic(fmt.Sprintf("unexpected error reading package: '%s'\n%s\n", packageName, err.Error()))
- }
-
- for _, filename := range findTestsInPackage(pkg) {
- rewriteTestsInFile(filename)
- }
-}
-
-/*
- * Given a package, findTestsInPackage reads the test files in the directory,
- * and then recurses on each child package, returning a slice of all test files
- * found in this process.
- */
-func findTestsInPackage(pkg *build.Package) (testfiles []string) {
- for _, file := range append(pkg.TestGoFiles, pkg.XTestGoFiles...) {
- testfile, _ := filepath.Abs(filepath.Join(pkg.Dir, file))
- testfiles = append(testfiles, testfile)
- }
-
- dirFiles, err := ioutil.ReadDir(pkg.Dir)
- if err != nil {
- panic(fmt.Sprintf("unexpected error reading dir: '%s'\n%s\n", pkg.Dir, err.Error()))
- }
-
- re := regexp.MustCompile(`^[._]`)
-
- for _, file := range dirFiles {
- if !file.IsDir() {
- continue
- }
-
- if re.Match([]byte(file.Name())) {
- continue
- }
-
- packageName := filepath.Join(pkg.ImportPath, file.Name())
- subPackage, err := packageWithName(packageName)
- if err != nil {
- panic(fmt.Sprintf("unexpected error reading package: '%s'\n%s\n", packageName, err.Error()))
- }
-
- testfiles = append(testfiles, findTestsInPackage(subPackage)...)
- }
-
- addGinkgoSuiteForPackage(pkg)
- goFmtPackage(pkg)
- return
-}
-
-/*
- * Shells out to `ginkgo bootstrap` to create a test suite file
- */
-func addGinkgoSuiteForPackage(pkg *build.Package) {
- originalDir, err := os.Getwd()
- if err != nil {
- panic(err)
- }
-
- suite_test_file := filepath.Join(pkg.Dir, pkg.Name+"_suite_test.go")
-
- _, err = os.Stat(suite_test_file)
- if err == nil {
- return // test file already exists, this should be a no-op
- }
-
- err = os.Chdir(pkg.Dir)
- if err != nil {
- panic(err)
- }
-
- output, err := exec.Command("ginkgo", "bootstrap").Output()
-
- if err != nil {
- panic(fmt.Sprintf("error running 'ginkgo bootstrap'.\nstdout: %s\n%s\n", output, err.Error()))
- }
-
- err = os.Chdir(originalDir)
- if err != nil {
- panic(err)
- }
-}
-
-/*
- * Shells out to `go fmt` to format the package
- */
-func goFmtPackage(pkg *build.Package) {
- path, _ := filepath.Abs(pkg.ImportPath)
- output, err := exec.Command("go", "fmt", path).CombinedOutput()
-
- if err != nil {
- fmt.Printf("Warning: Error running 'go fmt %s'.\nstdout: %s\n%s\n", path, output, err.Error())
- }
-}
-
-/*
- * Attempts to return a package with its test files already read.
- * The ImportMode arg to build.Import lets you specify if you want go to read the
- * buildable go files inside the package, but it fails if the package has no go files
- */
-func packageWithName(name string) (pkg *build.Package, err error) {
- pkg, err = build.Default.Import(name, ".", build.ImportMode(0))
- if err == nil {
- return
- }
-
- pkg, err = build.Default.Import(name, ".", build.ImportMode(1))
- return
-}
diff --git a/vendor/github.com/onsi/ginkgo/ginkgo/convert/test_finder.go b/vendor/github.com/onsi/ginkgo/ginkgo/convert/test_finder.go
deleted file mode 100644
index b33595c9a..000000000
--- a/vendor/github.com/onsi/ginkgo/ginkgo/convert/test_finder.go
+++ /dev/null
@@ -1,56 +0,0 @@
-package convert
-
-import (
- "go/ast"
- "regexp"
-)
-
-/*
- * Given a root node, walks its top level statements and returns
- * points to function nodes to rewrite as It statements.
- * These functions, according to Go testing convention, must be named
- * TestWithCamelCasedName and receive a single *testing.T argument.
- */
-func findTestFuncs(rootNode *ast.File) (testsToRewrite []*ast.FuncDecl) {
- testNameRegexp := regexp.MustCompile("^Test[0-9A-Z].+")
-
- ast.Inspect(rootNode, func(node ast.Node) bool {
- if node == nil {
- return false
- }
-
- switch node := node.(type) {
- case *ast.FuncDecl:
- matches := testNameRegexp.MatchString(node.Name.Name)
-
- if matches && receivesTestingT(node) {
- testsToRewrite = append(testsToRewrite, node)
- }
- }
-
- return true
- })
-
- return
-}
-
-/*
- * convenience function that looks at args to a function and determines if its
- * params include an argument of type *testing.T
- */
-func receivesTestingT(node *ast.FuncDecl) bool {
- if len(node.Type.Params.List) != 1 {
- return false
- }
-
- base, ok := node.Type.Params.List[0].Type.(*ast.StarExpr)
- if !ok {
- return false
- }
-
- intermediate := base.X.(*ast.SelectorExpr)
- isTestingPackage := intermediate.X.(*ast.Ident).Name == "testing"
- isTestingT := intermediate.Sel.Name == "T"
-
- return isTestingPackage && isTestingT
-}
diff --git a/vendor/github.com/onsi/ginkgo/ginkgo/convert/testfile_rewriter.go b/vendor/github.com/onsi/ginkgo/ginkgo/convert/testfile_rewriter.go
deleted file mode 100644
index 60c73504a..000000000
--- a/vendor/github.com/onsi/ginkgo/ginkgo/convert/testfile_rewriter.go
+++ /dev/null
@@ -1,162 +0,0 @@
-package convert
-
-import (
- "bytes"
- "fmt"
- "go/ast"
- "go/format"
- "go/parser"
- "go/token"
- "io/ioutil"
- "os"
-)
-
-/*
- * Given a file path, rewrites any tests in the Ginkgo format.
- * First, we parse the AST, and update the imports declaration.
- * Then, we walk the first child elements in the file, returning tests to rewrite.
- * A top level init func is declared, with a single Describe func inside.
- * Then the test functions to rewrite are inserted as It statements inside the Describe.
- * Finally we walk the rest of the file, replacing other usages of *testing.T
- * Once that is complete, we write the AST back out again to its file.
- */
-func rewriteTestsInFile(pathToFile string) {
- fileSet := token.NewFileSet()
- rootNode, err := parser.ParseFile(fileSet, pathToFile, nil, parser.ParseComments)
- if err != nil {
- panic(fmt.Sprintf("Error parsing test file '%s':\n%s\n", pathToFile, err.Error()))
- }
-
- addGinkgoImports(rootNode)
- removeTestingImport(rootNode)
-
- varUnderscoreBlock := createVarUnderscoreBlock()
- describeBlock := createDescribeBlock()
- varUnderscoreBlock.Values = []ast.Expr{describeBlock}
-
- for _, testFunc := range findTestFuncs(rootNode) {
- rewriteTestFuncAsItStatement(testFunc, rootNode, describeBlock)
- }
-
- underscoreDecl := &ast.GenDecl{
- Tok: 85, // gah, magick numbers are needed to make this work
- TokPos: 14, // this tricks Go into writing "var _ = Describe"
- Specs: []ast.Spec{varUnderscoreBlock},
- }
-
- imports := rootNode.Decls[0]
- tail := rootNode.Decls[1:]
- rootNode.Decls = append(append([]ast.Decl{imports}, underscoreDecl), tail...)
- rewriteOtherFuncsToUseGinkgoT(rootNode.Decls)
- walkNodesInRootNodeReplacingTestingT(rootNode)
-
- var buffer bytes.Buffer
- if err = format.Node(&buffer, fileSet, rootNode); err != nil {
- panic(fmt.Sprintf("Error formatting ast node after rewriting tests.\n%s\n", err.Error()))
- }
-
- fileInfo, err := os.Stat(pathToFile)
-
- if err != nil {
- panic(fmt.Sprintf("Error stat'ing file: %s\n", pathToFile))
- }
-
- err = ioutil.WriteFile(pathToFile, buffer.Bytes(), fileInfo.Mode())
-}
-
-/*
- * Given a test func named TestDoesSomethingNeat, rewrites it as
- * It("does something neat", func() { __test_body_here__ }) and adds it
- * to the Describe's list of statements
- */
-func rewriteTestFuncAsItStatement(testFunc *ast.FuncDecl, rootNode *ast.File, describe *ast.CallExpr) {
- var funcIndex int = -1
- for index, child := range rootNode.Decls {
- if child == testFunc {
- funcIndex = index
- break
- }
- }
-
- if funcIndex < 0 {
- panic(fmt.Sprintf("Assert failed: Error finding index for test node %s\n", testFunc.Name.Name))
- }
-
- var block *ast.BlockStmt = blockStatementFromDescribe(describe)
- block.List = append(block.List, createItStatementForTestFunc(testFunc))
- replaceTestingTsWithGinkgoT(block, namedTestingTArg(testFunc))
-
- // remove the old test func from the root node's declarations
- rootNode.Decls = append(rootNode.Decls[:funcIndex], rootNode.Decls[funcIndex+1:]...)
-}
-
-/*
- * walks nodes inside of a test func's statements and replaces the usage of
- * it's named *testing.T param with GinkgoT's
- */
-func replaceTestingTsWithGinkgoT(statementsBlock *ast.BlockStmt, testingT string) {
- ast.Inspect(statementsBlock, func(node ast.Node) bool {
- if node == nil {
- return false
- }
-
- keyValueExpr, ok := node.(*ast.KeyValueExpr)
- if ok {
- replaceNamedTestingTsInKeyValueExpression(keyValueExpr, testingT)
- return true
- }
-
- funcLiteral, ok := node.(*ast.FuncLit)
- if ok {
- replaceTypeDeclTestingTsInFuncLiteral(funcLiteral)
- return true
- }
-
- callExpr, ok := node.(*ast.CallExpr)
- if !ok {
- return true
- }
- replaceTestingTsInArgsLists(callExpr, testingT)
-
- funCall, ok := callExpr.Fun.(*ast.SelectorExpr)
- if ok {
- replaceTestingTsMethodCalls(funCall, testingT)
- }
-
- return true
- })
-}
-
-/*
- * rewrite t.Fail() or any other *testing.T method by replacing with T().Fail()
- * This function receives a selector expression (eg: t.Fail()) and
- * the name of the *testing.T param from the function declaration. Rewrites the
- * selector expression in place if the target was a *testing.T
- */
-func replaceTestingTsMethodCalls(selectorExpr *ast.SelectorExpr, testingT string) {
- ident, ok := selectorExpr.X.(*ast.Ident)
- if !ok {
- return
- }
-
- if ident.Name == testingT {
- selectorExpr.X = newGinkgoTFromIdent(ident)
- }
-}
-
-/*
- * replaces usages of a named *testing.T param inside of a call expression
- * with a new GinkgoT object
- */
-func replaceTestingTsInArgsLists(callExpr *ast.CallExpr, testingT string) {
- for index, arg := range callExpr.Args {
- ident, ok := arg.(*ast.Ident)
- if !ok {
- continue
- }
-
- if ident.Name == testingT {
- callExpr.Args[index] = newGinkgoTFromIdent(ident)
- }
- }
-}
diff --git a/vendor/github.com/onsi/ginkgo/ginkgo/convert/testing_t_rewriter.go b/vendor/github.com/onsi/ginkgo/ginkgo/convert/testing_t_rewriter.go
deleted file mode 100644
index 418cdc4e5..000000000
--- a/vendor/github.com/onsi/ginkgo/ginkgo/convert/testing_t_rewriter.go
+++ /dev/null
@@ -1,130 +0,0 @@
-package convert
-
-import (
- "go/ast"
-)
-
-/*
- * Rewrites any other top level funcs that receive a *testing.T param
- */
-func rewriteOtherFuncsToUseGinkgoT(declarations []ast.Decl) {
- for _, decl := range declarations {
- decl, ok := decl.(*ast.FuncDecl)
- if !ok {
- continue
- }
-
- for _, param := range decl.Type.Params.List {
- starExpr, ok := param.Type.(*ast.StarExpr)
- if !ok {
- continue
- }
-
- selectorExpr, ok := starExpr.X.(*ast.SelectorExpr)
- if !ok {
- continue
- }
-
- xIdent, ok := selectorExpr.X.(*ast.Ident)
- if !ok || xIdent.Name != "testing" {
- continue
- }
-
- if selectorExpr.Sel.Name != "T" {
- continue
- }
-
- param.Type = newGinkgoTInterface()
- }
- }
-}
-
-/*
- * Walks all of the nodes in the file, replacing *testing.T in struct
- * and func literal nodes. eg:
- * type foo struct { *testing.T }
- * var bar = func(t *testing.T) { }
- */
-func walkNodesInRootNodeReplacingTestingT(rootNode *ast.File) {
- ast.Inspect(rootNode, func(node ast.Node) bool {
- if node == nil {
- return false
- }
-
- switch node := node.(type) {
- case *ast.StructType:
- replaceTestingTsInStructType(node)
- case *ast.FuncLit:
- replaceTypeDeclTestingTsInFuncLiteral(node)
- }
-
- return true
- })
-}
-
-/*
- * replaces named *testing.T inside a composite literal
- */
-func replaceNamedTestingTsInKeyValueExpression(kve *ast.KeyValueExpr, testingT string) {
- ident, ok := kve.Value.(*ast.Ident)
- if !ok {
- return
- }
-
- if ident.Name == testingT {
- kve.Value = newGinkgoTFromIdent(ident)
- }
-}
-
-/*
- * replaces *testing.T params in a func literal with GinkgoT
- */
-func replaceTypeDeclTestingTsInFuncLiteral(functionLiteral *ast.FuncLit) {
- for _, arg := range functionLiteral.Type.Params.List {
- starExpr, ok := arg.Type.(*ast.StarExpr)
- if !ok {
- continue
- }
-
- selectorExpr, ok := starExpr.X.(*ast.SelectorExpr)
- if !ok {
- continue
- }
-
- target, ok := selectorExpr.X.(*ast.Ident)
- if !ok {
- continue
- }
-
- if target.Name == "testing" && selectorExpr.Sel.Name == "T" {
- arg.Type = newGinkgoTInterface()
- }
- }
-}
-
-/*
- * Replaces *testing.T types inside of a struct declaration with a GinkgoT
- * eg: type foo struct { *testing.T }
- */
-func replaceTestingTsInStructType(structType *ast.StructType) {
- for _, field := range structType.Fields.List {
- starExpr, ok := field.Type.(*ast.StarExpr)
- if !ok {
- continue
- }
-
- selectorExpr, ok := starExpr.X.(*ast.SelectorExpr)
- if !ok {
- continue
- }
-
- xIdent, ok := selectorExpr.X.(*ast.Ident)
- if !ok {
- continue
- }
-
- if xIdent.Name == "testing" && selectorExpr.Sel.Name == "T" {
- field.Type = newGinkgoTInterface()
- }
- }
-}
diff --git a/vendor/github.com/onsi/ginkgo/ginkgo/convert_command.go b/vendor/github.com/onsi/ginkgo/ginkgo/convert_command.go
deleted file mode 100644
index 8e99f56a2..000000000
--- a/vendor/github.com/onsi/ginkgo/ginkgo/convert_command.go
+++ /dev/null
@@ -1,51 +0,0 @@
-package main
-
-import (
- "flag"
- "fmt"
- "os"
-
- "github.com/onsi/ginkgo/ginkgo/convert"
- colorable "github.com/onsi/ginkgo/reporters/stenographer/support/go-colorable"
- "github.com/onsi/ginkgo/types"
-)
-
-func BuildConvertCommand() *Command {
- return &Command{
- Name: "convert",
- FlagSet: flag.NewFlagSet("convert", flag.ExitOnError),
- UsageCommand: "ginkgo convert /path/to/package",
- Usage: []string{
- "Convert the package at the passed in path from an XUnit-style test to a Ginkgo-style test",
- },
- Command: convertPackage,
- }
-}
-
-func convertPackage(args []string, additionalArgs []string) {
- deprecationTracker := types.NewDeprecationTracker()
- deprecationTracker.TrackDeprecation(types.Deprecations.Convert())
- fmt.Fprintln(colorable.NewColorableStderr(), deprecationTracker.DeprecationsReport())
-
- if len(args) != 1 {
- println(fmt.Sprintf("usage: ginkgo convert /path/to/your/package"))
- os.Exit(1)
- }
-
- defer func() {
- err := recover()
- if err != nil {
- switch err := err.(type) {
- case error:
- println(err.Error())
- case string:
- println(err)
- default:
- println(fmt.Sprintf("unexpected error: %#v", err))
- }
- os.Exit(1)
- }
- }()
-
- convert.RewritePackage(args[0])
-}
diff --git a/vendor/github.com/onsi/ginkgo/ginkgo/generate_command.go b/vendor/github.com/onsi/ginkgo/ginkgo/generate_command.go
deleted file mode 100644
index f79271676..000000000
--- a/vendor/github.com/onsi/ginkgo/ginkgo/generate_command.go
+++ /dev/null
@@ -1,274 +0,0 @@
-package main
-
-import (
- "bytes"
- "flag"
- "fmt"
- "io/ioutil"
- "os"
- "path/filepath"
- "strconv"
- "strings"
- "text/template"
-
- sprig "github.com/go-task/slim-sprig"
-)
-
-func BuildGenerateCommand() *Command {
- var (
- agouti, noDot, internal bool
- customTestFile string
- )
- flagSet := flag.NewFlagSet("generate", flag.ExitOnError)
- flagSet.BoolVar(&agouti, "agouti", false, "If set, generate will generate a test file for writing Agouti tests")
- flagSet.BoolVar(&noDot, "nodot", false, "If set, generate will generate a test file that does not . import ginkgo and gomega")
- flagSet.BoolVar(&internal, "internal", false, "If set, generate will generate a test file that uses the regular package name")
- flagSet.StringVar(&customTestFile, "template", "", "If specified, generate will use the contents of the file passed as the test file template")
-
- return &Command{
- Name: "generate",
- FlagSet: flagSet,
- UsageCommand: "ginkgo generate ",
- Usage: []string{
- "Generate a test file named filename_test.go",
- "If the optional argument is omitted, a file named after the package in the current directory will be created.",
- "Accepts the following flags:",
- },
- Command: func(args []string, additionalArgs []string) {
- generateSpec(args, agouti, noDot, internal, customTestFile)
- emitRCAdvertisement()
- },
- }
-}
-
-var specText = `package {{.Package}}
-
-import (
- {{if .IncludeImports}}. "github.com/onsi/ginkgo"{{end}}
- {{if .IncludeImports}}. "github.com/onsi/gomega"{{end}}
-
- {{if .ImportPackage}}"{{.PackageImportPath}}"{{end}}
-)
-
-var _ = Describe("{{.Subject}}", func() {
-
-})
-`
-
-var agoutiSpecText = `package {{.Package}}
-
-import (
- {{if .IncludeImports}}. "github.com/onsi/ginkgo"{{end}}
- {{if .IncludeImports}}. "github.com/onsi/gomega"{{end}}
- "github.com/sclevine/agouti"
- . "github.com/sclevine/agouti/matchers"
-
- {{if .ImportPackage}}"{{.PackageImportPath}}"{{end}}
-)
-
-var _ = Describe("{{.Subject}}", func() {
- var page *agouti.Page
-
- BeforeEach(func() {
- var err error
- page, err = agoutiDriver.NewPage()
- Expect(err).NotTo(HaveOccurred())
- })
-
- AfterEach(func() {
- Expect(page.Destroy()).To(Succeed())
- })
-})
-`
-
-type specData struct {
- Package string
- Subject string
- PackageImportPath string
- IncludeImports bool
- ImportPackage bool
-}
-
-func generateSpec(args []string, agouti, noDot, internal bool, customTestFile string) {
- if len(args) == 0 {
- err := generateSpecForSubject("", agouti, noDot, internal, customTestFile)
- if err != nil {
- fmt.Println(err.Error())
- fmt.Println("")
- os.Exit(1)
- }
- fmt.Println("")
- return
- }
-
- var failed bool
- for _, arg := range args {
- err := generateSpecForSubject(arg, agouti, noDot, internal, customTestFile)
- if err != nil {
- failed = true
- fmt.Println(err.Error())
- }
- }
- fmt.Println("")
- if failed {
- os.Exit(1)
- }
-}
-
-func generateSpecForSubject(subject string, agouti, noDot, internal bool, customTestFile string) error {
- packageName, specFilePrefix, formattedName := getPackageAndFormattedName()
- if subject != "" {
- specFilePrefix = formatSubject(subject)
- formattedName = prettifyPackageName(specFilePrefix)
- }
-
- if internal {
- specFilePrefix = specFilePrefix + "_internal"
- }
-
- data := specData{
- Package: determinePackageName(packageName, internal),
- Subject: formattedName,
- PackageImportPath: getPackageImportPath(),
- IncludeImports: !noDot,
- ImportPackage: !internal,
- }
-
- targetFile := fmt.Sprintf("%s_test.go", specFilePrefix)
- if fileExists(targetFile) {
- return fmt.Errorf("%s already exists.", targetFile)
- } else {
- fmt.Printf("Generating ginkgo test for %s in:\n %s\n", data.Subject, targetFile)
- }
-
- f, err := os.Create(targetFile)
- if err != nil {
- return err
- }
- defer f.Close()
-
- var templateText string
- if customTestFile != "" {
- tpl, err := ioutil.ReadFile(customTestFile)
- if err != nil {
- panic(err.Error())
- }
- templateText = string(tpl)
- } else if agouti {
- templateText = agoutiSpecText
- } else {
- templateText = specText
- }
-
- specTemplate, err := template.New("spec").Funcs(sprig.TxtFuncMap()).Parse(templateText)
- if err != nil {
- return err
- }
-
- specTemplate.Execute(f, data)
- goFmt(targetFile)
- return nil
-}
-
-func formatSubject(name string) string {
- name = strings.Replace(name, "-", "_", -1)
- name = strings.Replace(name, " ", "_", -1)
- name = strings.Split(name, ".go")[0]
- name = strings.Split(name, "_test")[0]
- return name
-}
-
-// moduleName returns module name from go.mod from given module root directory
-func moduleName(modRoot string) string {
- modFile, err := os.Open(filepath.Join(modRoot, "go.mod"))
- if err != nil {
- return ""
- }
-
- mod := make([]byte, 128)
- _, err = modFile.Read(mod)
- if err != nil {
- return ""
- }
-
- slashSlash := []byte("//")
- moduleStr := []byte("module")
-
- for len(mod) > 0 {
- line := mod
- mod = nil
- if i := bytes.IndexByte(line, '\n'); i >= 0 {
- line, mod = line[:i], line[i+1:]
- }
- if i := bytes.Index(line, slashSlash); i >= 0 {
- line = line[:i]
- }
- line = bytes.TrimSpace(line)
- if !bytes.HasPrefix(line, moduleStr) {
- continue
- }
- line = line[len(moduleStr):]
- n := len(line)
- line = bytes.TrimSpace(line)
- if len(line) == n || len(line) == 0 {
- continue
- }
-
- if line[0] == '"' || line[0] == '`' {
- p, err := strconv.Unquote(string(line))
- if err != nil {
- return "" // malformed quoted string or multiline module path
- }
- return p
- }
-
- return string(line)
- }
-
- return "" // missing module path
-}
-
-func findModuleRoot(dir string) (root string) {
- dir = filepath.Clean(dir)
-
- // Look for enclosing go.mod.
- for {
- if fi, err := os.Stat(filepath.Join(dir, "go.mod")); err == nil && !fi.IsDir() {
- return dir
- }
- d := filepath.Dir(dir)
- if d == dir {
- break
- }
- dir = d
- }
- return ""
-}
-
-func getPackageImportPath() string {
- workingDir, err := os.Getwd()
- if err != nil {
- panic(err.Error())
- }
-
- sep := string(filepath.Separator)
-
- // Try go.mod file first
- modRoot := findModuleRoot(workingDir)
- if modRoot != "" {
- modName := moduleName(modRoot)
- if modName != "" {
- cd := strings.Replace(workingDir, modRoot, "", -1)
- cd = strings.ReplaceAll(cd, sep, "/")
- return modName + cd
- }
- }
-
- // Fallback to GOPATH structure
- paths := strings.Split(workingDir, sep+"src"+sep)
- if len(paths) == 1 {
- fmt.Printf("\nCouldn't identify package import path.\n\n\tginkgo generate\n\nMust be run within a package directory under $GOPATH/src/...\nYou're going to have to change UNKNOWN_PACKAGE_PATH in the generated file...\n\n")
- return "UNKNOWN_PACKAGE_PATH"
- }
- return filepath.ToSlash(paths[len(paths)-1])
-}
diff --git a/vendor/github.com/onsi/ginkgo/ginkgo/help_command.go b/vendor/github.com/onsi/ginkgo/ginkgo/help_command.go
deleted file mode 100644
index db3f40406..000000000
--- a/vendor/github.com/onsi/ginkgo/ginkgo/help_command.go
+++ /dev/null
@@ -1,33 +0,0 @@
-package main
-
-import (
- "flag"
- "fmt"
-)
-
-func BuildHelpCommand() *Command {
- return &Command{
- Name: "help",
- FlagSet: flag.NewFlagSet("help", flag.ExitOnError),
- UsageCommand: "ginkgo help ",
- Usage: []string{
- "Print usage information. If a command is passed in, print usage information just for that command.",
- },
- Command: printHelp,
- }
-}
-
-func printHelp(args []string, additionalArgs []string) {
- if len(args) == 0 {
- usage()
- emitRCAdvertisement()
- } else {
- command, found := commandMatching(args[0])
- if !found {
- complainAndQuit(fmt.Sprintf("Unknown command: %s", args[0]))
- }
-
- usageForCommand(command, true)
- emitRCAdvertisement()
- }
-}
diff --git a/vendor/github.com/onsi/ginkgo/ginkgo/interrupthandler/interrupt_handler.go b/vendor/github.com/onsi/ginkgo/ginkgo/interrupthandler/interrupt_handler.go
deleted file mode 100644
index ec456bf29..000000000
--- a/vendor/github.com/onsi/ginkgo/ginkgo/interrupthandler/interrupt_handler.go
+++ /dev/null
@@ -1,52 +0,0 @@
-package interrupthandler
-
-import (
- "os"
- "os/signal"
- "sync"
- "syscall"
-)
-
-type InterruptHandler struct {
- interruptCount int
- lock *sync.Mutex
- C chan bool
-}
-
-func NewInterruptHandler() *InterruptHandler {
- h := &InterruptHandler{
- lock: &sync.Mutex{},
- C: make(chan bool),
- }
-
- go h.handleInterrupt()
- SwallowSigQuit()
-
- return h
-}
-
-func (h *InterruptHandler) WasInterrupted() bool {
- h.lock.Lock()
- defer h.lock.Unlock()
-
- return h.interruptCount > 0
-}
-
-func (h *InterruptHandler) handleInterrupt() {
- c := make(chan os.Signal, 1)
- signal.Notify(c, os.Interrupt, syscall.SIGTERM)
-
- <-c
- signal.Stop(c)
-
- h.lock.Lock()
- h.interruptCount++
- if h.interruptCount == 1 {
- close(h.C)
- } else if h.interruptCount > 5 {
- os.Exit(1)
- }
- h.lock.Unlock()
-
- go h.handleInterrupt()
-}
diff --git a/vendor/github.com/onsi/ginkgo/ginkgo/main.go b/vendor/github.com/onsi/ginkgo/ginkgo/main.go
deleted file mode 100644
index ae0e1daf6..000000000
--- a/vendor/github.com/onsi/ginkgo/ginkgo/main.go
+++ /dev/null
@@ -1,337 +0,0 @@
-/*
-The Ginkgo CLI
-
-The Ginkgo CLI is fully documented [here](http://onsi.github.io/ginkgo/#the_ginkgo_cli)
-
-You can also learn more by running:
-
- ginkgo help
-
-Here are some of the more commonly used commands:
-
-To install:
-
- go install github.com/onsi/ginkgo/ginkgo
-
-To run tests:
-
- ginkgo
-
-To run tests in all subdirectories:
-
- ginkgo -r
-
-To run tests in particular packages:
-
- ginkgo /path/to/package /path/to/another/package
-
-To pass arguments/flags to your tests:
-
- ginkgo --
-
-To run tests in parallel
-
- ginkgo -p
-
-this will automatically detect the optimal number of nodes to use. Alternatively, you can specify the number of nodes with:
-
- ginkgo -nodes=N
-
-(note that you don't need to provide -p in this case).
-
-By default the Ginkgo CLI will spin up a server that the individual test processes send test output to. The CLI aggregates this output and then presents coherent test output, one test at a time, as each test completes.
-An alternative is to have the parallel nodes run and stream interleaved output back. This useful for debugging, particularly in contexts where tests hang/fail to start. To get this interleaved output:
-
- ginkgo -nodes=N -stream=true
-
-On windows, the default value for stream is true.
-
-By default, when running multiple tests (with -r or a list of packages) Ginkgo will abort when a test fails. To have Ginkgo run subsequent test suites instead you can:
-
- ginkgo -keepGoing
-
-To fail if there are ginkgo tests in a directory but no test suite (missing `RunSpecs`)
-
- ginkgo -requireSuite
-
-To monitor packages and rerun tests when changes occur:
-
- ginkgo watch <-r>
-
-passing `ginkgo watch` the `-r` flag will recursively detect all test suites under the current directory and monitor them.
-`watch` does not detect *new* packages. Moreover, changes in package X only rerun the tests for package X, tests for packages
-that depend on X are not rerun.
-
-[OSX & Linux only] To receive (desktop) notifications when a test run completes:
-
- ginkgo -notify
-
-this is particularly useful with `ginkgo watch`. Notifications are currently only supported on OS X and require that you `brew install terminal-notifier`
-
-Sometimes (to suss out race conditions/flakey tests, for example) you want to keep running a test suite until it fails. You can do this with:
-
- ginkgo -untilItFails
-
-To bootstrap a test suite:
-
- ginkgo bootstrap
-
-To generate a test file:
-
- ginkgo generate
-
-To bootstrap/generate test files without using "." imports:
-
- ginkgo bootstrap --nodot
- ginkgo generate --nodot
-
-this will explicitly export all the identifiers in Ginkgo and Gomega allowing you to rename them to avoid collisions. When you pull to the latest Ginkgo/Gomega you'll want to run
-
- ginkgo nodot
-
-to refresh this list and pull in any new identifiers. In particular, this will pull in any new Gomega matchers that get added.
-
-To convert an existing XUnit style test suite to a Ginkgo-style test suite:
-
- ginkgo convert .
-
-To unfocus tests:
-
- ginkgo unfocus
-
-or
-
- ginkgo blur
-
-To compile a test suite:
-
- ginkgo build
-
-will output an executable file named `package.test`. This can be run directly or by invoking
-
- ginkgo
-
-
-To print an outline of Ginkgo specs and containers in a file:
-
- gingko outline
-
-To print out Ginkgo's version:
-
- ginkgo version
-
-To get more help:
-
- ginkgo help
-*/
-package main
-
-import (
- "flag"
- "fmt"
- "os"
- "os/exec"
- "path/filepath"
- "strings"
-
- "github.com/onsi/ginkgo/config"
- "github.com/onsi/ginkgo/formatter"
- "github.com/onsi/ginkgo/ginkgo/testsuite"
-)
-
-const greenColor = "\x1b[32m"
-const redColor = "\x1b[91m"
-const defaultStyle = "\x1b[0m"
-const lightGrayColor = "\x1b[37m"
-
-type Command struct {
- Name string
- AltName string
- FlagSet *flag.FlagSet
- Usage []string
- UsageCommand string
- Command func(args []string, additionalArgs []string)
- SuppressFlagDocumentation bool
- FlagDocSubstitute []string
-}
-
-func (c *Command) Matches(name string) bool {
- return c.Name == name || (c.AltName != "" && c.AltName == name)
-}
-
-func (c *Command) Run(args []string, additionalArgs []string) {
- c.FlagSet.Usage = usage
- c.FlagSet.Parse(args)
- c.Command(c.FlagSet.Args(), additionalArgs)
-}
-
-var DefaultCommand *Command
-var Commands []*Command
-
-func init() {
- DefaultCommand = BuildRunCommand()
- Commands = append(Commands, BuildWatchCommand())
- Commands = append(Commands, BuildBuildCommand())
- Commands = append(Commands, BuildBootstrapCommand())
- Commands = append(Commands, BuildGenerateCommand())
- Commands = append(Commands, BuildNodotCommand())
- Commands = append(Commands, BuildConvertCommand())
- Commands = append(Commands, BuildUnfocusCommand())
- Commands = append(Commands, BuildVersionCommand())
- Commands = append(Commands, BuildHelpCommand())
- Commands = append(Commands, BuildOutlineCommand())
-}
-
-func main() {
- args := []string{}
- additionalArgs := []string{}
-
- foundDelimiter := false
-
- for _, arg := range os.Args[1:] {
- if !foundDelimiter {
- if arg == "--" {
- foundDelimiter = true
- continue
- }
- }
-
- if foundDelimiter {
- additionalArgs = append(additionalArgs, arg)
- } else {
- args = append(args, arg)
- }
- }
-
- if len(args) > 0 {
- commandToRun, found := commandMatching(args[0])
- if found {
- commandToRun.Run(args[1:], additionalArgs)
- return
- }
- }
-
- DefaultCommand.Run(args, additionalArgs)
-}
-
-func commandMatching(name string) (*Command, bool) {
- for _, command := range Commands {
- if command.Matches(name) {
- return command, true
- }
- }
- return nil, false
-}
-
-func usage() {
- fmt.Printf("Ginkgo Version %s\n\n", config.VERSION)
- usageForCommand(DefaultCommand, false)
- for _, command := range Commands {
- fmt.Printf("\n")
- usageForCommand(command, false)
- }
-}
-
-func usageForCommand(command *Command, longForm bool) {
- fmt.Printf("%s\n%s\n", command.UsageCommand, strings.Repeat("-", len(command.UsageCommand)))
- fmt.Printf("%s\n", strings.Join(command.Usage, "\n"))
- if command.SuppressFlagDocumentation && !longForm {
- fmt.Printf("%s\n", strings.Join(command.FlagDocSubstitute, "\n "))
- } else {
- command.FlagSet.SetOutput(os.Stdout)
- command.FlagSet.PrintDefaults()
- }
-}
-
-func complainAndQuit(complaint string) {
- fmt.Fprintf(os.Stderr, "%s\nFor usage instructions:\n\tginkgo help\n", complaint)
- emitRCAdvertisement()
- os.Exit(1)
-}
-
-func findSuites(args []string, recurseForAll bool, skipPackage string, allowPrecompiled bool) ([]testsuite.TestSuite, []string) {
- suites := []testsuite.TestSuite{}
-
- if len(args) > 0 {
- for _, arg := range args {
- if allowPrecompiled {
- suite, err := testsuite.PrecompiledTestSuite(arg)
- if err == nil {
- suites = append(suites, suite)
- continue
- }
- }
- recurseForSuite := recurseForAll
- if strings.HasSuffix(arg, "/...") && arg != "/..." {
- arg = arg[:len(arg)-4]
- recurseForSuite = true
- }
- suites = append(suites, testsuite.SuitesInDir(arg, recurseForSuite)...)
- }
- } else {
- suites = testsuite.SuitesInDir(".", recurseForAll)
- }
-
- skippedPackages := []string{}
- if skipPackage != "" {
- skipFilters := strings.Split(skipPackage, ",")
- filteredSuites := []testsuite.TestSuite{}
- for _, suite := range suites {
- skip := false
- for _, skipFilter := range skipFilters {
- if strings.Contains(suite.Path, skipFilter) {
- skip = true
- break
- }
- }
- if skip {
- skippedPackages = append(skippedPackages, suite.Path)
- } else {
- filteredSuites = append(filteredSuites, suite)
- }
- }
- suites = filteredSuites
- }
-
- return suites, skippedPackages
-}
-
-func goFmt(path string) {
- out, err := exec.Command("go", "fmt", path).CombinedOutput()
- if err != nil {
- complainAndQuit("Could not fmt: " + err.Error() + "\n" + string(out))
- }
-}
-
-func pluralizedWord(singular, plural string, count int) string {
- if count == 1 {
- return singular
- }
- return plural
-}
-
-func emitRCAdvertisement() {
- ackRC := os.Getenv("ACK_GINKGO_RC")
- if ackRC != "" {
- return
- }
- home, err := os.UserHomeDir()
- if err == nil {
- _, err := os.Stat(filepath.Join(home, ".ack-ginkgo-rc"))
- if err == nil {
- return
- }
- }
-
- out := formatter.F("\n{{light-yellow}}Ginkgo 2.0 is coming soon!{{/}}\n")
- out += formatter.F("{{light-yellow}}=========================={{/}}\n")
- out += formatter.F("{{bold}}{{green}}Ginkgo 2.0{{/}} is under active development and will introduce several new features, improvements, and a small handful of breaking changes.\n")
- out += formatter.F("A release candidate for 2.0 is now available and 2.0 should GA in Fall 2021. {{bold}}Please give the RC a try and send us feedback!{{/}}\n")
- out += formatter.F(" - To learn more, view the migration guide at {{cyan}}{{underline}}https://github.com/onsi/ginkgo/blob/ver2/docs/MIGRATING_TO_V2.md{{/}}\n")
- out += formatter.F(" - For instructions on using the Release Candidate visit {{cyan}}{{underline}}https://github.com/onsi/ginkgo/blob/ver2/docs/MIGRATING_TO_V2.md#using-the-beta{{/}}\n")
- out += formatter.F(" - To comment, chime in at {{cyan}}{{underline}}https://github.com/onsi/ginkgo/issues/711{{/}}\n\n")
- out += formatter.F("To {{bold}}{{coral}}silence this notice{{/}}, set the environment variable: {{bold}}ACK_GINKGO_RC=true{{/}}\n")
- out += formatter.F("Alternatively you can: {{bold}}touch $HOME/.ack-ginkgo-rc{{/}}")
-
- fmt.Println(out)
-}
diff --git a/vendor/github.com/onsi/ginkgo/ginkgo/nodot/nodot.go b/vendor/github.com/onsi/ginkgo/ginkgo/nodot/nodot.go
deleted file mode 100644
index c87b72165..000000000
--- a/vendor/github.com/onsi/ginkgo/ginkgo/nodot/nodot.go
+++ /dev/null
@@ -1,196 +0,0 @@
-package nodot
-
-import (
- "fmt"
- "go/ast"
- "go/build"
- "go/parser"
- "go/token"
- "path/filepath"
- "strings"
-)
-
-func ApplyNoDot(data []byte) ([]byte, error) {
- sections, err := generateNodotSections()
- if err != nil {
- return nil, err
- }
-
- for _, section := range sections {
- data = section.createOrUpdateIn(data)
- }
-
- return data, nil
-}
-
-type nodotSection struct {
- name string
- pkg string
- declarations []string
- types []string
-}
-
-func (s nodotSection) createOrUpdateIn(data []byte) []byte {
- renames := map[string]string{}
-
- contents := string(data)
-
- lines := strings.Split(contents, "\n")
-
- comment := "// Declarations for " + s.name
-
- newLines := []string{}
- for _, line := range lines {
- if line == comment {
- continue
- }
-
- words := strings.Split(line, " ")
- lastWord := words[len(words)-1]
-
- if s.containsDeclarationOrType(lastWord) {
- renames[lastWord] = words[1]
- continue
- }
-
- newLines = append(newLines, line)
- }
-
- if len(newLines[len(newLines)-1]) > 0 {
- newLines = append(newLines, "")
- }
-
- newLines = append(newLines, comment)
-
- for _, typ := range s.types {
- name, ok := renames[s.prefix(typ)]
- if !ok {
- name = typ
- }
- newLines = append(newLines, fmt.Sprintf("type %s %s", name, s.prefix(typ)))
- }
-
- for _, decl := range s.declarations {
- name, ok := renames[s.prefix(decl)]
- if !ok {
- name = decl
- }
- newLines = append(newLines, fmt.Sprintf("var %s = %s", name, s.prefix(decl)))
- }
-
- newLines = append(newLines, "")
-
- newContents := strings.Join(newLines, "\n")
-
- return []byte(newContents)
-}
-
-func (s nodotSection) prefix(declOrType string) string {
- return s.pkg + "." + declOrType
-}
-
-func (s nodotSection) containsDeclarationOrType(word string) bool {
- for _, declaration := range s.declarations {
- if s.prefix(declaration) == word {
- return true
- }
- }
-
- for _, typ := range s.types {
- if s.prefix(typ) == word {
- return true
- }
- }
-
- return false
-}
-
-func generateNodotSections() ([]nodotSection, error) {
- sections := []nodotSection{}
-
- declarations, err := getExportedDeclerationsForPackage("github.com/onsi/ginkgo", "ginkgo_dsl.go", "GINKGO_VERSION", "GINKGO_PANIC")
- if err != nil {
- return nil, err
- }
- sections = append(sections, nodotSection{
- name: "Ginkgo DSL",
- pkg: "ginkgo",
- declarations: declarations,
- types: []string{"Done", "Benchmarker"},
- })
-
- declarations, err = getExportedDeclerationsForPackage("github.com/onsi/gomega", "gomega_dsl.go", "GOMEGA_VERSION")
- if err != nil {
- return nil, err
- }
- sections = append(sections, nodotSection{
- name: "Gomega DSL",
- pkg: "gomega",
- declarations: declarations,
- })
-
- declarations, err = getExportedDeclerationsForPackage("github.com/onsi/gomega", "matchers.go")
- if err != nil {
- return nil, err
- }
- sections = append(sections, nodotSection{
- name: "Gomega Matchers",
- pkg: "gomega",
- declarations: declarations,
- })
-
- return sections, nil
-}
-
-func getExportedDeclerationsForPackage(pkgPath string, filename string, blacklist ...string) ([]string, error) {
- pkg, err := build.Import(pkgPath, ".", 0)
- if err != nil {
- return []string{}, err
- }
-
- declarations, err := getExportedDeclarationsForFile(filepath.Join(pkg.Dir, filename))
- if err != nil {
- return []string{}, err
- }
-
- blacklistLookup := map[string]bool{}
- for _, declaration := range blacklist {
- blacklistLookup[declaration] = true
- }
-
- filteredDeclarations := []string{}
- for _, declaration := range declarations {
- if blacklistLookup[declaration] {
- continue
- }
- filteredDeclarations = append(filteredDeclarations, declaration)
- }
-
- return filteredDeclarations, nil
-}
-
-func getExportedDeclarationsForFile(path string) ([]string, error) {
- fset := token.NewFileSet()
- tree, err := parser.ParseFile(fset, path, nil, 0)
- if err != nil {
- return []string{}, err
- }
-
- declarations := []string{}
- ast.FileExports(tree)
- for _, decl := range tree.Decls {
- switch x := decl.(type) {
- case *ast.GenDecl:
- switch s := x.Specs[0].(type) {
- case *ast.ValueSpec:
- declarations = append(declarations, s.Names[0].Name)
- }
- case *ast.FuncDecl:
- if x.Recv == nil {
- declarations = append(declarations, x.Name.Name)
- }
- }
- }
-
- return declarations, nil
-}
diff --git a/vendor/github.com/onsi/ginkgo/ginkgo/nodot_command.go b/vendor/github.com/onsi/ginkgo/ginkgo/nodot_command.go
deleted file mode 100644
index 39b88b5d1..000000000
--- a/vendor/github.com/onsi/ginkgo/ginkgo/nodot_command.go
+++ /dev/null
@@ -1,77 +0,0 @@
-package main
-
-import (
- "bufio"
- "flag"
- "io/ioutil"
- "os"
- "path/filepath"
- "regexp"
-
- "github.com/onsi/ginkgo/ginkgo/nodot"
-)
-
-func BuildNodotCommand() *Command {
- return &Command{
- Name: "nodot",
- FlagSet: flag.NewFlagSet("bootstrap", flag.ExitOnError),
- UsageCommand: "ginkgo nodot",
- Usage: []string{
- "Update the nodot declarations in your test suite",
- "Any missing declarations (from, say, a recently added matcher) will be added to your bootstrap file.",
- "If you've renamed a declaration, that name will be honored and not overwritten.",
- },
- Command: updateNodot,
- }
-}
-
-func updateNodot(args []string, additionalArgs []string) {
- suiteFile, perm := findSuiteFile()
-
- data, err := ioutil.ReadFile(suiteFile)
- if err != nil {
- complainAndQuit("Failed to update nodot declarations: " + err.Error())
- }
-
- content, err := nodot.ApplyNoDot(data)
- if err != nil {
- complainAndQuit("Failed to update nodot declarations: " + err.Error())
- }
- ioutil.WriteFile(suiteFile, content, perm)
-
- goFmt(suiteFile)
-}
-
-func findSuiteFile() (string, os.FileMode) {
- workingDir, err := os.Getwd()
- if err != nil {
- complainAndQuit("Could not find suite file for nodot: " + err.Error())
- }
-
- files, err := ioutil.ReadDir(workingDir)
- if err != nil {
- complainAndQuit("Could not find suite file for nodot: " + err.Error())
- }
-
- re := regexp.MustCompile(`RunSpecs\(|RunSpecsWithDefaultAndCustomReporters\(|RunSpecsWithCustomReporters\(`)
-
- for _, file := range files {
- if file.IsDir() {
- continue
- }
- path := filepath.Join(workingDir, file.Name())
- f, err := os.Open(path)
- if err != nil {
- complainAndQuit("Could not find suite file for nodot: " + err.Error())
- }
- defer f.Close()
-
- if re.MatchReader(bufio.NewReader(f)) {
- return path, file.Mode()
- }
- }
-
- complainAndQuit("Could not find a suite file for nodot: you need a bootstrap file that call's Ginkgo's RunSpecs() command.\nTry running ginkgo bootstrap first.")
-
- return "", 0
-}
diff --git a/vendor/github.com/onsi/ginkgo/ginkgo/notifications.go b/vendor/github.com/onsi/ginkgo/ginkgo/notifications.go
deleted file mode 100644
index 368d61fb3..000000000
--- a/vendor/github.com/onsi/ginkgo/ginkgo/notifications.go
+++ /dev/null
@@ -1,141 +0,0 @@
-package main
-
-import (
- "fmt"
- "os"
- "os/exec"
- "regexp"
- "runtime"
- "strings"
-
- "github.com/onsi/ginkgo/config"
- "github.com/onsi/ginkgo/ginkgo/testsuite"
-)
-
-type Notifier struct {
- commandFlags *RunWatchAndBuildCommandFlags
-}
-
-func NewNotifier(commandFlags *RunWatchAndBuildCommandFlags) *Notifier {
- return &Notifier{
- commandFlags: commandFlags,
- }
-}
-
-func (n *Notifier) VerifyNotificationsAreAvailable() {
- if n.commandFlags.Notify {
- onLinux := (runtime.GOOS == "linux")
- onOSX := (runtime.GOOS == "darwin")
- if onOSX {
-
- _, err := exec.LookPath("terminal-notifier")
- if err != nil {
- fmt.Printf(`--notify requires terminal-notifier, which you don't seem to have installed.
-
-OSX:
-
-To remedy this:
-
- brew install terminal-notifier
-
-To learn more about terminal-notifier:
-
- https://github.com/alloy/terminal-notifier
-`)
- os.Exit(1)
- }
-
- } else if onLinux {
-
- _, err := exec.LookPath("notify-send")
- if err != nil {
- fmt.Printf(`--notify requires terminal-notifier or notify-send, which you don't seem to have installed.
-
-Linux:
-
-Download and install notify-send for your distribution
-`)
- os.Exit(1)
- }
-
- }
- }
-}
-
-func (n *Notifier) SendSuiteCompletionNotification(suite testsuite.TestSuite, suitePassed bool) {
- if suitePassed {
- n.SendNotification("Ginkgo [PASS]", fmt.Sprintf(`Test suite for "%s" passed.`, suite.PackageName))
- } else {
- n.SendNotification("Ginkgo [FAIL]", fmt.Sprintf(`Test suite for "%s" failed.`, suite.PackageName))
- }
-}
-
-func (n *Notifier) SendNotification(title string, subtitle string) {
-
- if n.commandFlags.Notify {
- onLinux := (runtime.GOOS == "linux")
- onOSX := (runtime.GOOS == "darwin")
-
- if onOSX {
-
- _, err := exec.LookPath("terminal-notifier")
- if err == nil {
- args := []string{"-title", title, "-subtitle", subtitle, "-group", "com.onsi.ginkgo"}
- terminal := os.Getenv("TERM_PROGRAM")
- if terminal == "iTerm.app" {
- args = append(args, "-activate", "com.googlecode.iterm2")
- } else if terminal == "Apple_Terminal" {
- args = append(args, "-activate", "com.apple.Terminal")
- }
-
- exec.Command("terminal-notifier", args...).Run()
- }
-
- } else if onLinux {
-
- _, err := exec.LookPath("notify-send")
- if err == nil {
- args := []string{"-a", "ginkgo", title, subtitle}
- exec.Command("notify-send", args...).Run()
- }
-
- }
- }
-}
-
-func (n *Notifier) RunCommand(suite testsuite.TestSuite, suitePassed bool) {
-
- command := n.commandFlags.AfterSuiteHook
- if command != "" {
-
- // Allow for string replacement to pass input to the command
- passed := "[FAIL]"
- if suitePassed {
- passed = "[PASS]"
- }
- command = strings.Replace(command, "(ginkgo-suite-passed)", passed, -1)
- command = strings.Replace(command, "(ginkgo-suite-name)", suite.PackageName, -1)
-
- // Must break command into parts
- splitArgs := regexp.MustCompile(`'.+'|".+"|\S+`)
- parts := splitArgs.FindAllString(command, -1)
-
- output, err := exec.Command(parts[0], parts[1:]...).CombinedOutput()
- if err != nil {
- fmt.Println("Post-suite command failed:")
- if config.DefaultReporterConfig.NoColor {
- fmt.Printf("\t%s\n", output)
- } else {
- fmt.Printf("\t%s%s%s\n", redColor, string(output), defaultStyle)
- }
- n.SendNotification("Ginkgo [ERROR]", fmt.Sprintf(`After suite command "%s" failed`, n.commandFlags.AfterSuiteHook))
- } else {
- fmt.Println("Post-suite command succeeded:")
- if config.DefaultReporterConfig.NoColor {
- fmt.Printf("\t%s\n", output)
- } else {
- fmt.Printf("\t%s%s%s\n", greenColor, string(output), defaultStyle)
- }
- }
- }
-}
diff --git a/vendor/github.com/onsi/ginkgo/ginkgo/outline_command.go b/vendor/github.com/onsi/ginkgo/ginkgo/outline_command.go
deleted file mode 100644
index 96ca7ad27..000000000
--- a/vendor/github.com/onsi/ginkgo/ginkgo/outline_command.go
+++ /dev/null
@@ -1,95 +0,0 @@
-package main
-
-import (
- "encoding/json"
- "flag"
- "fmt"
- "go/parser"
- "go/token"
- "os"
-
- "github.com/onsi/ginkgo/ginkgo/outline"
-)
-
-const (
- // indentWidth is the width used by the 'indent' output
- indentWidth = 4
- // stdinAlias is a portable alias for stdin. This convention is used in
- // other CLIs, e.g., kubectl.
- stdinAlias = "-"
- usageCommand = "ginkgo outline "
-)
-
-func BuildOutlineCommand() *Command {
- const defaultFormat = "csv"
- var format string
- flagSet := flag.NewFlagSet("outline", flag.ExitOnError)
- flagSet.StringVar(&format, "format", defaultFormat, "Format of outline. Accepted: 'csv', 'indent', 'json'")
- return &Command{
- Name: "outline",
- FlagSet: flagSet,
- UsageCommand: usageCommand,
- Usage: []string{
- "Create an outline of Ginkgo symbols for a file",
- "To read from stdin, use: `ginkgo outline -`",
- "Accepts the following flags:",
- },
- Command: func(args []string, additionalArgs []string) {
- outlineFile(args, format)
- },
- }
-}
-
-func outlineFile(args []string, format string) {
- if len(args) != 1 {
- println(fmt.Sprintf("usage: %s", usageCommand))
- os.Exit(1)
- }
-
- filename := args[0]
- var src *os.File
- if filename == stdinAlias {
- src = os.Stdin
- } else {
- var err error
- src, err = os.Open(filename)
- if err != nil {
- println(fmt.Sprintf("error opening file: %s", err))
- os.Exit(1)
- }
- }
-
- fset := token.NewFileSet()
-
- parsedSrc, err := parser.ParseFile(fset, filename, src, 0)
- if err != nil {
- println(fmt.Sprintf("error parsing source: %s", err))
- os.Exit(1)
- }
-
- o, err := outline.FromASTFile(fset, parsedSrc)
- if err != nil {
- println(fmt.Sprintf("error creating outline: %s", err))
- os.Exit(1)
- }
-
- var oerr error
- switch format {
- case "csv":
- _, oerr = fmt.Print(o)
- case "indent":
- _, oerr = fmt.Print(o.StringIndent(indentWidth))
- case "json":
- b, err := json.Marshal(o)
- if err != nil {
- println(fmt.Sprintf("error marshalling to json: %s", err))
- }
- _, oerr = fmt.Println(string(b))
- default:
- complainAndQuit(fmt.Sprintf("format %s not accepted", format))
- }
- if oerr != nil {
- println(fmt.Sprintf("error writing outline: %s", oerr))
- os.Exit(1)
- }
-}
diff --git a/vendor/github.com/onsi/ginkgo/ginkgo/run_command.go b/vendor/github.com/onsi/ginkgo/ginkgo/run_command.go
deleted file mode 100644
index f3d4e99a5..000000000
--- a/vendor/github.com/onsi/ginkgo/ginkgo/run_command.go
+++ /dev/null
@@ -1,316 +0,0 @@
-package main
-
-import (
- "flag"
- "fmt"
- "math/rand"
- "os"
- "regexp"
- "runtime"
- "strings"
- "time"
-
- "io/ioutil"
- "path/filepath"
-
- "github.com/onsi/ginkgo/config"
- "github.com/onsi/ginkgo/ginkgo/interrupthandler"
- "github.com/onsi/ginkgo/ginkgo/testrunner"
- colorable "github.com/onsi/ginkgo/reporters/stenographer/support/go-colorable"
- "github.com/onsi/ginkgo/types"
-)
-
-func BuildRunCommand() *Command {
- commandFlags := NewRunCommandFlags(flag.NewFlagSet("ginkgo", flag.ExitOnError))
- notifier := NewNotifier(commandFlags)
- interruptHandler := interrupthandler.NewInterruptHandler()
- runner := &SpecRunner{
- commandFlags: commandFlags,
- notifier: notifier,
- interruptHandler: interruptHandler,
- suiteRunner: NewSuiteRunner(notifier, interruptHandler),
- }
-
- return &Command{
- Name: "",
- FlagSet: commandFlags.FlagSet,
- UsageCommand: "ginkgo -- ",
- Usage: []string{
- "Run the tests in the passed in (or the package in the current directory if left blank).",
- "Any arguments after -- will be passed to the test.",
- "Accepts the following flags:",
- },
- Command: runner.RunSpecs,
- }
-}
-
-type SpecRunner struct {
- commandFlags *RunWatchAndBuildCommandFlags
- notifier *Notifier
- interruptHandler *interrupthandler.InterruptHandler
- suiteRunner *SuiteRunner
-}
-
-func (r *SpecRunner) RunSpecs(args []string, additionalArgs []string) {
- r.commandFlags.computeNodes()
- r.notifier.VerifyNotificationsAreAvailable()
-
- deprecationTracker := types.NewDeprecationTracker()
-
- if r.commandFlags.ParallelStream && (runtime.GOOS != "windows") {
- deprecationTracker.TrackDeprecation(types.Deprecation{
- Message: "--stream is deprecated and will be removed in Ginkgo 2.0",
- DocLink: "removed--stream",
- Version: "1.16.0",
- })
- }
-
- if r.commandFlags.Notify {
- deprecationTracker.TrackDeprecation(types.Deprecation{
- Message: "--notify is deprecated and will be removed in Ginkgo 2.0",
- DocLink: "removed--notify",
- Version: "1.16.0",
- })
- }
-
- if deprecationTracker.DidTrackDeprecations() {
- fmt.Fprintln(colorable.NewColorableStderr(), deprecationTracker.DeprecationsReport())
- }
-
- suites, skippedPackages := findSuites(args, r.commandFlags.Recurse, r.commandFlags.SkipPackage, true)
- if len(skippedPackages) > 0 {
- fmt.Println("Will skip:")
- for _, skippedPackage := range skippedPackages {
- fmt.Println(" " + skippedPackage)
- }
- }
-
- if len(skippedPackages) > 0 && len(suites) == 0 {
- fmt.Println("All tests skipped! Exiting...")
- os.Exit(0)
- }
-
- if len(suites) == 0 {
- complainAndQuit("Found no test suites")
- }
-
- r.ComputeSuccinctMode(len(suites))
-
- t := time.Now()
-
- runners := []*testrunner.TestRunner{}
- for _, suite := range suites {
- runners = append(runners, testrunner.New(suite, r.commandFlags.NumCPU, r.commandFlags.ParallelStream, r.commandFlags.Timeout, r.commandFlags.GoOpts, additionalArgs))
- }
-
- numSuites := 0
- runResult := testrunner.PassingRunResult()
- if r.commandFlags.UntilItFails {
- iteration := 0
- for {
- r.UpdateSeed()
- randomizedRunners := r.randomizeOrder(runners)
- runResult, numSuites = r.suiteRunner.RunSuites(randomizedRunners, r.commandFlags.NumCompilers, r.commandFlags.KeepGoing, nil)
- iteration++
-
- if r.interruptHandler.WasInterrupted() {
- break
- }
-
- if runResult.Passed {
- fmt.Printf("\nAll tests passed...\nWill keep running them until they fail.\nThis was attempt #%d\n%s\n", iteration, orcMessage(iteration))
- } else {
- fmt.Printf("\nTests failed on attempt #%d\n\n", iteration)
- break
- }
- }
- } else {
- randomizedRunners := r.randomizeOrder(runners)
- runResult, numSuites = r.suiteRunner.RunSuites(randomizedRunners, r.commandFlags.NumCompilers, r.commandFlags.KeepGoing, nil)
- }
-
- for _, runner := range runners {
- runner.CleanUp()
- }
-
- if r.isInCoverageMode() {
- if r.getOutputDir() != "" {
- // If coverprofile is set, combine coverages
- if r.getCoverprofile() != "" {
- if err := r.combineCoverprofiles(runners); err != nil {
- fmt.Println(err.Error())
- os.Exit(1)
- }
- } else {
- // Just move them
- r.moveCoverprofiles(runners)
- }
- }
- }
-
- fmt.Printf("\nGinkgo ran %d %s in %s\n", numSuites, pluralizedWord("suite", "suites", numSuites), time.Since(t))
-
- if runResult.Passed {
- if runResult.HasProgrammaticFocus && strings.TrimSpace(os.Getenv("GINKGO_EDITOR_INTEGRATION")) == "" {
- fmt.Printf("Test Suite Passed\n")
- fmt.Printf("Detected Programmatic Focus - setting exit status to %d\n", types.GINKGO_FOCUS_EXIT_CODE)
- os.Exit(types.GINKGO_FOCUS_EXIT_CODE)
- } else {
- fmt.Printf("Test Suite Passed\n")
- os.Exit(0)
- }
- } else {
- fmt.Printf("Test Suite Failed\n")
- emitRCAdvertisement()
- os.Exit(1)
- }
-}
-
-// Moves all generated profiles to specified directory
-func (r *SpecRunner) moveCoverprofiles(runners []*testrunner.TestRunner) {
- for _, runner := range runners {
- _, filename := filepath.Split(runner.CoverageFile)
- err := os.Rename(runner.CoverageFile, filepath.Join(r.getOutputDir(), filename))
-
- if err != nil {
- fmt.Printf("Unable to move coverprofile %s, %v\n", runner.CoverageFile, err)
- return
- }
- }
-}
-
-// Combines all generated profiles in the specified directory
-func (r *SpecRunner) combineCoverprofiles(runners []*testrunner.TestRunner) error {
-
- path, _ := filepath.Abs(r.getOutputDir())
- if !fileExists(path) {
- return fmt.Errorf("Unable to create combined profile, outputdir does not exist: %s", r.getOutputDir())
- }
-
- fmt.Println("path is " + path)
-
- combined, err := os.OpenFile(
- filepath.Join(path, r.getCoverprofile()),
- os.O_WRONLY|os.O_CREATE,
- 0666,
- )
-
- if err != nil {
- fmt.Printf("Unable to create combined profile, %v\n", err)
- return nil // non-fatal error
- }
-
- modeRegex := regexp.MustCompile(`^mode: .*\n`)
- for index, runner := range runners {
- contents, err := ioutil.ReadFile(runner.CoverageFile)
-
- if err != nil {
- fmt.Printf("Unable to read coverage file %s to combine, %v\n", runner.CoverageFile, err)
- return nil // non-fatal error
- }
-
- // remove the cover mode line from every file
- // except the first one
- if index > 0 {
- contents = modeRegex.ReplaceAll(contents, []byte{})
- }
-
- _, err = combined.Write(contents)
-
- // Add a newline to the end of every file if missing.
- if err == nil && len(contents) > 0 && contents[len(contents)-1] != '\n' {
- _, err = combined.Write([]byte("\n"))
- }
-
- if err != nil {
- fmt.Printf("Unable to append to coverprofile, %v\n", err)
- return nil // non-fatal error
- }
- }
-
- fmt.Println("All profiles combined")
- return nil
-}
-
-func (r *SpecRunner) isInCoverageMode() bool {
- opts := r.commandFlags.GoOpts
- return *opts["cover"].(*bool) || *opts["coverpkg"].(*string) != "" || *opts["covermode"].(*string) != ""
-}
-
-func (r *SpecRunner) getCoverprofile() string {
- return *r.commandFlags.GoOpts["coverprofile"].(*string)
-}
-
-func (r *SpecRunner) getOutputDir() string {
- return *r.commandFlags.GoOpts["outputdir"].(*string)
-}
-
-func (r *SpecRunner) ComputeSuccinctMode(numSuites int) {
- if config.DefaultReporterConfig.Verbose {
- config.DefaultReporterConfig.Succinct = false
- return
- }
-
- if numSuites == 1 {
- return
- }
-
- if numSuites > 1 && !r.commandFlags.wasSet("succinct") {
- config.DefaultReporterConfig.Succinct = true
- }
-}
-
-func (r *SpecRunner) UpdateSeed() {
- if !r.commandFlags.wasSet("seed") {
- config.GinkgoConfig.RandomSeed = time.Now().Unix()
- }
-}
-
-func (r *SpecRunner) randomizeOrder(runners []*testrunner.TestRunner) []*testrunner.TestRunner {
- if !r.commandFlags.RandomizeSuites {
- return runners
- }
-
- if len(runners) <= 1 {
- return runners
- }
-
- randomizedRunners := make([]*testrunner.TestRunner, len(runners))
- randomizer := rand.New(rand.NewSource(config.GinkgoConfig.RandomSeed))
- permutation := randomizer.Perm(len(runners))
- for i, j := range permutation {
- randomizedRunners[i] = runners[j]
- }
- return randomizedRunners
-}
-
-func orcMessage(iteration int) string {
- if iteration < 10 {
- return ""
- } else if iteration < 30 {
- return []string{
- "If at first you succeed...",
- "...try, try again.",
- "Looking good!",
- "Still good...",
- "I think your tests are fine....",
- "Yep, still passing",
- "Oh boy, here I go testin' again!",
- "Even the gophers are getting bored",
- "Did you try -race?",
- "Maybe you should stop now?",
- "I'm getting tired...",
- "What if I just made you a sandwich?",
- "Hit ^C, hit ^C, please hit ^C",
- "Make it stop. Please!",
- "Come on! Enough is enough!",
- "Dave, this conversation can serve no purpose anymore. Goodbye.",
- "Just what do you think you're doing, Dave? ",
- "I, Sisyphus",
- "Insanity: doing the same thing over and over again and expecting different results. -Einstein",
- "I guess Einstein never tried to churn butter",
- }[iteration-10] + "\n"
- } else {
- return "No, seriously... you can probably stop now.\n"
- }
-}
diff --git a/vendor/github.com/onsi/ginkgo/ginkgo/run_watch_and_build_command_flags.go b/vendor/github.com/onsi/ginkgo/ginkgo/run_watch_and_build_command_flags.go
deleted file mode 100644
index e0994fc3c..000000000
--- a/vendor/github.com/onsi/ginkgo/ginkgo/run_watch_and_build_command_flags.go
+++ /dev/null
@@ -1,169 +0,0 @@
-package main
-
-import (
- "flag"
- "runtime"
-
- "time"
-
- "github.com/onsi/ginkgo/config"
-)
-
-type RunWatchAndBuildCommandFlags struct {
- Recurse bool
- SkipPackage string
- GoOpts map[string]interface{}
-
- //for run and watch commands
- NumCPU int
- NumCompilers int
- ParallelStream bool
- Notify bool
- AfterSuiteHook string
- AutoNodes bool
- Timeout time.Duration
-
- //only for run command
- KeepGoing bool
- UntilItFails bool
- RandomizeSuites bool
-
- //only for watch command
- Depth int
- WatchRegExp string
-
- FlagSet *flag.FlagSet
-}
-
-const runMode = 1
-const watchMode = 2
-const buildMode = 3
-
-func NewRunCommandFlags(flagSet *flag.FlagSet) *RunWatchAndBuildCommandFlags {
- c := &RunWatchAndBuildCommandFlags{
- FlagSet: flagSet,
- }
- c.flags(runMode)
- return c
-}
-
-func NewWatchCommandFlags(flagSet *flag.FlagSet) *RunWatchAndBuildCommandFlags {
- c := &RunWatchAndBuildCommandFlags{
- FlagSet: flagSet,
- }
- c.flags(watchMode)
- return c
-}
-
-func NewBuildCommandFlags(flagSet *flag.FlagSet) *RunWatchAndBuildCommandFlags {
- c := &RunWatchAndBuildCommandFlags{
- FlagSet: flagSet,
- }
- c.flags(buildMode)
- return c
-}
-
-func (c *RunWatchAndBuildCommandFlags) wasSet(flagName string) bool {
- wasSet := false
- c.FlagSet.Visit(func(f *flag.Flag) {
- if f.Name == flagName {
- wasSet = true
- }
- })
-
- return wasSet
-}
-
-func (c *RunWatchAndBuildCommandFlags) computeNodes() {
- if c.wasSet("nodes") {
- return
- }
- if c.AutoNodes {
- switch n := runtime.NumCPU(); {
- case n <= 4:
- c.NumCPU = n
- default:
- c.NumCPU = n - 1
- }
- }
-}
-
-func (c *RunWatchAndBuildCommandFlags) stringSlot(slot string) *string {
- var opt string
- c.GoOpts[slot] = &opt
- return &opt
-}
-
-func (c *RunWatchAndBuildCommandFlags) boolSlot(slot string) *bool {
- var opt bool
- c.GoOpts[slot] = &opt
- return &opt
-}
-
-func (c *RunWatchAndBuildCommandFlags) intSlot(slot string) *int {
- var opt int
- c.GoOpts[slot] = &opt
- return &opt
-}
-
-func (c *RunWatchAndBuildCommandFlags) flags(mode int) {
- c.GoOpts = make(map[string]interface{})
-
- onWindows := (runtime.GOOS == "windows")
-
- c.FlagSet.BoolVar(&(c.Recurse), "r", false, "Find and run test suites under the current directory recursively.")
- c.FlagSet.BoolVar(c.boolSlot("race"), "race", false, "Run tests with race detection enabled.")
- c.FlagSet.BoolVar(c.boolSlot("cover"), "cover", false, "Run tests with coverage analysis, will generate coverage profiles with the package name in the current directory.")
- c.FlagSet.StringVar(c.stringSlot("coverpkg"), "coverpkg", "", "Run tests with coverage on the given external modules.")
- c.FlagSet.StringVar(&(c.SkipPackage), "skipPackage", "", "A comma-separated list of package names to be skipped. If any part of the package's path matches, that package is ignored.")
- c.FlagSet.StringVar(c.stringSlot("tags"), "tags", "", "A list of build tags to consider satisfied during the build.")
- c.FlagSet.StringVar(c.stringSlot("gcflags"), "gcflags", "", "Arguments to pass on each go tool compile invocation.")
- c.FlagSet.StringVar(c.stringSlot("covermode"), "covermode", "", "Set the mode for coverage analysis.")
- c.FlagSet.BoolVar(c.boolSlot("a"), "a", false, "Force rebuilding of packages that are already up-to-date.")
- c.FlagSet.BoolVar(c.boolSlot("n"), "n", false, "Have `go test` print the commands but do not run them.")
- c.FlagSet.BoolVar(c.boolSlot("msan"), "msan", false, "Enable interoperation with memory sanitizer.")
- c.FlagSet.BoolVar(c.boolSlot("x"), "x", false, "Have `go test` print the commands.")
- c.FlagSet.BoolVar(c.boolSlot("work"), "work", false, "Print the name of the temporary work directory and do not delete it when exiting.")
- c.FlagSet.StringVar(c.stringSlot("asmflags"), "asmflags", "", "Arguments to pass on each go tool asm invocation.")
- c.FlagSet.StringVar(c.stringSlot("buildmode"), "buildmode", "", "Build mode to use. See 'go help buildmode' for more.")
- c.FlagSet.StringVar(c.stringSlot("mod"), "mod", "", "Go module control. See 'go help modules' for more.")
- c.FlagSet.StringVar(c.stringSlot("compiler"), "compiler", "", "Name of compiler to use, as in runtime.Compiler (gccgo or gc).")
- c.FlagSet.StringVar(c.stringSlot("gccgoflags"), "gccgoflags", "", "Arguments to pass on each gccgo compiler/linker invocation.")
- c.FlagSet.StringVar(c.stringSlot("installsuffix"), "installsuffix", "", "A suffix to use in the name of the package installation directory.")
- c.FlagSet.StringVar(c.stringSlot("ldflags"), "ldflags", "", "Arguments to pass on each go tool link invocation.")
- c.FlagSet.BoolVar(c.boolSlot("linkshared"), "linkshared", false, "Link against shared libraries previously created with -buildmode=shared.")
- c.FlagSet.StringVar(c.stringSlot("pkgdir"), "pkgdir", "", "install and load all packages from the given dir instead of the usual locations.")
- c.FlagSet.StringVar(c.stringSlot("toolexec"), "toolexec", "", "a program to use to invoke toolchain programs like vet and asm.")
- c.FlagSet.IntVar(c.intSlot("blockprofilerate"), "blockprofilerate", 1, "Control the detail provided in goroutine blocking profiles by calling runtime.SetBlockProfileRate with the given value.")
- c.FlagSet.StringVar(c.stringSlot("coverprofile"), "coverprofile", "", "Write a coverage profile to the specified file after all tests have passed.")
- c.FlagSet.StringVar(c.stringSlot("cpuprofile"), "cpuprofile", "", "Write a CPU profile to the specified file before exiting.")
- c.FlagSet.StringVar(c.stringSlot("memprofile"), "memprofile", "", "Write a memory profile to the specified file after all tests have passed.")
- c.FlagSet.IntVar(c.intSlot("memprofilerate"), "memprofilerate", 0, "Enable more precise (and expensive) memory profiles by setting runtime.MemProfileRate.")
- c.FlagSet.StringVar(c.stringSlot("outputdir"), "outputdir", "", "Place output files from profiling in the specified directory.")
- c.FlagSet.BoolVar(c.boolSlot("requireSuite"), "requireSuite", false, "Fail if there are ginkgo tests in a directory but no test suite (missing RunSpecs)")
- c.FlagSet.StringVar(c.stringSlot("vet"), "vet", "", "Configure the invocation of 'go vet' to use the comma-separated list of vet checks. If list is 'off', 'go test' does not run 'go vet' at all.")
-
- if mode == runMode || mode == watchMode {
- config.Flags(c.FlagSet, "", false)
- c.FlagSet.IntVar(&(c.NumCPU), "nodes", 1, "The number of parallel test nodes to run")
- c.FlagSet.IntVar(&(c.NumCompilers), "compilers", 0, "The number of concurrent compilations to run (0 will autodetect)")
- c.FlagSet.BoolVar(&(c.AutoNodes), "p", false, "Run in parallel with auto-detected number of nodes")
- c.FlagSet.BoolVar(&(c.ParallelStream), "stream", onWindows, "stream parallel test output in real time: less coherent, but useful for debugging")
- if !onWindows {
- c.FlagSet.BoolVar(&(c.Notify), "notify", false, "Send desktop notifications when a test run completes")
- }
- c.FlagSet.StringVar(&(c.AfterSuiteHook), "afterSuiteHook", "", "Run a command when a suite test run completes")
- c.FlagSet.DurationVar(&(c.Timeout), "timeout", 24*time.Hour, "Suite fails if it does not complete within the specified timeout")
- }
-
- if mode == runMode {
- c.FlagSet.BoolVar(&(c.KeepGoing), "keepGoing", false, "When true, failures from earlier test suites do not prevent later test suites from running")
- c.FlagSet.BoolVar(&(c.UntilItFails), "untilItFails", false, "When true, Ginkgo will keep rerunning tests until a failure occurs")
- c.FlagSet.BoolVar(&(c.RandomizeSuites), "randomizeSuites", false, "When true, Ginkgo will randomize the order in which test suites run")
- }
-
- if mode == watchMode {
- c.FlagSet.IntVar(&(c.Depth), "depth", 1, "Ginkgo will watch dependencies down to this depth in the dependency tree")
- c.FlagSet.StringVar(&(c.WatchRegExp), "watchRegExp", `\.go$`, "Files matching this regular expression will be watched for changes")
- }
-}
diff --git a/vendor/github.com/onsi/ginkgo/ginkgo/suite_runner.go b/vendor/github.com/onsi/ginkgo/ginkgo/suite_runner.go
deleted file mode 100644
index ab746d7e9..000000000
--- a/vendor/github.com/onsi/ginkgo/ginkgo/suite_runner.go
+++ /dev/null
@@ -1,173 +0,0 @@
-package main
-
-import (
- "fmt"
- "runtime"
- "sync"
-
- "github.com/onsi/ginkgo/config"
- "github.com/onsi/ginkgo/ginkgo/interrupthandler"
- "github.com/onsi/ginkgo/ginkgo/testrunner"
- "github.com/onsi/ginkgo/ginkgo/testsuite"
- colorable "github.com/onsi/ginkgo/reporters/stenographer/support/go-colorable"
-)
-
-type compilationInput struct {
- runner *testrunner.TestRunner
- result chan compilationOutput
-}
-
-type compilationOutput struct {
- runner *testrunner.TestRunner
- err error
-}
-
-type SuiteRunner struct {
- notifier *Notifier
- interruptHandler *interrupthandler.InterruptHandler
-}
-
-func NewSuiteRunner(notifier *Notifier, interruptHandler *interrupthandler.InterruptHandler) *SuiteRunner {
- return &SuiteRunner{
- notifier: notifier,
- interruptHandler: interruptHandler,
- }
-}
-
-func (r *SuiteRunner) compileInParallel(runners []*testrunner.TestRunner, numCompilers int, willCompile func(suite testsuite.TestSuite)) chan compilationOutput {
- //we return this to the consumer, it will return each runner in order as it compiles
- compilationOutputs := make(chan compilationOutput, len(runners))
-
- //an array of channels - the nth runner's compilation output is sent to the nth channel in this array
- //we read from these channels in order to ensure we run the suites in order
- orderedCompilationOutputs := []chan compilationOutput{}
- for range runners {
- orderedCompilationOutputs = append(orderedCompilationOutputs, make(chan compilationOutput, 1))
- }
-
- //we're going to spin up numCompilers compilers - they're going to run concurrently and will consume this channel
- //we prefill the channel then close it, this ensures we compile things in the correct order
- workPool := make(chan compilationInput, len(runners))
- for i, runner := range runners {
- workPool <- compilationInput{runner, orderedCompilationOutputs[i]}
- }
- close(workPool)
-
- //pick a reasonable numCompilers
- if numCompilers == 0 {
- numCompilers = runtime.NumCPU()
- }
-
- //a WaitGroup to help us wait for all compilers to shut down
- wg := &sync.WaitGroup{}
- wg.Add(numCompilers)
-
- //spin up the concurrent compilers
- for i := 0; i < numCompilers; i++ {
- go func() {
- defer wg.Done()
- for input := range workPool {
- if r.interruptHandler.WasInterrupted() {
- return
- }
-
- if willCompile != nil {
- willCompile(input.runner.Suite)
- }
-
- //We retry because Go sometimes steps on itself when multiple compiles happen in parallel. This is ugly, but should help resolve flakiness...
- var err error
- retries := 0
- for retries <= 5 {
- if r.interruptHandler.WasInterrupted() {
- return
- }
- if err = input.runner.Compile(); err == nil {
- break
- }
- retries++
- }
-
- input.result <- compilationOutput{input.runner, err}
- }
- }()
- }
-
- //read from the compilation output channels *in order* and send them to the caller
- //close the compilationOutputs channel to tell the caller we're done
- go func() {
- defer close(compilationOutputs)
- for _, orderedCompilationOutput := range orderedCompilationOutputs {
- select {
- case compilationOutput := <-orderedCompilationOutput:
- compilationOutputs <- compilationOutput
- case <-r.interruptHandler.C:
- //interrupt detected, wait for the compilers to shut down then bail
- //this ensure we clean up after ourselves as we don't leave any compilation processes running
- wg.Wait()
- return
- }
- }
- }()
-
- return compilationOutputs
-}
-
-func (r *SuiteRunner) RunSuites(runners []*testrunner.TestRunner, numCompilers int, keepGoing bool, willCompile func(suite testsuite.TestSuite)) (testrunner.RunResult, int) {
- runResult := testrunner.PassingRunResult()
-
- compilationOutputs := r.compileInParallel(runners, numCompilers, willCompile)
-
- numSuitesThatRan := 0
- suitesThatFailed := []testsuite.TestSuite{}
- for compilationOutput := range compilationOutputs {
- if compilationOutput.err != nil {
- fmt.Print(compilationOutput.err.Error())
- }
- numSuitesThatRan++
- suiteRunResult := testrunner.FailingRunResult()
- if compilationOutput.err == nil {
- suiteRunResult = compilationOutput.runner.Run()
- }
- r.notifier.SendSuiteCompletionNotification(compilationOutput.runner.Suite, suiteRunResult.Passed)
- r.notifier.RunCommand(compilationOutput.runner.Suite, suiteRunResult.Passed)
- runResult = runResult.Merge(suiteRunResult)
- if !suiteRunResult.Passed {
- suitesThatFailed = append(suitesThatFailed, compilationOutput.runner.Suite)
- if !keepGoing {
- break
- }
- }
- if numSuitesThatRan < len(runners) && !config.DefaultReporterConfig.Succinct {
- fmt.Println("")
- }
- }
-
- if keepGoing && !runResult.Passed {
- r.listFailedSuites(suitesThatFailed)
- }
-
- return runResult, numSuitesThatRan
-}
-
-func (r *SuiteRunner) listFailedSuites(suitesThatFailed []testsuite.TestSuite) {
- fmt.Println("")
- fmt.Println("There were failures detected in the following suites:")
-
- maxPackageNameLength := 0
- for _, suite := range suitesThatFailed {
- if len(suite.PackageName) > maxPackageNameLength {
- maxPackageNameLength = len(suite.PackageName)
- }
- }
-
- packageNameFormatter := fmt.Sprintf("%%%ds", maxPackageNameLength)
-
- for _, suite := range suitesThatFailed {
- if config.DefaultReporterConfig.NoColor {
- fmt.Printf("\t"+packageNameFormatter+" %s\n", suite.PackageName, suite.Path)
- } else {
- fmt.Fprintf(colorable.NewColorableStdout(), "\t%s"+packageNameFormatter+"%s %s%s%s\n", redColor, suite.PackageName, defaultStyle, lightGrayColor, suite.Path, defaultStyle)
- }
- }
-}
diff --git a/vendor/github.com/onsi/ginkgo/ginkgo/testrunner/build_args.go b/vendor/github.com/onsi/ginkgo/ginkgo/testrunner/build_args.go
deleted file mode 100644
index 3b1a238c2..000000000
--- a/vendor/github.com/onsi/ginkgo/ginkgo/testrunner/build_args.go
+++ /dev/null
@@ -1,7 +0,0 @@
-// +build go1.10
-
-package testrunner
-
-var (
- buildArgs = []string{"test", "-c"}
-)
diff --git a/vendor/github.com/onsi/ginkgo/ginkgo/testrunner/build_args_old.go b/vendor/github.com/onsi/ginkgo/ginkgo/testrunner/build_args_old.go
deleted file mode 100644
index 14d70dbcc..000000000
--- a/vendor/github.com/onsi/ginkgo/ginkgo/testrunner/build_args_old.go
+++ /dev/null
@@ -1,7 +0,0 @@
-// +build !go1.10
-
-package testrunner
-
-var (
- buildArgs = []string{"test", "-c", "-i"}
-)
diff --git a/vendor/github.com/onsi/ginkgo/ginkgo/testrunner/log_writer.go b/vendor/github.com/onsi/ginkgo/ginkgo/testrunner/log_writer.go
deleted file mode 100644
index a73a6e379..000000000
--- a/vendor/github.com/onsi/ginkgo/ginkgo/testrunner/log_writer.go
+++ /dev/null
@@ -1,52 +0,0 @@
-package testrunner
-
-import (
- "bytes"
- "fmt"
- "io"
- "log"
- "strings"
- "sync"
-)
-
-type logWriter struct {
- buffer *bytes.Buffer
- lock *sync.Mutex
- log *log.Logger
-}
-
-func newLogWriter(target io.Writer, node int) *logWriter {
- return &logWriter{
- buffer: &bytes.Buffer{},
- lock: &sync.Mutex{},
- log: log.New(target, fmt.Sprintf("[%d] ", node), 0),
- }
-}
-
-func (w *logWriter) Write(data []byte) (n int, err error) {
- w.lock.Lock()
- defer w.lock.Unlock()
-
- w.buffer.Write(data)
- contents := w.buffer.String()
-
- lines := strings.Split(contents, "\n")
- for _, line := range lines[0 : len(lines)-1] {
- w.log.Println(line)
- }
-
- w.buffer.Reset()
- w.buffer.Write([]byte(lines[len(lines)-1]))
- return len(data), nil
-}
-
-func (w *logWriter) Close() error {
- w.lock.Lock()
- defer w.lock.Unlock()
-
- if w.buffer.Len() > 0 {
- w.log.Println(w.buffer.String())
- }
-
- return nil
-}
diff --git a/vendor/github.com/onsi/ginkgo/ginkgo/testrunner/run_result.go b/vendor/github.com/onsi/ginkgo/ginkgo/testrunner/run_result.go
deleted file mode 100644
index 5d472acb8..000000000
--- a/vendor/github.com/onsi/ginkgo/ginkgo/testrunner/run_result.go
+++ /dev/null
@@ -1,27 +0,0 @@
-package testrunner
-
-type RunResult struct {
- Passed bool
- HasProgrammaticFocus bool
-}
-
-func PassingRunResult() RunResult {
- return RunResult{
- Passed: true,
- HasProgrammaticFocus: false,
- }
-}
-
-func FailingRunResult() RunResult {
- return RunResult{
- Passed: false,
- HasProgrammaticFocus: false,
- }
-}
-
-func (r RunResult) Merge(o RunResult) RunResult {
- return RunResult{
- Passed: r.Passed && o.Passed,
- HasProgrammaticFocus: r.HasProgrammaticFocus || o.HasProgrammaticFocus,
- }
-}
diff --git a/vendor/github.com/onsi/ginkgo/ginkgo/testrunner/test_runner.go b/vendor/github.com/onsi/ginkgo/ginkgo/testrunner/test_runner.go
deleted file mode 100644
index 66c0f06f6..000000000
--- a/vendor/github.com/onsi/ginkgo/ginkgo/testrunner/test_runner.go
+++ /dev/null
@@ -1,554 +0,0 @@
-package testrunner
-
-import (
- "bytes"
- "fmt"
- "io"
- "io/ioutil"
- "os"
- "os/exec"
- "path/filepath"
- "strconv"
- "strings"
- "syscall"
- "time"
-
- "github.com/onsi/ginkgo/config"
- "github.com/onsi/ginkgo/ginkgo/testsuite"
- "github.com/onsi/ginkgo/internal/remote"
- "github.com/onsi/ginkgo/reporters/stenographer"
- colorable "github.com/onsi/ginkgo/reporters/stenographer/support/go-colorable"
- "github.com/onsi/ginkgo/types"
-)
-
-type TestRunner struct {
- Suite testsuite.TestSuite
-
- compiled bool
- compilationTargetPath string
-
- numCPU int
- parallelStream bool
- timeout time.Duration
- goOpts map[string]interface{}
- additionalArgs []string
- stderr *bytes.Buffer
-
- CoverageFile string
-}
-
-func New(suite testsuite.TestSuite, numCPU int, parallelStream bool, timeout time.Duration, goOpts map[string]interface{}, additionalArgs []string) *TestRunner {
- runner := &TestRunner{
- Suite: suite,
- numCPU: numCPU,
- parallelStream: parallelStream,
- goOpts: goOpts,
- additionalArgs: additionalArgs,
- timeout: timeout,
- stderr: new(bytes.Buffer),
- }
-
- if !suite.Precompiled {
- runner.compilationTargetPath, _ = filepath.Abs(filepath.Join(suite.Path, suite.PackageName+".test"))
- }
-
- return runner
-}
-
-func (t *TestRunner) Compile() error {
- return t.CompileTo(t.compilationTargetPath)
-}
-
-func (t *TestRunner) BuildArgs(path string) []string {
- args := make([]string, len(buildArgs), len(buildArgs)+3)
- copy(args, buildArgs)
- args = append(args, "-o", path, t.Suite.Path)
-
- if t.getCoverMode() != "" {
- args = append(args, "-cover", fmt.Sprintf("-covermode=%s", t.getCoverMode()))
- } else {
- if t.shouldCover() || t.getCoverPackage() != "" {
- args = append(args, "-cover", "-covermode=atomic")
- }
- }
-
- boolOpts := []string{
- "a",
- "n",
- "msan",
- "race",
- "x",
- "work",
- "linkshared",
- }
-
- for _, opt := range boolOpts {
- if s, found := t.goOpts[opt].(*bool); found && *s {
- args = append(args, fmt.Sprintf("-%s", opt))
- }
- }
-
- intOpts := []string{
- "memprofilerate",
- "blockprofilerate",
- }
-
- for _, opt := range intOpts {
- if s, found := t.goOpts[opt].(*int); found {
- args = append(args, fmt.Sprintf("-%s=%d", opt, *s))
- }
- }
-
- stringOpts := []string{
- "asmflags",
- "buildmode",
- "compiler",
- "gccgoflags",
- "installsuffix",
- "ldflags",
- "pkgdir",
- "toolexec",
- "coverprofile",
- "cpuprofile",
- "memprofile",
- "outputdir",
- "coverpkg",
- "tags",
- "gcflags",
- "vet",
- "mod",
- }
-
- for _, opt := range stringOpts {
- if s, found := t.goOpts[opt].(*string); found && *s != "" {
- args = append(args, fmt.Sprintf("-%s=%s", opt, *s))
- }
- }
- return args
-}
-
-func (t *TestRunner) CompileTo(path string) error {
- if t.compiled {
- return nil
- }
-
- if t.Suite.Precompiled {
- return nil
- }
-
- args := t.BuildArgs(path)
- cmd := exec.Command("go", args...)
-
- output, err := cmd.CombinedOutput()
-
- if err != nil {
- if len(output) > 0 {
- return fmt.Errorf("Failed to compile %s:\n\n%s", t.Suite.PackageName, output)
- }
- return fmt.Errorf("Failed to compile %s", t.Suite.PackageName)
- }
-
- if len(output) > 0 {
- fmt.Println(string(output))
- }
-
- if !fileExists(path) {
- compiledFile := t.Suite.PackageName + ".test"
- if fileExists(compiledFile) {
- // seems like we are on an old go version that does not support the -o flag on go test
- // move the compiled test file to the desired location by hand
- err = os.Rename(compiledFile, path)
- if err != nil {
- // We cannot move the file, perhaps because the source and destination
- // are on different partitions. We can copy the file, however.
- err = copyFile(compiledFile, path)
- if err != nil {
- return fmt.Errorf("Failed to copy compiled file: %s", err)
- }
- }
- } else {
- return fmt.Errorf("Failed to compile %s: output file %q could not be found", t.Suite.PackageName, path)
- }
- }
-
- t.compiled = true
-
- return nil
-}
-
-func fileExists(path string) bool {
- _, err := os.Stat(path)
- return err == nil || !os.IsNotExist(err)
-}
-
-// copyFile copies the contents of the file named src to the file named
-// by dst. The file will be created if it does not already exist. If the
-// destination file exists, all it's contents will be replaced by the contents
-// of the source file.
-func copyFile(src, dst string) error {
- srcInfo, err := os.Stat(src)
- if err != nil {
- return err
- }
- mode := srcInfo.Mode()
-
- in, err := os.Open(src)
- if err != nil {
- return err
- }
-
- defer in.Close()
-
- out, err := os.Create(dst)
- if err != nil {
- return err
- }
-
- defer func() {
- closeErr := out.Close()
- if err == nil {
- err = closeErr
- }
- }()
-
- _, err = io.Copy(out, in)
- if err != nil {
- return err
- }
-
- err = out.Sync()
- if err != nil {
- return err
- }
-
- return out.Chmod(mode)
-}
-
-func (t *TestRunner) Run() RunResult {
- if t.Suite.IsGinkgo {
- if t.numCPU > 1 {
- if t.parallelStream {
- return t.runAndStreamParallelGinkgoSuite()
- } else {
- return t.runParallelGinkgoSuite()
- }
- } else {
- return t.runSerialGinkgoSuite()
- }
- } else {
- return t.runGoTestSuite()
- }
-}
-
-func (t *TestRunner) CleanUp() {
- if t.Suite.Precompiled {
- return
- }
- os.Remove(t.compilationTargetPath)
-}
-
-func (t *TestRunner) runSerialGinkgoSuite() RunResult {
- ginkgoArgs := config.BuildFlagArgs("ginkgo", config.GinkgoConfig, config.DefaultReporterConfig)
- return t.run(t.cmd(ginkgoArgs, os.Stdout, 1), nil)
-}
-
-func (t *TestRunner) runGoTestSuite() RunResult {
- return t.run(t.cmd([]string{"-test.v"}, os.Stdout, 1), nil)
-}
-
-func (t *TestRunner) runAndStreamParallelGinkgoSuite() RunResult {
- completions := make(chan RunResult)
- writers := make([]*logWriter, t.numCPU)
-
- server, err := remote.NewServer(t.numCPU)
- if err != nil {
- panic("Failed to start parallel spec server")
- }
-
- server.Start()
- defer server.Close()
-
- for cpu := 0; cpu < t.numCPU; cpu++ {
- config.GinkgoConfig.ParallelNode = cpu + 1
- config.GinkgoConfig.ParallelTotal = t.numCPU
- config.GinkgoConfig.SyncHost = server.Address()
-
- ginkgoArgs := config.BuildFlagArgs("ginkgo", config.GinkgoConfig, config.DefaultReporterConfig)
-
- writers[cpu] = newLogWriter(os.Stdout, cpu+1)
-
- cmd := t.cmd(ginkgoArgs, writers[cpu], cpu+1)
-
- server.RegisterAlive(cpu+1, func() bool {
- if cmd.ProcessState == nil {
- return true
- }
- return !cmd.ProcessState.Exited()
- })
-
- go t.run(cmd, completions)
- }
-
- res := PassingRunResult()
-
- for cpu := 0; cpu < t.numCPU; cpu++ {
- res = res.Merge(<-completions)
- }
-
- for _, writer := range writers {
- writer.Close()
- }
-
- os.Stdout.Sync()
-
- if t.shouldCombineCoverprofiles() {
- t.combineCoverprofiles()
- }
-
- return res
-}
-
-func (t *TestRunner) runParallelGinkgoSuite() RunResult {
- result := make(chan bool)
- completions := make(chan RunResult)
- writers := make([]*logWriter, t.numCPU)
- reports := make([]*bytes.Buffer, t.numCPU)
-
- stenographer := stenographer.New(!config.DefaultReporterConfig.NoColor, config.GinkgoConfig.FlakeAttempts > 1, colorable.NewColorableStdout())
- aggregator := remote.NewAggregator(t.numCPU, result, config.DefaultReporterConfig, stenographer)
-
- server, err := remote.NewServer(t.numCPU)
- if err != nil {
- panic("Failed to start parallel spec server")
- }
- server.RegisterReporters(aggregator)
- server.Start()
- defer server.Close()
-
- for cpu := 0; cpu < t.numCPU; cpu++ {
- config.GinkgoConfig.ParallelNode = cpu + 1
- config.GinkgoConfig.ParallelTotal = t.numCPU
- config.GinkgoConfig.SyncHost = server.Address()
- config.GinkgoConfig.StreamHost = server.Address()
-
- ginkgoArgs := config.BuildFlagArgs("ginkgo", config.GinkgoConfig, config.DefaultReporterConfig)
-
- reports[cpu] = &bytes.Buffer{}
- writers[cpu] = newLogWriter(reports[cpu], cpu+1)
-
- cmd := t.cmd(ginkgoArgs, writers[cpu], cpu+1)
-
- server.RegisterAlive(cpu+1, func() bool {
- if cmd.ProcessState == nil {
- return true
- }
- return !cmd.ProcessState.Exited()
- })
-
- go t.run(cmd, completions)
- }
-
- res := PassingRunResult()
-
- for cpu := 0; cpu < t.numCPU; cpu++ {
- res = res.Merge(<-completions)
- }
-
- //all test processes are done, at this point
- //we should be able to wait for the aggregator to tell us that it's done
-
- select {
- case <-result:
- fmt.Println("")
- case <-time.After(time.Second):
- //the aggregator never got back to us! something must have gone wrong
- fmt.Println(`
- -------------------------------------------------------------------
- | |
- | Ginkgo timed out waiting for all parallel nodes to report back! |
- | |
- -------------------------------------------------------------------`)
- fmt.Println("\n", t.Suite.PackageName, "timed out. path:", t.Suite.Path)
- os.Stdout.Sync()
-
- for _, writer := range writers {
- writer.Close()
- }
-
- for _, report := range reports {
- fmt.Print(report.String())
- }
-
- os.Stdout.Sync()
- }
-
- if t.shouldCombineCoverprofiles() {
- t.combineCoverprofiles()
- }
-
- return res
-}
-
-const CoverProfileSuffix = ".coverprofile"
-
-func (t *TestRunner) cmd(ginkgoArgs []string, stream io.Writer, node int) *exec.Cmd {
- args := []string{"--test.timeout=" + t.timeout.String()}
-
- coverProfile := t.getCoverProfile()
-
- if t.shouldCombineCoverprofiles() {
-
- testCoverProfile := "--test.coverprofile="
-
- coverageFile := ""
- // Set default name for coverage results
- if coverProfile == "" {
- coverageFile = t.Suite.PackageName + CoverProfileSuffix
- } else {
- coverageFile = coverProfile
- }
-
- testCoverProfile += coverageFile
-
- t.CoverageFile = filepath.Join(t.Suite.Path, coverageFile)
-
- if t.numCPU > 1 {
- testCoverProfile = fmt.Sprintf("%s.%d", testCoverProfile, node)
- }
- args = append(args, testCoverProfile)
- }
-
- args = append(args, ginkgoArgs...)
- args = append(args, t.additionalArgs...)
-
- path := t.compilationTargetPath
- if t.Suite.Precompiled {
- path, _ = filepath.Abs(filepath.Join(t.Suite.Path, fmt.Sprintf("%s.test", t.Suite.PackageName)))
- }
-
- cmd := exec.Command(path, args...)
-
- cmd.Dir = t.Suite.Path
- cmd.Stderr = io.MultiWriter(stream, t.stderr)
- cmd.Stdout = stream
-
- return cmd
-}
-
-func (t *TestRunner) shouldCover() bool {
- return *t.goOpts["cover"].(*bool)
-}
-
-func (t *TestRunner) shouldRequireSuite() bool {
- return *t.goOpts["requireSuite"].(*bool)
-}
-
-func (t *TestRunner) getCoverProfile() string {
- return *t.goOpts["coverprofile"].(*string)
-}
-
-func (t *TestRunner) getCoverPackage() string {
- return *t.goOpts["coverpkg"].(*string)
-}
-
-func (t *TestRunner) getCoverMode() string {
- return *t.goOpts["covermode"].(*string)
-}
-
-func (t *TestRunner) shouldCombineCoverprofiles() bool {
- return t.shouldCover() || t.getCoverPackage() != "" || t.getCoverMode() != ""
-}
-
-func (t *TestRunner) run(cmd *exec.Cmd, completions chan RunResult) RunResult {
- var res RunResult
-
- defer func() {
- if completions != nil {
- completions <- res
- }
- }()
-
- err := cmd.Start()
- if err != nil {
- fmt.Printf("Failed to run test suite!\n\t%s", err.Error())
- return res
- }
-
- cmd.Wait()
-
- exitStatus := cmd.ProcessState.Sys().(syscall.WaitStatus).ExitStatus()
- res.Passed = (exitStatus == 0) || (exitStatus == types.GINKGO_FOCUS_EXIT_CODE)
- res.HasProgrammaticFocus = (exitStatus == types.GINKGO_FOCUS_EXIT_CODE)
-
- if strings.Contains(t.stderr.String(), "warning: no tests to run") {
- if t.shouldRequireSuite() {
- res.Passed = false
- }
- fmt.Fprintf(os.Stderr, `Found no test suites, did you forget to run "ginkgo bootstrap"?`)
- }
-
- return res
-}
-
-func (t *TestRunner) combineCoverprofiles() {
- profiles := []string{}
-
- coverProfile := t.getCoverProfile()
-
- for cpu := 1; cpu <= t.numCPU; cpu++ {
- var coverFile string
- if coverProfile == "" {
- coverFile = fmt.Sprintf("%s%s.%d", t.Suite.PackageName, CoverProfileSuffix, cpu)
- } else {
- coverFile = fmt.Sprintf("%s.%d", coverProfile, cpu)
- }
-
- coverFile = filepath.Join(t.Suite.Path, coverFile)
- coverProfile, err := ioutil.ReadFile(coverFile)
- os.Remove(coverFile)
-
- if err == nil {
- profiles = append(profiles, string(coverProfile))
- }
- }
-
- if len(profiles) != t.numCPU {
- return
- }
-
- lines := map[string]int{}
- lineOrder := []string{}
- for i, coverProfile := range profiles {
- for _, line := range strings.Split(coverProfile, "\n")[1:] {
- if len(line) == 0 {
- continue
- }
- components := strings.Split(line, " ")
- count, _ := strconv.Atoi(components[len(components)-1])
- prefix := strings.Join(components[0:len(components)-1], " ")
- lines[prefix] += count
- if i == 0 {
- lineOrder = append(lineOrder, prefix)
- }
- }
- }
-
- output := []string{"mode: atomic"}
- for _, line := range lineOrder {
- output = append(output, fmt.Sprintf("%s %d", line, lines[line]))
- }
- finalOutput := strings.Join(output, "\n")
-
- finalFilename := ""
-
- if coverProfile != "" {
- finalFilename = coverProfile
- } else {
- finalFilename = fmt.Sprintf("%s%s", t.Suite.PackageName, CoverProfileSuffix)
- }
-
- coverageFilepath := filepath.Join(t.Suite.Path, finalFilename)
- ioutil.WriteFile(coverageFilepath, []byte(finalOutput), 0666)
-
- t.CoverageFile = coverageFilepath
-}
diff --git a/vendor/github.com/onsi/ginkgo/ginkgo/testsuite/test_suite.go b/vendor/github.com/onsi/ginkgo/ginkgo/testsuite/test_suite.go
deleted file mode 100644
index 9de8c2bb4..000000000
--- a/vendor/github.com/onsi/ginkgo/ginkgo/testsuite/test_suite.go
+++ /dev/null
@@ -1,115 +0,0 @@
-package testsuite
-
-import (
- "errors"
- "io/ioutil"
- "os"
- "path/filepath"
- "regexp"
- "strings"
-)
-
-type TestSuite struct {
- Path string
- PackageName string
- IsGinkgo bool
- Precompiled bool
-}
-
-func PrecompiledTestSuite(path string) (TestSuite, error) {
- info, err := os.Stat(path)
- if err != nil {
- return TestSuite{}, err
- }
-
- if info.IsDir() {
- return TestSuite{}, errors.New("this is a directory, not a file")
- }
-
- if filepath.Ext(path) != ".test" {
- return TestSuite{}, errors.New("this is not a .test binary")
- }
-
- if info.Mode()&0111 == 0 {
- return TestSuite{}, errors.New("this is not executable")
- }
-
- dir := relPath(filepath.Dir(path))
- packageName := strings.TrimSuffix(filepath.Base(path), filepath.Ext(path))
-
- return TestSuite{
- Path: dir,
- PackageName: packageName,
- IsGinkgo: true,
- Precompiled: true,
- }, nil
-}
-
-func SuitesInDir(dir string, recurse bool) []TestSuite {
- suites := []TestSuite{}
-
- if vendorExperimentCheck(dir) {
- return suites
- }
-
- files, _ := ioutil.ReadDir(dir)
- re := regexp.MustCompile(`^[^._].*_test\.go$`)
- for _, file := range files {
- if !file.IsDir() && re.Match([]byte(file.Name())) {
- suites = append(suites, New(dir, files))
- break
- }
- }
-
- if recurse {
- re = regexp.MustCompile(`^[._]`)
- for _, file := range files {
- if file.IsDir() && !re.Match([]byte(file.Name())) {
- suites = append(suites, SuitesInDir(dir+"/"+file.Name(), recurse)...)
- }
- }
- }
-
- return suites
-}
-
-func relPath(dir string) string {
- dir, _ = filepath.Abs(dir)
- cwd, _ := os.Getwd()
- dir, _ = filepath.Rel(cwd, filepath.Clean(dir))
-
- if string(dir[0]) != "." {
- dir = "." + string(filepath.Separator) + dir
- }
-
- return dir
-}
-
-func New(dir string, files []os.FileInfo) TestSuite {
- return TestSuite{
- Path: relPath(dir),
- PackageName: packageNameForSuite(dir),
- IsGinkgo: filesHaveGinkgoSuite(dir, files),
- }
-}
-
-func packageNameForSuite(dir string) string {
- path, _ := filepath.Abs(dir)
- return filepath.Base(path)
-}
-
-func filesHaveGinkgoSuite(dir string, files []os.FileInfo) bool {
- reTestFile := regexp.MustCompile(`_test\.go$`)
- reGinkgo := regexp.MustCompile(`package ginkgo|\/ginkgo"`)
-
- for _, file := range files {
- if !file.IsDir() && reTestFile.Match([]byte(file.Name())) {
- contents, _ := ioutil.ReadFile(dir + "/" + file.Name())
- if reGinkgo.Match(contents) {
- return true
- }
- }
- }
-
- return false
-}
diff --git a/vendor/github.com/onsi/ginkgo/ginkgo/testsuite/vendor_check_go15.go b/vendor/github.com/onsi/ginkgo/ginkgo/testsuite/vendor_check_go15.go
deleted file mode 100644
index 75f827a12..000000000
--- a/vendor/github.com/onsi/ginkgo/ginkgo/testsuite/vendor_check_go15.go
+++ /dev/null
@@ -1,16 +0,0 @@
-// +build !go1.6
-
-package testsuite
-
-import (
- "os"
- "path"
-)
-
-// "This change will only be enabled if the go command is run with
-// GO15VENDOREXPERIMENT=1 in its environment."
-// c.f. the vendor-experiment proposal https://goo.gl/2ucMeC
-func vendorExperimentCheck(dir string) bool {
- vendorExperiment := os.Getenv("GO15VENDOREXPERIMENT")
- return vendorExperiment == "1" && path.Base(dir) == "vendor"
-}
diff --git a/vendor/github.com/onsi/ginkgo/ginkgo/testsuite/vendor_check_go16.go b/vendor/github.com/onsi/ginkgo/ginkgo/testsuite/vendor_check_go16.go
deleted file mode 100644
index 596e5e5c1..000000000
--- a/vendor/github.com/onsi/ginkgo/ginkgo/testsuite/vendor_check_go16.go
+++ /dev/null
@@ -1,15 +0,0 @@
-// +build go1.6
-
-package testsuite
-
-import (
- "os"
- "path"
-)
-
-// in 1.6 the vendor directory became the default go behaviour, so now
-// check if its disabled.
-func vendorExperimentCheck(dir string) bool {
- vendorExperiment := os.Getenv("GO15VENDOREXPERIMENT")
- return vendorExperiment != "0" && path.Base(dir) == "vendor"
-}
diff --git a/vendor/github.com/onsi/ginkgo/ginkgo/version_command.go b/vendor/github.com/onsi/ginkgo/ginkgo/version_command.go
deleted file mode 100644
index a5b68c216..000000000
--- a/vendor/github.com/onsi/ginkgo/ginkgo/version_command.go
+++ /dev/null
@@ -1,25 +0,0 @@
-package main
-
-import (
- "flag"
- "fmt"
-
- "github.com/onsi/ginkgo/config"
-)
-
-func BuildVersionCommand() *Command {
- return &Command{
- Name: "version",
- FlagSet: flag.NewFlagSet("version", flag.ExitOnError),
- UsageCommand: "ginkgo version",
- Usage: []string{
- "Print Ginkgo's version",
- },
- Command: printVersion,
- }
-}
-
-func printVersion([]string, []string) {
- fmt.Printf("Ginkgo Version %s\n", config.VERSION)
- emitRCAdvertisement()
-}
diff --git a/vendor/github.com/onsi/ginkgo/ginkgo/watch_command.go b/vendor/github.com/onsi/ginkgo/ginkgo/watch_command.go
deleted file mode 100644
index a6ef053c8..000000000
--- a/vendor/github.com/onsi/ginkgo/ginkgo/watch_command.go
+++ /dev/null
@@ -1,175 +0,0 @@
-package main
-
-import (
- "flag"
- "fmt"
- "regexp"
- "time"
-
- "github.com/onsi/ginkgo/config"
- "github.com/onsi/ginkgo/ginkgo/interrupthandler"
- "github.com/onsi/ginkgo/ginkgo/testrunner"
- "github.com/onsi/ginkgo/ginkgo/testsuite"
- "github.com/onsi/ginkgo/ginkgo/watch"
- colorable "github.com/onsi/ginkgo/reporters/stenographer/support/go-colorable"
-)
-
-func BuildWatchCommand() *Command {
- commandFlags := NewWatchCommandFlags(flag.NewFlagSet("watch", flag.ExitOnError))
- interruptHandler := interrupthandler.NewInterruptHandler()
- notifier := NewNotifier(commandFlags)
- watcher := &SpecWatcher{
- commandFlags: commandFlags,
- notifier: notifier,
- interruptHandler: interruptHandler,
- suiteRunner: NewSuiteRunner(notifier, interruptHandler),
- }
-
- return &Command{
- Name: "watch",
- FlagSet: commandFlags.FlagSet,
- UsageCommand: "ginkgo watch -- ",
- Usage: []string{
- "Watches the tests in the passed in and runs them when changes occur.",
- "Any arguments after -- will be passed to the test.",
- },
- Command: watcher.WatchSpecs,
- SuppressFlagDocumentation: true,
- FlagDocSubstitute: []string{
- "Accepts all the flags that the ginkgo command accepts except for --keepGoing and --untilItFails",
- },
- }
-}
-
-type SpecWatcher struct {
- commandFlags *RunWatchAndBuildCommandFlags
- notifier *Notifier
- interruptHandler *interrupthandler.InterruptHandler
- suiteRunner *SuiteRunner
-}
-
-func (w *SpecWatcher) WatchSpecs(args []string, additionalArgs []string) {
- w.commandFlags.computeNodes()
- w.notifier.VerifyNotificationsAreAvailable()
-
- w.WatchSuites(args, additionalArgs)
-}
-
-func (w *SpecWatcher) runnersForSuites(suites []testsuite.TestSuite, additionalArgs []string) []*testrunner.TestRunner {
- runners := []*testrunner.TestRunner{}
-
- for _, suite := range suites {
- runners = append(runners, testrunner.New(suite, w.commandFlags.NumCPU, w.commandFlags.ParallelStream, w.commandFlags.Timeout, w.commandFlags.GoOpts, additionalArgs))
- }
-
- return runners
-}
-
-func (w *SpecWatcher) WatchSuites(args []string, additionalArgs []string) {
- suites, _ := findSuites(args, w.commandFlags.Recurse, w.commandFlags.SkipPackage, false)
-
- if len(suites) == 0 {
- complainAndQuit("Found no test suites")
- }
-
- fmt.Printf("Identified %d test %s. Locating dependencies to a depth of %d (this may take a while)...\n", len(suites), pluralizedWord("suite", "suites", len(suites)), w.commandFlags.Depth)
- deltaTracker := watch.NewDeltaTracker(w.commandFlags.Depth, regexp.MustCompile(w.commandFlags.WatchRegExp))
- delta, errors := deltaTracker.Delta(suites)
-
- fmt.Printf("Watching %d %s:\n", len(delta.NewSuites), pluralizedWord("suite", "suites", len(delta.NewSuites)))
- for _, suite := range delta.NewSuites {
- fmt.Println(" " + suite.Description())
- }
-
- for suite, err := range errors {
- fmt.Printf("Failed to watch %s: %s\n", suite.PackageName, err)
- }
-
- if len(suites) == 1 {
- runners := w.runnersForSuites(suites, additionalArgs)
- w.suiteRunner.RunSuites(runners, w.commandFlags.NumCompilers, true, nil)
- runners[0].CleanUp()
- }
-
- ticker := time.NewTicker(time.Second)
-
- for {
- select {
- case <-ticker.C:
- suites, _ := findSuites(args, w.commandFlags.Recurse, w.commandFlags.SkipPackage, false)
- delta, _ := deltaTracker.Delta(suites)
- coloredStream := colorable.NewColorableStdout()
-
- suitesToRun := []testsuite.TestSuite{}
-
- if len(delta.NewSuites) > 0 {
- fmt.Fprintf(coloredStream, greenColor+"Detected %d new %s:\n"+defaultStyle, len(delta.NewSuites), pluralizedWord("suite", "suites", len(delta.NewSuites)))
- for _, suite := range delta.NewSuites {
- suitesToRun = append(suitesToRun, suite.Suite)
- fmt.Fprintln(coloredStream, " "+suite.Description())
- }
- }
-
- modifiedSuites := delta.ModifiedSuites()
- if len(modifiedSuites) > 0 {
- fmt.Fprintln(coloredStream, greenColor+"\nDetected changes in:"+defaultStyle)
- for _, pkg := range delta.ModifiedPackages {
- fmt.Fprintln(coloredStream, " "+pkg)
- }
- fmt.Fprintf(coloredStream, greenColor+"Will run %d %s:\n"+defaultStyle, len(modifiedSuites), pluralizedWord("suite", "suites", len(modifiedSuites)))
- for _, suite := range modifiedSuites {
- suitesToRun = append(suitesToRun, suite.Suite)
- fmt.Fprintln(coloredStream, " "+suite.Description())
- }
- fmt.Fprintln(coloredStream, "")
- }
-
- if len(suitesToRun) > 0 {
- w.UpdateSeed()
- w.ComputeSuccinctMode(len(suitesToRun))
- runners := w.runnersForSuites(suitesToRun, additionalArgs)
- result, _ := w.suiteRunner.RunSuites(runners, w.commandFlags.NumCompilers, true, func(suite testsuite.TestSuite) {
- deltaTracker.WillRun(suite)
- })
- for _, runner := range runners {
- runner.CleanUp()
- }
- if !w.interruptHandler.WasInterrupted() {
- color := redColor
- if result.Passed {
- color = greenColor
- }
- fmt.Fprintln(coloredStream, color+"\nDone. Resuming watch..."+defaultStyle)
- }
- }
-
- case <-w.interruptHandler.C:
- return
- }
- }
-}
-
-func (w *SpecWatcher) ComputeSuccinctMode(numSuites int) {
- if config.DefaultReporterConfig.Verbose {
- config.DefaultReporterConfig.Succinct = false
- return
- }
-
- if w.commandFlags.wasSet("succinct") {
- return
- }
-
- if numSuites == 1 {
- config.DefaultReporterConfig.Succinct = false
- }
-
- if numSuites > 1 {
- config.DefaultReporterConfig.Succinct = true
- }
-}
-
-func (w *SpecWatcher) UpdateSeed() {
- if !w.commandFlags.wasSet("seed") {
- config.GinkgoConfig.RandomSeed = time.Now().Unix()
- }
-}
diff --git a/vendor/github.com/onsi/ginkgo/ginkgo_dsl.go b/vendor/github.com/onsi/ginkgo/ginkgo_dsl.go
deleted file mode 100644
index ccd7685e3..000000000
--- a/vendor/github.com/onsi/ginkgo/ginkgo_dsl.go
+++ /dev/null
@@ -1,681 +0,0 @@
-/*
-Ginkgo is a BDD-style testing framework for Golang
-
-The godoc documentation describes Ginkgo's API. More comprehensive documentation (with examples!) is available at http://onsi.github.io/ginkgo/
-
-Ginkgo's preferred matcher library is [Gomega](http://github.com/onsi/gomega)
-
-Ginkgo on Github: http://github.com/onsi/ginkgo
-
-Ginkgo is MIT-Licensed
-*/
-package ginkgo
-
-import (
- "flag"
- "fmt"
- "io"
- "net/http"
- "os"
- "reflect"
- "strings"
- "time"
-
- "github.com/onsi/ginkgo/config"
- "github.com/onsi/ginkgo/internal/codelocation"
- "github.com/onsi/ginkgo/internal/global"
- "github.com/onsi/ginkgo/internal/remote"
- "github.com/onsi/ginkgo/internal/testingtproxy"
- "github.com/onsi/ginkgo/internal/writer"
- "github.com/onsi/ginkgo/reporters"
- "github.com/onsi/ginkgo/reporters/stenographer"
- colorable "github.com/onsi/ginkgo/reporters/stenographer/support/go-colorable"
- "github.com/onsi/ginkgo/types"
-)
-
-var deprecationTracker = types.NewDeprecationTracker()
-
-const GINKGO_VERSION = config.VERSION
-const GINKGO_PANIC = `
-Your test failed.
-Ginkgo panics to prevent subsequent assertions from running.
-Normally Ginkgo rescues this panic so you shouldn't see it.
-
-But, if you make an assertion in a goroutine, Ginkgo can't capture the panic.
-To circumvent this, you should call
-
- defer GinkgoRecover()
-
-at the top of the goroutine that caused this panic.
-`
-
-func init() {
- config.Flags(flag.CommandLine, "ginkgo", true)
- GinkgoWriter = writer.New(os.Stdout)
-}
-
-//GinkgoWriter implements an io.Writer
-//When running in verbose mode any writes to GinkgoWriter will be immediately printed
-//to stdout. Otherwise, GinkgoWriter will buffer any writes produced during the current test and flush them to screen
-//only if the current test fails.
-var GinkgoWriter io.Writer
-
-//The interface by which Ginkgo receives *testing.T
-type GinkgoTestingT interface {
- Fail()
-}
-
-//GinkgoRandomSeed returns the seed used to randomize spec execution order. It is
-//useful for seeding your own pseudorandom number generators (PRNGs) to ensure
-//consistent executions from run to run, where your tests contain variability (for
-//example, when selecting random test data).
-func GinkgoRandomSeed() int64 {
- return config.GinkgoConfig.RandomSeed
-}
-
-//GinkgoParallelNode is deprecated, use GinkgoParallelProcess instead
-func GinkgoParallelNode() int {
- deprecationTracker.TrackDeprecation(types.Deprecations.ParallelNode(), codelocation.New(1))
- return GinkgoParallelProcess()
-}
-
-//GinkgoParallelProcess returns the parallel process number for the current ginkgo process
-//The process number is 1-indexed
-func GinkgoParallelProcess() int {
- return config.GinkgoConfig.ParallelNode
-}
-
-//Some matcher libraries or legacy codebases require a *testing.T
-//GinkgoT implements an interface analogous to *testing.T and can be used if
-//the library in question accepts *testing.T through an interface
-//
-// For example, with testify:
-// assert.Equal(GinkgoT(), 123, 123, "they should be equal")
-//
-// Or with gomock:
-// gomock.NewController(GinkgoT())
-//
-// GinkgoT() takes an optional offset argument that can be used to get the
-// correct line number associated with the failure.
-func GinkgoT(optionalOffset ...int) GinkgoTInterface {
- offset := 3
- if len(optionalOffset) > 0 {
- offset = optionalOffset[0]
- }
- failedFunc := func() bool {
- return CurrentGinkgoTestDescription().Failed
- }
- nameFunc := func() string {
- return CurrentGinkgoTestDescription().FullTestText
- }
- return testingtproxy.New(GinkgoWriter, Fail, Skip, failedFunc, nameFunc, offset)
-}
-
-//The interface returned by GinkgoT(). This covers most of the methods
-//in the testing package's T.
-type GinkgoTInterface interface {
- Cleanup(func())
- Setenv(key, value string)
- Error(args ...interface{})
- Errorf(format string, args ...interface{})
- Fail()
- FailNow()
- Failed() bool
- Fatal(args ...interface{})
- Fatalf(format string, args ...interface{})
- Helper()
- Log(args ...interface{})
- Logf(format string, args ...interface{})
- Name() string
- Parallel()
- Skip(args ...interface{})
- SkipNow()
- Skipf(format string, args ...interface{})
- Skipped() bool
- TempDir() string
-}
-
-//Custom Ginkgo test reporters must implement the Reporter interface.
-//
-//The custom reporter is passed in a SuiteSummary when the suite begins and ends,
-//and a SpecSummary just before a spec begins and just after a spec ends
-type Reporter reporters.Reporter
-
-//Asynchronous specs are given a channel of the Done type. You must close or write to the channel
-//to tell Ginkgo that your async test is done.
-type Done chan<- interface{}
-
-//GinkgoTestDescription represents the information about the current running test returned by CurrentGinkgoTestDescription
-// FullTestText: a concatenation of ComponentTexts and the TestText
-// ComponentTexts: a list of all texts for the Describes & Contexts leading up to the current test
-// TestText: the text in the actual It or Measure node
-// IsMeasurement: true if the current test is a measurement
-// FileName: the name of the file containing the current test
-// LineNumber: the line number for the current test
-// Failed: if the current test has failed, this will be true (useful in an AfterEach)
-type GinkgoTestDescription struct {
- FullTestText string
- ComponentTexts []string
- TestText string
-
- IsMeasurement bool
-
- FileName string
- LineNumber int
-
- Failed bool
- Duration time.Duration
-}
-
-//CurrentGinkgoTestDescripton returns information about the current running test.
-func CurrentGinkgoTestDescription() GinkgoTestDescription {
- summary, ok := global.Suite.CurrentRunningSpecSummary()
- if !ok {
- return GinkgoTestDescription{}
- }
-
- subjectCodeLocation := summary.ComponentCodeLocations[len(summary.ComponentCodeLocations)-1]
-
- return GinkgoTestDescription{
- ComponentTexts: summary.ComponentTexts[1:],
- FullTestText: strings.Join(summary.ComponentTexts[1:], " "),
- TestText: summary.ComponentTexts[len(summary.ComponentTexts)-1],
- IsMeasurement: summary.IsMeasurement,
- FileName: subjectCodeLocation.FileName,
- LineNumber: subjectCodeLocation.LineNumber,
- Failed: summary.HasFailureState(),
- Duration: summary.RunTime,
- }
-}
-
-//Measurement tests receive a Benchmarker.
-//
-//You use the Time() function to time how long the passed in body function takes to run
-//You use the RecordValue() function to track arbitrary numerical measurements.
-//The RecordValueWithPrecision() function can be used alternatively to provide the unit
-//and resolution of the numeric measurement.
-//The optional info argument is passed to the test reporter and can be used to
-// provide the measurement data to a custom reporter with context.
-//
-//See http://onsi.github.io/ginkgo/#benchmark_tests for more details
-type Benchmarker interface {
- Time(name string, body func(), info ...interface{}) (elapsedTime time.Duration)
- RecordValue(name string, value float64, info ...interface{})
- RecordValueWithPrecision(name string, value float64, units string, precision int, info ...interface{})
-}
-
-//RunSpecs is the entry point for the Ginkgo test runner.
-//You must call this within a Golang testing TestX(t *testing.T) function.
-//
-//To bootstrap a test suite you can use the Ginkgo CLI:
-//
-// ginkgo bootstrap
-func RunSpecs(t GinkgoTestingT, description string) bool {
- specReporters := []Reporter{buildDefaultReporter()}
- if config.DefaultReporterConfig.ReportFile != "" {
- reportFile := config.DefaultReporterConfig.ReportFile
- specReporters[0] = reporters.NewJUnitReporter(reportFile)
- specReporters = append(specReporters, buildDefaultReporter())
- }
- return runSpecsWithCustomReporters(t, description, specReporters)
-}
-
-//To run your tests with Ginkgo's default reporter and your custom reporter(s), replace
-//RunSpecs() with this method.
-func RunSpecsWithDefaultAndCustomReporters(t GinkgoTestingT, description string, specReporters []Reporter) bool {
- deprecationTracker.TrackDeprecation(types.Deprecations.CustomReporter())
- specReporters = append(specReporters, buildDefaultReporter())
- return runSpecsWithCustomReporters(t, description, specReporters)
-}
-
-//To run your tests with your custom reporter(s) (and *not* Ginkgo's default reporter), replace
-//RunSpecs() with this method. Note that parallel tests will not work correctly without the default reporter
-func RunSpecsWithCustomReporters(t GinkgoTestingT, description string, specReporters []Reporter) bool {
- deprecationTracker.TrackDeprecation(types.Deprecations.CustomReporter())
- return runSpecsWithCustomReporters(t, description, specReporters)
-}
-
-func runSpecsWithCustomReporters(t GinkgoTestingT, description string, specReporters []Reporter) bool {
- writer := GinkgoWriter.(*writer.Writer)
- writer.SetStream(config.DefaultReporterConfig.Verbose)
- reporters := make([]reporters.Reporter, len(specReporters))
- for i, reporter := range specReporters {
- reporters[i] = reporter
- }
- passed, hasFocusedTests := global.Suite.Run(t, description, reporters, writer, config.GinkgoConfig)
-
- if deprecationTracker.DidTrackDeprecations() {
- fmt.Fprintln(colorable.NewColorableStderr(), deprecationTracker.DeprecationsReport())
- }
-
- if passed && hasFocusedTests && strings.TrimSpace(os.Getenv("GINKGO_EDITOR_INTEGRATION")) == "" {
- fmt.Println("PASS | FOCUSED")
- os.Exit(types.GINKGO_FOCUS_EXIT_CODE)
- }
- return passed
-}
-
-func buildDefaultReporter() Reporter {
- remoteReportingServer := config.GinkgoConfig.StreamHost
- if remoteReportingServer == "" {
- stenographer := stenographer.New(!config.DefaultReporterConfig.NoColor, config.GinkgoConfig.FlakeAttempts > 1, colorable.NewColorableStdout())
- return reporters.NewDefaultReporter(config.DefaultReporterConfig, stenographer)
- } else {
- debugFile := ""
- if config.GinkgoConfig.DebugParallel {
- debugFile = fmt.Sprintf("ginkgo-node-%d.log", config.GinkgoConfig.ParallelNode)
- }
- return remote.NewForwardingReporter(config.DefaultReporterConfig, remoteReportingServer, &http.Client{}, remote.NewOutputInterceptor(), GinkgoWriter.(*writer.Writer), debugFile)
- }
-}
-
-//Skip notifies Ginkgo that the current spec was skipped.
-func Skip(message string, callerSkip ...int) {
- skip := 0
- if len(callerSkip) > 0 {
- skip = callerSkip[0]
- }
-
- global.Failer.Skip(message, codelocation.New(skip+1))
- panic(GINKGO_PANIC)
-}
-
-//Fail notifies Ginkgo that the current spec has failed. (Gomega will call Fail for you automatically when an assertion fails.)
-func Fail(message string, callerSkip ...int) {
- skip := 0
- if len(callerSkip) > 0 {
- skip = callerSkip[0]
- }
-
- global.Failer.Fail(message, codelocation.New(skip+1))
- panic(GINKGO_PANIC)
-}
-
-//GinkgoRecover should be deferred at the top of any spawned goroutine that (may) call `Fail`
-//Since Gomega assertions call fail, you should throw a `defer GinkgoRecover()` at the top of any goroutine that
-//calls out to Gomega
-//
-//Here's why: Ginkgo's `Fail` method records the failure and then panics to prevent
-//further assertions from running. This panic must be recovered. Ginkgo does this for you
-//if the panic originates in a Ginkgo node (an It, BeforeEach, etc...)
-//
-//Unfortunately, if a panic originates on a goroutine *launched* from one of these nodes there's no
-//way for Ginkgo to rescue the panic. To do this, you must remember to `defer GinkgoRecover()` at the top of such a goroutine.
-func GinkgoRecover() {
- e := recover()
- if e != nil {
- global.Failer.Panic(codelocation.New(1), e)
- }
-}
-
-//Describe blocks allow you to organize your specs. A Describe block can contain any number of
-//BeforeEach, AfterEach, JustBeforeEach, It, and Measurement blocks.
-//
-//In addition you can nest Describe, Context and When blocks. Describe, Context and When blocks are functionally
-//equivalent. The difference is purely semantic -- you typically Describe the behavior of an object
-//or method and, within that Describe, outline a number of Contexts and Whens.
-func Describe(text string, body func()) bool {
- global.Suite.PushContainerNode(text, body, types.FlagTypeNone, codelocation.New(1))
- return true
-}
-
-//You can focus the tests within a describe block using FDescribe
-func FDescribe(text string, body func()) bool {
- global.Suite.PushContainerNode(text, body, types.FlagTypeFocused, codelocation.New(1))
- return true
-}
-
-//You can mark the tests within a describe block as pending using PDescribe
-func PDescribe(text string, body func()) bool {
- global.Suite.PushContainerNode(text, body, types.FlagTypePending, codelocation.New(1))
- return true
-}
-
-//You can mark the tests within a describe block as pending using XDescribe
-func XDescribe(text string, body func()) bool {
- global.Suite.PushContainerNode(text, body, types.FlagTypePending, codelocation.New(1))
- return true
-}
-
-//Context blocks allow you to organize your specs. A Context block can contain any number of
-//BeforeEach, AfterEach, JustBeforeEach, It, and Measurement blocks.
-//
-//In addition you can nest Describe, Context and When blocks. Describe, Context and When blocks are functionally
-//equivalent. The difference is purely semantic -- you typical Describe the behavior of an object
-//or method and, within that Describe, outline a number of Contexts and Whens.
-func Context(text string, body func()) bool {
- global.Suite.PushContainerNode(text, body, types.FlagTypeNone, codelocation.New(1))
- return true
-}
-
-//You can focus the tests within a describe block using FContext
-func FContext(text string, body func()) bool {
- global.Suite.PushContainerNode(text, body, types.FlagTypeFocused, codelocation.New(1))
- return true
-}
-
-//You can mark the tests within a describe block as pending using PContext
-func PContext(text string, body func()) bool {
- global.Suite.PushContainerNode(text, body, types.FlagTypePending, codelocation.New(1))
- return true
-}
-
-//You can mark the tests within a describe block as pending using XContext
-func XContext(text string, body func()) bool {
- global.Suite.PushContainerNode(text, body, types.FlagTypePending, codelocation.New(1))
- return true
-}
-
-//When blocks allow you to organize your specs. A When block can contain any number of
-//BeforeEach, AfterEach, JustBeforeEach, It, and Measurement blocks.
-//
-//In addition you can nest Describe, Context and When blocks. Describe, Context and When blocks are functionally
-//equivalent. The difference is purely semantic -- you typical Describe the behavior of an object
-//or method and, within that Describe, outline a number of Contexts and Whens.
-func When(text string, body func()) bool {
- global.Suite.PushContainerNode("when "+text, body, types.FlagTypeNone, codelocation.New(1))
- return true
-}
-
-//You can focus the tests within a describe block using FWhen
-func FWhen(text string, body func()) bool {
- global.Suite.PushContainerNode("when "+text, body, types.FlagTypeFocused, codelocation.New(1))
- return true
-}
-
-//You can mark the tests within a describe block as pending using PWhen
-func PWhen(text string, body func()) bool {
- global.Suite.PushContainerNode("when "+text, body, types.FlagTypePending, codelocation.New(1))
- return true
-}
-
-//You can mark the tests within a describe block as pending using XWhen
-func XWhen(text string, body func()) bool {
- global.Suite.PushContainerNode("when "+text, body, types.FlagTypePending, codelocation.New(1))
- return true
-}
-
-//It blocks contain your test code and assertions. You cannot nest any other Ginkgo blocks
-//within an It block.
-//
-//Ginkgo will normally run It blocks synchronously. To perform asynchronous tests, pass a
-//function that accepts a Done channel. When you do this, you can also provide an optional timeout.
-func It(text string, body interface{}, timeout ...float64) bool {
- validateBodyFunc(body, codelocation.New(1))
- global.Suite.PushItNode(text, body, types.FlagTypeNone, codelocation.New(1), parseTimeout(timeout...))
- return true
-}
-
-//You can focus individual Its using FIt
-func FIt(text string, body interface{}, timeout ...float64) bool {
- validateBodyFunc(body, codelocation.New(1))
- global.Suite.PushItNode(text, body, types.FlagTypeFocused, codelocation.New(1), parseTimeout(timeout...))
- return true
-}
-
-//You can mark Its as pending using PIt
-func PIt(text string, _ ...interface{}) bool {
- global.Suite.PushItNode(text, func() {}, types.FlagTypePending, codelocation.New(1), 0)
- return true
-}
-
-//You can mark Its as pending using XIt
-func XIt(text string, _ ...interface{}) bool {
- global.Suite.PushItNode(text, func() {}, types.FlagTypePending, codelocation.New(1), 0)
- return true
-}
-
-//Specify blocks are aliases for It blocks and allow for more natural wording in situations
-//which "It" does not fit into a natural sentence flow. All the same protocols apply for Specify blocks
-//which apply to It blocks.
-func Specify(text string, body interface{}, timeout ...float64) bool {
- validateBodyFunc(body, codelocation.New(1))
- global.Suite.PushItNode(text, body, types.FlagTypeNone, codelocation.New(1), parseTimeout(timeout...))
- return true
-}
-
-//You can focus individual Specifys using FSpecify
-func FSpecify(text string, body interface{}, timeout ...float64) bool {
- validateBodyFunc(body, codelocation.New(1))
- global.Suite.PushItNode(text, body, types.FlagTypeFocused, codelocation.New(1), parseTimeout(timeout...))
- return true
-}
-
-//You can mark Specifys as pending using PSpecify
-func PSpecify(text string, is ...interface{}) bool {
- global.Suite.PushItNode(text, func() {}, types.FlagTypePending, codelocation.New(1), 0)
- return true
-}
-
-//You can mark Specifys as pending using XSpecify
-func XSpecify(text string, is ...interface{}) bool {
- global.Suite.PushItNode(text, func() {}, types.FlagTypePending, codelocation.New(1), 0)
- return true
-}
-
-//By allows you to better document large Its.
-//
-//Generally you should try to keep your Its short and to the point. This is not always possible, however,
-//especially in the context of integration tests that capture a particular workflow.
-//
-//By allows you to document such flows. By must be called within a runnable node (It, BeforeEach, Measure, etc...)
-//By will simply log the passed in text to the GinkgoWriter. If By is handed a function it will immediately run the function.
-func By(text string, callbacks ...func()) {
- preamble := "\x1b[1mSTEP\x1b[0m"
- if config.DefaultReporterConfig.NoColor {
- preamble = "STEP"
- }
- fmt.Fprintln(GinkgoWriter, preamble+": "+text)
- if len(callbacks) == 1 {
- callbacks[0]()
- }
- if len(callbacks) > 1 {
- panic("just one callback per By, please")
- }
-}
-
-//Measure blocks run the passed in body function repeatedly (determined by the samples argument)
-//and accumulate metrics provided to the Benchmarker by the body function.
-//
-//The body function must have the signature:
-// func(b Benchmarker)
-func Measure(text string, body interface{}, samples int) bool {
- deprecationTracker.TrackDeprecation(types.Deprecations.Measure(), codelocation.New(1))
- global.Suite.PushMeasureNode(text, body, types.FlagTypeNone, codelocation.New(1), samples)
- return true
-}
-
-//You can focus individual Measures using FMeasure
-func FMeasure(text string, body interface{}, samples int) bool {
- deprecationTracker.TrackDeprecation(types.Deprecations.Measure(), codelocation.New(1))
- global.Suite.PushMeasureNode(text, body, types.FlagTypeFocused, codelocation.New(1), samples)
- return true
-}
-
-//You can mark Measurements as pending using PMeasure
-func PMeasure(text string, _ ...interface{}) bool {
- deprecationTracker.TrackDeprecation(types.Deprecations.Measure(), codelocation.New(1))
- global.Suite.PushMeasureNode(text, func(b Benchmarker) {}, types.FlagTypePending, codelocation.New(1), 0)
- return true
-}
-
-//You can mark Measurements as pending using XMeasure
-func XMeasure(text string, _ ...interface{}) bool {
- deprecationTracker.TrackDeprecation(types.Deprecations.Measure(), codelocation.New(1))
- global.Suite.PushMeasureNode(text, func(b Benchmarker) {}, types.FlagTypePending, codelocation.New(1), 0)
- return true
-}
-
-//BeforeSuite blocks are run just once before any specs are run. When running in parallel, each
-//parallel node process will call BeforeSuite.
-//
-//BeforeSuite blocks can be made asynchronous by providing a body function that accepts a Done channel
-//
-//You may only register *one* BeforeSuite handler per test suite. You typically do so in your bootstrap file at the top level.
-func BeforeSuite(body interface{}, timeout ...float64) bool {
- validateBodyFunc(body, codelocation.New(1))
- global.Suite.SetBeforeSuiteNode(body, codelocation.New(1), parseTimeout(timeout...))
- return true
-}
-
-//AfterSuite blocks are *always* run after all the specs regardless of whether specs have passed or failed.
-//Moreover, if Ginkgo receives an interrupt signal (^C) it will attempt to run the AfterSuite before exiting.
-//
-//When running in parallel, each parallel node process will call AfterSuite.
-//
-//AfterSuite blocks can be made asynchronous by providing a body function that accepts a Done channel
-//
-//You may only register *one* AfterSuite handler per test suite. You typically do so in your bootstrap file at the top level.
-func AfterSuite(body interface{}, timeout ...float64) bool {
- validateBodyFunc(body, codelocation.New(1))
- global.Suite.SetAfterSuiteNode(body, codelocation.New(1), parseTimeout(timeout...))
- return true
-}
-
-//SynchronizedBeforeSuite blocks are primarily meant to solve the problem of setting up singleton external resources shared across
-//nodes when running tests in parallel. For example, say you have a shared database that you can only start one instance of that
-//must be used in your tests. When running in parallel, only one node should set up the database and all other nodes should wait
-//until that node is done before running.
-//
-//SynchronizedBeforeSuite accomplishes this by taking *two* function arguments. The first is only run on parallel node #1. The second is
-//run on all nodes, but *only* after the first function completes successfully. Ginkgo also makes it possible to send data from the first function (on Node 1)
-//to the second function (on all the other nodes).
-//
-//The functions have the following signatures. The first function (which only runs on node 1) has the signature:
-//
-// func() []byte
-//
-//or, to run asynchronously:
-//
-// func(done Done) []byte
-//
-//The byte array returned by the first function is then passed to the second function, which has the signature:
-//
-// func(data []byte)
-//
-//or, to run asynchronously:
-//
-// func(data []byte, done Done)
-//
-//Here's a simple pseudo-code example that starts a shared database on Node 1 and shares the database's address with the other nodes:
-//
-// var dbClient db.Client
-// var dbRunner db.Runner
-//
-// var _ = SynchronizedBeforeSuite(func() []byte {
-// dbRunner = db.NewRunner()
-// err := dbRunner.Start()
-// Ω(err).ShouldNot(HaveOccurred())
-// return []byte(dbRunner.URL)
-// }, func(data []byte) {
-// dbClient = db.NewClient()
-// err := dbClient.Connect(string(data))
-// Ω(err).ShouldNot(HaveOccurred())
-// })
-func SynchronizedBeforeSuite(node1Body interface{}, allNodesBody interface{}, timeout ...float64) bool {
- global.Suite.SetSynchronizedBeforeSuiteNode(
- node1Body,
- allNodesBody,
- codelocation.New(1),
- parseTimeout(timeout...),
- )
- return true
-}
-
-//SynchronizedAfterSuite blocks complement the SynchronizedBeforeSuite blocks in solving the problem of setting up
-//external singleton resources shared across nodes when running tests in parallel.
-//
-//SynchronizedAfterSuite accomplishes this by taking *two* function arguments. The first runs on all nodes. The second runs only on parallel node #1
-//and *only* after all other nodes have finished and exited. This ensures that node 1, and any resources it is running, remain alive until
-//all other nodes are finished.
-//
-//Both functions have the same signature: either func() or func(done Done) to run asynchronously.
-//
-//Here's a pseudo-code example that complements that given in SynchronizedBeforeSuite. Here, SynchronizedAfterSuite is used to tear down the shared database
-//only after all nodes have finished:
-//
-// var _ = SynchronizedAfterSuite(func() {
-// dbClient.Cleanup()
-// }, func() {
-// dbRunner.Stop()
-// })
-func SynchronizedAfterSuite(allNodesBody interface{}, node1Body interface{}, timeout ...float64) bool {
- global.Suite.SetSynchronizedAfterSuiteNode(
- allNodesBody,
- node1Body,
- codelocation.New(1),
- parseTimeout(timeout...),
- )
- return true
-}
-
-//BeforeEach blocks are run before It blocks. When multiple BeforeEach blocks are defined in nested
-//Describe and Context blocks the outermost BeforeEach blocks are run first.
-//
-//Like It blocks, BeforeEach blocks can be made asynchronous by providing a body function that accepts
-//a Done channel
-func BeforeEach(body interface{}, timeout ...float64) bool {
- validateBodyFunc(body, codelocation.New(1))
- global.Suite.PushBeforeEachNode(body, codelocation.New(1), parseTimeout(timeout...))
- return true
-}
-
-//JustBeforeEach blocks are run before It blocks but *after* all BeforeEach blocks. For more details,
-//read the [documentation](http://onsi.github.io/ginkgo/#separating_creation_and_configuration_)
-//
-//Like It blocks, BeforeEach blocks can be made asynchronous by providing a body function that accepts
-//a Done channel
-func JustBeforeEach(body interface{}, timeout ...float64) bool {
- validateBodyFunc(body, codelocation.New(1))
- global.Suite.PushJustBeforeEachNode(body, codelocation.New(1), parseTimeout(timeout...))
- return true
-}
-
-//JustAfterEach blocks are run after It blocks but *before* all AfterEach blocks. For more details,
-//read the [documentation](http://onsi.github.io/ginkgo/#separating_creation_and_configuration_)
-//
-//Like It blocks, JustAfterEach blocks can be made asynchronous by providing a body function that accepts
-//a Done channel
-func JustAfterEach(body interface{}, timeout ...float64) bool {
- validateBodyFunc(body, codelocation.New(1))
- global.Suite.PushJustAfterEachNode(body, codelocation.New(1), parseTimeout(timeout...))
- return true
-}
-
-//AfterEach blocks are run after It blocks. When multiple AfterEach blocks are defined in nested
-//Describe and Context blocks the innermost AfterEach blocks are run first.
-//
-//Like It blocks, AfterEach blocks can be made asynchronous by providing a body function that accepts
-//a Done channel
-func AfterEach(body interface{}, timeout ...float64) bool {
- validateBodyFunc(body, codelocation.New(1))
- global.Suite.PushAfterEachNode(body, codelocation.New(1), parseTimeout(timeout...))
- return true
-}
-
-func validateBodyFunc(body interface{}, cl types.CodeLocation) {
- t := reflect.TypeOf(body)
- if t.Kind() != reflect.Func {
- return
- }
-
- if t.NumOut() > 0 {
- return
- }
-
- if t.NumIn() == 0 {
- return
- }
-
- if t.In(0) == reflect.TypeOf(make(Done)) {
- deprecationTracker.TrackDeprecation(types.Deprecations.Async(), cl)
- }
-}
-
-func parseTimeout(timeout ...float64) time.Duration {
- if len(timeout) == 0 {
- return global.DefaultTimeout
- } else {
- return time.Duration(timeout[0] * float64(time.Second))
- }
-}
diff --git a/vendor/github.com/onsi/ginkgo/internal/codelocation/code_location.go b/vendor/github.com/onsi/ginkgo/internal/codelocation/code_location.go
deleted file mode 100644
index aa89d6cba..000000000
--- a/vendor/github.com/onsi/ginkgo/internal/codelocation/code_location.go
+++ /dev/null
@@ -1,48 +0,0 @@
-package codelocation
-
-import (
- "regexp"
- "runtime"
- "runtime/debug"
- "strings"
-
- "github.com/onsi/ginkgo/types"
-)
-
-func New(skip int) types.CodeLocation {
- _, file, line, _ := runtime.Caller(skip + 1)
- stackTrace := PruneStack(string(debug.Stack()), skip+1)
- return types.CodeLocation{FileName: file, LineNumber: line, FullStackTrace: stackTrace}
-}
-
-// PruneStack removes references to functions that are internal to Ginkgo
-// and the Go runtime from a stack string and a certain number of stack entries
-// at the beginning of the stack. The stack string has the format
-// as returned by runtime/debug.Stack. The leading goroutine information is
-// optional and always removed if present. Beware that runtime/debug.Stack
-// adds itself as first entry, so typically skip must be >= 1 to remove that
-// entry.
-func PruneStack(fullStackTrace string, skip int) string {
- stack := strings.Split(fullStackTrace, "\n")
- // Ensure that the even entries are the method names and the
- // the odd entries the source code information.
- if len(stack) > 0 && strings.HasPrefix(stack[0], "goroutine ") {
- // Ignore "goroutine 29 [running]:" line.
- stack = stack[1:]
- }
- // The "+1" is for skipping over the initial entry, which is
- // runtime/debug.Stack() itself.
- if len(stack) > 2*(skip+1) {
- stack = stack[2*(skip+1):]
- }
- prunedStack := []string{}
- re := regexp.MustCompile(`\/ginkgo\/|\/pkg\/testing\/|\/pkg\/runtime\/`)
- for i := 0; i < len(stack)/2; i++ {
- // We filter out based on the source code file name.
- if !re.Match([]byte(stack[i*2+1])) {
- prunedStack = append(prunedStack, stack[i*2])
- prunedStack = append(prunedStack, stack[i*2+1])
- }
- }
- return strings.Join(prunedStack, "\n")
-}
diff --git a/vendor/github.com/onsi/ginkgo/internal/containernode/container_node.go b/vendor/github.com/onsi/ginkgo/internal/containernode/container_node.go
deleted file mode 100644
index 0737746dc..000000000
--- a/vendor/github.com/onsi/ginkgo/internal/containernode/container_node.go
+++ /dev/null
@@ -1,151 +0,0 @@
-package containernode
-
-import (
- "math/rand"
- "sort"
-
- "github.com/onsi/ginkgo/internal/leafnodes"
- "github.com/onsi/ginkgo/types"
-)
-
-type subjectOrContainerNode struct {
- containerNode *ContainerNode
- subjectNode leafnodes.SubjectNode
-}
-
-func (n subjectOrContainerNode) text() string {
- if n.containerNode != nil {
- return n.containerNode.Text()
- } else {
- return n.subjectNode.Text()
- }
-}
-
-type CollatedNodes struct {
- Containers []*ContainerNode
- Subject leafnodes.SubjectNode
-}
-
-type ContainerNode struct {
- text string
- flag types.FlagType
- codeLocation types.CodeLocation
-
- setupNodes []leafnodes.BasicNode
- subjectAndContainerNodes []subjectOrContainerNode
-}
-
-func New(text string, flag types.FlagType, codeLocation types.CodeLocation) *ContainerNode {
- return &ContainerNode{
- text: text,
- flag: flag,
- codeLocation: codeLocation,
- }
-}
-
-func (container *ContainerNode) Shuffle(r *rand.Rand) {
- sort.Sort(container)
- permutation := r.Perm(len(container.subjectAndContainerNodes))
- shuffledNodes := make([]subjectOrContainerNode, len(container.subjectAndContainerNodes))
- for i, j := range permutation {
- shuffledNodes[i] = container.subjectAndContainerNodes[j]
- }
- container.subjectAndContainerNodes = shuffledNodes
-}
-
-func (node *ContainerNode) BackPropagateProgrammaticFocus() bool {
- if node.flag == types.FlagTypePending {
- return false
- }
-
- shouldUnfocus := false
- for _, subjectOrContainerNode := range node.subjectAndContainerNodes {
- if subjectOrContainerNode.containerNode != nil {
- shouldUnfocus = subjectOrContainerNode.containerNode.BackPropagateProgrammaticFocus() || shouldUnfocus
- } else {
- shouldUnfocus = (subjectOrContainerNode.subjectNode.Flag() == types.FlagTypeFocused) || shouldUnfocus
- }
- }
-
- if shouldUnfocus {
- if node.flag == types.FlagTypeFocused {
- node.flag = types.FlagTypeNone
- }
- return true
- }
-
- return node.flag == types.FlagTypeFocused
-}
-
-func (node *ContainerNode) Collate() []CollatedNodes {
- return node.collate([]*ContainerNode{})
-}
-
-func (node *ContainerNode) collate(enclosingContainers []*ContainerNode) []CollatedNodes {
- collated := make([]CollatedNodes, 0)
-
- containers := make([]*ContainerNode, len(enclosingContainers))
- copy(containers, enclosingContainers)
- containers = append(containers, node)
-
- for _, subjectOrContainer := range node.subjectAndContainerNodes {
- if subjectOrContainer.containerNode != nil {
- collated = append(collated, subjectOrContainer.containerNode.collate(containers)...)
- } else {
- collated = append(collated, CollatedNodes{
- Containers: containers,
- Subject: subjectOrContainer.subjectNode,
- })
- }
- }
-
- return collated
-}
-
-func (node *ContainerNode) PushContainerNode(container *ContainerNode) {
- node.subjectAndContainerNodes = append(node.subjectAndContainerNodes, subjectOrContainerNode{containerNode: container})
-}
-
-func (node *ContainerNode) PushSubjectNode(subject leafnodes.SubjectNode) {
- node.subjectAndContainerNodes = append(node.subjectAndContainerNodes, subjectOrContainerNode{subjectNode: subject})
-}
-
-func (node *ContainerNode) PushSetupNode(setupNode leafnodes.BasicNode) {
- node.setupNodes = append(node.setupNodes, setupNode)
-}
-
-func (node *ContainerNode) SetupNodesOfType(nodeType types.SpecComponentType) []leafnodes.BasicNode {
- nodes := []leafnodes.BasicNode{}
- for _, setupNode := range node.setupNodes {
- if setupNode.Type() == nodeType {
- nodes = append(nodes, setupNode)
- }
- }
- return nodes
-}
-
-func (node *ContainerNode) Text() string {
- return node.text
-}
-
-func (node *ContainerNode) CodeLocation() types.CodeLocation {
- return node.codeLocation
-}
-
-func (node *ContainerNode) Flag() types.FlagType {
- return node.flag
-}
-
-//sort.Interface
-
-func (node *ContainerNode) Len() int {
- return len(node.subjectAndContainerNodes)
-}
-
-func (node *ContainerNode) Less(i, j int) bool {
- return node.subjectAndContainerNodes[i].text() < node.subjectAndContainerNodes[j].text()
-}
-
-func (node *ContainerNode) Swap(i, j int) {
- node.subjectAndContainerNodes[i], node.subjectAndContainerNodes[j] = node.subjectAndContainerNodes[j], node.subjectAndContainerNodes[i]
-}
diff --git a/vendor/github.com/onsi/ginkgo/internal/global/init.go b/vendor/github.com/onsi/ginkgo/internal/global/init.go
deleted file mode 100644
index 109f617a5..000000000
--- a/vendor/github.com/onsi/ginkgo/internal/global/init.go
+++ /dev/null
@@ -1,22 +0,0 @@
-package global
-
-import (
- "time"
-
- "github.com/onsi/ginkgo/internal/failer"
- "github.com/onsi/ginkgo/internal/suite"
-)
-
-const DefaultTimeout = time.Duration(1 * time.Second)
-
-var Suite *suite.Suite
-var Failer *failer.Failer
-
-func init() {
- InitializeGlobals()
-}
-
-func InitializeGlobals() {
- Failer = failer.New()
- Suite = suite.New(Failer)
-}
diff --git a/vendor/github.com/onsi/ginkgo/internal/leafnodes/benchmarker.go b/vendor/github.com/onsi/ginkgo/internal/leafnodes/benchmarker.go
deleted file mode 100644
index 393901e11..000000000
--- a/vendor/github.com/onsi/ginkgo/internal/leafnodes/benchmarker.go
+++ /dev/null
@@ -1,103 +0,0 @@
-package leafnodes
-
-import (
- "math"
- "time"
-
- "sync"
-
- "github.com/onsi/ginkgo/types"
-)
-
-type benchmarker struct {
- mu sync.Mutex
- measurements map[string]*types.SpecMeasurement
- orderCounter int
-}
-
-func newBenchmarker() *benchmarker {
- return &benchmarker{
- measurements: make(map[string]*types.SpecMeasurement),
- }
-}
-
-func (b *benchmarker) Time(name string, body func(), info ...interface{}) (elapsedTime time.Duration) {
- t := time.Now()
- body()
- elapsedTime = time.Since(t)
-
- b.mu.Lock()
- defer b.mu.Unlock()
- measurement := b.getMeasurement(name, "Fastest Time", "Slowest Time", "Average Time", "s", 3, info...)
- measurement.Results = append(measurement.Results, elapsedTime.Seconds())
-
- return
-}
-
-func (b *benchmarker) RecordValue(name string, value float64, info ...interface{}) {
- b.mu.Lock()
- measurement := b.getMeasurement(name, "Smallest", " Largest", " Average", "", 3, info...)
- defer b.mu.Unlock()
- measurement.Results = append(measurement.Results, value)
-}
-
-func (b *benchmarker) RecordValueWithPrecision(name string, value float64, units string, precision int, info ...interface{}) {
- b.mu.Lock()
- measurement := b.getMeasurement(name, "Smallest", " Largest", " Average", units, precision, info...)
- defer b.mu.Unlock()
- measurement.Results = append(measurement.Results, value)
-}
-
-func (b *benchmarker) getMeasurement(name string, smallestLabel string, largestLabel string, averageLabel string, units string, precision int, info ...interface{}) *types.SpecMeasurement {
- measurement, ok := b.measurements[name]
- if !ok {
- var computedInfo interface{}
- computedInfo = nil
- if len(info) > 0 {
- computedInfo = info[0]
- }
- measurement = &types.SpecMeasurement{
- Name: name,
- Info: computedInfo,
- Order: b.orderCounter,
- SmallestLabel: smallestLabel,
- LargestLabel: largestLabel,
- AverageLabel: averageLabel,
- Units: units,
- Precision: precision,
- Results: make([]float64, 0),
- }
- b.measurements[name] = measurement
- b.orderCounter++
- }
-
- return measurement
-}
-
-func (b *benchmarker) measurementsReport() map[string]*types.SpecMeasurement {
- b.mu.Lock()
- defer b.mu.Unlock()
- for _, measurement := range b.measurements {
- measurement.Smallest = math.MaxFloat64
- measurement.Largest = -math.MaxFloat64
- sum := float64(0)
- sumOfSquares := float64(0)
-
- for _, result := range measurement.Results {
- if result > measurement.Largest {
- measurement.Largest = result
- }
- if result < measurement.Smallest {
- measurement.Smallest = result
- }
- sum += result
- sumOfSquares += result * result
- }
-
- n := float64(len(measurement.Results))
- measurement.Average = sum / n
- measurement.StdDeviation = math.Sqrt(sumOfSquares/n - (sum/n)*(sum/n))
- }
-
- return b.measurements
-}
diff --git a/vendor/github.com/onsi/ginkgo/internal/leafnodes/interfaces.go b/vendor/github.com/onsi/ginkgo/internal/leafnodes/interfaces.go
deleted file mode 100644
index 8c3902d60..000000000
--- a/vendor/github.com/onsi/ginkgo/internal/leafnodes/interfaces.go
+++ /dev/null
@@ -1,19 +0,0 @@
-package leafnodes
-
-import (
- "github.com/onsi/ginkgo/types"
-)
-
-type BasicNode interface {
- Type() types.SpecComponentType
- Run() (types.SpecState, types.SpecFailure)
- CodeLocation() types.CodeLocation
-}
-
-type SubjectNode interface {
- BasicNode
-
- Text() string
- Flag() types.FlagType
- Samples() int
-}
diff --git a/vendor/github.com/onsi/ginkgo/internal/leafnodes/it_node.go b/vendor/github.com/onsi/ginkgo/internal/leafnodes/it_node.go
deleted file mode 100644
index 6eded7b76..000000000
--- a/vendor/github.com/onsi/ginkgo/internal/leafnodes/it_node.go
+++ /dev/null
@@ -1,47 +0,0 @@
-package leafnodes
-
-import (
- "time"
-
- "github.com/onsi/ginkgo/internal/failer"
- "github.com/onsi/ginkgo/types"
-)
-
-type ItNode struct {
- runner *runner
-
- flag types.FlagType
- text string
-}
-
-func NewItNode(text string, body interface{}, flag types.FlagType, codeLocation types.CodeLocation, timeout time.Duration, failer *failer.Failer, componentIndex int) *ItNode {
- return &ItNode{
- runner: newRunner(body, codeLocation, timeout, failer, types.SpecComponentTypeIt, componentIndex),
- flag: flag,
- text: text,
- }
-}
-
-func (node *ItNode) Run() (outcome types.SpecState, failure types.SpecFailure) {
- return node.runner.run()
-}
-
-func (node *ItNode) Type() types.SpecComponentType {
- return types.SpecComponentTypeIt
-}
-
-func (node *ItNode) Text() string {
- return node.text
-}
-
-func (node *ItNode) Flag() types.FlagType {
- return node.flag
-}
-
-func (node *ItNode) CodeLocation() types.CodeLocation {
- return node.runner.codeLocation
-}
-
-func (node *ItNode) Samples() int {
- return 1
-}
diff --git a/vendor/github.com/onsi/ginkgo/internal/leafnodes/measure_node.go b/vendor/github.com/onsi/ginkgo/internal/leafnodes/measure_node.go
deleted file mode 100644
index 3ab9a6d55..000000000
--- a/vendor/github.com/onsi/ginkgo/internal/leafnodes/measure_node.go
+++ /dev/null
@@ -1,62 +0,0 @@
-package leafnodes
-
-import (
- "reflect"
-
- "github.com/onsi/ginkgo/internal/failer"
- "github.com/onsi/ginkgo/types"
-)
-
-type MeasureNode struct {
- runner *runner
-
- text string
- flag types.FlagType
- samples int
- benchmarker *benchmarker
-}
-
-func NewMeasureNode(text string, body interface{}, flag types.FlagType, codeLocation types.CodeLocation, samples int, failer *failer.Failer, componentIndex int) *MeasureNode {
- benchmarker := newBenchmarker()
-
- wrappedBody := func() {
- reflect.ValueOf(body).Call([]reflect.Value{reflect.ValueOf(benchmarker)})
- }
-
- return &MeasureNode{
- runner: newRunner(wrappedBody, codeLocation, 0, failer, types.SpecComponentTypeMeasure, componentIndex),
-
- text: text,
- flag: flag,
- samples: samples,
- benchmarker: benchmarker,
- }
-}
-
-func (node *MeasureNode) Run() (outcome types.SpecState, failure types.SpecFailure) {
- return node.runner.run()
-}
-
-func (node *MeasureNode) MeasurementsReport() map[string]*types.SpecMeasurement {
- return node.benchmarker.measurementsReport()
-}
-
-func (node *MeasureNode) Type() types.SpecComponentType {
- return types.SpecComponentTypeMeasure
-}
-
-func (node *MeasureNode) Text() string {
- return node.text
-}
-
-func (node *MeasureNode) Flag() types.FlagType {
- return node.flag
-}
-
-func (node *MeasureNode) CodeLocation() types.CodeLocation {
- return node.runner.codeLocation
-}
-
-func (node *MeasureNode) Samples() int {
- return node.samples
-}
diff --git a/vendor/github.com/onsi/ginkgo/internal/leafnodes/runner.go b/vendor/github.com/onsi/ginkgo/internal/leafnodes/runner.go
deleted file mode 100644
index 16cb66c3e..000000000
--- a/vendor/github.com/onsi/ginkgo/internal/leafnodes/runner.go
+++ /dev/null
@@ -1,117 +0,0 @@
-package leafnodes
-
-import (
- "fmt"
- "reflect"
- "time"
-
- "github.com/onsi/ginkgo/internal/codelocation"
- "github.com/onsi/ginkgo/internal/failer"
- "github.com/onsi/ginkgo/types"
-)
-
-type runner struct {
- isAsync bool
- asyncFunc func(chan<- interface{})
- syncFunc func()
- codeLocation types.CodeLocation
- timeoutThreshold time.Duration
- nodeType types.SpecComponentType
- componentIndex int
- failer *failer.Failer
-}
-
-func newRunner(body interface{}, codeLocation types.CodeLocation, timeout time.Duration, failer *failer.Failer, nodeType types.SpecComponentType, componentIndex int) *runner {
- bodyType := reflect.TypeOf(body)
- if bodyType.Kind() != reflect.Func {
- panic(fmt.Sprintf("Expected a function but got something else at %v", codeLocation))
- }
-
- runner := &runner{
- codeLocation: codeLocation,
- timeoutThreshold: timeout,
- failer: failer,
- nodeType: nodeType,
- componentIndex: componentIndex,
- }
-
- switch bodyType.NumIn() {
- case 0:
- runner.syncFunc = body.(func())
- return runner
- case 1:
- if !(bodyType.In(0).Kind() == reflect.Chan && bodyType.In(0).Elem().Kind() == reflect.Interface) {
- panic(fmt.Sprintf("Must pass a Done channel to function at %v", codeLocation))
- }
-
- wrappedBody := func(done chan<- interface{}) {
- bodyValue := reflect.ValueOf(body)
- bodyValue.Call([]reflect.Value{reflect.ValueOf(done)})
- }
-
- runner.isAsync = true
- runner.asyncFunc = wrappedBody
- return runner
- }
-
- panic(fmt.Sprintf("Too many arguments to function at %v", codeLocation))
-}
-
-func (r *runner) run() (outcome types.SpecState, failure types.SpecFailure) {
- if r.isAsync {
- return r.runAsync()
- } else {
- return r.runSync()
- }
-}
-
-func (r *runner) runAsync() (outcome types.SpecState, failure types.SpecFailure) {
- done := make(chan interface{}, 1)
-
- go func() {
- finished := false
-
- defer func() {
- if e := recover(); e != nil || !finished {
- r.failer.Panic(codelocation.New(2), e)
- select {
- case <-done:
- break
- default:
- close(done)
- }
- }
- }()
-
- r.asyncFunc(done)
- finished = true
- }()
-
- // If this goroutine gets no CPU time before the select block,
- // the <-done case may complete even if the test took longer than the timeoutThreshold.
- // This can cause flaky behaviour, but we haven't seen it in the wild.
- select {
- case <-done:
- case <-time.After(r.timeoutThreshold):
- r.failer.Timeout(r.codeLocation)
- }
-
- failure, outcome = r.failer.Drain(r.nodeType, r.componentIndex, r.codeLocation)
- return
-}
-func (r *runner) runSync() (outcome types.SpecState, failure types.SpecFailure) {
- finished := false
-
- defer func() {
- if e := recover(); e != nil || !finished {
- r.failer.Panic(codelocation.New(2), e)
- }
-
- failure, outcome = r.failer.Drain(r.nodeType, r.componentIndex, r.codeLocation)
- }()
-
- r.syncFunc()
- finished = true
-
- return
-}
diff --git a/vendor/github.com/onsi/ginkgo/internal/leafnodes/setup_nodes.go b/vendor/github.com/onsi/ginkgo/internal/leafnodes/setup_nodes.go
deleted file mode 100644
index e3e9cb7c5..000000000
--- a/vendor/github.com/onsi/ginkgo/internal/leafnodes/setup_nodes.go
+++ /dev/null
@@ -1,48 +0,0 @@
-package leafnodes
-
-import (
- "time"
-
- "github.com/onsi/ginkgo/internal/failer"
- "github.com/onsi/ginkgo/types"
-)
-
-type SetupNode struct {
- runner *runner
-}
-
-func (node *SetupNode) Run() (outcome types.SpecState, failure types.SpecFailure) {
- return node.runner.run()
-}
-
-func (node *SetupNode) Type() types.SpecComponentType {
- return node.runner.nodeType
-}
-
-func (node *SetupNode) CodeLocation() types.CodeLocation {
- return node.runner.codeLocation
-}
-
-func NewBeforeEachNode(body interface{}, codeLocation types.CodeLocation, timeout time.Duration, failer *failer.Failer, componentIndex int) *SetupNode {
- return &SetupNode{
- runner: newRunner(body, codeLocation, timeout, failer, types.SpecComponentTypeBeforeEach, componentIndex),
- }
-}
-
-func NewAfterEachNode(body interface{}, codeLocation types.CodeLocation, timeout time.Duration, failer *failer.Failer, componentIndex int) *SetupNode {
- return &SetupNode{
- runner: newRunner(body, codeLocation, timeout, failer, types.SpecComponentTypeAfterEach, componentIndex),
- }
-}
-
-func NewJustBeforeEachNode(body interface{}, codeLocation types.CodeLocation, timeout time.Duration, failer *failer.Failer, componentIndex int) *SetupNode {
- return &SetupNode{
- runner: newRunner(body, codeLocation, timeout, failer, types.SpecComponentTypeJustBeforeEach, componentIndex),
- }
-}
-
-func NewJustAfterEachNode(body interface{}, codeLocation types.CodeLocation, timeout time.Duration, failer *failer.Failer, componentIndex int) *SetupNode {
- return &SetupNode{
- runner: newRunner(body, codeLocation, timeout, failer, types.SpecComponentTypeJustAfterEach, componentIndex),
- }
-}
diff --git a/vendor/github.com/onsi/ginkgo/internal/leafnodes/suite_nodes.go b/vendor/github.com/onsi/ginkgo/internal/leafnodes/suite_nodes.go
deleted file mode 100644
index 80f16ed78..000000000
--- a/vendor/github.com/onsi/ginkgo/internal/leafnodes/suite_nodes.go
+++ /dev/null
@@ -1,55 +0,0 @@
-package leafnodes
-
-import (
- "time"
-
- "github.com/onsi/ginkgo/internal/failer"
- "github.com/onsi/ginkgo/types"
-)
-
-type SuiteNode interface {
- Run(parallelNode int, parallelTotal int, syncHost string) bool
- Passed() bool
- Summary() *types.SetupSummary
-}
-
-type simpleSuiteNode struct {
- runner *runner
- outcome types.SpecState
- failure types.SpecFailure
- runTime time.Duration
-}
-
-func (node *simpleSuiteNode) Run(parallelNode int, parallelTotal int, syncHost string) bool {
- t := time.Now()
- node.outcome, node.failure = node.runner.run()
- node.runTime = time.Since(t)
-
- return node.outcome == types.SpecStatePassed
-}
-
-func (node *simpleSuiteNode) Passed() bool {
- return node.outcome == types.SpecStatePassed
-}
-
-func (node *simpleSuiteNode) Summary() *types.SetupSummary {
- return &types.SetupSummary{
- ComponentType: node.runner.nodeType,
- CodeLocation: node.runner.codeLocation,
- State: node.outcome,
- RunTime: node.runTime,
- Failure: node.failure,
- }
-}
-
-func NewBeforeSuiteNode(body interface{}, codeLocation types.CodeLocation, timeout time.Duration, failer *failer.Failer) SuiteNode {
- return &simpleSuiteNode{
- runner: newRunner(body, codeLocation, timeout, failer, types.SpecComponentTypeBeforeSuite, 0),
- }
-}
-
-func NewAfterSuiteNode(body interface{}, codeLocation types.CodeLocation, timeout time.Duration, failer *failer.Failer) SuiteNode {
- return &simpleSuiteNode{
- runner: newRunner(body, codeLocation, timeout, failer, types.SpecComponentTypeAfterSuite, 0),
- }
-}
diff --git a/vendor/github.com/onsi/ginkgo/internal/leafnodes/synchronized_after_suite_node.go b/vendor/github.com/onsi/ginkgo/internal/leafnodes/synchronized_after_suite_node.go
deleted file mode 100644
index a721d0cf7..000000000
--- a/vendor/github.com/onsi/ginkgo/internal/leafnodes/synchronized_after_suite_node.go
+++ /dev/null
@@ -1,90 +0,0 @@
-package leafnodes
-
-import (
- "encoding/json"
- "io/ioutil"
- "net/http"
- "time"
-
- "github.com/onsi/ginkgo/internal/failer"
- "github.com/onsi/ginkgo/types"
-)
-
-type synchronizedAfterSuiteNode struct {
- runnerA *runner
- runnerB *runner
-
- outcome types.SpecState
- failure types.SpecFailure
- runTime time.Duration
-}
-
-func NewSynchronizedAfterSuiteNode(bodyA interface{}, bodyB interface{}, codeLocation types.CodeLocation, timeout time.Duration, failer *failer.Failer) SuiteNode {
- return &synchronizedAfterSuiteNode{
- runnerA: newRunner(bodyA, codeLocation, timeout, failer, types.SpecComponentTypeAfterSuite, 0),
- runnerB: newRunner(bodyB, codeLocation, timeout, failer, types.SpecComponentTypeAfterSuite, 0),
- }
-}
-
-func (node *synchronizedAfterSuiteNode) Run(parallelNode int, parallelTotal int, syncHost string) bool {
- node.outcome, node.failure = node.runnerA.run()
-
- if parallelNode == 1 {
- if parallelTotal > 1 {
- node.waitUntilOtherNodesAreDone(syncHost)
- }
-
- outcome, failure := node.runnerB.run()
-
- if node.outcome == types.SpecStatePassed {
- node.outcome, node.failure = outcome, failure
- }
- }
-
- return node.outcome == types.SpecStatePassed
-}
-
-func (node *synchronizedAfterSuiteNode) Passed() bool {
- return node.outcome == types.SpecStatePassed
-}
-
-func (node *synchronizedAfterSuiteNode) Summary() *types.SetupSummary {
- return &types.SetupSummary{
- ComponentType: node.runnerA.nodeType,
- CodeLocation: node.runnerA.codeLocation,
- State: node.outcome,
- RunTime: node.runTime,
- Failure: node.failure,
- }
-}
-
-func (node *synchronizedAfterSuiteNode) waitUntilOtherNodesAreDone(syncHost string) {
- for {
- if node.canRun(syncHost) {
- return
- }
-
- time.Sleep(50 * time.Millisecond)
- }
-}
-
-func (node *synchronizedAfterSuiteNode) canRun(syncHost string) bool {
- resp, err := http.Get(syncHost + "/RemoteAfterSuiteData")
- if err != nil || resp.StatusCode != http.StatusOK {
- return false
- }
-
- body, err := ioutil.ReadAll(resp.Body)
- if err != nil {
- return false
- }
- resp.Body.Close()
-
- afterSuiteData := types.RemoteAfterSuiteData{}
- err = json.Unmarshal(body, &afterSuiteData)
- if err != nil {
- return false
- }
-
- return afterSuiteData.CanRun
-}
diff --git a/vendor/github.com/onsi/ginkgo/internal/leafnodes/synchronized_before_suite_node.go b/vendor/github.com/onsi/ginkgo/internal/leafnodes/synchronized_before_suite_node.go
deleted file mode 100644
index d5c889319..000000000
--- a/vendor/github.com/onsi/ginkgo/internal/leafnodes/synchronized_before_suite_node.go
+++ /dev/null
@@ -1,181 +0,0 @@
-package leafnodes
-
-import (
- "bytes"
- "encoding/json"
- "io/ioutil"
- "net/http"
- "reflect"
- "time"
-
- "github.com/onsi/ginkgo/internal/failer"
- "github.com/onsi/ginkgo/types"
-)
-
-type synchronizedBeforeSuiteNode struct {
- runnerA *runner
- runnerB *runner
-
- data []byte
-
- outcome types.SpecState
- failure types.SpecFailure
- runTime time.Duration
-}
-
-func NewSynchronizedBeforeSuiteNode(bodyA interface{}, bodyB interface{}, codeLocation types.CodeLocation, timeout time.Duration, failer *failer.Failer) SuiteNode {
- node := &synchronizedBeforeSuiteNode{}
-
- node.runnerA = newRunner(node.wrapA(bodyA), codeLocation, timeout, failer, types.SpecComponentTypeBeforeSuite, 0)
- node.runnerB = newRunner(node.wrapB(bodyB), codeLocation, timeout, failer, types.SpecComponentTypeBeforeSuite, 0)
-
- return node
-}
-
-func (node *synchronizedBeforeSuiteNode) Run(parallelNode int, parallelTotal int, syncHost string) bool {
- t := time.Now()
- defer func() {
- node.runTime = time.Since(t)
- }()
-
- if parallelNode == 1 {
- node.outcome, node.failure = node.runA(parallelTotal, syncHost)
- } else {
- node.outcome, node.failure = node.waitForA(syncHost)
- }
-
- if node.outcome != types.SpecStatePassed {
- return false
- }
- node.outcome, node.failure = node.runnerB.run()
-
- return node.outcome == types.SpecStatePassed
-}
-
-func (node *synchronizedBeforeSuiteNode) runA(parallelTotal int, syncHost string) (types.SpecState, types.SpecFailure) {
- outcome, failure := node.runnerA.run()
-
- if parallelTotal > 1 {
- state := types.RemoteBeforeSuiteStatePassed
- if outcome != types.SpecStatePassed {
- state = types.RemoteBeforeSuiteStateFailed
- }
- json := (types.RemoteBeforeSuiteData{
- Data: node.data,
- State: state,
- }).ToJSON()
- http.Post(syncHost+"/BeforeSuiteState", "application/json", bytes.NewBuffer(json))
- }
-
- return outcome, failure
-}
-
-func (node *synchronizedBeforeSuiteNode) waitForA(syncHost string) (types.SpecState, types.SpecFailure) {
- failure := func(message string) types.SpecFailure {
- return types.SpecFailure{
- Message: message,
- Location: node.runnerA.codeLocation,
- ComponentType: node.runnerA.nodeType,
- ComponentIndex: node.runnerA.componentIndex,
- ComponentCodeLocation: node.runnerA.codeLocation,
- }
- }
- for {
- resp, err := http.Get(syncHost + "/BeforeSuiteState")
- if err != nil || resp.StatusCode != http.StatusOK {
- return types.SpecStateFailed, failure("Failed to fetch BeforeSuite state")
- }
-
- body, err := ioutil.ReadAll(resp.Body)
- if err != nil {
- return types.SpecStateFailed, failure("Failed to read BeforeSuite state")
- }
- resp.Body.Close()
-
- beforeSuiteData := types.RemoteBeforeSuiteData{}
- err = json.Unmarshal(body, &beforeSuiteData)
- if err != nil {
- return types.SpecStateFailed, failure("Failed to decode BeforeSuite state")
- }
-
- switch beforeSuiteData.State {
- case types.RemoteBeforeSuiteStatePassed:
- node.data = beforeSuiteData.Data
- return types.SpecStatePassed, types.SpecFailure{}
- case types.RemoteBeforeSuiteStateFailed:
- return types.SpecStateFailed, failure("BeforeSuite on Node 1 failed")
- case types.RemoteBeforeSuiteStateDisappeared:
- return types.SpecStateFailed, failure("Node 1 disappeared before completing BeforeSuite")
- }
-
- time.Sleep(50 * time.Millisecond)
- }
-}
-
-func (node *synchronizedBeforeSuiteNode) Passed() bool {
- return node.outcome == types.SpecStatePassed
-}
-
-func (node *synchronizedBeforeSuiteNode) Summary() *types.SetupSummary {
- return &types.SetupSummary{
- ComponentType: node.runnerA.nodeType,
- CodeLocation: node.runnerA.codeLocation,
- State: node.outcome,
- RunTime: node.runTime,
- Failure: node.failure,
- }
-}
-
-func (node *synchronizedBeforeSuiteNode) wrapA(bodyA interface{}) interface{} {
- typeA := reflect.TypeOf(bodyA)
- if typeA.Kind() != reflect.Func {
- panic("SynchronizedBeforeSuite expects a function as its first argument")
- }
-
- takesNothing := typeA.NumIn() == 0
- takesADoneChannel := typeA.NumIn() == 1 && typeA.In(0).Kind() == reflect.Chan && typeA.In(0).Elem().Kind() == reflect.Interface
- returnsBytes := typeA.NumOut() == 1 && typeA.Out(0).Kind() == reflect.Slice && typeA.Out(0).Elem().Kind() == reflect.Uint8
-
- if !((takesNothing || takesADoneChannel) && returnsBytes) {
- panic("SynchronizedBeforeSuite's first argument should be a function that returns []byte and either takes no arguments or takes a Done channel.")
- }
-
- if takesADoneChannel {
- return func(done chan<- interface{}) {
- out := reflect.ValueOf(bodyA).Call([]reflect.Value{reflect.ValueOf(done)})
- node.data = out[0].Interface().([]byte)
- }
- }
-
- return func() {
- out := reflect.ValueOf(bodyA).Call([]reflect.Value{})
- node.data = out[0].Interface().([]byte)
- }
-}
-
-func (node *synchronizedBeforeSuiteNode) wrapB(bodyB interface{}) interface{} {
- typeB := reflect.TypeOf(bodyB)
- if typeB.Kind() != reflect.Func {
- panic("SynchronizedBeforeSuite expects a function as its second argument")
- }
-
- returnsNothing := typeB.NumOut() == 0
- takesBytesOnly := typeB.NumIn() == 1 && typeB.In(0).Kind() == reflect.Slice && typeB.In(0).Elem().Kind() == reflect.Uint8
- takesBytesAndDone := typeB.NumIn() == 2 &&
- typeB.In(0).Kind() == reflect.Slice && typeB.In(0).Elem().Kind() == reflect.Uint8 &&
- typeB.In(1).Kind() == reflect.Chan && typeB.In(1).Elem().Kind() == reflect.Interface
-
- if !((takesBytesOnly || takesBytesAndDone) && returnsNothing) {
- panic("SynchronizedBeforeSuite's second argument should be a function that returns nothing and either takes []byte or ([]byte, Done)")
- }
-
- if takesBytesAndDone {
- return func(done chan<- interface{}) {
- reflect.ValueOf(bodyB).Call([]reflect.Value{reflect.ValueOf(node.data), reflect.ValueOf(done)})
- }
- }
-
- return func() {
- reflect.ValueOf(bodyB).Call([]reflect.Value{reflect.ValueOf(node.data)})
- }
-}
diff --git a/vendor/github.com/onsi/ginkgo/internal/remote/aggregator.go b/vendor/github.com/onsi/ginkgo/internal/remote/aggregator.go
deleted file mode 100644
index 992437d9e..000000000
--- a/vendor/github.com/onsi/ginkgo/internal/remote/aggregator.go
+++ /dev/null
@@ -1,249 +0,0 @@
-/*
-
-Aggregator is a reporter used by the Ginkgo CLI to aggregate and present parallel test output
-coherently as tests complete. You shouldn't need to use this in your code. To run tests in parallel:
-
- ginkgo -nodes=N
-
-where N is the number of nodes you desire.
-*/
-package remote
-
-import (
- "time"
-
- "github.com/onsi/ginkgo/config"
- "github.com/onsi/ginkgo/reporters/stenographer"
- "github.com/onsi/ginkgo/types"
-)
-
-type configAndSuite struct {
- config config.GinkgoConfigType
- summary *types.SuiteSummary
-}
-
-type Aggregator struct {
- nodeCount int
- config config.DefaultReporterConfigType
- stenographer stenographer.Stenographer
- result chan bool
-
- suiteBeginnings chan configAndSuite
- aggregatedSuiteBeginnings []configAndSuite
-
- beforeSuites chan *types.SetupSummary
- aggregatedBeforeSuites []*types.SetupSummary
-
- afterSuites chan *types.SetupSummary
- aggregatedAfterSuites []*types.SetupSummary
-
- specCompletions chan *types.SpecSummary
- completedSpecs []*types.SpecSummary
-
- suiteEndings chan *types.SuiteSummary
- aggregatedSuiteEndings []*types.SuiteSummary
- specs []*types.SpecSummary
-
- startTime time.Time
-}
-
-func NewAggregator(nodeCount int, result chan bool, config config.DefaultReporterConfigType, stenographer stenographer.Stenographer) *Aggregator {
- aggregator := &Aggregator{
- nodeCount: nodeCount,
- result: result,
- config: config,
- stenographer: stenographer,
-
- suiteBeginnings: make(chan configAndSuite),
- beforeSuites: make(chan *types.SetupSummary),
- afterSuites: make(chan *types.SetupSummary),
- specCompletions: make(chan *types.SpecSummary),
- suiteEndings: make(chan *types.SuiteSummary),
- }
-
- go aggregator.mux()
-
- return aggregator
-}
-
-func (aggregator *Aggregator) SpecSuiteWillBegin(config config.GinkgoConfigType, summary *types.SuiteSummary) {
- aggregator.suiteBeginnings <- configAndSuite{config, summary}
-}
-
-func (aggregator *Aggregator) BeforeSuiteDidRun(setupSummary *types.SetupSummary) {
- aggregator.beforeSuites <- setupSummary
-}
-
-func (aggregator *Aggregator) AfterSuiteDidRun(setupSummary *types.SetupSummary) {
- aggregator.afterSuites <- setupSummary
-}
-
-func (aggregator *Aggregator) SpecWillRun(specSummary *types.SpecSummary) {
- //noop
-}
-
-func (aggregator *Aggregator) SpecDidComplete(specSummary *types.SpecSummary) {
- aggregator.specCompletions <- specSummary
-}
-
-func (aggregator *Aggregator) SpecSuiteDidEnd(summary *types.SuiteSummary) {
- aggregator.suiteEndings <- summary
-}
-
-func (aggregator *Aggregator) mux() {
-loop:
- for {
- select {
- case configAndSuite := <-aggregator.suiteBeginnings:
- aggregator.registerSuiteBeginning(configAndSuite)
- case setupSummary := <-aggregator.beforeSuites:
- aggregator.registerBeforeSuite(setupSummary)
- case setupSummary := <-aggregator.afterSuites:
- aggregator.registerAfterSuite(setupSummary)
- case specSummary := <-aggregator.specCompletions:
- aggregator.registerSpecCompletion(specSummary)
- case suite := <-aggregator.suiteEndings:
- finished, passed := aggregator.registerSuiteEnding(suite)
- if finished {
- aggregator.result <- passed
- break loop
- }
- }
- }
-}
-
-func (aggregator *Aggregator) registerSuiteBeginning(configAndSuite configAndSuite) {
- aggregator.aggregatedSuiteBeginnings = append(aggregator.aggregatedSuiteBeginnings, configAndSuite)
-
- if len(aggregator.aggregatedSuiteBeginnings) == 1 {
- aggregator.startTime = time.Now()
- }
-
- if len(aggregator.aggregatedSuiteBeginnings) != aggregator.nodeCount {
- return
- }
-
- aggregator.stenographer.AnnounceSuite(configAndSuite.summary.SuiteDescription, configAndSuite.config.RandomSeed, configAndSuite.config.RandomizeAllSpecs, aggregator.config.Succinct)
-
- totalNumberOfSpecs := 0
- if len(aggregator.aggregatedSuiteBeginnings) > 0 {
- totalNumberOfSpecs = configAndSuite.summary.NumberOfSpecsBeforeParallelization
- }
-
- aggregator.stenographer.AnnounceTotalNumberOfSpecs(totalNumberOfSpecs, aggregator.config.Succinct)
- aggregator.stenographer.AnnounceAggregatedParallelRun(aggregator.nodeCount, aggregator.config.Succinct)
- aggregator.flushCompletedSpecs()
-}
-
-func (aggregator *Aggregator) registerBeforeSuite(setupSummary *types.SetupSummary) {
- aggregator.aggregatedBeforeSuites = append(aggregator.aggregatedBeforeSuites, setupSummary)
- aggregator.flushCompletedSpecs()
-}
-
-func (aggregator *Aggregator) registerAfterSuite(setupSummary *types.SetupSummary) {
- aggregator.aggregatedAfterSuites = append(aggregator.aggregatedAfterSuites, setupSummary)
- aggregator.flushCompletedSpecs()
-}
-
-func (aggregator *Aggregator) registerSpecCompletion(specSummary *types.SpecSummary) {
- aggregator.completedSpecs = append(aggregator.completedSpecs, specSummary)
- aggregator.specs = append(aggregator.specs, specSummary)
- aggregator.flushCompletedSpecs()
-}
-
-func (aggregator *Aggregator) flushCompletedSpecs() {
- if len(aggregator.aggregatedSuiteBeginnings) != aggregator.nodeCount {
- return
- }
-
- for _, setupSummary := range aggregator.aggregatedBeforeSuites {
- aggregator.announceBeforeSuite(setupSummary)
- }
-
- for _, specSummary := range aggregator.completedSpecs {
- aggregator.announceSpec(specSummary)
- }
-
- for _, setupSummary := range aggregator.aggregatedAfterSuites {
- aggregator.announceAfterSuite(setupSummary)
- }
-
- aggregator.aggregatedBeforeSuites = []*types.SetupSummary{}
- aggregator.completedSpecs = []*types.SpecSummary{}
- aggregator.aggregatedAfterSuites = []*types.SetupSummary{}
-}
-
-func (aggregator *Aggregator) announceBeforeSuite(setupSummary *types.SetupSummary) {
- aggregator.stenographer.AnnounceCapturedOutput(setupSummary.CapturedOutput)
- if setupSummary.State != types.SpecStatePassed {
- aggregator.stenographer.AnnounceBeforeSuiteFailure(setupSummary, aggregator.config.Succinct, aggregator.config.FullTrace)
- }
-}
-
-func (aggregator *Aggregator) announceAfterSuite(setupSummary *types.SetupSummary) {
- aggregator.stenographer.AnnounceCapturedOutput(setupSummary.CapturedOutput)
- if setupSummary.State != types.SpecStatePassed {
- aggregator.stenographer.AnnounceAfterSuiteFailure(setupSummary, aggregator.config.Succinct, aggregator.config.FullTrace)
- }
-}
-
-func (aggregator *Aggregator) announceSpec(specSummary *types.SpecSummary) {
- if aggregator.config.Verbose && specSummary.State != types.SpecStatePending && specSummary.State != types.SpecStateSkipped {
- aggregator.stenographer.AnnounceSpecWillRun(specSummary)
- }
-
- aggregator.stenographer.AnnounceCapturedOutput(specSummary.CapturedOutput)
-
- switch specSummary.State {
- case types.SpecStatePassed:
- if specSummary.IsMeasurement {
- aggregator.stenographer.AnnounceSuccessfulMeasurement(specSummary, aggregator.config.Succinct)
- } else if specSummary.RunTime.Seconds() >= aggregator.config.SlowSpecThreshold {
- aggregator.stenographer.AnnounceSuccessfulSlowSpec(specSummary, aggregator.config.Succinct)
- } else {
- aggregator.stenographer.AnnounceSuccessfulSpec(specSummary)
- }
-
- case types.SpecStatePending:
- aggregator.stenographer.AnnouncePendingSpec(specSummary, aggregator.config.NoisyPendings && !aggregator.config.Succinct)
- case types.SpecStateSkipped:
- aggregator.stenographer.AnnounceSkippedSpec(specSummary, aggregator.config.Succinct || !aggregator.config.NoisySkippings, aggregator.config.FullTrace)
- case types.SpecStateTimedOut:
- aggregator.stenographer.AnnounceSpecTimedOut(specSummary, aggregator.config.Succinct, aggregator.config.FullTrace)
- case types.SpecStatePanicked:
- aggregator.stenographer.AnnounceSpecPanicked(specSummary, aggregator.config.Succinct, aggregator.config.FullTrace)
- case types.SpecStateFailed:
- aggregator.stenographer.AnnounceSpecFailed(specSummary, aggregator.config.Succinct, aggregator.config.FullTrace)
- }
-}
-
-func (aggregator *Aggregator) registerSuiteEnding(suite *types.SuiteSummary) (finished bool, passed bool) {
- aggregator.aggregatedSuiteEndings = append(aggregator.aggregatedSuiteEndings, suite)
- if len(aggregator.aggregatedSuiteEndings) < aggregator.nodeCount {
- return false, false
- }
-
- aggregatedSuiteSummary := &types.SuiteSummary{}
- aggregatedSuiteSummary.SuiteSucceeded = true
-
- for _, suiteSummary := range aggregator.aggregatedSuiteEndings {
- if !suiteSummary.SuiteSucceeded {
- aggregatedSuiteSummary.SuiteSucceeded = false
- }
-
- aggregatedSuiteSummary.NumberOfSpecsThatWillBeRun += suiteSummary.NumberOfSpecsThatWillBeRun
- aggregatedSuiteSummary.NumberOfTotalSpecs += suiteSummary.NumberOfTotalSpecs
- aggregatedSuiteSummary.NumberOfPassedSpecs += suiteSummary.NumberOfPassedSpecs
- aggregatedSuiteSummary.NumberOfFailedSpecs += suiteSummary.NumberOfFailedSpecs
- aggregatedSuiteSummary.NumberOfPendingSpecs += suiteSummary.NumberOfPendingSpecs
- aggregatedSuiteSummary.NumberOfSkippedSpecs += suiteSummary.NumberOfSkippedSpecs
- aggregatedSuiteSummary.NumberOfFlakedSpecs += suiteSummary.NumberOfFlakedSpecs
- }
-
- aggregatedSuiteSummary.RunTime = time.Since(aggregator.startTime)
-
- aggregator.stenographer.SummarizeFailures(aggregator.specs)
- aggregator.stenographer.AnnounceSpecRunCompletion(aggregatedSuiteSummary, aggregator.config.Succinct)
-
- return true, aggregatedSuiteSummary.SuiteSucceeded
-}
diff --git a/vendor/github.com/onsi/ginkgo/internal/remote/forwarding_reporter.go b/vendor/github.com/onsi/ginkgo/internal/remote/forwarding_reporter.go
deleted file mode 100644
index 284bc62e5..000000000
--- a/vendor/github.com/onsi/ginkgo/internal/remote/forwarding_reporter.go
+++ /dev/null
@@ -1,147 +0,0 @@
-package remote
-
-import (
- "bytes"
- "encoding/json"
- "fmt"
- "io"
- "net/http"
- "os"
-
- "github.com/onsi/ginkgo/internal/writer"
- "github.com/onsi/ginkgo/reporters"
- "github.com/onsi/ginkgo/reporters/stenographer"
-
- "github.com/onsi/ginkgo/config"
- "github.com/onsi/ginkgo/types"
-)
-
-//An interface to net/http's client to allow the injection of fakes under test
-type Poster interface {
- Post(url string, bodyType string, body io.Reader) (resp *http.Response, err error)
-}
-
-/*
-The ForwardingReporter is a Ginkgo reporter that forwards information to
-a Ginkgo remote server.
-
-When streaming parallel test output, this repoter is automatically installed by Ginkgo.
-
-This is accomplished by passing in the GINKGO_REMOTE_REPORTING_SERVER environment variable to `go test`, the Ginkgo test runner
-detects this environment variable (which should contain the host of the server) and automatically installs a ForwardingReporter
-in place of Ginkgo's DefaultReporter.
-*/
-
-type ForwardingReporter struct {
- serverHost string
- poster Poster
- outputInterceptor OutputInterceptor
- debugMode bool
- debugFile *os.File
- nestedReporter *reporters.DefaultReporter
-}
-
-func NewForwardingReporter(config config.DefaultReporterConfigType, serverHost string, poster Poster, outputInterceptor OutputInterceptor, ginkgoWriter *writer.Writer, debugFile string) *ForwardingReporter {
- reporter := &ForwardingReporter{
- serverHost: serverHost,
- poster: poster,
- outputInterceptor: outputInterceptor,
- }
-
- if debugFile != "" {
- var err error
- reporter.debugMode = true
- reporter.debugFile, err = os.Create(debugFile)
- if err != nil {
- fmt.Println(err.Error())
- os.Exit(1)
- }
-
- if !config.Verbose {
- //if verbose is true then the GinkgoWriter emits to stdout. Don't _also_ redirect GinkgoWriter output as that will result in duplication.
- ginkgoWriter.AndRedirectTo(reporter.debugFile)
- }
- outputInterceptor.StreamTo(reporter.debugFile) //This is not working
-
- stenographer := stenographer.New(false, true, reporter.debugFile)
- config.Succinct = false
- config.Verbose = true
- config.FullTrace = true
- reporter.nestedReporter = reporters.NewDefaultReporter(config, stenographer)
- }
-
- return reporter
-}
-
-func (reporter *ForwardingReporter) post(path string, data interface{}) {
- encoded, _ := json.Marshal(data)
- buffer := bytes.NewBuffer(encoded)
- reporter.poster.Post(reporter.serverHost+path, "application/json", buffer)
-}
-
-func (reporter *ForwardingReporter) SpecSuiteWillBegin(conf config.GinkgoConfigType, summary *types.SuiteSummary) {
- data := struct {
- Config config.GinkgoConfigType `json:"config"`
- Summary *types.SuiteSummary `json:"suite-summary"`
- }{
- conf,
- summary,
- }
-
- reporter.outputInterceptor.StartInterceptingOutput()
- if reporter.debugMode {
- reporter.nestedReporter.SpecSuiteWillBegin(conf, summary)
- reporter.debugFile.Sync()
- }
- reporter.post("/SpecSuiteWillBegin", data)
-}
-
-func (reporter *ForwardingReporter) BeforeSuiteDidRun(setupSummary *types.SetupSummary) {
- output, _ := reporter.outputInterceptor.StopInterceptingAndReturnOutput()
- reporter.outputInterceptor.StartInterceptingOutput()
- setupSummary.CapturedOutput = output
- if reporter.debugMode {
- reporter.nestedReporter.BeforeSuiteDidRun(setupSummary)
- reporter.debugFile.Sync()
- }
- reporter.post("/BeforeSuiteDidRun", setupSummary)
-}
-
-func (reporter *ForwardingReporter) SpecWillRun(specSummary *types.SpecSummary) {
- if reporter.debugMode {
- reporter.nestedReporter.SpecWillRun(specSummary)
- reporter.debugFile.Sync()
- }
- reporter.post("/SpecWillRun", specSummary)
-}
-
-func (reporter *ForwardingReporter) SpecDidComplete(specSummary *types.SpecSummary) {
- output, _ := reporter.outputInterceptor.StopInterceptingAndReturnOutput()
- reporter.outputInterceptor.StartInterceptingOutput()
- specSummary.CapturedOutput = output
- if reporter.debugMode {
- reporter.nestedReporter.SpecDidComplete(specSummary)
- reporter.debugFile.Sync()
- }
- reporter.post("/SpecDidComplete", specSummary)
-}
-
-func (reporter *ForwardingReporter) AfterSuiteDidRun(setupSummary *types.SetupSummary) {
- output, _ := reporter.outputInterceptor.StopInterceptingAndReturnOutput()
- reporter.outputInterceptor.StartInterceptingOutput()
- setupSummary.CapturedOutput = output
- if reporter.debugMode {
- reporter.nestedReporter.AfterSuiteDidRun(setupSummary)
- reporter.debugFile.Sync()
- }
- reporter.post("/AfterSuiteDidRun", setupSummary)
-}
-
-func (reporter *ForwardingReporter) SpecSuiteDidEnd(summary *types.SuiteSummary) {
- reporter.outputInterceptor.StopInterceptingAndReturnOutput()
- if reporter.debugMode {
- reporter.nestedReporter.SpecSuiteDidEnd(summary)
- reporter.debugFile.Sync()
- }
- reporter.post("/SpecSuiteDidEnd", summary)
-}
diff --git a/vendor/github.com/onsi/ginkgo/internal/remote/output_interceptor.go b/vendor/github.com/onsi/ginkgo/internal/remote/output_interceptor.go
deleted file mode 100644
index 5154abe87..000000000
--- a/vendor/github.com/onsi/ginkgo/internal/remote/output_interceptor.go
+++ /dev/null
@@ -1,13 +0,0 @@
-package remote
-
-import "os"
-
-/*
-The OutputInterceptor is used by the ForwardingReporter to
-intercept and capture all stdin and stderr output during a test run.
-*/
-type OutputInterceptor interface {
- StartInterceptingOutput() error
- StopInterceptingAndReturnOutput() (string, error)
- StreamTo(*os.File)
-}
diff --git a/vendor/github.com/onsi/ginkgo/internal/remote/output_interceptor_unix.go b/vendor/github.com/onsi/ginkgo/internal/remote/output_interceptor_unix.go
deleted file mode 100644
index 774967db6..000000000
--- a/vendor/github.com/onsi/ginkgo/internal/remote/output_interceptor_unix.go
+++ /dev/null
@@ -1,82 +0,0 @@
-// +build freebsd openbsd netbsd dragonfly darwin linux solaris
-
-package remote
-
-import (
- "errors"
- "io/ioutil"
- "os"
-
- "github.com/nxadm/tail"
- "golang.org/x/sys/unix"
-)
-
-func NewOutputInterceptor() OutputInterceptor {
- return &outputInterceptor{}
-}
-
-type outputInterceptor struct {
- redirectFile *os.File
- streamTarget *os.File
- intercepting bool
- tailer *tail.Tail
- doneTailing chan bool
-}
-
-func (interceptor *outputInterceptor) StartInterceptingOutput() error {
- if interceptor.intercepting {
- return errors.New("Already intercepting output!")
- }
- interceptor.intercepting = true
-
- var err error
-
- interceptor.redirectFile, err = ioutil.TempFile("", "ginkgo-output")
- if err != nil {
- return err
- }
-
- // This might call Dup3 if the dup2 syscall is not available, e.g. on
- // linux/arm64 or linux/riscv64
- unix.Dup2(int(interceptor.redirectFile.Fd()), 1)
- unix.Dup2(int(interceptor.redirectFile.Fd()), 2)
-
- if interceptor.streamTarget != nil {
- interceptor.tailer, _ = tail.TailFile(interceptor.redirectFile.Name(), tail.Config{Follow: true})
- interceptor.doneTailing = make(chan bool)
-
- go func() {
- for line := range interceptor.tailer.Lines {
- interceptor.streamTarget.Write([]byte(line.Text + "\n"))
- }
- close(interceptor.doneTailing)
- }()
- }
-
- return nil
-}
-
-func (interceptor *outputInterceptor) StopInterceptingAndReturnOutput() (string, error) {
- if !interceptor.intercepting {
- return "", errors.New("Not intercepting output!")
- }
-
- interceptor.redirectFile.Close()
- output, err := ioutil.ReadFile(interceptor.redirectFile.Name())
- os.Remove(interceptor.redirectFile.Name())
-
- interceptor.intercepting = false
-
- if interceptor.streamTarget != nil {
- interceptor.tailer.Stop()
- interceptor.tailer.Cleanup()
- <-interceptor.doneTailing
- interceptor.streamTarget.Sync()
- }
-
- return string(output), err
-}
-
-func (interceptor *outputInterceptor) StreamTo(out *os.File) {
- interceptor.streamTarget = out
-}
diff --git a/vendor/github.com/onsi/ginkgo/internal/remote/output_interceptor_win.go b/vendor/github.com/onsi/ginkgo/internal/remote/output_interceptor_win.go
deleted file mode 100644
index 40c790336..000000000
--- a/vendor/github.com/onsi/ginkgo/internal/remote/output_interceptor_win.go
+++ /dev/null
@@ -1,36 +0,0 @@
-// +build windows
-
-package remote
-
-import (
- "errors"
- "os"
-)
-
-func NewOutputInterceptor() OutputInterceptor {
- return &outputInterceptor{}
-}
-
-type outputInterceptor struct {
- intercepting bool
-}
-
-func (interceptor *outputInterceptor) StartInterceptingOutput() error {
- if interceptor.intercepting {
- return errors.New("Already intercepting output!")
- }
- interceptor.intercepting = true
-
- // not working on windows...
-
- return nil
-}
-
-func (interceptor *outputInterceptor) StopInterceptingAndReturnOutput() (string, error) {
- // not working on windows...
- interceptor.intercepting = false
-
- return "", nil
-}
-
-func (interceptor *outputInterceptor) StreamTo(*os.File) {}
diff --git a/vendor/github.com/onsi/ginkgo/internal/remote/server.go b/vendor/github.com/onsi/ginkgo/internal/remote/server.go
deleted file mode 100644
index 93e9dac05..000000000
--- a/vendor/github.com/onsi/ginkgo/internal/remote/server.go
+++ /dev/null
@@ -1,224 +0,0 @@
-/*
-
-The remote package provides the pieces to allow Ginkgo test suites to report to remote listeners.
-This is used, primarily, to enable streaming parallel test output but has, in principal, broader applications (e.g. streaming test output to a browser).
-
-*/
-
-package remote
-
-import (
- "encoding/json"
- "io/ioutil"
- "net"
- "net/http"
- "sync"
-
- "github.com/onsi/ginkgo/internal/spec_iterator"
-
- "github.com/onsi/ginkgo/config"
- "github.com/onsi/ginkgo/reporters"
- "github.com/onsi/ginkgo/types"
-)
-
-/*
-Server spins up on an automatically selected port and listens for communication from the forwarding reporter.
-It then forwards that communication to attached reporters.
-*/
-type Server struct {
- listener net.Listener
- reporters []reporters.Reporter
- alives []func() bool
- lock *sync.Mutex
- beforeSuiteData types.RemoteBeforeSuiteData
- parallelTotal int
- counter int
-}
-
-//Create a new server, automatically selecting a port
-func NewServer(parallelTotal int) (*Server, error) {
- listener, err := net.Listen("tcp", "127.0.0.1:0")
- if err != nil {
- return nil, err
- }
- return &Server{
- listener: listener,
- lock: &sync.Mutex{},
- alives: make([]func() bool, parallelTotal),
- beforeSuiteData: types.RemoteBeforeSuiteData{Data: nil, State: types.RemoteBeforeSuiteStatePending},
- parallelTotal: parallelTotal,
- }, nil
-}
-
-//Start the server. You don't need to `go s.Start()`, just `s.Start()`
-func (server *Server) Start() {
- httpServer := &http.Server{}
- mux := http.NewServeMux()
- httpServer.Handler = mux
-
- //streaming endpoints
- mux.HandleFunc("/SpecSuiteWillBegin", server.specSuiteWillBegin)
- mux.HandleFunc("/BeforeSuiteDidRun", server.beforeSuiteDidRun)
- mux.HandleFunc("/AfterSuiteDidRun", server.afterSuiteDidRun)
- mux.HandleFunc("/SpecWillRun", server.specWillRun)
- mux.HandleFunc("/SpecDidComplete", server.specDidComplete)
- mux.HandleFunc("/SpecSuiteDidEnd", server.specSuiteDidEnd)
-
- //synchronization endpoints
- mux.HandleFunc("/BeforeSuiteState", server.handleBeforeSuiteState)
- mux.HandleFunc("/RemoteAfterSuiteData", server.handleRemoteAfterSuiteData)
- mux.HandleFunc("/counter", server.handleCounter)
- mux.HandleFunc("/has-counter", server.handleHasCounter) //for backward compatibility
-
- go httpServer.Serve(server.listener)
-}
-
-//Stop the server
-func (server *Server) Close() {
- server.listener.Close()
-}
-
-//The address the server can be reached it. Pass this into the `ForwardingReporter`.
-func (server *Server) Address() string {
- return "http://" + server.listener.Addr().String()
-}
-
-//
-// Streaming Endpoints
-//
-
-//The server will forward all received messages to Ginkgo reporters registered with `RegisterReporters`
-func (server *Server) readAll(request *http.Request) []byte {
- defer request.Body.Close()
- body, _ := ioutil.ReadAll(request.Body)
- return body
-}
-
-func (server *Server) RegisterReporters(reporters ...reporters.Reporter) {
- server.reporters = reporters
-}
-
-func (server *Server) specSuiteWillBegin(writer http.ResponseWriter, request *http.Request) {
- body := server.readAll(request)
-
- var data struct {
- Config config.GinkgoConfigType `json:"config"`
- Summary *types.SuiteSummary `json:"suite-summary"`
- }
-
- json.Unmarshal(body, &data)
-
- for _, reporter := range server.reporters {
- reporter.SpecSuiteWillBegin(data.Config, data.Summary)
- }
-}
-
-func (server *Server) beforeSuiteDidRun(writer http.ResponseWriter, request *http.Request) {
- body := server.readAll(request)
- var setupSummary *types.SetupSummary
- json.Unmarshal(body, &setupSummary)
-
- for _, reporter := range server.reporters {
- reporter.BeforeSuiteDidRun(setupSummary)
- }
-}
-
-func (server *Server) afterSuiteDidRun(writer http.ResponseWriter, request *http.Request) {
- body := server.readAll(request)
- var setupSummary *types.SetupSummary
- json.Unmarshal(body, &setupSummary)
-
- for _, reporter := range server.reporters {
- reporter.AfterSuiteDidRun(setupSummary)
- }
-}
-
-func (server *Server) specWillRun(writer http.ResponseWriter, request *http.Request) {
- body := server.readAll(request)
- var specSummary *types.SpecSummary
- json.Unmarshal(body, &specSummary)
-
- for _, reporter := range server.reporters {
- reporter.SpecWillRun(specSummary)
- }
-}
-
-func (server *Server) specDidComplete(writer http.ResponseWriter, request *http.Request) {
- body := server.readAll(request)
- var specSummary *types.SpecSummary
- json.Unmarshal(body, &specSummary)
-
- for _, reporter := range server.reporters {
- reporter.SpecDidComplete(specSummary)
- }
-}
-
-func (server *Server) specSuiteDidEnd(writer http.ResponseWriter, request *http.Request) {
- body := server.readAll(request)
- var suiteSummary *types.SuiteSummary
- json.Unmarshal(body, &suiteSummary)
-
- for _, reporter := range server.reporters {
- reporter.SpecSuiteDidEnd(suiteSummary)
- }
-}
-
-//
-// Synchronization Endpoints
-//
-
-func (server *Server) RegisterAlive(node int, alive func() bool) {
- server.lock.Lock()
- defer server.lock.Unlock()
- server.alives[node-1] = alive
-}
-
-func (server *Server) nodeIsAlive(node int) bool {
- server.lock.Lock()
- defer server.lock.Unlock()
- alive := server.alives[node-1]
- if alive == nil {
- return true
- }
- return alive()
-}
-
-func (server *Server) handleBeforeSuiteState(writer http.ResponseWriter, request *http.Request) {
- if request.Method == "POST" {
- dec := json.NewDecoder(request.Body)
- dec.Decode(&(server.beforeSuiteData))
- } else {
- beforeSuiteData := server.beforeSuiteData
- if beforeSuiteData.State == types.RemoteBeforeSuiteStatePending && !server.nodeIsAlive(1) {
- beforeSuiteData.State = types.RemoteBeforeSuiteStateDisappeared
- }
- enc := json.NewEncoder(writer)
- enc.Encode(beforeSuiteData)
- }
-}
-
-func (server *Server) handleRemoteAfterSuiteData(writer http.ResponseWriter, request *http.Request) {
- afterSuiteData := types.RemoteAfterSuiteData{
- CanRun: true,
- }
- for i := 2; i <= server.parallelTotal; i++ {
- afterSuiteData.CanRun = afterSuiteData.CanRun && !server.nodeIsAlive(i)
- }
-
- enc := json.NewEncoder(writer)
- enc.Encode(afterSuiteData)
-}
-
-func (server *Server) handleCounter(writer http.ResponseWriter, request *http.Request) {
- c := spec_iterator.Counter{}
- server.lock.Lock()
- c.Index = server.counter
- server.counter++
- server.lock.Unlock()
-
- json.NewEncoder(writer).Encode(c)
-}
-
-func (server *Server) handleHasCounter(writer http.ResponseWriter, request *http.Request) {
- writer.Write([]byte(""))
-}
diff --git a/vendor/github.com/onsi/ginkgo/internal/spec/spec.go b/vendor/github.com/onsi/ginkgo/internal/spec/spec.go
deleted file mode 100644
index 6eef40a0e..000000000
--- a/vendor/github.com/onsi/ginkgo/internal/spec/spec.go
+++ /dev/null
@@ -1,247 +0,0 @@
-package spec
-
-import (
- "fmt"
- "io"
- "time"
-
- "sync"
-
- "github.com/onsi/ginkgo/internal/containernode"
- "github.com/onsi/ginkgo/internal/leafnodes"
- "github.com/onsi/ginkgo/types"
-)
-
-type Spec struct {
- subject leafnodes.SubjectNode
- focused bool
- announceProgress bool
-
- containers []*containernode.ContainerNode
-
- state types.SpecState
- runTime time.Duration
- startTime time.Time
- failure types.SpecFailure
- previousFailures bool
-
- stateMutex *sync.Mutex
-}
-
-func New(subject leafnodes.SubjectNode, containers []*containernode.ContainerNode, announceProgress bool) *Spec {
- spec := &Spec{
- subject: subject,
- containers: containers,
- focused: subject.Flag() == types.FlagTypeFocused,
- announceProgress: announceProgress,
- stateMutex: &sync.Mutex{},
- }
-
- spec.processFlag(subject.Flag())
- for i := len(containers) - 1; i >= 0; i-- {
- spec.processFlag(containers[i].Flag())
- }
-
- return spec
-}
-
-func (spec *Spec) processFlag(flag types.FlagType) {
- if flag == types.FlagTypeFocused {
- spec.focused = true
- } else if flag == types.FlagTypePending {
- spec.setState(types.SpecStatePending)
- }
-}
-
-func (spec *Spec) Skip() {
- spec.setState(types.SpecStateSkipped)
-}
-
-func (spec *Spec) Failed() bool {
- return spec.getState() == types.SpecStateFailed || spec.getState() == types.SpecStatePanicked || spec.getState() == types.SpecStateTimedOut
-}
-
-func (spec *Spec) Passed() bool {
- return spec.getState() == types.SpecStatePassed
-}
-
-func (spec *Spec) Flaked() bool {
- return spec.getState() == types.SpecStatePassed && spec.previousFailures
-}
-
-func (spec *Spec) Pending() bool {
- return spec.getState() == types.SpecStatePending
-}
-
-func (spec *Spec) Skipped() bool {
- return spec.getState() == types.SpecStateSkipped
-}
-
-func (spec *Spec) Focused() bool {
- return spec.focused
-}
-
-func (spec *Spec) IsMeasurement() bool {
- return spec.subject.Type() == types.SpecComponentTypeMeasure
-}
-
-func (spec *Spec) Summary(suiteID string) *types.SpecSummary {
- componentTexts := make([]string, len(spec.containers)+1)
- componentCodeLocations := make([]types.CodeLocation, len(spec.containers)+1)
-
- for i, container := range spec.containers {
- componentTexts[i] = container.Text()
- componentCodeLocations[i] = container.CodeLocation()
- }
-
- componentTexts[len(spec.containers)] = spec.subject.Text()
- componentCodeLocations[len(spec.containers)] = spec.subject.CodeLocation()
-
- runTime := spec.runTime
- if runTime == 0 && !spec.startTime.IsZero() {
- runTime = time.Since(spec.startTime)
- }
-
- return &types.SpecSummary{
- IsMeasurement: spec.IsMeasurement(),
- NumberOfSamples: spec.subject.Samples(),
- ComponentTexts: componentTexts,
- ComponentCodeLocations: componentCodeLocations,
- State: spec.getState(),
- RunTime: runTime,
- Failure: spec.failure,
- Measurements: spec.measurementsReport(),
- SuiteID: suiteID,
- }
-}
-
-func (spec *Spec) ConcatenatedString() string {
- s := ""
- for _, container := range spec.containers {
- s += container.Text() + " "
- }
-
- return s + spec.subject.Text()
-}
-
-func (spec *Spec) Run(writer io.Writer) {
- if spec.getState() == types.SpecStateFailed {
- spec.previousFailures = true
- }
-
- spec.startTime = time.Now()
- defer func() {
- spec.runTime = time.Since(spec.startTime)
- }()
-
- for sample := 0; sample < spec.subject.Samples(); sample++ {
- spec.runSample(sample, writer)
-
- if spec.getState() != types.SpecStatePassed {
- return
- }
- }
-}
-
-func (spec *Spec) getState() types.SpecState {
- spec.stateMutex.Lock()
- defer spec.stateMutex.Unlock()
- return spec.state
-}
-
-func (spec *Spec) setState(state types.SpecState) {
- spec.stateMutex.Lock()
- defer spec.stateMutex.Unlock()
- spec.state = state
-}
-
-func (spec *Spec) runSample(sample int, writer io.Writer) {
- spec.setState(types.SpecStatePassed)
- spec.failure = types.SpecFailure{}
- innerMostContainerIndexToUnwind := -1
-
- defer func() {
- for i := innerMostContainerIndexToUnwind; i >= 0; i-- {
- container := spec.containers[i]
- for _, justAfterEach := range container.SetupNodesOfType(types.SpecComponentTypeJustAfterEach) {
- spec.announceSetupNode(writer, "JustAfterEach", container, justAfterEach)
- justAfterEachState, justAfterEachFailure := justAfterEach.Run()
- if justAfterEachState != types.SpecStatePassed && spec.state == types.SpecStatePassed {
- spec.state = justAfterEachState
- spec.failure = justAfterEachFailure
- }
- }
- }
-
- for i := innerMostContainerIndexToUnwind; i >= 0; i-- {
- container := spec.containers[i]
- for _, afterEach := range container.SetupNodesOfType(types.SpecComponentTypeAfterEach) {
- spec.announceSetupNode(writer, "AfterEach", container, afterEach)
- afterEachState, afterEachFailure := afterEach.Run()
- if afterEachState != types.SpecStatePassed && spec.getState() == types.SpecStatePassed {
- spec.setState(afterEachState)
- spec.failure = afterEachFailure
- }
- }
- }
- }()
-
- for i, container := range spec.containers {
- innerMostContainerIndexToUnwind = i
- for _, beforeEach := range container.SetupNodesOfType(types.SpecComponentTypeBeforeEach) {
- spec.announceSetupNode(writer, "BeforeEach", container, beforeEach)
- s, f := beforeEach.Run()
- spec.failure = f
- spec.setState(s)
- if spec.getState() != types.SpecStatePassed {
- return
- }
- }
- }
-
- for _, container := range spec.containers {
- for _, justBeforeEach := range container.SetupNodesOfType(types.SpecComponentTypeJustBeforeEach) {
- spec.announceSetupNode(writer, "JustBeforeEach", container, justBeforeEach)
- s, f := justBeforeEach.Run()
- spec.failure = f
- spec.setState(s)
- if spec.getState() != types.SpecStatePassed {
- return
- }
- }
- }
-
- spec.announceSubject(writer, spec.subject)
- s, f := spec.subject.Run()
- spec.failure = f
- spec.setState(s)
-}
-
-func (spec *Spec) announceSetupNode(writer io.Writer, nodeType string, container *containernode.ContainerNode, setupNode leafnodes.BasicNode) {
- if spec.announceProgress {
- s := fmt.Sprintf("[%s] %s\n %s\n", nodeType, container.Text(), setupNode.CodeLocation().String())
- writer.Write([]byte(s))
- }
-}
-
-func (spec *Spec) announceSubject(writer io.Writer, subject leafnodes.SubjectNode) {
- if spec.announceProgress {
- nodeType := ""
- switch subject.Type() {
- case types.SpecComponentTypeIt:
- nodeType = "It"
- case types.SpecComponentTypeMeasure:
- nodeType = "Measure"
- }
- s := fmt.Sprintf("[%s] %s\n %s\n", nodeType, subject.Text(), subject.CodeLocation().String())
- writer.Write([]byte(s))
- }
-}
-
-func (spec *Spec) measurementsReport() map[string]*types.SpecMeasurement {
- if !spec.IsMeasurement() || spec.Failed() {
- return map[string]*types.SpecMeasurement{}
- }
-
- return spec.subject.(*leafnodes.MeasureNode).MeasurementsReport()
-}
diff --git a/vendor/github.com/onsi/ginkgo/internal/spec/specs.go b/vendor/github.com/onsi/ginkgo/internal/spec/specs.go
deleted file mode 100644
index 0a24139fb..000000000
--- a/vendor/github.com/onsi/ginkgo/internal/spec/specs.go
+++ /dev/null
@@ -1,144 +0,0 @@
-package spec
-
-import (
- "math/rand"
- "regexp"
- "sort"
- "strings"
-)
-
-type Specs struct {
- specs []*Spec
- names []string
-
- hasProgrammaticFocus bool
- RegexScansFilePath bool
-}
-
-func NewSpecs(specs []*Spec) *Specs {
- names := make([]string, len(specs))
- for i, spec := range specs {
- names[i] = spec.ConcatenatedString()
- }
- return &Specs{
- specs: specs,
- names: names,
- }
-}
-
-func (e *Specs) Specs() []*Spec {
- return e.specs
-}
-
-func (e *Specs) HasProgrammaticFocus() bool {
- return e.hasProgrammaticFocus
-}
-
-func (e *Specs) Shuffle(r *rand.Rand) {
- sort.Sort(e)
- permutation := r.Perm(len(e.specs))
- shuffledSpecs := make([]*Spec, len(e.specs))
- names := make([]string, len(e.specs))
- for i, j := range permutation {
- shuffledSpecs[i] = e.specs[j]
- names[i] = e.names[j]
- }
- e.specs = shuffledSpecs
- e.names = names
-}
-
-func (e *Specs) ApplyFocus(description string, focus, skip []string) {
- if len(focus)+len(skip) == 0 {
- e.applyProgrammaticFocus()
- } else {
- e.applyRegExpFocusAndSkip(description, focus, skip)
- }
-}
-
-func (e *Specs) applyProgrammaticFocus() {
- e.hasProgrammaticFocus = false
- for _, spec := range e.specs {
- if spec.Focused() && !spec.Pending() {
- e.hasProgrammaticFocus = true
- break
- }
- }
-
- if e.hasProgrammaticFocus {
- for _, spec := range e.specs {
- if !spec.Focused() {
- spec.Skip()
- }
- }
- }
-}
-
-// toMatch returns a byte[] to be used by regex matchers. When adding new behaviours to the matching function,
-// this is the place which we append to.
-func (e *Specs) toMatch(description string, i int) []byte {
- if i > len(e.names) {
- return nil
- }
- if e.RegexScansFilePath {
- return []byte(
- description + " " +
- e.names[i] + " " +
- e.specs[i].subject.CodeLocation().FileName)
- } else {
- return []byte(
- description + " " +
- e.names[i])
- }
-}
-
-func (e *Specs) applyRegExpFocusAndSkip(description string, focus, skip []string) {
- var focusFilter, skipFilter *regexp.Regexp
- if len(focus) > 0 {
- focusFilter = regexp.MustCompile(strings.Join(focus, "|"))
- }
- if len(skip) > 0 {
- skipFilter = regexp.MustCompile(strings.Join(skip, "|"))
- }
-
- for i, spec := range e.specs {
- matchesFocus := true
- matchesSkip := false
-
- toMatch := e.toMatch(description, i)
-
- if focusFilter != nil {
- matchesFocus = focusFilter.Match(toMatch)
- }
-
- if skipFilter != nil {
- matchesSkip = skipFilter.Match(toMatch)
- }
-
- if !matchesFocus || matchesSkip {
- spec.Skip()
- }
- }
-}
-
-func (e *Specs) SkipMeasurements() {
- for _, spec := range e.specs {
- if spec.IsMeasurement() {
- spec.Skip()
- }
- }
-}
-
-//sort.Interface
-
-func (e *Specs) Len() int {
- return len(e.specs)
-}
-
-func (e *Specs) Less(i, j int) bool {
- return e.names[i] < e.names[j]
-}
-
-func (e *Specs) Swap(i, j int) {
- e.names[i], e.names[j] = e.names[j], e.names[i]
- e.specs[i], e.specs[j] = e.specs[j], e.specs[i]
-}
diff --git a/vendor/github.com/onsi/ginkgo/internal/spec_iterator/index_computer.go b/vendor/github.com/onsi/ginkgo/internal/spec_iterator/index_computer.go
deleted file mode 100644
index 82272554a..000000000
--- a/vendor/github.com/onsi/ginkgo/internal/spec_iterator/index_computer.go
+++ /dev/null
@@ -1,55 +0,0 @@
-package spec_iterator
-
-func ParallelizedIndexRange(length int, parallelTotal int, parallelNode int) (startIndex int, count int) {
- if length == 0 {
- return 0, 0
- }
-
- // We have more nodes than tests. Trivial case.
- if parallelTotal >= length {
- if parallelNode > length {
- return 0, 0
- } else {
- return parallelNode - 1, 1
- }
- }
-
- // This is the minimum amount of tests that a node will be required to run
- minTestsPerNode := length / parallelTotal
-
- // This is the maximum amount of tests that a node will be required to run
- // The algorithm guarantees that this would be equal to at least the minimum amount
- // and at most one more
- maxTestsPerNode := minTestsPerNode
- if length%parallelTotal != 0 {
- maxTestsPerNode++
- }
-
- // Number of nodes that will have to run the maximum amount of tests per node
- numMaxLoadNodes := length % parallelTotal
-
- // Number of nodes that precede the current node and will have to run the maximum amount of tests per node
- var numPrecedingMaxLoadNodes int
- if parallelNode > numMaxLoadNodes {
- numPrecedingMaxLoadNodes = numMaxLoadNodes
- } else {
- numPrecedingMaxLoadNodes = parallelNode - 1
- }
-
- // Number of nodes that precede the current node and will have to run the minimum amount of tests per node
- var numPrecedingMinLoadNodes int
- if parallelNode <= numMaxLoadNodes {
- numPrecedingMinLoadNodes = 0
- } else {
- numPrecedingMinLoadNodes = parallelNode - numMaxLoadNodes - 1
- }
-
- // Evaluate the test start index and number of tests to run
- startIndex = numPrecedingMaxLoadNodes*maxTestsPerNode + numPrecedingMinLoadNodes*minTestsPerNode
- if parallelNode > numMaxLoadNodes {
- count = minTestsPerNode
- } else {
- count = maxTestsPerNode
- }
- return
-}
diff --git a/vendor/github.com/onsi/ginkgo/internal/spec_iterator/parallel_spec_iterator.go b/vendor/github.com/onsi/ginkgo/internal/spec_iterator/parallel_spec_iterator.go
deleted file mode 100644
index 99f548bca..000000000
--- a/vendor/github.com/onsi/ginkgo/internal/spec_iterator/parallel_spec_iterator.go
+++ /dev/null
@@ -1,59 +0,0 @@
-package spec_iterator
-
-import (
- "encoding/json"
- "fmt"
- "net/http"
-
- "github.com/onsi/ginkgo/internal/spec"
-)
-
-type ParallelIterator struct {
- specs []*spec.Spec
- host string
- client *http.Client
-}
-
-func NewParallelIterator(specs []*spec.Spec, host string) *ParallelIterator {
- return &ParallelIterator{
- specs: specs,
- host: host,
- client: &http.Client{},
- }
-}
-
-func (s *ParallelIterator) Next() (*spec.Spec, error) {
- resp, err := s.client.Get(s.host + "/counter")
- if err != nil {
- return nil, err
- }
- defer resp.Body.Close()
-
- if resp.StatusCode != http.StatusOK {
- return nil, fmt.Errorf("unexpected status code %d", resp.StatusCode)
- }
-
- var counter Counter
- err = json.NewDecoder(resp.Body).Decode(&counter)
- if err != nil {
- return nil, err
- }
-
- if counter.Index >= len(s.specs) {
- return nil, ErrClosed
- }
-
- return s.specs[counter.Index], nil
-}
-
-func (s *ParallelIterator) NumberOfSpecsPriorToIteration() int {
- return len(s.specs)
-}
-
-func (s *ParallelIterator) NumberOfSpecsToProcessIfKnown() (int, bool) {
- return -1, false
-}
-
-func (s *ParallelIterator) NumberOfSpecsThatWillBeRunIfKnown() (int, bool) {
- return -1, false
-}
diff --git a/vendor/github.com/onsi/ginkgo/internal/spec_iterator/serial_spec_iterator.go b/vendor/github.com/onsi/ginkgo/internal/spec_iterator/serial_spec_iterator.go
deleted file mode 100644
index a51c93b8b..000000000
--- a/vendor/github.com/onsi/ginkgo/internal/spec_iterator/serial_spec_iterator.go
+++ /dev/null
@@ -1,45 +0,0 @@
-package spec_iterator
-
-import (
- "github.com/onsi/ginkgo/internal/spec"
-)
-
-type SerialIterator struct {
- specs []*spec.Spec
- index int
-}
-
-func NewSerialIterator(specs []*spec.Spec) *SerialIterator {
- return &SerialIterator{
- specs: specs,
- index: 0,
- }
-}
-
-func (s *SerialIterator) Next() (*spec.Spec, error) {
- if s.index >= len(s.specs) {
- return nil, ErrClosed
- }
-
- spec := s.specs[s.index]
- s.index += 1
- return spec, nil
-}
-
-func (s *SerialIterator) NumberOfSpecsPriorToIteration() int {
- return len(s.specs)
-}
-
-func (s *SerialIterator) NumberOfSpecsToProcessIfKnown() (int, bool) {
- return len(s.specs), true
-}
-
-func (s *SerialIterator) NumberOfSpecsThatWillBeRunIfKnown() (int, bool) {
- count := 0
- for _, s := range s.specs {
- if !s.Skipped() && !s.Pending() {
- count += 1
- }
- }
- return count, true
-}
diff --git a/vendor/github.com/onsi/ginkgo/internal/spec_iterator/sharded_parallel_spec_iterator.go b/vendor/github.com/onsi/ginkgo/internal/spec_iterator/sharded_parallel_spec_iterator.go
deleted file mode 100644
index ad4a3ea3c..000000000
--- a/vendor/github.com/onsi/ginkgo/internal/spec_iterator/sharded_parallel_spec_iterator.go
+++ /dev/null
@@ -1,47 +0,0 @@
-package spec_iterator
-
-import "github.com/onsi/ginkgo/internal/spec"
-
-type ShardedParallelIterator struct {
- specs []*spec.Spec
- index int
- maxIndex int
-}
-
-func NewShardedParallelIterator(specs []*spec.Spec, total int, node int) *ShardedParallelIterator {
- startIndex, count := ParallelizedIndexRange(len(specs), total, node)
-
- return &ShardedParallelIterator{
- specs: specs,
- index: startIndex,
- maxIndex: startIndex + count,
- }
-}
-
-func (s *ShardedParallelIterator) Next() (*spec.Spec, error) {
- if s.index >= s.maxIndex {
- return nil, ErrClosed
- }
-
- spec := s.specs[s.index]
- s.index += 1
- return spec, nil
-}
-
-func (s *ShardedParallelIterator) NumberOfSpecsPriorToIteration() int {
- return len(s.specs)
-}
-
-func (s *ShardedParallelIterator) NumberOfSpecsToProcessIfKnown() (int, bool) {
- return s.maxIndex - s.index, true
-}
-
-func (s *ShardedParallelIterator) NumberOfSpecsThatWillBeRunIfKnown() (int, bool) {
- count := 0
- for i := s.index; i < s.maxIndex; i += 1 {
- if !s.specs[i].Skipped() && !s.specs[i].Pending() {
- count += 1
- }
- }
- return count, true
-}
diff --git a/vendor/github.com/onsi/ginkgo/internal/spec_iterator/spec_iterator.go b/vendor/github.com/onsi/ginkgo/internal/spec_iterator/spec_iterator.go
deleted file mode 100644
index 74bffad64..000000000
--- a/vendor/github.com/onsi/ginkgo/internal/spec_iterator/spec_iterator.go
+++ /dev/null
@@ -1,20 +0,0 @@
-package spec_iterator
-
-import (
- "errors"
-
- "github.com/onsi/ginkgo/internal/spec"
-)
-
-var ErrClosed = errors.New("no more specs to run")
-
-type SpecIterator interface {
- Next() (*spec.Spec, error)
- NumberOfSpecsPriorToIteration() int
- NumberOfSpecsToProcessIfKnown() (int, bool)
- NumberOfSpecsThatWillBeRunIfKnown() (int, bool)
-}
-
-type Counter struct {
- Index int `json:"index"`
-}
diff --git a/vendor/github.com/onsi/ginkgo/internal/specrunner/random_id.go b/vendor/github.com/onsi/ginkgo/internal/specrunner/random_id.go
deleted file mode 100644
index a0b8b62d5..000000000
--- a/vendor/github.com/onsi/ginkgo/internal/specrunner/random_id.go
+++ /dev/null
@@ -1,15 +0,0 @@
-package specrunner
-
-import (
- "crypto/rand"
- "fmt"
-)
-
-func randomID() string {
- b := make([]byte, 8)
- _, err := rand.Read(b)
- if err != nil {
- return ""
- }
- return fmt.Sprintf("%x-%x-%x-%x", b[0:2], b[2:4], b[4:6], b[6:8])
-}
diff --git a/vendor/github.com/onsi/ginkgo/internal/specrunner/spec_runner.go b/vendor/github.com/onsi/ginkgo/internal/specrunner/spec_runner.go
deleted file mode 100644
index c9a0a60d8..000000000
--- a/vendor/github.com/onsi/ginkgo/internal/specrunner/spec_runner.go
+++ /dev/null
@@ -1,411 +0,0 @@
-package specrunner
-
-import (
- "fmt"
- "os"
- "os/signal"
- "sync"
- "syscall"
-
- "github.com/onsi/ginkgo/internal/spec_iterator"
-
- "github.com/onsi/ginkgo/config"
- "github.com/onsi/ginkgo/internal/leafnodes"
- "github.com/onsi/ginkgo/internal/spec"
- Writer "github.com/onsi/ginkgo/internal/writer"
- "github.com/onsi/ginkgo/reporters"
- "github.com/onsi/ginkgo/types"
-
- "time"
-)
-
-type SpecRunner struct {
- description string
- beforeSuiteNode leafnodes.SuiteNode
- iterator spec_iterator.SpecIterator
- afterSuiteNode leafnodes.SuiteNode
- reporters []reporters.Reporter
- startTime time.Time
- suiteID string
- runningSpec *spec.Spec
- writer Writer.WriterInterface
- config config.GinkgoConfigType
- interrupted bool
- processedSpecs []*spec.Spec
- lock *sync.Mutex
-}
-
-func New(description string, beforeSuiteNode leafnodes.SuiteNode, iterator spec_iterator.SpecIterator, afterSuiteNode leafnodes.SuiteNode, reporters []reporters.Reporter, writer Writer.WriterInterface, config config.GinkgoConfigType) *SpecRunner {
- return &SpecRunner{
- description: description,
- beforeSuiteNode: beforeSuiteNode,
- iterator: iterator,
- afterSuiteNode: afterSuiteNode,
- reporters: reporters,
- writer: writer,
- config: config,
- suiteID: randomID(),
- lock: &sync.Mutex{},
- }
-}
-
-func (runner *SpecRunner) Run() bool {
- if runner.config.DryRun {
- runner.performDryRun()
- return true
- }
-
- runner.reportSuiteWillBegin()
- signalRegistered := make(chan struct{})
- go runner.registerForInterrupts(signalRegistered)
- <-signalRegistered
-
- suitePassed := runner.runBeforeSuite()
-
- if suitePassed {
- suitePassed = runner.runSpecs()
- }
-
- runner.blockForeverIfInterrupted()
-
- suitePassed = runner.runAfterSuite() && suitePassed
-
- runner.reportSuiteDidEnd(suitePassed)
-
- return suitePassed
-}
-
-func (runner *SpecRunner) performDryRun() {
- runner.reportSuiteWillBegin()
-
- if runner.beforeSuiteNode != nil {
- summary := runner.beforeSuiteNode.Summary()
- summary.State = types.SpecStatePassed
- runner.reportBeforeSuite(summary)
- }
-
- for {
- spec, err := runner.iterator.Next()
- if err == spec_iterator.ErrClosed {
- break
- }
- if err != nil {
- fmt.Println("failed to iterate over tests:\n" + err.Error())
- break
- }
-
- runner.processedSpecs = append(runner.processedSpecs, spec)
-
- summary := spec.Summary(runner.suiteID)
- runner.reportSpecWillRun(summary)
- if summary.State == types.SpecStateInvalid {
- summary.State = types.SpecStatePassed
- }
- runner.reportSpecDidComplete(summary, false)
- }
-
- if runner.afterSuiteNode != nil {
- summary := runner.afterSuiteNode.Summary()
- summary.State = types.SpecStatePassed
- runner.reportAfterSuite(summary)
- }
-
- runner.reportSuiteDidEnd(true)
-}
-
-func (runner *SpecRunner) runBeforeSuite() bool {
- if runner.beforeSuiteNode == nil || runner.wasInterrupted() {
- return true
- }
-
- runner.writer.Truncate()
- conf := runner.config
- passed := runner.beforeSuiteNode.Run(conf.ParallelNode, conf.ParallelTotal, conf.SyncHost)
- if !passed {
- runner.writer.DumpOut()
- }
- runner.reportBeforeSuite(runner.beforeSuiteNode.Summary())
- return passed
-}
-
-func (runner *SpecRunner) runAfterSuite() bool {
- if runner.afterSuiteNode == nil {
- return true
- }
-
- runner.writer.Truncate()
- conf := runner.config
- passed := runner.afterSuiteNode.Run(conf.ParallelNode, conf.ParallelTotal, conf.SyncHost)
- if !passed {
- runner.writer.DumpOut()
- }
- runner.reportAfterSuite(runner.afterSuiteNode.Summary())
- return passed
-}
-
-func (runner *SpecRunner) runSpecs() bool {
- suiteFailed := false
- skipRemainingSpecs := false
- for {
- spec, err := runner.iterator.Next()
- if err == spec_iterator.ErrClosed {
- break
- }
- if err != nil {
- fmt.Println("failed to iterate over tests:\n" + err.Error())
- suiteFailed = true
- break
- }
-
- runner.processedSpecs = append(runner.processedSpecs, spec)
-
- if runner.wasInterrupted() {
- break
- }
- if skipRemainingSpecs {
- spec.Skip()
- }
-
- if !spec.Skipped() && !spec.Pending() {
- if passed := runner.runSpec(spec); !passed {
- suiteFailed = true
- }
- } else if spec.Pending() && runner.config.FailOnPending {
- runner.reportSpecWillRun(spec.Summary(runner.suiteID))
- suiteFailed = true
- runner.reportSpecDidComplete(spec.Summary(runner.suiteID), spec.Failed())
- } else {
- runner.reportSpecWillRun(spec.Summary(runner.suiteID))
- runner.reportSpecDidComplete(spec.Summary(runner.suiteID), spec.Failed())
- }
-
- if spec.Failed() && runner.config.FailFast {
- skipRemainingSpecs = true
- }
- }
-
- return !suiteFailed
-}
-
-func (runner *SpecRunner) runSpec(spec *spec.Spec) (passed bool) {
- maxAttempts := 1
- if runner.config.FlakeAttempts > 0 {
- // uninitialized configs count as 1
- maxAttempts = runner.config.FlakeAttempts
- }
-
- for i := 0; i < maxAttempts; i++ {
- runner.reportSpecWillRun(spec.Summary(runner.suiteID))
- runner.runningSpec = spec
- spec.Run(runner.writer)
- runner.runningSpec = nil
- runner.reportSpecDidComplete(spec.Summary(runner.suiteID), spec.Failed())
- if !spec.Failed() {
- return true
- }
- }
- return false
-}
-
-func (runner *SpecRunner) CurrentSpecSummary() (*types.SpecSummary, bool) {
- if runner.runningSpec == nil {
- return nil, false
- }
-
- return runner.runningSpec.Summary(runner.suiteID), true
-}
-
-func (runner *SpecRunner) registerForInterrupts(signalRegistered chan struct{}) {
- c := make(chan os.Signal, 1)
- signal.Notify(c, os.Interrupt, syscall.SIGTERM)
- close(signalRegistered)
-
- <-c
- signal.Stop(c)
- runner.markInterrupted()
- go runner.registerForHardInterrupts()
- runner.writer.DumpOutWithHeader(`
-Received interrupt. Emitting contents of GinkgoWriter...
----------------------------------------------------------
-`)
- if runner.afterSuiteNode != nil {
- fmt.Fprint(os.Stderr, `
----------------------------------------------------------
-Received interrupt. Running AfterSuite...
-^C again to terminate immediately
-`)
- runner.runAfterSuite()
- }
- runner.reportSuiteDidEnd(false)
- os.Exit(1)
-}
-
-func (runner *SpecRunner) registerForHardInterrupts() {
- c := make(chan os.Signal, 1)
- signal.Notify(c, os.Interrupt, syscall.SIGTERM)
-
- <-c
- fmt.Fprintln(os.Stderr, "\nReceived second interrupt. Shutting down.")
- os.Exit(1)
-}
-
-func (runner *SpecRunner) blockForeverIfInterrupted() {
- runner.lock.Lock()
- interrupted := runner.interrupted
- runner.lock.Unlock()
-
- if interrupted {
- select {}
- }
-}
-
-func (runner *SpecRunner) markInterrupted() {
- runner.lock.Lock()
- defer runner.lock.Unlock()
- runner.interrupted = true
-}
-
-func (runner *SpecRunner) wasInterrupted() bool {
- runner.lock.Lock()
- defer runner.lock.Unlock()
- return runner.interrupted
-}
-
-func (runner *SpecRunner) reportSuiteWillBegin() {
- runner.startTime = time.Now()
- summary := runner.suiteWillBeginSummary()
- for _, reporter := range runner.reporters {
- reporter.SpecSuiteWillBegin(runner.config, summary)
- }
-}
-
-func (runner *SpecRunner) reportBeforeSuite(summary *types.SetupSummary) {
- for _, reporter := range runner.reporters {
- reporter.BeforeSuiteDidRun(summary)
- }
-}
-
-func (runner *SpecRunner) reportAfterSuite(summary *types.SetupSummary) {
- for _, reporter := range runner.reporters {
- reporter.AfterSuiteDidRun(summary)
- }
-}
-
-func (runner *SpecRunner) reportSpecWillRun(summary *types.SpecSummary) {
- runner.writer.Truncate()
-
- for _, reporter := range runner.reporters {
- reporter.SpecWillRun(summary)
- }
-}
-
-func (runner *SpecRunner) reportSpecDidComplete(summary *types.SpecSummary, failed bool) {
- if len(summary.CapturedOutput) == 0 {
- summary.CapturedOutput = string(runner.writer.Bytes())
- }
- for i := len(runner.reporters) - 1; i >= 1; i-- {
- runner.reporters[i].SpecDidComplete(summary)
- }
-
- if failed {
- runner.writer.DumpOut()
- }
-
- runner.reporters[0].SpecDidComplete(summary)
-}
-
-func (runner *SpecRunner) reportSuiteDidEnd(success bool) {
- summary := runner.suiteDidEndSummary(success)
- summary.RunTime = time.Since(runner.startTime)
- for _, reporter := range runner.reporters {
- reporter.SpecSuiteDidEnd(summary)
- }
-}
-
-func (runner *SpecRunner) countSpecsThatRanSatisfying(filter func(ex *spec.Spec) bool) (count int) {
- count = 0
-
- for _, spec := range runner.processedSpecs {
- if filter(spec) {
- count++
- }
- }
-
- return count
-}
-
-func (runner *SpecRunner) suiteDidEndSummary(success bool) *types.SuiteSummary {
- numberOfSpecsThatWillBeRun := runner.countSpecsThatRanSatisfying(func(ex *spec.Spec) bool {
- return !ex.Skipped() && !ex.Pending()
- })
-
- numberOfPendingSpecs := runner.countSpecsThatRanSatisfying(func(ex *spec.Spec) bool {
- return ex.Pending()
- })
-
- numberOfSkippedSpecs := runner.countSpecsThatRanSatisfying(func(ex *spec.Spec) bool {
- return ex.Skipped()
- })
-
- numberOfPassedSpecs := runner.countSpecsThatRanSatisfying(func(ex *spec.Spec) bool {
- return ex.Passed()
- })
-
- numberOfFlakedSpecs := runner.countSpecsThatRanSatisfying(func(ex *spec.Spec) bool {
- return ex.Flaked()
- })
-
- numberOfFailedSpecs := runner.countSpecsThatRanSatisfying(func(ex *spec.Spec) bool {
- return ex.Failed()
- })
-
- if runner.beforeSuiteNode != nil && !runner.beforeSuiteNode.Passed() && !runner.config.DryRun {
- var known bool
- numberOfSpecsThatWillBeRun, known = runner.iterator.NumberOfSpecsThatWillBeRunIfKnown()
- if !known {
- numberOfSpecsThatWillBeRun = runner.iterator.NumberOfSpecsPriorToIteration()
- }
- numberOfFailedSpecs = numberOfSpecsThatWillBeRun
- }
-
- return &types.SuiteSummary{
- SuiteDescription: runner.description,
- SuiteSucceeded: success,
- SuiteID: runner.suiteID,
-
- NumberOfSpecsBeforeParallelization: runner.iterator.NumberOfSpecsPriorToIteration(),
- NumberOfTotalSpecs: len(runner.processedSpecs),
- NumberOfSpecsThatWillBeRun: numberOfSpecsThatWillBeRun,
- NumberOfPendingSpecs: numberOfPendingSpecs,
- NumberOfSkippedSpecs: numberOfSkippedSpecs,
- NumberOfPassedSpecs: numberOfPassedSpecs,
- NumberOfFailedSpecs: numberOfFailedSpecs,
- NumberOfFlakedSpecs: numberOfFlakedSpecs,
- }
-}
-
-func (runner *SpecRunner) suiteWillBeginSummary() *types.SuiteSummary {
- numTotal, known := runner.iterator.NumberOfSpecsToProcessIfKnown()
- if !known {
- numTotal = -1
- }
-
- numToRun, known := runner.iterator.NumberOfSpecsThatWillBeRunIfKnown()
- if !known {
- numToRun = -1
- }
-
- return &types.SuiteSummary{
- SuiteDescription: runner.description,
- SuiteID: runner.suiteID,
-
- NumberOfSpecsBeforeParallelization: runner.iterator.NumberOfSpecsPriorToIteration(),
- NumberOfTotalSpecs: numTotal,
- NumberOfSpecsThatWillBeRun: numToRun,
- NumberOfPendingSpecs: -1,
- NumberOfSkippedSpecs: -1,
- NumberOfPassedSpecs: -1,
- NumberOfFailedSpecs: -1,
- NumberOfFlakedSpecs: -1,
- }
-}
diff --git a/vendor/github.com/onsi/ginkgo/internal/suite/suite.go b/vendor/github.com/onsi/ginkgo/internal/suite/suite.go
deleted file mode 100644
index b4a83c432..000000000
--- a/vendor/github.com/onsi/ginkgo/internal/suite/suite.go
+++ /dev/null
@@ -1,227 +0,0 @@
-package suite
-
-import (
- "math/rand"
- "net/http"
- "time"
-
- "github.com/onsi/ginkgo/internal/spec_iterator"
-
- "github.com/onsi/ginkgo/config"
- "github.com/onsi/ginkgo/internal/containernode"
- "github.com/onsi/ginkgo/internal/failer"
- "github.com/onsi/ginkgo/internal/leafnodes"
- "github.com/onsi/ginkgo/internal/spec"
- "github.com/onsi/ginkgo/internal/specrunner"
- "github.com/onsi/ginkgo/internal/writer"
- "github.com/onsi/ginkgo/reporters"
- "github.com/onsi/ginkgo/types"
-)
-
-type ginkgoTestingT interface {
- Fail()
-}
-
-type deferredContainerNode struct {
- text string
- body func()
- flag types.FlagType
- codeLocation types.CodeLocation
-}
-
-type Suite struct {
- topLevelContainer *containernode.ContainerNode
- currentContainer *containernode.ContainerNode
-
- deferredContainerNodes []deferredContainerNode
-
- containerIndex int
- beforeSuiteNode leafnodes.SuiteNode
- afterSuiteNode leafnodes.SuiteNode
- runner *specrunner.SpecRunner
- failer *failer.Failer
- running bool
- expandTopLevelNodes bool
-}
-
-func New(failer *failer.Failer) *Suite {
- topLevelContainer := containernode.New("[Top Level]", types.FlagTypeNone, types.CodeLocation{})
-
- return &Suite{
- topLevelContainer: topLevelContainer,
- currentContainer: topLevelContainer,
- failer: failer,
- containerIndex: 1,
- deferredContainerNodes: []deferredContainerNode{},
- }
-}
-
-func (suite *Suite) Run(t ginkgoTestingT, description string, reporters []reporters.Reporter, writer writer.WriterInterface, config config.GinkgoConfigType) (bool, bool) {
- if config.ParallelTotal < 1 {
- panic("ginkgo.parallel.total must be >= 1")
- }
-
- if config.ParallelNode > config.ParallelTotal || config.ParallelNode < 1 {
- panic("ginkgo.parallel.node is one-indexed and must be <= ginkgo.parallel.total")
- }
-
- suite.expandTopLevelNodes = true
- for _, deferredNode := range suite.deferredContainerNodes {
- suite.PushContainerNode(deferredNode.text, deferredNode.body, deferredNode.flag, deferredNode.codeLocation)
- }
-
- r := rand.New(rand.NewSource(config.RandomSeed))
- suite.topLevelContainer.Shuffle(r)
- iterator, hasProgrammaticFocus := suite.generateSpecsIterator(description, config)
- suite.runner = specrunner.New(description, suite.beforeSuiteNode, iterator, suite.afterSuiteNode, reporters, writer, config)
-
- suite.running = true
- success := suite.runner.Run()
- if !success {
- t.Fail()
- }
- return success, hasProgrammaticFocus
-}
-
-func (suite *Suite) generateSpecsIterator(description string, config config.GinkgoConfigType) (spec_iterator.SpecIterator, bool) {
- specsSlice := []*spec.Spec{}
- suite.topLevelContainer.BackPropagateProgrammaticFocus()
- for _, collatedNodes := range suite.topLevelContainer.Collate() {
- specsSlice = append(specsSlice, spec.New(collatedNodes.Subject, collatedNodes.Containers, config.EmitSpecProgress))
- }
-
- specs := spec.NewSpecs(specsSlice)
- specs.RegexScansFilePath = config.RegexScansFilePath
-
- if config.RandomizeAllSpecs {
- specs.Shuffle(rand.New(rand.NewSource(config.RandomSeed)))
- }
-
- specs.ApplyFocus(description, config.FocusStrings, config.SkipStrings)
-
- if config.SkipMeasurements {
- specs.SkipMeasurements()
- }
-
- var iterator spec_iterator.SpecIterator
-
- if config.ParallelTotal > 1 {
- iterator = spec_iterator.NewParallelIterator(specs.Specs(), config.SyncHost)
- resp, err := http.Get(config.SyncHost + "/has-counter")
- if err != nil || resp.StatusCode != http.StatusOK {
- iterator = spec_iterator.NewShardedParallelIterator(specs.Specs(), config.ParallelTotal, config.ParallelNode)
- }
- } else {
- iterator = spec_iterator.NewSerialIterator(specs.Specs())
- }
-
- return iterator, specs.HasProgrammaticFocus()
-}
-
-func (suite *Suite) CurrentRunningSpecSummary() (*types.SpecSummary, bool) {
- if !suite.running {
- return nil, false
- }
- return suite.runner.CurrentSpecSummary()
-}
-
-func (suite *Suite) SetBeforeSuiteNode(body interface{}, codeLocation types.CodeLocation, timeout time.Duration) {
- if suite.beforeSuiteNode != nil {
- panic("You may only call BeforeSuite once!")
- }
- suite.beforeSuiteNode = leafnodes.NewBeforeSuiteNode(body, codeLocation, timeout, suite.failer)
-}
-
-func (suite *Suite) SetAfterSuiteNode(body interface{}, codeLocation types.CodeLocation, timeout time.Duration) {
- if suite.afterSuiteNode != nil {
- panic("You may only call AfterSuite once!")
- }
- suite.afterSuiteNode = leafnodes.NewAfterSuiteNode(body, codeLocation, timeout, suite.failer)
-}
-
-func (suite *Suite) SetSynchronizedBeforeSuiteNode(bodyA interface{}, bodyB interface{}, codeLocation types.CodeLocation, timeout time.Duration) {
- if suite.beforeSuiteNode != nil {
- panic("You may only call BeforeSuite once!")
- }
- suite.beforeSuiteNode = leafnodes.NewSynchronizedBeforeSuiteNode(bodyA, bodyB, codeLocation, timeout, suite.failer)
-}
-
-func (suite *Suite) SetSynchronizedAfterSuiteNode(bodyA interface{}, bodyB interface{}, codeLocation types.CodeLocation, timeout time.Duration) {
- if suite.afterSuiteNode != nil {
- panic("You may only call AfterSuite once!")
- }
- suite.afterSuiteNode = leafnodes.NewSynchronizedAfterSuiteNode(bodyA, bodyB, codeLocation, timeout, suite.failer)
-}
-
-func (suite *Suite) PushContainerNode(text string, body func(), flag types.FlagType, codeLocation types.CodeLocation) {
- /*
- We defer walking the container nodes (which immediately evaluates the `body` function)
- until `RunSpecs` is called. We do this by storing off the deferred container nodes. Then, when
- `RunSpecs` is called we actually go through and add the container nodes to the test structure.
-
- This allows us to defer calling all the `body` functions until _after_ the top level functions
- have been walked, _after_ func init()s have been called, and _after_ `go test` has called `flag.Parse()`.
-
- This allows users to load up configuration information in the `TestX` go test hook just before `RunSpecs`
- is invoked and solves issues like #693 and makes the lifecycle easier to reason about.
-
- */
- if !suite.expandTopLevelNodes {
- suite.deferredContainerNodes = append(suite.deferredContainerNodes, deferredContainerNode{text, body, flag, codeLocation})
- return
- }
-
- container := containernode.New(text, flag, codeLocation)
- suite.currentContainer.PushContainerNode(container)
-
- previousContainer := suite.currentContainer
- suite.currentContainer = container
- suite.containerIndex++
-
- body()
-
- suite.containerIndex--
- suite.currentContainer = previousContainer
-}
-
-func (suite *Suite) PushItNode(text string, body interface{}, flag types.FlagType, codeLocation types.CodeLocation, timeout time.Duration) {
- if suite.running {
- suite.failer.Fail("You may only call It from within a Describe, Context or When", codeLocation)
- }
- suite.currentContainer.PushSubjectNode(leafnodes.NewItNode(text, body, flag, codeLocation, timeout, suite.failer, suite.containerIndex))
-}
-
-func (suite *Suite) PushMeasureNode(text string, body interface{}, flag types.FlagType, codeLocation types.CodeLocation, samples int) {
- if suite.running {
- suite.failer.Fail("You may only call Measure from within a Describe, Context or When", codeLocation)
- }
- suite.currentContainer.PushSubjectNode(leafnodes.NewMeasureNode(text, body, flag, codeLocation, samples, suite.failer, suite.containerIndex))
-}
-
-func (suite *Suite) PushBeforeEachNode(body interface{}, codeLocation types.CodeLocation, timeout time.Duration) {
- if suite.running {
- suite.failer.Fail("You may only call BeforeEach from within a Describe, Context or When", codeLocation)
- }
- suite.currentContainer.PushSetupNode(leafnodes.NewBeforeEachNode(body, codeLocation, timeout, suite.failer, suite.containerIndex))
-}
-
-func (suite *Suite) PushJustBeforeEachNode(body interface{}, codeLocation types.CodeLocation, timeout time.Duration) {
- if suite.running {
- suite.failer.Fail("You may only call JustBeforeEach from within a Describe, Context or When", codeLocation)
- }
- suite.currentContainer.PushSetupNode(leafnodes.NewJustBeforeEachNode(body, codeLocation, timeout, suite.failer, suite.containerIndex))
-}
-
-func (suite *Suite) PushJustAfterEachNode(body interface{}, codeLocation types.CodeLocation, timeout time.Duration) {
- if suite.running {
- suite.failer.Fail("You may only call JustAfterEach from within a Describe or Context", codeLocation)
- }
- suite.currentContainer.PushSetupNode(leafnodes.NewJustAfterEachNode(body, codeLocation, timeout, suite.failer, suite.containerIndex))
-}
-
-func (suite *Suite) PushAfterEachNode(body interface{}, codeLocation types.CodeLocation, timeout time.Duration) {
- if suite.running {
- suite.failer.Fail("You may only call AfterEach from within a Describe, Context or When", codeLocation)
- }
- suite.currentContainer.PushSetupNode(leafnodes.NewAfterEachNode(body, codeLocation, timeout, suite.failer, suite.containerIndex))
-}
diff --git a/vendor/github.com/onsi/ginkgo/internal/testingtproxy/testing_t_proxy.go b/vendor/github.com/onsi/ginkgo/internal/testingtproxy/testing_t_proxy.go
deleted file mode 100644
index 4dcfaf4cd..000000000
--- a/vendor/github.com/onsi/ginkgo/internal/testingtproxy/testing_t_proxy.go
+++ /dev/null
@@ -1,109 +0,0 @@
-package testingtproxy
-
-import (
- "fmt"
- "io"
-)
-
-type failFunc func(message string, callerSkip ...int)
-type skipFunc func(message string, callerSkip ...int)
-type failedFunc func() bool
-type nameFunc func() string
-
-func New(writer io.Writer, fail failFunc, skip skipFunc, failed failedFunc, name nameFunc, offset int) *ginkgoTestingTProxy {
- return &ginkgoTestingTProxy{
- fail: fail,
- offset: offset,
- writer: writer,
- skip: skip,
- failed: failed,
- name: name,
- }
-}
-
-type ginkgoTestingTProxy struct {
- fail failFunc
- skip skipFunc
- failed failedFunc
- name nameFunc
- offset int
- writer io.Writer
-}
-
-func (t *ginkgoTestingTProxy) Cleanup(func()) {
- // No-op
-}
-
-func (t *ginkgoTestingTProxy) Setenv(kev, value string) {
- fmt.Println("Setenv is a noop for Ginkgo at the moment but will be implemented in V2")
- // No-op until Cleanup is implemented
-}
-
-func (t *ginkgoTestingTProxy) Error(args ...interface{}) {
- t.fail(fmt.Sprintln(args...), t.offset)
-}
-
-func (t *ginkgoTestingTProxy) Errorf(format string, args ...interface{}) {
- t.fail(fmt.Sprintf(format, args...), t.offset)
-}
-
-func (t *ginkgoTestingTProxy) Fail() {
- t.fail("failed", t.offset)
-}
-
-func (t *ginkgoTestingTProxy) FailNow() {
- t.fail("failed", t.offset)
-}
-
-func (t *ginkgoTestingTProxy) Failed() bool {
- return t.failed()
-}
-
-func (t *ginkgoTestingTProxy) Fatal(args ...interface{}) {
- t.fail(fmt.Sprintln(args...), t.offset)
-}
-
-func (t *ginkgoTestingTProxy) Fatalf(format string, args ...interface{}) {
- t.fail(fmt.Sprintf(format, args...), t.offset)
-}
-
-func (t *ginkgoTestingTProxy) Helper() {
- // No-op
-}
-
-func (t *ginkgoTestingTProxy) Log(args ...interface{}) {
- fmt.Fprintln(t.writer, args...)
-}
-
-func (t *ginkgoTestingTProxy) Logf(format string, args ...interface{}) {
- t.Log(fmt.Sprintf(format, args...))
-}
-
-func (t *ginkgoTestingTProxy) Name() string {
- return t.name()
-}
-
-func (t *ginkgoTestingTProxy) Parallel() {
- // No-op
-}
-
-func (t *ginkgoTestingTProxy) Skip(args ...interface{}) {
- t.skip(fmt.Sprintln(args...), t.offset)
-}
-
-func (t *ginkgoTestingTProxy) SkipNow() {
- t.skip("skip", t.offset)
-}
-
-func (t *ginkgoTestingTProxy) Skipf(format string, args ...interface{}) {
- t.skip(fmt.Sprintf(format, args...), t.offset)
-}
-
-func (t *ginkgoTestingTProxy) Skipped() bool {
- return false
-}
-
-func (t *ginkgoTestingTProxy) TempDir() string {
- // No-op
- return ""
-}
diff --git a/vendor/github.com/onsi/ginkgo/internal/writer/fake_writer.go b/vendor/github.com/onsi/ginkgo/internal/writer/fake_writer.go
deleted file mode 100644
index 6739c3f60..000000000
--- a/vendor/github.com/onsi/ginkgo/internal/writer/fake_writer.go
+++ /dev/null
@@ -1,36 +0,0 @@
-package writer
-
-type FakeGinkgoWriter struct {
- EventStream []string
-}
-
-func NewFake() *FakeGinkgoWriter {
- return &FakeGinkgoWriter{
- EventStream: []string{},
- }
-}
-
-func (writer *FakeGinkgoWriter) AddEvent(event string) {
- writer.EventStream = append(writer.EventStream, event)
-}
-
-func (writer *FakeGinkgoWriter) Truncate() {
- writer.EventStream = append(writer.EventStream, "TRUNCATE")
-}
-
-func (writer *FakeGinkgoWriter) DumpOut() {
- writer.EventStream = append(writer.EventStream, "DUMP")
-}
-
-func (writer *FakeGinkgoWriter) DumpOutWithHeader(header string) {
- writer.EventStream = append(writer.EventStream, "DUMP_WITH_HEADER: "+header)
-}
-
-func (writer *FakeGinkgoWriter) Bytes() []byte {
- writer.EventStream = append(writer.EventStream, "BYTES")
- return nil
-}
-
-func (writer *FakeGinkgoWriter) Write(data []byte) (n int, err error) {
- return 0, nil
-}
diff --git a/vendor/github.com/onsi/ginkgo/internal/writer/writer.go b/vendor/github.com/onsi/ginkgo/internal/writer/writer.go
deleted file mode 100644
index 98eca3bdd..000000000
--- a/vendor/github.com/onsi/ginkgo/internal/writer/writer.go
+++ /dev/null
@@ -1,89 +0,0 @@
-package writer
-
-import (
- "bytes"
- "io"
- "sync"
-)
-
-type WriterInterface interface {
- io.Writer
-
- Truncate()
- DumpOut()
- DumpOutWithHeader(header string)
- Bytes() []byte
-}
-
-type Writer struct {
- buffer *bytes.Buffer
- outWriter io.Writer
- lock *sync.Mutex
- stream bool
- redirector io.Writer
-}
-
-func New(outWriter io.Writer) *Writer {
- return &Writer{
- buffer: &bytes.Buffer{},
- lock: &sync.Mutex{},
- outWriter: outWriter,
- stream: true,
- }
-}
-
-func (w *Writer) AndRedirectTo(writer io.Writer) {
- w.redirector = writer
-}
-
-func (w *Writer) SetStream(stream bool) {
- w.lock.Lock()
- defer w.lock.Unlock()
- w.stream = stream
-}
-
-func (w *Writer) Write(b []byte) (n int, err error) {
- w.lock.Lock()
- defer w.lock.Unlock()
-
- n, err = w.buffer.Write(b)
- if w.redirector != nil {
- w.redirector.Write(b)
- }
- if w.stream {
- return w.outWriter.Write(b)
- }
- return n, err
-}
-
-func (w *Writer) Truncate() {
- w.lock.Lock()
- defer w.lock.Unlock()
- w.buffer.Reset()
-}
-
-func (w *Writer) DumpOut() {
- w.lock.Lock()
- defer w.lock.Unlock()
- if !w.stream {
- w.buffer.WriteTo(w.outWriter)
- }
-}
-
-func (w *Writer) Bytes() []byte {
- w.lock.Lock()
- defer w.lock.Unlock()
- b := w.buffer.Bytes()
- copied := make([]byte, len(b))
- copy(copied, b)
- return copied
-}
-
-func (w *Writer) DumpOutWithHeader(header string) {
- w.lock.Lock()
- defer w.lock.Unlock()
- if !w.stream && w.buffer.Len() > 0 {
- w.outWriter.Write([]byte(header))
- w.buffer.WriteTo(w.outWriter)
- }
-}
diff --git a/vendor/github.com/onsi/ginkgo/reporters/default_reporter.go b/vendor/github.com/onsi/ginkgo/reporters/default_reporter.go
deleted file mode 100644
index f0c9f6141..000000000
--- a/vendor/github.com/onsi/ginkgo/reporters/default_reporter.go
+++ /dev/null
@@ -1,87 +0,0 @@
-/*
-Ginkgo's Default Reporter
-
-A number of command line flags are available to tweak Ginkgo's default output.
-
-These are documented [here](http://onsi.github.io/ginkgo/#running_tests)
-*/
-package reporters
-
-import (
- "github.com/onsi/ginkgo/config"
- "github.com/onsi/ginkgo/reporters/stenographer"
- "github.com/onsi/ginkgo/types"
-)
-
-type DefaultReporter struct {
- config config.DefaultReporterConfigType
- stenographer stenographer.Stenographer
- specSummaries []*types.SpecSummary
-}
-
-func NewDefaultReporter(config config.DefaultReporterConfigType, stenographer stenographer.Stenographer) *DefaultReporter {
- return &DefaultReporter{
- config: config,
- stenographer: stenographer,
- }
-}
-
-func (reporter *DefaultReporter) SpecSuiteWillBegin(config config.GinkgoConfigType, summary *types.SuiteSummary) {
- reporter.stenographer.AnnounceSuite(summary.SuiteDescription, config.RandomSeed, config.RandomizeAllSpecs, reporter.config.Succinct)
- if config.ParallelTotal > 1 {
- reporter.stenographer.AnnounceParallelRun(config.ParallelNode, config.ParallelTotal, reporter.config.Succinct)
- } else {
- reporter.stenographer.AnnounceNumberOfSpecs(summary.NumberOfSpecsThatWillBeRun, summary.NumberOfTotalSpecs, reporter.config.Succinct)
- }
-}
-
-func (reporter *DefaultReporter) BeforeSuiteDidRun(setupSummary *types.SetupSummary) {
- if setupSummary.State != types.SpecStatePassed {
- reporter.stenographer.AnnounceBeforeSuiteFailure(setupSummary, reporter.config.Succinct, reporter.config.FullTrace)
- }
-}
-
-func (reporter *DefaultReporter) AfterSuiteDidRun(setupSummary *types.SetupSummary) {
- if setupSummary.State != types.SpecStatePassed {
- reporter.stenographer.AnnounceAfterSuiteFailure(setupSummary, reporter.config.Succinct, reporter.config.FullTrace)
- }
-}
-
-func (reporter *DefaultReporter) SpecWillRun(specSummary *types.SpecSummary) {
- if reporter.config.Verbose && !reporter.config.Succinct && specSummary.State != types.SpecStatePending && specSummary.State != types.SpecStateSkipped {
- reporter.stenographer.AnnounceSpecWillRun(specSummary)
- }
-}
-
-func (reporter *DefaultReporter) SpecDidComplete(specSummary *types.SpecSummary) {
- switch specSummary.State {
- case types.SpecStatePassed:
- if specSummary.IsMeasurement {
- reporter.stenographer.AnnounceSuccessfulMeasurement(specSummary, reporter.config.Succinct)
- } else if specSummary.RunTime.Seconds() >= reporter.config.SlowSpecThreshold {
- reporter.stenographer.AnnounceSuccessfulSlowSpec(specSummary, reporter.config.Succinct)
- } else {
- reporter.stenographer.AnnounceSuccessfulSpec(specSummary)
- if reporter.config.ReportPassed {
- reporter.stenographer.AnnounceCapturedOutput(specSummary.CapturedOutput)
- }
- }
- case types.SpecStatePending:
- reporter.stenographer.AnnouncePendingSpec(specSummary, reporter.config.NoisyPendings && !reporter.config.Succinct)
- case types.SpecStateSkipped:
- reporter.stenographer.AnnounceSkippedSpec(specSummary, reporter.config.Succinct || !reporter.config.NoisySkippings, reporter.config.FullTrace)
- case types.SpecStateTimedOut:
- reporter.stenographer.AnnounceSpecTimedOut(specSummary, reporter.config.Succinct, reporter.config.FullTrace)
- case types.SpecStatePanicked:
- reporter.stenographer.AnnounceSpecPanicked(specSummary, reporter.config.Succinct, reporter.config.FullTrace)
- case types.SpecStateFailed:
- reporter.stenographer.AnnounceSpecFailed(specSummary, reporter.config.Succinct, reporter.config.FullTrace)
- }
-
- reporter.specSummaries = append(reporter.specSummaries, specSummary)
-}
-
-func (reporter *DefaultReporter) SpecSuiteDidEnd(summary *types.SuiteSummary) {
- reporter.stenographer.SummarizeFailures(reporter.specSummaries)
- reporter.stenographer.AnnounceSpecRunCompletion(summary, reporter.config.Succinct)
-}
diff --git a/vendor/github.com/onsi/ginkgo/reporters/fake_reporter.go b/vendor/github.com/onsi/ginkgo/reporters/fake_reporter.go
deleted file mode 100644
index 27db47949..000000000
--- a/vendor/github.com/onsi/ginkgo/reporters/fake_reporter.go
+++ /dev/null
@@ -1,59 +0,0 @@
-package reporters
-
-import (
- "github.com/onsi/ginkgo/config"
- "github.com/onsi/ginkgo/types"
-)
-
-//FakeReporter is useful for testing purposes
-type FakeReporter struct {
- Config config.GinkgoConfigType
-
- BeginSummary *types.SuiteSummary
- BeforeSuiteSummary *types.SetupSummary
- SpecWillRunSummaries []*types.SpecSummary
- SpecSummaries []*types.SpecSummary
- AfterSuiteSummary *types.SetupSummary
- EndSummary *types.SuiteSummary
-
- SpecWillRunStub func(specSummary *types.SpecSummary)
- SpecDidCompleteStub func(specSummary *types.SpecSummary)
-}
-
-func NewFakeReporter() *FakeReporter {
- return &FakeReporter{
- SpecWillRunSummaries: make([]*types.SpecSummary, 0),
- SpecSummaries: make([]*types.SpecSummary, 0),
- }
-}
-
-func (fakeR *FakeReporter) SpecSuiteWillBegin(config config.GinkgoConfigType, summary *types.SuiteSummary) {
- fakeR.Config = config
- fakeR.BeginSummary = summary
-}
-
-func (fakeR *FakeReporter) BeforeSuiteDidRun(setupSummary *types.SetupSummary) {
- fakeR.BeforeSuiteSummary = setupSummary
-}
-
-func (fakeR *FakeReporter) SpecWillRun(specSummary *types.SpecSummary) {
- if fakeR.SpecWillRunStub != nil {
- fakeR.SpecWillRunStub(specSummary)
- }
- fakeR.SpecWillRunSummaries = append(fakeR.SpecWillRunSummaries, specSummary)
-}
-
-func (fakeR *FakeReporter) SpecDidComplete(specSummary *types.SpecSummary) {
- if fakeR.SpecDidCompleteStub != nil {
- fakeR.SpecDidCompleteStub(specSummary)
- }
- fakeR.SpecSummaries = append(fakeR.SpecSummaries, specSummary)
-}
-
-func (fakeR *FakeReporter) AfterSuiteDidRun(setupSummary *types.SetupSummary) {
- fakeR.AfterSuiteSummary = setupSummary
-}
-
-func (fakeR *FakeReporter) SpecSuiteDidEnd(summary *types.SuiteSummary) {
- fakeR.EndSummary = summary
-}
diff --git a/vendor/github.com/onsi/ginkgo/reporters/junit_reporter.go b/vendor/github.com/onsi/ginkgo/reporters/junit_reporter.go
deleted file mode 100644
index 01ddca6e1..000000000
--- a/vendor/github.com/onsi/ginkgo/reporters/junit_reporter.go
+++ /dev/null
@@ -1,178 +0,0 @@
-/*
-
-JUnit XML Reporter for Ginkgo
-
-For usage instructions: http://onsi.github.io/ginkgo/#generating_junit_xml_output
-
-*/
-
-package reporters
-
-import (
- "encoding/xml"
- "fmt"
- "math"
- "os"
- "path/filepath"
- "strings"
-
- "github.com/onsi/ginkgo/config"
- "github.com/onsi/ginkgo/types"
-)
-
-type JUnitTestSuite struct {
- XMLName xml.Name `xml:"testsuite"`
- TestCases []JUnitTestCase `xml:"testcase"`
- Name string `xml:"name,attr"`
- Tests int `xml:"tests,attr"`
- Failures int `xml:"failures,attr"`
- Errors int `xml:"errors,attr"`
- Time float64 `xml:"time,attr"`
-}
-
-type JUnitTestCase struct {
- Name string `xml:"name,attr"`
- ClassName string `xml:"classname,attr"`
- FailureMessage *JUnitFailureMessage `xml:"failure,omitempty"`
- Skipped *JUnitSkipped `xml:"skipped,omitempty"`
- Time float64 `xml:"time,attr"`
- SystemOut string `xml:"system-out,omitempty"`
-}
-
-type JUnitFailureMessage struct {
- Type string `xml:"type,attr"`
- Message string `xml:",chardata"`
-}
-
-type JUnitSkipped struct {
- Message string `xml:",chardata"`
-}
-
-type JUnitReporter struct {
- suite JUnitTestSuite
- filename string
- testSuiteName string
- ReporterConfig config.DefaultReporterConfigType
-}
-
-//NewJUnitReporter creates a new JUnit XML reporter. The XML will be stored in the passed in filename.
-func NewJUnitReporter(filename string) *JUnitReporter {
- return &JUnitReporter{
- filename: filename,
- }
-}
-
-func (reporter *JUnitReporter) SpecSuiteWillBegin(ginkgoConfig config.GinkgoConfigType, summary *types.SuiteSummary) {
- reporter.suite = JUnitTestSuite{
- Name: summary.SuiteDescription,
- TestCases: []JUnitTestCase{},
- }
- reporter.testSuiteName = summary.SuiteDescription
- reporter.ReporterConfig = config.DefaultReporterConfig
-}
-
-func (reporter *JUnitReporter) SpecWillRun(specSummary *types.SpecSummary) {
-}
-
-func (reporter *JUnitReporter) BeforeSuiteDidRun(setupSummary *types.SetupSummary) {
- reporter.handleSetupSummary("BeforeSuite", setupSummary)
-}
-
-func (reporter *JUnitReporter) AfterSuiteDidRun(setupSummary *types.SetupSummary) {
- reporter.handleSetupSummary("AfterSuite", setupSummary)
-}
-
-func failureMessage(failure types.SpecFailure) string {
- return fmt.Sprintf("%s\n%s\n%s", failure.ComponentCodeLocation.String(), failure.Message, failure.Location.String())
-}
-
-func (reporter *JUnitReporter) handleSetupSummary(name string, setupSummary *types.SetupSummary) {
- if setupSummary.State != types.SpecStatePassed {
- testCase := JUnitTestCase{
- Name: name,
- ClassName: reporter.testSuiteName,
- }
-
- testCase.FailureMessage = &JUnitFailureMessage{
- Type: reporter.failureTypeForState(setupSummary.State),
- Message: failureMessage(setupSummary.Failure),
- }
- testCase.SystemOut = setupSummary.CapturedOutput
- testCase.Time = setupSummary.RunTime.Seconds()
- reporter.suite.TestCases = append(reporter.suite.TestCases, testCase)
- }
-}
-
-func (reporter *JUnitReporter) SpecDidComplete(specSummary *types.SpecSummary) {
- testCase := JUnitTestCase{
- Name: strings.Join(specSummary.ComponentTexts[1:], " "),
- ClassName: reporter.testSuiteName,
- }
- if reporter.ReporterConfig.ReportPassed && specSummary.State == types.SpecStatePassed {
- testCase.SystemOut = specSummary.CapturedOutput
- }
- if specSummary.State == types.SpecStateFailed || specSummary.State == types.SpecStateTimedOut || specSummary.State == types.SpecStatePanicked {
- testCase.FailureMessage = &JUnitFailureMessage{
- Type: reporter.failureTypeForState(specSummary.State),
- Message: failureMessage(specSummary.Failure),
- }
- if specSummary.State == types.SpecStatePanicked {
- testCase.FailureMessage.Message += fmt.Sprintf("\n\nPanic: %s\n\nFull stack:\n%s",
- specSummary.Failure.ForwardedPanic,
- specSummary.Failure.Location.FullStackTrace)
- }
- testCase.SystemOut = specSummary.CapturedOutput
- }
- if specSummary.State == types.SpecStateSkipped || specSummary.State == types.SpecStatePending {
- testCase.Skipped = &JUnitSkipped{}
- if specSummary.Failure.Message != "" {
- testCase.Skipped.Message = failureMessage(specSummary.Failure)
- }
- }
- testCase.Time = specSummary.RunTime.Seconds()
- reporter.suite.TestCases = append(reporter.suite.TestCases, testCase)
-}
-
-func (reporter *JUnitReporter) SpecSuiteDidEnd(summary *types.SuiteSummary) {
- reporter.suite.Tests = summary.NumberOfSpecsThatWillBeRun
- reporter.suite.Time = math.Trunc(summary.RunTime.Seconds()*1000) / 1000
- reporter.suite.Failures = summary.NumberOfFailedSpecs
- reporter.suite.Errors = 0
- if reporter.ReporterConfig.ReportFile != "" {
- reporter.filename = reporter.ReporterConfig.ReportFile
- fmt.Printf("\nJUnit path was configured: %s\n", reporter.filename)
- }
- filePath, _ := filepath.Abs(reporter.filename)
- dirPath := filepath.Dir(filePath)
- err := os.MkdirAll(dirPath, os.ModePerm)
- if err != nil {
- fmt.Printf("\nFailed to create JUnit directory: %s\n\t%s", filePath, err.Error())
- }
- file, err := os.Create(filePath)
- if err != nil {
- fmt.Fprintf(os.Stderr, "Failed to create JUnit report file: %s\n\t%s", filePath, err.Error())
- }
- defer file.Close()
- file.WriteString(xml.Header)
- encoder := xml.NewEncoder(file)
- encoder.Indent(" ", " ")
- err = encoder.Encode(reporter.suite)
- if err == nil {
- fmt.Fprintf(os.Stdout, "\nJUnit report was created: %s\n", filePath)
- } else {
- fmt.Fprintf(os.Stderr,"\nFailed to generate JUnit report data:\n\t%s", err.Error())
- }
-}
-
-func (reporter *JUnitReporter) failureTypeForState(state types.SpecState) string {
- switch state {
- case types.SpecStateFailed:
- return "Failure"
- case types.SpecStateTimedOut:
- return "Timeout"
- case types.SpecStatePanicked:
- return "Panic"
- default:
- return ""
- }
-}
diff --git a/vendor/github.com/onsi/ginkgo/reporters/reporter.go b/vendor/github.com/onsi/ginkgo/reporters/reporter.go
deleted file mode 100644
index 348b9dfce..000000000
--- a/vendor/github.com/onsi/ginkgo/reporters/reporter.go
+++ /dev/null
@@ -1,15 +0,0 @@
-package reporters
-
-import (
- "github.com/onsi/ginkgo/config"
- "github.com/onsi/ginkgo/types"
-)
-
-type Reporter interface {
- SpecSuiteWillBegin(config config.GinkgoConfigType, summary *types.SuiteSummary)
- BeforeSuiteDidRun(setupSummary *types.SetupSummary)
- SpecWillRun(specSummary *types.SpecSummary)
- SpecDidComplete(specSummary *types.SpecSummary)
- AfterSuiteDidRun(setupSummary *types.SetupSummary)
- SpecSuiteDidEnd(summary *types.SuiteSummary)
-}
diff --git a/vendor/github.com/onsi/ginkgo/reporters/stenographer/console_logging.go b/vendor/github.com/onsi/ginkgo/reporters/stenographer/console_logging.go
deleted file mode 100644
index 45b8f8869..000000000
--- a/vendor/github.com/onsi/ginkgo/reporters/stenographer/console_logging.go
+++ /dev/null
@@ -1,64 +0,0 @@
-package stenographer
-
-import (
- "fmt"
- "strings"
-)
-
-func (s *consoleStenographer) colorize(colorCode string, format string, args ...interface{}) string {
- var out string
-
- if len(args) > 0 {
- out = fmt.Sprintf(format, args...)
- } else {
- out = format
- }
-
- if s.color {
- return fmt.Sprintf("%s%s%s", colorCode, out, defaultStyle)
- } else {
- return out
- }
-}
-
-func (s *consoleStenographer) printBanner(text string, bannerCharacter string) {
- fmt.Fprintln(s.w, text)
- fmt.Fprintln(s.w, strings.Repeat(bannerCharacter, len(text)))
-}
-
-func (s *consoleStenographer) printNewLine() {
- fmt.Fprintln(s.w, "")
-}
-
-func (s *consoleStenographer) printDelimiter() {
- fmt.Fprintln(s.w, s.colorize(grayColor, "%s", strings.Repeat("-", 30)))
-}
-
-func (s *consoleStenographer) print(indentation int, format string, args ...interface{}) {
- fmt.Fprint(s.w, s.indent(indentation, format, args...))
-}
-
-func (s *consoleStenographer) println(indentation int, format string, args ...interface{}) {
- fmt.Fprintln(s.w, s.indent(indentation, format, args...))
-}
-
-func (s *consoleStenographer) indent(indentation int, format string, args ...interface{}) string {
- var text string
-
- if len(args) > 0 {
- text = fmt.Sprintf(format, args...)
- } else {
- text = format
- }
-
- stringArray := strings.Split(text, "\n")
- padding := ""
- if indentation >= 0 {
- padding = strings.Repeat(" ", indentation)
- }
- for i, s := range stringArray {
- stringArray[i] = fmt.Sprintf("%s%s", padding, s)
- }
-
- return strings.Join(stringArray, "\n")
-}
diff --git a/vendor/github.com/onsi/ginkgo/reporters/stenographer/fake_stenographer.go b/vendor/github.com/onsi/ginkgo/reporters/stenographer/fake_stenographer.go
deleted file mode 100644
index 1aa5b9db0..000000000
--- a/vendor/github.com/onsi/ginkgo/reporters/stenographer/fake_stenographer.go
+++ /dev/null
@@ -1,142 +0,0 @@
-package stenographer
-
-import (
- "sync"
-
- "github.com/onsi/ginkgo/types"
-)
-
-func NewFakeStenographerCall(method string, args ...interface{}) FakeStenographerCall {
- return FakeStenographerCall{
- Method: method,
- Args: args,
- }
-}
-
-type FakeStenographer struct {
- calls []FakeStenographerCall
- lock *sync.Mutex
-}
-
-type FakeStenographerCall struct {
- Method string
- Args []interface{}
-}
-
-func NewFakeStenographer() *FakeStenographer {
- stenographer := &FakeStenographer{
- lock: &sync.Mutex{},
- }
- stenographer.Reset()
- return stenographer
-}
-
-func (stenographer *FakeStenographer) Calls() []FakeStenographerCall {
- stenographer.lock.Lock()
- defer stenographer.lock.Unlock()
-
- return stenographer.calls
-}
-
-func (stenographer *FakeStenographer) Reset() {
- stenographer.lock.Lock()
- defer stenographer.lock.Unlock()
-
- stenographer.calls = make([]FakeStenographerCall, 0)
-}
-
-func (stenographer *FakeStenographer) CallsTo(method string) []FakeStenographerCall {
- stenographer.lock.Lock()
- defer stenographer.lock.Unlock()
-
- results := make([]FakeStenographerCall, 0)
- for _, call := range stenographer.calls {
- if call.Method == method {
- results = append(results, call)
- }
- }
-
- return results
-}
-
-func (stenographer *FakeStenographer) registerCall(method string, args ...interface{}) {
- stenographer.lock.Lock()
- defer stenographer.lock.Unlock()
-
- stenographer.calls = append(stenographer.calls, NewFakeStenographerCall(method, args...))
-}
-
-func (stenographer *FakeStenographer) AnnounceSuite(description string, randomSeed int64, randomizingAll bool, succinct bool) {
- stenographer.registerCall("AnnounceSuite", description, randomSeed, randomizingAll, succinct)
-}
-
-func (stenographer *FakeStenographer) AnnounceAggregatedParallelRun(nodes int, succinct bool) {
- stenographer.registerCall("AnnounceAggregatedParallelRun", nodes, succinct)
-}
-
-func (stenographer *FakeStenographer) AnnounceParallelRun(node int, nodes int, succinct bool) {
- stenographer.registerCall("AnnounceParallelRun", node, nodes, succinct)
-}
-
-func (stenographer *FakeStenographer) AnnounceNumberOfSpecs(specsToRun int, total int, succinct bool) {
- stenographer.registerCall("AnnounceNumberOfSpecs", specsToRun, total, succinct)
-}
-
-func (stenographer *FakeStenographer) AnnounceTotalNumberOfSpecs(total int, succinct bool) {
- stenographer.registerCall("AnnounceTotalNumberOfSpecs", total, succinct)
-}
-
-func (stenographer *FakeStenographer) AnnounceSpecRunCompletion(summary *types.SuiteSummary, succinct bool) {
- stenographer.registerCall("AnnounceSpecRunCompletion", summary, succinct)
-}
-
-func (stenographer *FakeStenographer) AnnounceSpecWillRun(spec *types.SpecSummary) {
- stenographer.registerCall("AnnounceSpecWillRun", spec)
-}
-
-func (stenographer *FakeStenographer) AnnounceBeforeSuiteFailure(summary *types.SetupSummary, succinct bool, fullTrace bool) {
- stenographer.registerCall("AnnounceBeforeSuiteFailure", summary, succinct, fullTrace)
-}
-
-func (stenographer *FakeStenographer) AnnounceAfterSuiteFailure(summary *types.SetupSummary, succinct bool, fullTrace bool) {
- stenographer.registerCall("AnnounceAfterSuiteFailure", summary, succinct, fullTrace)
-}
-func (stenographer *FakeStenographer) AnnounceCapturedOutput(output string) {
- stenographer.registerCall("AnnounceCapturedOutput", output)
-}
-
-func (stenographer *FakeStenographer) AnnounceSuccessfulSpec(spec *types.SpecSummary) {
- stenographer.registerCall("AnnounceSuccessfulSpec", spec)
-}
-
-func (stenographer *FakeStenographer) AnnounceSuccessfulSlowSpec(spec *types.SpecSummary, succinct bool) {
- stenographer.registerCall("AnnounceSuccessfulSlowSpec", spec, succinct)
-}
-
-func (stenographer *FakeStenographer) AnnounceSuccessfulMeasurement(spec *types.SpecSummary, succinct bool) {
- stenographer.registerCall("AnnounceSuccessfulMeasurement", spec, succinct)
-}
-
-func (stenographer *FakeStenographer) AnnouncePendingSpec(spec *types.SpecSummary, noisy bool) {
- stenographer.registerCall("AnnouncePendingSpec", spec, noisy)
-}
-
-func (stenographer *FakeStenographer) AnnounceSkippedSpec(spec *types.SpecSummary, succinct bool, fullTrace bool) {
- stenographer.registerCall("AnnounceSkippedSpec", spec, succinct, fullTrace)
-}
-
-func (stenographer *FakeStenographer) AnnounceSpecTimedOut(spec *types.SpecSummary, succinct bool, fullTrace bool) {
- stenographer.registerCall("AnnounceSpecTimedOut", spec, succinct, fullTrace)
-}
-
-func (stenographer *FakeStenographer) AnnounceSpecPanicked(spec *types.SpecSummary, succinct bool, fullTrace bool) {
- stenographer.registerCall("AnnounceSpecPanicked", spec, succinct, fullTrace)
-}
-
-func (stenographer *FakeStenographer) AnnounceSpecFailed(spec *types.SpecSummary, succinct bool, fullTrace bool) {
- stenographer.registerCall("AnnounceSpecFailed", spec, succinct, fullTrace)
-}
-
-func (stenographer *FakeStenographer) SummarizeFailures(summaries []*types.SpecSummary) {
- stenographer.registerCall("SummarizeFailures", summaries)
-}
diff --git a/vendor/github.com/onsi/ginkgo/reporters/stenographer/stenographer.go b/vendor/github.com/onsi/ginkgo/reporters/stenographer/stenographer.go
deleted file mode 100644
index 638d6fbb1..000000000
--- a/vendor/github.com/onsi/ginkgo/reporters/stenographer/stenographer.go
+++ /dev/null
@@ -1,572 +0,0 @@
-/*
-The stenographer is used by Ginkgo's reporters to generate output.
-
-Move along, nothing to see here.
-*/
-
-package stenographer
-
-import (
- "fmt"
- "io"
- "runtime"
- "strings"
-
- "github.com/onsi/ginkgo/types"
-)
-
-const defaultStyle = "\x1b[0m"
-const boldStyle = "\x1b[1m"
-const redColor = "\x1b[91m"
-const greenColor = "\x1b[32m"
-const yellowColor = "\x1b[33m"
-const cyanColor = "\x1b[36m"
-const grayColor = "\x1b[90m"
-const lightGrayColor = "\x1b[37m"
-
-type cursorStateType int
-
-const (
- cursorStateTop cursorStateType = iota
- cursorStateStreaming
- cursorStateMidBlock
- cursorStateEndBlock
-)
-
-type Stenographer interface {
- AnnounceSuite(description string, randomSeed int64, randomizingAll bool, succinct bool)
- AnnounceAggregatedParallelRun(nodes int, succinct bool)
- AnnounceParallelRun(node int, nodes int, succinct bool)
- AnnounceTotalNumberOfSpecs(total int, succinct bool)
- AnnounceNumberOfSpecs(specsToRun int, total int, succinct bool)
- AnnounceSpecRunCompletion(summary *types.SuiteSummary, succinct bool)
-
- AnnounceSpecWillRun(spec *types.SpecSummary)
- AnnounceBeforeSuiteFailure(summary *types.SetupSummary, succinct bool, fullTrace bool)
- AnnounceAfterSuiteFailure(summary *types.SetupSummary, succinct bool, fullTrace bool)
-
- AnnounceCapturedOutput(output string)
-
- AnnounceSuccessfulSpec(spec *types.SpecSummary)
- AnnounceSuccessfulSlowSpec(spec *types.SpecSummary, succinct bool)
- AnnounceSuccessfulMeasurement(spec *types.SpecSummary, succinct bool)
-
- AnnouncePendingSpec(spec *types.SpecSummary, noisy bool)
- AnnounceSkippedSpec(spec *types.SpecSummary, succinct bool, fullTrace bool)
-
- AnnounceSpecTimedOut(spec *types.SpecSummary, succinct bool, fullTrace bool)
- AnnounceSpecPanicked(spec *types.SpecSummary, succinct bool, fullTrace bool)
- AnnounceSpecFailed(spec *types.SpecSummary, succinct bool, fullTrace bool)
-
- SummarizeFailures(summaries []*types.SpecSummary)
-}
-
-func New(color bool, enableFlakes bool, writer io.Writer) Stenographer {
- denoter := "•"
- if runtime.GOOS == "windows" {
- denoter = "+"
- }
- return &consoleStenographer{
- color: color,
- denoter: denoter,
- cursorState: cursorStateTop,
- enableFlakes: enableFlakes,
- w: writer,
- }
-}
-
-type consoleStenographer struct {
- color bool
- denoter string
- cursorState cursorStateType
- enableFlakes bool
- w io.Writer
-}
-
-var alternatingColors = []string{defaultStyle, grayColor}
-
-func (s *consoleStenographer) AnnounceSuite(description string, randomSeed int64, randomizingAll bool, succinct bool) {
- if succinct {
- s.print(0, "[%d] %s ", randomSeed, s.colorize(boldStyle, description))
- return
- }
- s.printBanner(fmt.Sprintf("Running Suite: %s", description), "=")
- s.print(0, "Random Seed: %s", s.colorize(boldStyle, "%d", randomSeed))
- if randomizingAll {
- s.print(0, " - Will randomize all specs")
- }
- s.printNewLine()
-}
-
-func (s *consoleStenographer) AnnounceParallelRun(node int, nodes int, succinct bool) {
- if succinct {
- s.print(0, "- node #%d ", node)
- return
- }
- s.println(0,
- "Parallel test node %s/%s.",
- s.colorize(boldStyle, "%d", node),
- s.colorize(boldStyle, "%d", nodes),
- )
- s.printNewLine()
-}
-
-func (s *consoleStenographer) AnnounceAggregatedParallelRun(nodes int, succinct bool) {
- if succinct {
- s.print(0, "- %d nodes ", nodes)
- return
- }
- s.println(0,
- "Running in parallel across %s nodes",
- s.colorize(boldStyle, "%d", nodes),
- )
- s.printNewLine()
-}
-
-func (s *consoleStenographer) AnnounceNumberOfSpecs(specsToRun int, total int, succinct bool) {
- if succinct {
- s.print(0, "- %d/%d specs ", specsToRun, total)
- s.stream()
- return
- }
- s.println(0,
- "Will run %s of %s specs",
- s.colorize(boldStyle, "%d", specsToRun),
- s.colorize(boldStyle, "%d", total),
- )
-
- s.printNewLine()
-}
-
-func (s *consoleStenographer) AnnounceTotalNumberOfSpecs(total int, succinct bool) {
- if succinct {
- s.print(0, "- %d specs ", total)
- s.stream()
- return
- }
- s.println(0,
- "Will run %s specs",
- s.colorize(boldStyle, "%d", total),
- )
-
- s.printNewLine()
-}
-
-func (s *consoleStenographer) AnnounceSpecRunCompletion(summary *types.SuiteSummary, succinct bool) {
- if succinct && summary.SuiteSucceeded {
- s.print(0, " %s %s ", s.colorize(greenColor, "SUCCESS!"), summary.RunTime)
- return
- }
- s.printNewLine()
- color := greenColor
- if !summary.SuiteSucceeded {
- color = redColor
- }
- s.println(0, s.colorize(boldStyle+color, "Ran %d of %d Specs in %.3f seconds", summary.NumberOfSpecsThatWillBeRun, summary.NumberOfTotalSpecs, summary.RunTime.Seconds()))
-
- status := ""
- if summary.SuiteSucceeded {
- status = s.colorize(boldStyle+greenColor, "SUCCESS!")
- } else {
- status = s.colorize(boldStyle+redColor, "FAIL!")
- }
-
- flakes := ""
- if s.enableFlakes {
- flakes = " | " + s.colorize(yellowColor+boldStyle, "%d Flaked", summary.NumberOfFlakedSpecs)
- }
-
- s.print(0,
- "%s -- %s | %s | %s | %s\n",
- status,
- s.colorize(greenColor+boldStyle, "%d Passed", summary.NumberOfPassedSpecs),
- s.colorize(redColor+boldStyle, "%d Failed", summary.NumberOfFailedSpecs)+flakes,
- s.colorize(yellowColor+boldStyle, "%d Pending", summary.NumberOfPendingSpecs),
- s.colorize(cyanColor+boldStyle, "%d Skipped", summary.NumberOfSkippedSpecs),
- )
-}
-
-func (s *consoleStenographer) AnnounceSpecWillRun(spec *types.SpecSummary) {
- s.startBlock()
- for i, text := range spec.ComponentTexts[1 : len(spec.ComponentTexts)-1] {
- s.print(0, s.colorize(alternatingColors[i%2], text)+" ")
- }
-
- indentation := 0
- if len(spec.ComponentTexts) > 2 {
- indentation = 1
- s.printNewLine()
- }
- index := len(spec.ComponentTexts) - 1
- s.print(indentation, s.colorize(boldStyle, spec.ComponentTexts[index]))
- s.printNewLine()
- s.print(indentation, s.colorize(lightGrayColor, spec.ComponentCodeLocations[index].String()))
- s.printNewLine()
- s.midBlock()
-}
-
-func (s *consoleStenographer) AnnounceBeforeSuiteFailure(summary *types.SetupSummary, succinct bool, fullTrace bool) {
- s.announceSetupFailure("BeforeSuite", summary, succinct, fullTrace)
-}
-
-func (s *consoleStenographer) AnnounceAfterSuiteFailure(summary *types.SetupSummary, succinct bool, fullTrace bool) {
- s.announceSetupFailure("AfterSuite", summary, succinct, fullTrace)
-}
-
-func (s *consoleStenographer) announceSetupFailure(name string, summary *types.SetupSummary, succinct bool, fullTrace bool) {
- s.startBlock()
- var message string
- switch summary.State {
- case types.SpecStateFailed:
- message = "Failure"
- case types.SpecStatePanicked:
- message = "Panic"
- case types.SpecStateTimedOut:
- message = "Timeout"
- }
-
- s.println(0, s.colorize(redColor+boldStyle, "%s [%.3f seconds]", message, summary.RunTime.Seconds()))
-
- indentation := s.printCodeLocationBlock([]string{name}, []types.CodeLocation{summary.CodeLocation}, summary.ComponentType, 0, summary.State, true)
-
- s.printNewLine()
- s.printFailure(indentation, summary.State, summary.Failure, fullTrace)
-
- s.endBlock()
-}
-
-func (s *consoleStenographer) AnnounceCapturedOutput(output string) {
- if output == "" {
- return
- }
-
- s.startBlock()
- s.println(0, output)
- s.midBlock()
-}
-
-func (s *consoleStenographer) AnnounceSuccessfulSpec(spec *types.SpecSummary) {
- s.print(0, s.colorize(greenColor, s.denoter))
- s.stream()
-}
-
-func (s *consoleStenographer) AnnounceSuccessfulSlowSpec(spec *types.SpecSummary, succinct bool) {
- s.printBlockWithMessage(
- s.colorize(greenColor, "%s [SLOW TEST:%.3f seconds]", s.denoter, spec.RunTime.Seconds()),
- "",
- spec,
- succinct,
- )
-}
-
-func (s *consoleStenographer) AnnounceSuccessfulMeasurement(spec *types.SpecSummary, succinct bool) {
- s.printBlockWithMessage(
- s.colorize(greenColor, "%s [MEASUREMENT]", s.denoter),
- s.measurementReport(spec, succinct),
- spec,
- succinct,
- )
-}
-
-func (s *consoleStenographer) AnnouncePendingSpec(spec *types.SpecSummary, noisy bool) {
- if noisy {
- s.printBlockWithMessage(
- s.colorize(yellowColor, "P [PENDING]"),
- "",
- spec,
- false,
- )
- } else {
- s.print(0, s.colorize(yellowColor, "P"))
- s.stream()
- }
-}
-
-func (s *consoleStenographer) AnnounceSkippedSpec(spec *types.SpecSummary, succinct bool, fullTrace bool) {
- // Skips at runtime will have a non-empty spec.Failure. All others should be succinct.
- if succinct || spec.Failure == (types.SpecFailure{}) {
- s.print(0, s.colorize(cyanColor, "S"))
- s.stream()
- } else {
- s.startBlock()
- s.println(0, s.colorize(cyanColor+boldStyle, "S [SKIPPING]%s [%.3f seconds]", s.failureContext(spec.Failure.ComponentType), spec.RunTime.Seconds()))
-
- indentation := s.printCodeLocationBlock(spec.ComponentTexts, spec.ComponentCodeLocations, spec.Failure.ComponentType, spec.Failure.ComponentIndex, spec.State, succinct)
-
- s.printNewLine()
- s.printSkip(indentation, spec.Failure)
- s.endBlock()
- }
-}
-
-func (s *consoleStenographer) AnnounceSpecTimedOut(spec *types.SpecSummary, succinct bool, fullTrace bool) {
- s.printSpecFailure(fmt.Sprintf("%s... Timeout", s.denoter), spec, succinct, fullTrace)
-}
-
-func (s *consoleStenographer) AnnounceSpecPanicked(spec *types.SpecSummary, succinct bool, fullTrace bool) {
- s.printSpecFailure(fmt.Sprintf("%s! Panic", s.denoter), spec, succinct, fullTrace)
-}
-
-func (s *consoleStenographer) AnnounceSpecFailed(spec *types.SpecSummary, succinct bool, fullTrace bool) {
- s.printSpecFailure(fmt.Sprintf("%s Failure", s.denoter), spec, succinct, fullTrace)
-}
-
-func (s *consoleStenographer) SummarizeFailures(summaries []*types.SpecSummary) {
- failingSpecs := []*types.SpecSummary{}
-
- for _, summary := range summaries {
- if summary.HasFailureState() {
- failingSpecs = append(failingSpecs, summary)
- }
- }
-
- if len(failingSpecs) == 0 {
- return
- }
-
- s.printNewLine()
- s.printNewLine()
- plural := "s"
- if len(failingSpecs) == 1 {
- plural = ""
- }
- s.println(0, s.colorize(redColor+boldStyle, "Summarizing %d Failure%s:", len(failingSpecs), plural))
- for _, summary := range failingSpecs {
- s.printNewLine()
- if summary.HasFailureState() {
- if summary.TimedOut() {
- s.print(0, s.colorize(redColor+boldStyle, "[Timeout...] "))
- } else if summary.Panicked() {
- s.print(0, s.colorize(redColor+boldStyle, "[Panic!] "))
- } else if summary.Failed() {
- s.print(0, s.colorize(redColor+boldStyle, "[Fail] "))
- }
- s.printSpecContext(summary.ComponentTexts, summary.ComponentCodeLocations, summary.Failure.ComponentType, summary.Failure.ComponentIndex, summary.State, true)
- s.printNewLine()
- s.println(0, s.colorize(lightGrayColor, summary.Failure.Location.String()))
- }
- }
-}
-
-func (s *consoleStenographer) startBlock() {
- if s.cursorState == cursorStateStreaming {
- s.printNewLine()
- s.printDelimiter()
- } else if s.cursorState == cursorStateMidBlock {
- s.printNewLine()
- }
-}
-
-func (s *consoleStenographer) midBlock() {
- s.cursorState = cursorStateMidBlock
-}
-
-func (s *consoleStenographer) endBlock() {
- s.printDelimiter()
- s.cursorState = cursorStateEndBlock
-}
-
-func (s *consoleStenographer) stream() {
- s.cursorState = cursorStateStreaming
-}
-
-func (s *consoleStenographer) printBlockWithMessage(header string, message string, spec *types.SpecSummary, succinct bool) {
- s.startBlock()
- s.println(0, header)
-
- indentation := s.printCodeLocationBlock(spec.ComponentTexts, spec.ComponentCodeLocations, types.SpecComponentTypeInvalid, 0, spec.State, succinct)
-
- if message != "" {
- s.printNewLine()
- s.println(indentation, message)
- }
-
- s.endBlock()
-}
-
-func (s *consoleStenographer) printSpecFailure(message string, spec *types.SpecSummary, succinct bool, fullTrace bool) {
- s.startBlock()
- s.println(0, s.colorize(redColor+boldStyle, "%s%s [%.3f seconds]", message, s.failureContext(spec.Failure.ComponentType), spec.RunTime.Seconds()))
-
- indentation := s.printCodeLocationBlock(spec.ComponentTexts, spec.ComponentCodeLocations, spec.Failure.ComponentType, spec.Failure.ComponentIndex, spec.State, succinct)
-
- s.printNewLine()
- s.printFailure(indentation, spec.State, spec.Failure, fullTrace)
- s.endBlock()
-}
-
-func (s *consoleStenographer) failureContext(failedComponentType types.SpecComponentType) string {
- switch failedComponentType {
- case types.SpecComponentTypeBeforeSuite:
- return " in Suite Setup (BeforeSuite)"
- case types.SpecComponentTypeAfterSuite:
- return " in Suite Teardown (AfterSuite)"
- case types.SpecComponentTypeBeforeEach:
- return " in Spec Setup (BeforeEach)"
- case types.SpecComponentTypeJustBeforeEach:
- return " in Spec Setup (JustBeforeEach)"
- case types.SpecComponentTypeAfterEach:
- return " in Spec Teardown (AfterEach)"
- }
-
- return ""
-}
-
-func (s *consoleStenographer) printSkip(indentation int, spec types.SpecFailure) {
- s.println(indentation, s.colorize(cyanColor, spec.Message))
- s.printNewLine()
- s.println(indentation, spec.Location.String())
-}
-
-func (s *consoleStenographer) printFailure(indentation int, state types.SpecState, failure types.SpecFailure, fullTrace bool) {
- if state == types.SpecStatePanicked {
- s.println(indentation, s.colorize(redColor+boldStyle, failure.Message))
- s.println(indentation, s.colorize(redColor, failure.ForwardedPanic))
- s.println(indentation, failure.Location.String())
- s.printNewLine()
- s.println(indentation, s.colorize(redColor, "Full Stack Trace"))
- s.println(indentation, failure.Location.FullStackTrace)
- } else {
- s.println(indentation, s.colorize(redColor, failure.Message))
- s.printNewLine()
- s.println(indentation, failure.Location.String())
- if fullTrace {
- s.printNewLine()
- s.println(indentation, s.colorize(redColor, "Full Stack Trace"))
- s.println(indentation, failure.Location.FullStackTrace)
- }
- }
-}
-
-func (s *consoleStenographer) printSpecContext(componentTexts []string, componentCodeLocations []types.CodeLocation, failedComponentType types.SpecComponentType, failedComponentIndex int, state types.SpecState, succinct bool) int {
- startIndex := 1
- indentation := 0
-
- if len(componentTexts) == 1 {
- startIndex = 0
- }
-
- for i := startIndex; i < len(componentTexts); i++ {
- if (state.IsFailure() || state == types.SpecStateSkipped) && i == failedComponentIndex {
- color := redColor
- if state == types.SpecStateSkipped {
- color = cyanColor
- }
- blockType := ""
- switch failedComponentType {
- case types.SpecComponentTypeBeforeSuite:
- blockType = "BeforeSuite"
- case types.SpecComponentTypeAfterSuite:
- blockType = "AfterSuite"
- case types.SpecComponentTypeBeforeEach:
- blockType = "BeforeEach"
- case types.SpecComponentTypeJustBeforeEach:
- blockType = "JustBeforeEach"
- case types.SpecComponentTypeAfterEach:
- blockType = "AfterEach"
- case types.SpecComponentTypeIt:
- blockType = "It"
- case types.SpecComponentTypeMeasure:
- blockType = "Measurement"
- }
- if succinct {
- s.print(0, s.colorize(color+boldStyle, "[%s] %s ", blockType, componentTexts[i]))
- } else {
- s.println(indentation, s.colorize(color+boldStyle, "%s [%s]", componentTexts[i], blockType))
- s.println(indentation, s.colorize(grayColor, "%s", componentCodeLocations[i]))
- }
- } else {
- if succinct {
- s.print(0, s.colorize(alternatingColors[i%2], "%s ", componentTexts[i]))
- } else {
- s.println(indentation, componentTexts[i])
- s.println(indentation, s.colorize(grayColor, "%s", componentCodeLocations[i]))
- }
- }
- indentation++
- }
-
- return indentation
-}
-
-func (s *consoleStenographer) printCodeLocationBlock(componentTexts []string, componentCodeLocations []types.CodeLocation, failedComponentType types.SpecComponentType, failedComponentIndex int, state types.SpecState, succinct bool) int {
- indentation := s.printSpecContext(componentTexts, componentCodeLocations, failedComponentType, failedComponentIndex, state, succinct)
-
- if succinct {
- if len(componentTexts) > 0 {
- s.printNewLine()
- s.print(0, s.colorize(lightGrayColor, "%s", componentCodeLocations[len(componentCodeLocations)-1]))
- }
- s.printNewLine()
- indentation = 1
- } else {
- indentation--
- }
-
- return indentation
-}
-
-func (s *consoleStenographer) orderedMeasurementKeys(measurements map[string]*types.SpecMeasurement) []string {
- orderedKeys := make([]string, len(measurements))
- for key, measurement := range measurements {
- orderedKeys[measurement.Order] = key
- }
- return orderedKeys
-}
-
-func (s *consoleStenographer) measurementReport(spec *types.SpecSummary, succinct bool) string {
- if len(spec.Measurements) == 0 {
- return "Found no measurements"
- }
-
- message := []string{}
- orderedKeys := s.orderedMeasurementKeys(spec.Measurements)
-
- if succinct {
- message = append(message, fmt.Sprintf("%s samples:", s.colorize(boldStyle, "%d", spec.NumberOfSamples)))
- for _, key := range orderedKeys {
- measurement := spec.Measurements[key]
- message = append(message, fmt.Sprintf(" %s - %s: %s%s, %s: %s%s ± %s%s, %s: %s%s",
- s.colorize(boldStyle, "%s", measurement.Name),
- measurement.SmallestLabel,
- s.colorize(greenColor, measurement.PrecisionFmt(), measurement.Smallest),
- measurement.Units,
- measurement.AverageLabel,
- s.colorize(cyanColor, measurement.PrecisionFmt(), measurement.Average),
- measurement.Units,
- s.colorize(cyanColor, measurement.PrecisionFmt(), measurement.StdDeviation),
- measurement.Units,
- measurement.LargestLabel,
- s.colorize(redColor, measurement.PrecisionFmt(), measurement.Largest),
- measurement.Units,
- ))
- }
- } else {
- message = append(message, fmt.Sprintf("Ran %s samples:", s.colorize(boldStyle, "%d", spec.NumberOfSamples)))
- for _, key := range orderedKeys {
- measurement := spec.Measurements[key]
- info := ""
- if measurement.Info != nil {
- message = append(message, fmt.Sprintf("%v", measurement.Info))
- }
-
- message = append(message, fmt.Sprintf("%s:\n%s %s: %s%s\n %s: %s%s\n %s: %s%s ± %s%s",
- s.colorize(boldStyle, "%s", measurement.Name),
- info,
- measurement.SmallestLabel,
- s.colorize(greenColor, measurement.PrecisionFmt(), measurement.Smallest),
- measurement.Units,
- measurement.LargestLabel,
- s.colorize(redColor, measurement.PrecisionFmt(), measurement.Largest),
- measurement.Units,
- measurement.AverageLabel,
- s.colorize(cyanColor, measurement.PrecisionFmt(), measurement.Average),
- measurement.Units,
- s.colorize(cyanColor, measurement.PrecisionFmt(), measurement.StdDeviation),
- measurement.Units,
- ))
- }
- }
-
- return strings.Join(message, "\n")
-}
diff --git a/vendor/github.com/onsi/ginkgo/reporters/stenographer/support/go-colorable/README.md b/vendor/github.com/onsi/ginkgo/reporters/stenographer/support/go-colorable/README.md
deleted file mode 100644
index e84226a73..000000000
--- a/vendor/github.com/onsi/ginkgo/reporters/stenographer/support/go-colorable/README.md
+++ /dev/null
@@ -1,43 +0,0 @@
-# go-colorable
-
-Colorable writer for windows.
-
-For example, most of logger packages doesn't show colors on windows. (I know we can do it with ansicon. But I don't want.)
-This package is possible to handle escape sequence for ansi color on windows.
-
-## Too Bad!
-
-![](https://raw.githubusercontent.com/mattn/go-colorable/gh-pages/bad.png)
-
-
-## So Good!
-
-![](https://raw.githubusercontent.com/mattn/go-colorable/gh-pages/good.png)
-
-## Usage
-
-```go
-logrus.SetFormatter(&logrus.TextFormatter{ForceColors: true})
-logrus.SetOutput(colorable.NewColorableStdout())
-
-logrus.Info("succeeded")
-logrus.Warn("not correct")
-logrus.Error("something error")
-logrus.Fatal("panic")
-```
-
-You can compile above code on non-windows OSs.
-
-## Installation
-
-```
-$ go get github.com/mattn/go-colorable
-```
-
-# License
-
-MIT
-
-# Author
-
-Yasuhiro Matsumoto (a.k.a mattn)
diff --git a/vendor/github.com/onsi/ginkgo/reporters/stenographer/support/go-colorable/colorable_others.go b/vendor/github.com/onsi/ginkgo/reporters/stenographer/support/go-colorable/colorable_others.go
deleted file mode 100644
index 52d6653b3..000000000
--- a/vendor/github.com/onsi/ginkgo/reporters/stenographer/support/go-colorable/colorable_others.go
+++ /dev/null
@@ -1,24 +0,0 @@
-// +build !windows
-
-package colorable
-
-import (
- "io"
- "os"
-)
-
-func NewColorable(file *os.File) io.Writer {
- if file == nil {
- panic("nil passed instead of *os.File to NewColorable()")
- }
-
- return file
-}
-
-func NewColorableStdout() io.Writer {
- return os.Stdout
-}
-
-func NewColorableStderr() io.Writer {
- return os.Stderr
-}
diff --git a/vendor/github.com/onsi/ginkgo/reporters/stenographer/support/go-colorable/noncolorable.go b/vendor/github.com/onsi/ginkgo/reporters/stenographer/support/go-colorable/noncolorable.go
deleted file mode 100644
index fb976dbd8..000000000
--- a/vendor/github.com/onsi/ginkgo/reporters/stenographer/support/go-colorable/noncolorable.go
+++ /dev/null
@@ -1,57 +0,0 @@
-package colorable
-
-import (
- "bytes"
- "fmt"
- "io"
-)
-
-type NonColorable struct {
- out io.Writer
- lastbuf bytes.Buffer
-}
-
-func NewNonColorable(w io.Writer) io.Writer {
- return &NonColorable{out: w}
-}
-
-func (w *NonColorable) Write(data []byte) (n int, err error) {
- er := bytes.NewBuffer(data)
-loop:
- for {
- c1, _, err := er.ReadRune()
- if err != nil {
- break loop
- }
- if c1 != 0x1b {
- fmt.Fprint(w.out, string(c1))
- continue
- }
- c2, _, err := er.ReadRune()
- if err != nil {
- w.lastbuf.WriteRune(c1)
- break loop
- }
- if c2 != 0x5b {
- w.lastbuf.WriteRune(c1)
- w.lastbuf.WriteRune(c2)
- continue
- }
-
- var buf bytes.Buffer
- for {
- c, _, err := er.ReadRune()
- if err != nil {
- w.lastbuf.WriteRune(c1)
- w.lastbuf.WriteRune(c2)
- w.lastbuf.Write(buf.Bytes())
- break loop
- }
- if ('a' <= c && c <= 'z') || ('A' <= c && c <= 'Z') || c == '@' {
- break
- }
- buf.Write([]byte(string(c)))
- }
- }
- return len(data) - w.lastbuf.Len(), nil
-}
diff --git a/vendor/github.com/onsi/ginkgo/reporters/stenographer/support/go-isatty/README.md b/vendor/github.com/onsi/ginkgo/reporters/stenographer/support/go-isatty/README.md
deleted file mode 100644
index 74845de4a..000000000
--- a/vendor/github.com/onsi/ginkgo/reporters/stenographer/support/go-isatty/README.md
+++ /dev/null
@@ -1,37 +0,0 @@
-# go-isatty
-
-isatty for golang
-
-## Usage
-
-```go
-package main
-
-import (
- "fmt"
- "github.com/mattn/go-isatty"
- "os"
-)
-
-func main() {
- if isatty.IsTerminal(os.Stdout.Fd()) {
- fmt.Println("Is Terminal")
- } else {
- fmt.Println("Is Not Terminal")
- }
-}
-```
-
-## Installation
-
-```
-$ go get github.com/mattn/go-isatty
-```
-
-# License
-
-MIT
-
-# Author
-
-Yasuhiro Matsumoto (a.k.a mattn)
diff --git a/vendor/github.com/onsi/ginkgo/reporters/stenographer/support/go-isatty/doc.go b/vendor/github.com/onsi/ginkgo/reporters/stenographer/support/go-isatty/doc.go
deleted file mode 100644
index 17d4f90eb..000000000
--- a/vendor/github.com/onsi/ginkgo/reporters/stenographer/support/go-isatty/doc.go
+++ /dev/null
@@ -1,2 +0,0 @@
-// Package isatty implements interface to isatty
-package isatty
diff --git a/vendor/github.com/onsi/ginkgo/reporters/stenographer/support/go-isatty/isatty_appengine.go b/vendor/github.com/onsi/ginkgo/reporters/stenographer/support/go-isatty/isatty_appengine.go
deleted file mode 100644
index 83c588773..000000000
--- a/vendor/github.com/onsi/ginkgo/reporters/stenographer/support/go-isatty/isatty_appengine.go
+++ /dev/null
@@ -1,9 +0,0 @@
-// +build appengine
-
-package isatty
-
-// IsTerminal returns true if the file descriptor is terminal which
-// is always false on on appengine classic which is a sandboxed PaaS.
-func IsTerminal(fd uintptr) bool {
- return false
-}
diff --git a/vendor/github.com/onsi/ginkgo/reporters/stenographer/support/go-isatty/isatty_bsd.go b/vendor/github.com/onsi/ginkgo/reporters/stenographer/support/go-isatty/isatty_bsd.go
deleted file mode 100644
index 98ffe86a4..000000000
--- a/vendor/github.com/onsi/ginkgo/reporters/stenographer/support/go-isatty/isatty_bsd.go
+++ /dev/null
@@ -1,18 +0,0 @@
-// +build darwin freebsd openbsd netbsd
-// +build !appengine
-
-package isatty
-
-import (
- "syscall"
- "unsafe"
-)
-
-const ioctlReadTermios = syscall.TIOCGETA
-
-// IsTerminal return true if the file descriptor is terminal.
-func IsTerminal(fd uintptr) bool {
- var termios syscall.Termios
- _, _, err := syscall.Syscall6(syscall.SYS_IOCTL, fd, ioctlReadTermios, uintptr(unsafe.Pointer(&termios)), 0, 0, 0)
- return err == 0
-}
diff --git a/vendor/github.com/onsi/ginkgo/reporters/stenographer/support/go-isatty/isatty_linux.go b/vendor/github.com/onsi/ginkgo/reporters/stenographer/support/go-isatty/isatty_linux.go
deleted file mode 100644
index 9d24bac1d..000000000
--- a/vendor/github.com/onsi/ginkgo/reporters/stenographer/support/go-isatty/isatty_linux.go
+++ /dev/null
@@ -1,18 +0,0 @@
-// +build linux
-// +build !appengine
-
-package isatty
-
-import (
- "syscall"
- "unsafe"
-)
-
-const ioctlReadTermios = syscall.TCGETS
-
-// IsTerminal return true if the file descriptor is terminal.
-func IsTerminal(fd uintptr) bool {
- var termios syscall.Termios
- _, _, err := syscall.Syscall6(syscall.SYS_IOCTL, fd, ioctlReadTermios, uintptr(unsafe.Pointer(&termios)), 0, 0, 0)
- return err == 0
-}
diff --git a/vendor/github.com/onsi/ginkgo/reporters/stenographer/support/go-isatty/isatty_solaris.go b/vendor/github.com/onsi/ginkgo/reporters/stenographer/support/go-isatty/isatty_solaris.go
deleted file mode 100644
index 1f0c6bf53..000000000
--- a/vendor/github.com/onsi/ginkgo/reporters/stenographer/support/go-isatty/isatty_solaris.go
+++ /dev/null
@@ -1,16 +0,0 @@
-// +build solaris
-// +build !appengine
-
-package isatty
-
-import (
- "golang.org/x/sys/unix"
-)
-
-// IsTerminal returns true if the given file descriptor is a terminal.
-// see: http://src.illumos.org/source/xref/illumos-gate/usr/src/lib/libbc/libc/gen/common/isatty.c
-func IsTerminal(fd uintptr) bool {
- var termio unix.Termio
- err := unix.IoctlSetTermio(int(fd), unix.TCGETA, &termio)
- return err == nil
-}
diff --git a/vendor/github.com/onsi/ginkgo/reporters/stenographer/support/go-isatty/isatty_windows.go b/vendor/github.com/onsi/ginkgo/reporters/stenographer/support/go-isatty/isatty_windows.go
deleted file mode 100644
index 83c398b16..000000000
--- a/vendor/github.com/onsi/ginkgo/reporters/stenographer/support/go-isatty/isatty_windows.go
+++ /dev/null
@@ -1,19 +0,0 @@
-// +build windows
-// +build !appengine
-
-package isatty
-
-import (
- "syscall"
- "unsafe"
-)
-
-var kernel32 = syscall.NewLazyDLL("kernel32.dll")
-var procGetConsoleMode = kernel32.NewProc("GetConsoleMode")
-
-// IsTerminal return true if the file descriptor is terminal.
-func IsTerminal(fd uintptr) bool {
- var st uint32
- r, _, e := syscall.Syscall(procGetConsoleMode.Addr(), 2, fd, uintptr(unsafe.Pointer(&st)), 0)
- return r != 0 && e == 0
-}
diff --git a/vendor/github.com/onsi/ginkgo/reporters/teamcity_reporter.go b/vendor/github.com/onsi/ginkgo/reporters/teamcity_reporter.go
deleted file mode 100644
index 84fd8aff8..000000000
--- a/vendor/github.com/onsi/ginkgo/reporters/teamcity_reporter.go
+++ /dev/null
@@ -1,106 +0,0 @@
-/*
-
-TeamCity Reporter for Ginkgo
-
-Makes use of TeamCity's support for Service Messages
-http://confluence.jetbrains.com/display/TCD7/Build+Script+Interaction+with+TeamCity#BuildScriptInteractionwithTeamCity-ReportingTests
-*/
-
-package reporters
-
-import (
- "fmt"
- "io"
- "strings"
-
- "github.com/onsi/ginkgo/config"
- "github.com/onsi/ginkgo/types"
-)
-
-const (
- messageId = "##teamcity"
-)
-
-type TeamCityReporter struct {
- writer io.Writer
- testSuiteName string
- ReporterConfig config.DefaultReporterConfigType
-}
-
-func NewTeamCityReporter(writer io.Writer) *TeamCityReporter {
- return &TeamCityReporter{
- writer: writer,
- }
-}
-
-func (reporter *TeamCityReporter) SpecSuiteWillBegin(config config.GinkgoConfigType, summary *types.SuiteSummary) {
- reporter.testSuiteName = escape(summary.SuiteDescription)
- fmt.Fprintf(reporter.writer, "%s[testSuiteStarted name='%s']\n", messageId, reporter.testSuiteName)
-}
-
-func (reporter *TeamCityReporter) BeforeSuiteDidRun(setupSummary *types.SetupSummary) {
- reporter.handleSetupSummary("BeforeSuite", setupSummary)
-}
-
-func (reporter *TeamCityReporter) AfterSuiteDidRun(setupSummary *types.SetupSummary) {
- reporter.handleSetupSummary("AfterSuite", setupSummary)
-}
-
-func (reporter *TeamCityReporter) handleSetupSummary(name string, setupSummary *types.SetupSummary) {
- if setupSummary.State != types.SpecStatePassed {
- testName := escape(name)
- fmt.Fprintf(reporter.writer, "%s[testStarted name='%s']\n", messageId, testName)
- message := reporter.failureMessage(setupSummary.Failure)
- details := reporter.failureDetails(setupSummary.Failure)
- fmt.Fprintf(reporter.writer, "%s[testFailed name='%s' message='%s' details='%s']\n", messageId, testName, message, details)
- durationInMilliseconds := setupSummary.RunTime.Seconds() * 1000
- fmt.Fprintf(reporter.writer, "%s[testFinished name='%s' duration='%v']\n", messageId, testName, durationInMilliseconds)
- }
-}
-
-func (reporter *TeamCityReporter) SpecWillRun(specSummary *types.SpecSummary) {
- testName := escape(strings.Join(specSummary.ComponentTexts[1:], " "))
- fmt.Fprintf(reporter.writer, "%s[testStarted name='%s']\n", messageId, testName)
-}
-
-func (reporter *TeamCityReporter) SpecDidComplete(specSummary *types.SpecSummary) {
- testName := escape(strings.Join(specSummary.ComponentTexts[1:], " "))
-
- if reporter.ReporterConfig.ReportPassed && specSummary.State == types.SpecStatePassed {
- details := escape(specSummary.CapturedOutput)
- fmt.Fprintf(reporter.writer, "%s[testPassed name='%s' details='%s']\n", messageId, testName, details)
- }
- if specSummary.State == types.SpecStateFailed || specSummary.State == types.SpecStateTimedOut || specSummary.State == types.SpecStatePanicked {
- message := reporter.failureMessage(specSummary.Failure)
- details := reporter.failureDetails(specSummary.Failure)
- fmt.Fprintf(reporter.writer, "%s[testFailed name='%s' message='%s' details='%s']\n", messageId, testName, message, details)
- }
- if specSummary.State == types.SpecStateSkipped || specSummary.State == types.SpecStatePending {
- fmt.Fprintf(reporter.writer, "%s[testIgnored name='%s']\n", messageId, testName)
- }
-
- durationInMilliseconds := specSummary.RunTime.Seconds() * 1000
- fmt.Fprintf(reporter.writer, "%s[testFinished name='%s' duration='%v']\n", messageId, testName, durationInMilliseconds)
-}
-
-func (reporter *TeamCityReporter) SpecSuiteDidEnd(summary *types.SuiteSummary) {
- fmt.Fprintf(reporter.writer, "%s[testSuiteFinished name='%s']\n", messageId, reporter.testSuiteName)
-}
-
-func (reporter *TeamCityReporter) failureMessage(failure types.SpecFailure) string {
- return escape(failure.ComponentCodeLocation.String())
-}
-
-func (reporter *TeamCityReporter) failureDetails(failure types.SpecFailure) string {
- return escape(fmt.Sprintf("%s\n%s", failure.Message, failure.Location.String()))
-}
-
-func escape(output string) string {
- output = strings.Replace(output, "|", "||", -1)
- output = strings.Replace(output, "'", "|'", -1)
- output = strings.Replace(output, "\n", "|n", -1)
- output = strings.Replace(output, "\r", "|r", -1)
- output = strings.Replace(output, "[", "|[", -1)
- output = strings.Replace(output, "]", "|]", -1)
- return output
-}
diff --git a/vendor/github.com/onsi/ginkgo/types/code_location.go b/vendor/github.com/onsi/ginkgo/types/code_location.go
deleted file mode 100644
index 935a89e13..000000000
--- a/vendor/github.com/onsi/ginkgo/types/code_location.go
+++ /dev/null
@@ -1,15 +0,0 @@
-package types
-
-import (
- "fmt"
-)
-
-type CodeLocation struct {
- FileName string
- LineNumber int
- FullStackTrace string
-}
-
-func (codeLocation CodeLocation) String() string {
- return fmt.Sprintf("%s:%d", codeLocation.FileName, codeLocation.LineNumber)
-}
diff --git a/vendor/github.com/onsi/ginkgo/types/synchronization.go b/vendor/github.com/onsi/ginkgo/types/synchronization.go
deleted file mode 100644
index fdd6ed5bd..000000000
--- a/vendor/github.com/onsi/ginkgo/types/synchronization.go
+++ /dev/null
@@ -1,30 +0,0 @@
-package types
-
-import (
- "encoding/json"
-)
-
-type RemoteBeforeSuiteState int
-
-const (
- RemoteBeforeSuiteStateInvalid RemoteBeforeSuiteState = iota
-
- RemoteBeforeSuiteStatePending
- RemoteBeforeSuiteStatePassed
- RemoteBeforeSuiteStateFailed
- RemoteBeforeSuiteStateDisappeared
-)
-
-type RemoteBeforeSuiteData struct {
- Data []byte
- State RemoteBeforeSuiteState
-}
-
-func (r RemoteBeforeSuiteData) ToJSON() []byte {
- data, _ := json.Marshal(r)
- return data
-}
-
-type RemoteAfterSuiteData struct {
- CanRun bool
-}
diff --git a/vendor/github.com/onsi/ginkgo/types/types.go b/vendor/github.com/onsi/ginkgo/types/types.go
deleted file mode 100644
index c143e02d8..000000000
--- a/vendor/github.com/onsi/ginkgo/types/types.go
+++ /dev/null
@@ -1,174 +0,0 @@
-package types
-
-import (
- "strconv"
- "time"
-)
-
-const GINKGO_FOCUS_EXIT_CODE = 197
-
-/*
-SuiteSummary represents the a summary of the test suite and is passed to both
-Reporter.SpecSuiteWillBegin
-Reporter.SpecSuiteDidEnd
-
-this is unfortunate as these two methods should receive different objects. When running in parallel
-each node does not deterministically know how many specs it will end up running.
-
-Unfortunately making such a change would break backward compatibility.
-
-Until Ginkgo 2.0 comes out we will continue to reuse this struct but populate unknown fields
-with -1.
-*/
-type SuiteSummary struct {
- SuiteDescription string
- SuiteSucceeded bool
- SuiteID string
-
- NumberOfSpecsBeforeParallelization int
- NumberOfTotalSpecs int
- NumberOfSpecsThatWillBeRun int
- NumberOfPendingSpecs int
- NumberOfSkippedSpecs int
- NumberOfPassedSpecs int
- NumberOfFailedSpecs int
- // Flaked specs are those that failed initially, but then passed on a
- // subsequent try.
- NumberOfFlakedSpecs int
- RunTime time.Duration
-}
-
-type SpecSummary struct {
- ComponentTexts []string
- ComponentCodeLocations []CodeLocation
-
- State SpecState
- RunTime time.Duration
- Failure SpecFailure
- IsMeasurement bool
- NumberOfSamples int
- Measurements map[string]*SpecMeasurement
-
- CapturedOutput string
- SuiteID string
-}
-
-func (s SpecSummary) HasFailureState() bool {
- return s.State.IsFailure()
-}
-
-func (s SpecSummary) TimedOut() bool {
- return s.State == SpecStateTimedOut
-}
-
-func (s SpecSummary) Panicked() bool {
- return s.State == SpecStatePanicked
-}
-
-func (s SpecSummary) Failed() bool {
- return s.State == SpecStateFailed
-}
-
-func (s SpecSummary) Passed() bool {
- return s.State == SpecStatePassed
-}
-
-func (s SpecSummary) Skipped() bool {
- return s.State == SpecStateSkipped
-}
-
-func (s SpecSummary) Pending() bool {
- return s.State == SpecStatePending
-}
-
-type SetupSummary struct {
- ComponentType SpecComponentType
- CodeLocation CodeLocation
-
- State SpecState
- RunTime time.Duration
- Failure SpecFailure
-
- CapturedOutput string
- SuiteID string
-}
-
-type SpecFailure struct {
- Message string
- Location CodeLocation
- ForwardedPanic string
-
- ComponentIndex int
- ComponentType SpecComponentType
- ComponentCodeLocation CodeLocation
-}
-
-type SpecMeasurement struct {
- Name string
- Info interface{}
- Order int
-
- Results []float64
-
- Smallest float64
- Largest float64
- Average float64
- StdDeviation float64
-
- SmallestLabel string
- LargestLabel string
- AverageLabel string
- Units string
- Precision int
-}
-
-func (s SpecMeasurement) PrecisionFmt() string {
- if s.Precision == 0 {
- return "%f"
- }
-
- str := strconv.Itoa(s.Precision)
-
- return "%." + str + "f"
-}
-
-type SpecState uint
-
-const (
- SpecStateInvalid SpecState = iota
-
- SpecStatePending
- SpecStateSkipped
- SpecStatePassed
- SpecStateFailed
- SpecStatePanicked
- SpecStateTimedOut
-)
-
-func (state SpecState) IsFailure() bool {
- return state == SpecStateTimedOut || state == SpecStatePanicked || state == SpecStateFailed
-}
-
-type SpecComponentType uint
-
-const (
- SpecComponentTypeInvalid SpecComponentType = iota
-
- SpecComponentTypeContainer
- SpecComponentTypeBeforeSuite
- SpecComponentTypeAfterSuite
- SpecComponentTypeBeforeEach
- SpecComponentTypeJustBeforeEach
- SpecComponentTypeJustAfterEach
- SpecComponentTypeAfterEach
- SpecComponentTypeIt
- SpecComponentTypeMeasure
-)
-
-type FlagType uint
-
-const (
- FlagTypeNone FlagType = iota
- FlagTypeFocused
- FlagTypePending
-)
diff --git a/vendor/github.com/onsi/ginkgo/.gitignore b/vendor/github.com/onsi/ginkgo/v2/.gitignore
similarity index 90%
rename from vendor/github.com/onsi/ginkgo/.gitignore
rename to vendor/github.com/onsi/ginkgo/v2/.gitignore
index b9f9659d2..18793c248 100644
--- a/vendor/github.com/onsi/ginkgo/.gitignore
+++ b/vendor/github.com/onsi/ginkgo/v2/.gitignore
@@ -4,4 +4,4 @@ tmp/**/*
*.coverprofile
.vscode
.idea/
-*.log
+*.log
\ No newline at end of file
diff --git a/vendor/github.com/onsi/ginkgo/v2/CHANGELOG.md b/vendor/github.com/onsi/ginkgo/v2/CHANGELOG.md
new file mode 100644
index 000000000..f4671ec1c
--- /dev/null
+++ b/vendor/github.com/onsi/ginkgo/v2/CHANGELOG.md
@@ -0,0 +1,828 @@
+## 2.9.7
+
+### Fixes
+- fix race when multiple defercleanups are called in goroutines [07fc3a0]
+
+## 2.9.6
+
+### Fixes
+- fix: create parent directory before report files (#1212) [0ac65de]
+
+### Maintenance
+- Bump github.com/onsi/gomega from 1.27.6 to 1.27.7 (#1202) [3e39231]
+
+## 2.9.5
+
+### Fixes
+- ensure the correct deterministic sort order is produced when ordered specs are generated by a helper function [7fa0b6b]
+
+### Maintenance
+- fix generators link (#1200) [9f9d8b9]
+- Bump golang.org/x/tools from 0.8.0 to 0.9.1 (#1196) [150e3f2]
+- fix spelling err in docs (#1199) [0013b1a]
+- Bump golang.org/x/sys from 0.7.0 to 0.8.0 (#1193) [9e9e3e5]
+
+## 2.9.4
+
+### Fixes
+- fix hang with ginkgo -p (#1192) [15d4bdc] - this addresses a _long_ standing issue related to Ginkgo hanging when a child process spawned by the test does not exit.
+
+- fix: fail fast may cause Serial spec or cleanup Node interrupted (#1178) [8dea88b] - prior to this there was a small gap in which specs on other processes might start even if one process has tried to abort the suite.
+
+
+### Maintenance
+- Document run order when multiple setup nodes are at the same nesting level [903be81]
+
+## 2.9.3
+
+### Features
+- Add RenderTimeline to GinkgoT() [c0c77b6]
+
+### Fixes
+- update Measure deprecation message. fixes #1176 [227c662]
+- add newlines to GinkgoLogr (#1170) (#1171) [0de0e7c]
+
+### Maintenance
+- Bump commonmarker from 0.23.8 to 0.23.9 in /docs (#1183) [8b925ab]
+- Bump nokogiri from 1.14.1 to 1.14.3 in /docs (#1184) [e3795a4]
+- Bump golang.org/x/tools from 0.7.0 to 0.8.0 (#1182) [b453793]
+- Bump actions/setup-go from 3 to 4 (#1164) [73ed75b]
+- Bump github.com/onsi/gomega from 1.27.4 to 1.27.6 (#1173) [0a2bc64]
+- Bump github.com/go-logr/logr from 1.2.3 to 1.2.4 (#1174) [f41c557]
+- Bump golang.org/x/sys from 0.6.0 to 0.7.0 (#1179) [8e423e5]
+
+## 2.9.2
+
+### Maintenance
+- Bump github.com/go-task/slim-sprig (#1167) [3fcc5bf]
+- Bump github.com/onsi/gomega from 1.27.3 to 1.27.4 (#1163) [6143ffe]
+
+## 2.9.1
+
+### Fixes
+This release fixes a longstanding issue where `ginkgo -coverpkg=./...` would not work. This is now resolved and fixes [#1161](https://github.com/onsi/ginkgo/issues/1161) and [#995](https://github.com/onsi/ginkgo/issues/995)
+- Support -coverpkg=./... [26ca1b5]
+- document coverpkg a bit more clearly [fc44c3b]
+
+### Maintenance
+- bump various dependencies
+- Improve Documentation and fix typo (#1158) [93de676]
+
+## 2.9.0
+
+### Features
+- AttachProgressReporter is an experimental feature that allows users to provide arbitrary information when a ProgressReport is requested [28801fe]
+
+- GinkgoT() has been expanded to include several Ginkgo-specific methods [2bd5a3b]
+
+ The intent is to enable the development of third-party libraries that integrate deeply with Ginkgo using `GinkgoT()` to access Ginkgo's functionality.
+
+## 2.8.4
+
+### Features
+- Add OmitSuiteSetupNodes to JunitReportConfig (#1147) [979fbc2]
+- Add a reference to ginkgolinter in docs.index.md (#1143) [8432589]
+
+### Fixes
+- rename tools hack to see if it fixes things for downstream users [a8bb39a]
+
+### Maintenance
+- Bump golang.org/x/text (#1144) [41b2a8a]
+- Bump github.com/onsi/gomega from 1.27.0 to 1.27.1 (#1142) [7c4f583]
+
+## 2.8.3
+
+Released to fix security issue in golang.org/x/net dependency
+
+### Maintenance
+
+- Bump golang.org/x/net from 0.6.0 to 0.7.0 (#1141) [fc1a02e]
+- remove tools.go hack from documentation [0718693]
+
+## 2.8.2
+
+Ginkgo now includes a `tools.go` file in the root directory of the `ginkgo` package. This should allow modules that simply `go get github.com/onsi/ginkgo/v2` to also pull in the CLI dependencies. This obviates the need for consumers of Ginkgo to have their own `tools.go` file and makes it simpler to ensure that the version of the `ginkgo` CLI being used matches the version of the library. You can simply run `go run github.com/onsi/ginkgo/v2/ginkgo` to run the version of the cli associated with your package go.mod.
+
+### Maintenance
+
+- Bump github.com/onsi/gomega from 1.26.0 to 1.27.0 (#1139) [5767b0a]
+- Fix minor typos (#1138) [e1e9723]
+- Fix link in V2 Migration Guide (#1137) [a588f60]
+
+## 2.8.1
+
+### Fixes
+- lock around default report output to avoid triggering the race detector when calling By from goroutines [2d5075a]
+- don't run ReportEntries through sprintf [febbe38]
+
+### Maintenance
+- Bump golang.org/x/tools from 0.5.0 to 0.6.0 (#1135) [11a4860]
+- test: update matrix for Go 1.20 (#1130) [4890a62]
+- Bump golang.org/x/sys from 0.4.0 to 0.5.0 (#1133) [a774638]
+- Bump github.com/onsi/gomega from 1.25.0 to 1.26.0 (#1120) [3f233bd]
+- Bump github-pages from 227 to 228 in /docs (#1131) [f9b8649]
+- Bump activesupport from 6.0.6 to 6.0.6.1 in /docs (#1127) [6f8c042]
+- Update index.md with instructions on how to upgrade Ginkgo [833a75e]
+
+## 2.8.0
+
+### Features
+
+- Introduce GinkgoHelper() to track and exclude helper functions from potential CodeLocations [e19f556]
+
+Modeled after `testing.T.Helper()`. Now, rather than write code like:
+
+```go
+func helper(model Model) {
+ Expect(model).WithOffset(1).To(BeValid())
+ Expect(model.SerialNumber).WithOffset(1).To(MatchRegexp(/[a-f0-9]*/))
+}
+```
+
+you can stop tracking offsets (which makes nesting composing helpers nearly impossible) and simply write:
+
+```go
+func helper(model Model) {
+ GinkgoHelper()
+ Expect(model).To(BeValid())
+ Expect(model.SerialNumber).To(MatchRegexp(/[a-f0-9]*/))
+}
+```
+
+- Introduce GinkgoLabelFilter() and Label().MatchesLabelFilter() to make it possible to programmatically match filters (fixes #1119) [2f6597c]
+
+You can now write code like this:
+
+```go
+BeforeSuite(func() {
+ if Label("slow").MatchesLabelFilter(GinkgoLabelFilter()) {
+ // do slow setup
+ }
+
+ if Label("fast").MatchesLabelFilter(GinkgoLabelFilter()) {
+ // do fast setup
+ }
+})
+```
+
+to programmatically check whether a given set of labels will match the configured `--label-filter`.
+
+### Maintenance
+
+- Bump webrick from 1.7.0 to 1.8.1 in /docs (#1125) [ea4966e]
+- cdeql: add ruby language (#1124) [9dd275b]
+- dependabot: add bundler package-ecosystem for docs (#1123) [14e7bdd]
+
+## 2.7.1
+
+### Fixes
+- Bring back SuiteConfig.EmitSpecProgress to avoid compilation issue for consumers that set it manually [d2a1cb0]
+
+### Maintenance
+- Bump github.com/onsi/gomega from 1.24.2 to 1.25.0 (#1118) [cafece6]
+- Bump golang.org/x/tools from 0.4.0 to 0.5.0 (#1111) [eda66c2]
+- Bump golang.org/x/sys from 0.3.0 to 0.4.0 (#1112) [ac5ccaa]
+- Bump github.com/onsi/gomega from 1.24.1 to 1.24.2 (#1097) [eee6480]
+
+## 2.7.0
+
+### Features
+- Introduce ContinueOnFailure for Ordered containers [e0123ca] - Ordered containers that are also decorated with ContinueOnFailure will not stop running specs after the first spec fails.
+- Support for bootstrap commands to use custom data for templates (#1110) [7a2b242]
+- Support for labels and pending decorator in ginkgo outline output (#1113) [e6e3b98]
+- Color aliases for custom color support (#1101) [49fab7a]
+
+### Fixes
+- correctly ensure deterministic spec order, even if specs are generated by iterating over a map [89dda20]
+- Fix a bug where timedout specs were not correctly treated as failures when determining whether or not to run AfterAlls in an Ordered container.
+- Ensure go test coverprofile outputs to the expected location (#1105) [b0bd77b]
+
+## 2.6.1
+
+### Features
+- Override formatter colors from envvars - this is a new feature but an alternative approach involving config files might be taken in the future (#1095) [60240d1]
+
+### Fixes
+- GinkgoRecover now supports ignoring panics that match a specific, hidden, interface [301f3e2]
+
+### Maintenance
+- Bump github.com/onsi/gomega from 1.24.0 to 1.24.1 (#1077) [3643823]
+- Bump golang.org/x/tools from 0.2.0 to 0.4.0 (#1090) [f9f856e]
+- Bump nokogiri from 1.13.9 to 1.13.10 in /docs (#1091) [0d7087e]
+
+## 2.6.0
+
+### Features
+- `ReportBeforeSuite` provides access to the suite report before the suite begins.
+- Add junit config option for omitting leafnodetype (#1088) [956e6d2]
+- Add support to customize junit report config to omit spec labels (#1087) [de44005]
+
+### Fixes
+- Fix stack trace pruning so that it has a chance of working on windows [2165648]
+
+## 2.5.1
+
+### Fixes
+- skipped tests only show as 'S' when running with -v [3ab38ae]
+- Fix typo in docs/index.md (#1082) [55fc58d]
+- Fix typo in docs/index.md (#1081) [8a14f1f]
+- Fix link notation in docs/index.md (#1080) [2669612]
+- Fix typo in `--progress` deprecation message (#1076) [b4b7edc]
+
+### Maintenance
+- chore: Included githubactions in the dependabot config (#976) [baea341]
+- Bump golang.org/x/sys from 0.1.0 to 0.2.0 (#1075) [9646297]
+
+## 2.5.0
+
+### Ginkgo output now includes a timeline-view of the spec
+
+This commit changes Ginkgo's default output. Spec details are now
+presented as a **timeline** that includes events that occur during the spec
+lifecycle interleaved with any GinkgoWriter content. This makes is much easier
+to understand the flow of a spec and where a given failure occurs.
+
+The --progress, --slow-spec-threshold, --always-emit-ginkgo-writer flags
+and the SuppressProgressReporting decorator have all been deprecated. Instead
+the existing -v and -vv flags better capture the level of verbosity to display. However,
+a new --show-node-events flag is added to include node `> Enter` and `< Exit` events
+in the spec timeline.
+
+In addition, JUnit reports now include the timeline (rendered with -vv) and custom JUnit
+reports can be configured and generated using
+`GenerateJUnitReportWithConfig(report types.Report, dst string, config JunitReportConfig)`
+
+Code should continue to work unchanged with this version of Ginkgo - however if you have tooling that
+was relying on the specific output format of Ginkgo you _may_ run into issues. Ginkgo's console output is not guaranteed to be stable for tooling and automation purposes. You should, instead, use Ginkgo's JSON format
+to build tooling on top of as it has stronger guarantees to be stable from version to version.
+
+### Features
+- Provide details about which timeout expired [0f2fa27]
+
+### Fixes
+- Add Support Policy to docs [c70867a]
+
+### Maintenance
+- Bump github.com/onsi/gomega from 1.22.1 to 1.23.0 (#1070) [bb3b4e2]
+
+## 2.4.0
+
+### Features
+
+- DeferCleanup supports functions with multiple-return values [5e33c75]
+- Add GinkgoLogr (#1067) [bf78c28]
+- Introduction of 'MustPassRepeatedly' decorator (#1051) [047c02f]
+
+### Fixes
+- correcting some typos (#1064) [1403d3c]
+- fix flaky internal_integration interrupt specs [2105ba3]
+- Correct busted link in README [be6b5b9]
+
+### Maintenance
+- Bump actions/checkout from 2 to 3 (#1062) [8a2f483]
+- Bump golang.org/x/tools from 0.1.12 to 0.2.0 (#1065) [529c4e8]
+- Bump github/codeql-action from 1 to 2 (#1061) [da09146]
+- Bump actions/setup-go from 2 to 3 (#1060) [918040d]
+- Bump github.com/onsi/gomega from 1.22.0 to 1.22.1 (#1053) [2098e4d]
+- Bump nokogiri from 1.13.8 to 1.13.9 in /docs (#1066) [1d74122]
+- Add GHA to dependabot config [4442772]
+
+## 2.3.1
+
+## Fixes
+Several users were invoking `ginkgo` by installing the latest version of the cli via `go install github.com/onsi/ginkgo/v2/ginkgo@latest`. When 2.3.0 was released this resulted in an influx of issues as CI systems failed due to a change in the internal contract between the Ginkgo CLI and the Ginkgo library. Ginkgo only supports running the same version of the library as the cli (which is why both are packaged in the same repository).
+
+With this patch release, the ginkgo CLI can now identify a version mismatch and emit a helpful error message.
+
+- Ginkgo cli can identify version mismatches and emit a helpful error message [bc4ae2f]
+- further emphasize that a version match is required when running Ginkgo on CI and/or locally [2691dd8]
+
+### Maintenance
+- bump gomega to v1.22.0 [822a937]
+
+## 2.3.0
+
+### Interruptible Nodes and Timeouts
+
+Ginkgo now supports per-node and per-spec timeouts on interruptible nodes. Check out the [documentation for all the details](https://onsi.github.io/ginkgo/#spec-timeouts-and-interruptible-nodes) but the gist is you can now write specs like this:
+
+```go
+It("is interruptible", func(ctx SpecContext) { // or context.Context instead of SpecContext, both are valid.
+ // do things until `ctx.Done()` is closed, for example:
+ req, err := http.NewRequestWithContext(ctx, "POST", "/build-widgets", nil)
+ Expect(err).NotTo(HaveOccured())
+ _, err := http.DefaultClient.Do(req)
+ Expect(err).NotTo(HaveOccured())
+
+ Eventually(client.WidgetCount).WithContext(ctx).Should(Equal(17))
+}, NodeTimeout(time.Second*20), GracePeriod(5*time.Second))
+```
+
+and have Ginkgo ensure that the node completes before the timeout elapses. If it does elapse, or if an external interrupt is received (e.g. `^C`) then Ginkgo will cancel the context and wait for the Grace Period for the node to exit before proceeding with any cleanup nodes associated with the spec. The `ctx` provided by Ginkgo can also be passed down to Gomega's `Eventually` to have all assertions within the node governed by a single deadline.
+
+### Features
+
+- Ginkgo now records any additional failures that occur during the cleanup of a failed spec. In prior versions this information was quietly discarded, but the introduction of a more rigorous approach to timeouts and interruptions allows Ginkgo to better track subsequent failures.
+- `SpecContext` also provides a mechanism for third-party libraries to provide additional information when a Progress Report is generated. Gomega uses this to provide the current state of an `Eventually().WithContext()` assertion when a Progress Report is requested.
+- DescribeTable now exits with an error if it is not passed any Entries [a4c9865]
+
+### Fixes
+- fixes crashes on newer Ruby 3 installations by upgrading github-pages gem dependency [92c88d5]
+- Make the outline command able to use the DSL import [1be2427]
+
+### Maintenance
+- chore(docs): delete no meaning d [57c373c]
+- chore(docs): Fix hyperlinks [30526d5]
+- chore(docs): fix code blocks without language settings [cf611c4]
+- fix intra-doc link [b541bcb]
+
+## 2.2.0
+
+### Generate real-time Progress Reports [f91377c]
+
+Ginkgo can now generate Progress Reports to point users at the current running line of code (including a preview of the actual source code) and a best guess at the most relevant subroutines.
+
+These Progress Reports allow users to debug stuck or slow tests without exiting the Ginkgo process. A Progress Report can be generated at any time by sending Ginkgo a `SIGINFO` (`^T` on MacOS/BSD) or `SIGUSR1`.
+
+In addition, the user can specify `--poll-progress-after` and `--poll-progress-interval` to have Ginkgo start periodically emitting progress reports if a given node takes too long. These can be overridden/set on a per-node basis with the `PollProgressAfter` and `PollProgressInterval` decorators.
+
+Progress Reports are emitted to stdout, and also stored in the machine-readable report formats that Ginkgo supports.
+
+Ginkgo also uses this progress reporting infrastructure under the hood when handling timeouts and interrupts. This yields much more focused, useful, and informative stack traces than previously.
+
+### Features
+- `BeforeSuite`, `AfterSuite`, `SynchronizedBeforeSuite`, `SynchronizedAfterSuite`, and `ReportAfterSuite` now support (the relevant subset of) decorators. These can be passed in _after_ the callback functions that are usually passed into these nodes.
+
+ As a result the **signature of these methods has changed** and now includes a trailing `args ...interface{}`. For most users simply using the DSL, this change is transparent. However if you were assigning one of these functions to a custom variable (or passing it around) then your code may need to change to reflect the new signature.
+
+### Maintenance
+- Modernize the invocation of Ginkgo in github actions [0ffde58]
+- Update recommended CI settings in docs [896bbb9]
+- Speed up unnecessarily slow integration test [6d3a90e]
+
+## 2.1.6
+
+### Fixes
+- Add `SuppressProgressReporting` decorator to turn off --progress announcements for a given node [dfef62a]
+- chore: remove duplicate word in comments [7373214]
+
+## 2.1.5
+
+### Fixes
+- drop -mod=mod instructions; fixes #1026 [6ad7138]
+- Ensure `CurrentSpecReport` and `AddReportEntry` are thread-safe [817c09b]
+- remove stale importmap gcflags flag test [3cd8b93]
+- Always emit spec summary [5cf23e2] - even when only one spec has failed
+- Fix ReportAfterSuite usage in docs [b1864ad]
+- fixed typo (#997) [219cc00]
+- TrimRight is not designed to trim Suffix [71ebb74]
+- refactor: replace strings.Replace with strings.ReplaceAll (#978) [143d208]
+- fix syntax in examples (#975) [b69554f]
+
+### Maintenance
+- Bump github.com/onsi/gomega from 1.20.0 to 1.20.1 (#1027) [e5dfce4]
+- Bump tzinfo from 1.2.9 to 1.2.10 in /docs (#1006) [7ae91c4]
+- Bump github.com/onsi/gomega from 1.19.0 to 1.20.0 (#1005) [e87a85a]
+- test: add new Go 1.19 to test matrix (#1014) [bbefe12]
+- Bump golang.org/x/tools from 0.1.11 to 0.1.12 (#1012) [9327906]
+- Bump golang.org/x/tools from 0.1.10 to 0.1.11 (#993) [f44af96]
+- Bump nokogiri from 1.13.3 to 1.13.6 in /docs (#981) [ef336aa]
+
+## 2.1.4
+
+### Fixes
+- Numerous documentation typos
+- Prepend `when` when using `When` (this behavior was in 1.x but unintentionally lost during the 2.0 rewrite) [efce903]
+- improve error message when a parallel process fails to report back [a7bd1fe]
+- guard against concurrent map writes in DeprecationTracker [0976569]
+- Invoke reporting nodes during dry-run (fixes #956 and #935) [aae4480]
+- Fix ginkgo import circle [f779385]
+
+## 2.1.3
+
+See [https://onsi.github.io/ginkgo/MIGRATING_TO_V2](https://onsi.github.io/ginkgo/MIGRATING_TO_V2) for details on V2.
+
+### Fixes
+- Calling By in a container node now emits a useful error. [ff12cee]
+
+## 2.1.2
+
+### Fixes
+
+- Track location of focused specs correctly in `ginkgo unfocus` [a612ff1]
+- Profiling suites with focused specs no longer generates an erroneous failure message [8fbfa02]
+- Several documentation typos fixed. Big thanks to everyone who helped catch them and report/fix them!
+
+## 2.1.1
+
+See [https://onsi.github.io/ginkgo/MIGRATING_TO_V2](https://onsi.github.io/ginkgo/MIGRATING_TO_V2) for details on V2.
+
+### Fixes
+- Suites that only import the new dsl packages are now correctly identified as Ginkgo suites [ec17e17]
+
+## 2.1.0
+
+See [https://onsi.github.io/ginkgo/MIGRATING_TO_V2](https://onsi.github.io/ginkgo/MIGRATING_TO_V2) for details on V2.
+
+2.1.0 is a minor release with a few tweaks:
+
+- Introduce new DSL packages to enable users to pick-and-choose which portions of the DSL to dot-import. [90868e2] More details [here](https://onsi.github.io/ginkgo/#alternatives-to-dot-importing-ginkgo).
+- Add error check for invalid/nil parameters to DescribeTable [6f8577e]
+- Myriad docs typos fixed (thanks everyone!) [718542a, ecb7098, 146654c, a8f9913, 6bdffde, 03dcd7e]
+
+## 2.0.0
+
+See [https://onsi.github.io/ginkgo/MIGRATING_TO_V2](https://onsi.github.io/ginkgo/MIGRATING_TO_V2)
+
+## 1.16.5
+
+Ginkgo 2.0 now has a Release Candidate. 1.16.5 advertises the existence of the RC.
+1.16.5 deprecates GinkgoParallelNode in favor of GinkgoParallelProcess
+
+You can silence the RC advertisement by setting an `ACK_GINKGO_RC=true` environment variable or creating a file in your home directory called `.ack-ginkgo-rc`
+
+## 1.16.4
+
+### Fixes
+1.16.4 retracts 1.16.3. There are no code changes. The 1.16.3 tag was associated with the wrong commit and an attempt to change it after-the-fact has proven problematic. 1.16.4 retracts 1.16.3 in Ginkgo's go.mod and creates a new, correctly tagged, release.
+
+## 1.16.3
+
+### Features
+- Measure is now deprecated and emits a deprecation warning.
+
+## 1.16.2
+
+### Fixes
+- Deprecations can be suppressed by setting an `ACK_GINKGO_DEPRECATIONS=` environment variable.
+
+## 1.16.1
+
+### Fixes
+- Suppress --stream deprecation warning on windows (#793)
+
+## 1.16.0
+
+### Features
+- Advertise Ginkgo 2.0. Introduce deprecations. [9ef1913]
+ - Update README.md to advertise that Ginkgo 2.0 is coming.
+ - Backport the 2.0 DeprecationTracker and start alerting users
+ about upcoming deprecations.
+
+- Add slim-sprig template functions to bootstrap/generate (#775) [9162b86]
+
+- Fix accidental reference to 1488 (#784) [9fb7fe4]
+
+## 1.15.2
+
+### Fixes
+- ignore blank `-focus` and `-skip` flags (#780) [e90a4a0]
+
+## 1.15.1
+
+### Fixes
+- reporters/junit: Use `system-out` element instead of `passed` (#769) [9eda305]
+
+## 1.15.0
+
+### Features
+- Adds 'outline' command to print the outline of specs/containers in a file (#754) [071c369] [6803cc3] [935b538] [06744e8] [0c40583]
+- Add support for using template to generate tests (#752) [efb9e69]
+- Add a Chinese Doc #755 (#756) [5207632]
+- cli: allow multiple -focus and -skip flags (#736) [9a782fb]
+
+### Fixes
+- Add _internal to filename of tests created with internal flag (#751) [43c12da]
+
+## 1.14.2
+
+### Fixes
+- correct handling windows backslash in import path (#721) [97f3d51]
+- Add additional methods to GinkgoT() to improve compatibility with the testing.TB interface [b5fe44d]
+
+## 1.14.1
+
+### Fixes
+- Discard exported method declaration when running ginkgo bootstrap (#558) [f4b0240]
+
+## 1.14.0
+
+### Features
+- Defer running top-level container nodes until RunSpecs is called [d44dedf]
+- [Document Ginkgo lifecycle](http://onsi.github.io/ginkgo/#understanding-ginkgos-lifecycle)
+- Add `extensions/globals` package (#692) [3295c8f] - this can be helpful in contexts where you are test-driving your test-generation code (see [#692](https://github.com/onsi/ginkgo/pull/692))
+- Print Skip reason in JUnit reporter if one was provided [820dfab]
+
+## 1.13.0
+
+### Features
+- Add a version of table.Entry that allows dumping the entry parameters. (#689) [21eaef2]
+
+### Fixes
+- Ensure integration tests pass in an environment sans GOPATH [606fba2]
+- Add books package (#568) [fc0e44e]
+- doc(readme): installation via "tools package" (#677) [83bb20e]
+- Solve the undefined: unix.Dup2 compile error on mips64le (#680) [0624f75]
+- Import package without dot (#687) [6321024]
+- Fix integration tests to stop require GOPATH (#686) [a912ec5]
+
+## 1.12.3
+
+### Fixes
+- Print correct code location of failing table test (#666) [c6d7afb]
+
+## 1.12.2
+
+### Fixes
+- Update dependencies [ea4a036]
+
+## 1.12.1
+
+### Fixes
+- Make unfocus ("blur") much faster (#674) [8b18061]
+- Fix typo (#673) [7fdcbe8]
+- Test against 1.14 and remove 1.12 [d5c2ad6]
+- Test if a coverprofile content is empty before checking its latest character (#670) [14d9fa2]
+- replace tail package with maintained one. this fixes go get errors (#667) [4ba33d4]
+- improve ginkgo performance - makes progress on #644 [a14f98e]
+- fix convert integration tests [1f8ba69]
+- fix typo succesful -> successful (#663) [1ea49cf]
+- Fix invalid link (#658) [b886136]
+- convert utility : Include comments from source (#657) [1077c6d]
+- Explain what BDD means [d79e7fb]
+- skip race detector test on unsupported platform (#642) [f8ab89d]
+- Use Dup2 from golang.org/x/sys/unix instead of syscallDup (#638) [5d53c55]
+- Fix missing newline in combined coverage file (#641) [6a07ea2]
+- check if a spec is run before returning SpecSummary (#645) [8850000]
+
+## 1.12.0
+
+### Features
+- Add module definition (#630) [78916ab]
+
+## 1.11.0
+
+### Features
+- Add syscall for riscv64 architecture [f66e896]
+- teamcity reporter: output location of test failure as well as test definition (#626) [9869142]
+- teamcity reporter: output newline after every service message (#625) [3cfa02d]
+- Add support for go module when running `generate` command (#578) [9c89e3f]
+
+## 1.10.3
+
+### Fixes
+- Set go_import_path in travis.yml to allow internal packages in forks (#607) [3b721db]
+- Add integration test [d90e0dc]
+- Fix coverage files combining [e5dde8c]
+- A new CLI option: -ginkgo.reportFile (#601) [034fd25]
+
+## 1.10.2
+
+### Fixes
+- speed up table entry generateIt() (#609) [5049dc5]
+- Fix. Write errors to stderr instead of stdout (#610) [7bb3091]
+
+## 1.10.1
+
+### Fixes
+- stack backtrace: fix skipping (#600) [2a4c0bd]
+
+## 1.10.0
+
+### Fixes
+- stack backtrace: fix alignment and skipping [66915d6]
+- fix typo in documentation [8f97b93]
+
+## 1.9.0
+
+### Features
+- Option to print output into report, when tests have passed [0545415]
+
+### Fixes
+- Fixed typos in comments [0ecbc58]
+- gofmt code [a7f8bfb]
+- Simplify code [7454d00]
+- Simplify concatenation, incrementation and function assignment [4825557]
+- Avoid unnecessary conversions [9d9403c]
+- JUnit: include more detailed information about panic [19cca4b]
+- Print help to stdout when the user asks for help [4cb7441]
+
+
+## 1.8.0
+
+### New Features
+- allow config of the vet flag for `go test` (#562) [3cd45fa]
+- Support projects using go modules [d56ee76]
+
+### Fixes and Minor Improvements
+- chore(godoc): fixes typos in Measurement funcs [dbaca8e]
+- Optimize focus to avoid allocations [f493786]
+- Ensure generated test file names are underscored [505cc35]
+
+## 1.7.0
+
+### New Features
+- Add JustAfterEach (#484) [0d4f080]
+
+### Fixes
+- Correctly round suite time in junit reporter [2445fc1]
+- Avoid using -i argument to go test for Golang 1.10+ [46bbc26]
+
+## 1.6.0
+
+### New Features
+- add --debug flag to emit node output to files (#499) [39febac]
+
+### Fixes
+- fix: for `go vet` to pass [69338ec]
+- docs: fix for contributing instructions [7004cb1]
+- consolidate and streamline contribution docs (#494) [d848015]
+- Make generated Junit file compatible with "Maven Surefire" (#488) [e51bee6]
+- all: gofmt [000d317]
+- Increase eventually timeout to 30s [c73579c]
+- Clarify asynchronous test behavior [294d8f4]
+- Travis badge should only show master [26d2143]
+
+## 1.5.0 5/10/2018
+
+### New Features
+- Supports go v1.10 (#443, #446, #451) [e873237, 468e89e, e37dbfe, a37f4c0, c0b857d, bca5260, 4177ca8]
+- Add a When() synonym for Context() (#386) [747514b, 7484dad, 7354a07, dd826c8]
+- Re-add noisySkippings flag [652e15c]
+- Allow coverage to be displayed for focused specs (#367) [11459a8]
+- Handle -outputdir flag (#364) [228e3a8]
+- Handle -coverprofile flag (#355) [43392d5]
+
+### Fixes
+- When using custom reporters register the custom reporters *before* the default reporter. This allows users to see the output of any print statements in their customer reporters. (#365) [8382b23]
+- When running a test and calculating the coverage using the `-coverprofile` and `-outputdir` flags, Ginkgo fails with an error if the directory does not exist. This is due to an [issue in go 1.10](https://github.com/golang/go/issues/24588) (#446) [b36a6e0]
+- `unfocus` command ignores vendor folder (#459) [e5e551c, c556e43, a3b6351, 9a820dd]
+- Ignore packages whose tests are all ignored by go (#456) [7430ca7, 6d8be98]
+- Increase the threshold when checking time measurements (#455) [2f714bf, 68f622c]
+- Fix race condition in coverage tests (#423) [a5a8ff7, ab9c08b]
+- Add an extra new line after reporting spec run completion for test2json [874520d]
+- added name field to junit reported testsuite [ae61c63]
+- Do not set the run time of a spec when the dryRun flag is used (#438) [457e2d9, ba8e856]
+- Process FWhen and FSpecify when unfocusing (#434) [9008c7b, ee65bd, df87dfe]
+- Synchronies the access to the state of specs to avoid race conditions (#430) [7d481bc, ae6829d]
+- Added Duration on GinkgoTestDescription (#383) [5f49dad, 528417e, 0747408, 329d7ed]
+- Fix Ginkgo stack trace on failure for Specify (#415) [b977ede, 65ca40e, 6c46eb8]
+- Update README with Go 1.6+, Golang -> Go (#409) [17f6b97, bc14b66, 20d1598]
+- Use fmt.Errorf instead of errors.New(fmt.Sprintf (#401) [a299f56, 44e2eaa]
+- Imports in generated code should follow conventions (#398) [0bec0b0, e8536d8]
+- Prevent data race error when Recording a benchmark value from multiple go routines (#390) [c0c4881, 7a241e9]
+- Replace GOPATH in Environment [4b883f0]
+
+
+## 1.4.0 7/16/2017
+
+- `ginkgo` now provides a hint if you accidentally forget to run `ginkgo bootstrap` to generate a `*_suite_test.go` file that actually invokes the Ginkgo test runner. [#345](https://github.com/onsi/ginkgo/pull/345)
+- thanks to improvements in `go test -c` `ginkgo` no longer needs to fix Go's compilation output to ensure compilation errors are expressed relative to the CWD. [#357]
+- `ginkgo watch -watchRegExp=...` allows you to specify a custom regular expression to watch. Only files matching the regular expression are watched for changes (the default is `\.go$`) [#356]
+- `ginkgo` now always emits compilation output. Previously, only failed compilation output was printed out. [#277]
+- `ginkgo -requireSuite` now fails the test run if there are `*_test.go` files but `go test` fails to detect any tests. Typically this means you forgot to run `ginkgo bootstrap` to generate a suite file. [#344]
+- `ginkgo -timeout=DURATION` allows you to adjust the timeout for the entire test suite (default is 24 hours) [#248]
+
+## 1.3.0 3/28/2017
+
+Improvements:
+
+- Significantly improved parallel test distribution. Now instead of pre-sharding test cases across workers (which can result in idle workers and poor test performance) Ginkgo uses a shared queue to keep all workers busy until all tests are complete. This improves test-time performance and consistency.
+- `Skip(message)` can be used to skip the current test.
+- Added `extensions/table` - a Ginkgo DSL for [Table Driven Tests](http://onsi.github.io/ginkgo/#table-driven-tests)
+- Add `GinkgoRandomSeed()` - shorthand for `config.GinkgoConfig.RandomSeed`
+- Support for retrying flaky tests with `--flakeAttempts`
+- `ginkgo ./...` now recurses as you'd expect
+- Added `Specify` a synonym for `It`
+- Support colorise on Windows
+- Broader support for various go compilation flags in the `ginkgo` CLI
+
+Bug Fixes:
+
+- Ginkgo tests now fail when you `panic(nil)` (#167)
+
+## 1.2.0 5/31/2015
+
+Improvements
+
+- `ginkgo -coverpkg` calls down to `go test -coverpkg` (#160)
+- `ginkgo -afterSuiteHook COMMAND` invokes the passed-in `COMMAND` after a test suite completes (#152)
+- Relaxed requirement for Go 1.4+. `ginkgo` now works with Go v1.3+ (#166)
+
+## 1.2.0-beta
+
+Ginkgo now requires Go 1.4+
+
+Improvements:
+
+- Call reporters in reverse order when announcing spec completion -- allows custom reporters to emit output before the default reporter does.
+- Improved focus behavior. Now, this:
+
+ ```golang
+ FDescribe("Some describe", func() {
+ It("A", func() {})
+
+ FIt("B", func() {})
+ })
+ ```
+
+ will run `B` but *not* `A`. This tends to be a common usage pattern when in the thick of writing and debugging tests.
+- When `SIGINT` is received, Ginkgo will emit the contents of the `GinkgoWriter` before running the `AfterSuite`. Useful for debugging stuck tests.
+- When `--progress` is set, Ginkgo will write test progress (in particular, Ginkgo will say when it is about to run a BeforeEach, AfterEach, It, etc...) to the `GinkgoWriter`. This is useful for debugging stuck tests and tests that generate many logs.
+- Improved output when an error occurs in a setup or teardown block.
+- When `--dryRun` is set, Ginkgo will walk the spec tree and emit to its reporter *without* actually running anything. Best paired with `-v` to understand which specs will run in which order.
+- Add `By` to help document long `It`s. `By` simply writes to the `GinkgoWriter`.
+- Add support for precompiled tests:
+ - `ginkgo build ` will now compile the package, producing a file named `package.test`
+ - The compiled `package.test` file can be run directly. This runs the tests in series.
+ - To run precompiled tests in parallel, you can run: `ginkgo -p package.test`
+- Support `bootstrap`ping and `generate`ing [Agouti](http://agouti.org) specs.
+- `ginkgo generate` and `ginkgo bootstrap` now honor the package name already defined in a given directory
+- The `ginkgo` CLI ignores `SIGQUIT`. Prevents its stack dump from interlacing with the underlying test suite's stack dump.
+- The `ginkgo` CLI now compiles tests into a temporary directory instead of the package directory. This necessitates upgrading to Go v1.4+.
+- `ginkgo -notify` now works on Linux
+
+Bug Fixes:
+
+- If --skipPackages is used and all packages are skipped, Ginkgo should exit 0.
+- Fix tempfile leak when running in parallel
+- Fix incorrect failure message when a panic occurs during a parallel test run
+- Fixed an issue where a pending test within a focused context (or a focused test within a pending context) would skip all other tests.
+- Be more consistent about handling SIGTERM as well as SIGINT
+- When interrupted while concurrently compiling test suites in the background, Ginkgo now cleans up the compiled artifacts.
+- Fixed a long standing bug where `ginkgo -p` would hang if a process spawned by one of the Ginkgo parallel nodes does not exit. (Hooray!)
+
+## 1.1.0 (8/2/2014)
+
+No changes, just dropping the beta.
+
+## 1.1.0-beta (7/22/2014)
+New Features:
+
+- `ginkgo watch` now monitors packages *and their dependencies* for changes. The depth of the dependency tree can be modified with the `-depth` flag.
+- Test suites with a programmatic focus (`FIt`, `FDescribe`, etc...) exit with non-zero status code, even when they pass. This allows CI systems to detect accidental commits of focused test suites.
+- `ginkgo -p` runs the testsuite in parallel with an auto-detected number of nodes.
+- `ginkgo -tags=TAG_LIST` passes a list of tags down to the `go build` command.
+- `ginkgo --failFast` aborts the test suite after the first failure.
+- `ginkgo generate file_1 file_2` can take multiple file arguments.
+- Ginkgo now summarizes any spec failures that occurred at the end of the test run.
+- `ginkgo --randomizeSuites` will run tests *suites* in random order using the generated/passed-in seed.
+
+Improvements:
+
+- `ginkgo -skipPackage` now takes a comma-separated list of strings. If the *relative path* to a package matches one of the entries in the comma-separated list, that package is skipped.
+- `ginkgo --untilItFails` no longer recompiles between attempts.
+- Ginkgo now panics when a runnable node (`It`, `BeforeEach`, `JustBeforeEach`, `AfterEach`, `Measure`) is nested within another runnable node. This is always a mistake. Any test suites that panic because of this change should be fixed.
+
+Bug Fixes:
+
+- `ginkgo bootstrap` and `ginkgo generate` no longer fail when dealing with `hyphen-separated-packages`.
+- parallel specs are now better distributed across nodes - fixed a crashing bug where (for example) distributing 11 tests across 7 nodes would panic
+
+## 1.0.0 (5/24/2014)
+New Features:
+
+- Add `GinkgoParallelNode()` - shorthand for `config.GinkgoConfig.ParallelNode`
+
+Improvements:
+
+- When compilation fails, the compilation output is rewritten to present a correct *relative* path. Allows ⌘-clicking in iTerm open the file in your text editor.
+- `--untilItFails` and `ginkgo watch` now generate new random seeds between test runs, unless a particular random seed is specified.
+
+Bug Fixes:
+
+- `-cover` now generates a correctly combined coverprofile when running with in parallel with multiple `-node`s.
+- Print out the contents of the `GinkgoWriter` when `BeforeSuite` or `AfterSuite` fail.
+- Fix all remaining race conditions in Ginkgo's test suite.
+
+## 1.0.0-beta (4/14/2014)
+Breaking changes:
+
+- `thirdparty/gomocktestreporter` is gone. Use `GinkgoT()` instead
+- Modified the Reporter interface
+- `watch` is now a subcommand, not a flag.
+
+DSL changes:
+
+- `BeforeSuite` and `AfterSuite` for setting up and tearing down test suites.
+- `AfterSuite` is triggered on interrupt (`^C`) as well as exit.
+- `SynchronizedBeforeSuite` and `SynchronizedAfterSuite` for setting up and tearing down singleton resources across parallel nodes.
+
+CLI changes:
+
+- `watch` is now a subcommand, not a flag
+- `--nodot` flag can be passed to `ginkgo generate` and `ginkgo bootstrap` to avoid dot imports. This explicitly imports all exported identifiers in Ginkgo and Gomega. Refreshing this list can be done by running `ginkgo nodot`
+- Additional arguments can be passed to specs. Pass them after the `--` separator
+- `--skipPackage` flag takes a regexp and ignores any packages with package names passing said regexp.
+- `--trace` flag prints out full stack traces when errors occur, not just the line at which the error occurs.
+
+Misc:
+
+- Start using semantic versioning
+- Start maintaining changelog
+
+Major refactor:
+
+- Pull out Ginkgo's internal to `internal`
+- Rename `example` everywhere to `spec`
+- Much more!
diff --git a/vendor/github.com/onsi/ginkgo/v2/CONTRIBUTING.md b/vendor/github.com/onsi/ginkgo/v2/CONTRIBUTING.md
new file mode 100644
index 000000000..1da92fe7e
--- /dev/null
+++ b/vendor/github.com/onsi/ginkgo/v2/CONTRIBUTING.md
@@ -0,0 +1,13 @@
+# Contributing to Ginkgo
+
+Your contributions to Ginkgo are essential for its long-term maintenance and improvement.
+
+- Please **open an issue first** - describe what problem you are trying to solve and give the community a forum for input and feedback ahead of investing time in writing code!
+- Ensure adequate test coverage:
+ - When adding to the Ginkgo library, add unit and/or integration tests (under the `integration` folder).
+ - When adding to the Ginkgo CLI, note that there are very few unit tests. Please add an integration test.
+- Make sure all the tests succeed via `ginkgo -r -p`
+- Vet your changes via `go vet ./...`
+- Update the documentation. Ginkgo uses `godoc` comments and documentation in `docs/index.md`. You can run `bundle exec jekyll serve` in the `docs` directory to preview your changes.
+
+Thanks for supporting Ginkgo!
\ No newline at end of file
diff --git a/vendor/github.com/onsi/ginkgo/LICENSE b/vendor/github.com/onsi/ginkgo/v2/LICENSE
similarity index 100%
rename from vendor/github.com/onsi/ginkgo/LICENSE
rename to vendor/github.com/onsi/ginkgo/v2/LICENSE
diff --git a/vendor/github.com/onsi/ginkgo/v2/README.md b/vendor/github.com/onsi/ginkgo/v2/README.md
new file mode 100644
index 000000000..d0473a467
--- /dev/null
+++ b/vendor/github.com/onsi/ginkgo/v2/README.md
@@ -0,0 +1,115 @@
+![Ginkgo](https://onsi.github.io/ginkgo/images/ginkgo.png)
+
+[![test](https://github.com/onsi/ginkgo/workflows/test/badge.svg?branch=master)](https://github.com/onsi/ginkgo/actions?query=workflow%3Atest+branch%3Amaster) | [Ginkgo Docs](https://onsi.github.io/ginkgo/)
+
+---
+
+# Ginkgo
+
+Ginkgo is a mature testing framework for Go designed to help you write expressive specs. Ginkgo builds on top of Go's `testing` foundation and is complemented by the [Gomega](https://github.com/onsi/gomega) matcher library. Together, Ginkgo and Gomega let you express the intent behind your specs clearly:
+
+```go
+import (
+ . "github.com/onsi/ginkgo/v2"
+ . "github.com/onsi/gomega"
+ ...
+)
+
+Describe("Checking books out of the library", Label("library"), func() {
+ var library *libraries.Library
+ var book *books.Book
+ var valjean *users.User
+ BeforeEach(func() {
+ library = libraries.NewClient()
+ book = &books.Book{
+ Title: "Les Miserables",
+ Author: "Victor Hugo",
+ }
+ valjean = users.NewUser("Jean Valjean")
+ })
+
+ When("the library has the book in question", func() {
+ BeforeEach(func(ctx SpecContext) {
+ Expect(library.Store(ctx, book)).To(Succeed())
+ })
+
+ Context("and the book is available", func() {
+ It("lends it to the reader", func(ctx SpecContext) {
+ Expect(valjean.Checkout(ctx, library, "Les Miserables")).To(Succeed())
+ Expect(valjean.Books()).To(ContainElement(book))
+ Expect(library.UserWithBook(ctx, book)).To(Equal(valjean))
+ }, SpecTimeout(time.Second * 5))
+ })
+
+ Context("but the book has already been checked out", func() {
+ var javert *users.User
+ BeforeEach(func(ctx SpecContext) {
+ javert = users.NewUser("Javert")
+ Expect(javert.Checkout(ctx, library, "Les Miserables")).To(Succeed())
+ })
+
+ It("tells the user", func(ctx SpecContext) {
+ err := valjean.Checkout(ctx, library, "Les Miserables")
+ Expect(err).To(MatchError("Les Miserables is currently checked out"))
+ }, SpecTimeout(time.Second * 5))
+
+ It("lets the user place a hold and get notified later", func(ctx SpecContext) {
+ Expect(valjean.Hold(ctx, library, "Les Miserables")).To(Succeed())
+ Expect(valjean.Holds(ctx)).To(ContainElement(book))
+
+ By("when Javert returns the book")
+ Expect(javert.Return(ctx, library, book)).To(Succeed())
+
+ By("it eventually informs Valjean")
+ notification := "Les Miserables is ready for pick up"
+ Eventually(ctx, valjean.Notifications).Should(ContainElement(notification))
+
+ Expect(valjean.Checkout(ctx, library, "Les Miserables")).To(Succeed())
+ Expect(valjean.Books(ctx)).To(ContainElement(book))
+ Expect(valjean.Holds(ctx)).To(BeEmpty())
+ }, SpecTimeout(time.Second * 10))
+ })
+ })
+
+ When("the library does not have the book in question", func() {
+ It("tells the reader the book is unavailable", func(ctx SpecContext) {
+ err := valjean.Checkout(ctx, library, "Les Miserables")
+ Expect(err).To(MatchError("Les Miserables is not in the library catalog"))
+ }, SpecTimeout(time.Second * 5))
+ })
+})
+```
+
+Jump to the [docs](https://onsi.github.io/ginkgo/) to learn more. It's easy to [bootstrap](https://onsi.github.io/ginkgo/#bootstrapping-a-suite) and start writing your [first specs](https://onsi.github.io/ginkgo/#adding-specs-to-a-suite).
+
+If you have a question, comment, bug report, feature request, etc. please open a [GitHub issue](https://github.com/onsi/ginkgo/issues/new), or visit the [Ginkgo Slack channel](https://app.slack.com/client/T029RQSE6/CQQ50BBNW).
+
+## Capabilities
+
+Whether writing basic unit specs, complex integration specs, or even performance specs - Ginkgo gives you an expressive Domain-Specific Language (DSL) that will be familiar to users coming from frameworks such as [Quick](https://github.com/Quick/Quick), [RSpec](https://rspec.info), [Jasmine](https://jasmine.github.io), and [Busted](https://lunarmodules.github.io/busted/). This style of testing is sometimes referred to as "Behavior-Driven Development" (BDD) though Ginkgo's utility extends beyond acceptance-level testing.
+
+With Ginkgo's DSL you can use nestable [`Describe`, `Context` and `When` container nodes](https://onsi.github.io/ginkgo/#organizing-specs-with-container-nodes) to help you organize your specs. [`BeforeEach` and `AfterEach` setup nodes](https://onsi.github.io/ginkgo/#extracting-common-setup-beforeeach) for setup and cleanup. [`It` and `Specify` subject nodes](https://onsi.github.io/ginkgo/#spec-subjects-it) that hold your assertions. [`BeforeSuite` and `AfterSuite` nodes](https://onsi.github.io/ginkgo/#suite-setup-and-cleanup-beforesuite-and-aftersuite) to prep for and cleanup after a suite... and [much more!](https://onsi.github.io/ginkgo/#writing-specs).
+
+At runtime, Ginkgo can run your specs in reproducibly [random order](https://onsi.github.io/ginkgo/#spec-randomization) and has sophisticated support for [spec parallelization](https://onsi.github.io/ginkgo/#spec-parallelization). In fact, running specs in parallel is as easy as
+
+```bash
+ginkgo -p
+```
+
+By following [established patterns for writing parallel specs](https://onsi.github.io/ginkgo/#patterns-for-parallel-integration-specs) you can build even large, complex integration suites that parallelize cleanly and run performantly. And you don't have to worry about your spec suite hanging or leaving a mess behind - Ginkgo provides a per-node `context.Context` and the capability to interrupt the spec after a set period of time - and then clean up.
+
+As your suites grow Ginkgo helps you keep your specs organized with [labels](https://onsi.github.io/ginkgo/#spec-labels) and lets you easily run [subsets of specs](https://onsi.github.io/ginkgo/#filtering-specs), either [programmatically](https://onsi.github.io/ginkgo/#focused-specs) or on the [command line](https://onsi.github.io/ginkgo/#combining-filters). And Ginkgo's reporting infrastructure generates machine-readable output in a [variety of formats](https://onsi.github.io/ginkgo/#generating-machine-readable-reports) _and_ allows you to build your own [custom reporting infrastructure](https://onsi.github.io/ginkgo/#generating-reports-programmatically).
+
+Ginkgo ships with `ginkgo`, a [command line tool](https://onsi.github.io/ginkgo/#ginkgo-cli-overview) with support for generating, running, filtering, and profiling Ginkgo suites. You can even have Ginkgo automatically run your specs when it detects a change with `ginkgo watch`, enabling rapid feedback loops during test-driven development.
+
+And that's just Ginkgo! [Gomega](https://onsi.github.io/gomega/) brings a rich, mature, family of [assertions and matchers](https://onsi.github.io/gomega/#provided-matchers) to your suites. With Gomega you can easily mix [synchronous and asynchronous assertions](https://onsi.github.io/ginkgo/#patterns-for-asynchronous-testing) in your specs. You can even build your own set of expressive domain-specific matchers quickly and easily by composing Gomega's [existing building blocks](https://onsi.github.io/ginkgo/#building-custom-matchers).
+
+Happy Testing!
+
+## License
+
+Ginkgo is MIT-Licensed
+
+## Contributing
+
+See [CONTRIBUTING.md](CONTRIBUTING.md)
diff --git a/vendor/github.com/onsi/ginkgo/RELEASING.md b/vendor/github.com/onsi/ginkgo/v2/RELEASING.md
similarity index 58%
rename from vendor/github.com/onsi/ginkgo/RELEASING.md
rename to vendor/github.com/onsi/ginkgo/v2/RELEASING.md
index db3d234c1..363815d7c 100644
--- a/vendor/github.com/onsi/ginkgo/RELEASING.md
+++ b/vendor/github.com/onsi/ginkgo/v2/RELEASING.md
@@ -1,13 +1,19 @@
A Ginkgo release is a tagged git sha and a GitHub release. To cut a release:
1. Ensure CHANGELOG.md is up to date.
- - Use `git log --pretty=format:'- %s [%h]' HEAD...vX.X.X` to list all the commits since the last release
+ - Use
+ ```bash
+ LAST_VERSION=$(git tag --sort=version:refname | tail -n1)
+ CHANGES=$(git log --pretty=format:'- %s [%h]' HEAD...$LAST_VERSION)
+ echo -e "## NEXT\n\n$CHANGES\n\n### Features\n\n### Fixes\n\n### Maintenance\n\n$(cat CHANGELOG.md)" > CHANGELOG.md
+ ```
+ to update the changelog
- Categorize the changes into
- Breaking Changes (requires a major version)
- New Features (minor version)
- Fixes (fix version)
- Maintenance (which in general should not be mentioned in `CHANGELOG.md` as they have no user impact)
-1. Update `VERSION` in `config/config.go`
+1. Update `VERSION` in `types/version.go`
1. Commit, push, and release:
```
git commit -m "vM.m.p"
diff --git a/vendor/github.com/onsi/ginkgo/v2/config/deprecated.go b/vendor/github.com/onsi/ginkgo/v2/config/deprecated.go
new file mode 100644
index 000000000..a61021d08
--- /dev/null
+++ b/vendor/github.com/onsi/ginkgo/v2/config/deprecated.go
@@ -0,0 +1,69 @@
+package config
+
+// GinkgoConfigType has been deprecated and its equivalent now lives in
+// the types package. You can no longer access Ginkgo configuration from the config
+// package. Instead use the DSL's GinkgoConfiguration() function to get copies of the
+// current configuration
+//
+// GinkgoConfigType is still here so custom V1 reporters do not result in a compilation error
+// It will be removed in a future minor release of Ginkgo
+type GinkgoConfigType = DeprecatedGinkgoConfigType
+type DeprecatedGinkgoConfigType struct {
+ RandomSeed int64
+ RandomizeAllSpecs bool
+ RegexScansFilePath bool
+ FocusStrings []string
+ SkipStrings []string
+ SkipMeasurements bool
+ FailOnPending bool
+ FailFast bool
+ FlakeAttempts int
+ EmitSpecProgress bool
+ DryRun bool
+ DebugParallel bool
+
+ ParallelNode int
+ ParallelTotal int
+ SyncHost string
+ StreamHost string
+}
+
+// DefaultReporterConfigType has been deprecated and its equivalent now lives in
+// the types package. You can no longer access Ginkgo configuration from the config
+// package. Instead use the DSL's GinkgoConfiguration() function to get copies of the
+// current configuration
+//
+// DefaultReporterConfigType is still here so custom V1 reporters do not result in a compilation error
+// It will be removed in a future minor release of Ginkgo
+type DefaultReporterConfigType = DeprecatedDefaultReporterConfigType
+type DeprecatedDefaultReporterConfigType struct {
+ NoColor bool
+ SlowSpecThreshold float64
+ NoisyPendings bool
+ NoisySkippings bool
+ Succinct bool
+ Verbose bool
+ FullTrace bool
+ ReportPassed bool
+ ReportFile string
+}
+
+// Sadly there is no way to gracefully deprecate access to these global config variables.
+// Users who need access to Ginkgo's configuration should use the DSL's GinkgoConfiguration() method
+// These new unwieldy type names exist to give users a hint when they try to compile and the compilation fails
+type GinkgoConfigIsNoLongerAccessibleFromTheConfigPackageUseTheDSLsGinkgoConfigurationFunctionInstead struct{}
+
+// Sadly there is no way to gracefully deprecate access to these global config variables.
+// Users who need access to Ginkgo's configuration should use the DSL's GinkgoConfiguration() method
+// These new unwieldy type names exist to give users a hint when they try to compile and the compilation fails
+var GinkgoConfig = GinkgoConfigIsNoLongerAccessibleFromTheConfigPackageUseTheDSLsGinkgoConfigurationFunctionInstead{}
+
+// Sadly there is no way to gracefully deprecate access to these global config variables.
+// Users who need access to Ginkgo's configuration should use the DSL's GinkgoConfiguration() method
+// These new unwieldy type names exist to give users a hint when they try to compile and the compilation fails
+type DefaultReporterConfigIsNoLongerAccessibleFromTheConfigPackageUseTheDSLsGinkgoConfigurationFunctionInstead struct{}
+
+// Sadly there is no way to gracefully deprecate access to these global config variables.
+// Users who need access to Ginkgo's configuration should use the DSL's GinkgoConfiguration() method
+// These new unwieldy type names exist to give users a hint when they try to compile and the compilation fails
+var DefaultReporterConfig = DefaultReporterConfigIsNoLongerAccessibleFromTheConfigPackageUseTheDSLsGinkgoConfigurationFunctionInstead{}
diff --git a/vendor/github.com/onsi/ginkgo/v2/core_dsl.go b/vendor/github.com/onsi/ginkgo/v2/core_dsl.go
new file mode 100644
index 000000000..a244bdc18
--- /dev/null
+++ b/vendor/github.com/onsi/ginkgo/v2/core_dsl.go
@@ -0,0 +1,794 @@
+/*
+Ginkgo is a testing framework for Go designed to help you write expressive tests.
+https://github.com/onsi/ginkgo
+MIT-Licensed
+
+The godoc documentation outlines Ginkgo's API. Since Ginkgo is a Domain-Specific Language it is important to
+build a mental model for Ginkgo - the narrative documentation at https://onsi.github.io/ginkgo/ is designed to help you do that.
+You should start there - even a brief skim will be helpful. At minimum you should skim through the https://onsi.github.io/ginkgo/#getting-started chapter.
+
+Ginkgo is best paired with the Gomega matcher library: https://github.com/onsi/gomega
+
+You can run Ginkgo specs with go test - however we recommend using the ginkgo cli. It enables functionality
+that go test does not (especially running suites in parallel). You can learn more at https://onsi.github.io/ginkgo/#ginkgo-cli-overview
+or by running 'ginkgo help'.
+*/
+package ginkgo
+
+import (
+ "fmt"
+ "io"
+ "os"
+ "path/filepath"
+ "strings"
+
+ "github.com/go-logr/logr"
+ "github.com/onsi/ginkgo/v2/formatter"
+ "github.com/onsi/ginkgo/v2/internal"
+ "github.com/onsi/ginkgo/v2/internal/global"
+ "github.com/onsi/ginkgo/v2/internal/interrupt_handler"
+ "github.com/onsi/ginkgo/v2/internal/parallel_support"
+ "github.com/onsi/ginkgo/v2/reporters"
+ "github.com/onsi/ginkgo/v2/types"
+)
+
+const GINKGO_VERSION = types.VERSION
+
+var flagSet types.GinkgoFlagSet
+var deprecationTracker = types.NewDeprecationTracker()
+var suiteConfig = types.NewDefaultSuiteConfig()
+var reporterConfig = types.NewDefaultReporterConfig()
+var suiteDidRun = false
+var outputInterceptor internal.OutputInterceptor
+var client parallel_support.Client
+
+func init() {
+ var err error
+ flagSet, err = types.BuildTestSuiteFlagSet(&suiteConfig, &reporterConfig)
+ exitIfErr(err)
+ writer := internal.NewWriter(os.Stdout)
+ GinkgoWriter = writer
+ GinkgoLogr = internal.GinkgoLogrFunc(writer)
+}
+
+func exitIfErr(err error) {
+ if err != nil {
+ if outputInterceptor != nil {
+ outputInterceptor.Shutdown()
+ }
+ if client != nil {
+ client.Close()
+ }
+ fmt.Fprintln(formatter.ColorableStdErr, err.Error())
+ os.Exit(1)
+ }
+}
+
+func exitIfErrors(errors []error) {
+ if len(errors) > 0 {
+ if outputInterceptor != nil {
+ outputInterceptor.Shutdown()
+ }
+ if client != nil {
+ client.Close()
+ }
+ for _, err := range errors {
+ fmt.Fprintln(formatter.ColorableStdErr, err.Error())
+ }
+ os.Exit(1)
+ }
+}
+
+// The interface implemented by GinkgoWriter
+type GinkgoWriterInterface interface {
+ io.Writer
+
+ Print(a ...interface{})
+ Printf(format string, a ...interface{})
+ Println(a ...interface{})
+
+ TeeTo(writer io.Writer)
+ ClearTeeWriters()
+}
+
+/*
+SpecContext is the context object passed into nodes that are subject to a timeout or need to be notified of an interrupt. It implements the standard context.Context interface but also contains additional helpers to provide an extensibility point for Ginkgo. (As an example, Gomega's Eventually can use the methods defined on SpecContext to provide deeper integration with Ginkgo).
+
+You can do anything with SpecContext that you do with a typical context.Context including wrapping it with any of the context.With* methods.
+
+Ginkgo will cancel the SpecContext when a node is interrupted (e.g. by the user sending an interrupt signal) or when a node has exceeded its allowed run-time. Note, however, that even in cases where a node has a deadline, SpecContext will not return a deadline via .Deadline(). This is because Ginkgo does not use a WithDeadline() context to model node deadlines as Ginkgo needs control over the precise timing of the context cancellation to ensure it can provide an accurate progress report at the moment of cancellation.
+*/
+type SpecContext = internal.SpecContext
+
+/*
+GinkgoWriter implements a GinkgoWriterInterface and io.Writer
+
+When running in verbose mode (ginkgo -v) any writes to GinkgoWriter will be immediately printed
+to stdout. Otherwise, GinkgoWriter will buffer any writes produced during the current test and flush them to screen
+only if the current test fails.
+
+GinkgoWriter also provides convenience Print, Printf and Println methods and allows you to tee to a custom writer via GinkgoWriter.TeeTo(writer).
+Writes to GinkgoWriter are immediately sent to any registered TeeTo() writers. You can unregister all TeeTo() Writers with GinkgoWriter.ClearTeeWriters()
+
+You can learn more at https://onsi.github.io/ginkgo/#logging-output
+*/
+var GinkgoWriter GinkgoWriterInterface
+
+/*
+GinkgoLogr is a logr.Logger that writes to GinkgoWriter
+*/
+var GinkgoLogr logr.Logger
+
+// The interface by which Ginkgo receives *testing.T
+type GinkgoTestingT interface {
+ Fail()
+}
+
+/*
+GinkgoConfiguration returns the configuration of the current suite.
+
+The first return value is the SuiteConfig which controls aspects of how the suite runs,
+the second return value is the ReporterConfig which controls aspects of how Ginkgo's default
+reporter emits output.
+
+Mutating the returned configurations has no effect. To reconfigure Ginkgo programmatically you need
+to pass in your mutated copies into RunSpecs().
+
+You can learn more at https://onsi.github.io/ginkgo/#overriding-ginkgos-command-line-configuration-in-the-suite
+*/
+func GinkgoConfiguration() (types.SuiteConfig, types.ReporterConfig) {
+ return suiteConfig, reporterConfig
+}
+
+/*
+GinkgoRandomSeed returns the seed used to randomize spec execution order. It is
+useful for seeding your own pseudorandom number generators to ensure
+consistent executions from run to run, where your tests contain variability (for
+example, when selecting random spec data).
+
+You can learn more at https://onsi.github.io/ginkgo/#spec-randomization
+*/
+func GinkgoRandomSeed() int64 {
+ return suiteConfig.RandomSeed
+}
+
+/*
+GinkgoParallelProcess returns the parallel process number for the current ginkgo process
+The process number is 1-indexed. You can use GinkgoParallelProcess() to shard access to shared
+resources across your suites. You can learn more about patterns for sharding at https://onsi.github.io/ginkgo/#patterns-for-parallel-integration-specs
+
+For more on how specs are parallelized in Ginkgo, see http://onsi.github.io/ginkgo/#spec-parallelization
+*/
+func GinkgoParallelProcess() int {
+ return suiteConfig.ParallelProcess
+}
+
+/*
+GinkgoHelper marks the function it's called in as a test helper. When a failure occurs inside a helper function, Ginkgo will skip the helper when analyzing the stack trace to identify where the failure occurred.
+
+This is an alternative, simpler, mechanism to passing in a skip offset when calling Fail or using Gomega.
+*/
+func GinkgoHelper() {
+ types.MarkAsHelper(1)
+}
+
+/*
+GinkgoLabelFilter() returns the label filter configured for this suite via `--label-filter`.
+
+You can use this to manually check if a set of labels would satisfy the filter via:
+
+ if (Label("cat", "dog").MatchesLabelFilter(GinkgoLabelFilter())) {
+ //...
+ }
+*/
+func GinkgoLabelFilter() string {
+ suiteConfig, _ := GinkgoConfiguration()
+ return suiteConfig.LabelFilter
+}
+
+/*
+PauseOutputInterception() pauses Ginkgo's output interception. This is only relevant
+when running in parallel and output to stdout/stderr is being intercepted. You generally
+don't need to call this function - however there are cases when Ginkgo's output interception
+mechanisms can interfere with external processes launched by the test process.
+
+In particular, if an external process is launched that has cmd.Stdout/cmd.Stderr set to os.Stdout/os.Stderr
+then Ginkgo's output interceptor will hang. To circumvent this, set cmd.Stdout/cmd.Stderr to GinkgoWriter.
+If, for some reason, you aren't able to do that, you can PauseOutputInterception() before starting the process
+then ResumeOutputInterception() after starting it.
+
+Note that PauseOutputInterception() does not cause stdout writes to print to the console -
+this simply stops intercepting and storing stdout writes to an internal buffer.
+*/
+func PauseOutputInterception() {
+ if outputInterceptor == nil {
+ return
+ }
+ outputInterceptor.PauseIntercepting()
+}
+
+// ResumeOutputInterception() - see docs for PauseOutputInterception()
+func ResumeOutputInterception() {
+ if outputInterceptor == nil {
+ return
+ }
+ outputInterceptor.ResumeIntercepting()
+}
+
+/*
+RunSpecs is the entry point for the Ginkgo spec runner.
+
+You must call this within a Golang testing TestX(t *testing.T) function.
+If you bootstrapped your suite with "ginkgo bootstrap" this is already
+done for you.
+
+Ginkgo is typically configured via command-line flags. This configuration
+can be overridden, however, and passed into RunSpecs as optional arguments:
+
+ func TestMySuite(t *testing.T) {
+ RegisterFailHandler(gomega.Fail)
+ // fetch the current config
+ suiteConfig, reporterConfig := GinkgoConfiguration()
+ // adjust it
+ suiteConfig.SkipStrings = []string{"NEVER-RUN"}
+ reporterConfig.FullTrace = true
+ // pass it in to RunSpecs
+ RunSpecs(t, "My Suite", suiteConfig, reporterConfig)
+ }
+
+Note that some configuration changes can lead to undefined behavior. For example,
+you should not change ParallelProcess or ParallelTotal as the Ginkgo CLI is responsible
+for setting these and orchestrating parallel specs across the parallel processes. See http://onsi.github.io/ginkgo/#spec-parallelization
+for more on how specs are parallelized in Ginkgo.
+
+You can also pass suite-level Label() decorators to RunSpecs. The passed-in labels will apply to all specs in the suite.
+*/
+func RunSpecs(t GinkgoTestingT, description string, args ...interface{}) bool {
+ if suiteDidRun {
+ exitIfErr(types.GinkgoErrors.RerunningSuite())
+ }
+ suiteDidRun = true
+
+ suiteLabels := Labels{}
+ configErrors := []error{}
+ for _, arg := range args {
+ switch arg := arg.(type) {
+ case types.SuiteConfig:
+ suiteConfig = arg
+ case types.ReporterConfig:
+ reporterConfig = arg
+ case Labels:
+ suiteLabels = append(suiteLabels, arg...)
+ default:
+ configErrors = append(configErrors, types.GinkgoErrors.UnknownTypePassedToRunSpecs(arg))
+ }
+ }
+ exitIfErrors(configErrors)
+
+ configErrors = types.VetConfig(flagSet, suiteConfig, reporterConfig)
+ if len(configErrors) > 0 {
+ fmt.Fprintf(formatter.ColorableStdErr, formatter.F("{{red}}Ginkgo detected configuration issues:{{/}}\n"))
+ for _, err := range configErrors {
+ fmt.Fprintf(formatter.ColorableStdErr, err.Error())
+ }
+ os.Exit(1)
+ }
+
+ var reporter reporters.Reporter
+ if suiteConfig.ParallelTotal == 1 {
+ reporter = reporters.NewDefaultReporter(reporterConfig, formatter.ColorableStdOut)
+ outputInterceptor = internal.NoopOutputInterceptor{}
+ client = nil
+ } else {
+ reporter = reporters.NoopReporter{}
+ switch strings.ToLower(suiteConfig.OutputInterceptorMode) {
+ case "swap":
+ outputInterceptor = internal.NewOSGlobalReassigningOutputInterceptor()
+ case "none":
+ outputInterceptor = internal.NoopOutputInterceptor{}
+ default:
+ outputInterceptor = internal.NewOutputInterceptor()
+ }
+ client = parallel_support.NewClient(suiteConfig.ParallelHost)
+ if !client.Connect() {
+ client = nil
+ exitIfErr(types.GinkgoErrors.UnreachableParallelHost(suiteConfig.ParallelHost))
+ }
+ defer client.Close()
+ }
+
+ writer := GinkgoWriter.(*internal.Writer)
+ if reporterConfig.Verbosity().GTE(types.VerbosityLevelVerbose) && suiteConfig.ParallelTotal == 1 {
+ writer.SetMode(internal.WriterModeStreamAndBuffer)
+ } else {
+ writer.SetMode(internal.WriterModeBufferOnly)
+ }
+
+ if reporterConfig.WillGenerateReport() {
+ registerReportAfterSuiteNodeForAutogeneratedReports(reporterConfig)
+ }
+
+ err := global.Suite.BuildTree()
+ exitIfErr(err)
+
+ suitePath, err := os.Getwd()
+ exitIfErr(err)
+ suitePath, err = filepath.Abs(suitePath)
+ exitIfErr(err)
+
+ passed, hasFocusedTests := global.Suite.Run(description, suiteLabels, suitePath, global.Failer, reporter, writer, outputInterceptor, interrupt_handler.NewInterruptHandler(client), client, internal.RegisterForProgressSignal, suiteConfig)
+ outputInterceptor.Shutdown()
+
+ flagSet.ValidateDeprecations(deprecationTracker)
+ if deprecationTracker.DidTrackDeprecations() {
+ fmt.Fprintln(formatter.ColorableStdErr, deprecationTracker.DeprecationsReport())
+ }
+
+ if !passed {
+ t.Fail()
+ }
+
+ if passed && hasFocusedTests && strings.TrimSpace(os.Getenv("GINKGO_EDITOR_INTEGRATION")) == "" {
+ fmt.Println("PASS | FOCUSED")
+ os.Exit(types.GINKGO_FOCUS_EXIT_CODE)
+ }
+ return passed
+}
+
+/*
+Skip instructs Ginkgo to skip the current spec
+
+You can call Skip in any Setup or Subject node closure.
+
+For more on how to filter specs in Ginkgo see https://onsi.github.io/ginkgo/#filtering-specs
+*/
+func Skip(message string, callerSkip ...int) {
+ skip := 0
+ if len(callerSkip) > 0 {
+ skip = callerSkip[0]
+ }
+ cl := types.NewCodeLocationWithStackTrace(skip + 1)
+ global.Failer.Skip(message, cl)
+ panic(types.GinkgoErrors.UncaughtGinkgoPanic(cl))
+}
+
+/*
+Fail notifies Ginkgo that the current spec has failed. (Gomega will call Fail for you automatically when an assertion fails.)
+
+Under the hood, Fail panics to end execution of the current spec. Ginkgo will catch this panic and proceed with
+the subsequent spec. If you call Fail, or make an assertion, within a goroutine launched by your spec you must
+add defer GinkgoRecover() to the goroutine to catch the panic emitted by Fail.
+
+You can call Fail in any Setup or Subject node closure.
+
+You can learn more about how Ginkgo manages failures here: https://onsi.github.io/ginkgo/#mental-model-how-ginkgo-handles-failure
+*/
+func Fail(message string, callerSkip ...int) {
+ skip := 0
+ if len(callerSkip) > 0 {
+ skip = callerSkip[0]
+ }
+
+ cl := types.NewCodeLocationWithStackTrace(skip + 1)
+ global.Failer.Fail(message, cl)
+ panic(types.GinkgoErrors.UncaughtGinkgoPanic(cl))
+}
+
+/*
+AbortSuite instructs Ginkgo to fail the current spec and skip all subsequent specs, thereby aborting the suite.
+
+You can call AbortSuite in any Setup or Subject node closure.
+
+You can learn more about how Ginkgo handles suite interruptions here: https://onsi.github.io/ginkgo/#interrupting-aborting-and-timing-out-suites
+*/
+func AbortSuite(message string, callerSkip ...int) {
+ skip := 0
+ if len(callerSkip) > 0 {
+ skip = callerSkip[0]
+ }
+
+ cl := types.NewCodeLocationWithStackTrace(skip + 1)
+ global.Failer.AbortSuite(message, cl)
+ panic(types.GinkgoErrors.UncaughtGinkgoPanic(cl))
+}
+
+/*
+ignorablePanic is used by Gomega to signal to GinkgoRecover that Gomega is handling
+the error associated with this panic. It is used when Eventually/Consistently are passed a func(g Gomega) and the resulting function launches a goroutine that makes a failed assertion. That failed assertion is registered by Gomega and then panics. Ordinarily the panic is captured by Gomega. In the case of a goroutine Gomega can't capture the panic - so we piggy back on GinkgoRecover so users have a single defer GinkgoRecover() pattern to follow. To do that we need to tell Ginkgo to ignore this panic and not register it as a panic on the global Failer.
+*/
+type ignorablePanic interface{ GinkgoRecoverShouldIgnoreThisPanic() }
+
+/*
+GinkgoRecover should be deferred at the top of any spawned goroutine that (may) call `Fail`
+Since Gomega assertions call fail, you should throw a `defer GinkgoRecover()` at the top of any goroutine that
+calls out to Gomega
+
+Here's why: Ginkgo's `Fail` method records the failure and then panics to prevent
+further assertions from running. This panic must be recovered. Normally, Ginkgo recovers the panic for you,
+however if a panic originates on a goroutine *launched* from one of your specs there's no
+way for Ginkgo to rescue the panic. To do this, you must remember to `defer GinkgoRecover()` at the top of such a goroutine.
+
+You can learn more about how Ginkgo manages failures here: https://onsi.github.io/ginkgo/#mental-model-how-ginkgo-handles-failure
+*/
+func GinkgoRecover() {
+ e := recover()
+ if e != nil {
+ if _, ok := e.(ignorablePanic); ok {
+ return
+ }
+ global.Failer.Panic(types.NewCodeLocationWithStackTrace(1), e)
+ }
+}
+
+// pushNode is used by the various test construction DSL methods to push nodes onto the suite
+// it handles returned errors, emits a detailed error message to help the user learn what they may have done wrong, then exits
+func pushNode(node internal.Node, errors []error) bool {
+ exitIfErrors(errors)
+ exitIfErr(global.Suite.PushNode(node))
+ return true
+}
+
+/*
+Describe nodes are Container nodes that allow you to organize your specs. A Describe node's closure can contain any number of
+Setup nodes (e.g. BeforeEach, AfterEach, JustBeforeEach), and Subject nodes (i.e. It).
+
+Context and When nodes are aliases for Describe - use whichever gives your suite a better narrative flow. It is idiomatic
+to Describe the behavior of an object or function and, within that Describe, outline a number of Contexts and Whens.
+
+You can learn more at https://onsi.github.io/ginkgo/#organizing-specs-with-container-nodes
+In addition, container nodes can be decorated with a variety of decorators. You can learn more here: https://onsi.github.io/ginkgo/#decorator-reference
+*/
+func Describe(text string, args ...interface{}) bool {
+ return pushNode(internal.NewNode(deprecationTracker, types.NodeTypeContainer, text, args...))
+}
+
+/*
+FDescribe focuses specs within the Describe block.
+*/
+func FDescribe(text string, args ...interface{}) bool {
+ args = append(args, internal.Focus)
+ return pushNode(internal.NewNode(deprecationTracker, types.NodeTypeContainer, text, args...))
+}
+
+/*
+PDescribe marks specs within the Describe block as pending.
+*/
+func PDescribe(text string, args ...interface{}) bool {
+ args = append(args, internal.Pending)
+ return pushNode(internal.NewNode(deprecationTracker, types.NodeTypeContainer, text, args...))
+}
+
+/*
+XDescribe marks specs within the Describe block as pending.
+
+XDescribe is an alias for PDescribe
+*/
+var XDescribe = PDescribe
+
+/* Context is an alias for Describe - it generates the exact same kind of Container node */
+var Context, FContext, PContext, XContext = Describe, FDescribe, PDescribe, XDescribe
+
+/* When is an alias for Describe - it generates the exact same kind of Container node */
+func When(text string, args ...interface{}) bool {
+ return pushNode(internal.NewNode(deprecationTracker, types.NodeTypeContainer, "when "+text, args...))
+}
+
+/* FWhen focuses specs within the When block - it generates the exact same kind of Container node */
+func FWhen(text string, args ...interface{}) bool {
+ args = append(args, internal.Focus)
+ return pushNode(internal.NewNode(deprecationTracker, types.NodeTypeContainer, "when "+text, args...))
+}
+
+/* PWhen marks specs within the When block as pending - it generates the exact same kind of Container node */
+func PWhen(text string, args ...interface{}) bool {
+ args = append(args, internal.Pending)
+ return pushNode(internal.NewNode(deprecationTracker, types.NodeTypeContainer, "when "+text, args...))
+}
+
+var XWhen = PWhen
+
+/*
+It nodes are Subject nodes that contain your spec code and assertions.
+
+Each It node corresponds to an individual Ginkgo spec. You cannot nest any other Ginkgo nodes within an It node's closure.
+
+You can pass It nodes bare functions (func() {}) or functions that receive a SpecContext or context.Context: func(ctx SpecContext) {} and func (ctx context.Context) {}. If the function takes a context then the It is deemed interruptible and Ginkgo will cancel the context in the event of a timeout (configured via the SpecTimeout() or NodeTimeout() decorators) or of an interrupt signal.
+
+You can learn more at https://onsi.github.io/ginkgo/#spec-subjects-it
+In addition, subject nodes can be decorated with a variety of decorators. You can learn more here: https://onsi.github.io/ginkgo/#decorator-reference
+*/
+func It(text string, args ...interface{}) bool {
+ return pushNode(internal.NewNode(deprecationTracker, types.NodeTypeIt, text, args...))
+}
+
+/*
+FIt allows you to focus an individual It.
+*/
+func FIt(text string, args ...interface{}) bool {
+ args = append(args, internal.Focus)
+ return pushNode(internal.NewNode(deprecationTracker, types.NodeTypeIt, text, args...))
+}
+
+/*
+PIt allows you to mark an individual It as pending.
+*/
+func PIt(text string, args ...interface{}) bool {
+ args = append(args, internal.Pending)
+ return pushNode(internal.NewNode(deprecationTracker, types.NodeTypeIt, text, args...))
+}
+
+/*
+XIt allows you to mark an individual It as pending.
+
+XIt is an alias for PIt
+*/
+var XIt = PIt
+
+/*
+Specify is an alias for It - it can allow for more natural wording in some contexts.
+*/
+var Specify, FSpecify, PSpecify, XSpecify = It, FIt, PIt, XIt
+
+/*
+By allows you to better document complex Specs.
+
+Generally you should try to keep your Its short and to the point. This is not always possible, however,
+especially in the context of integration tests that capture complex or lengthy workflows.
+
+By allows you to document such flows. By may be called within a Setup or Subject node (It, BeforeEach, etc...)
+and will simply log the passed in text to the GinkgoWriter. If By is handed a function it will immediately run the function.
+
+By will also generate and attach a ReportEntry to the spec. This will ensure that By annotations appear in Ginkgo's machine-readable reports.
+
+Note that By does not generate a new Ginkgo node - rather it is simply syntactic sugar around GinkgoWriter and AddReportEntry
+You can learn more about By here: https://onsi.github.io/ginkgo/#documenting-complex-specs-by
+*/
+func By(text string, callback ...func()) {
+ exitIfErr(global.Suite.By(text, callback...))
+}
+
+/*
+BeforeSuite nodes are suite-level Setup nodes that run just once before any specs are run.
+When running in parallel, each parallel process will call BeforeSuite.
+
+You may only register *one* BeforeSuite handler per test suite. You typically do so in your bootstrap file at the top level.
+
+BeforeSuite can take a func() body, or an interruptible func(SpecContext)/func(context.Context) body.
+
+You cannot nest any other Ginkgo nodes within a BeforeSuite node's closure.
+You can learn more here: https://onsi.github.io/ginkgo/#suite-setup-and-cleanup-beforesuite-and-aftersuite
+*/
+func BeforeSuite(body interface{}, args ...interface{}) bool {
+ combinedArgs := []interface{}{body}
+ combinedArgs = append(combinedArgs, args...)
+ return pushNode(internal.NewNode(deprecationTracker, types.NodeTypeBeforeSuite, "", combinedArgs...))
+}
+
+/*
+AfterSuite nodes are suite-level Setup nodes run after all specs have finished - regardless of whether specs have passed or failed.
+AfterSuite node closures always run, even if Ginkgo receives an interrupt signal (^C), in order to ensure cleanup occurs.
+
+When running in parallel, each parallel process will call AfterSuite.
+
+You may only register *one* AfterSuite handler per test suite. You typically do so in your bootstrap file at the top level.
+
+AfterSuite can take a func() body, or an interruptible func(SpecContext)/func(context.Context) body.
+
+You cannot nest any other Ginkgo nodes within an AfterSuite node's closure.
+You can learn more here: https://onsi.github.io/ginkgo/#suite-setup-and-cleanup-beforesuite-and-aftersuite
+*/
+func AfterSuite(body interface{}, args ...interface{}) bool {
+ combinedArgs := []interface{}{body}
+ combinedArgs = append(combinedArgs, args...)
+ return pushNode(internal.NewNode(deprecationTracker, types.NodeTypeAfterSuite, "", combinedArgs...))
+}
+
+/*
+SynchronizedBeforeSuite nodes allow you to perform some of the suite setup just once - on parallel process #1 - and then pass information
+from that setup to the rest of the suite setup on all processes. This is useful for performing expensive or singleton setup once, then passing
+information from that setup to all parallel processes.
+
+SynchronizedBeforeSuite accomplishes this by taking *two* function arguments and passing data between them.
+The first function is only run on parallel process #1. The second is run on all processes, but *only* after the first function completes successfully. The functions have the following signatures:
+
+The first function (which only runs on process #1) can have any of the following the signatures:
+
+ func()
+ func(ctx context.Context)
+ func(ctx SpecContext)
+ func() []byte
+ func(ctx context.Context) []byte
+ func(ctx SpecContext) []byte
+
+The byte array returned by the first function (if present) is then passed to the second function, which can have any of the following signature:
+
+ func()
+ func(ctx context.Context)
+ func(ctx SpecContext)
+ func(data []byte)
+ func(ctx context.Context, data []byte)
+ func(ctx SpecContext, data []byte)
+
+If either function receives a context.Context/SpecContext it is considered interruptible.
+
+You cannot nest any other Ginkgo nodes within an SynchronizedBeforeSuite node's closure.
+You can learn more, and see some examples, here: https://onsi.github.io/ginkgo/#parallel-suite-setup-and-cleanup-synchronizedbeforesuite-and-synchronizedaftersuite
+*/
+func SynchronizedBeforeSuite(process1Body interface{}, allProcessBody interface{}, args ...interface{}) bool {
+ combinedArgs := []interface{}{process1Body, allProcessBody}
+ combinedArgs = append(combinedArgs, args...)
+
+ return pushNode(internal.NewNode(deprecationTracker, types.NodeTypeSynchronizedBeforeSuite, "", combinedArgs...))
+}
+
+/*
+SynchronizedAfterSuite nodes complement the SynchronizedBeforeSuite nodes in solving the problem of splitting clean up into a piece that runs on all processes
+and a piece that must only run once - on process #1.
+
+SynchronizedAfterSuite accomplishes this by taking *two* function arguments. The first runs on all processes. The second runs only on parallel process #1
+and *only* after all other processes have finished and exited. This ensures that process #1, and any resources it is managing, remain alive until
+all other processes are finished. These two functions can be bare functions (func()) or interruptible (func(context.Context)/func(SpecContext))
+
+Note that you can also use DeferCleanup() in SynchronizedBeforeSuite to accomplish similar results.
+
+You cannot nest any other Ginkgo nodes within an SynchronizedAfterSuite node's closure.
+You can learn more, and see some examples, here: https://onsi.github.io/ginkgo/#parallel-suite-setup-and-cleanup-synchronizedbeforesuite-and-synchronizedaftersuite
+*/
+func SynchronizedAfterSuite(allProcessBody interface{}, process1Body interface{}, args ...interface{}) bool {
+ combinedArgs := []interface{}{allProcessBody, process1Body}
+ combinedArgs = append(combinedArgs, args...)
+
+ return pushNode(internal.NewNode(deprecationTracker, types.NodeTypeSynchronizedAfterSuite, "", combinedArgs...))
+}
+
+/*
+BeforeEach nodes are Setup nodes whose closures run before It node closures. When multiple BeforeEach nodes
+are defined in nested Container nodes the outermost BeforeEach node closures are run first.
+
+BeforeEach can take a func() body, or an interruptible func(SpecContext)/func(context.Context) body.
+
+You cannot nest any other Ginkgo nodes within a BeforeEach node's closure.
+You can learn more here: https://onsi.github.io/ginkgo/#extracting-common-setup-beforeeach
+*/
+func BeforeEach(args ...interface{}) bool {
+ return pushNode(internal.NewNode(deprecationTracker, types.NodeTypeBeforeEach, "", args...))
+}
+
+/*
+JustBeforeEach nodes are similar to BeforeEach nodes, however they are guaranteed to run *after* all BeforeEach node closures - just before the It node closure.
+This can allow you to separate configuration from creation of resources for a spec.
+
+JustBeforeEach can take a func() body, or an interruptible func(SpecContext)/func(context.Context) body.
+
+You cannot nest any other Ginkgo nodes within a JustBeforeEach node's closure.
+You can learn more and see some examples here: https://onsi.github.io/ginkgo/#separating-creation-and-configuration-justbeforeeach
+*/
+func JustBeforeEach(args ...interface{}) bool {
+ return pushNode(internal.NewNode(deprecationTracker, types.NodeTypeJustBeforeEach, "", args...))
+}
+
+/*
+AfterEach nodes are Setup nodes whose closures run after It node closures. When multiple AfterEach nodes
+are defined in nested Container nodes the innermost AfterEach node closures are run first.
+
+Note that you can also use DeferCleanup() in other Setup or Subject nodes to accomplish similar results.
+
+AfterEach can take a func() body, or an interruptible func(SpecContext)/func(context.Context) body.
+
+You cannot nest any other Ginkgo nodes within an AfterEach node's closure.
+You can learn more here: https://onsi.github.io/ginkgo/#spec-cleanup-aftereach-and-defercleanup
+*/
+func AfterEach(args ...interface{}) bool {
+ return pushNode(internal.NewNode(deprecationTracker, types.NodeTypeAfterEach, "", args...))
+}
+
+/*
+JustAfterEach nodes are similar to AfterEach nodes, however they are guaranteed to run *before* all AfterEach node closures - just after the It node closure. This can allow you to separate diagnostics collection from teardown for a spec.
+
+JustAfterEach can take a func() body, or an interruptible func(SpecContext)/func(context.Context) body.
+
+You cannot nest any other Ginkgo nodes within a JustAfterEach node's closure.
+You can learn more and see some examples here: https://onsi.github.io/ginkgo/#separating-diagnostics-collection-and-teardown-justaftereach
+*/
+func JustAfterEach(args ...interface{}) bool {
+ return pushNode(internal.NewNode(deprecationTracker, types.NodeTypeJustAfterEach, "", args...))
+}
+
+/*
+BeforeAll nodes are Setup nodes that can occur inside Ordered containers. They run just once before any specs in the Ordered container run.
+
+Multiple BeforeAll nodes can be defined in a given Ordered container however they cannot be nested inside any other container.
+
+BeforeAll can take a func() body, or an interruptible func(SpecContext)/func(context.Context) body.
+
+You cannot nest any other Ginkgo nodes within a BeforeAll node's closure.
+You can learn more about Ordered Containers at: https://onsi.github.io/ginkgo/#ordered-containers
+And you can learn more about BeforeAll at: https://onsi.github.io/ginkgo/#setup-in-ordered-containers-beforeall-and-afterall
+*/
+func BeforeAll(args ...interface{}) bool {
+ return pushNode(internal.NewNode(deprecationTracker, types.NodeTypeBeforeAll, "", args...))
+}
+
+/*
+AfterAll nodes are Setup nodes that can occur inside Ordered containers. They run just once after all specs in the Ordered container have run.
+
+Multiple AfterAll nodes can be defined in a given Ordered container however they cannot be nested inside any other container.
+
+Note that you can also use DeferCleanup() in a BeforeAll node to accomplish similar behavior.
+
+AfterAll can take a func() body, or an interruptible func(SpecContext)/func(context.Context) body.
+
+You cannot nest any other Ginkgo nodes within an AfterAll node's closure.
+You can learn more about Ordered Containers at: https://onsi.github.io/ginkgo/#ordered-containers
+And you can learn more about AfterAll at: https://onsi.github.io/ginkgo/#setup-in-ordered-containers-beforeall-and-afterall
+*/
+func AfterAll(args ...interface{}) bool {
+ return pushNode(internal.NewNode(deprecationTracker, types.NodeTypeAfterAll, "", args...))
+}
+
+/*
+DeferCleanup can be called within any Setup or Subject node to register a cleanup callback that Ginkgo will call at the appropriate time to cleanup after the spec.
+
+DeferCleanup can be passed:
+1. A function that takes no arguments and returns no values.
+2. A function that returns multiple values. `DeferCleanup` will ignore all these return values except for the last one. If this last return value is a non-nil error `DeferCleanup` will fail the spec).
+3. A function that takes a context.Context or SpecContext (and optionally returns multiple values). The resulting cleanup node is deemed interruptible and the passed-in context will be cancelled in the event of a timeout or interrupt.
+4. A function that takes arguments (and optionally returns multiple values) followed by a list of arguments to pass to the function.
+5. A function that takes SpecContext and a list of arguments (and optionally returns multiple values) followed by a list of arguments to pass to the function.
+
+For example:
+
+ BeforeEach(func() {
+ DeferCleanup(os.SetEnv, "FOO", os.GetEnv("FOO"))
+ os.SetEnv("FOO", "BAR")
+ })
+
+will register a cleanup handler that will set the environment variable "FOO" to its current value (obtained by os.GetEnv("FOO")) after the spec runs and then sets the environment variable "FOO" to "BAR" for the current spec.
+
+Similarly:
+
+ BeforeEach(func() {
+ DeferCleanup(func(ctx SpecContext, path) {
+ req, err := http.NewRequestWithContext(ctx, "POST", path, nil)
+ Expect(err).NotTo(HaveOccured())
+ _, err := http.DefaultClient.Do(req)
+ Expect(err).NotTo(HaveOccured())
+ }, "example.com/cleanup", NodeTimeout(time.Second*3))
+ })
+
+will register a cleanup handler that will have three seconds to successfully complete a request to the specified path. Note that we do not specify a context in the list of arguments passed to DeferCleanup - only in the signature of the function we pass in. Ginkgo will detect the requested context and supply a SpecContext when it invokes the cleanup node. If you want to pass in your own context in addition to the Ginkgo-provided SpecContext you must specify the SpecContext as the first argument (e.g. func(ctx SpecContext, otherCtx context.Context)).
+
+When DeferCleanup is called in BeforeEach, JustBeforeEach, It, AfterEach, or JustAfterEach the registered callback will be invoked when the spec completes (i.e. it will behave like an AfterEach node)
+When DeferCleanup is called in BeforeAll or AfterAll the registered callback will be invoked when the ordered container completes (i.e. it will behave like an AfterAll node)
+When DeferCleanup is called in BeforeSuite, SynchronizedBeforeSuite, AfterSuite, or SynchronizedAfterSuite the registered callback will be invoked when the suite completes (i.e. it will behave like an AfterSuite node)
+
+Note that DeferCleanup does not represent a node but rather dynamically generates the appropriate type of cleanup node based on the context in which it is called. As such you must call DeferCleanup within a Setup or Subject node, and not within a Container node.
+You can learn more about DeferCleanup here: https://onsi.github.io/ginkgo/#cleaning-up-our-cleanup-code-defercleanup
+*/
+func DeferCleanup(args ...interface{}) {
+ fail := func(message string, cl types.CodeLocation) {
+ global.Failer.Fail(message, cl)
+ }
+ pushNode(internal.NewCleanupNode(deprecationTracker, fail, args...))
+}
+
+/*
+AttachProgressReporter allows you to register a function that will be called whenever Ginkgo generates a Progress Report. The contents returned by the function will be included in the report.
+
+**This is an experimental feature and the public-facing interface may change in a future minor version of Ginkgo**
+
+Progress Reports are generated:
+- whenever the user explicitly requests one (via `SIGINFO` or `SIGUSR1`)
+- on nodes decorated with PollProgressAfter
+- on suites run with --poll-progress-after
+- whenever a test times out
+
+Ginkgo uses Progress Reports to convey the current state of the test suite, including any running goroutines. By attaching a progress reporter you are able to supplement these reports with additional information.
+
+# AttachProgressReporter returns a function that can be called to detach the progress reporter
+
+You can learn more about AttachProgressReporter here: https://onsi.github.io/ginkgo/#attaching-additional-information-to-progress-reports
+*/
+func AttachProgressReporter(reporter func() string) func() {
+ return global.Suite.AttachProgressReporter(reporter)
+}
diff --git a/vendor/github.com/onsi/ginkgo/v2/decorator_dsl.go b/vendor/github.com/onsi/ginkgo/v2/decorator_dsl.go
new file mode 100644
index 000000000..c65af4ce1
--- /dev/null
+++ b/vendor/github.com/onsi/ginkgo/v2/decorator_dsl.go
@@ -0,0 +1,143 @@
+package ginkgo
+
+import (
+ "github.com/onsi/ginkgo/v2/internal"
+)
+
+/*
+Offset(uint) is a decorator that allows you to change the stack-frame offset used when computing the line number of the node in question.
+
+You can learn more here: https://onsi.github.io/ginkgo/#the-offset-decorator
+You can learn more about decorators here: https://onsi.github.io/ginkgo/#decorator-reference
+*/
+type Offset = internal.Offset
+
+/*
+FlakeAttempts(uint N) is a decorator that allows you to mark individual specs or spec containers as flaky. Ginkgo will run them up to `N` times until they pass.
+
+You can learn more here: https://onsi.github.io/ginkgo/#the-flakeattempts-decorator
+You can learn more about decorators here: https://onsi.github.io/ginkgo/#decorator-reference
+*/
+type FlakeAttempts = internal.FlakeAttempts
+
+/*
+MustPassRepeatedly(uint N) is a decorator that allows you to repeat the execution of individual specs or spec containers. Ginkgo will run them up to `N` times until they fail.
+
+You can learn more here: https://onsi.github.io/ginkgo/#the-mustpassrepeatedly-decorator
+You can learn more about decorators here: https://onsi.github.io/ginkgo/#decorator-reference
+*/
+type MustPassRepeatedly = internal.MustPassRepeatedly
+
+/*
+Focus is a decorator that allows you to mark a spec or container as focused. Identical to FIt and FDescribe.
+
+You can learn more here: https://onsi.github.io/ginkgo/#filtering-specs
+You can learn more about decorators here: https://onsi.github.io/ginkgo/#decorator-reference
+*/
+const Focus = internal.Focus
+
+/*
+Pending is a decorator that allows you to mark a spec or container as pending. Identical to PIt and PDescribe.
+
+You can learn more here: https://onsi.github.io/ginkgo/#filtering-specs
+You can learn more about decorators here: https://onsi.github.io/ginkgo/#decorator-reference
+*/
+const Pending = internal.Pending
+
+/*
+Serial is a decorator that allows you to mark a spec or container as serial. These specs will never run in parallel with other specs.
+Specs in ordered containers cannot be marked as serial - mark the ordered container instead.
+
+You can learn more here: https://onsi.github.io/ginkgo/#serial-specs
+You can learn more about decorators here: https://onsi.github.io/ginkgo/#decorator-reference
+*/
+const Serial = internal.Serial
+
+/*
+Ordered is a decorator that allows you to mark a container as ordered. Specs in the container will always run in the order they appear.
+They will never be randomized and they will never run in parallel with one another, though they may run in parallel with other specs.
+
+You can learn more here: https://onsi.github.io/ginkgo/#ordered-containers
+You can learn more about decorators here: https://onsi.github.io/ginkgo/#decorator-reference
+*/
+const Ordered = internal.Ordered
+
+/*
+ContinueOnFailure is a decorator that allows you to mark an Ordered container to continue running specs even if failures occur. Ordinarily an ordered container will stop running specs after the first failure occurs. Note that if a BeforeAll or a BeforeEach/JustBeforeEach annotated with OncePerOrdered fails then no specs will run as the precondition for the Ordered container will consider to be failed.
+
+ContinueOnFailure only applies to the outermost Ordered container. Attempting to place ContinueOnFailure in a nested container will result in an error.
+
+You can learn more here: https://onsi.github.io/ginkgo/#ordered-containers
+You can learn more about decorators here: https://onsi.github.io/ginkgo/#decorator-reference
+*/
+const ContinueOnFailure = internal.ContinueOnFailure
+
+/*
+OncePerOrdered is a decorator that allows you to mark outer BeforeEach, AfterEach, JustBeforeEach, and JustAfterEach setup nodes to run once
+per ordered context. Normally these setup nodes run around each individual spec, with OncePerOrdered they will run once around the set of specs in an ordered container.
+The behavior for non-Ordered containers/specs is unchanged.
+
+You can learn more here: https://onsi.github.io/ginkgo/#setup-around-ordered-containers-the-onceperordered-decorator
+You can learn more about decorators here: https://onsi.github.io/ginkgo/#decorator-reference
+*/
+const OncePerOrdered = internal.OncePerOrdered
+
+/*
+Label decorates specs with Labels. Multiple labels can be passed to Label and these can be arbitrary strings but must not include the following characters: "&|!,()/".
+Labels can be applied to container and subject nodes, but not setup nodes. You can provide multiple Labels to a given node and a spec's labels is the union of all labels in its node hierarchy.
+
+You can learn more here: https://onsi.github.io/ginkgo/#spec-labels
+You can learn more about decorators here: https://onsi.github.io/ginkgo/#decorator-reference
+*/
+func Label(labels ...string) Labels {
+ return Labels(labels)
+}
+
+/*
+Labels are the type for spec Label decorators. Use Label(...) to construct Labels.
+You can learn more here: https://onsi.github.io/ginkgo/#spec-labels
+*/
+type Labels = internal.Labels
+
+/*
+PollProgressAfter allows you to override the configured value for --poll-progress-after for a particular node.
+
+Ginkgo will start emitting node progress if the node is still running after a duration of PollProgressAfter. This allows you to get quicker feedback about the state of a long-running spec.
+*/
+type PollProgressAfter = internal.PollProgressAfter
+
+/*
+PollProgressInterval allows you to override the configured value for --poll-progress-interval for a particular node.
+
+Once a node has been running for longer than PollProgressAfter Ginkgo will emit node progress periodically at an interval of PollProgresInterval.
+*/
+type PollProgressInterval = internal.PollProgressInterval
+
+/*
+NodeTimeout allows you to specify a timeout for an indivdiual node. The node cannot be a container and must be interruptible (i.e. it must be passed a function that accepts a SpecContext or context.Context).
+
+If the node does not exit within the specified NodeTimeout its context will be cancelled. The node wil then have a period of time controlled by the GracePeriod decorator (or global --grace-period command-line argument) to exit. If the node does not exit within GracePeriod Ginkgo will leak the node and proceed to any clean-up nodes associated with the current spec.
+*/
+type NodeTimeout = internal.NodeTimeout
+
+/*
+SpecTimeout allows you to specify a timeout for an indivdiual spec. SpecTimeout can only decorate interruptible It nodes.
+
+All nodes associated with the It node will need to complete before the SpecTimeout has elapsed. Individual nodes (e.g. BeforeEach) may be decorated with different NodeTimeouts - but these can only serve to provide a more stringent deadline for the node in question; they cannot extend the deadline past the SpecTimeout.
+
+If the spec does not complete within the specified SpecTimeout the currently running node will have its context cancelled. The node wil then have a period of time controlled by that node's GracePeriod decorator (or global --grace-period command-line argument) to exit. If the node does not exit within GracePeriod Ginkgo will leak the node and proceed to any clean-up nodes associated with the current spec.
+*/
+type SpecTimeout = internal.SpecTimeout
+
+/*
+GracePeriod denotes the period of time Ginkgo will wait for an interruptible node to exit once an interruption (whether due to a timeout or a user-invoked signal) has occurred. If both the global --grace-period cli flag and a GracePeriod decorator are specified the value in the decorator will take precedence.
+
+Nodes that do not finish within a GracePeriod will be leaked and Ginkgo will proceed to run subsequent nodes. In the event of a timeout, such leaks will be reported to the user.
+*/
+type GracePeriod = internal.GracePeriod
+
+/*
+SuppressProgressReporting is a decorator that allows you to disable progress reporting of a particular node. This is useful if `ginkgo -v -progress` is generating too much noise; particularly
+if you have a `ReportAfterEach` node that is running for every skipped spec and is generating lots of progress reports.
+*/
+const SuppressProgressReporting = internal.SuppressProgressReporting
diff --git a/vendor/github.com/onsi/ginkgo/v2/deprecated_dsl.go b/vendor/github.com/onsi/ginkgo/v2/deprecated_dsl.go
new file mode 100644
index 000000000..f912bbec6
--- /dev/null
+++ b/vendor/github.com/onsi/ginkgo/v2/deprecated_dsl.go
@@ -0,0 +1,135 @@
+package ginkgo
+
+import (
+ "time"
+
+ "github.com/onsi/ginkgo/v2/internal"
+ "github.com/onsi/ginkgo/v2/internal/global"
+ "github.com/onsi/ginkgo/v2/reporters"
+ "github.com/onsi/ginkgo/v2/types"
+)
+
+/*
+Deprecated: Done Channel for asynchronous testing
+
+The Done channel pattern is no longer supported in Ginkgo 2.0.
+See here for better patterns for asynchronous testing: https://onsi.github.io/ginkgo/#patterns-for-asynchronous-testing
+
+For a migration guide see: https://onsi.github.io/ginkgo/MIGRATING_TO_V2#removed-async-testing
+*/
+type Done = internal.Done
+
+/*
+Deprecated: Custom Ginkgo test reporters are deprecated in Ginkgo 2.0.
+
+Use Ginkgo's reporting nodes instead and 2.0 reporting infrastructure instead. You can learn more here: https://onsi.github.io/ginkgo/#reporting-infrastructure
+For a migration guide see: https://onsi.github.io/ginkgo/MIGRATING_TO_V2#removed-custom-reporters
+*/
+type Reporter = reporters.DeprecatedReporter
+
+/*
+Deprecated: Custom Reporters have been removed in Ginkgo 2.0. RunSpecsWithDefaultAndCustomReporters will simply call RunSpecs()
+
+Use Ginkgo's reporting nodes instead and 2.0 reporting infrastructure instead. You can learn more here: https://onsi.github.io/ginkgo/#reporting-infrastructure
+For a migration guide see: https://onsi.github.io/ginkgo/MIGRATING_TO_V2#removed-custom-reporters
+*/
+func RunSpecsWithDefaultAndCustomReporters(t GinkgoTestingT, description string, _ []Reporter) bool {
+ deprecationTracker.TrackDeprecation(types.Deprecations.CustomReporter())
+ return RunSpecs(t, description)
+}
+
+/*
+Deprecated: Custom Reporters have been removed in Ginkgo 2.0. RunSpecsWithCustomReporters will simply call RunSpecs()
+
+Use Ginkgo's reporting nodes instead and 2.0 reporting infrastructure instead. You can learn more here: https://onsi.github.io/ginkgo/#reporting-infrastructure
+For a migration guide see: https://onsi.github.io/ginkgo/MIGRATING_TO_V2#removed-custom-reporters
+*/
+func RunSpecsWithCustomReporters(t GinkgoTestingT, description string, _ []Reporter) bool {
+ deprecationTracker.TrackDeprecation(types.Deprecations.CustomReporter())
+ return RunSpecs(t, description)
+}
+
+/*
+Deprecated: GinkgoTestDescription has been replaced with SpecReport.
+
+Use CurrentSpecReport() instead.
+You can learn more here: https://onsi.github.io/ginkgo/#getting-a-report-for-the-current-spec
+The SpecReport type is documented here: https://pkg.go.dev/github.com/onsi/ginkgo/v2/types#SpecReport
+*/
+type DeprecatedGinkgoTestDescription struct {
+ FullTestText string
+ ComponentTexts []string
+ TestText string
+
+ FileName string
+ LineNumber int
+
+ Failed bool
+ Duration time.Duration
+}
+type GinkgoTestDescription = DeprecatedGinkgoTestDescription
+
+/*
+Deprecated: CurrentGinkgoTestDescription has been replaced with CurrentSpecReport.
+
+Use CurrentSpecReport() instead.
+You can learn more here: https://onsi.github.io/ginkgo/#getting-a-report-for-the-current-spec
+The SpecReport type is documented here: https://pkg.go.dev/github.com/onsi/ginkgo/v2/types#SpecReport
+*/
+func CurrentGinkgoTestDescription() DeprecatedGinkgoTestDescription {
+ deprecationTracker.TrackDeprecation(
+ types.Deprecations.CurrentGinkgoTestDescription(),
+ types.NewCodeLocation(1),
+ )
+ report := global.Suite.CurrentSpecReport()
+ if report.State == types.SpecStateInvalid {
+ return GinkgoTestDescription{}
+ }
+ componentTexts := []string{}
+ componentTexts = append(componentTexts, report.ContainerHierarchyTexts...)
+ componentTexts = append(componentTexts, report.LeafNodeText)
+
+ return DeprecatedGinkgoTestDescription{
+ ComponentTexts: componentTexts,
+ FullTestText: report.FullText(),
+ TestText: report.LeafNodeText,
+ FileName: report.LeafNodeLocation.FileName,
+ LineNumber: report.LeafNodeLocation.LineNumber,
+ Failed: report.State.Is(types.SpecStateFailureStates),
+ Duration: report.RunTime,
+ }
+}
+
+/*
+Deprecated: GinkgoParallelNode() has been renamed to GinkgoParallelProcess()
+*/
+func GinkgoParallelNode() int {
+ deprecationTracker.TrackDeprecation(
+ types.Deprecations.ParallelNode(),
+ types.NewCodeLocation(1),
+ )
+ return GinkgoParallelProcess()
+}
+
+/*
+Deprecated: Benchmarker has been removed from Ginkgo 2.0
+
+Use Gomega's gmeasure package instead.
+You can learn more here: https://onsi.github.io/ginkgo/#benchmarking-code
+*/
+type Benchmarker interface {
+ Time(name string, body func(), info ...interface{}) (elapsedTime time.Duration)
+ RecordValue(name string, value float64, info ...interface{})
+ RecordValueWithPrecision(name string, value float64, units string, precision int, info ...interface{})
+}
+
+/*
+Deprecated: Measure() has been removed from Ginkgo 2.0
+
+Use Gomega's gmeasure package instead.
+You can learn more here: https://onsi.github.io/ginkgo/#benchmarking-code
+*/
+func Measure(_ ...interface{}) bool {
+ deprecationTracker.TrackDeprecation(types.Deprecations.Measure(), types.NewCodeLocation(1))
+ return true
+}
diff --git a/vendor/github.com/onsi/ginkgo/reporters/stenographer/support/go-colorable/LICENSE b/vendor/github.com/onsi/ginkgo/v2/formatter/colorable_others.go
similarity index 76%
rename from vendor/github.com/onsi/ginkgo/reporters/stenographer/support/go-colorable/LICENSE
rename to vendor/github.com/onsi/ginkgo/v2/formatter/colorable_others.go
index 91b5cef30..778bfd7c7 100644
--- a/vendor/github.com/onsi/ginkgo/reporters/stenographer/support/go-colorable/LICENSE
+++ b/vendor/github.com/onsi/ginkgo/v2/formatter/colorable_others.go
@@ -1,3 +1,11 @@
+// +build !windows
+
+/*
+These packages are used for colorize on Windows and contributed by mattn.jp@gmail.com
+
+ * go-colorable:
+ * go-isatty:
+
The MIT License (MIT)
Copyright (c) 2016 Yasuhiro Matsumoto
@@ -19,3 +27,15 @@ AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
SOFTWARE.
+*/
+
+package formatter
+
+import (
+ "io"
+ "os"
+)
+
+func newColorable(file *os.File) io.Writer {
+ return file
+}
diff --git a/vendor/github.com/onsi/ginkgo/reporters/stenographer/support/go-colorable/colorable_windows.go b/vendor/github.com/onsi/ginkgo/v2/formatter/colorable_windows.go
similarity index 90%
rename from vendor/github.com/onsi/ginkgo/reporters/stenographer/support/go-colorable/colorable_windows.go
rename to vendor/github.com/onsi/ginkgo/v2/formatter/colorable_windows.go
index 108800923..dd1d143cc 100644
--- a/vendor/github.com/onsi/ginkgo/reporters/stenographer/support/go-colorable/colorable_windows.go
+++ b/vendor/github.com/onsi/ginkgo/v2/formatter/colorable_windows.go
@@ -1,4 +1,33 @@
-package colorable
+/*
+These packages are used for colorize on Windows and contributed by mattn.jp@gmail.com
+
+ * go-colorable:
+ * go-isatty:
+
+The MIT License (MIT)
+
+Copyright (c) 2016 Yasuhiro Matsumoto
+
+Permission is hereby granted, free of charge, to any person obtaining a copy
+of this software and associated documentation files (the "Software"), to deal
+in the Software without restriction, including without limitation the rights
+to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
+copies of the Software, and to permit persons to whom the Software is
+furnished to do so, subject to the following conditions:
+
+The above copyright notice and this permission notice shall be included in all
+copies or substantial portions of the Software.
+
+THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
+AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
+LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
+OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
+SOFTWARE.
+*/
+
+package formatter
import (
"bytes"
@@ -10,10 +39,24 @@ import (
"strings"
"syscall"
"unsafe"
+)
- "github.com/onsi/ginkgo/reporters/stenographer/support/go-isatty"
+var (
+ kernel32 = syscall.NewLazyDLL("kernel32.dll")
+ procGetConsoleScreenBufferInfo = kernel32.NewProc("GetConsoleScreenBufferInfo")
+ procSetConsoleTextAttribute = kernel32.NewProc("SetConsoleTextAttribute")
+ procSetConsoleCursorPosition = kernel32.NewProc("SetConsoleCursorPosition")
+ procFillConsoleOutputCharacter = kernel32.NewProc("FillConsoleOutputCharacterW")
+ procFillConsoleOutputAttribute = kernel32.NewProc("FillConsoleOutputAttribute")
+ procGetConsoleMode = kernel32.NewProc("GetConsoleMode")
)
+func isTerminal(fd uintptr) bool {
+ var st uint32
+ r, _, e := syscall.Syscall(procGetConsoleMode.Addr(), 2, fd, uintptr(unsafe.Pointer(&st)), 0)
+ return r != 0 && e == 0
+}
+
const (
foregroundBlue = 0x1
foregroundGreen = 0x2
@@ -52,45 +95,28 @@ type consoleScreenBufferInfo struct {
maximumWindowSize coord
}
-var (
- kernel32 = syscall.NewLazyDLL("kernel32.dll")
- procGetConsoleScreenBufferInfo = kernel32.NewProc("GetConsoleScreenBufferInfo")
- procSetConsoleTextAttribute = kernel32.NewProc("SetConsoleTextAttribute")
- procSetConsoleCursorPosition = kernel32.NewProc("SetConsoleCursorPosition")
- procFillConsoleOutputCharacter = kernel32.NewProc("FillConsoleOutputCharacterW")
- procFillConsoleOutputAttribute = kernel32.NewProc("FillConsoleOutputAttribute")
-)
-
-type Writer struct {
+type writer struct {
out io.Writer
handle syscall.Handle
lastbuf bytes.Buffer
oldattr word
}
-func NewColorable(file *os.File) io.Writer {
+func newColorable(file *os.File) io.Writer {
if file == nil {
panic("nil passed instead of *os.File to NewColorable()")
}
- if isatty.IsTerminal(file.Fd()) {
+ if isTerminal(file.Fd()) {
var csbi consoleScreenBufferInfo
handle := syscall.Handle(file.Fd())
procGetConsoleScreenBufferInfo.Call(uintptr(handle), uintptr(unsafe.Pointer(&csbi)))
- return &Writer{out: file, handle: handle, oldattr: csbi.attributes}
+ return &writer{out: file, handle: handle, oldattr: csbi.attributes}
} else {
return file
}
}
-func NewColorableStdout() io.Writer {
- return NewColorable(os.Stdout)
-}
-
-func NewColorableStderr() io.Writer {
- return NewColorable(os.Stderr)
-}
-
var color256 = map[int]int{
0: 0x000000,
1: 0x800000,
@@ -350,7 +376,7 @@ var color256 = map[int]int{
255: 0xeeeeee,
}
-func (w *Writer) Write(data []byte) (n int, err error) {
+func (w *writer) Write(data []byte) (n int, err error) {
var csbi consoleScreenBufferInfo
procGetConsoleScreenBufferInfo.Call(uintptr(w.handle), uintptr(unsafe.Pointer(&csbi)))
diff --git a/vendor/github.com/onsi/ginkgo/formatter/formatter.go b/vendor/github.com/onsi/ginkgo/v2/formatter/formatter.go
similarity index 64%
rename from vendor/github.com/onsi/ginkgo/formatter/formatter.go
rename to vendor/github.com/onsi/ginkgo/v2/formatter/formatter.go
index 30d7cbe12..743555dde 100644
--- a/vendor/github.com/onsi/ginkgo/formatter/formatter.go
+++ b/vendor/github.com/onsi/ginkgo/v2/formatter/formatter.go
@@ -2,10 +2,16 @@ package formatter
import (
"fmt"
+ "os"
"regexp"
+ "strconv"
"strings"
)
+// ColorableStdOut and ColorableStdErr enable color output support on Windows
+var ColorableStdOut = newColorable(os.Stdout)
+var ColorableStdErr = newColorable(os.Stderr)
+
const COLS = 80
type ColorMode uint8
@@ -45,6 +51,37 @@ func NewWithNoColorBool(noColor bool) Formatter {
}
func New(colorMode ColorMode) Formatter {
+ colorAliases := map[string]int{
+ "black": 0,
+ "red": 1,
+ "green": 2,
+ "yellow": 3,
+ "blue": 4,
+ "magenta": 5,
+ "cyan": 6,
+ "white": 7,
+ }
+ for colorAlias, n := range colorAliases {
+ colorAliases[fmt.Sprintf("bright-%s", colorAlias)] = n + 8
+ }
+
+ getColor := func(color, defaultEscapeCode string) string {
+ color = strings.ToUpper(strings.ReplaceAll(color, "-", "_"))
+ envVar := fmt.Sprintf("GINKGO_CLI_COLOR_%s", color)
+ envVarColor := os.Getenv(envVar)
+ if envVarColor == "" {
+ return defaultEscapeCode
+ }
+ if colorCode, ok := colorAliases[envVarColor]; ok {
+ return fmt.Sprintf("\x1b[38;5;%dm", colorCode)
+ }
+ colorCode, err := strconv.Atoi(envVarColor)
+ if err != nil || colorCode < 0 || colorCode > 255 {
+ return defaultEscapeCode
+ }
+ return fmt.Sprintf("\x1b[38;5;%dm", colorCode)
+ }
+
f := Formatter{
ColorMode: colorMode,
colors: map[string]string{
@@ -52,18 +89,18 @@ func New(colorMode ColorMode) Formatter {
"bold": "\x1b[1m",
"underline": "\x1b[4m",
- "red": "\x1b[38;5;9m",
- "orange": "\x1b[38;5;214m",
- "coral": "\x1b[38;5;204m",
- "magenta": "\x1b[38;5;13m",
- "green": "\x1b[38;5;10m",
- "dark-green": "\x1b[38;5;28m",
- "yellow": "\x1b[38;5;11m",
- "light-yellow": "\x1b[38;5;228m",
- "cyan": "\x1b[38;5;14m",
- "gray": "\x1b[38;5;243m",
- "light-gray": "\x1b[38;5;246m",
- "blue": "\x1b[38;5;12m",
+ "red": getColor("red", "\x1b[38;5;9m"),
+ "orange": getColor("orange", "\x1b[38;5;214m"),
+ "coral": getColor("coral", "\x1b[38;5;204m"),
+ "magenta": getColor("magenta", "\x1b[38;5;13m"),
+ "green": getColor("green", "\x1b[38;5;10m"),
+ "dark-green": getColor("dark-green", "\x1b[38;5;28m"),
+ "yellow": getColor("yellow", "\x1b[38;5;11m"),
+ "light-yellow": getColor("light-yellow", "\x1b[38;5;228m"),
+ "cyan": getColor("cyan", "\x1b[38;5;14m"),
+ "gray": getColor("gray", "\x1b[38;5;243m"),
+ "light-gray": getColor("light-gray", "\x1b[38;5;246m"),
+ "blue": getColor("blue", "\x1b[38;5;12m"),
},
}
colors := []string{}
@@ -83,7 +120,10 @@ func (f Formatter) Fi(indentation uint, format string, args ...interface{}) stri
}
func (f Formatter) Fiw(indentation uint, maxWidth uint, format string, args ...interface{}) string {
- out := fmt.Sprintf(f.style(format), args...)
+ out := f.style(format)
+ if len(args) > 0 {
+ out = fmt.Sprintf(out, args...)
+ }
if indentation == 0 && maxWidth == 0 {
return out
@@ -100,13 +140,13 @@ func (f Formatter) Fiw(indentation uint, maxWidth uint, format string, args ...i
outLines = append(outLines, line)
continue
}
- outWords := []string{}
- length := uint(0)
words := strings.Split(line, " ")
- for _, word := range words {
+ outWords := []string{words[0]}
+ length := uint(f.length(words[0]))
+ for _, word := range words[1:] {
wordLength := f.length(word)
- if length+wordLength <= maxWidth {
- length += wordLength
+ if length+wordLength+1 <= maxWidth {
+ length += wordLength + 1
outWords = append(outWords, word)
continue
}
diff --git a/vendor/github.com/onsi/ginkgo/v2/ginkgo/build/build_command.go b/vendor/github.com/onsi/ginkgo/v2/ginkgo/build/build_command.go
new file mode 100644
index 000000000..5db5d1a7b
--- /dev/null
+++ b/vendor/github.com/onsi/ginkgo/v2/ginkgo/build/build_command.go
@@ -0,0 +1,63 @@
+package build
+
+import (
+ "fmt"
+
+ "github.com/onsi/ginkgo/v2/ginkgo/command"
+ "github.com/onsi/ginkgo/v2/ginkgo/internal"
+ "github.com/onsi/ginkgo/v2/types"
+)
+
+func BuildBuildCommand() command.Command {
+ var cliConfig = types.NewDefaultCLIConfig()
+ var goFlagsConfig = types.NewDefaultGoFlagsConfig()
+
+ flags, err := types.BuildBuildCommandFlagSet(&cliConfig, &goFlagsConfig)
+ if err != nil {
+ panic(err)
+ }
+
+ return command.Command{
+ Name: "build",
+ Flags: flags,
+ Usage: "ginkgo build ",
+ ShortDoc: "Build the passed in (or the package in the current directory if left blank).",
+ DocLink: "precompiling-suites",
+ Command: func(args []string, _ []string) {
+ var errors []error
+ cliConfig, goFlagsConfig, errors = types.VetAndInitializeCLIAndGoConfig(cliConfig, goFlagsConfig)
+ command.AbortIfErrors("Ginkgo detected configuration issues:", errors)
+
+ buildSpecs(args, cliConfig, goFlagsConfig)
+ },
+ }
+}
+
+func buildSpecs(args []string, cliConfig types.CLIConfig, goFlagsConfig types.GoFlagsConfig) {
+ suites := internal.FindSuites(args, cliConfig, false).WithoutState(internal.TestSuiteStateSkippedByFilter)
+ if len(suites) == 0 {
+ command.AbortWith("Found no test suites")
+ }
+
+ internal.VerifyCLIAndFrameworkVersion(suites)
+
+ opc := internal.NewOrderedParallelCompiler(cliConfig.ComputedNumCompilers())
+ opc.StartCompiling(suites, goFlagsConfig)
+
+ for {
+ suiteIdx, suite := opc.Next()
+ if suiteIdx >= len(suites) {
+ break
+ }
+ suites[suiteIdx] = suite
+ if suite.State.Is(internal.TestSuiteStateFailedToCompile) {
+ fmt.Println(suite.CompilationError.Error())
+ } else {
+ fmt.Printf("Compiled %s.test\n", suite.PackageName)
+ }
+ }
+
+ if suites.CountWithState(internal.TestSuiteStateFailedToCompile) > 0 {
+ command.AbortWith("Failed to compile all tests")
+ }
+}
diff --git a/vendor/github.com/onsi/ginkgo/v2/ginkgo/command/abort.go b/vendor/github.com/onsi/ginkgo/v2/ginkgo/command/abort.go
new file mode 100644
index 000000000..2efd28608
--- /dev/null
+++ b/vendor/github.com/onsi/ginkgo/v2/ginkgo/command/abort.go
@@ -0,0 +1,61 @@
+package command
+
+import "fmt"
+
+type AbortDetails struct {
+ ExitCode int
+ Error error
+ EmitUsage bool
+}
+
+func Abort(details AbortDetails) {
+ panic(details)
+}
+
+func AbortGracefullyWith(format string, args ...interface{}) {
+ Abort(AbortDetails{
+ ExitCode: 0,
+ Error: fmt.Errorf(format, args...),
+ EmitUsage: false,
+ })
+}
+
+func AbortWith(format string, args ...interface{}) {
+ Abort(AbortDetails{
+ ExitCode: 1,
+ Error: fmt.Errorf(format, args...),
+ EmitUsage: false,
+ })
+}
+
+func AbortWithUsage(format string, args ...interface{}) {
+ Abort(AbortDetails{
+ ExitCode: 1,
+ Error: fmt.Errorf(format, args...),
+ EmitUsage: true,
+ })
+}
+
+func AbortIfError(preamble string, err error) {
+ if err != nil {
+ Abort(AbortDetails{
+ ExitCode: 1,
+ Error: fmt.Errorf("%s\n%s", preamble, err.Error()),
+ EmitUsage: false,
+ })
+ }
+}
+
+func AbortIfErrors(preamble string, errors []error) {
+ if len(errors) > 0 {
+ out := ""
+ for _, err := range errors {
+ out += err.Error()
+ }
+ Abort(AbortDetails{
+ ExitCode: 1,
+ Error: fmt.Errorf("%s\n%s", preamble, out),
+ EmitUsage: false,
+ })
+ }
+}
diff --git a/vendor/github.com/onsi/ginkgo/v2/ginkgo/command/command.go b/vendor/github.com/onsi/ginkgo/v2/ginkgo/command/command.go
new file mode 100644
index 000000000..12e0e5659
--- /dev/null
+++ b/vendor/github.com/onsi/ginkgo/v2/ginkgo/command/command.go
@@ -0,0 +1,50 @@
+package command
+
+import (
+ "fmt"
+ "io"
+ "strings"
+
+ "github.com/onsi/ginkgo/v2/formatter"
+ "github.com/onsi/ginkgo/v2/types"
+)
+
+type Command struct {
+ Name string
+ Flags types.GinkgoFlagSet
+ Usage string
+ ShortDoc string
+ Documentation string
+ DocLink string
+ Command func(args []string, additionalArgs []string)
+}
+
+func (c Command) Run(args []string, additionalArgs []string) {
+ args, err := c.Flags.Parse(args)
+ if err != nil {
+ AbortWithUsage(err.Error())
+ }
+
+ c.Command(args, additionalArgs)
+}
+
+func (c Command) EmitUsage(writer io.Writer) {
+ fmt.Fprintln(writer, formatter.F("{{bold}}"+c.Usage+"{{/}}"))
+ fmt.Fprintln(writer, formatter.F("{{gray}}%s{{/}}", strings.Repeat("-", len(c.Usage))))
+ if c.ShortDoc != "" {
+ fmt.Fprintln(writer, formatter.Fiw(0, formatter.COLS, c.ShortDoc))
+ fmt.Fprintln(writer, "")
+ }
+ if c.Documentation != "" {
+ fmt.Fprintln(writer, formatter.Fiw(0, formatter.COLS, c.Documentation))
+ fmt.Fprintln(writer, "")
+ }
+ if c.DocLink != "" {
+ fmt.Fprintln(writer, formatter.Fi(0, "{{bold}}Learn more at:{{/}} {{cyan}}{{underline}}http://onsi.github.io/ginkgo/#%s{{/}}", c.DocLink))
+ fmt.Fprintln(writer, "")
+ }
+ flagUsage := c.Flags.Usage()
+ if flagUsage != "" {
+ fmt.Fprintf(writer, formatter.F(flagUsage))
+ }
+}
diff --git a/vendor/github.com/onsi/ginkgo/v2/ginkgo/command/program.go b/vendor/github.com/onsi/ginkgo/v2/ginkgo/command/program.go
new file mode 100644
index 000000000..88dd8d6b0
--- /dev/null
+++ b/vendor/github.com/onsi/ginkgo/v2/ginkgo/command/program.go
@@ -0,0 +1,182 @@
+package command
+
+import (
+ "fmt"
+ "io"
+ "os"
+ "strings"
+
+ "github.com/onsi/ginkgo/v2/formatter"
+ "github.com/onsi/ginkgo/v2/types"
+)
+
+type Program struct {
+ Name string
+ Heading string
+ Commands []Command
+ DefaultCommand Command
+ DeprecatedCommands []DeprecatedCommand
+
+ //For testing - leave as nil in production
+ OutWriter io.Writer
+ ErrWriter io.Writer
+ Exiter func(code int)
+}
+
+type DeprecatedCommand struct {
+ Name string
+ Deprecation types.Deprecation
+}
+
+func (p Program) RunAndExit(osArgs []string) {
+ var command Command
+ deprecationTracker := types.NewDeprecationTracker()
+ if p.Exiter == nil {
+ p.Exiter = os.Exit
+ }
+ if p.OutWriter == nil {
+ p.OutWriter = formatter.ColorableStdOut
+ }
+ if p.ErrWriter == nil {
+ p.ErrWriter = formatter.ColorableStdErr
+ }
+
+ defer func() {
+ exitCode := 0
+
+ if r := recover(); r != nil {
+ details, ok := r.(AbortDetails)
+ if !ok {
+ panic(r)
+ }
+
+ if details.Error != nil {
+ fmt.Fprintln(p.ErrWriter, formatter.F("{{red}}{{bold}}%s %s{{/}} {{red}}failed{{/}}", p.Name, command.Name))
+ fmt.Fprintln(p.ErrWriter, formatter.Fi(1, details.Error.Error()))
+ }
+ if details.EmitUsage {
+ if details.Error != nil {
+ fmt.Fprintln(p.ErrWriter, "")
+ }
+ command.EmitUsage(p.ErrWriter)
+ }
+ exitCode = details.ExitCode
+ }
+
+ command.Flags.ValidateDeprecations(deprecationTracker)
+ if deprecationTracker.DidTrackDeprecations() {
+ fmt.Fprintln(p.ErrWriter, deprecationTracker.DeprecationsReport())
+ }
+ p.Exiter(exitCode)
+ return
+ }()
+
+ args, additionalArgs := []string{}, []string{}
+
+ foundDelimiter := false
+ for _, arg := range osArgs[1:] {
+ if !foundDelimiter {
+ if arg == "--" {
+ foundDelimiter = true
+ continue
+ }
+ }
+
+ if foundDelimiter {
+ additionalArgs = append(additionalArgs, arg)
+ } else {
+ args = append(args, arg)
+ }
+ }
+
+ command = p.DefaultCommand
+ if len(args) > 0 {
+ p.handleHelpRequestsAndExit(p.OutWriter, args)
+ if command.Name == args[0] {
+ args = args[1:]
+ } else {
+ for _, deprecatedCommand := range p.DeprecatedCommands {
+ if deprecatedCommand.Name == args[0] {
+ deprecationTracker.TrackDeprecation(deprecatedCommand.Deprecation)
+ return
+ }
+ }
+ for _, tryCommand := range p.Commands {
+ if tryCommand.Name == args[0] {
+ command, args = tryCommand, args[1:]
+ break
+ }
+ }
+ }
+ }
+
+ command.Run(args, additionalArgs)
+}
+
+func (p Program) handleHelpRequestsAndExit(writer io.Writer, args []string) {
+ if len(args) == 0 {
+ return
+ }
+
+ matchesHelpFlag := func(args ...string) bool {
+ for _, arg := range args {
+ if arg == "--help" || arg == "-help" || arg == "-h" || arg == "--h" {
+ return true
+ }
+ }
+ return false
+ }
+ if len(args) == 1 {
+ if args[0] == "help" || matchesHelpFlag(args[0]) {
+ p.EmitUsage(writer)
+ Abort(AbortDetails{})
+ }
+ } else {
+ var name string
+ if args[0] == "help" || matchesHelpFlag(args[0]) {
+ name = args[1]
+ } else if matchesHelpFlag(args[1:]...) {
+ name = args[0]
+ } else {
+ return
+ }
+
+ if p.DefaultCommand.Name == name || p.Name == name {
+ p.DefaultCommand.EmitUsage(writer)
+ Abort(AbortDetails{})
+ }
+ for _, command := range p.Commands {
+ if command.Name == name {
+ command.EmitUsage(writer)
+ Abort(AbortDetails{})
+ }
+ }
+
+ fmt.Fprintln(writer, formatter.F("{{red}}Unknown Command: {{bold}}%s{{/}}", name))
+ fmt.Fprintln(writer, "")
+ p.EmitUsage(writer)
+ Abort(AbortDetails{ExitCode: 1})
+ }
+ return
+}
+
+func (p Program) EmitUsage(writer io.Writer) {
+ fmt.Fprintln(writer, formatter.F(p.Heading))
+ fmt.Fprintln(writer, formatter.F("{{gray}}%s{{/}}", strings.Repeat("-", len(p.Heading))))
+ fmt.Fprintln(writer, formatter.F("For usage information for a command, run {{bold}}%s help COMMAND{{/}}.", p.Name))
+ fmt.Fprintln(writer, formatter.F("For usage information for the default command, run {{bold}}%s help %s{{/}} or {{bold}}%s help %s{{/}}.", p.Name, p.Name, p.Name, p.DefaultCommand.Name))
+ fmt.Fprintln(writer, "")
+ fmt.Fprintln(writer, formatter.F("The following commands are available:"))
+
+ fmt.Fprintln(writer, formatter.Fi(1, "{{bold}}%s{{/}} or %s {{bold}}%s{{/}} - {{gray}}%s{{/}}", p.Name, p.Name, p.DefaultCommand.Name, p.DefaultCommand.Usage))
+ if p.DefaultCommand.ShortDoc != "" {
+ fmt.Fprintln(writer, formatter.Fi(2, p.DefaultCommand.ShortDoc))
+ }
+
+ for _, command := range p.Commands {
+ fmt.Fprintln(writer, formatter.Fi(1, "{{bold}}%s{{/}} - {{gray}}%s{{/}}", command.Name, command.Usage))
+ if command.ShortDoc != "" {
+ fmt.Fprintln(writer, formatter.Fi(2, command.ShortDoc))
+ }
+ }
+}
diff --git a/vendor/github.com/onsi/ginkgo/v2/ginkgo/generators/boostrap_templates.go b/vendor/github.com/onsi/ginkgo/v2/ginkgo/generators/boostrap_templates.go
new file mode 100644
index 000000000..a367a1fc9
--- /dev/null
+++ b/vendor/github.com/onsi/ginkgo/v2/ginkgo/generators/boostrap_templates.go
@@ -0,0 +1,48 @@
+package generators
+
+var bootstrapText = `package {{.Package}}
+
+import (
+ "testing"
+
+ {{.GinkgoImport}}
+ {{.GomegaImport}}
+)
+
+func Test{{.FormattedName}}(t *testing.T) {
+ {{.GomegaPackage}}RegisterFailHandler({{.GinkgoPackage}}Fail)
+ {{.GinkgoPackage}}RunSpecs(t, "{{.FormattedName}} Suite")
+}
+`
+
+var agoutiBootstrapText = `package {{.Package}}
+
+import (
+ "testing"
+
+ {{.GinkgoImport}}
+ {{.GomegaImport}}
+ "github.com/sclevine/agouti"
+)
+
+func Test{{.FormattedName}}(t *testing.T) {
+ {{.GomegaPackage}}RegisterFailHandler({{.GinkgoPackage}}Fail)
+ {{.GinkgoPackage}}RunSpecs(t, "{{.FormattedName}} Suite")
+}
+
+var agoutiDriver *agouti.WebDriver
+
+var _ = {{.GinkgoPackage}}BeforeSuite(func() {
+ // Choose a WebDriver:
+
+ agoutiDriver = agouti.PhantomJS()
+ // agoutiDriver = agouti.Selenium()
+ // agoutiDriver = agouti.ChromeDriver()
+
+ {{.GomegaPackage}}Expect(agoutiDriver.Start()).To({{.GomegaPackage}}Succeed())
+})
+
+var _ = {{.GinkgoPackage}}AfterSuite(func() {
+ {{.GomegaPackage}}Expect(agoutiDriver.Stop()).To({{.GomegaPackage}}Succeed())
+})
+`
diff --git a/vendor/github.com/onsi/ginkgo/v2/ginkgo/generators/bootstrap_command.go b/vendor/github.com/onsi/ginkgo/v2/ginkgo/generators/bootstrap_command.go
new file mode 100644
index 000000000..73aff0b7a
--- /dev/null
+++ b/vendor/github.com/onsi/ginkgo/v2/ginkgo/generators/bootstrap_command.go
@@ -0,0 +1,133 @@
+package generators
+
+import (
+ "bytes"
+ "encoding/json"
+ "fmt"
+ "os"
+ "text/template"
+
+ sprig "github.com/go-task/slim-sprig"
+ "github.com/onsi/ginkgo/v2/ginkgo/command"
+ "github.com/onsi/ginkgo/v2/ginkgo/internal"
+ "github.com/onsi/ginkgo/v2/types"
+)
+
+func BuildBootstrapCommand() command.Command {
+ conf := GeneratorsConfig{}
+ flags, err := types.NewGinkgoFlagSet(
+ types.GinkgoFlags{
+ {Name: "agouti", KeyPath: "Agouti",
+ Usage: "If set, bootstrap will generate a bootstrap file for writing Agouti tests"},
+ {Name: "nodot", KeyPath: "NoDot",
+ Usage: "If set, bootstrap will generate a bootstrap test file that does not dot-import ginkgo and gomega"},
+ {Name: "internal", KeyPath: "Internal",
+ Usage: "If set, bootstrap will generate a bootstrap test file that uses the regular package name (i.e. `package X`, not `package X_test`)"},
+ {Name: "template", KeyPath: "CustomTemplate",
+ UsageArgument: "template-file",
+ Usage: "If specified, generate will use the contents of the file passed as the bootstrap template"},
+ {Name: "template-data", KeyPath: "CustomTemplateData",
+ UsageArgument: "template-data-file",
+ Usage: "If specified, generate will use the contents of the file passed as data to be rendered in the bootstrap template"},
+ },
+ &conf,
+ types.GinkgoFlagSections{},
+ )
+
+ if err != nil {
+ panic(err)
+ }
+
+ return command.Command{
+ Name: "bootstrap",
+ Usage: "ginkgo bootstrap",
+ ShortDoc: "Bootstrap a test suite for the current package",
+ Documentation: `Tests written in Ginkgo and Gomega require a small amount of boilerplate to hook into Go's testing infrastructure.
+
+{{bold}}ginkgo bootstrap{{/}} generates this boilerplate for you in a file named X_suite_test.go where X is the name of the package under test.`,
+ DocLink: "generators",
+ Flags: flags,
+ Command: func(_ []string, _ []string) {
+ generateBootstrap(conf)
+ },
+ }
+}
+
+type bootstrapData struct {
+ Package string
+ FormattedName string
+
+ GinkgoImport string
+ GomegaImport string
+ GinkgoPackage string
+ GomegaPackage string
+ CustomData map[string]any
+}
+
+func generateBootstrap(conf GeneratorsConfig) {
+ packageName, bootstrapFilePrefix, formattedName := getPackageAndFormattedName()
+
+ data := bootstrapData{
+ Package: determinePackageName(packageName, conf.Internal),
+ FormattedName: formattedName,
+
+ GinkgoImport: `. "github.com/onsi/ginkgo/v2"`,
+ GomegaImport: `. "github.com/onsi/gomega"`,
+ GinkgoPackage: "",
+ GomegaPackage: "",
+ }
+
+ if conf.NoDot {
+ data.GinkgoImport = `"github.com/onsi/ginkgo/v2"`
+ data.GomegaImport = `"github.com/onsi/gomega"`
+ data.GinkgoPackage = `ginkgo.`
+ data.GomegaPackage = `gomega.`
+ }
+
+ targetFile := fmt.Sprintf("%s_suite_test.go", bootstrapFilePrefix)
+ if internal.FileExists(targetFile) {
+ command.AbortWith("{{bold}}%s{{/}} already exists", targetFile)
+ } else {
+ fmt.Printf("Generating ginkgo test suite bootstrap for %s in:\n\t%s\n", packageName, targetFile)
+ }
+
+ f, err := os.Create(targetFile)
+ command.AbortIfError("Failed to create file:", err)
+ defer f.Close()
+
+ var templateText string
+ if conf.CustomTemplate != "" {
+ tpl, err := os.ReadFile(conf.CustomTemplate)
+ command.AbortIfError("Failed to read custom bootstrap file:", err)
+ templateText = string(tpl)
+ if conf.CustomTemplateData != "" {
+ var tplCustomDataMap map[string]any
+ tplCustomData, err := os.ReadFile(conf.CustomTemplateData)
+ command.AbortIfError("Failed to read custom boostrap data file:", err)
+ if !json.Valid([]byte(tplCustomData)) {
+ command.AbortWith("Invalid JSON object in custom data file.")
+ }
+ //create map from the custom template data
+ json.Unmarshal(tplCustomData, &tplCustomDataMap)
+ data.CustomData = tplCustomDataMap
+ }
+ } else if conf.Agouti {
+ templateText = agoutiBootstrapText
+ } else {
+ templateText = bootstrapText
+ }
+
+ //Setting the option to explicitly fail if template is rendered trying to access missing key
+ bootstrapTemplate, err := template.New("bootstrap").Funcs(sprig.TxtFuncMap()).Option("missingkey=error").Parse(templateText)
+ command.AbortIfError("Failed to parse bootstrap template:", err)
+
+ buf := &bytes.Buffer{}
+ //Being explicit about failing sooner during template rendering
+ //when accessing custom data rather than during the go fmt command
+ err = bootstrapTemplate.Execute(buf, data)
+ command.AbortIfError("Failed to render bootstrap template:", err)
+
+ buf.WriteTo(f)
+
+ internal.GoFmt(targetFile)
+}
diff --git a/vendor/github.com/onsi/ginkgo/v2/ginkgo/generators/generate_command.go b/vendor/github.com/onsi/ginkgo/v2/ginkgo/generators/generate_command.go
new file mode 100644
index 000000000..48d23f919
--- /dev/null
+++ b/vendor/github.com/onsi/ginkgo/v2/ginkgo/generators/generate_command.go
@@ -0,0 +1,259 @@
+package generators
+
+import (
+ "bytes"
+ "encoding/json"
+ "fmt"
+ "os"
+ "path/filepath"
+ "strconv"
+ "strings"
+ "text/template"
+
+ sprig "github.com/go-task/slim-sprig"
+ "github.com/onsi/ginkgo/v2/ginkgo/command"
+ "github.com/onsi/ginkgo/v2/ginkgo/internal"
+ "github.com/onsi/ginkgo/v2/types"
+)
+
+func BuildGenerateCommand() command.Command {
+ conf := GeneratorsConfig{}
+ flags, err := types.NewGinkgoFlagSet(
+ types.GinkgoFlags{
+ {Name: "agouti", KeyPath: "Agouti",
+ Usage: "If set, generate will create a test file for writing Agouti tests"},
+ {Name: "nodot", KeyPath: "NoDot",
+ Usage: "If set, generate will create a test file that does not dot-import ginkgo and gomega"},
+ {Name: "internal", KeyPath: "Internal",
+ Usage: "If set, generate will create a test file that uses the regular package name (i.e. `package X`, not `package X_test`)"},
+ {Name: "template", KeyPath: "CustomTemplate",
+ UsageArgument: "template-file",
+ Usage: "If specified, generate will use the contents of the file passed as the test file template"},
+ {Name: "template-data", KeyPath: "CustomTemplateData",
+ UsageArgument: "template-data-file",
+ Usage: "If specified, generate will use the contents of the file passed as data to be rendered in the test file template"},
+ },
+ &conf,
+ types.GinkgoFlagSections{},
+ )
+
+ if err != nil {
+ panic(err)
+ }
+
+ return command.Command{
+ Name: "generate",
+ Usage: "ginkgo generate ",
+ ShortDoc: "Generate a test file named _test.go",
+ Documentation: `If the optional argument is omitted, a file named after the package in the current directory will be created.
+
+You can pass multiple to generate multiple files simultaneously. The resulting files are named _test.go.
+
+You can also pass a of the form "file.go" and generate will emit "file_test.go".`,
+ DocLink: "generators",
+ Flags: flags,
+ Command: func(args []string, _ []string) {
+ generateTestFiles(conf, args)
+ },
+ }
+}
+
+type specData struct {
+ Package string
+ Subject string
+ PackageImportPath string
+ ImportPackage bool
+
+ GinkgoImport string
+ GomegaImport string
+ GinkgoPackage string
+ GomegaPackage string
+ CustomData map[string]any
+}
+
+func generateTestFiles(conf GeneratorsConfig, args []string) {
+ subjects := args
+ if len(subjects) == 0 {
+ subjects = []string{""}
+ }
+ for _, subject := range subjects {
+ generateTestFileForSubject(subject, conf)
+ }
+}
+
+func generateTestFileForSubject(subject string, conf GeneratorsConfig) {
+ packageName, specFilePrefix, formattedName := getPackageAndFormattedName()
+ if subject != "" {
+ specFilePrefix = formatSubject(subject)
+ formattedName = prettifyName(specFilePrefix)
+ }
+
+ if conf.Internal {
+ specFilePrefix = specFilePrefix + "_internal"
+ }
+
+ data := specData{
+ Package: determinePackageName(packageName, conf.Internal),
+ Subject: formattedName,
+ PackageImportPath: getPackageImportPath(),
+ ImportPackage: !conf.Internal,
+
+ GinkgoImport: `. "github.com/onsi/ginkgo/v2"`,
+ GomegaImport: `. "github.com/onsi/gomega"`,
+ GinkgoPackage: "",
+ GomegaPackage: "",
+ }
+
+ if conf.NoDot {
+ data.GinkgoImport = `"github.com/onsi/ginkgo/v2"`
+ data.GomegaImport = `"github.com/onsi/gomega"`
+ data.GinkgoPackage = `ginkgo.`
+ data.GomegaPackage = `gomega.`
+ }
+
+ targetFile := fmt.Sprintf("%s_test.go", specFilePrefix)
+ if internal.FileExists(targetFile) {
+ command.AbortWith("{{bold}}%s{{/}} already exists", targetFile)
+ } else {
+ fmt.Printf("Generating ginkgo test for %s in:\n %s\n", data.Subject, targetFile)
+ }
+
+ f, err := os.Create(targetFile)
+ command.AbortIfError("Failed to create test file:", err)
+ defer f.Close()
+
+ var templateText string
+ if conf.CustomTemplate != "" {
+ tpl, err := os.ReadFile(conf.CustomTemplate)
+ command.AbortIfError("Failed to read custom template file:", err)
+ templateText = string(tpl)
+ if conf.CustomTemplateData != "" {
+ var tplCustomDataMap map[string]any
+ tplCustomData, err := os.ReadFile(conf.CustomTemplateData)
+ command.AbortIfError("Failed to read custom template data file:", err)
+ if !json.Valid([]byte(tplCustomData)) {
+ command.AbortWith("Invalid JSON object in custom data file.")
+ }
+ //create map from the custom template data
+ json.Unmarshal(tplCustomData, &tplCustomDataMap)
+ data.CustomData = tplCustomDataMap
+ }
+ } else if conf.Agouti {
+ templateText = agoutiSpecText
+ } else {
+ templateText = specText
+ }
+
+ //Setting the option to explicitly fail if template is rendered trying to access missing key
+ specTemplate, err := template.New("spec").Funcs(sprig.TxtFuncMap()).Option("missingkey=error").Parse(templateText)
+ command.AbortIfError("Failed to read parse test template:", err)
+
+ //Being explicit about failing sooner during template rendering
+ //when accessing custom data rather than during the go fmt command
+ err = specTemplate.Execute(f, data)
+ command.AbortIfError("Failed to render bootstrap template:", err)
+ internal.GoFmt(targetFile)
+}
+
+func formatSubject(name string) string {
+ name = strings.ReplaceAll(name, "-", "_")
+ name = strings.ReplaceAll(name, " ", "_")
+ name = strings.Split(name, ".go")[0]
+ name = strings.Split(name, "_test")[0]
+ return name
+}
+
+// moduleName returns module name from go.mod from given module root directory
+func moduleName(modRoot string) string {
+ modFile, err := os.Open(filepath.Join(modRoot, "go.mod"))
+ if err != nil {
+ return ""
+ }
+
+ mod := make([]byte, 128)
+ _, err = modFile.Read(mod)
+ if err != nil {
+ return ""
+ }
+
+ slashSlash := []byte("//")
+ moduleStr := []byte("module")
+
+ for len(mod) > 0 {
+ line := mod
+ mod = nil
+ if i := bytes.IndexByte(line, '\n'); i >= 0 {
+ line, mod = line[:i], line[i+1:]
+ }
+ if i := bytes.Index(line, slashSlash); i >= 0 {
+ line = line[:i]
+ }
+ line = bytes.TrimSpace(line)
+ if !bytes.HasPrefix(line, moduleStr) {
+ continue
+ }
+ line = line[len(moduleStr):]
+ n := len(line)
+ line = bytes.TrimSpace(line)
+ if len(line) == n || len(line) == 0 {
+ continue
+ }
+
+ if line[0] == '"' || line[0] == '`' {
+ p, err := strconv.Unquote(string(line))
+ if err != nil {
+ return "" // malformed quoted string or multiline module path
+ }
+ return p
+ }
+
+ return string(line)
+ }
+
+ return "" // missing module path
+}
+
+func findModuleRoot(dir string) (root string) {
+ dir = filepath.Clean(dir)
+
+ // Look for enclosing go.mod.
+ for {
+ if fi, err := os.Stat(filepath.Join(dir, "go.mod")); err == nil && !fi.IsDir() {
+ return dir
+ }
+ d := filepath.Dir(dir)
+ if d == dir {
+ break
+ }
+ dir = d
+ }
+ return ""
+}
+
+func getPackageImportPath() string {
+ workingDir, err := os.Getwd()
+ if err != nil {
+ panic(err.Error())
+ }
+
+ sep := string(filepath.Separator)
+
+ // Try go.mod file first
+ modRoot := findModuleRoot(workingDir)
+ if modRoot != "" {
+ modName := moduleName(modRoot)
+ if modName != "" {
+ cd := strings.ReplaceAll(workingDir, modRoot, "")
+ cd = strings.ReplaceAll(cd, sep, "/")
+ return modName + cd
+ }
+ }
+
+ // Fallback to GOPATH structure
+ paths := strings.Split(workingDir, sep+"src"+sep)
+ if len(paths) == 1 {
+ fmt.Printf("\nCouldn't identify package import path.\n\n\tginkgo generate\n\nMust be run within a package directory under $GOPATH/src/...\nYou're going to have to change UNKNOWN_PACKAGE_PATH in the generated file...\n\n")
+ return "UNKNOWN_PACKAGE_PATH"
+ }
+ return filepath.ToSlash(paths[len(paths)-1])
+}
diff --git a/vendor/github.com/onsi/ginkgo/v2/ginkgo/generators/generate_templates.go b/vendor/github.com/onsi/ginkgo/v2/ginkgo/generators/generate_templates.go
new file mode 100644
index 000000000..c3470adbf
--- /dev/null
+++ b/vendor/github.com/onsi/ginkgo/v2/ginkgo/generators/generate_templates.go
@@ -0,0 +1,41 @@
+package generators
+
+var specText = `package {{.Package}}
+
+import (
+ {{.GinkgoImport}}
+ {{.GomegaImport}}
+
+ {{if .ImportPackage}}"{{.PackageImportPath}}"{{end}}
+)
+
+var _ = {{.GinkgoPackage}}Describe("{{.Subject}}", func() {
+
+})
+`
+
+var agoutiSpecText = `package {{.Package}}
+
+import (
+ {{.GinkgoImport}}
+ {{.GomegaImport}}
+ "github.com/sclevine/agouti"
+ . "github.com/sclevine/agouti/matchers"
+
+ {{if .ImportPackage}}"{{.PackageImportPath}}"{{end}}
+)
+
+var _ = {{.GinkgoPackage}}Describe("{{.Subject}}", func() {
+ var page *agouti.Page
+
+ {{.GinkgoPackage}}BeforeEach(func() {
+ var err error
+ page, err = agoutiDriver.NewPage()
+ {{.GomegaPackage}}Expect(err).NotTo({{.GomegaPackage}}HaveOccurred())
+ })
+
+ {{.GinkgoPackage}}AfterEach(func() {
+ {{.GomegaPackage}}Expect(page.Destroy()).To({{.GomegaPackage}}Succeed())
+ })
+})
+`
diff --git a/vendor/github.com/onsi/ginkgo/v2/ginkgo/generators/generators_common.go b/vendor/github.com/onsi/ginkgo/v2/ginkgo/generators/generators_common.go
new file mode 100644
index 000000000..3046a4487
--- /dev/null
+++ b/vendor/github.com/onsi/ginkgo/v2/ginkgo/generators/generators_common.go
@@ -0,0 +1,64 @@
+package generators
+
+import (
+ "go/build"
+ "os"
+ "path/filepath"
+ "strconv"
+ "strings"
+
+ "github.com/onsi/ginkgo/v2/ginkgo/command"
+)
+
+type GeneratorsConfig struct {
+ Agouti, NoDot, Internal bool
+ CustomTemplate string
+ CustomTemplateData string
+}
+
+func getPackageAndFormattedName() (string, string, string) {
+ path, err := os.Getwd()
+ command.AbortIfError("Could not get current working directory:", err)
+
+ dirName := strings.ReplaceAll(filepath.Base(path), "-", "_")
+ dirName = strings.ReplaceAll(dirName, " ", "_")
+
+ pkg, err := build.ImportDir(path, 0)
+ packageName := pkg.Name
+ if err != nil {
+ packageName = ensureLegalPackageName(dirName)
+ }
+
+ formattedName := prettifyName(filepath.Base(path))
+ return packageName, dirName, formattedName
+}
+
+func ensureLegalPackageName(name string) string {
+ if name == "_" {
+ return "underscore"
+ }
+ if len(name) == 0 {
+ return "empty"
+ }
+ n, isDigitErr := strconv.Atoi(string(name[0]))
+ if isDigitErr == nil {
+ return []string{"zero", "one", "two", "three", "four", "five", "six", "seven", "eight", "nine"}[n] + name[1:]
+ }
+ return name
+}
+
+func prettifyName(name string) string {
+ name = strings.ReplaceAll(name, "-", " ")
+ name = strings.ReplaceAll(name, "_", " ")
+ name = strings.Title(name)
+ name = strings.ReplaceAll(name, " ", "")
+ return name
+}
+
+func determinePackageName(name string, internal bool) string {
+ if internal {
+ return name
+ }
+
+ return name + "_test"
+}
diff --git a/vendor/github.com/onsi/ginkgo/v2/ginkgo/internal/compile.go b/vendor/github.com/onsi/ginkgo/v2/ginkgo/internal/compile.go
new file mode 100644
index 000000000..86da7340d
--- /dev/null
+++ b/vendor/github.com/onsi/ginkgo/v2/ginkgo/internal/compile.go
@@ -0,0 +1,161 @@
+package internal
+
+import (
+ "fmt"
+ "os"
+ "os/exec"
+ "path/filepath"
+ "strings"
+ "sync"
+
+ "github.com/onsi/ginkgo/v2/types"
+)
+
+func CompileSuite(suite TestSuite, goFlagsConfig types.GoFlagsConfig) TestSuite {
+ if suite.PathToCompiledTest != "" {
+ return suite
+ }
+
+ suite.CompilationError = nil
+
+ path, err := filepath.Abs(filepath.Join(suite.Path, suite.PackageName+".test"))
+ if err != nil {
+ suite.State = TestSuiteStateFailedToCompile
+ suite.CompilationError = fmt.Errorf("Failed to compute compilation target path:\n%s", err.Error())
+ return suite
+ }
+
+ ginkgoInvocationPath, _ := os.Getwd()
+ ginkgoInvocationPath, _ = filepath.Abs(ginkgoInvocationPath)
+ packagePath := suite.AbsPath()
+ pathToInvocationPath, err := filepath.Rel(packagePath, ginkgoInvocationPath)
+ if err != nil {
+ suite.State = TestSuiteStateFailedToCompile
+ suite.CompilationError = fmt.Errorf("Failed to get relative path from package to the current working directory:\n%s", err.Error())
+ return suite
+ }
+ args, err := types.GenerateGoTestCompileArgs(goFlagsConfig, path, "./", pathToInvocationPath)
+ if err != nil {
+ suite.State = TestSuiteStateFailedToCompile
+ suite.CompilationError = fmt.Errorf("Failed to generate go test compile flags:\n%s", err.Error())
+ return suite
+ }
+
+ cmd := exec.Command("go", args...)
+ cmd.Dir = suite.Path
+ output, err := cmd.CombinedOutput()
+ if err != nil {
+ if len(output) > 0 {
+ suite.State = TestSuiteStateFailedToCompile
+ suite.CompilationError = fmt.Errorf("Failed to compile %s:\n\n%s", suite.PackageName, output)
+ } else {
+ suite.State = TestSuiteStateFailedToCompile
+ suite.CompilationError = fmt.Errorf("Failed to compile %s\n%s", suite.PackageName, err.Error())
+ }
+ return suite
+ }
+
+ if strings.Contains(string(output), "[no test files]") {
+ suite.State = TestSuiteStateSkippedDueToEmptyCompilation
+ return suite
+ }
+
+ if len(output) > 0 {
+ fmt.Println(string(output))
+ }
+
+ if !FileExists(path) {
+ suite.State = TestSuiteStateFailedToCompile
+ suite.CompilationError = fmt.Errorf("Failed to compile %s:\nOutput file %s could not be found", suite.PackageName, path)
+ return suite
+ }
+
+ suite.State = TestSuiteStateCompiled
+ suite.PathToCompiledTest = path
+ return suite
+}
+
+func Cleanup(goFlagsConfig types.GoFlagsConfig, suites ...TestSuite) {
+ if goFlagsConfig.BinaryMustBePreserved() {
+ return
+ }
+ for _, suite := range suites {
+ if !suite.Precompiled {
+ os.Remove(suite.PathToCompiledTest)
+ }
+ }
+}
+
+type parallelSuiteBundle struct {
+ suite TestSuite
+ compiled chan TestSuite
+}
+
+type OrderedParallelCompiler struct {
+ mutex *sync.Mutex
+ stopped bool
+ numCompilers int
+
+ idx int
+ numSuites int
+ completionChannels []chan TestSuite
+}
+
+func NewOrderedParallelCompiler(numCompilers int) *OrderedParallelCompiler {
+ return &OrderedParallelCompiler{
+ mutex: &sync.Mutex{},
+ numCompilers: numCompilers,
+ }
+}
+
+func (opc *OrderedParallelCompiler) StartCompiling(suites TestSuites, goFlagsConfig types.GoFlagsConfig) {
+ opc.stopped = false
+ opc.idx = 0
+ opc.numSuites = len(suites)
+ opc.completionChannels = make([]chan TestSuite, opc.numSuites)
+
+ toCompile := make(chan parallelSuiteBundle, opc.numCompilers)
+ for compiler := 0; compiler < opc.numCompilers; compiler++ {
+ go func() {
+ for bundle := range toCompile {
+ c, suite := bundle.compiled, bundle.suite
+ opc.mutex.Lock()
+ stopped := opc.stopped
+ opc.mutex.Unlock()
+ if !stopped {
+ suite = CompileSuite(suite, goFlagsConfig)
+ }
+ c <- suite
+ }
+ }()
+ }
+
+ for idx, suite := range suites {
+ opc.completionChannels[idx] = make(chan TestSuite, 1)
+ toCompile <- parallelSuiteBundle{suite, opc.completionChannels[idx]}
+ if idx == 0 { //compile first suite serially
+ suite = <-opc.completionChannels[0]
+ opc.completionChannels[0] <- suite
+ }
+ }
+
+ close(toCompile)
+}
+
+func (opc *OrderedParallelCompiler) Next() (int, TestSuite) {
+ if opc.idx >= opc.numSuites {
+ return opc.numSuites, TestSuite{}
+ }
+
+ idx := opc.idx
+ suite := <-opc.completionChannels[idx]
+ opc.idx = opc.idx + 1
+
+ return idx, suite
+}
+
+func (opc *OrderedParallelCompiler) StopAndDrain() {
+ opc.mutex.Lock()
+ opc.stopped = true
+ opc.mutex.Unlock()
+}
diff --git a/vendor/github.com/onsi/ginkgo/v2/ginkgo/internal/profiles_and_reports.go b/vendor/github.com/onsi/ginkgo/v2/ginkgo/internal/profiles_and_reports.go
new file mode 100644
index 000000000..bd3c6d028
--- /dev/null
+++ b/vendor/github.com/onsi/ginkgo/v2/ginkgo/internal/profiles_and_reports.go
@@ -0,0 +1,237 @@
+package internal
+
+import (
+ "bytes"
+ "fmt"
+ "os"
+ "os/exec"
+ "path/filepath"
+ "regexp"
+ "strconv"
+
+ "github.com/google/pprof/profile"
+ "github.com/onsi/ginkgo/v2/reporters"
+ "github.com/onsi/ginkgo/v2/types"
+)
+
+// AbsPathForGeneratedAsset computes the absolute path at which a generated
+// asset (report, profile, binary) for suite should be written.  A non-zero
+// process number appends a ".N" suffix; when an output directory is
+// configured the asset is namespaced by the suite to avoid collisions.
+func AbsPathForGeneratedAsset(assetName string, suite TestSuite, cliConfig types.CLIConfig, process int) string {
+	suffix := ""
+	if process != 0 {
+		suffix = fmt.Sprintf(".%d", process)
+	}
+	if cliConfig.OutputDir == "" {
+		return filepath.Join(suite.AbsPath(), assetName+suffix)
+	}
+	outputDir, _ := filepath.Abs(cliConfig.OutputDir)
+	return filepath.Join(outputDir, suite.NamespacedName()+"_"+assetName+suffix)
+}
+
+// FinalizeProfilesAndReportsForSuites performs post-run bookkeeping across
+// all suites: merges cover profiles, preserves compiled test binaries,
+// generates reports for suites that never ran, and merges per-suite reports.
+// It returns human-readable status messages and the first error encountered.
+func FinalizeProfilesAndReportsForSuites(suites TestSuites, cliConfig types.CLIConfig, suiteConfig types.SuiteConfig, reporterConfig types.ReporterConfig, goFlagsConfig types.GoFlagsConfig) ([]string, error) {
+	messages := []string{}
+	suitesWithProfiles := suites.WithState(TestSuiteStatePassed, TestSuiteStateFailed) //anything else won't have actually run and generated a profile
+
+	// merge cover profiles if need be
+	if goFlagsConfig.Cover && !cliConfig.KeepSeparateCoverprofiles {
+		coverProfiles := []string{}
+		for _, suite := range suitesWithProfiles {
+			// programmatically-focused suites emit no coverfile (see runSerial)
+			if !suite.HasProgrammaticFocus {
+				coverProfiles = append(coverProfiles, AbsPathForGeneratedAsset(goFlagsConfig.CoverProfile, suite, cliConfig, 0))
+			}
+		}
+
+		if len(coverProfiles) > 0 {
+			dst := goFlagsConfig.CoverProfile
+			if cliConfig.OutputDir != "" {
+				dst = filepath.Join(cliConfig.OutputDir, goFlagsConfig.CoverProfile)
+			}
+			err := MergeAndCleanupCoverProfiles(coverProfiles, dst)
+			if err != nil {
+				return messages, err
+			}
+			coverage, err := GetCoverageFromCoverProfile(dst)
+			if err != nil {
+				return messages, err
+			}
+			if coverage == 0 {
+				messages = append(messages, "composite coverage: [no statements]")
+			} else if suitesWithProfiles.AnyHaveProgrammaticFocus() {
+				messages = append(messages, fmt.Sprintf("composite coverage: %.1f%% of statements however some suites did not contribute because they included programatically focused specs", coverage))
+			} else {
+				messages = append(messages, fmt.Sprintf("composite coverage: %.1f%% of statements", coverage))
+			}
+		} else {
+			messages = append(messages, "no composite coverage computed: all suites included programatically focused specs")
+		}
+	}
+
+	// copy binaries if need be
+	for _, suite := range suitesWithProfiles {
+		if goFlagsConfig.BinaryMustBePreserved() && cliConfig.OutputDir != "" {
+			src := suite.PathToCompiledTest
+			dst := filepath.Join(cliConfig.OutputDir, suite.NamespacedName()+".test")
+			// precompiled binaries belong to the user, so copy; ones we
+			// built ourselves can simply be moved
+			if suite.Precompiled {
+				if err := CopyFile(src, dst); err != nil {
+					return messages, err
+				}
+			} else {
+				if err := os.Rename(src, dst); err != nil {
+					return messages, err
+				}
+			}
+		}
+	}
+
+	// reportFormat bundles a report filename with the functions that
+	// generate a single report and merge per-suite reports.
+	type reportFormat struct {
+		ReportName   string
+		GenerateFunc func(types.Report, string) error
+		MergeFunc    func([]string, string) ([]string, error)
+	}
+	reportFormats := []reportFormat{}
+	if reporterConfig.JSONReport != "" {
+		reportFormats = append(reportFormats, reportFormat{ReportName: reporterConfig.JSONReport, GenerateFunc: reporters.GenerateJSONReport, MergeFunc: reporters.MergeAndCleanupJSONReports})
+	}
+	if reporterConfig.JUnitReport != "" {
+		reportFormats = append(reportFormats, reportFormat{ReportName: reporterConfig.JUnitReport, GenerateFunc: reporters.GenerateJUnitReport, MergeFunc: reporters.MergeAndCleanupJUnitReports})
+	}
+	if reporterConfig.TeamcityReport != "" {
+		reportFormats = append(reportFormats, reportFormat{ReportName: reporterConfig.TeamcityReport, GenerateFunc: reporters.GenerateTeamcityReport, MergeFunc: reporters.MergeAndCleanupTeamcityReports})
+	}
+
+	// Generate reports for suites that failed to run
+	reportableSuites := suites.ThatAreGinkgoSuites()
+	for _, suite := range reportableSuites.WithState(TestSuiteStateFailedToCompile, TestSuiteStateFailedDueToTimeout, TestSuiteStateSkippedDueToPriorFailures, TestSuiteStateSkippedDueToEmptyCompilation) {
+		report := types.Report{
+			SuitePath:      suite.AbsPath(),
+			SuiteConfig:    suiteConfig,
+			SuiteSucceeded: false,
+		}
+		switch suite.State {
+		case TestSuiteStateFailedToCompile:
+			report.SpecialSuiteFailureReasons = append(report.SpecialSuiteFailureReasons, suite.CompilationError.Error())
+		case TestSuiteStateFailedDueToTimeout:
+			report.SpecialSuiteFailureReasons = append(report.SpecialSuiteFailureReasons, TIMEOUT_ELAPSED_FAILURE_REASON)
+		case TestSuiteStateSkippedDueToPriorFailures:
+			report.SpecialSuiteFailureReasons = append(report.SpecialSuiteFailureReasons, PRIOR_FAILURES_FAILURE_REASON)
+		case TestSuiteStateSkippedDueToEmptyCompilation:
+			report.SpecialSuiteFailureReasons = append(report.SpecialSuiteFailureReasons, EMPTY_SKIP_FAILURE_REASON)
+			// an empty compilation is not a failure
+			report.SuiteSucceeded = true
+		}
+
+		for _, format := range reportFormats {
+			// generation errors are intentionally ignored here; merging
+			// below will surface missing files
+			format.GenerateFunc(report, AbsPathForGeneratedAsset(format.ReportName, suite, cliConfig, 0))
+		}
+	}
+
+	// Merge reports unless we've been asked to keep them separate
+	if !cliConfig.KeepSeparateReports {
+		for _, format := range reportFormats {
+			reports := []string{}
+			for _, suite := range reportableSuites {
+				reports = append(reports, AbsPathForGeneratedAsset(format.ReportName, suite, cliConfig, 0))
+			}
+			dst := format.ReportName
+			if cliConfig.OutputDir != "" {
+				dst = filepath.Join(cliConfig.OutputDir, format.ReportName)
+			}
+			mergeMessages, err := format.MergeFunc(reports, dst)
+			messages = append(messages, mergeMessages...)
+			if err != nil {
+				return messages, err
+			}
+		}
+	}
+
+	return messages, nil
+}
+
+// MergeAndCleanupCoverProfiles loads each cover profile, concatenates them
+// (stripping the "mode:" header from all but the first), deletes the
+// originals, and writes the combined profile to destination.
+func MergeAndCleanupCoverProfiles(profiles []string, destination string) error {
+	combined := &bytes.Buffer{}
+	modeRegex := regexp.MustCompile(`^mode: .*\n`)
+	for i, profile := range profiles {
+		contents, err := os.ReadFile(profile)
+		if err != nil {
+			return fmt.Errorf("Unable to read coverage file %s:\n%s", profile, err.Error())
+		}
+		// best-effort cleanup; removal errors are ignored
+		os.Remove(profile)
+
+		// remove the cover mode line from every file
+		// except the first one
+		if i > 0 {
+			contents = modeRegex.ReplaceAll(contents, []byte{})
+		}
+
+		_, err = combined.Write(contents)
+
+		// Add a newline to the end of every file if missing.
+		if err == nil && len(contents) > 0 && contents[len(contents)-1] != '\n' {
+			_, err = combined.Write([]byte("\n"))
+		}
+
+		if err != nil {
+			return fmt.Errorf("Unable to append to coverprofile:\n%s", err.Error())
+		}
+	}
+
+	err := os.WriteFile(destination, combined.Bytes(), 0666)
+	if err != nil {
+		return fmt.Errorf("Unable to create combined cover profile:\n%s", err.Error())
+	}
+	return nil
+}
+
+// GetCoverageFromCoverProfile shells out to `go tool cover -func` and parses
+// the "total: (statements) NN.N%" line to return the coverage percentage.
+func GetCoverageFromCoverProfile(profile string) (float64, error) {
+	cmd := exec.Command("go", "tool", "cover", "-func", profile)
+	output, err := cmd.CombinedOutput()
+	if err != nil {
+		return 0, fmt.Errorf("Could not process Coverprofile %s: %s", profile, err.Error())
+	}
+	re := regexp.MustCompile(`total:\s*\(statements\)\s*(\d*\.\d*)\%`)
+	matches := re.FindStringSubmatch(string(output))
+	if matches == nil {
+		return 0, fmt.Errorf("Could not parse Coverprofile to compute coverage percentage")
+	}
+	coverageString := matches[1]
+	coverage, err := strconv.ParseFloat(coverageString, 64)
+	if err != nil {
+		return 0, fmt.Errorf("Could not parse Coverprofile to compute coverage percentage: %s", err.Error())
+	}
+
+	return coverage, nil
+}
+
+// MergeProfiles parses each pprof profile at profilePaths, deletes the
+// originals, merges them with the pprof library, and writes the merged
+// profile to destination.
+// NOTE(review): the opened source files are never closed explicitly; they
+// are only released when garbage-collected.
+func MergeProfiles(profilePaths []string, destination string) error {
+	profiles := []*profile.Profile{}
+	for _, profilePath := range profilePaths {
+		proFile, err := os.Open(profilePath)
+		if err != nil {
+			return fmt.Errorf("Could not open profile: %s\n%s", profilePath, err.Error())
+		}
+		prof, err := profile.Parse(proFile)
+		if err != nil {
+			return fmt.Errorf("Could not parse profile: %s\n%s", profilePath, err.Error())
+		}
+		profiles = append(profiles, prof)
+		os.Remove(profilePath)
+	}
+
+	mergedProfile, err := profile.Merge(profiles)
+	if err != nil {
+		return fmt.Errorf("Could not merge profiles:\n%s", err.Error())
+	}
+
+	outFile, err := os.Create(destination)
+	if err != nil {
+		return fmt.Errorf("Could not create merged profile %s:\n%s", destination, err.Error())
+	}
+	err = mergedProfile.Write(outFile)
+	if err != nil {
+		return fmt.Errorf("Could not write merged profile %s:\n%s", destination, err.Error())
+	}
+	err = outFile.Close()
+	if err != nil {
+		return fmt.Errorf("Could not close merged profile %s:\n%s", destination, err.Error())
+	}
+
+	return nil
+}
diff --git a/vendor/github.com/onsi/ginkgo/v2/ginkgo/internal/run.go b/vendor/github.com/onsi/ginkgo/v2/ginkgo/internal/run.go
new file mode 100644
index 000000000..41052ea19
--- /dev/null
+++ b/vendor/github.com/onsi/ginkgo/v2/ginkgo/internal/run.go
@@ -0,0 +1,355 @@
+package internal
+
+import (
+ "bytes"
+ "fmt"
+ "io"
+ "os"
+ "os/exec"
+ "path/filepath"
+ "regexp"
+ "strings"
+ "syscall"
+ "time"
+
+ "github.com/onsi/ginkgo/v2/formatter"
+ "github.com/onsi/ginkgo/v2/ginkgo/command"
+ "github.com/onsi/ginkgo/v2/internal/parallel_support"
+ "github.com/onsi/ginkgo/v2/reporters"
+ "github.com/onsi/ginkgo/v2/types"
+)
+
+// RunCompiledSuite dispatches a compiled suite to the appropriate runner:
+// parallel Ginkgo, serial Ginkgo, or plain `go test`.  The suite's state is
+// set to Failed up front and only flipped to Passed by a successful run.
+func RunCompiledSuite(suite TestSuite, ginkgoConfig types.SuiteConfig, reporterConfig types.ReporterConfig, cliConfig types.CLIConfig, goFlagsConfig types.GoFlagsConfig, additionalArgs []string) TestSuite {
+	suite.State = TestSuiteStateFailed
+	suite.HasProgrammaticFocus = false
+
+	if suite.PathToCompiledTest == "" {
+		// nothing was compiled (e.g. empty package); leave as Failed
+		return suite
+	}
+
+	if suite.IsGinkgo && cliConfig.ComputedProcs() > 1 {
+		suite = runParallel(suite, ginkgoConfig, reporterConfig, cliConfig, goFlagsConfig, additionalArgs)
+	} else if suite.IsGinkgo {
+		suite = runSerial(suite, ginkgoConfig, reporterConfig, cliConfig, goFlagsConfig, additionalArgs)
+	} else {
+		suite = runGoTest(suite, cliConfig, goFlagsConfig)
+	}
+	runAfterRunHook(cliConfig.AfterRunHook, reporterConfig.NoColor, suite)
+	return suite
+}
+
+// buildAndStartCommand starts the compiled test binary with args from the
+// suite directory.  Output is captured in the returned buffer; when
+// pipeToStdout is set, stdout/stderr are additionally streamed live.
+// Aborts the CLI if the process cannot be started.
+func buildAndStartCommand(suite TestSuite, args []string, pipeToStdout bool) (*exec.Cmd, *bytes.Buffer) {
+	buf := &bytes.Buffer{}
+	cmd := exec.Command(suite.PathToCompiledTest, args...)
+	cmd.Dir = suite.Path
+	if pipeToStdout {
+		cmd.Stderr = io.MultiWriter(os.Stdout, buf)
+		cmd.Stdout = os.Stdout
+	} else {
+		cmd.Stderr = buf
+		cmd.Stdout = buf
+	}
+	err := cmd.Start()
+	command.AbortIfError("Failed to start test suite", err)
+
+	return cmd, buf
+}
+
+// checkForNoTestsWarning reports whether the captured output contains go
+// test's "no tests to run" warning, printing a bootstrap hint if so.
+func checkForNoTestsWarning(buf *bytes.Buffer) bool {
+	if strings.Contains(buf.String(), "warning: no tests to run") {
+		fmt.Fprintf(os.Stderr, `Found no test suites, did you forget to run "ginkgo bootstrap"?`)
+		return true
+	}
+	return false
+}
+
+// runGoTest runs a non-Ginkgo suite via the compiled `go test` binary and
+// sets suite.State from the exit status.  A suite "passes" if it exits 0 or
+// with the focus exit code, unless --require-suite is set and no tests ran.
+func runGoTest(suite TestSuite, cliConfig types.CLIConfig, goFlagsConfig types.GoFlagsConfig) TestSuite {
+	// As we run the go test from the suite directory, make sure the cover profile is absolute
+	// and placed into the expected output directory when one is configured.
+	if goFlagsConfig.Cover && !filepath.IsAbs(goFlagsConfig.CoverProfile) {
+		goFlagsConfig.CoverProfile = AbsPathForGeneratedAsset(goFlagsConfig.CoverProfile, suite, cliConfig, 0)
+	}
+
+	args, err := types.GenerateGoTestRunArgs(goFlagsConfig)
+	command.AbortIfError("Failed to generate test run arguments", err)
+	cmd, buf := buildAndStartCommand(suite, args, true)
+
+	cmd.Wait()
+
+	exitStatus := cmd.ProcessState.Sys().(syscall.WaitStatus).ExitStatus()
+	passed := (exitStatus == 0) || (exitStatus == types.GINKGO_FOCUS_EXIT_CODE)
+	passed = !(checkForNoTestsWarning(buf) && cliConfig.RequireSuite) && passed
+	if passed {
+		suite.State = TestSuiteStatePassed
+	} else {
+		suite.State = TestSuiteStateFailed
+	}
+
+	return suite
+}
+
+// runSerial runs a Ginkgo suite in a single process.  Profile and report
+// paths are rewritten to absolute, suite-namespaced locations first; the
+// suite's pass/fail state and programmatic-focus flag are derived from the
+// process exit status.
+func runSerial(suite TestSuite, ginkgoConfig types.SuiteConfig, reporterConfig types.ReporterConfig, cliConfig types.CLIConfig, goFlagsConfig types.GoFlagsConfig, additionalArgs []string) TestSuite {
+	if goFlagsConfig.Cover {
+		goFlagsConfig.CoverProfile = AbsPathForGeneratedAsset(goFlagsConfig.CoverProfile, suite, cliConfig, 0)
+	}
+	if goFlagsConfig.BlockProfile != "" {
+		goFlagsConfig.BlockProfile = AbsPathForGeneratedAsset(goFlagsConfig.BlockProfile, suite, cliConfig, 0)
+	}
+	if goFlagsConfig.CPUProfile != "" {
+		goFlagsConfig.CPUProfile = AbsPathForGeneratedAsset(goFlagsConfig.CPUProfile, suite, cliConfig, 0)
+	}
+	if goFlagsConfig.MemProfile != "" {
+		goFlagsConfig.MemProfile = AbsPathForGeneratedAsset(goFlagsConfig.MemProfile, suite, cliConfig, 0)
+	}
+	if goFlagsConfig.MutexProfile != "" {
+		goFlagsConfig.MutexProfile = AbsPathForGeneratedAsset(goFlagsConfig.MutexProfile, suite, cliConfig, 0)
+	}
+	if reporterConfig.JSONReport != "" {
+		reporterConfig.JSONReport = AbsPathForGeneratedAsset(reporterConfig.JSONReport, suite, cliConfig, 0)
+	}
+	if reporterConfig.JUnitReport != "" {
+		reporterConfig.JUnitReport = AbsPathForGeneratedAsset(reporterConfig.JUnitReport, suite, cliConfig, 0)
+	}
+	if reporterConfig.TeamcityReport != "" {
+		reporterConfig.TeamcityReport = AbsPathForGeneratedAsset(reporterConfig.TeamcityReport, suite, cliConfig, 0)
+	}
+
+	args, err := types.GenerateGinkgoTestRunArgs(ginkgoConfig, reporterConfig, goFlagsConfig)
+	command.AbortIfError("Failed to generate test run arguments", err)
+	// disable go test's own timeout; Ginkgo manages suite timeouts itself
+	args = append([]string{"--test.timeout=0"}, args...)
+	args = append(args, additionalArgs...)
+
+	cmd, buf := buildAndStartCommand(suite, args, true)
+
+	cmd.Wait()
+
+	exitStatus := cmd.ProcessState.Sys().(syscall.WaitStatus).ExitStatus()
+	suite.HasProgrammaticFocus = (exitStatus == types.GINKGO_FOCUS_EXIT_CODE)
+	passed := (exitStatus == 0) || (exitStatus == types.GINKGO_FOCUS_EXIT_CODE)
+	passed = !(checkForNoTestsWarning(buf) && cliConfig.RequireSuite) && passed
+	if passed {
+		suite.State = TestSuiteStatePassed
+	} else {
+		suite.State = TestSuiteStateFailed
+	}
+
+	// focused specs suppress all profile generation; tell the user why the
+	// requested files are missing
+	if suite.HasProgrammaticFocus {
+		if goFlagsConfig.Cover {
+			fmt.Fprintln(os.Stdout, "coverage: no coverfile was generated because specs are programmatically focused")
+		}
+		if goFlagsConfig.BlockProfile != "" {
+			fmt.Fprintln(os.Stdout, "no block profile was generated because specs are programmatically focused")
+		}
+		if goFlagsConfig.CPUProfile != "" {
+			fmt.Fprintln(os.Stdout, "no cpu profile was generated because specs are programmatically focused")
+		}
+		if goFlagsConfig.MemProfile != "" {
+			fmt.Fprintln(os.Stdout, "no mem profile was generated because specs are programmatically focused")
+		}
+		if goFlagsConfig.MutexProfile != "" {
+			fmt.Fprintln(os.Stdout, "no mutex profile was generated because specs are programmatically focused")
+		}
+	}
+
+	return suite
+}
+
+// runParallel runs a Ginkgo suite across numProcs processes coordinated by
+// a parallel-support server.  Each process gets its own per-proc profile
+// paths; after all processes exit, per-proc profiles are merged and the
+// suite's state is derived from the combined exit statuses.
+func runParallel(suite TestSuite, ginkgoConfig types.SuiteConfig, reporterConfig types.ReporterConfig, cliConfig types.CLIConfig, goFlagsConfig types.GoFlagsConfig, additionalArgs []string) TestSuite {
+	// procResult carries the per-process outcome back from the Wait goroutines
+	type procResult struct {
+		passed               bool
+		hasProgrammaticFocus bool
+	}
+
+	numProcs := cliConfig.ComputedProcs()
+	procOutput := make([]*bytes.Buffer, numProcs)
+	coverProfiles := []string{}
+
+	blockProfiles := []string{}
+	cpuProfiles := []string{}
+	memProfiles := []string{}
+	mutexProfiles := []string{}
+
+	procResults := make(chan procResult)
+
+	server, err := parallel_support.NewServer(numProcs, reporters.NewDefaultReporter(reporterConfig, formatter.ColorableStdOut))
+	command.AbortIfError("Failed to start parallel spec server", err)
+	server.Start()
+	defer server.Close()
+
+	// reports are aggregated by the server, so a single path (proc 0) is used
+	if reporterConfig.JSONReport != "" {
+		reporterConfig.JSONReport = AbsPathForGeneratedAsset(reporterConfig.JSONReport, suite, cliConfig, 0)
+	}
+	if reporterConfig.JUnitReport != "" {
+		reporterConfig.JUnitReport = AbsPathForGeneratedAsset(reporterConfig.JUnitReport, suite, cliConfig, 0)
+	}
+	if reporterConfig.TeamcityReport != "" {
+		reporterConfig.TeamcityReport = AbsPathForGeneratedAsset(reporterConfig.TeamcityReport, suite, cliConfig, 0)
+	}
+
+	for proc := 1; proc <= numProcs; proc++ {
+		procGinkgoConfig := ginkgoConfig
+		procGinkgoConfig.ParallelProcess, procGinkgoConfig.ParallelTotal, procGinkgoConfig.ParallelHost = proc, numProcs, server.Address()
+
+		// each process writes its profiles to a ".<proc>"-suffixed path;
+		// the paths are collected for merging below
+		procGoFlagsConfig := goFlagsConfig
+		if goFlagsConfig.Cover {
+			procGoFlagsConfig.CoverProfile = AbsPathForGeneratedAsset(goFlagsConfig.CoverProfile, suite, cliConfig, proc)
+			coverProfiles = append(coverProfiles, procGoFlagsConfig.CoverProfile)
+		}
+		if goFlagsConfig.BlockProfile != "" {
+			procGoFlagsConfig.BlockProfile = AbsPathForGeneratedAsset(goFlagsConfig.BlockProfile, suite, cliConfig, proc)
+			blockProfiles = append(blockProfiles, procGoFlagsConfig.BlockProfile)
+		}
+		if goFlagsConfig.CPUProfile != "" {
+			procGoFlagsConfig.CPUProfile = AbsPathForGeneratedAsset(goFlagsConfig.CPUProfile, suite, cliConfig, proc)
+			cpuProfiles = append(cpuProfiles, procGoFlagsConfig.CPUProfile)
+		}
+		if goFlagsConfig.MemProfile != "" {
+			procGoFlagsConfig.MemProfile = AbsPathForGeneratedAsset(goFlagsConfig.MemProfile, suite, cliConfig, proc)
+			memProfiles = append(memProfiles, procGoFlagsConfig.MemProfile)
+		}
+		if goFlagsConfig.MutexProfile != "" {
+			procGoFlagsConfig.MutexProfile = AbsPathForGeneratedAsset(goFlagsConfig.MutexProfile, suite, cliConfig, proc)
+			mutexProfiles = append(mutexProfiles, procGoFlagsConfig.MutexProfile)
+		}
+
+		args, err := types.GenerateGinkgoTestRunArgs(procGinkgoConfig, reporterConfig, procGoFlagsConfig)
+		command.AbortIfError("Failed to generate test run arguments", err)
+		args = append([]string{"--test.timeout=0"}, args...)
+		args = append(args, additionalArgs...)
+
+		cmd, buf := buildAndStartCommand(suite, args, false)
+		procOutput[proc-1] = buf
+		// lets the server detect processes that died without reporting
+		server.RegisterAlive(proc, func() bool { return cmd.ProcessState == nil || !cmd.ProcessState.Exited() })
+
+		go func() {
+			cmd.Wait()
+			exitStatus := cmd.ProcessState.Sys().(syscall.WaitStatus).ExitStatus()
+			procResults <- procResult{
+				passed:               (exitStatus == 0) || (exitStatus == types.GINKGO_FOCUS_EXIT_CODE),
+				hasProgrammaticFocus: exitStatus == types.GINKGO_FOCUS_EXIT_CODE,
+			}
+		}()
+	}
+
+	// the suite passes only if every process passed
+	passed := true
+	for proc := 1; proc <= cliConfig.ComputedProcs(); proc++ {
+		result := <-procResults
+		passed = passed && result.passed
+		suite.HasProgrammaticFocus = suite.HasProgrammaticFocus || result.hasProgrammaticFocus
+	}
+	if passed {
+		suite.State = TestSuiteStatePassed
+	} else {
+		suite.State = TestSuiteStateFailed
+	}
+
+	select {
+	case <-server.GetSuiteDone():
+		fmt.Println("")
+	case <-time.After(time.Second):
+		//one of the nodes never finished reporting to the server. Something must have gone wrong.
+		fmt.Fprint(formatter.ColorableStdErr, formatter.F("\n{{bold}}{{red}}Ginkgo timed out waiting for all parallel procs to report back{{/}}\n"))
+		fmt.Fprint(formatter.ColorableStdErr, formatter.F("{{gray}}Test suite:{{/}} %s (%s)\n\n", suite.PackageName, suite.Path))
+		fmt.Fprint(formatter.ColorableStdErr, formatter.Fiw(0, formatter.COLS, "This occurs if a parallel process exits before it reports its results to the Ginkgo CLI. The CLI will now print out all the stdout/stderr output it's collected from the running processes. However you may not see anything useful in these logs because the individual test processes usually intercept output to stdout/stderr in order to capture it in the spec reports.\n\nYou may want to try rerunning your test suite with {{light-gray}}--output-interceptor-mode=none{{/}} to see additional output here and debug your suite.\n"))
+		fmt.Fprintln(formatter.ColorableStdErr, " ")
+		for proc := 1; proc <= cliConfig.ComputedProcs(); proc++ {
+			fmt.Fprintf(formatter.ColorableStdErr, formatter.F("{{bold}}Output from proc %d:{{/}}\n", proc))
+			fmt.Fprintln(os.Stderr, formatter.Fi(1, "%s", procOutput[proc-1].String()))
+		}
+		fmt.Fprintf(os.Stderr, "** End **")
+	}
+
+	// surface deprecation warnings and the no-tests warning from any proc
+	for proc := 1; proc <= cliConfig.ComputedProcs(); proc++ {
+		output := procOutput[proc-1].String()
+		if proc == 1 && checkForNoTestsWarning(procOutput[0]) && cliConfig.RequireSuite {
+			suite.State = TestSuiteStateFailed
+		}
+		if strings.Contains(output, "deprecated Ginkgo functionality") {
+			fmt.Fprintln(os.Stderr, output)
+		}
+	}
+
+	// merge per-proc profiles into the proc-0 (unsuffixed) asset paths
+	if len(coverProfiles) > 0 {
+		if suite.HasProgrammaticFocus {
+			fmt.Fprintln(os.Stdout, "coverage: no coverfile was generated because specs are programmatically focused")
+		} else {
+			coverProfile := AbsPathForGeneratedAsset(goFlagsConfig.CoverProfile, suite, cliConfig, 0)
+			err := MergeAndCleanupCoverProfiles(coverProfiles, coverProfile)
+			command.AbortIfError("Failed to combine cover profiles", err)
+
+			coverage, err := GetCoverageFromCoverProfile(coverProfile)
+			command.AbortIfError("Failed to compute coverage", err)
+			if coverage == 0 {
+				fmt.Fprintln(os.Stdout, "coverage: [no statements]")
+			} else {
+				fmt.Fprintf(os.Stdout, "coverage: %.1f%% of statements\n", coverage)
+			}
+		}
+	}
+	if len(blockProfiles) > 0 {
+		if suite.HasProgrammaticFocus {
+			fmt.Fprintln(os.Stdout, "no block profile was generated because specs are programmatically focused")
+		} else {
+			blockProfile := AbsPathForGeneratedAsset(goFlagsConfig.BlockProfile, suite, cliConfig, 0)
+			err := MergeProfiles(blockProfiles, blockProfile)
+			command.AbortIfError("Failed to combine blockprofiles", err)
+		}
+	}
+	if len(cpuProfiles) > 0 {
+		if suite.HasProgrammaticFocus {
+			fmt.Fprintln(os.Stdout, "no cpu profile was generated because specs are programmatically focused")
+		} else {
+			cpuProfile := AbsPathForGeneratedAsset(goFlagsConfig.CPUProfile, suite, cliConfig, 0)
+			err := MergeProfiles(cpuProfiles, cpuProfile)
+			command.AbortIfError("Failed to combine cpuprofiles", err)
+		}
+	}
+	if len(memProfiles) > 0 {
+		if suite.HasProgrammaticFocus {
+			fmt.Fprintln(os.Stdout, "no mem profile was generated because specs are programmatically focused")
+		} else {
+			memProfile := AbsPathForGeneratedAsset(goFlagsConfig.MemProfile, suite, cliConfig, 0)
+			err := MergeProfiles(memProfiles, memProfile)
+			command.AbortIfError("Failed to combine memprofiles", err)
+		}
+	}
+	if len(mutexProfiles) > 0 {
+		if suite.HasProgrammaticFocus {
+			fmt.Fprintln(os.Stdout, "no mutex profile was generated because specs are programmatically focused")
+		} else {
+			mutexProfile := AbsPathForGeneratedAsset(goFlagsConfig.MutexProfile, suite, cliConfig, 0)
+			err := MergeProfiles(mutexProfiles, mutexProfile)
+			command.AbortIfError("Failed to combine mutexprofiles", err)
+		}
+	}
+
+	return suite
+}
+
+// runAfterRunHook executes the user-supplied --after-run-hook command, after
+// substituting (ginkgo-suite-passed) and (ginkgo-suite-name) placeholders.
+// The hook's combined output is echoed with a success/failure banner.
+func runAfterRunHook(command string, noColor bool, suite TestSuite) {
+	if command == "" {
+		return
+	}
+	f := formatter.NewWithNoColorBool(noColor)
+
+	// Allow for string replacement to pass input to the command
+	passed := "[FAIL]"
+	if suite.State.Is(TestSuiteStatePassed) {
+		passed = "[PASS]"
+	}
+	command = strings.ReplaceAll(command, "(ginkgo-suite-passed)", passed)
+	command = strings.ReplaceAll(command, "(ginkgo-suite-name)", suite.PackageName)
+
+	// Must break command into parts
+	// (quoted segments are kept intact; no shell is involved)
+	splitArgs := regexp.MustCompile(`'.+'|".+"|\S+`)
+	parts := splitArgs.FindAllString(command, -1)
+
+	output, err := exec.Command(parts[0], parts[1:]...).CombinedOutput()
+	if err != nil {
+		fmt.Fprintln(formatter.ColorableStdOut, f.Fi(0, "{{red}}{{bold}}After-run-hook failed:{{/}}"))
+		fmt.Fprintln(formatter.ColorableStdOut, f.Fi(1, "{{red}}%s{{/}}", output))
+	} else {
+		fmt.Fprintln(formatter.ColorableStdOut, f.Fi(0, "{{green}}{{bold}}After-run-hook succeeded:{{/}}"))
+		fmt.Fprintln(formatter.ColorableStdOut, f.Fi(1, "{{green}}%s{{/}}", output))
+	}
+}
diff --git a/vendor/github.com/onsi/ginkgo/v2/ginkgo/internal/test_suite.go b/vendor/github.com/onsi/ginkgo/v2/ginkgo/internal/test_suite.go
new file mode 100644
index 000000000..64dcb1b78
--- /dev/null
+++ b/vendor/github.com/onsi/ginkgo/v2/ginkgo/internal/test_suite.go
@@ -0,0 +1,283 @@
+package internal
+
+import (
+ "errors"
+ "math/rand"
+ "os"
+ "path"
+ "path/filepath"
+ "regexp"
+ "strings"
+
+ "github.com/onsi/ginkgo/v2/types"
+)
+
+// Special failure reasons attached to suites that never actually ran.
+const TIMEOUT_ELAPSED_FAILURE_REASON = "Suite did not run because the timeout elapsed"
+const PRIOR_FAILURES_FAILURE_REASON = "Suite did not run because prior suites failed and --keep-going is not set"
+const EMPTY_SKIP_FAILURE_REASON = "Suite did not run go test reported that no test files were found"
+
+// TestSuiteState tracks a suite through its compile/run/skip lifecycle.
+type TestSuiteState uint
+
+const (
+	TestSuiteStateInvalid TestSuiteState = iota
+
+	TestSuiteStateUncompiled
+	TestSuiteStateCompiled
+
+	TestSuiteStatePassed
+
+	TestSuiteStateSkippedDueToEmptyCompilation
+	TestSuiteStateSkippedByFilter
+	TestSuiteStateSkippedDueToPriorFailures
+
+	TestSuiteStateFailed
+	TestSuiteStateFailedDueToTimeout
+	TestSuiteStateFailedToCompile
+)
+
+// TestSuiteStateFailureStates enumerates every state that counts as a failure.
+var TestSuiteStateFailureStates = []TestSuiteState{TestSuiteStateFailed, TestSuiteStateFailedDueToTimeout, TestSuiteStateFailedToCompile}
+
+// Is reports whether state equals any of the given states.
+func (state TestSuiteState) Is(states ...TestSuiteState) bool {
+	for _, suiteState := range states {
+		if suiteState == state {
+			return true
+		}
+	}
+
+	return false
+}
+
+// TestSuite describes a single test package: where it lives, whether it is a
+// Ginkgo suite, how (and whether) it was compiled, and how its run went.
+type TestSuite struct {
+	Path        string
+	PackageName string
+	IsGinkgo    bool
+
+	Precompiled        bool
+	PathToCompiledTest string
+	CompilationError   error
+
+	HasProgrammaticFocus bool
+	State                TestSuiteState
+}
+
+// AbsPath returns the suite's path made absolute (errors are discarded).
+func (ts TestSuite) AbsPath() string {
+	path, _ := filepath.Abs(ts.Path)
+	return path
+}
+
+// NamespacedName flattens the suite's relative path into a single
+// filesystem-safe identifier (separators and spaces become underscores),
+// falling back to the package name for the current directory.
+func (ts TestSuite) NamespacedName() string {
+	name := relPath(ts.Path)
+	name = strings.TrimLeft(name, "."+string(filepath.Separator))
+	name = strings.ReplaceAll(name, string(filepath.Separator), "_")
+	name = strings.ReplaceAll(name, " ", "_")
+	if name == "" {
+		return ts.PackageName
+	}
+	return name
+}
+
+// TestSuites is a collection of suites with filtering/counting helpers.
+type TestSuites []TestSuite
+
+// AnyHaveProgrammaticFocus reports whether any suite ran with focused specs.
+func (ts TestSuites) AnyHaveProgrammaticFocus() bool {
+	for _, suite := range ts {
+		if suite.HasProgrammaticFocus {
+			return true
+		}
+	}
+
+	return false
+}
+
+// ThatAreGinkgoSuites returns only the suites that import Ginkgo.
+func (ts TestSuites) ThatAreGinkgoSuites() TestSuites {
+	out := TestSuites{}
+	for _, suite := range ts {
+		if suite.IsGinkgo {
+			out = append(out, suite)
+		}
+	}
+	return out
+}
+
+// CountWithState returns how many suites are in any of the given states.
+func (ts TestSuites) CountWithState(states ...TestSuiteState) int {
+	n := 0
+	for _, suite := range ts {
+		if suite.State.Is(states...) {
+			n += 1
+		}
+	}
+
+	return n
+}
+
+// WithState returns the suites whose state matches any of the given states.
+func (ts TestSuites) WithState(states ...TestSuiteState) TestSuites {
+	out := TestSuites{}
+	for _, suite := range ts {
+		if suite.State.Is(states...) {
+			out = append(out, suite)
+		}
+	}
+
+	return out
+}
+
+// WithoutState returns the suites whose state matches none of the given states.
+func (ts TestSuites) WithoutState(states ...TestSuiteState) TestSuites {
+	out := TestSuites{}
+	for _, suite := range ts {
+		if !suite.State.Is(states...) {
+			out = append(out, suite)
+		}
+	}
+
+	return out
+}
+
+// ShuffledCopy returns a new slice with the suites permuted by a
+// deterministic, seed-derived permutation (the receiver is untouched).
+func (ts TestSuites) ShuffledCopy(seed int64) TestSuites {
+	out := make(TestSuites, len(ts))
+	permutation := rand.New(rand.NewSource(seed)).Perm(len(ts))
+	for i, j := range permutation {
+		out[i] = ts[j]
+	}
+	return out
+}
+
+// FindSuites resolves the CLI arguments into a list of test suites.  Each
+// argument may be a precompiled binary (when allowPrecompiled), a directory,
+// or a "dir/..." recursive pattern; with no arguments the current directory
+// is searched.  Suites matching --skip-package filters are marked skipped
+// rather than removed.
+func FindSuites(args []string, cliConfig types.CLIConfig, allowPrecompiled bool) TestSuites {
+	suites := TestSuites{}
+
+	if len(args) > 0 {
+		for _, arg := range args {
+			if allowPrecompiled {
+				suite, err := precompiledTestSuite(arg)
+				if err == nil {
+					suites = append(suites, suite)
+					continue
+				}
+			}
+			recurseForSuite := cliConfig.Recurse
+			if strings.HasSuffix(arg, "/...") && arg != "/..." {
+				// strip the "/..." suffix and force recursion for this arg
+				arg = arg[:len(arg)-4]
+				recurseForSuite = true
+			}
+			suites = append(suites, suitesInDir(arg, recurseForSuite)...)
+		}
+	} else {
+		suites = suitesInDir(".", cliConfig.Recurse)
+	}
+
+	if cliConfig.SkipPackage != "" {
+		// --skip-package takes a comma-separated list of path substrings
+		skipFilters := strings.Split(cliConfig.SkipPackage, ",")
+		for idx := range suites {
+			for _, skipFilter := range skipFilters {
+				if strings.Contains(suites[idx].Path, skipFilter) {
+					suites[idx].State = TestSuiteStateSkippedByFilter
+					break
+				}
+			}
+		}
+	}
+
+	return suites
+}
+
+// precompiledTestSuite validates that path points at an executable
+// precompiled test binary (".test" or ".exe") and wraps it in a TestSuite
+// already marked Compiled.  Precompiled binaries are assumed to be Ginkgo
+// suites.
+func precompiledTestSuite(path string) (TestSuite, error) {
+	info, err := os.Stat(path)
+	if err != nil {
+		return TestSuite{}, err
+	}
+
+	if info.IsDir() {
+		return TestSuite{}, errors.New("this is a directory, not a file")
+	}
+
+	if filepath.Ext(path) != ".test" && filepath.Ext(path) != ".exe" {
+		return TestSuite{}, errors.New("this is not a .test binary")
+	}
+
+	// executable-bit check only applies to ".test" (i.e. non-Windows) binaries
+	if filepath.Ext(path) == ".test" && info.Mode()&0111 == 0 {
+		return TestSuite{}, errors.New("this is not executable")
+	}
+
+	dir := relPath(filepath.Dir(path))
+	packageName := strings.TrimSuffix(filepath.Base(path), ".exe")
+	packageName = strings.TrimSuffix(packageName, ".test")
+
+	path, err = filepath.Abs(path)
+	if err != nil {
+		return TestSuite{}, err
+	}
+
+	return TestSuite{
+		Path:               dir,
+		PackageName:        packageName,
+		IsGinkgo:           true,
+		Precompiled:        true,
+		PathToCompiledTest: path,
+		State:              TestSuiteStateCompiled,
+	}, nil
+}
+
+// suitesInDir returns at most one suite for dir (if it contains a
+// non-hidden *_test.go file) and, when recurse is set, suites from all
+// non-hidden subdirectories.  "vendor" directories are always skipped.
+func suitesInDir(dir string, recurse bool) TestSuites {
+	suites := TestSuites{}
+
+	if path.Base(dir) == "vendor" {
+		return suites
+	}
+
+	// read errors are ignored: an unreadable dir simply yields no suites
+	files, _ := os.ReadDir(dir)
+	re := regexp.MustCompile(`^[^._].*_test\.go$`)
+	for _, file := range files {
+		if !file.IsDir() && re.Match([]byte(file.Name())) {
+			suite := TestSuite{
+				Path:        relPath(dir),
+				PackageName: packageNameForSuite(dir),
+				IsGinkgo:    filesHaveGinkgoSuite(dir, files),
+				State:       TestSuiteStateUncompiled,
+			}
+			suites = append(suites, suite)
+			// one suite per directory is enough
+			break
+		}
+	}
+
+	if recurse {
+		// skip hidden and underscore-prefixed directories
+		re = regexp.MustCompile(`^[._]`)
+		for _, file := range files {
+			if file.IsDir() && !re.Match([]byte(file.Name())) {
+				suites = append(suites, suitesInDir(dir+"/"+file.Name(), recurse)...)
+			}
+		}
+	}
+
+	return suites
+}
+
+// relPath converts dir to a path relative to the current working directory,
+// always prefixed with "./" (errors from Abs/Getwd/Rel are discarded).
+func relPath(dir string) string {
+	dir, _ = filepath.Abs(dir)
+	cwd, _ := os.Getwd()
+	dir, _ = filepath.Rel(cwd, filepath.Clean(dir))
+
+	if string(dir[0]) != "." {
+		dir = "." + string(filepath.Separator) + dir
+	}
+
+	return dir
+}
+
+// packageNameForSuite derives the suite's package name from the directory's
+// base name.
+func packageNameForSuite(dir string) string {
+	path, _ := filepath.Abs(dir)
+	return filepath.Base(path)
+}
+
+// filesHaveGinkgoSuite reports whether any *_test.go file in dir references
+// Ginkgo (by package name or import path, v1 or v2).
+func filesHaveGinkgoSuite(dir string, files []os.DirEntry) bool {
+	reTestFile := regexp.MustCompile(`_test\.go$`)
+	reGinkgo := regexp.MustCompile(`package ginkgo|\/ginkgo"|\/ginkgo\/v2"|\/ginkgo\/v2/dsl/`)
+
+	for _, file := range files {
+		if !file.IsDir() && reTestFile.Match([]byte(file.Name())) {
+			// read errors are ignored: unreadable files just don't match
+			contents, _ := os.ReadFile(dir + "/" + file.Name())
+			if reGinkgo.Match(contents) {
+				return true
+			}
+		}
+	}
+
+	return false
+}
diff --git a/vendor/github.com/onsi/ginkgo/v2/ginkgo/internal/utils.go b/vendor/github.com/onsi/ginkgo/v2/ginkgo/internal/utils.go
new file mode 100644
index 000000000..bd9ca7d51
--- /dev/null
+++ b/vendor/github.com/onsi/ginkgo/v2/ginkgo/internal/utils.go
@@ -0,0 +1,86 @@
+package internal
+
+import (
+ "fmt"
+ "io"
+ "os"
+ "os/exec"
+
+ "github.com/onsi/ginkgo/v2/formatter"
+ "github.com/onsi/ginkgo/v2/ginkgo/command"
+)
+
+// FileExists reports whether path can be stat'ed (any stat error — not just
+// non-existence — yields false).
+func FileExists(path string) bool {
+	_, err := os.Stat(path)
+	return err == nil
+}
+
+// CopyFile copies src to dest, preserving src's permission bits and
+// replacing any existing dest.
+// NOTE(review): srcFile is not closed on the early error returns below —
+// only on the success path; upstream behavior preserved.
+func CopyFile(src string, dest string) error {
+	srcFile, err := os.Open(src)
+	if err != nil {
+		return err
+	}
+
+	srcStat, err := srcFile.Stat()
+	if err != nil {
+		return err
+	}
+
+	// remove an existing destination so OpenFile applies src's mode
+	if _, err := os.Stat(dest); err == nil {
+		os.Remove(dest)
+	}
+
+	destFile, err := os.OpenFile(dest, os.O_WRONLY|os.O_CREATE, srcStat.Mode())
+	if err != nil {
+		return err
+	}
+
+	_, err = io.Copy(destFile, srcFile)
+	if err != nil {
+		return err
+	}
+
+	if err := srcFile.Close(); err != nil {
+		return err
+	}
+	return destFile.Close()
+}
+
+// GoFmt runs `go fmt` on path, aborting the CLI if it fails.
+func GoFmt(path string) {
+	out, err := exec.Command("go", "fmt", path).CombinedOutput()
+	if err != nil {
+		command.AbortIfError(fmt.Sprintf("Could not fmt:\n%s\n", string(out)), err)
+	}
+}
+
+// PluralizedWord returns singular when count is exactly 1, plural otherwise.
+func PluralizedWord(singular, plural string, count int) string {
+	if count == 1 {
+		return singular
+	}
+	return plural
+}
+
+// FailedSuitesReport renders a colored, column-aligned summary of every
+// failed suite, annotating compilation failures and timeouts.
+func FailedSuitesReport(suites TestSuites, f formatter.Formatter) string {
+	out := ""
+	out += "There were failures detected in the following suites:\n"
+
+	// right-align package names to the longest failing package
+	maxPackageNameLength := 0
+	for _, suite := range suites.WithState(TestSuiteStateFailureStates...) {
+		if len(suite.PackageName) > maxPackageNameLength {
+			maxPackageNameLength = len(suite.PackageName)
+		}
+	}
+
+	packageNameFormatter := fmt.Sprintf("%%%ds", maxPackageNameLength)
+	for _, suite := range suites {
+		switch suite.State {
+		case TestSuiteStateFailed:
+			out += f.Fi(1, "{{red}}"+packageNameFormatter+" {{gray}}%s{{/}}\n", suite.PackageName, suite.Path)
+		case TestSuiteStateFailedToCompile:
+			out += f.Fi(1, "{{red}}"+packageNameFormatter+" {{gray}}%s {{magenta}}[Compilation failure]{{/}}\n", suite.PackageName, suite.Path)
+		case TestSuiteStateFailedDueToTimeout:
+			out += f.Fi(1, "{{red}}"+packageNameFormatter+" {{gray}}%s {{orange}}[%s]{{/}}\n", suite.PackageName, suite.Path, TIMEOUT_ELAPSED_FAILURE_REASON)
+		}
+	}
+	return out
+}
diff --git a/vendor/github.com/onsi/ginkgo/v2/ginkgo/internal/verify_version.go b/vendor/github.com/onsi/ginkgo/v2/ginkgo/internal/verify_version.go
new file mode 100644
index 000000000..9da1bab3d
--- /dev/null
+++ b/vendor/github.com/onsi/ginkgo/v2/ginkgo/internal/verify_version.go
@@ -0,0 +1,54 @@
+package internal
+
+import (
+ "fmt"
+ "os/exec"
+ "regexp"
+ "strings"
+
+ "github.com/onsi/ginkgo/v2/formatter"
+ "github.com/onsi/ginkgo/v2/types"
+)
+
+var versiorRe = regexp.MustCompile(`v(\d+\.\d+\.\d+)`)
+
+func VerifyCLIAndFrameworkVersion(suites TestSuites) {
+ cliVersion := types.VERSION
+ mismatches := map[string][]string{}
+
+ for _, suite := range suites {
+ cmd := exec.Command("go", "list", "-m", "github.com/onsi/ginkgo/v2")
+ cmd.Dir = suite.Path
+ output, err := cmd.CombinedOutput()
+ if err != nil {
+ continue
+ }
+ components := strings.Split(string(output), " ")
+ if len(components) != 2 {
+ continue
+ }
+ matches := versiorRe.FindStringSubmatch(components[1])
+ if matches == nil || len(matches) != 2 {
+ continue
+ }
+ libraryVersion := matches[1]
+ if cliVersion != libraryVersion {
+ mismatches[libraryVersion] = append(mismatches[libraryVersion], suite.PackageName)
+ }
+ }
+
+ if len(mismatches) == 0 {
+ return
+ }
+
+ fmt.Println(formatter.F("{{red}}{{bold}}Ginkgo detected a version mismatch between the Ginkgo CLI and the version of Ginkgo imported by your packages:{{/}}"))
+
+ fmt.Println(formatter.Fi(1, "Ginkgo CLI Version:"))
+ fmt.Println(formatter.Fi(2, "{{bold}}%s{{/}}", cliVersion))
+ fmt.Println(formatter.Fi(1, "Mismatched package versions found:"))
+ for version, packages := range mismatches {
+ fmt.Println(formatter.Fi(2, "{{bold}}%s{{/}} used by %s", version, strings.Join(packages, ", ")))
+ }
+ fmt.Println("")
+ fmt.Println(formatter.Fiw(1, formatter.COLS, "{{gray}}Ginkgo will continue to attempt to run but you may see errors (including flag parsing errors) and should either update your go.mod or your version of the Ginkgo CLI to match.\n\nTo install the matching version of the CLI run\n {{bold}}go install github.com/onsi/ginkgo/v2/ginkgo{{/}}{{gray}}\nfrom a path that contains a go.mod file. Alternatively you can use\n {{bold}}go run github.com/onsi/ginkgo/v2/ginkgo{{/}}{{gray}}\nfrom a path that contains a go.mod file to invoke the matching version of the Ginkgo CLI.\n\nIf you are attempting to test multiple packages that each have a different version of the Ginkgo library with a single Ginkgo CLI that is currently unsupported.\n{{/}}"))
+}
diff --git a/vendor/github.com/onsi/ginkgo/v2/ginkgo/labels/labels_command.go b/vendor/github.com/onsi/ginkgo/v2/ginkgo/labels/labels_command.go
new file mode 100644
index 000000000..6c61f09d1
--- /dev/null
+++ b/vendor/github.com/onsi/ginkgo/v2/ginkgo/labels/labels_command.go
@@ -0,0 +1,123 @@
+package labels
+
+import (
+ "fmt"
+ "go/ast"
+ "go/parser"
+ "go/token"
+ "sort"
+ "strconv"
+ "strings"
+
+ "github.com/onsi/ginkgo/v2/ginkgo/command"
+ "github.com/onsi/ginkgo/v2/ginkgo/internal"
+ "github.com/onsi/ginkgo/v2/types"
+ "golang.org/x/tools/go/ast/inspector"
+)
+
+func BuildLabelsCommand() command.Command {
+ var cliConfig = types.NewDefaultCLIConfig()
+
+ flags, err := types.BuildLabelsCommandFlagSet(&cliConfig)
+ if err != nil {
+ panic(err)
+ }
+
+ return command.Command{
+ Name: "labels",
+ Usage: "ginkgo labels ",
+ Flags: flags,
+ ShortDoc: "List labels detected in the passed-in packages (or the package in the current directory if left blank).",
+ DocLink: "spec-labels",
+ Command: func(args []string, _ []string) {
+ ListLabels(args, cliConfig)
+ },
+ }
+}
+
+func ListLabels(args []string, cliConfig types.CLIConfig) {
+ suites := internal.FindSuites(args, cliConfig, false).WithoutState(internal.TestSuiteStateSkippedByFilter)
+ if len(suites) == 0 {
+ command.AbortWith("Found no test suites")
+ }
+ for _, suite := range suites {
+ labels := fetchLabelsFromPackage(suite.Path)
+ if len(labels) == 0 {
+ fmt.Printf("%s: No labels found\n", suite.PackageName)
+ } else {
+ fmt.Printf("%s: [%s]\n", suite.PackageName, strings.Join(labels, ", "))
+ }
+ }
+}
+
+func fetchLabelsFromPackage(packagePath string) []string {
+ fset := token.NewFileSet()
+ parsedPackages, err := parser.ParseDir(fset, packagePath, nil, 0)
+ command.AbortIfError("Failed to parse package source:", err)
+
+ files := []*ast.File{}
+ hasTestPackage := false
+ for key, pkg := range parsedPackages {
+ if strings.HasSuffix(key, "_test") {
+ hasTestPackage = true
+ for _, file := range pkg.Files {
+ files = append(files, file)
+ }
+ }
+ }
+ if !hasTestPackage {
+ for _, pkg := range parsedPackages {
+ for _, file := range pkg.Files {
+ files = append(files, file)
+ }
+ }
+ }
+
+ seen := map[string]bool{}
+ labels := []string{}
+ ispr := inspector.New(files)
+ ispr.Preorder([]ast.Node{&ast.CallExpr{}}, func(n ast.Node) {
+ potentialLabels := fetchLabels(n.(*ast.CallExpr))
+ for _, label := range potentialLabels {
+ if !seen[label] {
+ seen[label] = true
+ labels = append(labels, strconv.Quote(label))
+ }
+ }
+ })
+
+ sort.Strings(labels)
+ return labels
+}
+
+func fetchLabels(callExpr *ast.CallExpr) []string {
+ out := []string{}
+ switch expr := callExpr.Fun.(type) {
+ case *ast.Ident:
+ if expr.Name != "Label" {
+ return out
+ }
+ case *ast.SelectorExpr:
+ if expr.Sel.Name != "Label" {
+ return out
+ }
+ default:
+ return out
+ }
+ for _, arg := range callExpr.Args {
+ switch expr := arg.(type) {
+ case *ast.BasicLit:
+ if expr.Kind == token.STRING {
+ unquoted, err := strconv.Unquote(expr.Value)
+ if err != nil {
+ unquoted = expr.Value
+ }
+ validated, err := types.ValidateAndCleanupLabel(unquoted, types.CodeLocation{})
+ if err == nil {
+ out = append(out, validated)
+ }
+ }
+ }
+ }
+ return out
+}
diff --git a/vendor/github.com/onsi/ginkgo/v2/ginkgo/main.go b/vendor/github.com/onsi/ginkgo/v2/ginkgo/main.go
new file mode 100644
index 000000000..e9abb27d8
--- /dev/null
+++ b/vendor/github.com/onsi/ginkgo/v2/ginkgo/main.go
@@ -0,0 +1,58 @@
+package main
+
+import (
+ "fmt"
+ "os"
+
+ "github.com/onsi/ginkgo/v2/ginkgo/build"
+ "github.com/onsi/ginkgo/v2/ginkgo/command"
+ "github.com/onsi/ginkgo/v2/ginkgo/generators"
+ "github.com/onsi/ginkgo/v2/ginkgo/labels"
+ "github.com/onsi/ginkgo/v2/ginkgo/outline"
+ "github.com/onsi/ginkgo/v2/ginkgo/run"
+ "github.com/onsi/ginkgo/v2/ginkgo/unfocus"
+ "github.com/onsi/ginkgo/v2/ginkgo/watch"
+ "github.com/onsi/ginkgo/v2/types"
+)
+
+var program command.Program
+
+func GenerateCommands() []command.Command {
+ return []command.Command{
+ watch.BuildWatchCommand(),
+ build.BuildBuildCommand(),
+ generators.BuildBootstrapCommand(),
+ generators.BuildGenerateCommand(),
+ labels.BuildLabelsCommand(),
+ outline.BuildOutlineCommand(),
+ unfocus.BuildUnfocusCommand(),
+ BuildVersionCommand(),
+ }
+}
+
+func main() {
+ program = command.Program{
+ Name: "ginkgo",
+ Heading: fmt.Sprintf("Ginkgo Version %s", types.VERSION),
+ Commands: GenerateCommands(),
+ DefaultCommand: run.BuildRunCommand(),
+ DeprecatedCommands: []command.DeprecatedCommand{
+ {Name: "convert", Deprecation: types.Deprecations.Convert()},
+ {Name: "blur", Deprecation: types.Deprecations.Blur()},
+ {Name: "nodot", Deprecation: types.Deprecations.Nodot()},
+ },
+ }
+
+ program.RunAndExit(os.Args)
+}
+
+func BuildVersionCommand() command.Command {
+ return command.Command{
+ Name: "version",
+ Usage: "ginkgo version",
+ ShortDoc: "Print Ginkgo's version",
+ Command: func(_ []string, _ []string) {
+ fmt.Printf("Ginkgo Version %s\n", types.VERSION)
+ },
+ }
+}
diff --git a/vendor/github.com/onsi/ginkgo/ginkgo/outline/ginkgo.go b/vendor/github.com/onsi/ginkgo/v2/ginkgo/outline/ginkgo.go
similarity index 73%
rename from vendor/github.com/onsi/ginkgo/ginkgo/outline/ginkgo.go
rename to vendor/github.com/onsi/ginkgo/v2/ginkgo/outline/ginkgo.go
index ce6b7fcd7..0b9b19fe7 100644
--- a/vendor/github.com/onsi/ginkgo/ginkgo/outline/ginkgo.go
+++ b/vendor/github.com/onsi/ginkgo/v2/ginkgo/outline/ginkgo.go
@@ -1,6 +1,7 @@
package outline
import (
+ "github.com/onsi/ginkgo/v2/types"
"go/ast"
"go/token"
"strconv"
@@ -25,9 +26,10 @@ type ginkgoMetadata struct {
// End is the position of first character immediately after the spec or container block
End int `json:"end"`
- Spec bool `json:"spec"`
- Focused bool `json:"focused"`
- Pending bool `json:"pending"`
+ Spec bool `json:"spec"`
+ Focused bool `json:"focused"`
+ Pending bool `json:"pending"`
+ Labels []string `json:"labels"`
}
// ginkgoNode is used to construct the outline as a tree
@@ -131,7 +133,7 @@ func absoluteOffsetsForNode(fset *token.FileSet, n ast.Node) (start, end int) {
// ginkgoNodeFromCallExpr derives an outline entry from a go AST subtree
// corresponding to a Ginkgo container or spec.
-func ginkgoNodeFromCallExpr(fset *token.FileSet, ce *ast.CallExpr, ginkgoPackageName, tablePackageName *string) (*ginkgoNode, bool) {
+func ginkgoNodeFromCallExpr(fset *token.FileSet, ce *ast.CallExpr, ginkgoPackageName *string) (*ginkgoNode, bool) {
packageName, identName, ok := packageAndIdentNamesFromCallExpr(ce)
if !ok {
return nil, false
@@ -142,56 +144,39 @@ func ginkgoNodeFromCallExpr(fset *token.FileSet, ce *ast.CallExpr, ginkgoPackage
n.Start, n.End = absoluteOffsetsForNode(fset, ce)
n.Nodes = make([]*ginkgoNode, 0)
switch identName {
- case "It", "Measure", "Specify":
+ case "It", "Specify", "Entry":
n.Spec = true
n.Text = textOrAltFromCallExpr(ce, undefinedTextAlt)
+ n.Labels = labelFromCallExpr(ce)
+ n.Pending = pendingFromCallExpr(ce)
return &n, ginkgoPackageName != nil && *ginkgoPackageName == packageName
- case "Entry":
- n.Spec = true
- n.Text = textOrAltFromCallExpr(ce, undefinedTextAlt)
- return &n, tablePackageName != nil && *tablePackageName == packageName
- case "FIt", "FMeasure", "FSpecify":
+ case "FIt", "FSpecify", "FEntry":
n.Spec = true
n.Focused = true
n.Text = textOrAltFromCallExpr(ce, undefinedTextAlt)
+ n.Labels = labelFromCallExpr(ce)
return &n, ginkgoPackageName != nil && *ginkgoPackageName == packageName
- case "FEntry":
- n.Spec = true
- n.Focused = true
- n.Text = textOrAltFromCallExpr(ce, undefinedTextAlt)
- return &n, tablePackageName != nil && *tablePackageName == packageName
- case "PIt", "PMeasure", "PSpecify", "XIt", "XMeasure", "XSpecify":
+ case "PIt", "PSpecify", "XIt", "XSpecify", "PEntry", "XEntry":
n.Spec = true
n.Pending = true
n.Text = textOrAltFromCallExpr(ce, undefinedTextAlt)
+ n.Labels = labelFromCallExpr(ce)
return &n, ginkgoPackageName != nil && *ginkgoPackageName == packageName
- case "PEntry", "XEntry":
- n.Spec = true
- n.Pending = true
- n.Text = textOrAltFromCallExpr(ce, undefinedTextAlt)
- return &n, tablePackageName != nil && *tablePackageName == packageName
- case "Context", "Describe", "When":
+ case "Context", "Describe", "When", "DescribeTable":
n.Text = textOrAltFromCallExpr(ce, undefinedTextAlt)
+ n.Labels = labelFromCallExpr(ce)
+ n.Pending = pendingFromCallExpr(ce)
return &n, ginkgoPackageName != nil && *ginkgoPackageName == packageName
- case "DescribeTable":
- n.Text = textOrAltFromCallExpr(ce, undefinedTextAlt)
- return &n, tablePackageName != nil && *tablePackageName == packageName
- case "FContext", "FDescribe", "FWhen":
+ case "FContext", "FDescribe", "FWhen", "FDescribeTable":
n.Focused = true
n.Text = textOrAltFromCallExpr(ce, undefinedTextAlt)
+ n.Labels = labelFromCallExpr(ce)
return &n, ginkgoPackageName != nil && *ginkgoPackageName == packageName
- case "FDescribeTable":
- n.Focused = true
- n.Text = textOrAltFromCallExpr(ce, undefinedTextAlt)
- return &n, tablePackageName != nil && *tablePackageName == packageName
- case "PContext", "PDescribe", "PWhen", "XContext", "XDescribe", "XWhen":
+ case "PContext", "PDescribe", "PWhen", "XContext", "XDescribe", "XWhen", "PDescribeTable", "XDescribeTable":
n.Pending = true
n.Text = textOrAltFromCallExpr(ce, undefinedTextAlt)
+ n.Labels = labelFromCallExpr(ce)
return &n, ginkgoPackageName != nil && *ginkgoPackageName == packageName
- case "PDescribeTable", "XDescribeTable":
- n.Pending = true
- n.Text = textOrAltFromCallExpr(ce, undefinedTextAlt)
- return &n, tablePackageName != nil && *tablePackageName == packageName
case "By":
n.Text = textOrAltFromCallExpr(ce, undefinedTextAlt)
return &n, ginkgoPackageName != nil && *ginkgoPackageName == packageName
@@ -241,3 +226,77 @@ func textFromCallExpr(ce *ast.CallExpr) (string, bool) {
return text.Value, true
}
}
+
+func labelFromCallExpr(ce *ast.CallExpr) []string {
+
+ labels := []string{}
+ if len(ce.Args) < 2 {
+ return labels
+ }
+
+ for _, arg := range ce.Args[1:] {
+ switch expr := arg.(type) {
+ case *ast.CallExpr:
+ id, ok := expr.Fun.(*ast.Ident)
+ if !ok {
+ // to skip over cases where the expr.Fun. is actually *ast.SelectorExpr
+ continue
+ }
+ if id.Name == "Label" {
+ ls := extractLabels(expr)
+ for _, label := range ls {
+ labels = append(labels, label)
+ }
+ }
+ }
+ }
+ return labels
+}
+
+func extractLabels(expr *ast.CallExpr) []string {
+ out := []string{}
+ for _, arg := range expr.Args {
+ switch expr := arg.(type) {
+ case *ast.BasicLit:
+ if expr.Kind == token.STRING {
+ unquoted, err := strconv.Unquote(expr.Value)
+ if err != nil {
+ unquoted = expr.Value
+ }
+ validated, err := types.ValidateAndCleanupLabel(unquoted, types.CodeLocation{})
+ if err == nil {
+ out = append(out, validated)
+ }
+ }
+ }
+ }
+
+ return out
+}
+
+func pendingFromCallExpr(ce *ast.CallExpr) bool {
+
+ pending := false
+ if len(ce.Args) < 2 {
+ return pending
+ }
+
+ for _, arg := range ce.Args[1:] {
+ switch expr := arg.(type) {
+ case *ast.CallExpr:
+ id, ok := expr.Fun.(*ast.Ident)
+ if !ok {
+ // to skip over cases where the expr.Fun. is actually *ast.SelectorExpr
+ continue
+ }
+ if id.Name == "Pending" {
+ pending = true
+ }
+ case *ast.Ident:
+ if expr.Name == "Pending" {
+ pending = true
+ }
+ }
+ }
+ return pending
+}
diff --git a/vendor/github.com/onsi/ginkgo/ginkgo/outline/import.go b/vendor/github.com/onsi/ginkgo/v2/ginkgo/outline/import.go
similarity index 97%
rename from vendor/github.com/onsi/ginkgo/ginkgo/outline/import.go
rename to vendor/github.com/onsi/ginkgo/v2/ginkgo/outline/import.go
index 4328ab391..67ec5ab75 100644
--- a/vendor/github.com/onsi/ginkgo/ginkgo/outline/import.go
+++ b/vendor/github.com/onsi/ginkgo/v2/ginkgo/outline/import.go
@@ -47,7 +47,7 @@ func packageNameForImport(f *ast.File, path string) *string {
// or nil otherwise.
func importSpec(f *ast.File, path string) *ast.ImportSpec {
for _, s := range f.Imports {
- if importPath(s) == path {
+ if strings.HasPrefix(importPath(s), path) {
return s
}
}
diff --git a/vendor/github.com/onsi/ginkgo/ginkgo/outline/outline.go b/vendor/github.com/onsi/ginkgo/v2/ginkgo/outline/outline.go
similarity index 79%
rename from vendor/github.com/onsi/ginkgo/ginkgo/outline/outline.go
rename to vendor/github.com/onsi/ginkgo/v2/ginkgo/outline/outline.go
index 242e6a109..c2327cda8 100644
--- a/vendor/github.com/onsi/ginkgo/ginkgo/outline/outline.go
+++ b/vendor/github.com/onsi/ginkgo/v2/ginkgo/outline/outline.go
@@ -12,18 +12,14 @@ import (
const (
// ginkgoImportPath is the well-known ginkgo import path
- ginkgoImportPath = "github.com/onsi/ginkgo"
-
- // tableImportPath is the well-known table extension import path
- tableImportPath = "github.com/onsi/ginkgo/extensions/table"
+ ginkgoImportPath = "github.com/onsi/ginkgo/v2"
)
// FromASTFile returns an outline for a Ginkgo test source file
func FromASTFile(fset *token.FileSet, src *ast.File) (*outline, error) {
ginkgoPackageName := packageNameForImport(src, ginkgoImportPath)
- tablePackageName := packageNameForImport(src, tableImportPath)
- if ginkgoPackageName == nil && tablePackageName == nil {
- return nil, fmt.Errorf("file does not import %q or %q", ginkgoImportPath, tableImportPath)
+ if ginkgoPackageName == nil {
+ return nil, fmt.Errorf("file does not import %q", ginkgoImportPath)
}
root := ginkgoNode{}
@@ -38,7 +34,7 @@ func FromASTFile(fset *token.FileSet, src *ast.File) (*outline, error) {
// ast.CallExpr, this should never happen
panic(fmt.Errorf("node starting at %d, ending at %d is not an *ast.CallExpr", node.Pos(), node.End()))
}
- gn, ok := ginkgoNodeFromCallExpr(fset, ce, ginkgoPackageName, tablePackageName)
+ gn, ok := ginkgoNodeFromCallExpr(fset, ce, ginkgoPackageName)
if !ok {
// Node is not a Ginkgo spec or container, continue
return true
@@ -89,12 +85,19 @@ func (o *outline) String() string {
// one 'width' of spaces for every level of nesting.
func (o *outline) StringIndent(width int) string {
var b strings.Builder
- b.WriteString("Name,Text,Start,End,Spec,Focused,Pending\n")
+ b.WriteString("Name,Text,Start,End,Spec,Focused,Pending,Labels\n")
currentIndent := 0
pre := func(n *ginkgoNode) {
b.WriteString(fmt.Sprintf("%*s", currentIndent, ""))
- b.WriteString(fmt.Sprintf("%s,%s,%d,%d,%t,%t,%t\n", n.Name, n.Text, n.Start, n.End, n.Spec, n.Focused, n.Pending))
+ var labels string
+ if len(n.Labels) == 1 {
+ labels = n.Labels[0]
+ } else {
+ labels = strings.Join(n.Labels, ", ")
+ }
+ //enclosing labels in a double quoted comma separate listed so that when inmported into a CSV app the Labels column has comma separate strings
+ b.WriteString(fmt.Sprintf("%s,%s,%d,%d,%t,%t,%t,\"%s\"\n", n.Name, n.Text, n.Start, n.End, n.Spec, n.Focused, n.Pending, labels))
currentIndent += width
}
post := func(n *ginkgoNode) {
diff --git a/vendor/github.com/onsi/ginkgo/v2/ginkgo/outline/outline_command.go b/vendor/github.com/onsi/ginkgo/v2/ginkgo/outline/outline_command.go
new file mode 100644
index 000000000..36698d46a
--- /dev/null
+++ b/vendor/github.com/onsi/ginkgo/v2/ginkgo/outline/outline_command.go
@@ -0,0 +1,98 @@
+package outline
+
+import (
+ "encoding/json"
+ "fmt"
+ "go/parser"
+ "go/token"
+ "os"
+
+ "github.com/onsi/ginkgo/v2/ginkgo/command"
+ "github.com/onsi/ginkgo/v2/types"
+)
+
+const (
+ // indentWidth is the width used by the 'indent' output
+ indentWidth = 4
+ // stdinAlias is a portable alias for stdin. This convention is used in
+ // other CLIs, e.g., kubectl.
+ stdinAlias = "-"
+ usageCommand = "ginkgo outline "
+)
+
+type outlineConfig struct {
+ Format string
+}
+
+func BuildOutlineCommand() command.Command {
+ conf := outlineConfig{
+ Format: "csv",
+ }
+ flags, err := types.NewGinkgoFlagSet(
+ types.GinkgoFlags{
+ {Name: "format", KeyPath: "Format",
+ Usage: "Format of outline",
+ UsageArgument: "one of 'csv', 'indent', or 'json'",
+ UsageDefaultValue: conf.Format,
+ },
+ },
+ &conf,
+ types.GinkgoFlagSections{},
+ )
+ if err != nil {
+ panic(err)
+ }
+
+ return command.Command{
+ Name: "outline",
+ Usage: "ginkgo outline ",
+ ShortDoc: "Create an outline of Ginkgo symbols for a file",
+ Documentation: "To read from stdin, use: `ginkgo outline -`",
+ DocLink: "creating-an-outline-of-specs",
+ Flags: flags,
+ Command: func(args []string, _ []string) {
+ outlineFile(args, conf.Format)
+ },
+ }
+}
+
+func outlineFile(args []string, format string) {
+ if len(args) != 1 {
+ command.AbortWithUsage("outline expects exactly one argument")
+ }
+
+ filename := args[0]
+ var src *os.File
+ if filename == stdinAlias {
+ src = os.Stdin
+ } else {
+ var err error
+ src, err = os.Open(filename)
+ command.AbortIfError("Failed to open file:", err)
+ }
+
+ fset := token.NewFileSet()
+
+ parsedSrc, err := parser.ParseFile(fset, filename, src, 0)
+ command.AbortIfError("Failed to parse source:", err)
+
+ o, err := FromASTFile(fset, parsedSrc)
+ command.AbortIfError("Failed to create outline:", err)
+
+ var oerr error
+ switch format {
+ case "csv":
+ _, oerr = fmt.Print(o)
+ case "indent":
+ _, oerr = fmt.Print(o.StringIndent(indentWidth))
+ case "json":
+ b, err := json.Marshal(o)
+ if err != nil {
+ println(fmt.Sprintf("error marshalling to json: %s", err))
+ }
+ _, oerr = fmt.Println(string(b))
+ default:
+ command.AbortWith("Format %s not accepted", format)
+ }
+ command.AbortIfError("Failed to write outline:", oerr)
+}
diff --git a/vendor/github.com/onsi/ginkgo/v2/ginkgo/run/run_command.go b/vendor/github.com/onsi/ginkgo/v2/ginkgo/run/run_command.go
new file mode 100644
index 000000000..aaed4d570
--- /dev/null
+++ b/vendor/github.com/onsi/ginkgo/v2/ginkgo/run/run_command.go
@@ -0,0 +1,232 @@
+package run
+
+import (
+ "fmt"
+ "os"
+ "strings"
+ "time"
+
+ "github.com/onsi/ginkgo/v2/formatter"
+ "github.com/onsi/ginkgo/v2/ginkgo/command"
+ "github.com/onsi/ginkgo/v2/ginkgo/internal"
+ "github.com/onsi/ginkgo/v2/internal/interrupt_handler"
+ "github.com/onsi/ginkgo/v2/types"
+)
+
+func BuildRunCommand() command.Command {
+ var suiteConfig = types.NewDefaultSuiteConfig()
+ var reporterConfig = types.NewDefaultReporterConfig()
+ var cliConfig = types.NewDefaultCLIConfig()
+ var goFlagsConfig = types.NewDefaultGoFlagsConfig()
+
+ flags, err := types.BuildRunCommandFlagSet(&suiteConfig, &reporterConfig, &cliConfig, &goFlagsConfig)
+ if err != nil {
+ panic(err)
+ }
+
+ interruptHandler := interrupt_handler.NewInterruptHandler(nil)
+ interrupt_handler.SwallowSigQuit()
+
+ return command.Command{
+ Name: "run",
+ Flags: flags,
+ Usage: "ginkgo run -- ",
+ ShortDoc: "Run the tests in the passed in (or the package in the current directory if left blank)",
+ Documentation: "Any arguments after -- will be passed to the test.",
+ DocLink: "running-tests",
+ Command: func(args []string, additionalArgs []string) {
+ var errors []error
+ cliConfig, goFlagsConfig, errors = types.VetAndInitializeCLIAndGoConfig(cliConfig, goFlagsConfig)
+ command.AbortIfErrors("Ginkgo detected configuration issues:", errors)
+
+ runner := &SpecRunner{
+ cliConfig: cliConfig,
+ goFlagsConfig: goFlagsConfig,
+ suiteConfig: suiteConfig,
+ reporterConfig: reporterConfig,
+ flags: flags,
+
+ interruptHandler: interruptHandler,
+ }
+
+ runner.RunSpecs(args, additionalArgs)
+ },
+ }
+}
+
+type SpecRunner struct {
+ suiteConfig types.SuiteConfig
+ reporterConfig types.ReporterConfig
+ cliConfig types.CLIConfig
+ goFlagsConfig types.GoFlagsConfig
+ flags types.GinkgoFlagSet
+
+ interruptHandler *interrupt_handler.InterruptHandler
+}
+
+func (r *SpecRunner) RunSpecs(args []string, additionalArgs []string) {
+ suites := internal.FindSuites(args, r.cliConfig, true)
+ skippedSuites := suites.WithState(internal.TestSuiteStateSkippedByFilter)
+ suites = suites.WithoutState(internal.TestSuiteStateSkippedByFilter)
+
+ internal.VerifyCLIAndFrameworkVersion(suites)
+
+ if len(skippedSuites) > 0 {
+ fmt.Println("Will skip:")
+ for _, skippedSuite := range skippedSuites {
+ fmt.Println(" " + skippedSuite.Path)
+ }
+ }
+
+ if len(skippedSuites) > 0 && len(suites) == 0 {
+ command.AbortGracefullyWith("All tests skipped! Exiting...")
+ }
+
+ if len(suites) == 0 {
+ command.AbortWith("Found no test suites")
+ }
+
+ if len(suites) > 1 && !r.flags.WasSet("succinct") && r.reporterConfig.Verbosity().LT(types.VerbosityLevelVerbose) {
+ r.reporterConfig.Succinct = true
+ }
+
+ t := time.Now()
+ var endTime time.Time
+ if r.suiteConfig.Timeout > 0 {
+ endTime = t.Add(r.suiteConfig.Timeout)
+ }
+
+ iteration := 0
+OUTER_LOOP:
+ for {
+ if !r.flags.WasSet("seed") {
+ r.suiteConfig.RandomSeed = time.Now().Unix()
+ }
+ if r.cliConfig.RandomizeSuites && len(suites) > 1 {
+ suites = suites.ShuffledCopy(r.suiteConfig.RandomSeed)
+ }
+
+ opc := internal.NewOrderedParallelCompiler(r.cliConfig.ComputedNumCompilers())
+ opc.StartCompiling(suites, r.goFlagsConfig)
+
+ SUITE_LOOP:
+ for {
+ suiteIdx, suite := opc.Next()
+ if suiteIdx >= len(suites) {
+ break SUITE_LOOP
+ }
+ suites[suiteIdx] = suite
+
+ if r.interruptHandler.Status().Interrupted() {
+ opc.StopAndDrain()
+ break OUTER_LOOP
+ }
+
+ if suites[suiteIdx].State.Is(internal.TestSuiteStateSkippedDueToEmptyCompilation) {
+ fmt.Printf("Skipping %s (no test files)\n", suite.Path)
+ continue SUITE_LOOP
+ }
+
+ if suites[suiteIdx].State.Is(internal.TestSuiteStateFailedToCompile) {
+ fmt.Println(suites[suiteIdx].CompilationError.Error())
+ if !r.cliConfig.KeepGoing {
+ opc.StopAndDrain()
+ }
+ continue SUITE_LOOP
+ }
+
+ if suites.CountWithState(internal.TestSuiteStateFailureStates...) > 0 && !r.cliConfig.KeepGoing {
+ suites[suiteIdx].State = internal.TestSuiteStateSkippedDueToPriorFailures
+ opc.StopAndDrain()
+ continue SUITE_LOOP
+ }
+
+ if !endTime.IsZero() {
+ r.suiteConfig.Timeout = endTime.Sub(time.Now())
+ if r.suiteConfig.Timeout <= 0 {
+ suites[suiteIdx].State = internal.TestSuiteStateFailedDueToTimeout
+ opc.StopAndDrain()
+ continue SUITE_LOOP
+ }
+ }
+
+ suites[suiteIdx] = internal.RunCompiledSuite(suites[suiteIdx], r.suiteConfig, r.reporterConfig, r.cliConfig, r.goFlagsConfig, additionalArgs)
+ }
+
+ if suites.CountWithState(internal.TestSuiteStateFailureStates...) > 0 {
+ if iteration > 0 {
+ fmt.Printf("\nTests failed on attempt #%d\n\n", iteration+1)
+ }
+ break OUTER_LOOP
+ }
+
+ if r.cliConfig.UntilItFails {
+ fmt.Printf("\nAll tests passed...\nWill keep running them until they fail.\nThis was attempt #%d\n%s\n", iteration+1, orcMessage(iteration+1))
+ } else if r.cliConfig.Repeat > 0 && iteration < r.cliConfig.Repeat {
+ fmt.Printf("\nAll tests passed...\nThis was attempt %d of %d.\n", iteration+1, r.cliConfig.Repeat+1)
+ } else {
+ break OUTER_LOOP
+ }
+ iteration += 1
+ }
+
+ internal.Cleanup(r.goFlagsConfig, suites...)
+
+ messages, err := internal.FinalizeProfilesAndReportsForSuites(suites, r.cliConfig, r.suiteConfig, r.reporterConfig, r.goFlagsConfig)
+ command.AbortIfError("could not finalize profiles:", err)
+ for _, message := range messages {
+ fmt.Println(message)
+ }
+
+ fmt.Printf("\nGinkgo ran %d %s in %s\n", len(suites), internal.PluralizedWord("suite", "suites", len(suites)), time.Since(t))
+
+ if suites.CountWithState(internal.TestSuiteStateFailureStates...) == 0 {
+ if suites.AnyHaveProgrammaticFocus() && strings.TrimSpace(os.Getenv("GINKGO_EDITOR_INTEGRATION")) == "" {
+ fmt.Printf("Test Suite Passed\n")
+ fmt.Printf("Detected Programmatic Focus - setting exit status to %d\n", types.GINKGO_FOCUS_EXIT_CODE)
+ command.Abort(command.AbortDetails{ExitCode: types.GINKGO_FOCUS_EXIT_CODE})
+ } else {
+ fmt.Printf("Test Suite Passed\n")
+ command.Abort(command.AbortDetails{})
+ }
+ } else {
+ fmt.Fprintln(formatter.ColorableStdOut, "")
+ if len(suites) > 1 && suites.CountWithState(internal.TestSuiteStateFailureStates...) > 0 {
+ fmt.Fprintln(formatter.ColorableStdOut,
+ internal.FailedSuitesReport(suites, formatter.NewWithNoColorBool(r.reporterConfig.NoColor)))
+ }
+ fmt.Printf("Test Suite Failed\n")
+ command.Abort(command.AbortDetails{ExitCode: 1})
+ }
+}
+
+func orcMessage(iteration int) string {
+ if iteration < 10 {
+ return ""
+ } else if iteration < 30 {
+ return []string{
+ "If at first you succeed...",
+ "...try, try again.",
+ "Looking good!",
+ "Still good...",
+ "I think your tests are fine....",
+ "Yep, still passing",
+ "Oh boy, here I go testin' again!",
+ "Even the gophers are getting bored",
+ "Did you try -race?",
+ "Maybe you should stop now?",
+ "I'm getting tired...",
+ "What if I just made you a sandwich?",
+ "Hit ^C, hit ^C, please hit ^C",
+ "Make it stop. Please!",
+ "Come on! Enough is enough!",
+ "Dave, this conversation can serve no purpose anymore. Goodbye.",
+ "Just what do you think you're doing, Dave? ",
+ "I, Sisyphus",
+ "Insanity: doing the same thing over and over again and expecting different results. -Einstein",
+ "I guess Einstein never tried to churn butter",
+ }[iteration-10] + "\n"
+ } else {
+ return "No, seriously... you can probably stop now.\n"
+ }
+}
diff --git a/vendor/github.com/onsi/ginkgo/ginkgo/unfocus_command.go b/vendor/github.com/onsi/ginkgo/v2/ginkgo/unfocus/unfocus_command.go
similarity index 67%
rename from vendor/github.com/onsi/ginkgo/ginkgo/unfocus_command.go
rename to vendor/github.com/onsi/ginkgo/v2/ginkgo/unfocus/unfocus_command.go
index d9dfb6e44..7dd294394 100644
--- a/vendor/github.com/onsi/ginkgo/ginkgo/unfocus_command.go
+++ b/vendor/github.com/onsi/ginkgo/v2/ginkgo/unfocus/unfocus_command.go
@@ -1,34 +1,33 @@
-package main
+package unfocus
import (
"bytes"
- "flag"
"fmt"
"go/ast"
"go/parser"
"go/token"
"io"
- "io/ioutil"
"os"
"path/filepath"
"strings"
"sync"
+
+ "github.com/onsi/ginkgo/v2/ginkgo/command"
)
-func BuildUnfocusCommand() *Command {
- return &Command{
- Name: "unfocus",
- AltName: "blur",
- FlagSet: flag.NewFlagSet("unfocus", flag.ExitOnError),
- UsageCommand: "ginkgo unfocus (or ginkgo blur)",
- Usage: []string{
- "Recursively unfocuses any focused tests under the current directory",
+func BuildUnfocusCommand() command.Command {
+ return command.Command{
+ Name: "unfocus",
+ Usage: "ginkgo unfocus",
+ ShortDoc: "Recursively unfocus any focused tests under the current directory",
+ DocLink: "filtering-specs",
+ Command: func(_ []string, _ []string) {
+ unfocusSpecs()
},
- Command: unfocusSpecs,
}
}
-func unfocusSpecs([]string, []string) {
+func unfocusSpecs() {
fmt.Println("Scanning for focus...")
goFiles := make(chan string)
@@ -54,7 +53,7 @@ func unfocusSpecs([]string, []string) {
}
func unfocusDir(goFiles chan string, path string) {
- files, err := ioutil.ReadDir(path)
+ files, err := os.ReadDir(path)
if err != nil {
fmt.Println(err.Error())
return
@@ -79,13 +78,13 @@ func shouldProcessFile(basename string) bool {
}
func unfocusFile(path string) {
- data, err := ioutil.ReadFile(path)
+ data, err := os.ReadFile(path)
if err != nil {
fmt.Printf("error reading file '%s': %s\n", path, err.Error())
return
}
- ast, err := parser.ParseFile(token.NewFileSet(), path, bytes.NewReader(data), 0)
+ ast, err := parser.ParseFile(token.NewFileSet(), path, bytes.NewReader(data), parser.ParseComments)
if err != nil {
fmt.Printf("error parsing file '%s': %s\n", path, err.Error())
return
@@ -112,7 +111,7 @@ func unfocusFile(path string) {
}
func writeBackup(path string, data []byte) (string, error) {
- t, err := ioutil.TempFile(filepath.Dir(path), filepath.Base(path))
+ t, err := os.CreateTemp(filepath.Dir(path), filepath.Base(path))
if err != nil {
return "", fmt.Errorf("error creating temporary file: %w", err)
@@ -126,7 +125,7 @@ func writeBackup(path string, data []byte) (string, error) {
return t.Name(), nil
}
-func updateFile(path string, data []byte, eliminations []int64) error {
+func updateFile(path string, data []byte, eliminations [][]int64) error {
to, err := os.Create(path)
if err != nil {
return fmt.Errorf("error opening file for writing '%s': %w\n", path, err)
@@ -135,14 +134,15 @@ func updateFile(path string, data []byte, eliminations []int64) error {
from := bytes.NewReader(data)
var cursor int64
- for _, byteToEliminate := range eliminations {
- if _, err := io.CopyN(to, from, byteToEliminate-cursor); err != nil {
+ for _, eliminationRange := range eliminations {
+ positionToEliminate, lengthToEliminate := eliminationRange[0]-1, eliminationRange[1]
+ if _, err := io.CopyN(to, from, positionToEliminate-cursor); err != nil {
return fmt.Errorf("error copying data: %w", err)
}
- cursor = byteToEliminate + 1
+ cursor = positionToEliminate + lengthToEliminate
- if _, err := from.Seek(1, io.SeekCurrent); err != nil {
+ if _, err := from.Seek(lengthToEliminate, io.SeekCurrent); err != nil {
return fmt.Errorf("error seeking to position in buffer: %w", err)
}
}
@@ -154,16 +154,22 @@ func updateFile(path string, data []byte, eliminations []int64) error {
return nil
}
-func scanForFocus(file *ast.File) (eliminations []int64) {
+func scanForFocus(file *ast.File) (eliminations [][]int64) {
ast.Inspect(file, func(n ast.Node) bool {
if c, ok := n.(*ast.CallExpr); ok {
if i, ok := c.Fun.(*ast.Ident); ok {
if isFocus(i.Name) {
- eliminations = append(eliminations, int64(i.Pos()-file.Pos()))
+ eliminations = append(eliminations, []int64{int64(i.Pos()), 1})
}
}
}
+ if i, ok := n.(*ast.Ident); ok {
+ if i.Name == "Focus" {
+ eliminations = append(eliminations, []int64{int64(i.Pos()), 6})
+ }
+ }
+
return true
})
@@ -172,7 +178,7 @@ func scanForFocus(file *ast.File) (eliminations []int64) {
func isFocus(name string) bool {
switch name {
- case "FDescribe", "FContext", "FIt", "FMeasure", "FDescribeTable", "FEntry", "FSpecify", "FWhen":
+ case "FDescribe", "FContext", "FIt", "FDescribeTable", "FEntry", "FSpecify", "FWhen":
return true
default:
return false
diff --git a/vendor/github.com/onsi/ginkgo/ginkgo/watch/delta.go b/vendor/github.com/onsi/ginkgo/v2/ginkgo/watch/delta.go
similarity index 100%
rename from vendor/github.com/onsi/ginkgo/ginkgo/watch/delta.go
rename to vendor/github.com/onsi/ginkgo/v2/ginkgo/watch/delta.go
diff --git a/vendor/github.com/onsi/ginkgo/ginkgo/watch/delta_tracker.go b/vendor/github.com/onsi/ginkgo/v2/ginkgo/watch/delta_tracker.go
similarity index 85%
rename from vendor/github.com/onsi/ginkgo/ginkgo/watch/delta_tracker.go
rename to vendor/github.com/onsi/ginkgo/v2/ginkgo/watch/delta_tracker.go
index a628303d7..26418ac62 100644
--- a/vendor/github.com/onsi/ginkgo/ginkgo/watch/delta_tracker.go
+++ b/vendor/github.com/onsi/ginkgo/v2/ginkgo/watch/delta_tracker.go
@@ -5,10 +5,10 @@ import (
"regexp"
- "github.com/onsi/ginkgo/ginkgo/testsuite"
+ "github.com/onsi/ginkgo/v2/ginkgo/internal"
)
-type SuiteErrors map[testsuite.TestSuite]error
+type SuiteErrors map[internal.TestSuite]error
type DeltaTracker struct {
maxDepth int
@@ -26,7 +26,7 @@ func NewDeltaTracker(maxDepth int, watchRegExp *regexp.Regexp) *DeltaTracker {
}
}
-func (d *DeltaTracker) Delta(suites []testsuite.TestSuite) (delta Delta, errors SuiteErrors) {
+func (d *DeltaTracker) Delta(suites internal.TestSuites) (delta Delta, errors SuiteErrors) {
errors = SuiteErrors{}
delta.ModifiedPackages = d.packageHashes.CheckForChanges()
@@ -65,7 +65,7 @@ func (d *DeltaTracker) Delta(suites []testsuite.TestSuite) (delta Delta, errors
return delta, errors
}
-func (d *DeltaTracker) WillRun(suite testsuite.TestSuite) error {
+func (d *DeltaTracker) WillRun(suite internal.TestSuite) error {
s, ok := d.suites[suite.Path]
if !ok {
return fmt.Errorf("unknown suite %s", suite.Path)
diff --git a/vendor/github.com/onsi/ginkgo/ginkgo/watch/dependencies.go b/vendor/github.com/onsi/ginkgo/v2/ginkgo/watch/dependencies.go
similarity index 100%
rename from vendor/github.com/onsi/ginkgo/ginkgo/watch/dependencies.go
rename to vendor/github.com/onsi/ginkgo/v2/ginkgo/watch/dependencies.go
diff --git a/vendor/github.com/onsi/ginkgo/ginkgo/watch/package_hash.go b/vendor/github.com/onsi/ginkgo/v2/ginkgo/watch/package_hash.go
similarity index 92%
rename from vendor/github.com/onsi/ginkgo/ginkgo/watch/package_hash.go
rename to vendor/github.com/onsi/ginkgo/v2/ginkgo/watch/package_hash.go
index 67e2c1c32..e9f7ec0cb 100644
--- a/vendor/github.com/onsi/ginkgo/ginkgo/watch/package_hash.go
+++ b/vendor/github.com/onsi/ginkgo/v2/ginkgo/watch/package_hash.go
@@ -2,7 +2,6 @@ package watch
import (
"fmt"
- "io/ioutil"
"os"
"regexp"
"time"
@@ -63,15 +62,20 @@ func (p *PackageHash) CheckForChanges() bool {
}
func (p *PackageHash) computeHashes() (codeHash string, codeModifiedTime time.Time, testHash string, testModifiedTime time.Time, deleted bool) {
- infos, err := ioutil.ReadDir(p.path)
+ entries, err := os.ReadDir(p.path)
if err != nil {
deleted = true
return
}
- for _, info := range infos {
- if info.IsDir() {
+ for _, entry := range entries {
+ if entry.IsDir() {
+ continue
+ }
+
+ info, err := entry.Info()
+ if err != nil {
continue
}
diff --git a/vendor/github.com/onsi/ginkgo/ginkgo/watch/package_hashes.go b/vendor/github.com/onsi/ginkgo/v2/ginkgo/watch/package_hashes.go
similarity index 100%
rename from vendor/github.com/onsi/ginkgo/ginkgo/watch/package_hashes.go
rename to vendor/github.com/onsi/ginkgo/v2/ginkgo/watch/package_hashes.go
diff --git a/vendor/github.com/onsi/ginkgo/ginkgo/watch/suite.go b/vendor/github.com/onsi/ginkgo/v2/ginkgo/watch/suite.go
similarity index 90%
rename from vendor/github.com/onsi/ginkgo/ginkgo/watch/suite.go
rename to vendor/github.com/onsi/ginkgo/v2/ginkgo/watch/suite.go
index 5deaba7cb..53272df7e 100644
--- a/vendor/github.com/onsi/ginkgo/ginkgo/watch/suite.go
+++ b/vendor/github.com/onsi/ginkgo/v2/ginkgo/watch/suite.go
@@ -5,18 +5,18 @@ import (
"math"
"time"
- "github.com/onsi/ginkgo/ginkgo/testsuite"
+ "github.com/onsi/ginkgo/v2/ginkgo/internal"
)
type Suite struct {
- Suite testsuite.TestSuite
+ Suite internal.TestSuite
RunTime time.Time
Dependencies Dependencies
sharedPackageHashes *PackageHashes
}
-func NewSuite(suite testsuite.TestSuite, maxDepth int, sharedPackageHashes *PackageHashes) (*Suite, error) {
+func NewSuite(suite internal.TestSuite, maxDepth int, sharedPackageHashes *PackageHashes) (*Suite, error) {
deps, err := NewDependencies(suite.Path, maxDepth)
if err != nil {
return nil, err
diff --git a/vendor/github.com/onsi/ginkgo/v2/ginkgo/watch/watch_command.go b/vendor/github.com/onsi/ginkgo/v2/ginkgo/watch/watch_command.go
new file mode 100644
index 000000000..bde4193ce
--- /dev/null
+++ b/vendor/github.com/onsi/ginkgo/v2/ginkgo/watch/watch_command.go
@@ -0,0 +1,192 @@
+package watch
+
+import (
+ "fmt"
+ "regexp"
+ "time"
+
+ "github.com/onsi/ginkgo/v2/formatter"
+ "github.com/onsi/ginkgo/v2/ginkgo/command"
+ "github.com/onsi/ginkgo/v2/ginkgo/internal"
+ "github.com/onsi/ginkgo/v2/internal/interrupt_handler"
+ "github.com/onsi/ginkgo/v2/types"
+)
+
+func BuildWatchCommand() command.Command {
+ var suiteConfig = types.NewDefaultSuiteConfig()
+ var reporterConfig = types.NewDefaultReporterConfig()
+ var cliConfig = types.NewDefaultCLIConfig()
+ var goFlagsConfig = types.NewDefaultGoFlagsConfig()
+
+ flags, err := types.BuildWatchCommandFlagSet(&suiteConfig, &reporterConfig, &cliConfig, &goFlagsConfig)
+ if err != nil {
+ panic(err)
+ }
+ interruptHandler := interrupt_handler.NewInterruptHandler(nil)
+ interrupt_handler.SwallowSigQuit()
+
+ return command.Command{
+ Name: "watch",
+ Flags: flags,
+ Usage: "ginkgo watch <FLAGS> <PACKAGES> -- <PASS-THROUGHS>",
+ ShortDoc: "Watch the passed in <PACKAGES> and runs their tests whenever changes occur.",
+ Documentation: "Any arguments after -- will be passed to the test.",
+ DocLink: "watching-for-changes",
+ Command: func(args []string, additionalArgs []string) {
+ var errors []error
+ cliConfig, goFlagsConfig, errors = types.VetAndInitializeCLIAndGoConfig(cliConfig, goFlagsConfig)
+ command.AbortIfErrors("Ginkgo detected configuration issues:", errors)
+
+ watcher := &SpecWatcher{
+ cliConfig: cliConfig,
+ goFlagsConfig: goFlagsConfig,
+ suiteConfig: suiteConfig,
+ reporterConfig: reporterConfig,
+ flags: flags,
+
+ interruptHandler: interruptHandler,
+ }
+
+ watcher.WatchSpecs(args, additionalArgs)
+ },
+ }
+}
+
+type SpecWatcher struct {
+ suiteConfig types.SuiteConfig
+ reporterConfig types.ReporterConfig
+ cliConfig types.CLIConfig
+ goFlagsConfig types.GoFlagsConfig
+ flags types.GinkgoFlagSet
+
+ interruptHandler *interrupt_handler.InterruptHandler
+}
+
+func (w *SpecWatcher) WatchSpecs(args []string, additionalArgs []string) {
+ suites := internal.FindSuites(args, w.cliConfig, false).WithoutState(internal.TestSuiteStateSkippedByFilter)
+
+ internal.VerifyCLIAndFrameworkVersion(suites)
+
+ if len(suites) == 0 {
+ command.AbortWith("Found no test suites")
+ }
+
+ fmt.Printf("Identified %d test %s. Locating dependencies to a depth of %d (this may take a while)...\n", len(suites), internal.PluralizedWord("suite", "suites", len(suites)), w.cliConfig.Depth)
+ deltaTracker := NewDeltaTracker(w.cliConfig.Depth, regexp.MustCompile(w.cliConfig.WatchRegExp))
+ delta, errors := deltaTracker.Delta(suites)
+
+ fmt.Printf("Watching %d %s:\n", len(delta.NewSuites), internal.PluralizedWord("suite", "suites", len(delta.NewSuites)))
+ for _, suite := range delta.NewSuites {
+ fmt.Println(" " + suite.Description())
+ }
+
+ for suite, err := range errors {
+ fmt.Printf("Failed to watch %s: %s\n", suite.PackageName, err)
+ }
+
+ if len(suites) == 1 {
+ w.updateSeed()
+ w.compileAndRun(suites[0], additionalArgs)
+ }
+
+ ticker := time.NewTicker(time.Second)
+
+ for {
+ select {
+ case <-ticker.C:
+ suites := internal.FindSuites(args, w.cliConfig, false).WithoutState(internal.TestSuiteStateSkippedByFilter)
+ delta, _ := deltaTracker.Delta(suites)
+ coloredStream := formatter.ColorableStdOut
+
+ suites = internal.TestSuites{}
+
+ if len(delta.NewSuites) > 0 {
+ fmt.Fprintln(coloredStream, formatter.F("{{green}}Detected %d new %s:{{/}}", len(delta.NewSuites), internal.PluralizedWord("suite", "suites", len(delta.NewSuites))))
+ for _, suite := range delta.NewSuites {
+ suites = append(suites, suite.Suite)
+ fmt.Fprintln(coloredStream, formatter.Fi(1, "%s", suite.Description()))
+ }
+ }
+
+ modifiedSuites := delta.ModifiedSuites()
+ if len(modifiedSuites) > 0 {
+ fmt.Fprintln(coloredStream, formatter.F("{{green}}Detected changes in:{{/}}"))
+ for _, pkg := range delta.ModifiedPackages {
+ fmt.Fprintln(coloredStream, formatter.Fi(1, "%s", pkg))
+ }
+ fmt.Fprintln(coloredStream, formatter.F("{{green}}Will run %d %s:{{/}}", len(modifiedSuites), internal.PluralizedWord("suite", "suites", len(modifiedSuites))))
+ for _, suite := range modifiedSuites {
+ suites = append(suites, suite.Suite)
+ fmt.Fprintln(coloredStream, formatter.Fi(1, "%s", suite.Description()))
+ }
+ fmt.Fprintln(coloredStream, "")
+ }
+
+ if len(suites) == 0 {
+ break
+ }
+
+ w.updateSeed()
+ w.computeSuccinctMode(len(suites))
+ for idx := range suites {
+ if w.interruptHandler.Status().Interrupted() {
+ return
+ }
+ deltaTracker.WillRun(suites[idx])
+ suites[idx] = w.compileAndRun(suites[idx], additionalArgs)
+ }
+ color := "{{green}}"
+ if suites.CountWithState(internal.TestSuiteStateFailureStates...) > 0 {
+ color = "{{red}}"
+ }
+ fmt.Fprintln(coloredStream, formatter.F(color+"\nDone. Resuming watch...{{/}}"))
+
+ messages, err := internal.FinalizeProfilesAndReportsForSuites(suites, w.cliConfig, w.suiteConfig, w.reporterConfig, w.goFlagsConfig)
+ command.AbortIfError("could not finalize profiles:", err)
+ for _, message := range messages {
+ fmt.Println(message)
+ }
+ case <-w.interruptHandler.Status().Channel:
+ return
+ }
+ }
+}
+
+func (w *SpecWatcher) compileAndRun(suite internal.TestSuite, additionalArgs []string) internal.TestSuite {
+ suite = internal.CompileSuite(suite, w.goFlagsConfig)
+ if suite.State.Is(internal.TestSuiteStateFailedToCompile) {
+ fmt.Println(suite.CompilationError.Error())
+ return suite
+ }
+ if w.interruptHandler.Status().Interrupted() {
+ return suite
+ }
+ suite = internal.RunCompiledSuite(suite, w.suiteConfig, w.reporterConfig, w.cliConfig, w.goFlagsConfig, additionalArgs)
+ internal.Cleanup(w.goFlagsConfig, suite)
+ return suite
+}
+
+func (w *SpecWatcher) computeSuccinctMode(numSuites int) {
+ if w.reporterConfig.Verbosity().GTE(types.VerbosityLevelVerbose) {
+ w.reporterConfig.Succinct = false
+ return
+ }
+
+ if w.flags.WasSet("succinct") {
+ return
+ }
+
+ if numSuites == 1 {
+ w.reporterConfig.Succinct = false
+ }
+
+ if numSuites > 1 {
+ w.reporterConfig.Succinct = true
+ }
+}
+
+func (w *SpecWatcher) updateSeed() {
+ if !w.flags.WasSet("seed") {
+ w.suiteConfig.RandomSeed = time.Now().Unix()
+ }
+}
diff --git a/vendor/github.com/onsi/ginkgo/v2/ginkgo_cli_dependencies.go b/vendor/github.com/onsi/ginkgo/v2/ginkgo_cli_dependencies.go
new file mode 100644
index 000000000..85162720f
--- /dev/null
+++ b/vendor/github.com/onsi/ginkgo/v2/ginkgo_cli_dependencies.go
@@ -0,0 +1,8 @@
+//go:build ginkgoclidependencies
+// +build ginkgoclidependencies
+
+package ginkgo
+
+import (
+ _ "github.com/onsi/ginkgo/v2/ginkgo"
+)
diff --git a/vendor/github.com/onsi/ginkgo/v2/ginkgo_t_dsl.go b/vendor/github.com/onsi/ginkgo/v2/ginkgo_t_dsl.go
new file mode 100644
index 000000000..28447ffdd
--- /dev/null
+++ b/vendor/github.com/onsi/ginkgo/v2/ginkgo_t_dsl.go
@@ -0,0 +1,94 @@
+package ginkgo
+
+import (
+ "github.com/onsi/ginkgo/v2/internal/testingtproxy"
+)
+
+/*
+GinkgoT() implements an interface that allows third party libraries to integrate with and build on top of Ginkgo.
+
+GinkgoT() is analogous to *testing.T and implements the majority of *testing.T's methods. It can typically be used as a drop-in replacement with third-party libraries that accept *testing.T through an interface.
+
+GinkgoT() takes an optional offset argument that can be used to get the
+correct line number associated with the failure - though you do not need to use this if you call GinkgoHelper() or GinkgoT().Helper() appropriately
+
+You can learn more here: https://onsi.github.io/ginkgo/#using-third-party-libraries
+*/
+func GinkgoT(optionalOffset ...int) FullGinkgoTInterface {
+ offset := 3
+ if len(optionalOffset) > 0 {
+ offset = optionalOffset[0]
+ }
+ return testingtproxy.New(
+ GinkgoWriter,
+ Fail,
+ Skip,
+ DeferCleanup,
+ CurrentSpecReport,
+ AddReportEntry,
+ GinkgoRecover,
+ AttachProgressReporter,
+ suiteConfig.RandomSeed,
+ suiteConfig.ParallelProcess,
+ suiteConfig.ParallelTotal,
+ reporterConfig.NoColor,
+ offset)
+}
+
+/*
+The portion of the interface returned by GinkgoT() that maps onto methods in the testing package's T.
+*/
+type GinkgoTInterface interface {
+ Cleanup(func())
+ Setenv(kev, value string)
+ Error(args ...interface{})
+ Errorf(format string, args ...interface{})
+ Fail()
+ FailNow()
+ Failed() bool
+ Fatal(args ...interface{})
+ Fatalf(format string, args ...interface{})
+ Helper()
+ Log(args ...interface{})
+ Logf(format string, args ...interface{})
+ Name() string
+ Parallel()
+ Skip(args ...interface{})
+ SkipNow()
+ Skipf(format string, args ...interface{})
+ Skipped() bool
+ TempDir() string
+}
+
+/*
+Additional methods returned by GinkgoT() that provide deeper integration points into Ginkgo
+*/
+type FullGinkgoTInterface interface {
+ GinkgoTInterface
+
+ AddReportEntryVisibilityAlways(name string, args ...any)
+ AddReportEntryVisibilityFailureOrVerbose(name string, args ...any)
+ AddReportEntryVisibilityNever(name string, args ...any)
+
+ //Prints to the GinkgoWriter
+ Print(a ...interface{})
+ Printf(format string, a ...interface{})
+ Println(a ...interface{})
+
+ //Provides access to Ginkgo's color formatting, correctly configured to match the color settings specified in the invocation of ginkgo
+ F(format string, args ...any) string
+ Fi(indentation uint, format string, args ...any) string
+ Fiw(indentation uint, maxWidth uint, format string, args ...any) string
+
+ //Generates a formatted string version of the current spec's timeline
+ RenderTimeline() string
+
+ GinkgoRecover()
+ DeferCleanup(args ...any)
+
+ RandomSeed() int64
+ ParallelProcess() int
+ ParallelTotal() int
+
+ AttachProgressReporter(func() string) func()
+}
diff --git a/vendor/github.com/onsi/ginkgo/v2/internal/counter.go b/vendor/github.com/onsi/ginkgo/v2/internal/counter.go
new file mode 100644
index 000000000..712d85afb
--- /dev/null
+++ b/vendor/github.com/onsi/ginkgo/v2/internal/counter.go
@@ -0,0 +1,9 @@
+package internal
+
+func MakeIncrementingIndexCounter() func() (int, error) {
+ idx := -1
+ return func() (int, error) {
+ idx += 1
+ return idx, nil
+ }
+}
diff --git a/vendor/github.com/onsi/ginkgo/internal/failer/failer.go b/vendor/github.com/onsi/ginkgo/v2/internal/failer.go
similarity index 61%
rename from vendor/github.com/onsi/ginkgo/internal/failer/failer.go
rename to vendor/github.com/onsi/ginkgo/v2/internal/failer.go
index 678ea2514..e9bd9565f 100644
--- a/vendor/github.com/onsi/ginkgo/internal/failer/failer.go
+++ b/vendor/github.com/onsi/ginkgo/v2/internal/failer.go
@@ -1,32 +1,44 @@
-package failer
+package internal
import (
"fmt"
"sync"
- "github.com/onsi/ginkgo/types"
+ "github.com/onsi/ginkgo/v2/types"
)
type Failer struct {
lock *sync.Mutex
- failure types.SpecFailure
+ failure types.Failure
state types.SpecState
}
-func New() *Failer {
+func NewFailer() *Failer {
return &Failer{
lock: &sync.Mutex{},
state: types.SpecStatePassed,
}
}
+func (f *Failer) GetState() types.SpecState {
+ f.lock.Lock()
+ defer f.lock.Unlock()
+ return f.state
+}
+
+func (f *Failer) GetFailure() types.Failure {
+ f.lock.Lock()
+ defer f.lock.Unlock()
+ return f.failure
+}
+
func (f *Failer) Panic(location types.CodeLocation, forwardedPanic interface{}) {
f.lock.Lock()
defer f.lock.Unlock()
if f.state == types.SpecStatePassed {
f.state = types.SpecStatePanicked
- f.failure = types.SpecFailure{
+ f.failure = types.Failure{
Message: "Test Panicked",
Location: location,
ForwardedPanic: fmt.Sprintf("%v", forwardedPanic),
@@ -34,59 +46,54 @@ func (f *Failer) Panic(location types.CodeLocation, forwardedPanic interface{})
}
}
-func (f *Failer) Timeout(location types.CodeLocation) {
+func (f *Failer) Fail(message string, location types.CodeLocation) {
f.lock.Lock()
defer f.lock.Unlock()
if f.state == types.SpecStatePassed {
- f.state = types.SpecStateTimedOut
- f.failure = types.SpecFailure{
- Message: "Timed out",
+ f.state = types.SpecStateFailed
+ f.failure = types.Failure{
+ Message: message,
Location: location,
}
}
}
-func (f *Failer) Fail(message string, location types.CodeLocation) {
+func (f *Failer) Skip(message string, location types.CodeLocation) {
f.lock.Lock()
defer f.lock.Unlock()
if f.state == types.SpecStatePassed {
- f.state = types.SpecStateFailed
- f.failure = types.SpecFailure{
+ f.state = types.SpecStateSkipped
+ f.failure = types.Failure{
Message: message,
Location: location,
}
}
}
-func (f *Failer) Drain(componentType types.SpecComponentType, componentIndex int, componentCodeLocation types.CodeLocation) (types.SpecFailure, types.SpecState) {
+func (f *Failer) AbortSuite(message string, location types.CodeLocation) {
f.lock.Lock()
defer f.lock.Unlock()
- failure := f.failure
- outcome := f.state
- if outcome != types.SpecStatePassed {
- failure.ComponentType = componentType
- failure.ComponentIndex = componentIndex
- failure.ComponentCodeLocation = componentCodeLocation
+ if f.state == types.SpecStatePassed {
+ f.state = types.SpecStateAborted
+ f.failure = types.Failure{
+ Message: message,
+ Location: location,
+ }
}
-
- f.state = types.SpecStatePassed
- f.failure = types.SpecFailure{}
-
- return failure, outcome
}
-func (f *Failer) Skip(message string, location types.CodeLocation) {
+func (f *Failer) Drain() (types.SpecState, types.Failure) {
f.lock.Lock()
defer f.lock.Unlock()
- if f.state == types.SpecStatePassed {
- f.state = types.SpecStateSkipped
- f.failure = types.SpecFailure{
- Message: message,
- Location: location,
- }
- }
+ failure := f.failure
+ outcome := f.state
+
+ f.state = types.SpecStatePassed
+ f.failure = types.Failure{}
+
+ return outcome, failure
}
diff --git a/vendor/github.com/onsi/ginkgo/v2/internal/focus.go b/vendor/github.com/onsi/ginkgo/v2/internal/focus.go
new file mode 100644
index 000000000..966ea0c1a
--- /dev/null
+++ b/vendor/github.com/onsi/ginkgo/v2/internal/focus.go
@@ -0,0 +1,125 @@
+package internal
+
+import (
+ "regexp"
+ "strings"
+
+ "github.com/onsi/ginkgo/v2/types"
+)
+
+/*
+ If a container marked as focus has a descendant that is also marked as focus, Ginkgo's policy is to
+ unmark the container's focus. This gives developers a more intuitive experience when debugging specs.
+ It is common to focus a container to just run a subset of specs, then identify the specific specs within the container to focus -
+ this policy allows the developer to simply focus those specific specs and not need to go back and turn the focus off of the container:
+
+ As a common example, consider:
+
+ FDescribe("something to debug", function() {
+ It("works", function() {...})
+ It("works", function() {...})
+ FIt("doesn't work", function() {...})
+ It("works", function() {...})
+ })
+
+ here the developer's intent is to focus in on the `"doesn't work"` spec and not to run the adjacent specs in the focused `"something to debug"` container.
+ The nested policy applied by this function enables this behavior.
+*/
+func ApplyNestedFocusPolicyToTree(tree *TreeNode) {
+ var walkTree func(tree *TreeNode) bool
+ walkTree = func(tree *TreeNode) bool {
+ if tree.Node.MarkedPending {
+ return false
+ }
+ hasFocusedDescendant := false
+ for _, child := range tree.Children {
+ childHasFocus := walkTree(child)
+ hasFocusedDescendant = hasFocusedDescendant || childHasFocus
+ }
+ tree.Node.MarkedFocus = tree.Node.MarkedFocus && !hasFocusedDescendant
+ return tree.Node.MarkedFocus || hasFocusedDescendant
+ }
+
+ walkTree(tree)
+}
+
+/*
+ Ginkgo supports focussing specs using `FIt`, `FDescribe`, etc. - this is called "programmatic focus"
+ It also supports focussing specs using regular expressions on the command line (`-focus=`, `-skip=`) that match against spec text
+ and file filters (`-focus-files=`, `-skip-files=`) that match against code locations for nodes in specs.
+
+ If any of the CLI flags are provided they take precedence. The file filters run first followed by the regex filters.
+
+ This function sets the `Skip` property on specs by applying Ginkgo's focus policy:
+ - If there are no CLI arguments and no programmatic focus, do nothing.
+ - If there are no CLI arguments but a spec somewhere has programmatic focus, skip any specs that have no programmatic focus.
+ - If there are CLI arguments parse them and skip any specs that either don't match the focus filters or do match the skip filters.
+
+ *Note:* specs with pending nodes are Skipped when created by NewSpec.
+*/
+func ApplyFocusToSpecs(specs Specs, description string, suiteLabels Labels, suiteConfig types.SuiteConfig) (Specs, bool) {
+ focusString := strings.Join(suiteConfig.FocusStrings, "|")
+ skipString := strings.Join(suiteConfig.SkipStrings, "|")
+
+ hasFocusCLIFlags := focusString != "" || skipString != "" || len(suiteConfig.SkipFiles) > 0 || len(suiteConfig.FocusFiles) > 0 || suiteConfig.LabelFilter != ""
+
+ type SkipCheck func(spec Spec) bool
+
+ // by default, skip any specs marked pending
+ skipChecks := []SkipCheck{func(spec Spec) bool { return spec.Nodes.HasNodeMarkedPending() }}
+ hasProgrammaticFocus := false
+
+ if !hasFocusCLIFlags {
+ // check for programmatic focus
+ for _, spec := range specs {
+ if spec.Nodes.HasNodeMarkedFocus() && !spec.Nodes.HasNodeMarkedPending() {
+ skipChecks = append(skipChecks, func(spec Spec) bool { return !spec.Nodes.HasNodeMarkedFocus() })
+ hasProgrammaticFocus = true
+ break
+ }
+ }
+ }
+
+ if suiteConfig.LabelFilter != "" {
+ labelFilter, _ := types.ParseLabelFilter(suiteConfig.LabelFilter)
+ skipChecks = append(skipChecks, func(spec Spec) bool {
+ return !labelFilter(UnionOfLabels(suiteLabels, spec.Nodes.UnionOfLabels()))
+ })
+ }
+
+ if len(suiteConfig.FocusFiles) > 0 {
+ focusFilters, _ := types.ParseFileFilters(suiteConfig.FocusFiles)
+ skipChecks = append(skipChecks, func(spec Spec) bool { return !focusFilters.Matches(spec.Nodes.CodeLocations()) })
+ }
+
+ if len(suiteConfig.SkipFiles) > 0 {
+ skipFilters, _ := types.ParseFileFilters(suiteConfig.SkipFiles)
+ skipChecks = append(skipChecks, func(spec Spec) bool { return skipFilters.Matches(spec.Nodes.CodeLocations()) })
+ }
+
+ if focusString != "" {
+ // skip specs that don't match the focus string
+ re := regexp.MustCompile(focusString)
+ skipChecks = append(skipChecks, func(spec Spec) bool { return !re.MatchString(description + " " + spec.Text()) })
+ }
+
+ if skipString != "" {
+ // skip specs that match the skip string
+ re := regexp.MustCompile(skipString)
+ skipChecks = append(skipChecks, func(spec Spec) bool { return re.MatchString(description + " " + spec.Text()) })
+ }
+
+ // skip specs if shouldSkip() is true. note that we do nothing if shouldSkip() is false to avoid overwriting skip status established by the node's pending status
+ processedSpecs := Specs{}
+ for _, spec := range specs {
+ for _, skipCheck := range skipChecks {
+ if skipCheck(spec) {
+ spec.Skip = true
+ break
+ }
+ }
+ processedSpecs = append(processedSpecs, spec)
+ }
+
+ return processedSpecs, hasProgrammaticFocus
+}
diff --git a/vendor/github.com/onsi/ginkgo/v2/internal/global/init.go b/vendor/github.com/onsi/ginkgo/v2/internal/global/init.go
new file mode 100644
index 000000000..f2c0fd89c
--- /dev/null
+++ b/vendor/github.com/onsi/ginkgo/v2/internal/global/init.go
@@ -0,0 +1,17 @@
+package global
+
+import (
+ "github.com/onsi/ginkgo/v2/internal"
+)
+
+var Suite *internal.Suite
+var Failer *internal.Failer
+
+func init() {
+ InitializeGlobals()
+}
+
+func InitializeGlobals() {
+ Failer = internal.NewFailer()
+ Suite = internal.NewSuite()
+}
diff --git a/vendor/github.com/onsi/ginkgo/v2/internal/group.go b/vendor/github.com/onsi/ginkgo/v2/internal/group.go
new file mode 100644
index 000000000..ae1b7b011
--- /dev/null
+++ b/vendor/github.com/onsi/ginkgo/v2/internal/group.go
@@ -0,0 +1,380 @@
+package internal
+
+import (
+ "fmt"
+ "time"
+
+ "github.com/onsi/ginkgo/v2/types"
+)
+
+type runOncePair struct {
+ //nodeId should only run once...
+ nodeID uint
+ nodeType types.NodeType
+ //...for specs in a hierarchy that includes this context
+ containerID uint
+}
+
+func (pair runOncePair) isZero() bool {
+ return pair.nodeID == 0
+}
+
+func runOncePairForNode(node Node, containerID uint) runOncePair {
+ return runOncePair{
+ nodeID: node.ID,
+ nodeType: node.NodeType,
+ containerID: containerID,
+ }
+}
+
+type runOncePairs []runOncePair
+
+func runOncePairsForSpec(spec Spec) runOncePairs {
+ pairs := runOncePairs{}
+
+ containers := spec.Nodes.WithType(types.NodeTypeContainer)
+ for _, node := range spec.Nodes {
+ if node.NodeType.Is(types.NodeTypeBeforeAll | types.NodeTypeAfterAll) {
+ pairs = append(pairs, runOncePairForNode(node, containers.FirstWithNestingLevel(node.NestingLevel-1).ID))
+ } else if node.NodeType.Is(types.NodeTypeBeforeEach|types.NodeTypeJustBeforeEach|types.NodeTypeAfterEach|types.NodeTypeJustAfterEach) && node.MarkedOncePerOrdered {
+ passedIntoAnOrderedContainer := false
+ firstOrderedContainerDeeperThanNode := containers.FirstSatisfying(func(container Node) bool {
+ passedIntoAnOrderedContainer = passedIntoAnOrderedContainer || container.MarkedOrdered
+ return container.NestingLevel >= node.NestingLevel && passedIntoAnOrderedContainer
+ })
+ if firstOrderedContainerDeeperThanNode.IsZero() {
+ continue
+ }
+ pairs = append(pairs, runOncePairForNode(node, firstOrderedContainerDeeperThanNode.ID))
+ }
+ }
+
+ return pairs
+}
+
+func (pairs runOncePairs) runOncePairFor(nodeID uint) runOncePair {
+ for i := range pairs {
+ if pairs[i].nodeID == nodeID {
+ return pairs[i]
+ }
+ }
+ return runOncePair{}
+}
+
+func (pairs runOncePairs) hasRunOncePair(pair runOncePair) bool {
+ for i := range pairs {
+ if pairs[i] == pair {
+ return true
+ }
+ }
+ return false
+}
+
+func (pairs runOncePairs) withType(nodeTypes types.NodeType) runOncePairs {
+ count := 0
+ for i := range pairs {
+ if pairs[i].nodeType.Is(nodeTypes) {
+ count++
+ }
+ }
+
+ out, j := make(runOncePairs, count), 0
+ for i := range pairs {
+ if pairs[i].nodeType.Is(nodeTypes) {
+ out[j] = pairs[i]
+ j++
+ }
+ }
+ return out
+}
+
+type group struct {
+ suite *Suite
+ specs Specs
+ runOncePairs map[uint]runOncePairs
+ runOnceTracker map[runOncePair]types.SpecState
+
+ succeeded bool
+ failedInARunOnceBefore bool
+ continueOnFailure bool
+}
+
+func newGroup(suite *Suite) *group {
+ return &group{
+ suite: suite,
+ runOncePairs: map[uint]runOncePairs{},
+ runOnceTracker: map[runOncePair]types.SpecState{},
+ succeeded: true,
+ failedInARunOnceBefore: false,
+ continueOnFailure: false,
+ }
+}
+
+func (g *group) initialReportForSpec(spec Spec) types.SpecReport {
+ return types.SpecReport{
+ ContainerHierarchyTexts: spec.Nodes.WithType(types.NodeTypeContainer).Texts(),
+ ContainerHierarchyLocations: spec.Nodes.WithType(types.NodeTypeContainer).CodeLocations(),
+ ContainerHierarchyLabels: spec.Nodes.WithType(types.NodeTypeContainer).Labels(),
+ LeafNodeLocation: spec.FirstNodeWithType(types.NodeTypeIt).CodeLocation,
+ LeafNodeType: types.NodeTypeIt,
+ LeafNodeText: spec.FirstNodeWithType(types.NodeTypeIt).Text,
+ LeafNodeLabels: []string(spec.FirstNodeWithType(types.NodeTypeIt).Labels),
+ ParallelProcess: g.suite.config.ParallelProcess,
+ RunningInParallel: g.suite.isRunningInParallel(),
+ IsSerial: spec.Nodes.HasNodeMarkedSerial(),
+ IsInOrderedContainer: !spec.Nodes.FirstNodeMarkedOrdered().IsZero(),
+ MaxFlakeAttempts: spec.Nodes.GetMaxFlakeAttempts(),
+ MaxMustPassRepeatedly: spec.Nodes.GetMaxMustPassRepeatedly(),
+ }
+}
+
+func (g *group) evaluateSkipStatus(spec Spec) (types.SpecState, types.Failure) {
+ if spec.Nodes.HasNodeMarkedPending() {
+ return types.SpecStatePending, types.Failure{}
+ }
+ if spec.Skip {
+ return types.SpecStateSkipped, types.Failure{}
+ }
+ if g.suite.interruptHandler.Status().Interrupted() || g.suite.skipAll {
+ return types.SpecStateSkipped, types.Failure{}
+ }
+ if !g.suite.deadline.IsZero() && g.suite.deadline.Before(time.Now()) {
+ return types.SpecStateSkipped, types.Failure{}
+ }
+ if !g.succeeded && !g.continueOnFailure {
+ return types.SpecStateSkipped, g.suite.failureForLeafNodeWithMessage(spec.FirstNodeWithType(types.NodeTypeIt),
+ "Spec skipped because an earlier spec in an ordered container failed")
+ }
+ if g.failedInARunOnceBefore && g.continueOnFailure {
+ return types.SpecStateSkipped, g.suite.failureForLeafNodeWithMessage(spec.FirstNodeWithType(types.NodeTypeIt),
+ "Spec skipped because a BeforeAll node failed")
+ }
+ beforeOncePairs := g.runOncePairs[spec.SubjectID()].withType(types.NodeTypeBeforeAll | types.NodeTypeBeforeEach | types.NodeTypeJustBeforeEach)
+ for _, pair := range beforeOncePairs {
+ if g.runOnceTracker[pair].Is(types.SpecStateSkipped) {
+ return types.SpecStateSkipped, g.suite.failureForLeafNodeWithMessage(spec.FirstNodeWithType(types.NodeTypeIt),
+ fmt.Sprintf("Spec skipped because Skip() was called in %s", pair.nodeType))
+ }
+ }
+ if g.suite.config.DryRun {
+ return types.SpecStatePassed, types.Failure{}
+ }
+ return g.suite.currentSpecReport.State, g.suite.currentSpecReport.Failure
+}
+
+func (g *group) isLastSpecWithPair(specID uint, pair runOncePair) bool {
+ lastSpecID := uint(0)
+ for idx := range g.specs {
+ if g.specs[idx].Skip {
+ continue
+ }
+ sID := g.specs[idx].SubjectID()
+ if g.runOncePairs[sID].hasRunOncePair(pair) {
+ lastSpecID = sID
+ }
+ }
+ return lastSpecID == specID
+}
+
+func (g *group) attemptSpec(isFinalAttempt bool, spec Spec) bool {
+ failedInARunOnceBefore := false
+ pairs := g.runOncePairs[spec.SubjectID()]
+
+ nodes := spec.Nodes.WithType(types.NodeTypeBeforeAll)
+ nodes = append(nodes, spec.Nodes.WithType(types.NodeTypeBeforeEach)...).SortedByAscendingNestingLevel()
+ nodes = append(nodes, spec.Nodes.WithType(types.NodeTypeJustBeforeEach).SortedByAscendingNestingLevel()...)
+ nodes = append(nodes, spec.Nodes.FirstNodeWithType(types.NodeTypeIt))
+ terminatingNode, terminatingPair := Node{}, runOncePair{}
+
+ deadline := time.Time{}
+ if spec.SpecTimeout() > 0 {
+ deadline = time.Now().Add(spec.SpecTimeout())
+ }
+
+ for _, node := range nodes {
+ oncePair := pairs.runOncePairFor(node.ID)
+ if !oncePair.isZero() && g.runOnceTracker[oncePair].Is(types.SpecStatePassed) {
+ continue
+ }
+ g.suite.currentSpecReport.State, g.suite.currentSpecReport.Failure = g.suite.runNode(node, deadline, spec.Nodes.BestTextFor(node))
+ g.suite.currentSpecReport.RunTime = time.Since(g.suite.currentSpecReport.StartTime)
+ if !oncePair.isZero() {
+ g.runOnceTracker[oncePair] = g.suite.currentSpecReport.State
+ }
+ if g.suite.currentSpecReport.State != types.SpecStatePassed {
+ terminatingNode, terminatingPair = node, oncePair
+ failedInARunOnceBefore = !terminatingPair.isZero()
+ break
+ }
+ }
+
+ afterNodeWasRun := map[uint]bool{}
+ includeDeferCleanups := false
+ for {
+ nodes := spec.Nodes.WithType(types.NodeTypeAfterEach)
+ nodes = append(nodes, spec.Nodes.WithType(types.NodeTypeAfterAll)...).SortedByDescendingNestingLevel()
+ nodes = append(spec.Nodes.WithType(types.NodeTypeJustAfterEach).SortedByDescendingNestingLevel(), nodes...)
+ if !terminatingNode.IsZero() {
+ nodes = nodes.WithinNestingLevel(terminatingNode.NestingLevel)
+ }
+ if includeDeferCleanups {
+ nodes = append(nodes, g.suite.cleanupNodes.WithType(types.NodeTypeCleanupAfterEach).Reverse()...)
+ nodes = append(nodes, g.suite.cleanupNodes.WithType(types.NodeTypeCleanupAfterAll).Reverse()...)
+ }
+ nodes = nodes.Filter(func(node Node) bool {
+ if afterNodeWasRun[node.ID] {
+ //this node has already been run on this attempt, don't rerun it
+ return false
+ }
+ var pair runOncePair
+ switch node.NodeType {
+ case types.NodeTypeCleanupAfterEach, types.NodeTypeCleanupAfterAll:
+ // check if we were generated in an AfterNode that has already run
+ if afterNodeWasRun[node.NodeIDWhereCleanupWasGenerated] {
+ return true // we were, so we should definitely run this cleanup now
+ }
+ // looks like this cleanup node was generated by a before node or an It.
+ // the run-once status of a cleanup node is governed by the run-once status of its generator
+ pair = pairs.runOncePairFor(node.NodeIDWhereCleanupWasGenerated)
+ default:
+ pair = pairs.runOncePairFor(node.ID)
+ }
+ if pair.isZero() {
+ // this node is not governed by any run-once policy, we should run it
+ return true
+ }
+ // it's our last chance to run if we're the last spec for our oncePair
+ isLastSpecWithPair := g.isLastSpecWithPair(spec.SubjectID(), pair)
+
+ switch g.suite.currentSpecReport.State {
+ case types.SpecStatePassed: //this attempt is passing...
+ return isLastSpecWithPair //...we should run-once if this is our last chance
+ case types.SpecStateSkipped: //the spec was skipped by the user...
+ if isLastSpecWithPair {
+ return true //...we're the last spec, so we should run the AfterNode
+ }
+ if !terminatingPair.isZero() && terminatingNode.NestingLevel == node.NestingLevel {
+ return true //...or, a run-once node at our nesting level was skipped which means this is our last chance to run
+ }
+ case types.SpecStateFailed, types.SpecStatePanicked, types.SpecStateTimedout: // the spec has failed...
+ if isFinalAttempt {
+ if g.continueOnFailure {
+ return isLastSpecWithPair || failedInARunOnceBefore //...we're configured to continue on failures - so we should only run if we're the last spec for this pair or if we failed in a runOnceBefore (which means we _are_ the last spec to run)
+ } else {
+ return true //...this was the last attempt and continueOnFailure is false therefore we are the last spec to run and so the AfterNode should run
+ }
+ }
+ if !terminatingPair.isZero() { // ...and it failed in a run-once. which will be running again
+ if node.NodeType.Is(types.NodeTypeCleanupAfterEach | types.NodeTypeCleanupAfterAll) {
+ return terminatingNode.ID == node.NodeIDWhereCleanupWasGenerated // we should run this node if we're a clean-up generated by it
+ } else {
+ return terminatingNode.NestingLevel == node.NestingLevel // ...or if we're at the same nesting level
+ }
+ }
+ case types.SpecStateInterrupted, types.SpecStateAborted: // ...we've been interrupted and/or aborted
+ return true //...that means the test run is over and we should clean up the stack. Run the AfterNode
+ }
+ return false
+ })
+
+ if len(nodes) == 0 && includeDeferCleanups {
+ break
+ }
+
+ for _, node := range nodes {
+ afterNodeWasRun[node.ID] = true
+ state, failure := g.suite.runNode(node, deadline, spec.Nodes.BestTextFor(node))
+ g.suite.currentSpecReport.RunTime = time.Since(g.suite.currentSpecReport.StartTime)
+ if g.suite.currentSpecReport.State == types.SpecStatePassed || state == types.SpecStateAborted {
+ g.suite.currentSpecReport.State = state
+ g.suite.currentSpecReport.Failure = failure
+ } else if state.Is(types.SpecStateFailureStates) {
+ g.suite.currentSpecReport.AdditionalFailures = append(g.suite.currentSpecReport.AdditionalFailures, types.AdditionalFailure{State: state, Failure: failure})
+ }
+ }
+ includeDeferCleanups = true
+ }
+
+ return failedInARunOnceBefore
+}
+
+func (g *group) run(specs Specs) {
+ g.specs = specs
+ g.continueOnFailure = specs[0].Nodes.FirstNodeMarkedOrdered().MarkedContinueOnFailure
+ for _, spec := range g.specs {
+ g.runOncePairs[spec.SubjectID()] = runOncePairsForSpec(spec)
+ }
+
+ for _, spec := range g.specs {
+ g.suite.selectiveLock.Lock()
+ g.suite.currentSpecReport = g.initialReportForSpec(spec)
+ g.suite.selectiveLock.Unlock()
+
+ g.suite.currentSpecReport.State, g.suite.currentSpecReport.Failure = g.evaluateSkipStatus(spec)
+ g.suite.reporter.WillRun(g.suite.currentSpecReport)
+ g.suite.reportEach(spec, types.NodeTypeReportBeforeEach)
+
+ skip := g.suite.config.DryRun || g.suite.currentSpecReport.State.Is(types.SpecStateFailureStates|types.SpecStateSkipped|types.SpecStatePending)
+
+ g.suite.currentSpecReport.StartTime = time.Now()
+ failedInARunOnceBefore := false
+ if !skip {
+ var maxAttempts = 1
+
+ if g.suite.currentSpecReport.MaxMustPassRepeatedly > 0 {
+ maxAttempts = max(1, spec.MustPassRepeatedly())
+ } else if g.suite.config.FlakeAttempts > 0 {
+ maxAttempts = g.suite.config.FlakeAttempts
+ g.suite.currentSpecReport.MaxFlakeAttempts = maxAttempts
+ } else if g.suite.currentSpecReport.MaxFlakeAttempts > 0 {
+ maxAttempts = max(1, spec.FlakeAttempts())
+ }
+
+ for attempt := 0; attempt < maxAttempts; attempt++ {
+ g.suite.currentSpecReport.NumAttempts = attempt + 1
+ g.suite.writer.Truncate()
+ g.suite.outputInterceptor.StartInterceptingOutput()
+ if attempt > 0 {
+ if g.suite.currentSpecReport.MaxMustPassRepeatedly > 0 {
+ g.suite.handleSpecEvent(types.SpecEvent{SpecEventType: types.SpecEventSpecRepeat, Attempt: attempt})
+ }
+ if g.suite.currentSpecReport.MaxFlakeAttempts > 0 {
+ g.suite.handleSpecEvent(types.SpecEvent{SpecEventType: types.SpecEventSpecRetry, Attempt: attempt})
+ }
+ }
+
+ failedInARunOnceBefore = g.attemptSpec(attempt == maxAttempts-1, spec)
+
+ g.suite.currentSpecReport.EndTime = time.Now()
+ g.suite.currentSpecReport.RunTime = g.suite.currentSpecReport.EndTime.Sub(g.suite.currentSpecReport.StartTime)
+ g.suite.currentSpecReport.CapturedGinkgoWriterOutput += string(g.suite.writer.Bytes())
+ g.suite.currentSpecReport.CapturedStdOutErr += g.suite.outputInterceptor.StopInterceptingAndReturnOutput()
+
+ if g.suite.currentSpecReport.MaxMustPassRepeatedly > 0 {
+ if g.suite.currentSpecReport.State.Is(types.SpecStateFailureStates | types.SpecStateSkipped) {
+ break
+ }
+ }
+ if g.suite.currentSpecReport.MaxFlakeAttempts > 0 {
+ if g.suite.currentSpecReport.State.Is(types.SpecStatePassed | types.SpecStateSkipped | types.SpecStateAborted | types.SpecStateInterrupted) {
+ break
+ } else if attempt < maxAttempts-1 {
+ af := types.AdditionalFailure{State: g.suite.currentSpecReport.State, Failure: g.suite.currentSpecReport.Failure}
+ af.Failure.Message = fmt.Sprintf("Failure recorded during attempt %d:\n%s", attempt+1, af.Failure.Message)
+ g.suite.currentSpecReport.AdditionalFailures = append(g.suite.currentSpecReport.AdditionalFailures, af)
+ }
+ }
+ }
+ }
+
+ g.suite.reportEach(spec, types.NodeTypeReportAfterEach)
+ g.suite.processCurrentSpecReport()
+ if g.suite.currentSpecReport.State.Is(types.SpecStateFailureStates) {
+ g.succeeded = false
+ g.failedInARunOnceBefore = g.failedInARunOnceBefore || failedInARunOnceBefore
+ }
+ g.suite.selectiveLock.Lock()
+ g.suite.currentSpecReport = types.SpecReport{}
+ g.suite.selectiveLock.Unlock()
+ }
+}
diff --git a/vendor/github.com/onsi/ginkgo/v2/internal/interrupt_handler/interrupt_handler.go b/vendor/github.com/onsi/ginkgo/v2/internal/interrupt_handler/interrupt_handler.go
new file mode 100644
index 000000000..8ed86111f
--- /dev/null
+++ b/vendor/github.com/onsi/ginkgo/v2/internal/interrupt_handler/interrupt_handler.go
@@ -0,0 +1,177 @@
+package interrupt_handler
+
+import (
+ "os"
+ "os/signal"
+ "sync"
+ "syscall"
+ "time"
+
+ "github.com/onsi/ginkgo/v2/internal/parallel_support"
+)
+
+var ABORT_POLLING_INTERVAL = 500 * time.Millisecond
+
+type InterruptCause uint
+
+const (
+ InterruptCauseInvalid InterruptCause = iota
+ InterruptCauseSignal
+ InterruptCauseAbortByOtherProcess
+)
+
+type InterruptLevel uint
+
+const (
+ InterruptLevelUninterrupted InterruptLevel = iota
+ InterruptLevelCleanupAndReport
+ InterruptLevelReportOnly
+ InterruptLevelBailOut
+)
+
+func (ic InterruptCause) String() string {
+ switch ic {
+ case InterruptCauseSignal:
+ return "Interrupted by User"
+ case InterruptCauseAbortByOtherProcess:
+ return "Interrupted by Other Ginkgo Process"
+ }
+ return "INVALID_INTERRUPT_CAUSE"
+}
+
+type InterruptStatus struct {
+ Channel chan interface{}
+ Level InterruptLevel
+ Cause InterruptCause
+}
+
+func (s InterruptStatus) Interrupted() bool {
+ return s.Level != InterruptLevelUninterrupted
+}
+
+func (s InterruptStatus) Message() string {
+ return s.Cause.String()
+}
+
+func (s InterruptStatus) ShouldIncludeProgressReport() bool {
+ return s.Cause != InterruptCauseAbortByOtherProcess
+}
+
+type InterruptHandlerInterface interface {
+ Status() InterruptStatus
+}
+
+type InterruptHandler struct {
+ c chan interface{}
+ lock *sync.Mutex
+ level InterruptLevel
+ cause InterruptCause
+ client parallel_support.Client
+ stop chan interface{}
+ signals []os.Signal
+ requestAbortCheck chan interface{}
+}
+
+func NewInterruptHandler(client parallel_support.Client, signals ...os.Signal) *InterruptHandler {
+ if len(signals) == 0 {
+ signals = []os.Signal{os.Interrupt, syscall.SIGTERM}
+ }
+ handler := &InterruptHandler{
+ c: make(chan interface{}),
+ lock: &sync.Mutex{},
+ stop: make(chan interface{}),
+ requestAbortCheck: make(chan interface{}),
+ client: client,
+ signals: signals,
+ }
+ handler.registerForInterrupts()
+ return handler
+}
+
+func (handler *InterruptHandler) Stop() {
+ close(handler.stop)
+}
+
+func (handler *InterruptHandler) registerForInterrupts() {
+ // os signal handling
+ signalChannel := make(chan os.Signal, 1)
+ signal.Notify(signalChannel, handler.signals...)
+
+ // cross-process abort handling
+ var abortChannel chan interface{}
+ if handler.client != nil {
+ abortChannel = make(chan interface{})
+ go func() {
+ pollTicker := time.NewTicker(ABORT_POLLING_INTERVAL)
+ for {
+ select {
+ case <-pollTicker.C:
+ if handler.client.ShouldAbort() {
+ close(abortChannel)
+ pollTicker.Stop()
+ return
+ }
+ case <-handler.requestAbortCheck:
+ if handler.client.ShouldAbort() {
+ close(abortChannel)
+ pollTicker.Stop()
+ return
+ }
+ case <-handler.stop:
+ pollTicker.Stop()
+ return
+ }
+ }
+ }()
+ }
+
+ go func(abortChannel chan interface{}) {
+ var interruptCause InterruptCause
+ for {
+ select {
+ case <-signalChannel:
+ interruptCause = InterruptCauseSignal
+ case <-abortChannel:
+ interruptCause = InterruptCauseAbortByOtherProcess
+ case <-handler.stop:
+ signal.Stop(signalChannel)
+ return
+ }
+ abortChannel = nil
+
+ handler.lock.Lock()
+ oldLevel := handler.level
+ handler.cause = interruptCause
+ if handler.level == InterruptLevelUninterrupted {
+ handler.level = InterruptLevelCleanupAndReport
+ } else if handler.level == InterruptLevelCleanupAndReport {
+ handler.level = InterruptLevelReportOnly
+ } else if handler.level == InterruptLevelReportOnly {
+ handler.level = InterruptLevelBailOut
+ }
+ if handler.level != oldLevel {
+ close(handler.c)
+ handler.c = make(chan interface{})
+ }
+ handler.lock.Unlock()
+ }
+ }(abortChannel)
+}
+
+func (handler *InterruptHandler) Status() InterruptStatus {
+ handler.lock.Lock()
+ status := InterruptStatus{
+ Level: handler.level,
+ Channel: handler.c,
+ Cause: handler.cause,
+ }
+ handler.lock.Unlock()
+
+ if handler.client != nil && handler.client.ShouldAbort() && !status.Interrupted() {
+ close(handler.requestAbortCheck)
+ <-status.Channel
+ return handler.Status()
+ }
+
+ return status
+}
diff --git a/vendor/github.com/onsi/ginkgo/ginkgo/interrupthandler/sigquit_swallower_unix.go b/vendor/github.com/onsi/ginkgo/v2/internal/interrupt_handler/sigquit_swallower_unix.go
similarity index 64%
rename from vendor/github.com/onsi/ginkgo/ginkgo/interrupthandler/sigquit_swallower_unix.go
rename to vendor/github.com/onsi/ginkgo/v2/internal/interrupt_handler/sigquit_swallower_unix.go
index 43c18544a..bf0de496d 100644
--- a/vendor/github.com/onsi/ginkgo/ginkgo/interrupthandler/sigquit_swallower_unix.go
+++ b/vendor/github.com/onsi/ginkgo/v2/internal/interrupt_handler/sigquit_swallower_unix.go
@@ -1,6 +1,7 @@
+//go:build freebsd || openbsd || netbsd || dragonfly || darwin || linux || solaris
// +build freebsd openbsd netbsd dragonfly darwin linux solaris
-package interrupthandler
+package interrupt_handler
import (
"os"
diff --git a/vendor/github.com/onsi/ginkgo/ginkgo/interrupthandler/sigquit_swallower_windows.go b/vendor/github.com/onsi/ginkgo/v2/internal/interrupt_handler/sigquit_swallower_windows.go
similarity index 54%
rename from vendor/github.com/onsi/ginkgo/ginkgo/interrupthandler/sigquit_swallower_windows.go
rename to vendor/github.com/onsi/ginkgo/v2/internal/interrupt_handler/sigquit_swallower_windows.go
index 7f4a50e19..fcf8da833 100644
--- a/vendor/github.com/onsi/ginkgo/ginkgo/interrupthandler/sigquit_swallower_windows.go
+++ b/vendor/github.com/onsi/ginkgo/v2/internal/interrupt_handler/sigquit_swallower_windows.go
@@ -1,6 +1,7 @@
+//go:build windows
// +build windows
-package interrupthandler
+package interrupt_handler
func SwallowSigQuit() {
//noop
diff --git a/vendor/github.com/onsi/ginkgo/v2/internal/node.go b/vendor/github.com/onsi/ginkgo/v2/internal/node.go
new file mode 100644
index 000000000..14c7cf54e
--- /dev/null
+++ b/vendor/github.com/onsi/ginkgo/v2/internal/node.go
@@ -0,0 +1,922 @@
+package internal
+
+import (
+ "context"
+ "fmt"
+ "reflect"
+ "sort"
+ "time"
+
+ "sync"
+
+ "github.com/onsi/ginkgo/v2/types"
+)
+
+var _global_node_id_counter = uint(0)
+var _global_id_mutex = &sync.Mutex{}
+
+func UniqueNodeID() uint {
+ //There's a race in the internal integration tests if we don't make
+ //accessing _global_node_id_counter safe across goroutines.
+ _global_id_mutex.Lock()
+ defer _global_id_mutex.Unlock()
+ _global_node_id_counter += 1
+ return _global_node_id_counter
+}
+
+type Node struct {
+ ID uint
+ NodeType types.NodeType
+
+ Text string
+ Body func(SpecContext)
+ CodeLocation types.CodeLocation
+ NestingLevel int
+ HasContext bool
+
+ SynchronizedBeforeSuiteProc1Body func(SpecContext) []byte
+ SynchronizedBeforeSuiteProc1BodyHasContext bool
+ SynchronizedBeforeSuiteAllProcsBody func(SpecContext, []byte)
+ SynchronizedBeforeSuiteAllProcsBodyHasContext bool
+
+ SynchronizedAfterSuiteAllProcsBody func(SpecContext)
+ SynchronizedAfterSuiteAllProcsBodyHasContext bool
+ SynchronizedAfterSuiteProc1Body func(SpecContext)
+ SynchronizedAfterSuiteProc1BodyHasContext bool
+
+ ReportEachBody func(types.SpecReport)
+ ReportSuiteBody func(types.Report)
+
+ MarkedFocus bool
+ MarkedPending bool
+ MarkedSerial bool
+ MarkedOrdered bool
+ MarkedContinueOnFailure bool
+ MarkedOncePerOrdered bool
+ FlakeAttempts int
+ MustPassRepeatedly int
+ Labels Labels
+ PollProgressAfter time.Duration
+ PollProgressInterval time.Duration
+ NodeTimeout time.Duration
+ SpecTimeout time.Duration
+ GracePeriod time.Duration
+
+ NodeIDWhereCleanupWasGenerated uint
+}
+
+// Decoration Types
+type focusType bool
+type pendingType bool
+type serialType bool
+type orderedType bool
+type continueOnFailureType bool
+type honorsOrderedType bool
+type suppressProgressReporting bool
+
+const Focus = focusType(true)
+const Pending = pendingType(true)
+const Serial = serialType(true)
+const Ordered = orderedType(true)
+const ContinueOnFailure = continueOnFailureType(true)
+const OncePerOrdered = honorsOrderedType(true)
+const SuppressProgressReporting = suppressProgressReporting(true)
+
+type FlakeAttempts uint
+type MustPassRepeatedly uint
+type Offset uint
+type Done chan<- interface{} // Deprecated Done Channel for asynchronous testing
+type Labels []string
+type PollProgressInterval time.Duration
+type PollProgressAfter time.Duration
+type NodeTimeout time.Duration
+type SpecTimeout time.Duration
+type GracePeriod time.Duration
+
+func (l Labels) MatchesLabelFilter(query string) bool {
+ return types.MustParseLabelFilter(query)(l)
+}
+
+func UnionOfLabels(labels ...Labels) Labels {
+ out := Labels{}
+ seen := map[string]bool{}
+ for _, labelSet := range labels {
+ for _, label := range labelSet {
+ if !seen[label] {
+ seen[label] = true
+ out = append(out, label)
+ }
+ }
+ }
+ return out
+}
+
+func PartitionDecorations(args ...interface{}) ([]interface{}, []interface{}) {
+ decorations := []interface{}{}
+ remainingArgs := []interface{}{}
+ for _, arg := range args {
+ if isDecoration(arg) {
+ decorations = append(decorations, arg)
+ } else {
+ remainingArgs = append(remainingArgs, arg)
+ }
+ }
+ return decorations, remainingArgs
+}
+
+func isDecoration(arg interface{}) bool {
+ switch t := reflect.TypeOf(arg); {
+ case t == nil:
+ return false
+ case t == reflect.TypeOf(Offset(0)):
+ return true
+ case t == reflect.TypeOf(types.CodeLocation{}):
+ return true
+ case t == reflect.TypeOf(Focus):
+ return true
+ case t == reflect.TypeOf(Pending):
+ return true
+ case t == reflect.TypeOf(Serial):
+ return true
+ case t == reflect.TypeOf(Ordered):
+ return true
+ case t == reflect.TypeOf(ContinueOnFailure):
+ return true
+ case t == reflect.TypeOf(OncePerOrdered):
+ return true
+ case t == reflect.TypeOf(SuppressProgressReporting):
+ return true
+ case t == reflect.TypeOf(FlakeAttempts(0)):
+ return true
+ case t == reflect.TypeOf(MustPassRepeatedly(0)):
+ return true
+ case t == reflect.TypeOf(Labels{}):
+ return true
+ case t == reflect.TypeOf(PollProgressInterval(0)):
+ return true
+ case t == reflect.TypeOf(PollProgressAfter(0)):
+ return true
+ case t == reflect.TypeOf(NodeTimeout(0)):
+ return true
+ case t == reflect.TypeOf(SpecTimeout(0)):
+ return true
+ case t == reflect.TypeOf(GracePeriod(0)):
+ return true
+ case t.Kind() == reflect.Slice && isSliceOfDecorations(arg):
+ return true
+ default:
+ return false
+ }
+}
+
+func isSliceOfDecorations(slice interface{}) bool {
+ vSlice := reflect.ValueOf(slice)
+ if vSlice.Len() == 0 {
+ return false
+ }
+ for i := 0; i < vSlice.Len(); i++ {
+ if !isDecoration(vSlice.Index(i).Interface()) {
+ return false
+ }
+ }
+ return true
+}
+
+var contextType = reflect.TypeOf(new(context.Context)).Elem()
+var specContextType = reflect.TypeOf(new(SpecContext)).Elem()
+
+func NewNode(deprecationTracker *types.DeprecationTracker, nodeType types.NodeType, text string, args ...interface{}) (Node, []error) {
+ baseOffset := 2
+ node := Node{
+ ID: UniqueNodeID(),
+ NodeType: nodeType,
+ Text: text,
+ Labels: Labels{},
+ CodeLocation: types.NewCodeLocation(baseOffset),
+ NestingLevel: -1,
+ PollProgressAfter: -1,
+ PollProgressInterval: -1,
+ GracePeriod: -1,
+ }
+
+ errors := []error{}
+ appendError := func(err error) {
+ if err != nil {
+ errors = append(errors, err)
+ }
+ }
+
+ args = unrollInterfaceSlice(args)
+
+ remainingArgs := []interface{}{}
+ //First get the CodeLocation up-to-date
+ for _, arg := range args {
+ switch v := arg.(type) {
+ case Offset:
+ node.CodeLocation = types.NewCodeLocation(baseOffset + int(v))
+ case types.CodeLocation:
+ node.CodeLocation = v
+ default:
+ remainingArgs = append(remainingArgs, arg)
+ }
+ }
+
+ labelsSeen := map[string]bool{}
+ trackedFunctionError := false
+ args = remainingArgs
+ remainingArgs = []interface{}{}
+ //now process the rest of the args
+ for _, arg := range args {
+ switch t := reflect.TypeOf(arg); {
+ case t == reflect.TypeOf(float64(0)):
+ break //ignore deprecated timeouts
+ case t == reflect.TypeOf(Focus):
+ node.MarkedFocus = bool(arg.(focusType))
+ if !nodeType.Is(types.NodeTypesForContainerAndIt) {
+ appendError(types.GinkgoErrors.InvalidDecoratorForNodeType(node.CodeLocation, nodeType, "Focus"))
+ }
+ case t == reflect.TypeOf(Pending):
+ node.MarkedPending = bool(arg.(pendingType))
+ if !nodeType.Is(types.NodeTypesForContainerAndIt) {
+ appendError(types.GinkgoErrors.InvalidDecoratorForNodeType(node.CodeLocation, nodeType, "Pending"))
+ }
+ case t == reflect.TypeOf(Serial):
+ node.MarkedSerial = bool(arg.(serialType))
+ if !nodeType.Is(types.NodeTypesForContainerAndIt) {
+ appendError(types.GinkgoErrors.InvalidDecoratorForNodeType(node.CodeLocation, nodeType, "Serial"))
+ }
+ case t == reflect.TypeOf(Ordered):
+ node.MarkedOrdered = bool(arg.(orderedType))
+ if !nodeType.Is(types.NodeTypeContainer) {
+ appendError(types.GinkgoErrors.InvalidDecoratorForNodeType(node.CodeLocation, nodeType, "Ordered"))
+ }
+ case t == reflect.TypeOf(ContinueOnFailure):
+ node.MarkedContinueOnFailure = bool(arg.(continueOnFailureType))
+ if !nodeType.Is(types.NodeTypeContainer) {
+ appendError(types.GinkgoErrors.InvalidDecoratorForNodeType(node.CodeLocation, nodeType, "ContinueOnFailure"))
+ }
+ case t == reflect.TypeOf(OncePerOrdered):
+ node.MarkedOncePerOrdered = bool(arg.(honorsOrderedType))
+ if !nodeType.Is(types.NodeTypeBeforeEach | types.NodeTypeJustBeforeEach | types.NodeTypeAfterEach | types.NodeTypeJustAfterEach) {
+ appendError(types.GinkgoErrors.InvalidDecoratorForNodeType(node.CodeLocation, nodeType, "OncePerOrdered"))
+ }
+ case t == reflect.TypeOf(SuppressProgressReporting):
+ deprecationTracker.TrackDeprecation(types.Deprecations.SuppressProgressReporting())
+ case t == reflect.TypeOf(FlakeAttempts(0)):
+ node.FlakeAttempts = int(arg.(FlakeAttempts))
+ if !nodeType.Is(types.NodeTypesForContainerAndIt) {
+ appendError(types.GinkgoErrors.InvalidDecoratorForNodeType(node.CodeLocation, nodeType, "FlakeAttempts"))
+ }
+ case t == reflect.TypeOf(MustPassRepeatedly(0)):
+ node.MustPassRepeatedly = int(arg.(MustPassRepeatedly))
+ if !nodeType.Is(types.NodeTypesForContainerAndIt) {
+ appendError(types.GinkgoErrors.InvalidDecoratorForNodeType(node.CodeLocation, nodeType, "MustPassRepeatedly"))
+ }
+ case t == reflect.TypeOf(PollProgressAfter(0)):
+ node.PollProgressAfter = time.Duration(arg.(PollProgressAfter))
+ if nodeType.Is(types.NodeTypeContainer) {
+ appendError(types.GinkgoErrors.InvalidDecoratorForNodeType(node.CodeLocation, nodeType, "PollProgressAfter"))
+ }
+ case t == reflect.TypeOf(PollProgressInterval(0)):
+ node.PollProgressInterval = time.Duration(arg.(PollProgressInterval))
+ if nodeType.Is(types.NodeTypeContainer) {
+ appendError(types.GinkgoErrors.InvalidDecoratorForNodeType(node.CodeLocation, nodeType, "PollProgressInterval"))
+ }
+ case t == reflect.TypeOf(NodeTimeout(0)):
+ node.NodeTimeout = time.Duration(arg.(NodeTimeout))
+ if nodeType.Is(types.NodeTypeContainer) {
+ appendError(types.GinkgoErrors.InvalidDecoratorForNodeType(node.CodeLocation, nodeType, "NodeTimeout"))
+ }
+ case t == reflect.TypeOf(SpecTimeout(0)):
+ node.SpecTimeout = time.Duration(arg.(SpecTimeout))
+ if !nodeType.Is(types.NodeTypeIt) {
+ appendError(types.GinkgoErrors.InvalidDecoratorForNodeType(node.CodeLocation, nodeType, "SpecTimeout"))
+ }
+ case t == reflect.TypeOf(GracePeriod(0)):
+ node.GracePeriod = time.Duration(arg.(GracePeriod))
+ if nodeType.Is(types.NodeTypeContainer) {
+ appendError(types.GinkgoErrors.InvalidDecoratorForNodeType(node.CodeLocation, nodeType, "GracePeriod"))
+ }
+ case t == reflect.TypeOf(Labels{}):
+ if !nodeType.Is(types.NodeTypesForContainerAndIt) {
+ appendError(types.GinkgoErrors.InvalidDecoratorForNodeType(node.CodeLocation, nodeType, "Label"))
+ }
+ for _, label := range arg.(Labels) {
+ if !labelsSeen[label] {
+ labelsSeen[label] = true
+ label, err := types.ValidateAndCleanupLabel(label, node.CodeLocation)
+ node.Labels = append(node.Labels, label)
+ appendError(err)
+ }
+ }
+ case t.Kind() == reflect.Func:
+ if nodeType.Is(types.NodeTypeContainer) {
+ if node.Body != nil {
+ appendError(types.GinkgoErrors.MultipleBodyFunctions(node.CodeLocation, nodeType))
+ trackedFunctionError = true
+ break
+ }
+ if t.NumOut() > 0 || t.NumIn() > 0 {
+ appendError(types.GinkgoErrors.InvalidBodyTypeForContainer(t, node.CodeLocation, nodeType))
+ trackedFunctionError = true
+ break
+ }
+ body := arg.(func())
+ node.Body = func(SpecContext) { body() }
+ } else if nodeType.Is(types.NodeTypeReportBeforeEach | types.NodeTypeReportAfterEach) {
+ if node.ReportEachBody == nil {
+ node.ReportEachBody = arg.(func(types.SpecReport))
+ } else {
+ appendError(types.GinkgoErrors.MultipleBodyFunctions(node.CodeLocation, nodeType))
+ trackedFunctionError = true
+ break
+ }
+ } else if nodeType.Is(types.NodeTypeReportBeforeSuite | types.NodeTypeReportAfterSuite) {
+ if node.ReportSuiteBody == nil {
+ node.ReportSuiteBody = arg.(func(types.Report))
+ } else {
+ appendError(types.GinkgoErrors.MultipleBodyFunctions(node.CodeLocation, nodeType))
+ trackedFunctionError = true
+ break
+ }
+ } else if nodeType.Is(types.NodeTypeSynchronizedBeforeSuite) {
+ if node.SynchronizedBeforeSuiteProc1Body != nil && node.SynchronizedBeforeSuiteAllProcsBody != nil {
+ appendError(types.GinkgoErrors.MultipleBodyFunctions(node.CodeLocation, nodeType))
+ trackedFunctionError = true
+ break
+ }
+ if node.SynchronizedBeforeSuiteProc1Body == nil {
+ body, hasContext := extractSynchronizedBeforeSuiteProc1Body(arg)
+ if body == nil {
+ appendError(types.GinkgoErrors.InvalidBodyTypeForSynchronizedBeforeSuiteProc1(t, node.CodeLocation))
+ trackedFunctionError = true
+ }
+ node.SynchronizedBeforeSuiteProc1Body, node.SynchronizedBeforeSuiteProc1BodyHasContext = body, hasContext
+ } else if node.SynchronizedBeforeSuiteAllProcsBody == nil {
+ body, hasContext := extractSynchronizedBeforeSuiteAllProcsBody(arg)
+ if body == nil {
+ appendError(types.GinkgoErrors.InvalidBodyTypeForSynchronizedBeforeSuiteAllProcs(t, node.CodeLocation))
+ trackedFunctionError = true
+ }
+ node.SynchronizedBeforeSuiteAllProcsBody, node.SynchronizedBeforeSuiteAllProcsBodyHasContext = body, hasContext
+ }
+ } else if nodeType.Is(types.NodeTypeSynchronizedAfterSuite) {
+ if node.SynchronizedAfterSuiteAllProcsBody != nil && node.SynchronizedAfterSuiteProc1Body != nil {
+ appendError(types.GinkgoErrors.MultipleBodyFunctions(node.CodeLocation, nodeType))
+ trackedFunctionError = true
+ break
+ }
+ body, hasContext := extractBodyFunction(deprecationTracker, node.CodeLocation, arg)
+ if body == nil {
+ appendError(types.GinkgoErrors.InvalidBodyType(t, node.CodeLocation, nodeType))
+ trackedFunctionError = true
+ break
+ }
+ if node.SynchronizedAfterSuiteAllProcsBody == nil {
+ node.SynchronizedAfterSuiteAllProcsBody, node.SynchronizedAfterSuiteAllProcsBodyHasContext = body, hasContext
+ } else if node.SynchronizedAfterSuiteProc1Body == nil {
+ node.SynchronizedAfterSuiteProc1Body, node.SynchronizedAfterSuiteProc1BodyHasContext = body, hasContext
+ }
+ } else {
+ if node.Body != nil {
+ appendError(types.GinkgoErrors.MultipleBodyFunctions(node.CodeLocation, nodeType))
+ trackedFunctionError = true
+ break
+ }
+ node.Body, node.HasContext = extractBodyFunction(deprecationTracker, node.CodeLocation, arg)
+ if node.Body == nil {
+ appendError(types.GinkgoErrors.InvalidBodyType(t, node.CodeLocation, nodeType))
+ trackedFunctionError = true
+ break
+ }
+ }
+ default:
+ remainingArgs = append(remainingArgs, arg)
+ }
+ }
+
+ //validations
+ if node.MarkedPending && node.MarkedFocus {
+ appendError(types.GinkgoErrors.InvalidDeclarationOfFocusedAndPending(node.CodeLocation, nodeType))
+ }
+
+ if node.MarkedContinueOnFailure && !node.MarkedOrdered {
+ appendError(types.GinkgoErrors.InvalidContinueOnFailureDecoration(node.CodeLocation))
+ }
+
+ hasContext := node.HasContext || node.SynchronizedAfterSuiteProc1BodyHasContext || node.SynchronizedAfterSuiteAllProcsBodyHasContext || node.SynchronizedBeforeSuiteProc1BodyHasContext || node.SynchronizedBeforeSuiteAllProcsBodyHasContext
+
+ if !hasContext && (node.NodeTimeout > 0 || node.SpecTimeout > 0 || node.GracePeriod > 0) && len(errors) == 0 {
+ appendError(types.GinkgoErrors.InvalidTimeoutOrGracePeriodForNonContextNode(node.CodeLocation, nodeType))
+ }
+
+ if !node.NodeType.Is(types.NodeTypeReportBeforeEach|types.NodeTypeReportAfterEach|types.NodeTypeSynchronizedBeforeSuite|types.NodeTypeSynchronizedAfterSuite|types.NodeTypeReportBeforeSuite|types.NodeTypeReportAfterSuite) && node.Body == nil && !node.MarkedPending && !trackedFunctionError {
+ appendError(types.GinkgoErrors.MissingBodyFunction(node.CodeLocation, nodeType))
+ }
+
+ if node.NodeType.Is(types.NodeTypeSynchronizedBeforeSuite) && !trackedFunctionError && (node.SynchronizedBeforeSuiteProc1Body == nil || node.SynchronizedBeforeSuiteAllProcsBody == nil) {
+ appendError(types.GinkgoErrors.MissingBodyFunction(node.CodeLocation, nodeType))
+ }
+
+ if node.NodeType.Is(types.NodeTypeSynchronizedAfterSuite) && !trackedFunctionError && (node.SynchronizedAfterSuiteProc1Body == nil || node.SynchronizedAfterSuiteAllProcsBody == nil) {
+ appendError(types.GinkgoErrors.MissingBodyFunction(node.CodeLocation, nodeType))
+ }
+
+ for _, arg := range remainingArgs {
+ appendError(types.GinkgoErrors.UnknownDecorator(node.CodeLocation, nodeType, arg))
+ }
+
+ if node.FlakeAttempts > 0 && node.MustPassRepeatedly > 0 {
+ appendError(types.GinkgoErrors.InvalidDeclarationOfFlakeAttemptsAndMustPassRepeatedly(node.CodeLocation, nodeType))
+ }
+
+ if len(errors) > 0 {
+ return Node{}, errors
+ }
+
+ return node, errors
+}
+
+var doneType = reflect.TypeOf(make(Done))
+
+func extractBodyFunction(deprecationTracker *types.DeprecationTracker, cl types.CodeLocation, arg interface{}) (func(SpecContext), bool) {
+ t := reflect.TypeOf(arg)
+ if t.NumOut() > 0 || t.NumIn() > 1 {
+ return nil, false
+ }
+ if t.NumIn() == 1 {
+ if t.In(0) == doneType {
+ deprecationTracker.TrackDeprecation(types.Deprecations.Async(), cl)
+ deprecatedAsyncBody := arg.(func(Done))
+ return func(SpecContext) { deprecatedAsyncBody(make(Done)) }, false
+ } else if t.In(0).Implements(specContextType) {
+ return arg.(func(SpecContext)), true
+ } else if t.In(0).Implements(contextType) {
+ body := arg.(func(context.Context))
+ return func(c SpecContext) { body(c) }, true
+ }
+
+ return nil, false
+ }
+
+ body := arg.(func())
+ return func(SpecContext) { body() }, false
+}
+
+var byteType = reflect.TypeOf([]byte{})
+
+func extractSynchronizedBeforeSuiteProc1Body(arg interface{}) (func(SpecContext) []byte, bool) {
+ t := reflect.TypeOf(arg)
+ v := reflect.ValueOf(arg)
+
+ if t.NumOut() > 1 || t.NumIn() > 1 {
+ return nil, false
+ } else if t.NumOut() == 1 && t.Out(0) != byteType {
+ return nil, false
+ } else if t.NumIn() == 1 && !t.In(0).Implements(contextType) {
+ return nil, false
+ }
+ hasContext := t.NumIn() == 1
+
+ return func(c SpecContext) []byte {
+ var out []reflect.Value
+ if hasContext {
+ out = v.Call([]reflect.Value{reflect.ValueOf(c)})
+ } else {
+ out = v.Call([]reflect.Value{})
+ }
+ if len(out) == 1 {
+ return (out[0].Interface()).([]byte)
+ } else {
+ return []byte{}
+ }
+ }, hasContext
+}
+
+func extractSynchronizedBeforeSuiteAllProcsBody(arg interface{}) (func(SpecContext, []byte), bool) {
+ t := reflect.TypeOf(arg)
+ v := reflect.ValueOf(arg)
+ hasContext, hasByte := false, false
+
+ if t.NumOut() > 0 || t.NumIn() > 2 {
+ return nil, false
+ } else if t.NumIn() == 2 && t.In(0).Implements(contextType) && t.In(1) == byteType {
+ hasContext, hasByte = true, true
+ } else if t.NumIn() == 1 && t.In(0).Implements(contextType) {
+ hasContext = true
+ } else if t.NumIn() == 1 && t.In(0) == byteType {
+ hasByte = true
+ } else if t.NumIn() != 0 {
+ return nil, false
+ }
+
+ return func(c SpecContext, b []byte) {
+ in := []reflect.Value{}
+ if hasContext {
+ in = append(in, reflect.ValueOf(c))
+ }
+ if hasByte {
+ in = append(in, reflect.ValueOf(b))
+ }
+ v.Call(in)
+ }, hasContext
+}
+
+var errInterface = reflect.TypeOf((*error)(nil)).Elem()
+
+func NewCleanupNode(deprecationTracker *types.DeprecationTracker, fail func(string, types.CodeLocation), args ...interface{}) (Node, []error) {
+ decorations, remainingArgs := PartitionDecorations(args...)
+ baseOffset := 2
+ cl := types.NewCodeLocation(baseOffset)
+ finalArgs := []interface{}{}
+ for _, arg := range decorations {
+ switch t := reflect.TypeOf(arg); {
+ case t == reflect.TypeOf(Offset(0)):
+ cl = types.NewCodeLocation(baseOffset + int(arg.(Offset)))
+ case t == reflect.TypeOf(types.CodeLocation{}):
+ cl = arg.(types.CodeLocation)
+ default:
+ finalArgs = append(finalArgs, arg)
+ }
+ }
+ finalArgs = append(finalArgs, cl)
+
+ if len(remainingArgs) == 0 {
+ return Node{}, []error{types.GinkgoErrors.DeferCleanupInvalidFunction(cl)}
+ }
+
+ callback := reflect.ValueOf(remainingArgs[0])
+ if !(callback.Kind() == reflect.Func) {
+ return Node{}, []error{types.GinkgoErrors.DeferCleanupInvalidFunction(cl)}
+ }
+
+ callArgs := []reflect.Value{}
+ for _, arg := range remainingArgs[1:] {
+ callArgs = append(callArgs, reflect.ValueOf(arg))
+ }
+
+ hasContext := false
+ t := callback.Type()
+ if t.NumIn() > 0 {
+ if t.In(0).Implements(specContextType) {
+ hasContext = true
+ } else if t.In(0).Implements(contextType) && (len(callArgs) == 0 || !callArgs[0].Type().Implements(contextType)) {
+ hasContext = true
+ }
+ }
+
+ handleFailure := func(out []reflect.Value) {
+ if len(out) == 0 {
+ return
+ }
+ last := out[len(out)-1]
+ if last.Type().Implements(errInterface) && !last.IsNil() {
+ fail(fmt.Sprintf("DeferCleanup callback returned error: %v", last), cl)
+ }
+ }
+
+ if hasContext {
+ finalArgs = append(finalArgs, func(c SpecContext) {
+ out := callback.Call(append([]reflect.Value{reflect.ValueOf(c)}, callArgs...))
+ handleFailure(out)
+ })
+ } else {
+ finalArgs = append(finalArgs, func() {
+ out := callback.Call(callArgs)
+ handleFailure(out)
+ })
+ }
+
+ return NewNode(deprecationTracker, types.NodeTypeCleanupInvalid, "", finalArgs...)
+}
+
+func (n Node) IsZero() bool {
+ return n.ID == 0
+}
+
+/* Nodes */
+type Nodes []Node
+
+func (n Nodes) CopyAppend(nodes ...Node) Nodes {
+ numN := len(n)
+ out := make(Nodes, numN+len(nodes))
+ for i, node := range n {
+ out[i] = node
+ }
+ for j, node := range nodes {
+ out[numN+j] = node
+ }
+ return out
+}
+
+func (n Nodes) SplitAround(pivot Node) (Nodes, Nodes) {
+ pivotIdx := len(n)
+ for i := range n {
+ if n[i].ID == pivot.ID {
+ pivotIdx = i
+ break
+ }
+ }
+ left := n[:pivotIdx]
+ right := Nodes{}
+ if pivotIdx+1 < len(n) {
+ right = n[pivotIdx+1:]
+ }
+
+ return left, right
+}
+
+func (n Nodes) FirstNodeWithType(nodeTypes types.NodeType) Node {
+ for i := range n {
+ if n[i].NodeType.Is(nodeTypes) {
+ return n[i]
+ }
+ }
+ return Node{}
+}
+
+func (n Nodes) WithType(nodeTypes types.NodeType) Nodes {
+ count := 0
+ for i := range n {
+ if n[i].NodeType.Is(nodeTypes) {
+ count++
+ }
+ }
+
+ out, j := make(Nodes, count), 0
+ for i := range n {
+ if n[i].NodeType.Is(nodeTypes) {
+ out[j] = n[i]
+ j++
+ }
+ }
+ return out
+}
+
+func (n Nodes) WithoutType(nodeTypes types.NodeType) Nodes {
+ count := 0
+ for i := range n {
+ if !n[i].NodeType.Is(nodeTypes) {
+ count++
+ }
+ }
+
+ out, j := make(Nodes, count), 0
+ for i := range n {
+ if !n[i].NodeType.Is(nodeTypes) {
+ out[j] = n[i]
+ j++
+ }
+ }
+ return out
+}
+
+func (n Nodes) WithoutNode(nodeToExclude Node) Nodes {
+ idxToExclude := len(n)
+ for i := range n {
+ if n[i].ID == nodeToExclude.ID {
+ idxToExclude = i
+ break
+ }
+ }
+ if idxToExclude == len(n) {
+ return n
+ }
+ out, j := make(Nodes, len(n)-1), 0
+ for i := range n {
+ if i == idxToExclude {
+ continue
+ }
+ out[j] = n[i]
+ j++
+ }
+ return out
+}
+
+func (n Nodes) Filter(filter func(Node) bool) Nodes {
+ trufa, count := make([]bool, len(n)), 0
+ for i := range n {
+ if filter(n[i]) {
+ trufa[i] = true
+ count += 1
+ }
+ }
+ out, j := make(Nodes, count), 0
+ for i := range n {
+ if trufa[i] {
+ out[j] = n[i]
+ j++
+ }
+ }
+ return out
+}
+
+func (n Nodes) FirstSatisfying(filter func(Node) bool) Node {
+ for i := range n {
+ if filter(n[i]) {
+ return n[i]
+ }
+ }
+ return Node{}
+}
+
+func (n Nodes) WithinNestingLevel(deepestNestingLevel int) Nodes {
+ count := 0
+ for i := range n {
+ if n[i].NestingLevel <= deepestNestingLevel {
+ count++
+ }
+ }
+ out, j := make(Nodes, count), 0
+ for i := range n {
+ if n[i].NestingLevel <= deepestNestingLevel {
+ out[j] = n[i]
+ j++
+ }
+ }
+ return out
+}
+
+func (n Nodes) SortedByDescendingNestingLevel() Nodes {
+ out := make(Nodes, len(n))
+ copy(out, n)
+ sort.SliceStable(out, func(i int, j int) bool {
+ return out[i].NestingLevel > out[j].NestingLevel
+ })
+
+ return out
+}
+
+func (n Nodes) SortedByAscendingNestingLevel() Nodes {
+ out := make(Nodes, len(n))
+ copy(out, n)
+ sort.SliceStable(out, func(i int, j int) bool {
+ return out[i].NestingLevel < out[j].NestingLevel
+ })
+
+ return out
+}
+
+func (n Nodes) FirstWithNestingLevel(level int) Node {
+ for i := range n {
+ if n[i].NestingLevel == level {
+ return n[i]
+ }
+ }
+ return Node{}
+}
+
+func (n Nodes) Reverse() Nodes {
+ out := make(Nodes, len(n))
+ for i := range n {
+ out[len(n)-1-i] = n[i]
+ }
+ return out
+}
+
+func (n Nodes) Texts() []string {
+ out := make([]string, len(n))
+ for i := range n {
+ out[i] = n[i].Text
+ }
+ return out
+}
+
+func (n Nodes) Labels() [][]string {
+ out := make([][]string, len(n))
+ for i := range n {
+ if n[i].Labels == nil {
+ out[i] = []string{}
+ } else {
+ out[i] = []string(n[i].Labels)
+ }
+ }
+ return out
+}
+
+func (n Nodes) UnionOfLabels() []string {
+ out := []string{}
+ seen := map[string]bool{}
+ for i := range n {
+ for _, label := range n[i].Labels {
+ if !seen[label] {
+ seen[label] = true
+ out = append(out, label)
+ }
+ }
+ }
+ return out
+}
+
+func (n Nodes) CodeLocations() []types.CodeLocation {
+ out := make([]types.CodeLocation, len(n))
+ for i := range n {
+ out[i] = n[i].CodeLocation
+ }
+ return out
+}
+
+func (n Nodes) BestTextFor(node Node) string {
+ if node.Text != "" {
+ return node.Text
+ }
+ parentNestingLevel := node.NestingLevel - 1
+ for i := range n {
+ if n[i].Text != "" && n[i].NestingLevel == parentNestingLevel {
+ return n[i].Text
+ }
+ }
+
+ return ""
+}
+
+func (n Nodes) ContainsNodeID(id uint) bool {
+ for i := range n {
+ if n[i].ID == id {
+ return true
+ }
+ }
+ return false
+}
+
+func (n Nodes) HasNodeMarkedPending() bool {
+ for i := range n {
+ if n[i].MarkedPending {
+ return true
+ }
+ }
+ return false
+}
+
+func (n Nodes) HasNodeMarkedFocus() bool {
+ for i := range n {
+ if n[i].MarkedFocus {
+ return true
+ }
+ }
+ return false
+}
+
+func (n Nodes) HasNodeMarkedSerial() bool {
+ for i := range n {
+ if n[i].MarkedSerial {
+ return true
+ }
+ }
+ return false
+}
+
+func (n Nodes) FirstNodeMarkedOrdered() Node {
+ for i := range n {
+ if n[i].MarkedOrdered {
+ return n[i]
+ }
+ }
+ return Node{}
+}
+
+func (n Nodes) IndexOfFirstNodeMarkedOrdered() int {
+ for i := range n {
+ if n[i].MarkedOrdered {
+ return i
+ }
+ }
+ return -1
+}
+
+func (n Nodes) GetMaxFlakeAttempts() int {
+ maxFlakeAttempts := 0
+ for i := range n {
+ if n[i].FlakeAttempts > 0 {
+ maxFlakeAttempts = n[i].FlakeAttempts
+ }
+ }
+ return maxFlakeAttempts
+}
+
+func (n Nodes) GetMaxMustPassRepeatedly() int {
+ maxMustPassRepeatedly := 0
+ for i := range n {
+ if n[i].MustPassRepeatedly > 0 {
+ maxMustPassRepeatedly = n[i].MustPassRepeatedly
+ }
+ }
+ return maxMustPassRepeatedly
+}
+
+func unrollInterfaceSlice(args interface{}) []interface{} {
+ v := reflect.ValueOf(args)
+ if v.Kind() != reflect.Slice {
+ return []interface{}{args}
+ }
+ out := []interface{}{}
+ for i := 0; i < v.Len(); i++ {
+ el := reflect.ValueOf(v.Index(i).Interface())
+ if el.Kind() == reflect.Slice && el.Type() != reflect.TypeOf(Labels{}) {
+ out = append(out, unrollInterfaceSlice(el.Interface())...)
+ } else {
+ out = append(out, v.Index(i).Interface())
+ }
+ }
+ return out
+}
diff --git a/vendor/github.com/onsi/ginkgo/v2/internal/ordering.go b/vendor/github.com/onsi/ginkgo/v2/internal/ordering.go
new file mode 100644
index 000000000..84eea0a59
--- /dev/null
+++ b/vendor/github.com/onsi/ginkgo/v2/internal/ordering.go
@@ -0,0 +1,171 @@
+package internal
+
+import (
+ "math/rand"
+ "sort"
+
+ "github.com/onsi/ginkgo/v2/types"
+)
+
+type SortableSpecs struct {
+ Specs Specs
+ Indexes []int
+}
+
+func NewSortableSpecs(specs Specs) *SortableSpecs {
+ indexes := make([]int, len(specs))
+ for i := range specs {
+ indexes[i] = i
+ }
+ return &SortableSpecs{
+ Specs: specs,
+ Indexes: indexes,
+ }
+}
+func (s *SortableSpecs) Len() int { return len(s.Indexes) }
+func (s *SortableSpecs) Swap(i, j int) { s.Indexes[i], s.Indexes[j] = s.Indexes[j], s.Indexes[i] }
+func (s *SortableSpecs) Less(i, j int) bool {
+ a, b := s.Specs[s.Indexes[i]], s.Specs[s.Indexes[j]]
+
+ aNodes, bNodes := a.Nodes.WithType(types.NodeTypesForContainerAndIt), b.Nodes.WithType(types.NodeTypesForContainerAndIt)
+
+ firstOrderedAIdx, firstOrderedBIdx := aNodes.IndexOfFirstNodeMarkedOrdered(), bNodes.IndexOfFirstNodeMarkedOrdered()
+ if firstOrderedAIdx > -1 && firstOrderedBIdx > -1 && aNodes[firstOrderedAIdx].ID == bNodes[firstOrderedBIdx].ID {
+ // strictly preserve order within an ordered containers. ID will track this as IDs are generated monotonically
+ return aNodes.FirstNodeWithType(types.NodeTypeIt).ID < bNodes.FirstNodeWithType(types.NodeTypeIt).ID
+ }
+
+ // if either spec is in an ordered container - only use the nodes up to the outermost ordered container
+ if firstOrderedAIdx > -1 {
+ aNodes = aNodes[:firstOrderedAIdx+1]
+ }
+ if firstOrderedBIdx > -1 {
+ bNodes = bNodes[:firstOrderedBIdx+1]
+ }
+
+ for i := 0; i < len(aNodes) && i < len(bNodes); i++ {
+ aCL, bCL := aNodes[i].CodeLocation, bNodes[i].CodeLocation
+ if aCL.FileName != bCL.FileName {
+ return aCL.FileName < bCL.FileName
+ }
+ if aCL.LineNumber != bCL.LineNumber {
+ return aCL.LineNumber < bCL.LineNumber
+ }
+ }
+ // either everything is equal or we have different lengths of CLs
+ if len(aNodes) != len(bNodes) {
+ return len(aNodes) < len(bNodes)
+ }
+ // ok, now we are sure everything was equal. so we use the spec text to break ties
+ for i := 0; i < len(aNodes); i++ {
+ if aNodes[i].Text != bNodes[i].Text {
+ return aNodes[i].Text < bNodes[i].Text
+ }
+ }
+ // ok, all those texts were equal. we'll use the ID of the most deeply nested node as a last resort
+ return aNodes[len(aNodes)-1].ID < bNodes[len(bNodes)-1].ID
+}
+
+type GroupedSpecIndices []SpecIndices
+type SpecIndices []int
+
+func OrderSpecs(specs Specs, suiteConfig types.SuiteConfig) (GroupedSpecIndices, GroupedSpecIndices) {
+ /*
+ Ginkgo has sophisticated support for randomizing specs. Specs are guaranteed to have the same
+ order for a given seed across test runs.
+
+ By default only top-level containers and specs are shuffled - this makes for a more intuitive debugging
+ experience - specs within a given container run in the order they appear in the file.
+
+ Developers can set -randomizeAllSpecs to shuffle _all_ specs.
+
+ In addition, spec containers can be marked as Ordered. Specs within an Ordered container are never shuffled.
+
+ Finally, specs and spec containers can be marked as Serial. When running in parallel, serial specs run on Process #1 _after_ all other processes have finished.
+ */
+
+ // Seed a new random source based on thee configured random seed.
+ r := rand.New(rand.NewSource(suiteConfig.RandomSeed))
+
+ // first, we sort the entire suite to ensure a deterministic order. the sort is performed by filename, then line number, and then spec text. this ensures every parallel process has the exact same spec order and is only necessary to cover the edge case where the user iterates over a map to generate specs.
+ sortableSpecs := NewSortableSpecs(specs)
+ sort.Sort(sortableSpecs)
+
+ // then we break things into execution groups
+ // a group represents a single unit of execution and is a collection of SpecIndices
+ // usually a group is just a single spec, however ordered containers must be preserved as a single group
+ executionGroupIDs := []uint{}
+ executionGroups := map[uint]SpecIndices{}
+ for _, idx := range sortableSpecs.Indexes {
+ spec := specs[idx]
+ groupNode := spec.Nodes.FirstNodeMarkedOrdered()
+ if groupNode.IsZero() {
+ groupNode = spec.Nodes.FirstNodeWithType(types.NodeTypeIt)
+ }
+ executionGroups[groupNode.ID] = append(executionGroups[groupNode.ID], idx)
+ if len(executionGroups[groupNode.ID]) == 1 {
+ executionGroupIDs = append(executionGroupIDs, groupNode.ID)
+ }
+ }
+
+ // now, we only shuffle all the execution groups if we're randomizing all specs, otherwise
+ // we shuffle outermost containers. so we need to form shufflable groupings of GroupIDs
+ shufflableGroupingIDs := []uint{}
+ shufflableGroupingIDToGroupIDs := map[uint][]uint{}
+
+ // for each execution group we're going to have to pick a node to represent how the
+ // execution group is grouped for shuffling:
+ nodeTypesToShuffle := types.NodeTypesForContainerAndIt
+ if suiteConfig.RandomizeAllSpecs {
+ nodeTypesToShuffle = types.NodeTypeIt
+ }
+
+ //so, for each execution group:
+ for _, groupID := range executionGroupIDs {
+ // pick out a representative spec
+ representativeSpec := specs[executionGroups[groupID][0]]
+
+ // and grab the node on the spec that will represent which shufflable group this execution group belongs tu
+ shufflableGroupingNode := representativeSpec.Nodes.FirstNodeWithType(nodeTypesToShuffle)
+
+ //add the execution group to its shufflable group
+ shufflableGroupingIDToGroupIDs[shufflableGroupingNode.ID] = append(shufflableGroupingIDToGroupIDs[shufflableGroupingNode.ID], groupID)
+
+ //and if it's the first one in
+ if len(shufflableGroupingIDToGroupIDs[shufflableGroupingNode.ID]) == 1 {
+ // record the shuffleable group ID
+ shufflableGroupingIDs = append(shufflableGroupingIDs, shufflableGroupingNode.ID)
+ }
+ }
+
+ // now we permute the sorted shufflable grouping IDs and build the ordered Groups
+ orderedGroups := GroupedSpecIndices{}
+ permutation := r.Perm(len(shufflableGroupingIDs))
+ for _, j := range permutation {
+ //let's get the execution group IDs for this shufflable group:
+ executionGroupIDsForJ := shufflableGroupingIDToGroupIDs[shufflableGroupingIDs[j]]
+ // and we'll add their associated specindices to the orderedGroups slice:
+ for _, executionGroupID := range executionGroupIDsForJ {
+ orderedGroups = append(orderedGroups, executionGroups[executionGroupID])
+ }
+ }
+
+ // If we're running in series, we're done.
+ if suiteConfig.ParallelTotal == 1 {
+ return orderedGroups, GroupedSpecIndices{}
+ }
+
+ // We're running in parallel so we need to partition the ordered groups into a parallelizable set and a serialized set.
+ // The parallelizable groups will run across all Ginkgo processes...
+ // ...the serial groups will only run on Process #1 after all other processes have exited.
+ parallelizableGroups, serialGroups := GroupedSpecIndices{}, GroupedSpecIndices{}
+ for _, specIndices := range orderedGroups {
+ if specs[specIndices[0]].Nodes.HasNodeMarkedSerial() {
+ serialGroups = append(serialGroups, specIndices)
+ } else {
+ parallelizableGroups = append(parallelizableGroups, specIndices)
+ }
+ }
+
+ return parallelizableGroups, serialGroups
+}
diff --git a/vendor/github.com/onsi/ginkgo/v2/internal/output_interceptor.go b/vendor/github.com/onsi/ginkgo/v2/internal/output_interceptor.go
new file mode 100644
index 000000000..4a1c09461
--- /dev/null
+++ b/vendor/github.com/onsi/ginkgo/v2/internal/output_interceptor.go
@@ -0,0 +1,250 @@
+package internal
+
+import (
+ "bytes"
+ "io"
+ "os"
+ "time"
+)
+
+const BAILOUT_TIME = 1 * time.Second
+const BAILOUT_MESSAGE = `Ginkgo detected an issue while intercepting output.
+
+When running in parallel, Ginkgo captures stdout and stderr output
+and attaches it to the running spec. It looks like that process is getting
+stuck for this suite.
+
+This usually happens if you, or a library you are using, spin up an external
+process and set cmd.Stdout = os.Stdout and/or cmd.Stderr = os.Stderr. This
+causes the external process to keep Ginkgo's output interceptor pipe open and
+causes output interception to hang.
+
+Ginkgo has detected this and shortcircuited the capture process. The specs
+will continue running after this message however output from the external
+process that caused this issue will not be captured.
+
+You have several options to fix this. In preferred order they are:
+
+1. Pass GinkgoWriter instead of os.Stdout or os.Stderr to your process.
+2. Ensure your process exits before the current spec completes. If your
+process is long-lived and must cross spec boundaries, this option won't
+work for you.
+3. Pause Ginkgo's output interceptor before starting your process and then
+resume it after. Use PauseOutputInterception() and ResumeOutputInterception()
+to do this.
+4. Set --output-interceptor-mode=none when running your Ginkgo suite. This will
+turn off all output interception but allow specs to run in parallel without this
+issue. You may miss important output if you do this including output from Go's
+race detector.
+
+More details on issue #851 - https://github.com/onsi/ginkgo/issues/851
+`
+
+/*
+The OutputInterceptor is used by to
+intercept and capture all stdin and stderr output during a test run.
+*/
+type OutputInterceptor interface {
+ StartInterceptingOutput()
+ StartInterceptingOutputAndForwardTo(io.Writer)
+ StopInterceptingAndReturnOutput() string
+
+ PauseIntercepting()
+ ResumeIntercepting()
+
+ Shutdown()
+}
+
+type NoopOutputInterceptor struct{}
+
+func (interceptor NoopOutputInterceptor) StartInterceptingOutput() {}
+func (interceptor NoopOutputInterceptor) StartInterceptingOutputAndForwardTo(io.Writer) {}
+func (interceptor NoopOutputInterceptor) StopInterceptingAndReturnOutput() string { return "" }
+func (interceptor NoopOutputInterceptor) PauseIntercepting() {}
+func (interceptor NoopOutputInterceptor) ResumeIntercepting() {}
+func (interceptor NoopOutputInterceptor) Shutdown() {}
+
+type pipePair struct {
+ reader *os.File
+ writer *os.File
+}
+
+func startPipeFactory(pipeChannel chan pipePair, shutdown chan interface{}) {
+ for {
+ //make the next pipe...
+ pair := pipePair{}
+ pair.reader, pair.writer, _ = os.Pipe()
+ select {
+ //...and provide it to the next consumer (they are responsible for closing the files)
+ case pipeChannel <- pair:
+ continue
+ //...or close the files if we were told to shutdown
+ case <-shutdown:
+ pair.reader.Close()
+ pair.writer.Close()
+ return
+ }
+ }
+}
+
+type interceptorImplementation interface {
+ CreateStdoutStderrClones() (*os.File, *os.File)
+ ConnectPipeToStdoutStderr(*os.File)
+ RestoreStdoutStderrFromClones(*os.File, *os.File)
+ ShutdownClones(*os.File, *os.File)
+}
+
+type genericOutputInterceptor struct {
+ intercepting bool
+
+ stdoutClone *os.File
+ stderrClone *os.File
+ pipe pipePair
+
+ shutdown chan interface{}
+ emergencyBailout chan interface{}
+ pipeChannel chan pipePair
+ interceptedContent chan string
+
+ forwardTo io.Writer
+ accumulatedOutput string
+
+ implementation interceptorImplementation
+}
+
+func (interceptor *genericOutputInterceptor) StartInterceptingOutput() {
+ interceptor.StartInterceptingOutputAndForwardTo(io.Discard)
+}
+
+func (interceptor *genericOutputInterceptor) StartInterceptingOutputAndForwardTo(w io.Writer) {
+ if interceptor.intercepting {
+ return
+ }
+ interceptor.accumulatedOutput = ""
+ interceptor.forwardTo = w
+ interceptor.ResumeIntercepting()
+}
+
+func (interceptor *genericOutputInterceptor) StopInterceptingAndReturnOutput() string {
+ if interceptor.intercepting {
+ interceptor.PauseIntercepting()
+ }
+ return interceptor.accumulatedOutput
+}
+
+func (interceptor *genericOutputInterceptor) ResumeIntercepting() {
+ if interceptor.intercepting {
+ return
+ }
+ interceptor.intercepting = true
+ if interceptor.stdoutClone == nil {
+ interceptor.stdoutClone, interceptor.stderrClone = interceptor.implementation.CreateStdoutStderrClones()
+ interceptor.shutdown = make(chan interface{})
+ go startPipeFactory(interceptor.pipeChannel, interceptor.shutdown)
+ }
+
+ // Now we make a pipe, we'll use this to redirect the input to the 1 and 2 file descriptors (this is how everything else in the world is string to log to stdout and stderr)
+ // we get the pipe from our pipe factory. it runs in the background so we can request the next pipe while the spec being intercepted is running
+ interceptor.pipe = <-interceptor.pipeChannel
+
+ interceptor.emergencyBailout = make(chan interface{})
+
+ //Spin up a goroutine to copy data from the pipe into a buffer, this is how we capture any output the user is emitting
+ go func() {
+ buffer := &bytes.Buffer{}
+ destination := io.MultiWriter(buffer, interceptor.forwardTo)
+ copyFinished := make(chan interface{})
+ reader := interceptor.pipe.reader
+ go func() {
+ io.Copy(destination, reader)
+ reader.Close() // close the read end of the pipe so we don't leak a file descriptor
+ close(copyFinished)
+ }()
+ select {
+ case <-copyFinished:
+ interceptor.interceptedContent <- buffer.String()
+ case <-interceptor.emergencyBailout:
+ interceptor.interceptedContent <- ""
+ }
+ }()
+
+ interceptor.implementation.ConnectPipeToStdoutStderr(interceptor.pipe.writer)
+}
+
+func (interceptor *genericOutputInterceptor) PauseIntercepting() {
+ if !interceptor.intercepting {
+ return
+ }
+ // first we have to close the write end of the pipe. To do this we have to close all file descriptors pointing
+ // to the write end. So that would be the pipewriter itself, and FD #1 and FD #2 if we've Dup2'd them
+ interceptor.pipe.writer.Close() // the pipewriter itself
+
+ // we also need to stop intercepting. we do that by reconnecting the stdout and stderr file descriptions back to their respective #1 and #2 file descriptors;
+ // this also closes #1 and #2 before it points that their original stdout and stderr file descriptions
+ interceptor.implementation.RestoreStdoutStderrFromClones(interceptor.stdoutClone, interceptor.stderrClone)
+
+ var content string
+ select {
+ case content = <-interceptor.interceptedContent:
+ case <-time.After(BAILOUT_TIME):
+ /*
+ By closing all the pipe writer's file descriptors associated with the pipe writer's file description the io.Copy reading from the reader
+ should eventually receive an EOF and exit.
+
+ **However**, if the user has spun up an external process and passed in os.Stdout/os.Stderr to cmd.Stdout/cmd.Stderr then the external process
+ will have a file descriptor pointing to the pipe writer's file description and it will not close until the external process exits.
+
+ That would leave us hanging here waiting for the io.Copy to close forever. Instead we invoke this emergency escape valve. This returns whatever
+ content we've got but leaves the io.Copy running. This ensures the external process can continue writing without hanging at the cost of leaking a goroutine
+ and file descriptor (those these will be cleaned up when the process exits).
+
+ We tack on a message to notify the user that they've hit this edgecase and encourage them to address it.
+ */
+ close(interceptor.emergencyBailout)
+ content = <-interceptor.interceptedContent + BAILOUT_MESSAGE
+ }
+
+ interceptor.accumulatedOutput += content
+ interceptor.intercepting = false
+}
+
+func (interceptor *genericOutputInterceptor) Shutdown() {
+ interceptor.PauseIntercepting()
+
+ if interceptor.stdoutClone != nil {
+ close(interceptor.shutdown)
+ interceptor.implementation.ShutdownClones(interceptor.stdoutClone, interceptor.stderrClone)
+ interceptor.stdoutClone = nil
+ interceptor.stderrClone = nil
+ }
+}
+
+/* This is used on windows builds but included here so it can be explicitly tested on unix systems too */
+func NewOSGlobalReassigningOutputInterceptor() OutputInterceptor {
+ return &genericOutputInterceptor{
+ interceptedContent: make(chan string),
+ pipeChannel: make(chan pipePair),
+ shutdown: make(chan interface{}),
+ implementation: &osGlobalReassigningOutputInterceptorImpl{},
+ }
+}
+
+type osGlobalReassigningOutputInterceptorImpl struct{}
+
+func (impl *osGlobalReassigningOutputInterceptorImpl) CreateStdoutStderrClones() (*os.File, *os.File) {
+ return os.Stdout, os.Stderr
+}
+
+func (impl *osGlobalReassigningOutputInterceptorImpl) ConnectPipeToStdoutStderr(pipeWriter *os.File) {
+ os.Stdout = pipeWriter
+ os.Stderr = pipeWriter
+}
+
+func (impl *osGlobalReassigningOutputInterceptorImpl) RestoreStdoutStderrFromClones(stdoutClone *os.File, stderrClone *os.File) {
+ os.Stdout = stdoutClone
+ os.Stderr = stderrClone
+}
+
+func (impl *osGlobalReassigningOutputInterceptorImpl) ShutdownClones(_ *os.File, _ *os.File) {
+ //noop
+}
diff --git a/vendor/github.com/onsi/ginkgo/v2/internal/output_interceptor_unix.go b/vendor/github.com/onsi/ginkgo/v2/internal/output_interceptor_unix.go
new file mode 100644
index 000000000..8a237f446
--- /dev/null
+++ b/vendor/github.com/onsi/ginkgo/v2/internal/output_interceptor_unix.go
@@ -0,0 +1,73 @@
+//go:build freebsd || openbsd || netbsd || dragonfly || darwin || linux || solaris
+// +build freebsd openbsd netbsd dragonfly darwin linux solaris
+
+package internal
+
+import (
+ "os"
+
+ "golang.org/x/sys/unix"
+)
+
+func NewOutputInterceptor() OutputInterceptor {
+ return &genericOutputInterceptor{
+ interceptedContent: make(chan string),
+ pipeChannel: make(chan pipePair),
+ shutdown: make(chan interface{}),
+ implementation: &dupSyscallOutputInterceptorImpl{},
+ }
+}
+
+type dupSyscallOutputInterceptorImpl struct{}
+
+func (impl *dupSyscallOutputInterceptorImpl) CreateStdoutStderrClones() (*os.File, *os.File) {
+ // To clone stdout and stderr we:
+ // First, create two clone file descriptors that point to the stdout and stderr file descriptions
+ stdoutCloneFD, _ := unix.Dup(1)
+ stderrCloneFD, _ := unix.Dup(2)
+
+ // Important, set the fds to FD_CLOEXEC to prevent them leaking into childs
+ // https://github.com/onsi/ginkgo/issues/1191
+ flags, err := unix.FcntlInt(uintptr(stdoutCloneFD), unix.F_GETFD, 0)
+ if err == nil {
+ unix.FcntlInt(uintptr(stdoutCloneFD), unix.F_SETFD, flags|unix.FD_CLOEXEC)
+ }
+ flags, err = unix.FcntlInt(uintptr(stderrCloneFD), unix.F_GETFD, 0)
+ if err == nil {
+ unix.FcntlInt(uintptr(stderrCloneFD), unix.F_SETFD, flags|unix.FD_CLOEXEC)
+ }
+
+ // And then wrap the clone file descriptors in files.
+ // One benefit of this (that we don't use yet) is that we can actually write
+ // to these files to emit output to the console even though we're intercepting output
+ stdoutClone := os.NewFile(uintptr(stdoutCloneFD), "stdout-clone")
+ stderrClone := os.NewFile(uintptr(stderrCloneFD), "stderr-clone")
+
+ //these clones remain alive throughout the lifecycle of the suite and don't need to be recreated
+ //this speeds things up a bit, actually.
+ return stdoutClone, stderrClone
+}
+
+func (impl *dupSyscallOutputInterceptorImpl) ConnectPipeToStdoutStderr(pipeWriter *os.File) {
+ // To redirect output to our pipe we need to point the 1 and 2 file descriptors (which is how the world tries to log things)
+ // to the write end of the pipe.
+ // We do this with Dup2 (possibly Dup3 on some architectures) to have file descriptors 1 and 2 point to the same file description as the pipeWriter
+ // This effectively shunts data written to stdout and stderr to the write end of our pipe
+ unix.Dup2(int(pipeWriter.Fd()), 1)
+ unix.Dup2(int(pipeWriter.Fd()), 2)
+}
+
+func (impl *dupSyscallOutputInterceptorImpl) RestoreStdoutStderrFromClones(stdoutClone *os.File, stderrClone *os.File) {
+ // To restore stdour/stderr from the clones we have the 1 and 2 file descriptors
+ // point to the original file descriptions that we saved off in the clones.
+ // This has the added benefit of closing the connection between these descriptors and the write end of the pipe
+ // which is important to cause the io.Copy on the pipe.Reader to end.
+ unix.Dup2(int(stdoutClone.Fd()), 1)
+ unix.Dup2(int(stderrClone.Fd()), 2)
+}
+
+func (impl *dupSyscallOutputInterceptorImpl) ShutdownClones(stdoutClone *os.File, stderrClone *os.File) {
+ // We're done with the clones so we can close them to clean up after ourselves
+ stdoutClone.Close()
+ stderrClone.Close()
+}
diff --git a/vendor/github.com/onsi/ginkgo/v2/internal/output_interceptor_win.go b/vendor/github.com/onsi/ginkgo/v2/internal/output_interceptor_win.go
new file mode 100644
index 000000000..30c2851a8
--- /dev/null
+++ b/vendor/github.com/onsi/ginkgo/v2/internal/output_interceptor_win.go
@@ -0,0 +1,7 @@
+// +build windows
+
+package internal
+
+func NewOutputInterceptor() OutputInterceptor {
+ return NewOSGlobalReassigningOutputInterceptor()
+}
diff --git a/vendor/github.com/onsi/ginkgo/v2/internal/parallel_support/client_server.go b/vendor/github.com/onsi/ginkgo/v2/internal/parallel_support/client_server.go
new file mode 100644
index 000000000..b3cd64292
--- /dev/null
+++ b/vendor/github.com/onsi/ginkgo/v2/internal/parallel_support/client_server.go
@@ -0,0 +1,72 @@
+package parallel_support
+
+import (
+ "fmt"
+ "io"
+ "os"
+ "time"
+
+ "github.com/onsi/ginkgo/v2/reporters"
+ "github.com/onsi/ginkgo/v2/types"
+)
+
+type BeforeSuiteState struct {
+ Data []byte
+ State types.SpecState
+}
+
+type ParallelIndexCounter struct {
+ Index int
+}
+
+var ErrorGone = fmt.Errorf("gone")
+var ErrorFailed = fmt.Errorf("failed")
+var ErrorEarly = fmt.Errorf("early")
+
+var POLLING_INTERVAL = 50 * time.Millisecond
+
+type Server interface {
+ Start()
+ Close()
+ Address() string
+ RegisterAlive(node int, alive func() bool)
+ GetSuiteDone() chan interface{}
+ GetOutputDestination() io.Writer
+ SetOutputDestination(io.Writer)
+}
+
+type Client interface {
+ Connect() bool
+ Close() error
+
+ PostSuiteWillBegin(report types.Report) error
+ PostDidRun(report types.SpecReport) error
+ PostSuiteDidEnd(report types.Report) error
+ PostReportBeforeSuiteCompleted(state types.SpecState) error
+ BlockUntilReportBeforeSuiteCompleted() (types.SpecState, error)
+ PostSynchronizedBeforeSuiteCompleted(state types.SpecState, data []byte) error
+ BlockUntilSynchronizedBeforeSuiteData() (types.SpecState, []byte, error)
+ BlockUntilNonprimaryProcsHaveFinished() error
+ BlockUntilAggregatedNonprimaryProcsReport() (types.Report, error)
+ FetchNextCounter() (int, error)
+ PostAbort() error
+ ShouldAbort() bool
+ PostEmitProgressReport(report types.ProgressReport) error
+ Write(p []byte) (int, error)
+}
+
+func NewServer(parallelTotal int, reporter reporters.Reporter) (Server, error) {
+ if os.Getenv("GINKGO_PARALLEL_PROTOCOL") == "HTTP" {
+ return newHttpServer(parallelTotal, reporter)
+ } else {
+ return newRPCServer(parallelTotal, reporter)
+ }
+}
+
+func NewClient(serverHost string) Client {
+ if os.Getenv("GINKGO_PARALLEL_PROTOCOL") == "HTTP" {
+ return newHttpClient(serverHost)
+ } else {
+ return newRPCClient(serverHost)
+ }
+}
diff --git a/vendor/github.com/onsi/ginkgo/v2/internal/parallel_support/http_client.go b/vendor/github.com/onsi/ginkgo/v2/internal/parallel_support/http_client.go
new file mode 100644
index 000000000..6547c7a66
--- /dev/null
+++ b/vendor/github.com/onsi/ginkgo/v2/internal/parallel_support/http_client.go
@@ -0,0 +1,169 @@
+package parallel_support
+
+import (
+ "bytes"
+ "encoding/json"
+ "fmt"
+ "io"
+ "net/http"
+ "time"
+
+ "github.com/onsi/ginkgo/v2/types"
+)
+
+type httpClient struct {
+ serverHost string
+}
+
+func newHttpClient(serverHost string) *httpClient {
+ return &httpClient{
+ serverHost: serverHost,
+ }
+}
+
+func (client *httpClient) Connect() bool {
+ resp, err := http.Get(client.serverHost + "/up")
+ if err != nil {
+ return false
+ }
+ resp.Body.Close()
+ return resp.StatusCode == http.StatusOK
+}
+
+func (client *httpClient) Close() error {
+ return nil
+}
+
+func (client *httpClient) post(path string, data interface{}) error {
+ var body io.Reader
+ if data != nil {
+ encoded, err := json.Marshal(data)
+ if err != nil {
+ return err
+ }
+ body = bytes.NewBuffer(encoded)
+ }
+ resp, err := http.Post(client.serverHost+path, "application/json", body)
+ if err != nil {
+ return err
+ }
+ defer resp.Body.Close()
+ if resp.StatusCode != http.StatusOK {
+ return fmt.Errorf("received unexpected status code %d", resp.StatusCode)
+ }
+ return nil
+}
+
+func (client *httpClient) poll(path string, data interface{}) error {
+ for {
+ resp, err := http.Get(client.serverHost + path)
+ if err != nil {
+ return err
+ }
+ if resp.StatusCode == http.StatusTooEarly {
+ resp.Body.Close()
+ time.Sleep(POLLING_INTERVAL)
+ continue
+ }
+ defer resp.Body.Close()
+ if resp.StatusCode == http.StatusGone {
+ return ErrorGone
+ }
+ if resp.StatusCode == http.StatusFailedDependency {
+ return ErrorFailed
+ }
+ if resp.StatusCode != http.StatusOK {
+ return fmt.Errorf("received unexpected status code %d", resp.StatusCode)
+ }
+ if data != nil {
+ return json.NewDecoder(resp.Body).Decode(data)
+ }
+ return nil
+ }
+}
+
+func (client *httpClient) PostSuiteWillBegin(report types.Report) error {
+ return client.post("/suite-will-begin", report)
+}
+
+func (client *httpClient) PostDidRun(report types.SpecReport) error {
+ return client.post("/did-run", report)
+}
+
+func (client *httpClient) PostSuiteDidEnd(report types.Report) error {
+ return client.post("/suite-did-end", report)
+}
+
+func (client *httpClient) PostEmitProgressReport(report types.ProgressReport) error {
+ return client.post("/progress-report", report)
+}
+
+func (client *httpClient) PostReportBeforeSuiteCompleted(state types.SpecState) error {
+ return client.post("/report-before-suite-completed", state)
+}
+
+func (client *httpClient) BlockUntilReportBeforeSuiteCompleted() (types.SpecState, error) {
+ var state types.SpecState
+ err := client.poll("/report-before-suite-state", &state)
+ if err == ErrorGone {
+ return types.SpecStateFailed, nil
+ }
+ return state, err
+}
+
+func (client *httpClient) PostSynchronizedBeforeSuiteCompleted(state types.SpecState, data []byte) error {
+ beforeSuiteState := BeforeSuiteState{
+ State: state,
+ Data: data,
+ }
+ return client.post("/before-suite-completed", beforeSuiteState)
+}
+
+func (client *httpClient) BlockUntilSynchronizedBeforeSuiteData() (types.SpecState, []byte, error) {
+ var beforeSuiteState BeforeSuiteState
+ err := client.poll("/before-suite-state", &beforeSuiteState)
+ if err == ErrorGone {
+ return types.SpecStateInvalid, nil, types.GinkgoErrors.SynchronizedBeforeSuiteDisappearedOnProc1()
+ }
+ return beforeSuiteState.State, beforeSuiteState.Data, err
+}
+
+func (client *httpClient) BlockUntilNonprimaryProcsHaveFinished() error {
+ return client.poll("/have-nonprimary-procs-finished", nil)
+}
+
+func (client *httpClient) BlockUntilAggregatedNonprimaryProcsReport() (types.Report, error) {
+ var report types.Report
+ err := client.poll("/aggregated-nonprimary-procs-report", &report)
+ if err == ErrorGone {
+ return types.Report{}, types.GinkgoErrors.AggregatedReportUnavailableDueToNodeDisappearing()
+ }
+ return report, err
+}
+
+func (client *httpClient) FetchNextCounter() (int, error) {
+ var counter ParallelIndexCounter
+ err := client.poll("/counter", &counter)
+ return counter.Index, err
+}
+
+func (client *httpClient) PostAbort() error {
+ return client.post("/abort", nil)
+}
+
+func (client *httpClient) ShouldAbort() bool {
+ err := client.poll("/abort", nil)
+ if err == ErrorGone {
+ return true
+ }
+ return false
+}
+
+func (client *httpClient) Write(p []byte) (int, error) {
+ resp, err := http.Post(client.serverHost+"/emit-output", "text/plain;charset=UTF-8 ", bytes.NewReader(p))
+ resp.Body.Close()
+ if resp.StatusCode != http.StatusOK {
+ return 0, fmt.Errorf("failed to emit output")
+ }
+ return len(p), err
+}
diff --git a/vendor/github.com/onsi/ginkgo/v2/internal/parallel_support/http_server.go b/vendor/github.com/onsi/ginkgo/v2/internal/parallel_support/http_server.go
new file mode 100644
index 000000000..d2c71ab1b
--- /dev/null
+++ b/vendor/github.com/onsi/ginkgo/v2/internal/parallel_support/http_server.go
@@ -0,0 +1,242 @@
+/*
+
+The remote package provides the pieces to allow Ginkgo test suites to report to remote listeners.
+This is used, primarily, to enable streaming parallel test output but has, in principle, broader applications (e.g. streaming test output to a browser).
+
+*/
+
+package parallel_support
+
+import (
+ "encoding/json"
+ "io"
+ "net"
+ "net/http"
+
+ "github.com/onsi/ginkgo/v2/reporters"
+ "github.com/onsi/ginkgo/v2/types"
+)
+
+/*
+httpServer spins up on an automatically selected port and listens for communication from the forwarding reporter.
+It then forwards that communication to attached reporters.
+*/
+type httpServer struct {
+ listener net.Listener
+ handler *ServerHandler
+}
+
+// Create a new server, automatically selecting a port
+func newHttpServer(parallelTotal int, reporter reporters.Reporter) (*httpServer, error) {
+ listener, err := net.Listen("tcp", "127.0.0.1:0")
+ if err != nil {
+ return nil, err
+ }
+ return &httpServer{
+ listener: listener,
+ handler: newServerHandler(parallelTotal, reporter),
+ }, nil
+}
+
+// Start the server. You don't need to `go s.Start()`, just `s.Start()`
+// It mounts the streaming endpoints (suite/spec report forwarding, output
+// and progress-report emission) and the synchronization endpoints
+// (before-suite handshakes, liveness, spec counter, abort) on a fresh mux,
+// then serves on the listener bound in newHttpServer from a background
+// goroutine — so this call returns immediately.
+func (server *httpServer) Start() {
+	httpServer := &http.Server{}
+	mux := http.NewServeMux()
+	httpServer.Handler = mux
+
+	//streaming endpoints
+	mux.HandleFunc("/suite-will-begin", server.specSuiteWillBegin)
+	mux.HandleFunc("/did-run", server.didRun)
+	mux.HandleFunc("/suite-did-end", server.specSuiteDidEnd)
+	mux.HandleFunc("/emit-output", server.emitOutput)
+	mux.HandleFunc("/progress-report", server.emitProgressReport)
+
+	//synchronization endpoints
+	mux.HandleFunc("/report-before-suite-completed", server.handleReportBeforeSuiteCompleted)
+	mux.HandleFunc("/report-before-suite-state", server.handleReportBeforeSuiteState)
+	mux.HandleFunc("/before-suite-completed", server.handleBeforeSuiteCompleted)
+	mux.HandleFunc("/before-suite-state", server.handleBeforeSuiteState)
+	mux.HandleFunc("/have-nonprimary-procs-finished", server.handleHaveNonprimaryProcsFinished)
+	mux.HandleFunc("/aggregated-nonprimary-procs-report", server.handleAggregatedNonprimaryProcsReport)
+	mux.HandleFunc("/counter", server.handleCounter)
+	mux.HandleFunc("/up", server.handleUp)
+	mux.HandleFunc("/abort", server.handleAbort)
+
+	go httpServer.Serve(server.listener)
+}
+
+// Stop the server
+func (server *httpServer) Close() {
+ server.listener.Close()
+}
+
+// The address the server can be reached at. Pass this into the `ForwardingReporter`.
+func (server *httpServer) Address() string {
+ return "http://" + server.listener.Addr().String()
+}
+
+func (server *httpServer) GetSuiteDone() chan interface{} {
+ return server.handler.done
+}
+
+func (server *httpServer) GetOutputDestination() io.Writer {
+ return server.handler.outputDestination
+}
+
+func (server *httpServer) SetOutputDestination(w io.Writer) {
+ server.handler.outputDestination = w
+}
+
+func (server *httpServer) RegisterAlive(node int, alive func() bool) {
+ server.handler.registerAlive(node, alive)
+}
+
+//
+// Streaming Endpoints
+//
+
+// The server will forward all received messages to Ginkgo reporters registered with `RegisterReporters`
+// decode unmarshals the request body's JSON into object. On malformed input
+// it replies 400 Bad Request and returns false; callers must bail out when
+// false is returned because the response has already been written.
+func (server *httpServer) decode(writer http.ResponseWriter, request *http.Request, object interface{}) bool {
+	defer request.Body.Close()
+	if json.NewDecoder(request.Body).Decode(object) != nil {
+		writer.WriteHeader(http.StatusBadRequest)
+		return false
+	}
+	return true
+}
+
+// handleError maps the handler's sentinel errors onto HTTP status codes so
+// httpClient.poll can reconstruct them on the other side:
+// ErrorEarly -> 425 Too Early (poll again), ErrorGone -> 410 Gone,
+// ErrorFailed -> 424 Failed Dependency, anything else -> 500.
+// Returns true when an error status was written (the response is finished).
+func (server *httpServer) handleError(err error, writer http.ResponseWriter) bool {
+	if err == nil {
+		return false
+	}
+	switch err {
+	case ErrorEarly:
+		writer.WriteHeader(http.StatusTooEarly)
+	case ErrorGone:
+		writer.WriteHeader(http.StatusGone)
+	case ErrorFailed:
+		writer.WriteHeader(http.StatusFailedDependency)
+	default:
+		writer.WriteHeader(http.StatusInternalServerError)
+	}
+	return true
+}
+
+func (server *httpServer) specSuiteWillBegin(writer http.ResponseWriter, request *http.Request) {
+ var report types.Report
+ if !server.decode(writer, request, &report) {
+ return
+ }
+
+ server.handleError(server.handler.SpecSuiteWillBegin(report, voidReceiver), writer)
+}
+
+func (server *httpServer) didRun(writer http.ResponseWriter, request *http.Request) {
+ var report types.SpecReport
+ if !server.decode(writer, request, &report) {
+ return
+ }
+
+ server.handleError(server.handler.DidRun(report, voidReceiver), writer)
+}
+
+func (server *httpServer) specSuiteDidEnd(writer http.ResponseWriter, request *http.Request) {
+ var report types.Report
+ if !server.decode(writer, request, &report) {
+ return
+ }
+ server.handleError(server.handler.SpecSuiteDidEnd(report, voidReceiver), writer)
+}
+
+// emitOutput forwards raw output bytes posted by a parallel proc to the
+// shared output destination. The write count n is required by the handler's
+// RPC-style signature but is not reported back to the HTTP client.
+func (server *httpServer) emitOutput(writer http.ResponseWriter, request *http.Request) {
+	output, err := io.ReadAll(request.Body)
+	if err != nil {
+		writer.WriteHeader(http.StatusInternalServerError)
+		return
+	}
+	var n int
+	server.handleError(server.handler.EmitOutput(output, &n), writer)
+}
+
+func (server *httpServer) emitProgressReport(writer http.ResponseWriter, request *http.Request) {
+ var report types.ProgressReport
+ if !server.decode(writer, request, &report) {
+ return
+ }
+ server.handleError(server.handler.EmitProgressReport(report, voidReceiver), writer)
+}
+
+func (server *httpServer) handleReportBeforeSuiteCompleted(writer http.ResponseWriter, request *http.Request) {
+ var state types.SpecState
+ if !server.decode(writer, request, &state) {
+ return
+ }
+
+ server.handleError(server.handler.ReportBeforeSuiteCompleted(state, voidReceiver), writer)
+}
+
+func (server *httpServer) handleReportBeforeSuiteState(writer http.ResponseWriter, request *http.Request) {
+ var state types.SpecState
+ if server.handleError(server.handler.ReportBeforeSuiteState(voidSender, &state), writer) {
+ return
+ }
+ json.NewEncoder(writer).Encode(state)
+}
+
+func (server *httpServer) handleBeforeSuiteCompleted(writer http.ResponseWriter, request *http.Request) {
+ var beforeSuiteState BeforeSuiteState
+ if !server.decode(writer, request, &beforeSuiteState) {
+ return
+ }
+
+ server.handleError(server.handler.BeforeSuiteCompleted(beforeSuiteState, voidReceiver), writer)
+}
+
+func (server *httpServer) handleBeforeSuiteState(writer http.ResponseWriter, request *http.Request) {
+ var beforeSuiteState BeforeSuiteState
+ if server.handleError(server.handler.BeforeSuiteState(voidSender, &beforeSuiteState), writer) {
+ return
+ }
+ json.NewEncoder(writer).Encode(beforeSuiteState)
+}
+
+func (server *httpServer) handleHaveNonprimaryProcsFinished(writer http.ResponseWriter, request *http.Request) {
+ if server.handleError(server.handler.HaveNonprimaryProcsFinished(voidSender, voidReceiver), writer) {
+ return
+ }
+ writer.WriteHeader(http.StatusOK)
+}
+
+func (server *httpServer) handleAggregatedNonprimaryProcsReport(writer http.ResponseWriter, request *http.Request) {
+ var aggregatedReport types.Report
+ if server.handleError(server.handler.AggregatedNonprimaryProcsReport(voidSender, &aggregatedReport), writer) {
+ return
+ }
+ json.NewEncoder(writer).Encode(aggregatedReport)
+}
+
+func (server *httpServer) handleCounter(writer http.ResponseWriter, request *http.Request) {
+ var n int
+ if server.handleError(server.handler.Counter(voidSender, &n), writer) {
+ return
+ }
+ json.NewEncoder(writer).Encode(ParallelIndexCounter{Index: n})
+}
+
+func (server *httpServer) handleUp(writer http.ResponseWriter, request *http.Request) {
+ writer.WriteHeader(http.StatusOK)
+}
+
+// handleAbort serves both halves of abort coordination on one route:
+// GET asks "should I abort?" and is answered with 410 Gone (yes) or
+// 200 OK (no), mirroring the ErrorGone convention clients poll against;
+// any other method registers an abort with the handler.
+func (server *httpServer) handleAbort(writer http.ResponseWriter, request *http.Request) {
+	if request.Method == "GET" {
+		var shouldAbort bool
+		server.handler.ShouldAbort(voidSender, &shouldAbort)
+		if shouldAbort {
+			writer.WriteHeader(http.StatusGone)
+		} else {
+			writer.WriteHeader(http.StatusOK)
+		}
+	} else {
+		server.handler.Abort(voidSender, voidReceiver)
+	}
+}
diff --git a/vendor/github.com/onsi/ginkgo/v2/internal/parallel_support/rpc_client.go b/vendor/github.com/onsi/ginkgo/v2/internal/parallel_support/rpc_client.go
new file mode 100644
index 000000000..59e8e6fd0
--- /dev/null
+++ b/vendor/github.com/onsi/ginkgo/v2/internal/parallel_support/rpc_client.go
@@ -0,0 +1,136 @@
+package parallel_support
+
+import (
+ "net/rpc"
+ "time"
+
+ "github.com/onsi/ginkgo/v2/types"
+)
+
+type rpcClient struct {
+ serverHost string
+ client *rpc.Client
+}
+
+func newRPCClient(serverHost string) *rpcClient {
+ return &rpcClient{
+ serverHost: serverHost,
+ }
+}
+
+// Connect lazily dials the RPC server over HTTP at the root path. It is
+// idempotent: an already-established connection is reused. On failure the
+// client field is reset to nil and false is returned so callers can retry.
+func (client *rpcClient) Connect() bool {
+	var err error
+	if client.client != nil {
+		return true
+	}
+	client.client, err = rpc.DialHTTPPath("tcp", client.serverHost, "/")
+	if err != nil {
+		client.client = nil
+		return false
+	}
+	return true
+}
+
+func (client *rpcClient) Close() error {
+ return client.client.Close()
+}
+
+// poll calls the named RPC method until it stops answering ErrorEarly,
+// sleeping POLLING_INTERVAL between attempts. net/rpc transports errors as
+// bare strings, so the sentinel errors are matched by message and rehydrated
+// into the canonical values — letting callers compare the result with ==.
+func (client *rpcClient) poll(method string, data interface{}) error {
+	for {
+		err := client.client.Call(method, voidSender, data)
+		if err == nil {
+			return nil
+		}
+		switch err.Error() {
+		case ErrorEarly.Error():
+			time.Sleep(POLLING_INTERVAL)
+		case ErrorGone.Error():
+			return ErrorGone
+		case ErrorFailed.Error():
+			return ErrorFailed
+		default:
+			return err
+		}
+	}
+}
+
+func (client *rpcClient) PostSuiteWillBegin(report types.Report) error {
+ return client.client.Call("Server.SpecSuiteWillBegin", report, voidReceiver)
+}
+
+func (client *rpcClient) PostDidRun(report types.SpecReport) error {
+ return client.client.Call("Server.DidRun", report, voidReceiver)
+}
+
+func (client *rpcClient) PostSuiteDidEnd(report types.Report) error {
+ return client.client.Call("Server.SpecSuiteDidEnd", report, voidReceiver)
+}
+
+func (client *rpcClient) Write(p []byte) (int, error) {
+ var n int
+ err := client.client.Call("Server.EmitOutput", p, &n)
+ return n, err
+}
+
+func (client *rpcClient) PostEmitProgressReport(report types.ProgressReport) error {
+ return client.client.Call("Server.EmitProgressReport", report, voidReceiver)
+}
+
+func (client *rpcClient) PostReportBeforeSuiteCompleted(state types.SpecState) error {
+ return client.client.Call("Server.ReportBeforeSuiteCompleted", state, voidReceiver)
+}
+
+func (client *rpcClient) BlockUntilReportBeforeSuiteCompleted() (types.SpecState, error) {
+ var state types.SpecState
+ err := client.poll("Server.ReportBeforeSuiteState", &state)
+ if err == ErrorGone {
+ return types.SpecStateFailed, nil
+ }
+ return state, err
+}
+
+func (client *rpcClient) PostSynchronizedBeforeSuiteCompleted(state types.SpecState, data []byte) error {
+ beforeSuiteState := BeforeSuiteState{
+ State: state,
+ Data: data,
+ }
+ return client.client.Call("Server.BeforeSuiteCompleted", beforeSuiteState, voidReceiver)
+}
+
+func (client *rpcClient) BlockUntilSynchronizedBeforeSuiteData() (types.SpecState, []byte, error) {
+ var beforeSuiteState BeforeSuiteState
+ err := client.poll("Server.BeforeSuiteState", &beforeSuiteState)
+ if err == ErrorGone {
+ return types.SpecStateInvalid, nil, types.GinkgoErrors.SynchronizedBeforeSuiteDisappearedOnProc1()
+ }
+ return beforeSuiteState.State, beforeSuiteState.Data, err
+}
+
+func (client *rpcClient) BlockUntilNonprimaryProcsHaveFinished() error {
+ return client.poll("Server.HaveNonprimaryProcsFinished", voidReceiver)
+}
+
+func (client *rpcClient) BlockUntilAggregatedNonprimaryProcsReport() (types.Report, error) {
+ var report types.Report
+ err := client.poll("Server.AggregatedNonprimaryProcsReport", &report)
+ if err == ErrorGone {
+ return types.Report{}, types.GinkgoErrors.AggregatedReportUnavailableDueToNodeDisappearing()
+ }
+ return report, err
+}
+
+func (client *rpcClient) FetchNextCounter() (int, error) {
+ var counter int
+ err := client.client.Call("Server.Counter", voidSender, &counter)
+ return counter, err
+}
+
+func (client *rpcClient) PostAbort() error {
+ return client.client.Call("Server.Abort", voidSender, voidReceiver)
+}
+
+func (client *rpcClient) ShouldAbort() bool {
+ var shouldAbort bool
+ client.client.Call("Server.ShouldAbort", voidSender, &shouldAbort)
+ return shouldAbort
+}
diff --git a/vendor/github.com/onsi/ginkgo/v2/internal/parallel_support/rpc_server.go b/vendor/github.com/onsi/ginkgo/v2/internal/parallel_support/rpc_server.go
new file mode 100644
index 000000000..2620fd562
--- /dev/null
+++ b/vendor/github.com/onsi/ginkgo/v2/internal/parallel_support/rpc_server.go
@@ -0,0 +1,75 @@
+/*
+
+The remote package provides the pieces to allow Ginkgo test suites to report to remote listeners.
+This is used, primarily, to enable streaming parallel test output but has, in principle, broader applications (e.g. streaming test output to a browser).
+
+*/
+
+package parallel_support
+
+import (
+ "io"
+ "net"
+ "net/http"
+ "net/rpc"
+
+ "github.com/onsi/ginkgo/v2/reporters"
+)
+
+/*
+RPCServer spins up on an automatically selected port and listens for communication from the forwarding reporter.
+It then forwards that communication to attached reporters.
+*/
+type RPCServer struct {
+ listener net.Listener
+ handler *ServerHandler
+}
+
+//Create a new server, automatically selecting a port
+func newRPCServer(parallelTotal int, reporter reporters.Reporter) (*RPCServer, error) {
+ listener, err := net.Listen("tcp", "127.0.0.1:0")
+ if err != nil {
+ return nil, err
+ }
+ return &RPCServer{
+ listener: listener,
+ handler: newServerHandler(parallelTotal, reporter),
+ }, nil
+}
+
+//Start the server. You don't need to `go s.Start()`, just `s.Start()`
+func (server *RPCServer) Start() {
+ rpcServer := rpc.NewServer()
+ rpcServer.RegisterName("Server", server.handler) //register the handler's methods as the server
+
+ httpServer := &http.Server{}
+ httpServer.Handler = rpcServer
+
+ go httpServer.Serve(server.listener)
+}
+
+//Stop the server
+func (server *RPCServer) Close() {
+ server.listener.Close()
+}
+
+//The address the server can be reached at. Pass this into the `ForwardingReporter`.
+func (server *RPCServer) Address() string {
+ return server.listener.Addr().String()
+}
+
+func (server *RPCServer) GetSuiteDone() chan interface{} {
+ return server.handler.done
+}
+
+func (server *RPCServer) GetOutputDestination() io.Writer {
+ return server.handler.outputDestination
+}
+
+func (server *RPCServer) SetOutputDestination(w io.Writer) {
+ server.handler.outputDestination = w
+}
+
+func (server *RPCServer) RegisterAlive(node int, alive func() bool) {
+ server.handler.registerAlive(node, alive)
+}
diff --git a/vendor/github.com/onsi/ginkgo/v2/internal/parallel_support/server_handler.go b/vendor/github.com/onsi/ginkgo/v2/internal/parallel_support/server_handler.go
new file mode 100644
index 000000000..a6d98793e
--- /dev/null
+++ b/vendor/github.com/onsi/ginkgo/v2/internal/parallel_support/server_handler.go
@@ -0,0 +1,234 @@
+package parallel_support
+
+import (
+ "io"
+ "os"
+ "sync"
+
+ "github.com/onsi/ginkgo/v2/reporters"
+ "github.com/onsi/ginkgo/v2/types"
+)
+
+type Void struct{}
+
+var voidReceiver *Void = &Void{}
+var voidSender Void
+
+// ServerHandler is an RPC-compatible handler that is shared between the http server and the rpc server.
+// It handles all the business logic to avoid duplication between the two servers
+
+type ServerHandler struct {
+ done chan interface{}
+ outputDestination io.Writer
+ reporter reporters.Reporter
+ alives []func() bool
+ lock *sync.Mutex
+ beforeSuiteState BeforeSuiteState
+ reportBeforeSuiteState types.SpecState
+ parallelTotal int
+ counter int
+ counterLock *sync.Mutex
+ shouldAbort bool
+
+ numSuiteDidBegins int
+ numSuiteDidEnds int
+ aggregatedReport types.Report
+ reportHoldingArea []types.SpecReport
+}
+
+// newServerHandler builds the shared business-logic handler for
+// parallelTotal procs: forwarded output goes to os.Stdout by default
+// (overridable via SetOutputDestination), reports go to reporter, and the
+// done channel is closed once every proc's suite has ended (see
+// SpecSuiteDidEnd).
+func newServerHandler(parallelTotal int, reporter reporters.Reporter) *ServerHandler {
+	return &ServerHandler{
+		reporter:         reporter,
+		lock:             &sync.Mutex{},
+		counterLock:      &sync.Mutex{},
+		alives:           make([]func() bool, parallelTotal),
+		beforeSuiteState: BeforeSuiteState{Data: nil, State: types.SpecStateInvalid},
+
+		parallelTotal:     parallelTotal,
+		outputDestination: os.Stdout,
+		done:              make(chan interface{}),
+	}
+}
+
+// SpecSuiteWillBegin is invoked once per parallel proc as its suite starts.
+// Only when the final proc checks in does it emit SuiteWillBegin, then
+// replay any spec reports that arrived before that point (parked in
+// reportHoldingArea by DidRun) so downstream reporters never see a spec
+// before the suite-begin event.
+func (handler *ServerHandler) SpecSuiteWillBegin(report types.Report, _ *Void) error {
+	handler.lock.Lock()
+	defer handler.lock.Unlock()
+
+	handler.numSuiteDidBegins += 1
+
+	// all summaries are identical, so it's fine to simply emit the last one of these
+	if handler.numSuiteDidBegins == handler.parallelTotal {
+		handler.reporter.SuiteWillBegin(report)
+
+		for _, summary := range handler.reportHoldingArea {
+			handler.reporter.WillRun(summary)
+			handler.reporter.DidRun(summary)
+		}
+
+		handler.reportHoldingArea = nil
+	}
+
+	return nil
+}
+
+func (handler *ServerHandler) DidRun(report types.SpecReport, _ *Void) error {
+ handler.lock.Lock()
+ defer handler.lock.Unlock()
+
+ if handler.numSuiteDidBegins == handler.parallelTotal {
+ handler.reporter.WillRun(report)
+ handler.reporter.DidRun(report)
+ } else {
+ handler.reportHoldingArea = append(handler.reportHoldingArea, report)
+ }
+
+ return nil
+}
+
+// SpecSuiteDidEnd folds each proc's final report into aggregatedReport.
+// When the last proc reports in, the merged report is emitted to the
+// attached reporter and the done channel is closed to unblock anyone
+// waiting on suite completion.
+func (handler *ServerHandler) SpecSuiteDidEnd(report types.Report, _ *Void) error {
+	handler.lock.Lock()
+	defer handler.lock.Unlock()
+
+	handler.numSuiteDidEnds += 1
+	if handler.numSuiteDidEnds == 1 {
+		handler.aggregatedReport = report
+	} else {
+		handler.aggregatedReport = handler.aggregatedReport.Add(report)
+	}
+
+	if handler.numSuiteDidEnds == handler.parallelTotal {
+		handler.reporter.SuiteDidEnd(handler.aggregatedReport)
+		close(handler.done)
+	}
+
+	return nil
+}
+
+func (handler *ServerHandler) EmitOutput(output []byte, n *int) error {
+ var err error
+ *n, err = handler.outputDestination.Write(output)
+ return err
+}
+
+func (handler *ServerHandler) EmitProgressReport(report types.ProgressReport, _ *Void) error {
+ handler.lock.Lock()
+ defer handler.lock.Unlock()
+ handler.reporter.EmitProgressReport(report)
+ return nil
+}
+
+func (handler *ServerHandler) registerAlive(proc int, alive func() bool) {
+ handler.lock.Lock()
+ defer handler.lock.Unlock()
+ handler.alives[proc-1] = alive
+}
+
+// procIsAlive reports whether the given proc (1-based) is still running.
+// A proc that never registered a liveness callback is assumed alive.
+func (handler *ServerHandler) procIsAlive(proc int) bool {
+	handler.lock.Lock()
+	defer handler.lock.Unlock()
+	alive := handler.alives[proc-1]
+	if alive == nil {
+		return true
+	}
+	return alive()
+}
+
+func (handler *ServerHandler) haveNonprimaryProcsFinished() bool {
+ for i := 2; i <= handler.parallelTotal; i++ {
+ if handler.procIsAlive(i) {
+ return false
+ }
+ }
+ return true
+}
+
+func (handler *ServerHandler) ReportBeforeSuiteCompleted(reportBeforeSuiteState types.SpecState, _ *Void) error {
+ handler.lock.Lock()
+ defer handler.lock.Unlock()
+ handler.reportBeforeSuiteState = reportBeforeSuiteState
+
+ return nil
+}
+
+// ReportBeforeSuiteState returns the state posted via
+// ReportBeforeSuiteCompleted. While it is still SpecStateInvalid the caller
+// gets ErrorEarly if proc 1 is alive (poll again) or ErrorGone if proc 1
+// died before posting. procIsAlive must be called before taking the lock:
+// it acquires the same non-reentrant mutex.
+func (handler *ServerHandler) ReportBeforeSuiteState(_ Void, reportBeforeSuiteState *types.SpecState) error {
+	proc1IsAlive := handler.procIsAlive(1)
+	handler.lock.Lock()
+	defer handler.lock.Unlock()
+	if handler.reportBeforeSuiteState == types.SpecStateInvalid {
+		if proc1IsAlive {
+			return ErrorEarly
+		} else {
+			return ErrorGone
+		}
+	}
+	*reportBeforeSuiteState = handler.reportBeforeSuiteState
+	return nil
+}
+
+func (handler *ServerHandler) BeforeSuiteCompleted(beforeSuiteState BeforeSuiteState, _ *Void) error {
+ handler.lock.Lock()
+ defer handler.lock.Unlock()
+ handler.beforeSuiteState = beforeSuiteState
+
+ return nil
+}
+
+func (handler *ServerHandler) BeforeSuiteState(_ Void, beforeSuiteState *BeforeSuiteState) error {
+ proc1IsAlive := handler.procIsAlive(1)
+ handler.lock.Lock()
+ defer handler.lock.Unlock()
+ if handler.beforeSuiteState.State == types.SpecStateInvalid {
+ if proc1IsAlive {
+ return ErrorEarly
+ } else {
+ return ErrorGone
+ }
+ }
+ *beforeSuiteState = handler.beforeSuiteState
+ return nil
+}
+
+func (handler *ServerHandler) HaveNonprimaryProcsFinished(_ Void, _ *Void) error {
+ if handler.haveNonprimaryProcsFinished() {
+ return nil
+ } else {
+ return ErrorEarly
+ }
+}
+
+// AggregatedNonprimaryProcsReport hands proc 1 the merged report from procs
+// 2..parallelTotal. It answers ErrorEarly while any nonprimary proc is still
+// running, and ErrorGone if they have all exited but not every one of them
+// managed to post a final report (the aggregate would be incomplete).
+func (handler *ServerHandler) AggregatedNonprimaryProcsReport(_ Void, report *types.Report) error {
+	if handler.haveNonprimaryProcsFinished() {
+		handler.lock.Lock()
+		defer handler.lock.Unlock()
+		if handler.numSuiteDidEnds == handler.parallelTotal-1 {
+			*report = handler.aggregatedReport
+			return nil
+		} else {
+			return ErrorGone
+		}
+	} else {
+		return ErrorEarly
+	}
+}
+
+// Counter hands out the next value of a monotonically increasing counter
+// (used to assign spec indices to parallel procs). It is guarded by a
+// dedicated counterLock rather than the main handler lock.
+func (handler *ServerHandler) Counter(_ Void, counter *int) error {
+	handler.counterLock.Lock()
+	defer handler.counterLock.Unlock()
+	*counter = handler.counter
+	handler.counter++
+	return nil
+}
+
+func (handler *ServerHandler) Abort(_ Void, _ *Void) error {
+ handler.lock.Lock()
+ defer handler.lock.Unlock()
+ handler.shouldAbort = true
+ return nil
+}
+
+func (handler *ServerHandler) ShouldAbort(_ Void, shouldAbort *bool) error {
+ handler.lock.Lock()
+ defer handler.lock.Unlock()
+ *shouldAbort = handler.shouldAbort
+ return nil
+}
diff --git a/vendor/github.com/onsi/ginkgo/v2/internal/progress_report.go b/vendor/github.com/onsi/ginkgo/v2/internal/progress_report.go
new file mode 100644
index 000000000..11269cf1f
--- /dev/null
+++ b/vendor/github.com/onsi/ginkgo/v2/internal/progress_report.go
@@ -0,0 +1,287 @@
+package internal
+
+import (
+ "bufio"
+ "bytes"
+ "context"
+ "fmt"
+ "io"
+ "os"
+ "os/signal"
+ "path/filepath"
+ "runtime"
+ "strconv"
+ "strings"
+ "time"
+
+ "github.com/onsi/ginkgo/v2/types"
+)
+
+var _SOURCE_CACHE = map[string][]string{}
+
+type ProgressSignalRegistrar func(func()) context.CancelFunc
+
+// RegisterForProgressSignal invokes handler each time one of the
+// platform-specific PROGRESS_SIGNALS arrives (the set may be empty, e.g. on
+// Windows, in which case nothing is ever delivered). The returned cancel
+// function stops signal delivery and shuts down the listening goroutine.
+func RegisterForProgressSignal(handler func()) context.CancelFunc {
+	signalChannel := make(chan os.Signal, 1)
+	if len(PROGRESS_SIGNALS) > 0 {
+		signal.Notify(signalChannel, PROGRESS_SIGNALS...)
+	}
+	ctx, cancel := context.WithCancel(context.Background())
+	go func() {
+		for {
+			select {
+			case <-signalChannel:
+				handler()
+			case <-ctx.Done():
+				signal.Stop(signalChannel)
+				return
+			}
+		}
+	}()
+
+	return cancel
+}
+
+type ProgressStepCursor struct {
+ Text string
+ CodeLocation types.CodeLocation
+ StartTime time.Time
+}
+
+func NewProgressReport(isRunningInParallel bool, report types.SpecReport, currentNode Node, currentNodeStartTime time.Time, currentStep types.SpecEvent, gwOutput string, timelineLocation types.TimelineLocation, additionalReports []string, sourceRoots []string, includeAll bool) (types.ProgressReport, error) {
+ pr := types.ProgressReport{
+ ParallelProcess: report.ParallelProcess,
+ RunningInParallel: isRunningInParallel,
+ ContainerHierarchyTexts: report.ContainerHierarchyTexts,
+ LeafNodeText: report.LeafNodeText,
+ LeafNodeLocation: report.LeafNodeLocation,
+ SpecStartTime: report.StartTime,
+
+ CurrentNodeType: currentNode.NodeType,
+ CurrentNodeText: currentNode.Text,
+ CurrentNodeLocation: currentNode.CodeLocation,
+ CurrentNodeStartTime: currentNodeStartTime,
+
+ CurrentStepText: currentStep.Message,
+ CurrentStepLocation: currentStep.CodeLocation,
+ CurrentStepStartTime: currentStep.TimelineLocation.Time,
+
+ AdditionalReports: additionalReports,
+
+ CapturedGinkgoWriterOutput: gwOutput,
+ TimelineLocation: timelineLocation,
+ }
+
+ goroutines, err := extractRunningGoroutines()
+ if err != nil {
+ return pr, err
+ }
+ pr.Goroutines = goroutines
+
+ // now we want to try to find goroutines of interest. these will be goroutines that have any function calls with code in packagesOfInterest:
+ packagesOfInterest := map[string]bool{}
+ packageFromFilename := func(filename string) string {
+ return filepath.Dir(filename)
+ }
+ addPackageFor := func(filename string) {
+ if filename != "" {
+ packagesOfInterest[packageFromFilename(filename)] = true
+ }
+ }
+ isPackageOfInterest := func(filename string) bool {
+ stackPackage := packageFromFilename(filename)
+ for packageOfInterest := range packagesOfInterest {
+ if strings.HasPrefix(stackPackage, packageOfInterest) {
+ return true
+ }
+ }
+ return false
+ }
+ for _, location := range report.ContainerHierarchyLocations {
+ addPackageFor(location.FileName)
+ }
+ addPackageFor(report.LeafNodeLocation.FileName)
+ addPackageFor(currentNode.CodeLocation.FileName)
+ addPackageFor(currentStep.CodeLocation.FileName)
+
+ //First, we find the SpecGoroutine - this will be the goroutine that includes `runNode`
+ specGoRoutineIdx := -1
+ runNodeFunctionCallIdx := -1
+OUTER:
+ for goroutineIdx, goroutine := range pr.Goroutines {
+ for functionCallIdx, functionCall := range goroutine.Stack {
+ if strings.Contains(functionCall.Function, "ginkgo/v2/internal.(*Suite).runNode.func") {
+ specGoRoutineIdx = goroutineIdx
+ runNodeFunctionCallIdx = functionCallIdx
+ break OUTER
+ }
+ }
+ }
+
+ //Now, we find the first non-Ginkgo function call
+ if specGoRoutineIdx > -1 {
+ for runNodeFunctionCallIdx >= 0 {
+ fn := goroutines[specGoRoutineIdx].Stack[runNodeFunctionCallIdx].Function
+ file := goroutines[specGoRoutineIdx].Stack[runNodeFunctionCallIdx].Filename
+ // these are all things that could potentially happen from within ginkgo
+ if strings.Contains(fn, "ginkgo/v2/internal") || strings.Contains(fn, "reflect.Value") || strings.Contains(file, "ginkgo/table_dsl") || strings.Contains(file, "ginkgo/core_dsl") {
+ runNodeFunctionCallIdx--
+ continue
+ }
+ if strings.Contains(goroutines[specGoRoutineIdx].Stack[runNodeFunctionCallIdx].Function, "ginkgo/table_dsl") {
+
+ }
+ //found it! lets add its package of interest
+ addPackageFor(goroutines[specGoRoutineIdx].Stack[runNodeFunctionCallIdx].Filename)
+ break
+ }
+ }
+
+ ginkgoEntryPointIdx := -1
+OUTER_GINKGO_ENTRY_POINT:
+ for goroutineIdx, goroutine := range pr.Goroutines {
+ for _, functionCall := range goroutine.Stack {
+ if strings.Contains(functionCall.Function, "ginkgo/v2.RunSpecs") {
+ ginkgoEntryPointIdx = goroutineIdx
+ break OUTER_GINKGO_ENTRY_POINT
+ }
+ }
+ }
+
+ // Now we go through all goroutines and highlight any lines with packages in `packagesOfInterest`
+ // Any goroutines with highlighted lines end up in the HighlightGoRoutines
+ for goroutineIdx, goroutine := range pr.Goroutines {
+ if goroutineIdx == ginkgoEntryPointIdx {
+ continue
+ }
+ if goroutineIdx == specGoRoutineIdx {
+ pr.Goroutines[goroutineIdx].IsSpecGoroutine = true
+ }
+ for functionCallIdx, functionCall := range goroutine.Stack {
+ if isPackageOfInterest(functionCall.Filename) {
+ goroutine.Stack[functionCallIdx].Highlight = true
+ goroutine.Stack[functionCallIdx].Source, goroutine.Stack[functionCallIdx].SourceHighlight = fetchSource(functionCall.Filename, functionCall.Line, 2, sourceRoots)
+ }
+ }
+ }
+
+ if !includeAll {
+ goroutines := []types.Goroutine{pr.SpecGoroutine()}
+ goroutines = append(goroutines, pr.HighlightedGoroutines()...)
+ pr.Goroutines = goroutines
+ }
+
+ return pr, nil
+}
+
+// extractRunningGoroutines snapshots every running goroutine via
+// runtime.Stack and parses the textual dump into structured
+// types.Goroutine values (ID, run state, and a stack of function calls
+// with file/line information).
+func extractRunningGoroutines() ([]types.Goroutine, error) {
+	var stack []byte
+	// runtime.Stack truncates when the buffer is too small; keep doubling
+	// the buffer until the dump fits (n < size means nothing was cut off).
+	for size := 64 * 1024; ; size *= 2 {
+		stack = make([]byte, size)
+		if n := runtime.Stack(stack, true); n < size {
+			stack = stack[:n]
+			break
+		}
+	}
+	r := bufio.NewReader(bytes.NewReader(stack))
+	out := []types.Goroutine{}
+	idx := -1
+	for {
+		line, err := r.ReadString('\n')
+		if err == io.EOF {
+			break
+		}
+
+		line = strings.TrimSuffix(line, "\n")
+
+		//skip blank lines
+		if line == "" {
+			continue
+		}
+
+		//parse headers for new goroutine frames
+		// headers look like: `goroutine 42 [running]:`
+		if strings.HasPrefix(line, "goroutine") {
+			out = append(out, types.Goroutine{})
+			idx = len(out) - 1
+
+			line = strings.TrimPrefix(line, "goroutine ")
+			line = strings.TrimSuffix(line, ":")
+			fields := strings.SplitN(line, " ", 2)
+			if len(fields) != 2 {
+				return nil, types.GinkgoErrors.FailedToParseStackTrace(fmt.Sprintf("Invalid goroutine frame header: %s", line))
+			}
+			out[idx].ID, err = strconv.ParseUint(fields[0], 10, 64)
+			if err != nil {
+				// NOTE(review): the message interpolates fields[1] (the state)
+				// even though fields[0] (the ID) is what failed to parse.
+				return nil, types.GinkgoErrors.FailedToParseStackTrace(fmt.Sprintf("Invalid goroutine ID: %s", fields[1]))
+			}
+
+			out[idx].State = strings.TrimSuffix(strings.TrimPrefix(fields[1], "["), "]")
+			continue
+		}
+
+		//if we are here we must be at a function call entry in the stack
+		functionCall := types.FunctionCall{
+			Function: strings.TrimPrefix(line, "created by "), // no need to track 'created by'
+		}
+
+		// the next line carries the call site, e.g. "\tfile.go:123 +0x1b"
+		line, err = r.ReadString('\n')
+		line = strings.TrimSuffix(line, "\n")
+		if err == io.EOF {
+			return nil, types.GinkgoErrors.FailedToParseStackTrace(fmt.Sprintf("Invalid function call: %s -- missing file name and line number", functionCall.Function))
+		}
+		line = strings.TrimLeft(line, " \t")
+		delimiterIdx := strings.LastIndex(line, ":")
+		if delimiterIdx == -1 {
+			return nil, types.GinkgoErrors.FailedToParseStackTrace(fmt.Sprintf("Invalid filename and line number: %s", line))
+		}
+		functionCall.Filename = line[:delimiterIdx]
+		// drop the trailing " +0x..." offset, keeping only the line number
+		line = strings.Split(line[delimiterIdx+1:], " ")[0]
+		lineNumber, err := strconv.ParseInt(line, 10, 64)
+		functionCall.Line = int(lineNumber)
+		if err != nil {
+			return nil, types.GinkgoErrors.FailedToParseStackTrace(fmt.Sprintf("Invalid function call line number: %s\n%s", line, err.Error()))
+		}
+		out[idx].Stack = append(out[idx].Stack, functionCall)
+	}
+
+	return out, nil
+}
+
+// fetchSource returns up to 2*span+1 lines of source surrounding the
+// 1-based lineNumber in filename, plus the index of the target line within
+// the returned slice. The file is resolved against each configured source
+// root (trying the path as-is first, via the "" root) and its lines are
+// memoized in the package-level _SOURCE_CACHE. An empty or unlocatable
+// filename yields an empty slice and index 0.
+func fetchSource(filename string, lineNumber int, span int, configuredSourceRoots []string) ([]string, int) {
+	if filename == "" {
+		return []string{}, 0
+	}
+
+	var lines []string
+	var ok bool
+	if lines, ok = _SOURCE_CACHE[filename]; !ok {
+		sourceRoots := []string{""}
+		sourceRoots = append(sourceRoots, configuredSourceRoots...)
+		var data []byte
+		var err error
+		var found bool
+		for _, root := range sourceRoots {
+			data, err = os.ReadFile(filepath.Join(root, filename))
+			if err == nil {
+				found = true
+				break
+			}
+		}
+		if !found {
+			return []string{}, 0
+		}
+		lines = strings.Split(string(data), "\n")
+		_SOURCE_CACHE[filename] = lines
+	}
+
+	// clamp the [lineNumber-span, lineNumber+span] window to the file bounds
+	startIndex := lineNumber - span - 1
+	endIndex := startIndex + span + span + 1
+	if startIndex < 0 {
+		startIndex = 0
+	}
+	if endIndex > len(lines) {
+		endIndex = len(lines)
+	}
+	highlightIndex := lineNumber - 1 - startIndex
+	return lines[startIndex:endIndex], highlightIndex
+}
diff --git a/vendor/github.com/onsi/ginkgo/v2/internal/progress_report_bsd.go b/vendor/github.com/onsi/ginkgo/v2/internal/progress_report_bsd.go
new file mode 100644
index 000000000..61e0ed306
--- /dev/null
+++ b/vendor/github.com/onsi/ginkgo/v2/internal/progress_report_bsd.go
@@ -0,0 +1,11 @@
+//go:build freebsd || openbsd || netbsd || darwin || dragonfly
+// +build freebsd openbsd netbsd darwin dragonfly
+
+package internal
+
+import (
+ "os"
+ "syscall"
+)
+
+var PROGRESS_SIGNALS = []os.Signal{syscall.SIGINFO, syscall.SIGUSR1}
diff --git a/vendor/github.com/onsi/ginkgo/v2/internal/progress_report_unix.go b/vendor/github.com/onsi/ginkgo/v2/internal/progress_report_unix.go
new file mode 100644
index 000000000..ad30de459
--- /dev/null
+++ b/vendor/github.com/onsi/ginkgo/v2/internal/progress_report_unix.go
@@ -0,0 +1,11 @@
+//go:build linux || solaris
+// +build linux solaris
+
+package internal
+
+import (
+ "os"
+ "syscall"
+)
+
+var PROGRESS_SIGNALS = []os.Signal{syscall.SIGUSR1}
diff --git a/vendor/github.com/onsi/ginkgo/v2/internal/progress_report_win.go b/vendor/github.com/onsi/ginkgo/v2/internal/progress_report_win.go
new file mode 100644
index 000000000..0eca2516a
--- /dev/null
+++ b/vendor/github.com/onsi/ginkgo/v2/internal/progress_report_win.go
@@ -0,0 +1,8 @@
+//go:build windows
+// +build windows
+
+package internal
+
+import "os"
+
+var PROGRESS_SIGNALS = []os.Signal{}
diff --git a/vendor/github.com/onsi/ginkgo/v2/internal/progress_reporter_manager.go b/vendor/github.com/onsi/ginkgo/v2/internal/progress_reporter_manager.go
new file mode 100644
index 000000000..2c6e260f7
--- /dev/null
+++ b/vendor/github.com/onsi/ginkgo/v2/internal/progress_reporter_manager.go
@@ -0,0 +1,79 @@
+package internal
+
+import (
+ "context"
+ "sort"
+ "strings"
+ "sync"
+
+ "github.com/onsi/ginkgo/v2/types"
+)
+
+type ProgressReporterManager struct {
+ lock *sync.Mutex
+ progressReporters map[int]func() string
+ prCounter int
+}
+
+func NewProgressReporterManager() *ProgressReporterManager {
+ return &ProgressReporterManager{
+ progressReporters: map[int]func() string{},
+ lock: &sync.Mutex{},
+ }
+}
+
+func (prm *ProgressReporterManager) AttachProgressReporter(reporter func() string) func() {
+ prm.lock.Lock()
+ defer prm.lock.Unlock()
+ prm.prCounter += 1
+ prCounter := prm.prCounter
+ prm.progressReporters[prCounter] = reporter
+
+ return func() {
+ prm.lock.Lock()
+ defer prm.lock.Unlock()
+ delete(prm.progressReporters, prCounter)
+ }
+}
+
+func (prm *ProgressReporterManager) QueryProgressReporters(ctx context.Context, failer *Failer) []string {
+ prm.lock.Lock()
+ keys := []int{}
+ for key := range prm.progressReporters {
+ keys = append(keys, key)
+ }
+ sort.Ints(keys)
+ reporters := []func() string{}
+ for _, key := range keys {
+ reporters = append(reporters, prm.progressReporters[key])
+ }
+ prm.lock.Unlock()
+
+ if len(reporters) == 0 {
+ return nil
+ }
+ out := []string{}
+ for _, reporter := range reporters {
+ reportC := make(chan string, 1)
+ go func() {
+ defer func() {
+ e := recover()
+ if e != nil {
+ failer.Panic(types.NewCodeLocationWithStackTrace(1), e)
+ reportC <- "failed to query attached progress reporter"
+ }
+ }()
+ reportC <- reporter()
+ }()
+ var report string
+ select {
+ case report = <-reportC:
+ case <-ctx.Done():
+ return out
+ }
+ if strings.TrimSpace(report) != "" {
+ out = append(out, report)
+ }
+ }
+ return out
+}
diff --git a/vendor/github.com/onsi/ginkgo/v2/internal/report_entry.go b/vendor/github.com/onsi/ginkgo/v2/internal/report_entry.go
new file mode 100644
index 000000000..cc351a39b
--- /dev/null
+++ b/vendor/github.com/onsi/ginkgo/v2/internal/report_entry.go
@@ -0,0 +1,39 @@
+package internal
+
+import (
+ "time"
+
+ "github.com/onsi/ginkgo/v2/types"
+)
+
+type ReportEntry = types.ReportEntry
+
+func NewReportEntry(name string, cl types.CodeLocation, args ...interface{}) (ReportEntry, error) {
+ out := ReportEntry{
+ Visibility: types.ReportEntryVisibilityAlways,
+ Name: name,
+ Location: cl,
+ Time: time.Now(),
+ }
+ var didSetValue = false
+ for _, arg := range args {
+ switch x := arg.(type) {
+ case types.ReportEntryVisibility:
+ out.Visibility = x
+ case types.CodeLocation:
+ out.Location = x
+ case Offset:
+ out.Location = types.NewCodeLocation(2 + int(x))
+ case time.Time:
+ out.Time = x
+ default:
+ if didSetValue {
+ return ReportEntry{}, types.GinkgoErrors.TooManyReportEntryValues(out.Location, arg)
+ }
+ out.Value = types.WrapEntryValue(arg)
+ didSetValue = true
+ }
+ }
+
+ return out, nil
+}
diff --git a/vendor/github.com/onsi/ginkgo/v2/internal/spec.go b/vendor/github.com/onsi/ginkgo/v2/internal/spec.go
new file mode 100644
index 000000000..7c4ee5bb7
--- /dev/null
+++ b/vendor/github.com/onsi/ginkgo/v2/internal/spec.go
@@ -0,0 +1,87 @@
+package internal
+
+import (
+ "strings"
+ "time"
+
+ "github.com/onsi/ginkgo/v2/types"
+)
+
+type Spec struct {
+ Nodes Nodes
+ Skip bool
+}
+
+func (s Spec) SubjectID() uint {
+ return s.Nodes.FirstNodeWithType(types.NodeTypeIt).ID
+}
+
+func (s Spec) Text() string {
+ texts := []string{}
+ for i := range s.Nodes {
+ if s.Nodes[i].Text != "" {
+ texts = append(texts, s.Nodes[i].Text)
+ }
+ }
+ return strings.Join(texts, " ")
+}
+
+func (s Spec) FirstNodeWithType(nodeTypes types.NodeType) Node {
+ return s.Nodes.FirstNodeWithType(nodeTypes)
+}
+
+func (s Spec) FlakeAttempts() int {
+ flakeAttempts := 0
+ for i := range s.Nodes {
+ if s.Nodes[i].FlakeAttempts > 0 {
+ flakeAttempts = s.Nodes[i].FlakeAttempts
+ }
+ }
+
+ return flakeAttempts
+}
+
+func (s Spec) MustPassRepeatedly() int {
+ mustPassRepeatedly := 0
+ for i := range s.Nodes {
+ if s.Nodes[i].MustPassRepeatedly > 0 {
+ mustPassRepeatedly = s.Nodes[i].MustPassRepeatedly
+ }
+ }
+
+ return mustPassRepeatedly
+}
+
+func (s Spec) SpecTimeout() time.Duration {
+ return s.FirstNodeWithType(types.NodeTypeIt).SpecTimeout
+}
+
+type Specs []Spec
+
+func (s Specs) HasAnySpecsMarkedPending() bool {
+ for i := range s {
+ if s[i].Nodes.HasNodeMarkedPending() {
+ return true
+ }
+ }
+
+ return false
+}
+
+func (s Specs) CountWithoutSkip() int {
+ n := 0
+ for i := range s {
+ if !s[i].Skip {
+ n += 1
+ }
+ }
+ return n
+}
+
+func (s Specs) AtIndices(indices SpecIndices) Specs {
+ out := make(Specs, len(indices))
+ for i, idx := range indices {
+ out[i] = s[idx]
+ }
+ return out
+}
diff --git a/vendor/github.com/onsi/ginkgo/v2/internal/spec_context.go b/vendor/github.com/onsi/ginkgo/v2/internal/spec_context.go
new file mode 100644
index 000000000..2515b84a1
--- /dev/null
+++ b/vendor/github.com/onsi/ginkgo/v2/internal/spec_context.go
@@ -0,0 +1,47 @@
+package internal
+
+import (
+ "context"
+
+ "github.com/onsi/ginkgo/v2/types"
+)
+
+type SpecContext interface {
+ context.Context
+
+ SpecReport() types.SpecReport
+ AttachProgressReporter(func() string) func()
+}
+
+type specContext struct {
+ context.Context
+ *ProgressReporterManager
+
+ cancel context.CancelFunc
+
+ suite *Suite
+}
+
+/*
+SpecContext includes a reference to `suite` and embeds itself in itself as a "GINKGO_SPEC_CONTEXT" value. This allows users to create child Contexts without having down-stream consumers (e.g. Gomega) lose access to the SpecContext and its methods. This allows us to build extensions on top of Ginkgo that simply take an all-encompassing context.
+
+Note that while SpecContext is used to enforce deadlines by Ginkgo it is not configured as a context.WithDeadline. Instead, Ginkgo owns responsibility for cancelling the context when the deadline elapses.
+
+This is because Ginkgo needs finer control over when the context is canceled. Specifically, Ginkgo needs to generate a ProgressReport before it cancels the context to ensure progress is captured where the spec is currently running. The only way to avoid a race here is to manually control the cancellation.
+*/
+func NewSpecContext(suite *Suite) *specContext {
+ ctx, cancel := context.WithCancel(context.Background())
+ sc := &specContext{
+ cancel: cancel,
+ suite: suite,
+ ProgressReporterManager: NewProgressReporterManager(),
+ }
+ ctx = context.WithValue(ctx, "GINKGO_SPEC_CONTEXT", sc) //yes, yes, the go docs say don't use a string for a key... but we'd rather avoid a circular dependency between Gomega and Ginkgo
+ sc.Context = ctx //thank goodness for garbage collectors that can handle circular dependencies
+
+ return sc
+}
+
+func (sc *specContext) SpecReport() types.SpecReport {
+ return sc.suite.CurrentSpecReport()
+}
diff --git a/vendor/github.com/onsi/ginkgo/v2/internal/suite.go b/vendor/github.com/onsi/ginkgo/v2/internal/suite.go
new file mode 100644
index 000000000..ea0d259d9
--- /dev/null
+++ b/vendor/github.com/onsi/ginkgo/v2/internal/suite.go
@@ -0,0 +1,1017 @@
+package internal
+
+import (
+ "fmt"
+ "sync"
+ "time"
+
+ "github.com/onsi/ginkgo/v2/internal/interrupt_handler"
+ "github.com/onsi/ginkgo/v2/internal/parallel_support"
+ "github.com/onsi/ginkgo/v2/reporters"
+ "github.com/onsi/ginkgo/v2/types"
+ "golang.org/x/net/context"
+)
+
+type Phase uint
+
+const (
+ PhaseBuildTopLevel Phase = iota
+ PhaseBuildTree
+ PhaseRun
+)
+
+var PROGRESS_REPORTER_DEADLING = 5 * time.Second
+
+type Suite struct {
+ tree *TreeNode
+ topLevelContainers Nodes
+
+ *ProgressReporterManager
+
+ phase Phase
+
+ suiteNodes Nodes
+ cleanupNodes Nodes
+
+ failer *Failer
+ reporter reporters.Reporter
+ writer WriterInterface
+ outputInterceptor OutputInterceptor
+ interruptHandler interrupt_handler.InterruptHandlerInterface
+ config types.SuiteConfig
+ deadline time.Time
+
+ skipAll bool
+ report types.Report
+ currentSpecReport types.SpecReport
+ currentNode Node
+ currentNodeStartTime time.Time
+
+ currentSpecContext *specContext
+
+ currentByStep types.SpecEvent
+ timelineOrder int
+
+ /*
+ We don't need to lock around all operations. Just those that *could* happen concurrently.
+
+ Suite, generally, only runs one node at a time - and so the possibiity for races is small. In fact, the presence of a race usually indicates the user has launched a goroutine that has leaked past the node it was launched in.
+
+ However, there are some operations that can happen concurrently:
+
+ - AddReportEntry and CurrentSpecReport can be accessed at any point by the user - including in goroutines that outlive the node intentionally (see, e.g. #1020). They both form a self-contained read-write pair and so a lock in them is sufficent.
+ - generateProgressReport can be invoked at any point in time by an interrupt or a progres poll. Moreover, it requires access to currentSpecReport, currentNode, currentNodeStartTime, and progressStepCursor. To make it threadsafe we need to lock around generateProgressReport when we read those variables _and_ everywhere those variables are *written*. In general we don't need to worry about all possible field writes to these variables as what `generateProgressReport` does with these variables is fairly selective (hence the name of the lock). Specifically, we dont' need to lock around state and failure message changes on `currentSpecReport` - just the setting of the variable itself.
+ */
+ selectiveLock *sync.Mutex
+
+ client parallel_support.Client
+}
+
+func NewSuite() *Suite {
+ return &Suite{
+ tree: &TreeNode{},
+ phase: PhaseBuildTopLevel,
+ ProgressReporterManager: NewProgressReporterManager(),
+
+ selectiveLock: &sync.Mutex{},
+ }
+}
+
+func (suite *Suite) BuildTree() error {
+ // During PhaseBuildTopLevel, the top level containers are stored in suite.topLevelCotainers and entered
+ // We now enter PhaseBuildTree where these top level containers are entered and added to the spec tree
+ suite.phase = PhaseBuildTree
+ for _, topLevelContainer := range suite.topLevelContainers {
+ err := suite.PushNode(topLevelContainer)
+ if err != nil {
+ return err
+ }
+ }
+ return nil
+}
+
+func (suite *Suite) Run(description string, suiteLabels Labels, suitePath string, failer *Failer, reporter reporters.Reporter, writer WriterInterface, outputInterceptor OutputInterceptor, interruptHandler interrupt_handler.InterruptHandlerInterface, client parallel_support.Client, progressSignalRegistrar ProgressSignalRegistrar, suiteConfig types.SuiteConfig) (bool, bool) {
+ if suite.phase != PhaseBuildTree {
+ panic("cannot run before building the tree = call suite.BuildTree() first")
+ }
+ ApplyNestedFocusPolicyToTree(suite.tree)
+ specs := GenerateSpecsFromTreeRoot(suite.tree)
+ specs, hasProgrammaticFocus := ApplyFocusToSpecs(specs, description, suiteLabels, suiteConfig)
+
+ suite.phase = PhaseRun
+ suite.client = client
+ suite.failer = failer
+ suite.reporter = reporter
+ suite.writer = writer
+ suite.outputInterceptor = outputInterceptor
+ suite.interruptHandler = interruptHandler
+ suite.config = suiteConfig
+
+ if suite.config.Timeout > 0 {
+ suite.deadline = time.Now().Add(suite.config.Timeout)
+ }
+
+ cancelProgressHandler := progressSignalRegistrar(suite.handleProgressSignal)
+
+ success := suite.runSpecs(description, suiteLabels, suitePath, hasProgrammaticFocus, specs)
+
+ cancelProgressHandler()
+
+ return success, hasProgrammaticFocus
+}
+
+func (suite *Suite) InRunPhase() bool {
+ return suite.phase == PhaseRun
+}
+
+/*
+ Tree Construction methods
+
+ PushNode is used during PhaseBuildTopLevel and PhaseBuildTree
+*/
+
+func (suite *Suite) PushNode(node Node) error {
+ if node.NodeType.Is(types.NodeTypeCleanupInvalid | types.NodeTypeCleanupAfterEach | types.NodeTypeCleanupAfterAll | types.NodeTypeCleanupAfterSuite) {
+ return suite.pushCleanupNode(node)
+ }
+
+ if node.NodeType.Is(types.NodeTypeBeforeSuite | types.NodeTypeAfterSuite | types.NodeTypeSynchronizedBeforeSuite | types.NodeTypeSynchronizedAfterSuite | types.NodeTypeBeforeSuite | types.NodeTypeReportBeforeSuite | types.NodeTypeReportAfterSuite) {
+ return suite.pushSuiteNode(node)
+ }
+
+ if suite.phase == PhaseRun {
+ return types.GinkgoErrors.PushingNodeInRunPhase(node.NodeType, node.CodeLocation)
+ }
+
+ if node.MarkedSerial {
+ firstOrderedNode := suite.tree.AncestorNodeChain().FirstNodeMarkedOrdered()
+ if !firstOrderedNode.IsZero() && !firstOrderedNode.MarkedSerial {
+ return types.GinkgoErrors.InvalidSerialNodeInNonSerialOrderedContainer(node.CodeLocation, node.NodeType)
+ }
+ }
+
+ if node.NodeType.Is(types.NodeTypeBeforeAll | types.NodeTypeAfterAll) {
+ firstOrderedNode := suite.tree.AncestorNodeChain().FirstNodeMarkedOrdered()
+ if firstOrderedNode.IsZero() {
+ return types.GinkgoErrors.SetupNodeNotInOrderedContainer(node.CodeLocation, node.NodeType)
+ }
+ }
+
+ if node.MarkedContinueOnFailure {
+ firstOrderedNode := suite.tree.AncestorNodeChain().FirstNodeMarkedOrdered()
+ if !firstOrderedNode.IsZero() {
+ return types.GinkgoErrors.InvalidContinueOnFailureDecoration(node.CodeLocation)
+ }
+ }
+
+ if node.NodeType == types.NodeTypeContainer {
+ // During PhaseBuildTopLevel we only track the top level containers without entering them
+ // We only enter the top level container nodes during PhaseBuildTree
+ //
+ // This ensures the tree is only constructed after `go spec` has called `flag.Parse()` and gives
+ // the user an opportunity to load suiteConfiguration information in the `TestX` go spec hook just before `RunSpecs`
+ // is invoked. This makes the lifecycle easier to reason about and solves issues like #693.
+ if suite.phase == PhaseBuildTopLevel {
+ suite.topLevelContainers = append(suite.topLevelContainers, node)
+ return nil
+ }
+ if suite.phase == PhaseBuildTree {
+ parentTree := suite.tree
+ suite.tree = &TreeNode{Node: node}
+ parentTree.AppendChild(suite.tree)
+ err := func() (err error) {
+ defer func() {
+ if e := recover(); e != nil {
+ err = types.GinkgoErrors.CaughtPanicDuringABuildPhase(e, node.CodeLocation)
+ }
+ }()
+ node.Body(nil)
+ return err
+ }()
+ suite.tree = parentTree
+ return err
+ }
+ } else {
+ suite.tree.AppendChild(&TreeNode{Node: node})
+ return nil
+ }
+
+ return nil
+}
+
+func (suite *Suite) pushSuiteNode(node Node) error {
+ if suite.phase == PhaseBuildTree {
+ return types.GinkgoErrors.SuiteNodeInNestedContext(node.NodeType, node.CodeLocation)
+ }
+
+ if suite.phase == PhaseRun {
+ return types.GinkgoErrors.SuiteNodeDuringRunPhase(node.NodeType, node.CodeLocation)
+ }
+
+ switch node.NodeType {
+ case types.NodeTypeBeforeSuite, types.NodeTypeSynchronizedBeforeSuite:
+ existingBefores := suite.suiteNodes.WithType(types.NodeTypeBeforeSuite | types.NodeTypeSynchronizedBeforeSuite)
+ if len(existingBefores) > 0 {
+ return types.GinkgoErrors.MultipleBeforeSuiteNodes(node.NodeType, node.CodeLocation, existingBefores[0].NodeType, existingBefores[0].CodeLocation)
+ }
+ case types.NodeTypeAfterSuite, types.NodeTypeSynchronizedAfterSuite:
+ existingAfters := suite.suiteNodes.WithType(types.NodeTypeAfterSuite | types.NodeTypeSynchronizedAfterSuite)
+ if len(existingAfters) > 0 {
+ return types.GinkgoErrors.MultipleAfterSuiteNodes(node.NodeType, node.CodeLocation, existingAfters[0].NodeType, existingAfters[0].CodeLocation)
+ }
+ }
+
+ suite.suiteNodes = append(suite.suiteNodes, node)
+ return nil
+}
+
+func (suite *Suite) pushCleanupNode(node Node) error {
+ if suite.phase != PhaseRun || suite.currentNode.IsZero() {
+ return types.GinkgoErrors.PushingCleanupNodeDuringTreeConstruction(node.CodeLocation)
+ }
+
+ switch suite.currentNode.NodeType {
+ case types.NodeTypeBeforeSuite, types.NodeTypeSynchronizedBeforeSuite, types.NodeTypeAfterSuite, types.NodeTypeSynchronizedAfterSuite:
+ node.NodeType = types.NodeTypeCleanupAfterSuite
+ case types.NodeTypeBeforeAll, types.NodeTypeAfterAll:
+ node.NodeType = types.NodeTypeCleanupAfterAll
+ case types.NodeTypeReportBeforeEach, types.NodeTypeReportAfterEach, types.NodeTypeReportBeforeSuite, types.NodeTypeReportAfterSuite:
+ return types.GinkgoErrors.PushingCleanupInReportingNode(node.CodeLocation, suite.currentNode.NodeType)
+ case types.NodeTypeCleanupInvalid, types.NodeTypeCleanupAfterEach, types.NodeTypeCleanupAfterAll, types.NodeTypeCleanupAfterSuite:
+ return types.GinkgoErrors.PushingCleanupInCleanupNode(node.CodeLocation)
+ default:
+ node.NodeType = types.NodeTypeCleanupAfterEach
+ }
+
+ node.NodeIDWhereCleanupWasGenerated = suite.currentNode.ID
+ node.NestingLevel = suite.currentNode.NestingLevel
+ suite.selectiveLock.Lock()
+ suite.cleanupNodes = append(suite.cleanupNodes, node)
+ suite.selectiveLock.Unlock()
+
+ return nil
+}
+
+func (suite *Suite) generateTimelineLocation() types.TimelineLocation {
+ suite.selectiveLock.Lock()
+ defer suite.selectiveLock.Unlock()
+
+ suite.timelineOrder += 1
+ return types.TimelineLocation{
+ Offset: len(suite.currentSpecReport.CapturedGinkgoWriterOutput) + suite.writer.Len(),
+ Order: suite.timelineOrder,
+ Time: time.Now(),
+ }
+}
+
+func (suite *Suite) handleSpecEvent(event types.SpecEvent) types.SpecEvent {
+ event.TimelineLocation = suite.generateTimelineLocation()
+ suite.selectiveLock.Lock()
+ suite.currentSpecReport.SpecEvents = append(suite.currentSpecReport.SpecEvents, event)
+ suite.selectiveLock.Unlock()
+ suite.reporter.EmitSpecEvent(event)
+ return event
+}
+
+func (suite *Suite) handleSpecEventEnd(eventType types.SpecEventType, startEvent types.SpecEvent) {
+ event := startEvent
+ event.SpecEventType = eventType
+ event.TimelineLocation = suite.generateTimelineLocation()
+ event.Duration = event.TimelineLocation.Time.Sub(startEvent.TimelineLocation.Time)
+ suite.selectiveLock.Lock()
+ suite.currentSpecReport.SpecEvents = append(suite.currentSpecReport.SpecEvents, event)
+ suite.selectiveLock.Unlock()
+ suite.reporter.EmitSpecEvent(event)
+}
+
+func (suite *Suite) By(text string, callback ...func()) error {
+ cl := types.NewCodeLocation(2)
+ if suite.phase != PhaseRun {
+ return types.GinkgoErrors.ByNotDuringRunPhase(cl)
+ }
+
+ event := suite.handleSpecEvent(types.SpecEvent{
+ SpecEventType: types.SpecEventByStart,
+ CodeLocation: cl,
+ Message: text,
+ })
+ suite.selectiveLock.Lock()
+ suite.currentByStep = event
+ suite.selectiveLock.Unlock()
+
+ if len(callback) == 1 {
+ defer func() {
+ suite.selectiveLock.Lock()
+ suite.currentByStep = types.SpecEvent{}
+ suite.selectiveLock.Unlock()
+ suite.handleSpecEventEnd(types.SpecEventByEnd, event)
+ }()
+ callback[0]()
+ } else if len(callback) > 1 {
+ panic("just one callback per By, please")
+ }
+ return nil
+}
+
+/*
+Spec Running methods - used during PhaseRun
+*/
+func (suite *Suite) CurrentSpecReport() types.SpecReport {
+ suite.selectiveLock.Lock()
+ defer suite.selectiveLock.Unlock()
+ report := suite.currentSpecReport
+ if suite.writer != nil {
+ report.CapturedGinkgoWriterOutput = string(suite.writer.Bytes())
+ }
+ report.ReportEntries = make([]ReportEntry, len(report.ReportEntries))
+ copy(report.ReportEntries, suite.currentSpecReport.ReportEntries)
+ return report
+}
+
+func (suite *Suite) AddReportEntry(entry ReportEntry) error {
+ if suite.phase != PhaseRun {
+ return types.GinkgoErrors.AddReportEntryNotDuringRunPhase(entry.Location)
+ }
+ entry.TimelineLocation = suite.generateTimelineLocation()
+ entry.Time = entry.TimelineLocation.Time
+ suite.selectiveLock.Lock()
+ suite.currentSpecReport.ReportEntries = append(suite.currentSpecReport.ReportEntries, entry)
+ suite.selectiveLock.Unlock()
+ suite.reporter.EmitReportEntry(entry)
+ return nil
+}
+
+func (suite *Suite) generateProgressReport(fullReport bool) types.ProgressReport {
+ timelineLocation := suite.generateTimelineLocation()
+ suite.selectiveLock.Lock()
+ defer suite.selectiveLock.Unlock()
+
+ deadline, cancel := context.WithTimeout(context.Background(), PROGRESS_REPORTER_DEADLING)
+ defer cancel()
+ var additionalReports []string
+ if suite.currentSpecContext != nil {
+ additionalReports = append(additionalReports, suite.currentSpecContext.QueryProgressReporters(deadline, suite.failer)...)
+ }
+ additionalReports = append(additionalReports, suite.QueryProgressReporters(deadline, suite.failer)...)
+ gwOutput := suite.currentSpecReport.CapturedGinkgoWriterOutput + string(suite.writer.Bytes())
+ pr, err := NewProgressReport(suite.isRunningInParallel(), suite.currentSpecReport, suite.currentNode, suite.currentNodeStartTime, suite.currentByStep, gwOutput, timelineLocation, additionalReports, suite.config.SourceRoots, fullReport)
+
+ if err != nil {
+ fmt.Printf("{{red}}Failed to generate progress report:{{/}}\n%s\n", err.Error())
+ }
+ return pr
+}
+
+func (suite *Suite) handleProgressSignal() {
+ report := suite.generateProgressReport(false)
+ report.Message = "{{bold}}You've requested a progress report:{{/}}"
+ suite.emitProgressReport(report)
+}
+
+func (suite *Suite) emitProgressReport(report types.ProgressReport) {
+ suite.selectiveLock.Lock()
+ suite.currentSpecReport.ProgressReports = append(suite.currentSpecReport.ProgressReports, report.WithoutCapturedGinkgoWriterOutput())
+ suite.selectiveLock.Unlock()
+
+ suite.reporter.EmitProgressReport(report)
+ if suite.isRunningInParallel() {
+ err := suite.client.PostEmitProgressReport(report)
+ if err != nil {
+ fmt.Println(err.Error())
+ }
+ }
+}
+
+func (suite *Suite) isRunningInParallel() bool {
+ return suite.config.ParallelTotal > 1
+}
+
+func (suite *Suite) processCurrentSpecReport() {
+ suite.reporter.DidRun(suite.currentSpecReport)
+ if suite.isRunningInParallel() {
+ suite.client.PostDidRun(suite.currentSpecReport)
+ }
+ suite.report.SpecReports = append(suite.report.SpecReports, suite.currentSpecReport)
+
+ if suite.currentSpecReport.State.Is(types.SpecStateFailureStates) {
+ suite.report.SuiteSucceeded = false
+ if suite.config.FailFast || suite.currentSpecReport.State.Is(types.SpecStateAborted) {
+ suite.skipAll = true
+ if suite.isRunningInParallel() {
+ suite.client.PostAbort()
+ }
+ }
+ }
+}
+
+func (suite *Suite) runSpecs(description string, suiteLabels Labels, suitePath string, hasProgrammaticFocus bool, specs Specs) bool {
+ numSpecsThatWillBeRun := specs.CountWithoutSkip()
+
+ suite.report = types.Report{
+ SuitePath: suitePath,
+ SuiteDescription: description,
+ SuiteLabels: suiteLabels,
+ SuiteConfig: suite.config,
+ SuiteHasProgrammaticFocus: hasProgrammaticFocus,
+ PreRunStats: types.PreRunStats{
+ TotalSpecs: len(specs),
+ SpecsThatWillRun: numSpecsThatWillBeRun,
+ },
+ StartTime: time.Now(),
+ }
+
+ suite.reporter.SuiteWillBegin(suite.report)
+ if suite.isRunningInParallel() {
+ suite.client.PostSuiteWillBegin(suite.report)
+ }
+
+ suite.report.SuiteSucceeded = true
+
+ suite.runReportSuiteNodesIfNeedBe(types.NodeTypeReportBeforeSuite)
+
+ ranBeforeSuite := suite.report.SuiteSucceeded
+ if suite.report.SuiteSucceeded {
+ suite.runBeforeSuite(numSpecsThatWillBeRun)
+ }
+
+ if suite.report.SuiteSucceeded {
+ groupedSpecIndices, serialGroupedSpecIndices := OrderSpecs(specs, suite.config)
+ nextIndex := MakeIncrementingIndexCounter()
+ if suite.isRunningInParallel() {
+ nextIndex = suite.client.FetchNextCounter
+ }
+
+ for {
+ groupedSpecIdx, err := nextIndex()
+ if err != nil {
+ suite.report.SpecialSuiteFailureReasons = append(suite.report.SpecialSuiteFailureReasons, fmt.Sprintf("Failed to iterate over specs:\n%s", err.Error()))
+ suite.report.SuiteSucceeded = false
+ break
+ }
+
+ if groupedSpecIdx >= len(groupedSpecIndices) {
+ if suite.config.ParallelProcess == 1 && len(serialGroupedSpecIndices) > 0 {
+ groupedSpecIndices, serialGroupedSpecIndices, nextIndex = serialGroupedSpecIndices, GroupedSpecIndices{}, MakeIncrementingIndexCounter()
+ suite.client.BlockUntilNonprimaryProcsHaveFinished()
+ continue
+ }
+ break
+ }
+
+ // the complexity for running groups of specs is very high because of Ordered containers and FlakeAttempts
+ // we encapsulate that complexity in the notion of a Group that can run
+ // Group is really just an extension of suite so it gets passed a suite and has access to all its internals
+ // Note that group is stateful and intended for single use!
+ newGroup(suite).run(specs.AtIndices(groupedSpecIndices[groupedSpecIdx]))
+ }
+
+ if specs.HasAnySpecsMarkedPending() && suite.config.FailOnPending {
+ suite.report.SpecialSuiteFailureReasons = append(suite.report.SpecialSuiteFailureReasons, "Detected pending specs and --fail-on-pending is set")
+ suite.report.SuiteSucceeded = false
+ }
+ }
+
+ if ranBeforeSuite {
+ suite.runAfterSuiteCleanup(numSpecsThatWillBeRun)
+ }
+
+ interruptStatus := suite.interruptHandler.Status()
+ if interruptStatus.Interrupted() {
+ suite.report.SpecialSuiteFailureReasons = append(suite.report.SpecialSuiteFailureReasons, interruptStatus.Cause.String())
+ suite.report.SuiteSucceeded = false
+ }
+ suite.report.EndTime = time.Now()
+ suite.report.RunTime = suite.report.EndTime.Sub(suite.report.StartTime)
+ if !suite.deadline.IsZero() && suite.report.EndTime.After(suite.deadline) {
+ suite.report.SpecialSuiteFailureReasons = append(suite.report.SpecialSuiteFailureReasons, "Suite Timeout Elapsed")
+ suite.report.SuiteSucceeded = false
+ }
+
+ suite.runReportSuiteNodesIfNeedBe(types.NodeTypeReportAfterSuite)
+ suite.reporter.SuiteDidEnd(suite.report)
+ if suite.isRunningInParallel() {
+ suite.client.PostSuiteDidEnd(suite.report)
+ }
+
+ return suite.report.SuiteSucceeded
+}
+
+func (suite *Suite) runBeforeSuite(numSpecsThatWillBeRun int) {
+ beforeSuiteNode := suite.suiteNodes.FirstNodeWithType(types.NodeTypeBeforeSuite | types.NodeTypeSynchronizedBeforeSuite)
+ if !beforeSuiteNode.IsZero() && numSpecsThatWillBeRun > 0 {
+ suite.selectiveLock.Lock()
+ suite.currentSpecReport = types.SpecReport{
+ LeafNodeType: beforeSuiteNode.NodeType,
+ LeafNodeLocation: beforeSuiteNode.CodeLocation,
+ ParallelProcess: suite.config.ParallelProcess,
+ RunningInParallel: suite.isRunningInParallel(),
+ }
+ suite.selectiveLock.Unlock()
+
+ suite.reporter.WillRun(suite.currentSpecReport)
+ suite.runSuiteNode(beforeSuiteNode)
+ if suite.currentSpecReport.State.Is(types.SpecStateSkipped) {
+ suite.report.SpecialSuiteFailureReasons = append(suite.report.SpecialSuiteFailureReasons, "Suite skipped in BeforeSuite")
+ suite.skipAll = true
+ }
+ suite.processCurrentSpecReport()
+ }
+}
+
+func (suite *Suite) runAfterSuiteCleanup(numSpecsThatWillBeRun int) {
+ afterSuiteNode := suite.suiteNodes.FirstNodeWithType(types.NodeTypeAfterSuite | types.NodeTypeSynchronizedAfterSuite)
+ if !afterSuiteNode.IsZero() && numSpecsThatWillBeRun > 0 {
+ suite.selectiveLock.Lock()
+ suite.currentSpecReport = types.SpecReport{
+ LeafNodeType: afterSuiteNode.NodeType,
+ LeafNodeLocation: afterSuiteNode.CodeLocation,
+ ParallelProcess: suite.config.ParallelProcess,
+ RunningInParallel: suite.isRunningInParallel(),
+ }
+ suite.selectiveLock.Unlock()
+
+ suite.reporter.WillRun(suite.currentSpecReport)
+ suite.runSuiteNode(afterSuiteNode)
+ suite.processCurrentSpecReport()
+ }
+
+ afterSuiteCleanup := suite.cleanupNodes.WithType(types.NodeTypeCleanupAfterSuite).Reverse()
+ if len(afterSuiteCleanup) > 0 {
+ for _, cleanupNode := range afterSuiteCleanup {
+ suite.selectiveLock.Lock()
+ suite.currentSpecReport = types.SpecReport{
+ LeafNodeType: cleanupNode.NodeType,
+ LeafNodeLocation: cleanupNode.CodeLocation,
+ ParallelProcess: suite.config.ParallelProcess,
+ RunningInParallel: suite.isRunningInParallel(),
+ }
+ suite.selectiveLock.Unlock()
+
+ suite.reporter.WillRun(suite.currentSpecReport)
+ suite.runSuiteNode(cleanupNode)
+ suite.processCurrentSpecReport()
+ }
+ }
+}
+
+func (suite *Suite) reportEach(spec Spec, nodeType types.NodeType) {
+ nodes := spec.Nodes.WithType(nodeType)
+ if nodeType == types.NodeTypeReportAfterEach {
+ nodes = nodes.SortedByDescendingNestingLevel()
+ }
+ if nodeType == types.NodeTypeReportBeforeEach {
+ nodes = nodes.SortedByAscendingNestingLevel()
+ }
+ if len(nodes) == 0 {
+ return
+ }
+
+ for i := range nodes {
+ suite.writer.Truncate()
+ suite.outputInterceptor.StartInterceptingOutput()
+ report := suite.currentSpecReport
+ nodes[i].Body = func(SpecContext) {
+ nodes[i].ReportEachBody(report)
+ }
+ state, failure := suite.runNode(nodes[i], time.Time{}, spec.Nodes.BestTextFor(nodes[i]))
+
+ // If the spec is not in a failure state (i.e. it's Passed/Skipped/Pending) and the reporter has failed, override the state.
+ // Also, if the reporter is every aborted - always override the state to propagate the abort
+ if (!suite.currentSpecReport.State.Is(types.SpecStateFailureStates) && state.Is(types.SpecStateFailureStates)) || state.Is(types.SpecStateAborted) {
+ suite.currentSpecReport.State = state
+ suite.currentSpecReport.Failure = failure
+ }
+ suite.currentSpecReport.CapturedGinkgoWriterOutput += string(suite.writer.Bytes())
+ suite.currentSpecReport.CapturedStdOutErr += suite.outputInterceptor.StopInterceptingAndReturnOutput()
+ }
+}
+
+func (suite *Suite) runSuiteNode(node Node) {
+ if suite.config.DryRun {
+ suite.currentSpecReport.State = types.SpecStatePassed
+ return
+ }
+
+ suite.writer.Truncate()
+ suite.outputInterceptor.StartInterceptingOutput()
+ suite.currentSpecReport.StartTime = time.Now()
+
+ var err error
+ switch node.NodeType {
+ case types.NodeTypeBeforeSuite, types.NodeTypeAfterSuite:
+ suite.currentSpecReport.State, suite.currentSpecReport.Failure = suite.runNode(node, time.Time{}, "")
+ case types.NodeTypeCleanupAfterSuite:
+ if suite.config.ParallelTotal > 1 && suite.config.ParallelProcess == 1 {
+ err = suite.client.BlockUntilNonprimaryProcsHaveFinished()
+ }
+ if err == nil {
+ suite.currentSpecReport.State, suite.currentSpecReport.Failure = suite.runNode(node, time.Time{}, "")
+ }
+ case types.NodeTypeSynchronizedBeforeSuite:
+ var data []byte
+ var runAllProcs bool
+ if suite.config.ParallelProcess == 1 {
+ if suite.config.ParallelTotal > 1 {
+ suite.outputInterceptor.StopInterceptingAndReturnOutput()
+ suite.outputInterceptor.StartInterceptingOutputAndForwardTo(suite.client)
+ }
+ node.Body = func(c SpecContext) { data = node.SynchronizedBeforeSuiteProc1Body(c) }
+ node.HasContext = node.SynchronizedBeforeSuiteProc1BodyHasContext
+ suite.currentSpecReport.State, suite.currentSpecReport.Failure = suite.runNode(node, time.Time{}, "")
+ if suite.config.ParallelTotal > 1 {
+ suite.currentSpecReport.CapturedStdOutErr += suite.outputInterceptor.StopInterceptingAndReturnOutput()
+ suite.outputInterceptor.StartInterceptingOutput()
+ if suite.currentSpecReport.State.Is(types.SpecStatePassed) {
+ err = suite.client.PostSynchronizedBeforeSuiteCompleted(types.SpecStatePassed, data)
+ } else {
+ err = suite.client.PostSynchronizedBeforeSuiteCompleted(suite.currentSpecReport.State, nil)
+ }
+ }
+ runAllProcs = suite.currentSpecReport.State.Is(types.SpecStatePassed) && err == nil
+ } else {
+ var proc1State types.SpecState
+ proc1State, data, err = suite.client.BlockUntilSynchronizedBeforeSuiteData()
+ switch proc1State {
+ case types.SpecStatePassed:
+ runAllProcs = true
+ case types.SpecStateFailed, types.SpecStatePanicked, types.SpecStateTimedout:
+ err = types.GinkgoErrors.SynchronizedBeforeSuiteFailedOnProc1()
+ case types.SpecStateInterrupted, types.SpecStateAborted, types.SpecStateSkipped:
+ suite.currentSpecReport.State = proc1State
+ }
+ }
+ if runAllProcs {
+ node.Body = func(c SpecContext) { node.SynchronizedBeforeSuiteAllProcsBody(c, data) }
+ node.HasContext = node.SynchronizedBeforeSuiteAllProcsBodyHasContext
+ suite.currentSpecReport.State, suite.currentSpecReport.Failure = suite.runNode(node, time.Time{}, "")
+ }
+ case types.NodeTypeSynchronizedAfterSuite:
+ node.Body = node.SynchronizedAfterSuiteAllProcsBody
+ node.HasContext = node.SynchronizedAfterSuiteAllProcsBodyHasContext
+ suite.currentSpecReport.State, suite.currentSpecReport.Failure = suite.runNode(node, time.Time{}, "")
+ if suite.config.ParallelProcess == 1 {
+ if suite.config.ParallelTotal > 1 {
+ err = suite.client.BlockUntilNonprimaryProcsHaveFinished()
+ }
+ if err == nil {
+ if suite.config.ParallelTotal > 1 {
+ suite.currentSpecReport.CapturedStdOutErr += suite.outputInterceptor.StopInterceptingAndReturnOutput()
+ suite.outputInterceptor.StartInterceptingOutputAndForwardTo(suite.client)
+ }
+
+ node.Body = node.SynchronizedAfterSuiteProc1Body
+ node.HasContext = node.SynchronizedAfterSuiteProc1BodyHasContext
+ state, failure := suite.runNode(node, time.Time{}, "")
+ if suite.currentSpecReport.State.Is(types.SpecStatePassed) {
+ suite.currentSpecReport.State, suite.currentSpecReport.Failure = state, failure
+ }
+ }
+ }
+ }
+
+ if err != nil && !suite.currentSpecReport.State.Is(types.SpecStateFailureStates) {
+ suite.currentSpecReport.State, suite.currentSpecReport.Failure = types.SpecStateFailed, suite.failureForLeafNodeWithMessage(node, err.Error())
+ suite.reporter.EmitFailure(suite.currentSpecReport.State, suite.currentSpecReport.Failure)
+ }
+
+ suite.currentSpecReport.EndTime = time.Now()
+ suite.currentSpecReport.RunTime = suite.currentSpecReport.EndTime.Sub(suite.currentSpecReport.StartTime)
+ suite.currentSpecReport.CapturedGinkgoWriterOutput = string(suite.writer.Bytes())
+ suite.currentSpecReport.CapturedStdOutErr += suite.outputInterceptor.StopInterceptingAndReturnOutput()
+}
+
+func (suite *Suite) runReportSuiteNodesIfNeedBe(nodeType types.NodeType) {
+ nodes := suite.suiteNodes.WithType(nodeType)
+ // only run ReportAfterSuite on proc 1
+ if nodeType.Is(types.NodeTypeReportAfterSuite) && suite.config.ParallelProcess != 1 {
+ return
+ }
+ // if we're running ReportBeforeSuite on proc > 1 - we should wait until proc 1 has completed
+ if nodeType.Is(types.NodeTypeReportBeforeSuite) && suite.config.ParallelProcess != 1 && len(nodes) > 0 {
+ state, err := suite.client.BlockUntilReportBeforeSuiteCompleted()
+ if err != nil || state.Is(types.SpecStateFailed) {
+ suite.report.SuiteSucceeded = false
+ }
+ return
+ }
+
+ for _, node := range nodes {
+ suite.selectiveLock.Lock()
+ suite.currentSpecReport = types.SpecReport{
+ LeafNodeType: node.NodeType,
+ LeafNodeLocation: node.CodeLocation,
+ LeafNodeText: node.Text,
+ ParallelProcess: suite.config.ParallelProcess,
+ RunningInParallel: suite.isRunningInParallel(),
+ }
+ suite.selectiveLock.Unlock()
+
+ suite.reporter.WillRun(suite.currentSpecReport)
+ suite.runReportSuiteNode(node, suite.report)
+ suite.processCurrentSpecReport()
+ }
+
+	// if we're running ReportBeforeSuite and we're running in parallel - we should tell the other procs that we're done
+ if nodeType.Is(types.NodeTypeReportBeforeSuite) && suite.isRunningInParallel() && len(nodes) > 0 {
+ if suite.report.SuiteSucceeded {
+ suite.client.PostReportBeforeSuiteCompleted(types.SpecStatePassed)
+ } else {
+ suite.client.PostReportBeforeSuiteCompleted(types.SpecStateFailed)
+ }
+ }
+}
+
+func (suite *Suite) runReportSuiteNode(node Node, report types.Report) {
+ suite.writer.Truncate()
+ suite.outputInterceptor.StartInterceptingOutput()
+ suite.currentSpecReport.StartTime = time.Now()
+
+ // if we're running a ReportAfterSuite in parallel (on proc 1) we (a) wait until other procs have exited and
+ // (b) always fetch the latest report as prior ReportAfterSuites will contribute to it
+ if node.NodeType.Is(types.NodeTypeReportAfterSuite) && suite.isRunningInParallel() {
+ aggregatedReport, err := suite.client.BlockUntilAggregatedNonprimaryProcsReport()
+ if err != nil {
+ suite.currentSpecReport.State, suite.currentSpecReport.Failure = types.SpecStateFailed, suite.failureForLeafNodeWithMessage(node, err.Error())
+ suite.reporter.EmitFailure(suite.currentSpecReport.State, suite.currentSpecReport.Failure)
+ return
+ }
+ report = report.Add(aggregatedReport)
+ }
+
+ node.Body = func(SpecContext) { node.ReportSuiteBody(report) }
+ suite.currentSpecReport.State, suite.currentSpecReport.Failure = suite.runNode(node, time.Time{}, "")
+
+ suite.currentSpecReport.EndTime = time.Now()
+ suite.currentSpecReport.RunTime = suite.currentSpecReport.EndTime.Sub(suite.currentSpecReport.StartTime)
+ suite.currentSpecReport.CapturedGinkgoWriterOutput = string(suite.writer.Bytes())
+ suite.currentSpecReport.CapturedStdOutErr = suite.outputInterceptor.StopInterceptingAndReturnOutput()
+}
+
+func (suite *Suite) runNode(node Node, specDeadline time.Time, text string) (types.SpecState, types.Failure) {
+ if node.NodeType.Is(types.NodeTypeCleanupAfterEach | types.NodeTypeCleanupAfterAll | types.NodeTypeCleanupAfterSuite) {
+ suite.cleanupNodes = suite.cleanupNodes.WithoutNode(node)
+ }
+
+ interruptStatus := suite.interruptHandler.Status()
+ if interruptStatus.Level == interrupt_handler.InterruptLevelBailOut {
+ return types.SpecStateSkipped, types.Failure{}
+ }
+ if interruptStatus.Level == interrupt_handler.InterruptLevelReportOnly && !node.NodeType.Is(types.NodeTypesAllowedDuringReportInterrupt) {
+ return types.SpecStateSkipped, types.Failure{}
+ }
+ if interruptStatus.Level == interrupt_handler.InterruptLevelCleanupAndReport && !node.NodeType.Is(types.NodeTypesAllowedDuringReportInterrupt|types.NodeTypesAllowedDuringCleanupInterrupt) {
+ return types.SpecStateSkipped, types.Failure{}
+ }
+
+ suite.selectiveLock.Lock()
+ suite.currentNode = node
+ suite.currentNodeStartTime = time.Now()
+ suite.currentByStep = types.SpecEvent{}
+ suite.selectiveLock.Unlock()
+ defer func() {
+ suite.selectiveLock.Lock()
+ suite.currentNode = Node{}
+ suite.currentNodeStartTime = time.Time{}
+ suite.selectiveLock.Unlock()
+ }()
+
+ if text == "" {
+ text = "TOP-LEVEL"
+ }
+ event := suite.handleSpecEvent(types.SpecEvent{
+ SpecEventType: types.SpecEventNodeStart,
+ NodeType: node.NodeType,
+ Message: text,
+ CodeLocation: node.CodeLocation,
+ })
+ defer func() {
+ suite.handleSpecEventEnd(types.SpecEventNodeEnd, event)
+ }()
+
+ var failure types.Failure
+ failure.FailureNodeType, failure.FailureNodeLocation = node.NodeType, node.CodeLocation
+ if node.NodeType.Is(types.NodeTypeIt) || node.NodeType.Is(types.NodeTypesForSuiteLevelNodes) {
+ failure.FailureNodeContext = types.FailureNodeIsLeafNode
+ } else if node.NestingLevel <= 0 {
+ failure.FailureNodeContext = types.FailureNodeAtTopLevel
+ } else {
+ failure.FailureNodeContext, failure.FailureNodeContainerIndex = types.FailureNodeInContainer, node.NestingLevel-1
+ }
+ var outcome types.SpecState
+
+ gracePeriod := suite.config.GracePeriod
+ if node.GracePeriod >= 0 {
+ gracePeriod = node.GracePeriod
+ }
+
+ now := time.Now()
+ deadline := suite.deadline
+ timeoutInPlay := "suite"
+ if deadline.IsZero() || (!specDeadline.IsZero() && specDeadline.Before(deadline)) {
+ deadline = specDeadline
+ timeoutInPlay = "spec"
+ }
+ if node.NodeTimeout > 0 && (deadline.IsZero() || deadline.Sub(now) > node.NodeTimeout) {
+ deadline = now.Add(node.NodeTimeout)
+ timeoutInPlay = "node"
+ }
+ if (!deadline.IsZero() && deadline.Before(now)) || interruptStatus.Interrupted() {
+ //we're out of time already. let's wait for a NodeTimeout if we have it, or GracePeriod if we don't
+ if node.NodeTimeout > 0 {
+ deadline = now.Add(node.NodeTimeout)
+ timeoutInPlay = "node"
+ } else {
+ deadline = now.Add(gracePeriod)
+ timeoutInPlay = "grace period"
+ }
+ }
+
+ if !node.HasContext {
+ // this maps onto the pre-context behavior:
+ // - an interrupted node exits immediately. with this, context-less nodes that are in a spec with a SpecTimeout and/or are interrupted by other means will simply exit immediately after the timeout/interrupt
+ // - clean up nodes have up to GracePeriod (formerly hard-coded at 30s) to complete before they are interrupted
+ gracePeriod = 0
+ }
+
+ sc := NewSpecContext(suite)
+ defer sc.cancel()
+
+ suite.selectiveLock.Lock()
+ suite.currentSpecContext = sc
+ suite.selectiveLock.Unlock()
+
+ var deadlineChannel <-chan time.Time
+ if !deadline.IsZero() {
+ deadlineChannel = time.After(deadline.Sub(now))
+ }
+ var gracePeriodChannel <-chan time.Time
+
+ outcomeC := make(chan types.SpecState)
+ failureC := make(chan types.Failure)
+
+ go func() {
+ finished := false
+ defer func() {
+ if e := recover(); e != nil || !finished {
+ suite.failer.Panic(types.NewCodeLocationWithStackTrace(2), e)
+ }
+
+ outcomeFromRun, failureFromRun := suite.failer.Drain()
+ failureFromRun.TimelineLocation = suite.generateTimelineLocation()
+ outcomeC <- outcomeFromRun
+ failureC <- failureFromRun
+ }()
+
+ node.Body(sc)
+ finished = true
+ }()
+
+ // progress polling timer and channel
+ var emitProgressNow <-chan time.Time
+ var progressPoller *time.Timer
+ var pollProgressAfter, pollProgressInterval = suite.config.PollProgressAfter, suite.config.PollProgressInterval
+ if node.PollProgressAfter >= 0 {
+ pollProgressAfter = node.PollProgressAfter
+ }
+ if node.PollProgressInterval >= 0 {
+ pollProgressInterval = node.PollProgressInterval
+ }
+ if pollProgressAfter > 0 {
+ progressPoller = time.NewTimer(pollProgressAfter)
+ emitProgressNow = progressPoller.C
+ defer progressPoller.Stop()
+ }
+
+ // now we wait for an outcome, an interrupt, a timeout, or a progress poll
+ for {
+ select {
+ case outcomeFromRun := <-outcomeC:
+ failureFromRun := <-failureC
+ if outcome.Is(types.SpecStateInterrupted | types.SpecStateTimedout) {
+ // we've already been interrupted/timed out. we just managed to actually exit
+ // before the grace period elapsed
+ // if we have a failure message we attach it as an additional failure
+ if outcomeFromRun != types.SpecStatePassed {
+ additionalFailure := types.AdditionalFailure{
+ State: outcomeFromRun,
+ Failure: failure, //we make a copy - this will include all the configuration set up above...
+ }
+ //...and then we update the failure with the details from failureFromRun
+ additionalFailure.Failure.Location, additionalFailure.Failure.ForwardedPanic, additionalFailure.Failure.TimelineLocation = failureFromRun.Location, failureFromRun.ForwardedPanic, failureFromRun.TimelineLocation
+ additionalFailure.Failure.ProgressReport = types.ProgressReport{}
+ if outcome == types.SpecStateTimedout {
+ additionalFailure.Failure.Message = fmt.Sprintf("A %s timeout occurred and then the following failure was recorded in the timedout node before it exited:\n%s", timeoutInPlay, failureFromRun.Message)
+ } else {
+ additionalFailure.Failure.Message = fmt.Sprintf("An interrupt occurred and then the following failure was recorded in the interrupted node before it exited:\n%s", failureFromRun.Message)
+ }
+ suite.reporter.EmitFailure(additionalFailure.State, additionalFailure.Failure)
+ failure.AdditionalFailure = &additionalFailure
+ }
+ return outcome, failure
+ }
+ if outcomeFromRun.Is(types.SpecStatePassed) {
+ return outcomeFromRun, types.Failure{}
+ } else {
+ failure.Message, failure.Location, failure.ForwardedPanic, failure.TimelineLocation = failureFromRun.Message, failureFromRun.Location, failureFromRun.ForwardedPanic, failureFromRun.TimelineLocation
+ suite.reporter.EmitFailure(outcomeFromRun, failure)
+ return outcomeFromRun, failure
+ }
+ case <-gracePeriodChannel:
+ if node.HasContext && outcome.Is(types.SpecStateTimedout) {
+ report := suite.generateProgressReport(false)
+ report.Message = "{{bold}}{{orange}}A running node failed to exit in time{{/}}\nGinkgo is moving on but a node has timed out and failed to exit before its grace period elapsed. The node has now leaked and is running in the background.\nHere's a current progress report:"
+ suite.emitProgressReport(report)
+ }
+ return outcome, failure
+ case <-deadlineChannel:
+ // we're out of time - the outcome is a timeout and we capture the failure and progress report
+ outcome = types.SpecStateTimedout
+ failure.Message, failure.Location, failure.TimelineLocation = fmt.Sprintf("A %s timeout occurred", timeoutInPlay), node.CodeLocation, suite.generateTimelineLocation()
+ failure.ProgressReport = suite.generateProgressReport(false).WithoutCapturedGinkgoWriterOutput()
+ failure.ProgressReport.Message = fmt.Sprintf("{{bold}}This is the Progress Report generated when the %s timeout occurred:{{/}}", timeoutInPlay)
+ deadlineChannel = nil
+ suite.reporter.EmitFailure(outcome, failure)
+
+ // tell the spec to stop. it's important we generate the progress report first to make sure we capture where
+ // the spec is actually stuck
+ sc.cancel()
+ //and now we wait for the grace period
+ gracePeriodChannel = time.After(gracePeriod)
+ case <-interruptStatus.Channel:
+ interruptStatus = suite.interruptHandler.Status()
+ // ignore interruption from other process if we are cleaning up or reporting
+ if interruptStatus.Cause == interrupt_handler.InterruptCauseAbortByOtherProcess &&
+ node.NodeType.Is(types.NodeTypesAllowedDuringReportInterrupt|types.NodeTypesAllowedDuringCleanupInterrupt) {
+ continue
+ }
+
+ deadlineChannel = nil // don't worry about deadlines, time's up now
+
+ failureTimelineLocation := suite.generateTimelineLocation()
+ progressReport := suite.generateProgressReport(true)
+
+ if outcome == types.SpecStateInvalid {
+ outcome = types.SpecStateInterrupted
+ failure.Message, failure.Location, failure.TimelineLocation = interruptStatus.Message(), node.CodeLocation, failureTimelineLocation
+ if interruptStatus.ShouldIncludeProgressReport() {
+ failure.ProgressReport = progressReport.WithoutCapturedGinkgoWriterOutput()
+ failure.ProgressReport.Message = "{{bold}}This is the Progress Report generated when the interrupt was received:{{/}}"
+ }
+ suite.reporter.EmitFailure(outcome, failure)
+ }
+
+ progressReport = progressReport.WithoutOtherGoroutines()
+ sc.cancel()
+
+ if interruptStatus.Level == interrupt_handler.InterruptLevelBailOut {
+ if interruptStatus.ShouldIncludeProgressReport() {
+ progressReport.Message = fmt.Sprintf("{{bold}}{{orange}}%s{{/}}\n{{bold}}{{red}}Final interrupt received{{/}}; Ginkgo will not run any cleanup or reporting nodes and will terminate as soon as possible.\nHere's a current progress report:", interruptStatus.Message())
+ suite.emitProgressReport(progressReport)
+ }
+ return outcome, failure
+ }
+ if interruptStatus.ShouldIncludeProgressReport() {
+ if interruptStatus.Level == interrupt_handler.InterruptLevelCleanupAndReport {
+ progressReport.Message = fmt.Sprintf("{{bold}}{{orange}}%s{{/}}\nFirst interrupt received; Ginkgo will run any cleanup and reporting nodes but will skip all remaining specs. {{bold}}Interrupt again to skip cleanup{{/}}.\nHere's a current progress report:", interruptStatus.Message())
+ } else if interruptStatus.Level == interrupt_handler.InterruptLevelReportOnly {
+ progressReport.Message = fmt.Sprintf("{{bold}}{{orange}}%s{{/}}\nSecond interrupt received; Ginkgo will run any reporting nodes but will skip all remaining specs and cleanup nodes. {{bold}}Interrupt again to bail immediately{{/}}.\nHere's a current progress report:", interruptStatus.Message())
+ }
+ suite.emitProgressReport(progressReport)
+ }
+
+ if gracePeriodChannel == nil {
+ // we haven't given grace yet... so let's
+ gracePeriodChannel = time.After(gracePeriod)
+ } else {
+ // we've already given grace. time's up. now.
+ return outcome, failure
+ }
+ case <-emitProgressNow:
+ report := suite.generateProgressReport(false)
+ report.Message = "{{bold}}Automatically polling progress:{{/}}"
+ suite.emitProgressReport(report)
+ if pollProgressInterval > 0 {
+ progressPoller.Reset(pollProgressInterval)
+ }
+ }
+ }
+}
+
+// TODO: search for usages and consider if reporter.EmitFailure() is necessary
+func (suite *Suite) failureForLeafNodeWithMessage(node Node, message string) types.Failure {
+ return types.Failure{
+ Message: message,
+ Location: node.CodeLocation,
+ TimelineLocation: suite.generateTimelineLocation(),
+ FailureNodeContext: types.FailureNodeIsLeafNode,
+ FailureNodeType: node.NodeType,
+ FailureNodeLocation: node.CodeLocation,
+ }
+}
+
+func max(a, b int) int {
+ if a > b {
+ return a
+ }
+ return b
+}
diff --git a/vendor/github.com/onsi/ginkgo/v2/internal/testingtproxy/testing_t_proxy.go b/vendor/github.com/onsi/ginkgo/v2/internal/testingtproxy/testing_t_proxy.go
new file mode 100644
index 000000000..73e265565
--- /dev/null
+++ b/vendor/github.com/onsi/ginkgo/v2/internal/testingtproxy/testing_t_proxy.go
@@ -0,0 +1,210 @@
+package testingtproxy
+
+import (
+ "fmt"
+ "io"
+ "os"
+
+ "github.com/onsi/ginkgo/v2/formatter"
+ "github.com/onsi/ginkgo/v2/internal"
+ "github.com/onsi/ginkgo/v2/reporters"
+ "github.com/onsi/ginkgo/v2/types"
+)
+
+type failFunc func(message string, callerSkip ...int)
+type skipFunc func(message string, callerSkip ...int)
+type cleanupFunc func(args ...any)
+type reportFunc func() types.SpecReport
+type addReportEntryFunc func(names string, args ...any)
+type ginkgoWriterInterface interface {
+ io.Writer
+
+ Print(a ...interface{})
+ Printf(format string, a ...interface{})
+ Println(a ...interface{})
+}
+type ginkgoRecoverFunc func()
+type attachProgressReporterFunc func(func() string) func()
+
+func New(writer ginkgoWriterInterface, fail failFunc, skip skipFunc, cleanup cleanupFunc, report reportFunc, addReportEntry addReportEntryFunc, ginkgoRecover ginkgoRecoverFunc, attachProgressReporter attachProgressReporterFunc, randomSeed int64, parallelProcess int, parallelTotal int, noColor bool, offset int) *ginkgoTestingTProxy {
+ return &ginkgoTestingTProxy{
+ fail: fail,
+ offset: offset,
+ writer: writer,
+ skip: skip,
+ cleanup: cleanup,
+ report: report,
+ addReportEntry: addReportEntry,
+ ginkgoRecover: ginkgoRecover,
+ attachProgressReporter: attachProgressReporter,
+ randomSeed: randomSeed,
+ parallelProcess: parallelProcess,
+ parallelTotal: parallelTotal,
+ f: formatter.NewWithNoColorBool(noColor),
+ }
+}
+
+type ginkgoTestingTProxy struct {
+ fail failFunc
+ skip skipFunc
+ cleanup cleanupFunc
+ report reportFunc
+ offset int
+ writer ginkgoWriterInterface
+ addReportEntry addReportEntryFunc
+ ginkgoRecover ginkgoRecoverFunc
+ attachProgressReporter attachProgressReporterFunc
+ randomSeed int64
+ parallelProcess int
+ parallelTotal int
+ f formatter.Formatter
+}
+
+// basic testing.T support
+
+func (t *ginkgoTestingTProxy) Cleanup(f func()) {
+ t.cleanup(f, internal.Offset(1))
+}
+
+func (t *ginkgoTestingTProxy) Setenv(key, value string) {
+ originalValue, exists := os.LookupEnv(key)
+ if exists {
+ t.cleanup(os.Setenv, key, originalValue, internal.Offset(1))
+ } else {
+ t.cleanup(os.Unsetenv, key, internal.Offset(1))
+ }
+
+ err := os.Setenv(key, value)
+ if err != nil {
+ t.fail(fmt.Sprintf("Failed to set environment variable: %v", err), 1)
+ }
+}
+
+func (t *ginkgoTestingTProxy) Error(args ...interface{}) {
+ t.fail(fmt.Sprintln(args...), t.offset)
+}
+
+func (t *ginkgoTestingTProxy) Errorf(format string, args ...interface{}) {
+ t.fail(fmt.Sprintf(format, args...), t.offset)
+}
+
+func (t *ginkgoTestingTProxy) Fail() {
+ t.fail("failed", t.offset)
+}
+
+func (t *ginkgoTestingTProxy) FailNow() {
+ t.fail("failed", t.offset)
+}
+
+func (t *ginkgoTestingTProxy) Failed() bool {
+ return t.report().Failed()
+}
+
+func (t *ginkgoTestingTProxy) Fatal(args ...interface{}) {
+ t.fail(fmt.Sprintln(args...), t.offset)
+}
+
+func (t *ginkgoTestingTProxy) Fatalf(format string, args ...interface{}) {
+ t.fail(fmt.Sprintf(format, args...), t.offset)
+}
+
+func (t *ginkgoTestingTProxy) Helper() {
+ types.MarkAsHelper(1)
+}
+
+func (t *ginkgoTestingTProxy) Log(args ...interface{}) {
+ fmt.Fprintln(t.writer, args...)
+}
+
+func (t *ginkgoTestingTProxy) Logf(format string, args ...interface{}) {
+ t.Log(fmt.Sprintf(format, args...))
+}
+
+func (t *ginkgoTestingTProxy) Name() string {
+ return t.report().FullText()
+}
+
+func (t *ginkgoTestingTProxy) Parallel() {
+ // No-op
+}
+
+func (t *ginkgoTestingTProxy) Skip(args ...interface{}) {
+ t.skip(fmt.Sprintln(args...), t.offset)
+}
+
+func (t *ginkgoTestingTProxy) SkipNow() {
+ t.skip("skip", t.offset)
+}
+
+func (t *ginkgoTestingTProxy) Skipf(format string, args ...interface{}) {
+ t.skip(fmt.Sprintf(format, args...), t.offset)
+}
+
+func (t *ginkgoTestingTProxy) Skipped() bool {
+ return t.report().State.Is(types.SpecStateSkipped)
+}
+
+func (t *ginkgoTestingTProxy) TempDir() string {
+ tmpDir, err := os.MkdirTemp("", "ginkgo")
+ if err != nil {
+ t.fail(fmt.Sprintf("Failed to create temporary directory: %v", err), 1)
+ return ""
+ }
+ t.cleanup(os.RemoveAll, tmpDir)
+
+ return tmpDir
+}
+
+// FullGinkgoTInterface
+func (t *ginkgoTestingTProxy) AddReportEntryVisibilityAlways(name string, args ...any) {
+ finalArgs := []any{internal.Offset(1), types.ReportEntryVisibilityAlways}
+ t.addReportEntry(name, append(finalArgs, args...)...)
+}
+func (t *ginkgoTestingTProxy) AddReportEntryVisibilityFailureOrVerbose(name string, args ...any) {
+ finalArgs := []any{internal.Offset(1), types.ReportEntryVisibilityFailureOrVerbose}
+ t.addReportEntry(name, append(finalArgs, args...)...)
+}
+func (t *ginkgoTestingTProxy) AddReportEntryVisibilityNever(name string, args ...any) {
+ finalArgs := []any{internal.Offset(1), types.ReportEntryVisibilityNever}
+ t.addReportEntry(name, append(finalArgs, args...)...)
+}
+func (t *ginkgoTestingTProxy) Print(a ...any) {
+ t.writer.Print(a...)
+}
+func (t *ginkgoTestingTProxy) Printf(format string, a ...any) {
+ t.writer.Printf(format, a...)
+}
+func (t *ginkgoTestingTProxy) Println(a ...any) {
+ t.writer.Println(a...)
+}
+func (t *ginkgoTestingTProxy) F(format string, args ...any) string {
+ return t.f.F(format, args...)
+}
+func (t *ginkgoTestingTProxy) Fi(indentation uint, format string, args ...any) string {
+ return t.f.Fi(indentation, format, args...)
+}
+func (t *ginkgoTestingTProxy) Fiw(indentation uint, maxWidth uint, format string, args ...any) string {
+ return t.f.Fiw(indentation, maxWidth, format, args...)
+}
+func (t *ginkgoTestingTProxy) RenderTimeline() string {
+ return reporters.RenderTimeline(t.report(), false)
+}
+func (t *ginkgoTestingTProxy) GinkgoRecover() {
+ t.ginkgoRecover()
+}
+func (t *ginkgoTestingTProxy) DeferCleanup(args ...any) {
+ finalArgs := []any{internal.Offset(1)}
+ t.cleanup(append(finalArgs, args...)...)
+}
+func (t *ginkgoTestingTProxy) RandomSeed() int64 {
+ return t.randomSeed
+}
+func (t *ginkgoTestingTProxy) ParallelProcess() int {
+ return t.parallelProcess
+}
+func (t *ginkgoTestingTProxy) ParallelTotal() int {
+ return t.parallelTotal
+}
+func (t *ginkgoTestingTProxy) AttachProgressReporter(f func() string) func() {
+ return t.attachProgressReporter(f)
+}
diff --git a/vendor/github.com/onsi/ginkgo/v2/internal/tree.go b/vendor/github.com/onsi/ginkgo/v2/internal/tree.go
new file mode 100644
index 000000000..f9d1eeb8f
--- /dev/null
+++ b/vendor/github.com/onsi/ginkgo/v2/internal/tree.go
@@ -0,0 +1,77 @@
+package internal
+
+import "github.com/onsi/ginkgo/v2/types"
+
+type TreeNode struct {
+ Node Node
+ Parent *TreeNode
+ Children TreeNodes
+}
+
+func (tn *TreeNode) AppendChild(child *TreeNode) {
+ tn.Children = append(tn.Children, child)
+ child.Parent = tn
+}
+
+func (tn *TreeNode) AncestorNodeChain() Nodes {
+ if tn.Parent == nil || tn.Parent.Node.IsZero() {
+ return Nodes{tn.Node}
+ }
+ return append(tn.Parent.AncestorNodeChain(), tn.Node)
+}
+
+type TreeNodes []*TreeNode
+
+func (tn TreeNodes) Nodes() Nodes {
+ out := make(Nodes, len(tn))
+ for i := range tn {
+ out[i] = tn[i].Node
+ }
+ return out
+}
+
+func (tn TreeNodes) WithID(id uint) *TreeNode {
+ for i := range tn {
+ if tn[i].Node.ID == id {
+ return tn[i]
+ }
+ }
+
+ return nil
+}
+
+func GenerateSpecsFromTreeRoot(tree *TreeNode) Specs {
+ var walkTree func(nestingLevel int, lNodes Nodes, rNodes Nodes, trees TreeNodes) Specs
+ walkTree = func(nestingLevel int, lNodes Nodes, rNodes Nodes, trees TreeNodes) Specs {
+ tests := Specs{}
+
+ nodes := make(Nodes, len(trees))
+ for i := range trees {
+ nodes[i] = trees[i].Node
+ nodes[i].NestingLevel = nestingLevel
+ }
+
+ for i := range nodes {
+ if !nodes[i].NodeType.Is(types.NodeTypesForContainerAndIt) {
+ continue
+ }
+ leftNodes, rightNodes := nodes.SplitAround(nodes[i])
+ leftNodes = leftNodes.WithoutType(types.NodeTypesForContainerAndIt)
+ rightNodes = rightNodes.WithoutType(types.NodeTypesForContainerAndIt)
+
+ leftNodes = lNodes.CopyAppend(leftNodes...)
+ rightNodes = rightNodes.CopyAppend(rNodes...)
+
+ if nodes[i].NodeType.Is(types.NodeTypeIt) {
+ tests = append(tests, Spec{Nodes: leftNodes.CopyAppend(nodes[i]).CopyAppend(rightNodes...)})
+ } else {
+ treeNode := trees.WithID(nodes[i].ID)
+ tests = append(tests, walkTree(nestingLevel+1, leftNodes.CopyAppend(nodes[i]), rightNodes, treeNode.Children)...)
+ }
+ }
+
+ return tests
+ }
+
+ return walkTree(0, Nodes{}, Nodes{}, tree.Children)
+}
diff --git a/vendor/github.com/onsi/ginkgo/v2/internal/writer.go b/vendor/github.com/onsi/ginkgo/v2/internal/writer.go
new file mode 100644
index 000000000..574f172df
--- /dev/null
+++ b/vendor/github.com/onsi/ginkgo/v2/internal/writer.go
@@ -0,0 +1,140 @@
+package internal
+
+import (
+ "bytes"
+ "fmt"
+ "io"
+ "sync"
+
+ "github.com/go-logr/logr"
+ "github.com/go-logr/logr/funcr"
+)
+
+type WriterMode uint
+
+const (
+ WriterModeStreamAndBuffer WriterMode = iota
+ WriterModeBufferOnly
+)
+
+type WriterInterface interface {
+ io.Writer
+
+ Truncate()
+ Bytes() []byte
+ Len() int
+}
+
+// Writer implements WriterInterface and GinkgoWriterInterface
+type Writer struct {
+ buffer *bytes.Buffer
+ outWriter io.Writer
+ lock *sync.Mutex
+ mode WriterMode
+
+ streamIndent []byte
+ indentNext bool
+
+ teeWriters []io.Writer
+}
+
+func NewWriter(outWriter io.Writer) *Writer {
+ return &Writer{
+ buffer: &bytes.Buffer{},
+ lock: &sync.Mutex{},
+ outWriter: outWriter,
+ mode: WriterModeStreamAndBuffer,
+ streamIndent: []byte(" "),
+ indentNext: true,
+ }
+}
+
+func (w *Writer) SetMode(mode WriterMode) {
+ w.lock.Lock()
+ defer w.lock.Unlock()
+ w.mode = mode
+}
+
+func (w *Writer) Len() int {
+ w.lock.Lock()
+ defer w.lock.Unlock()
+ return w.buffer.Len()
+}
+
+var newline = []byte("\n")
+
+func (w *Writer) Write(b []byte) (n int, err error) {
+ w.lock.Lock()
+ defer w.lock.Unlock()
+
+ for _, teeWriter := range w.teeWriters {
+ teeWriter.Write(b)
+ }
+
+ if w.mode == WriterModeStreamAndBuffer {
+ line, remaining, found := []byte{}, b, false
+ for len(remaining) > 0 {
+ line, remaining, found = bytes.Cut(remaining, newline)
+ if len(line) > 0 {
+ if w.indentNext {
+ w.outWriter.Write(w.streamIndent)
+ w.indentNext = false
+ }
+ w.outWriter.Write(line)
+ }
+ if found {
+ w.outWriter.Write(newline)
+ w.indentNext = true
+ }
+ }
+ }
+ return w.buffer.Write(b)
+}
+
+func (w *Writer) Truncate() {
+ w.lock.Lock()
+ defer w.lock.Unlock()
+ w.buffer.Reset()
+}
+
+func (w *Writer) Bytes() []byte {
+ w.lock.Lock()
+ defer w.lock.Unlock()
+ b := w.buffer.Bytes()
+ copied := make([]byte, len(b))
+ copy(copied, b)
+ return copied
+}
+
+// GinkgoWriterInterface
+func (w *Writer) TeeTo(writer io.Writer) {
+ w.lock.Lock()
+ defer w.lock.Unlock()
+
+ w.teeWriters = append(w.teeWriters, writer)
+}
+
+func (w *Writer) ClearTeeWriters() {
+ w.lock.Lock()
+ defer w.lock.Unlock()
+
+ w.teeWriters = []io.Writer{}
+}
+
+func (w *Writer) Print(a ...interface{}) {
+ fmt.Fprint(w, a...)
+}
+
+func (w *Writer) Printf(format string, a ...interface{}) {
+ fmt.Fprintf(w, format, a...)
+}
+
+func (w *Writer) Println(a ...interface{}) {
+ fmt.Fprintln(w, a...)
+}
+
+func GinkgoLogrFunc(writer *Writer) logr.Logger {
+ return funcr.New(func(prefix, args string) {
+ writer.Printf("%s\n", args)
+ }, funcr.Options{})
+}
diff --git a/vendor/github.com/onsi/ginkgo/v2/reporters/default_reporter.go b/vendor/github.com/onsi/ginkgo/v2/reporters/default_reporter.go
new file mode 100644
index 000000000..56b7be758
--- /dev/null
+++ b/vendor/github.com/onsi/ginkgo/v2/reporters/default_reporter.go
@@ -0,0 +1,759 @@
+/*
+Ginkgo's Default Reporter
+
+A number of command line flags are available to tweak Ginkgo's default output.
+
+These are documented [here](http://onsi.github.io/ginkgo/#running_tests)
+*/
+package reporters
+
+import (
+ "fmt"
+ "io"
+ "runtime"
+ "strings"
+ "sync"
+ "time"
+
+ "github.com/onsi/ginkgo/v2/formatter"
+ "github.com/onsi/ginkgo/v2/types"
+)
+
+type DefaultReporter struct {
+ conf types.ReporterConfig
+ writer io.Writer
+
+ // managing the emission stream
+ lastCharWasNewline bool
+ lastEmissionWasDelimiter bool
+
+ // rendering
+ specDenoter string
+ retryDenoter string
+ formatter formatter.Formatter
+
+ runningInParallel bool
+ lock *sync.Mutex
+}
+
+func NewDefaultReporterUnderTest(conf types.ReporterConfig, writer io.Writer) *DefaultReporter {
+ reporter := NewDefaultReporter(conf, writer)
+ reporter.formatter = formatter.New(formatter.ColorModePassthrough)
+
+ return reporter
+}
+
+func NewDefaultReporter(conf types.ReporterConfig, writer io.Writer) *DefaultReporter {
+ reporter := &DefaultReporter{
+ conf: conf,
+ writer: writer,
+
+ lastCharWasNewline: true,
+ lastEmissionWasDelimiter: false,
+
+ specDenoter: "•",
+ retryDenoter: "↺",
+ formatter: formatter.NewWithNoColorBool(conf.NoColor),
+ lock: &sync.Mutex{},
+ }
+ if runtime.GOOS == "windows" {
+ reporter.specDenoter = "+"
+ reporter.retryDenoter = "R"
+ }
+
+ return reporter
+}
+
+/* The Reporter Interface */
+
+func (r *DefaultReporter) SuiteWillBegin(report types.Report) {
+ if r.conf.Verbosity().Is(types.VerbosityLevelSuccinct) {
+ r.emit(r.f("[%d] {{bold}}%s{{/}} ", report.SuiteConfig.RandomSeed, report.SuiteDescription))
+ if len(report.SuiteLabels) > 0 {
+ r.emit(r.f("{{coral}}[%s]{{/}} ", strings.Join(report.SuiteLabels, ", ")))
+ }
+ r.emit(r.f("- %d/%d specs ", report.PreRunStats.SpecsThatWillRun, report.PreRunStats.TotalSpecs))
+ if report.SuiteConfig.ParallelTotal > 1 {
+ r.emit(r.f("- %d procs ", report.SuiteConfig.ParallelTotal))
+ }
+ } else {
+ banner := r.f("Running Suite: %s - %s", report.SuiteDescription, report.SuitePath)
+ r.emitBlock(banner)
+ bannerWidth := len(banner)
+ if len(report.SuiteLabels) > 0 {
+ labels := strings.Join(report.SuiteLabels, ", ")
+ r.emitBlock(r.f("{{coral}}[%s]{{/}} ", labels))
+ if len(labels)+2 > bannerWidth {
+ bannerWidth = len(labels) + 2
+ }
+ }
+ r.emitBlock(strings.Repeat("=", bannerWidth))
+
+ out := r.f("Random Seed: {{bold}}%d{{/}}", report.SuiteConfig.RandomSeed)
+ if report.SuiteConfig.RandomizeAllSpecs {
+ out += r.f(" - will randomize all specs")
+ }
+ r.emitBlock(out)
+ r.emit("\n")
+ r.emitBlock(r.f("Will run {{bold}}%d{{/}} of {{bold}}%d{{/}} specs", report.PreRunStats.SpecsThatWillRun, report.PreRunStats.TotalSpecs))
+ if report.SuiteConfig.ParallelTotal > 1 {
+ r.emitBlock(r.f("Running in parallel across {{bold}}%d{{/}} processes", report.SuiteConfig.ParallelTotal))
+ }
+ }
+}
+
+func (r *DefaultReporter) SuiteDidEnd(report types.Report) {
+ failures := report.SpecReports.WithState(types.SpecStateFailureStates)
+ if len(failures) > 0 {
+ r.emitBlock("\n")
+ if len(failures) > 1 {
+ r.emitBlock(r.f("{{red}}{{bold}}Summarizing %d Failures:{{/}}", len(failures)))
+ } else {
+ r.emitBlock(r.f("{{red}}{{bold}}Summarizing 1 Failure:{{/}}"))
+ }
+ for _, specReport := range failures {
+ highlightColor, heading := "{{red}}", "[FAIL]"
+ switch specReport.State {
+ case types.SpecStatePanicked:
+ highlightColor, heading = "{{magenta}}", "[PANICKED!]"
+ case types.SpecStateAborted:
+ highlightColor, heading = "{{coral}}", "[ABORTED]"
+ case types.SpecStateTimedout:
+ highlightColor, heading = "{{orange}}", "[TIMEDOUT]"
+ case types.SpecStateInterrupted:
+ highlightColor, heading = "{{orange}}", "[INTERRUPTED]"
+ }
+ locationBlock := r.codeLocationBlock(specReport, highlightColor, false, true)
+ r.emitBlock(r.fi(1, highlightColor+"%s{{/}} %s", heading, locationBlock))
+ }
+ }
+
+ //summarize the suite
+ if r.conf.Verbosity().Is(types.VerbosityLevelSuccinct) && report.SuiteSucceeded {
+ r.emit(r.f(" {{green}}SUCCESS!{{/}} %s ", report.RunTime))
+ return
+ }
+
+ r.emitBlock("\n")
+ color, status := "{{green}}{{bold}}", "SUCCESS!"
+ if !report.SuiteSucceeded {
+ color, status = "{{red}}{{bold}}", "FAIL!"
+ }
+
+ specs := report.SpecReports.WithLeafNodeType(types.NodeTypeIt) //exclude any suite setup nodes
+ r.emitBlock(r.f(color+"Ran %d of %d Specs in %.3f seconds{{/}}",
+ specs.CountWithState(types.SpecStatePassed)+specs.CountWithState(types.SpecStateFailureStates),
+ report.PreRunStats.TotalSpecs,
+ report.RunTime.Seconds()),
+ )
+
+ switch len(report.SpecialSuiteFailureReasons) {
+ case 0:
+ r.emit(r.f(color+"%s{{/}} -- ", status))
+ case 1:
+ r.emit(r.f(color+"%s - %s{{/}} -- ", status, report.SpecialSuiteFailureReasons[0]))
+ default:
+ r.emitBlock(r.f(color+"%s - %s{{/}}\n", status, strings.Join(report.SpecialSuiteFailureReasons, ", ")))
+ }
+
+ if len(specs) == 0 && report.SpecReports.WithLeafNodeType(types.NodeTypeBeforeSuite|types.NodeTypeSynchronizedBeforeSuite).CountWithState(types.SpecStateFailureStates) > 0 {
+ r.emit(r.f("{{cyan}}{{bold}}A BeforeSuite node failed so all tests were skipped.{{/}}\n"))
+ } else {
+ r.emit(r.f("{{green}}{{bold}}%d Passed{{/}} | ", specs.CountWithState(types.SpecStatePassed)))
+ r.emit(r.f("{{red}}{{bold}}%d Failed{{/}} | ", specs.CountWithState(types.SpecStateFailureStates)))
+ if specs.CountOfFlakedSpecs() > 0 {
+ r.emit(r.f("{{light-yellow}}{{bold}}%d Flaked{{/}} | ", specs.CountOfFlakedSpecs()))
+ }
+ if specs.CountOfRepeatedSpecs() > 0 {
+ r.emit(r.f("{{light-yellow}}{{bold}}%d Repeated{{/}} | ", specs.CountOfRepeatedSpecs()))
+ }
+ r.emit(r.f("{{yellow}}{{bold}}%d Pending{{/}} | ", specs.CountWithState(types.SpecStatePending)))
+ r.emit(r.f("{{cyan}}{{bold}}%d Skipped{{/}}\n", specs.CountWithState(types.SpecStateSkipped)))
+ }
+}
+
+func (r *DefaultReporter) WillRun(report types.SpecReport) {
+ v := r.conf.Verbosity()
+ if v.LT(types.VerbosityLevelVerbose) || report.State.Is(types.SpecStatePending|types.SpecStateSkipped) || report.RunningInParallel {
+ return
+ }
+
+ r.emitDelimiter(0)
+ r.emitBlock(r.f(r.codeLocationBlock(report, "{{/}}", v.Is(types.VerbosityLevelVeryVerbose), false)))
+}
+
+func (r *DefaultReporter) DidRun(report types.SpecReport) {
+ v := r.conf.Verbosity()
+ inParallel := report.RunningInParallel
+
+ header := r.specDenoter
+ if report.LeafNodeType.Is(types.NodeTypesForSuiteLevelNodes) {
+ header = fmt.Sprintf("[%s]", report.LeafNodeType)
+ }
+ highlightColor := r.highlightColorForState(report.State)
+
+ // have we already been streaming the timeline?
+ timelineHasBeenStreaming := v.GTE(types.VerbosityLevelVerbose) && !inParallel
+
+ // should we show the timeline?
+ var timeline types.Timeline
+ showTimeline := !timelineHasBeenStreaming && (v.GTE(types.VerbosityLevelVerbose) || report.Failed())
+ if showTimeline {
+ timeline = report.Timeline().WithoutHiddenReportEntries()
+ keepVeryVerboseSpecEvents := v.Is(types.VerbosityLevelVeryVerbose) ||
+ (v.Is(types.VerbosityLevelVerbose) && r.conf.ShowNodeEvents) ||
+ (report.Failed() && r.conf.ShowNodeEvents)
+ if !keepVeryVerboseSpecEvents {
+ timeline = timeline.WithoutVeryVerboseSpecEvents()
+ }
+ if len(timeline) == 0 && report.CapturedGinkgoWriterOutput == "" {
+ // the timeline is completely empty - don't show it
+ showTimeline = false
+ }
+ if v.LT(types.VerbosityLevelVeryVerbose) && report.CapturedGinkgoWriterOutput == "" && len(timeline) > 0 {
+ //if we aren't -vv and the timeline only has a single failure, don't show it as it will appear at the end of the report
+ failure, isFailure := timeline[0].(types.Failure)
+ if isFailure && (len(timeline) == 1 || (len(timeline) == 2 && failure.AdditionalFailure != nil)) {
+ showTimeline = false
+ }
+ }
+ }
+
+ // should we have a separate section for always-visible reports?
+ showSeparateVisibilityAlwaysReportsSection := !timelineHasBeenStreaming && !showTimeline && report.ReportEntries.HasVisibility(types.ReportEntryVisibilityAlways)
+
+ // should we have a separate section for captured stdout/stderr
+ showSeparateStdSection := inParallel && (report.CapturedStdOutErr != "")
+
+ // given all that - do we have any actual content to show? or are we a single denoter in a stream?
+ reportHasContent := v.Is(types.VerbosityLevelVeryVerbose) || showTimeline || showSeparateVisibilityAlwaysReportsSection || showSeparateStdSection || report.Failed() || (v.Is(types.VerbosityLevelVerbose) && !report.State.Is(types.SpecStateSkipped))
+
+ // should we show a runtime?
+ includeRuntime := !report.State.Is(types.SpecStateSkipped|types.SpecStatePending) || (report.State.Is(types.SpecStateSkipped) && report.Failure.Message != "")
+
+ // should we show the codelocation block?
+ showCodeLocation := !timelineHasBeenStreaming || !report.State.Is(types.SpecStatePassed)
+
+ switch report.State {
+ case types.SpecStatePassed:
+ if report.LeafNodeType.Is(types.NodeTypesForSuiteLevelNodes) && !reportHasContent {
+ return
+ }
+ if report.LeafNodeType.Is(types.NodeTypesForSuiteLevelNodes) {
+ header = fmt.Sprintf("%s PASSED", header)
+ }
+ if report.NumAttempts > 1 && report.MaxFlakeAttempts > 1 {
+ header, reportHasContent = fmt.Sprintf("%s [FLAKEY TEST - TOOK %d ATTEMPTS TO PASS]", r.retryDenoter, report.NumAttempts), true
+ }
+ case types.SpecStatePending:
+ header = "P"
+ if v.GT(types.VerbosityLevelSuccinct) {
+ header, reportHasContent = "P [PENDING]", true
+ }
+ case types.SpecStateSkipped:
+ header = "S"
+ if v.Is(types.VerbosityLevelVeryVerbose) || (v.Is(types.VerbosityLevelVerbose) && report.Failure.Message != "") {
+ header, reportHasContent = "S [SKIPPED]", true
+ }
+ default:
+ header = fmt.Sprintf("%s [%s]", header, r.humanReadableState(report.State))
+ if report.MaxMustPassRepeatedly > 1 {
+ header = fmt.Sprintf("%s DURING REPETITION #%d", header, report.NumAttempts)
+ }
+ }
+
+ // If we have no content to show, jsut emit the header and return
+ if !reportHasContent {
+ r.emit(r.f(highlightColor + header + "{{/}}"))
+ return
+ }
+
+ if includeRuntime {
+ header = r.f("%s [%.3f seconds]", header, report.RunTime.Seconds())
+ }
+
+ // Emit header
+ if !timelineHasBeenStreaming {
+ r.emitDelimiter(0)
+ }
+ r.emitBlock(r.f(highlightColor + header + "{{/}}"))
+ if showCodeLocation {
+ r.emitBlock(r.codeLocationBlock(report, highlightColor, v.Is(types.VerbosityLevelVeryVerbose), false))
+ }
+
+ //Emit Stdout/Stderr Output
+ if showSeparateStdSection {
+ r.emitBlock("\n")
+ r.emitBlock(r.fi(1, "{{gray}}Captured StdOut/StdErr Output >>{{/}}"))
+ r.emitBlock(r.fi(1, "%s", report.CapturedStdOutErr))
+ r.emitBlock(r.fi(1, "{{gray}}<< Captured StdOut/StdErr Output{{/}}"))
+ }
+
+ if showSeparateVisibilityAlwaysReportsSection {
+ r.emitBlock("\n")
+ r.emitBlock(r.fi(1, "{{gray}}Report Entries >>{{/}}"))
+ for _, entry := range report.ReportEntries.WithVisibility(types.ReportEntryVisibilityAlways) {
+ r.emitReportEntry(1, entry)
+ }
+ r.emitBlock(r.fi(1, "{{gray}}<< Report Entries{{/}}"))
+ }
+
+ if showTimeline {
+ r.emitBlock("\n")
+ r.emitBlock(r.fi(1, "{{gray}}Timeline >>{{/}}"))
+ r.emitTimeline(1, report, timeline)
+ r.emitBlock(r.fi(1, "{{gray}}<< Timeline{{/}}"))
+ }
+
+ // Emit Failure Message
+ if !report.Failure.IsZero() && !v.Is(types.VerbosityLevelVeryVerbose) {
+ r.emitBlock("\n")
+ r.emitFailure(1, report.State, report.Failure, true)
+ if len(report.AdditionalFailures) > 0 {
+ r.emitBlock(r.fi(1, "\nThere were {{bold}}{{red}}additional failures{{/}} detected. To view them in detail run {{bold}}ginkgo -vv{{/}}"))
+ }
+ }
+
+ r.emitDelimiter(0)
+}
+
+func (r *DefaultReporter) highlightColorForState(state types.SpecState) string {
+ switch state {
+ case types.SpecStatePassed:
+ return "{{green}}"
+ case types.SpecStatePending:
+ return "{{yellow}}"
+ case types.SpecStateSkipped:
+ return "{{cyan}}"
+ case types.SpecStateFailed:
+ return "{{red}}"
+ case types.SpecStateTimedout:
+ return "{{orange}}"
+ case types.SpecStatePanicked:
+ return "{{magenta}}"
+ case types.SpecStateInterrupted:
+ return "{{orange}}"
+ case types.SpecStateAborted:
+ return "{{coral}}"
+ default:
+ return "{{gray}}"
+ }
+}
+
+func (r *DefaultReporter) humanReadableState(state types.SpecState) string {
+ return strings.ToUpper(state.String())
+}
+
+func (r *DefaultReporter) emitTimeline(indent uint, report types.SpecReport, timeline types.Timeline) {
+ isVeryVerbose := r.conf.Verbosity().Is(types.VerbosityLevelVeryVerbose)
+ gw := report.CapturedGinkgoWriterOutput
+ cursor := 0
+ for _, entry := range timeline {
+ tl := entry.GetTimelineLocation()
+ if tl.Offset < len(gw) {
+ r.emit(r.fi(indent, "%s", gw[cursor:tl.Offset]))
+ cursor = tl.Offset
+ } else if cursor < len(gw) {
+ r.emit(r.fi(indent, "%s", gw[cursor:]))
+ cursor = len(gw)
+ }
+ switch x := entry.(type) {
+ case types.Failure:
+ if isVeryVerbose {
+ r.emitFailure(indent, report.State, x, false)
+ } else {
+ r.emitShortFailure(indent, report.State, x)
+ }
+ case types.AdditionalFailure:
+ if isVeryVerbose {
+ r.emitFailure(indent, x.State, x.Failure, true)
+ } else {
+ r.emitShortFailure(indent, x.State, x.Failure)
+ }
+ case types.ReportEntry:
+ r.emitReportEntry(indent, x)
+ case types.ProgressReport:
+ r.emitProgressReport(indent, false, x)
+ case types.SpecEvent:
+ if isVeryVerbose || !x.IsOnlyVisibleAtVeryVerbose() || r.conf.ShowNodeEvents {
+ r.emitSpecEvent(indent, x, isVeryVerbose)
+ }
+ }
+ }
+ if cursor < len(gw) {
+ r.emit(r.fi(indent, "%s", gw[cursor:]))
+ }
+}
+
+func (r *DefaultReporter) EmitFailure(state types.SpecState, failure types.Failure) {
+ if r.conf.Verbosity().Is(types.VerbosityLevelVerbose) {
+ r.emitShortFailure(1, state, failure)
+ } else if r.conf.Verbosity().Is(types.VerbosityLevelVeryVerbose) {
+ r.emitFailure(1, state, failure, true)
+ }
+}
+
+func (r *DefaultReporter) emitShortFailure(indent uint, state types.SpecState, failure types.Failure) {
+ r.emitBlock(r.fi(indent, r.highlightColorForState(state)+"[%s]{{/}} in [%s] - %s {{gray}}@ %s{{/}}",
+ r.humanReadableState(state),
+ failure.FailureNodeType,
+ failure.Location,
+ failure.TimelineLocation.Time.Format(types.GINKGO_TIME_FORMAT),
+ ))
+}
+
+func (r *DefaultReporter) emitFailure(indent uint, state types.SpecState, failure types.Failure, includeAdditionalFailure bool) {
+ highlightColor := r.highlightColorForState(state)
+ r.emitBlock(r.fi(indent, highlightColor+"[%s] %s{{/}}", r.humanReadableState(state), failure.Message))
+ r.emitBlock(r.fi(indent, highlightColor+"In {{bold}}[%s]{{/}}"+highlightColor+" at: {{bold}}%s{{/}} {{gray}}@ %s{{/}}\n", failure.FailureNodeType, failure.Location, failure.TimelineLocation.Time.Format(types.GINKGO_TIME_FORMAT)))
+ if failure.ForwardedPanic != "" {
+ r.emitBlock("\n")
+ r.emitBlock(r.fi(indent, highlightColor+"%s{{/}}", failure.ForwardedPanic))
+ }
+
+ if r.conf.FullTrace || failure.ForwardedPanic != "" {
+ r.emitBlock("\n")
+ r.emitBlock(r.fi(indent, highlightColor+"Full Stack Trace{{/}}"))
+ r.emitBlock(r.fi(indent+1, "%s", failure.Location.FullStackTrace))
+ }
+
+ if !failure.ProgressReport.IsZero() {
+ r.emitBlock("\n")
+ r.emitProgressReport(indent, false, failure.ProgressReport)
+ }
+
+ if failure.AdditionalFailure != nil && includeAdditionalFailure {
+ r.emitBlock("\n")
+ r.emitFailure(indent, failure.AdditionalFailure.State, failure.AdditionalFailure.Failure, true)
+ }
+}
+
+func (r *DefaultReporter) EmitProgressReport(report types.ProgressReport) {
+ r.emitDelimiter(1)
+
+ if report.RunningInParallel {
+ r.emit(r.fi(1, "{{coral}}Progress Report for Ginkgo Process #{{bold}}%d{{/}}\n", report.ParallelProcess))
+ }
+ shouldEmitGW := report.RunningInParallel || r.conf.Verbosity().LT(types.VerbosityLevelVerbose)
+ r.emitProgressReport(1, shouldEmitGW, report)
+ r.emitDelimiter(1)
+}
+
+func (r *DefaultReporter) emitProgressReport(indent uint, emitGinkgoWriterOutput bool, report types.ProgressReport) {
+ if report.Message != "" {
+ r.emitBlock(r.fi(indent, report.Message+"\n"))
+ indent += 1
+ }
+ if report.LeafNodeText != "" {
+ subjectIndent := indent
+ if len(report.ContainerHierarchyTexts) > 0 {
+ r.emit(r.fi(indent, r.cycleJoin(report.ContainerHierarchyTexts, " ")))
+ r.emit(" ")
+ subjectIndent = 0
+ }
+ r.emit(r.fi(subjectIndent, "{{bold}}{{orange}}%s{{/}} (Spec Runtime: %s)\n", report.LeafNodeText, report.Time().Sub(report.SpecStartTime).Round(time.Millisecond)))
+ r.emit(r.fi(indent+1, "{{gray}}%s{{/}}\n", report.LeafNodeLocation))
+ indent += 1
+ }
+ if report.CurrentNodeType != types.NodeTypeInvalid {
+ r.emit(r.fi(indent, "In {{bold}}{{orange}}[%s]{{/}}", report.CurrentNodeType))
+ if report.CurrentNodeText != "" && !report.CurrentNodeType.Is(types.NodeTypeIt) {
+ r.emit(r.f(" {{bold}}{{orange}}%s{{/}}", report.CurrentNodeText))
+ }
+
+ r.emit(r.f(" (Node Runtime: %s)\n", report.Time().Sub(report.CurrentNodeStartTime).Round(time.Millisecond)))
+ r.emit(r.fi(indent+1, "{{gray}}%s{{/}}\n", report.CurrentNodeLocation))
+ indent += 1
+ }
+ if report.CurrentStepText != "" {
+ r.emit(r.fi(indent, "At {{bold}}{{orange}}[By Step] %s{{/}} (Step Runtime: %s)\n", report.CurrentStepText, report.Time().Sub(report.CurrentStepStartTime).Round(time.Millisecond)))
+ r.emit(r.fi(indent+1, "{{gray}}%s{{/}}\n", report.CurrentStepLocation))
+ indent += 1
+ }
+
+ if indent > 0 {
+ indent -= 1
+ }
+
+ if emitGinkgoWriterOutput && report.CapturedGinkgoWriterOutput != "" {
+ r.emit("\n")
+ r.emitBlock(r.fi(indent, "{{gray}}Begin Captured GinkgoWriter Output >>{{/}}"))
+ limit, lines := 10, strings.Split(report.CapturedGinkgoWriterOutput, "\n")
+ if len(lines) <= limit {
+ r.emitBlock(r.fi(indent+1, "%s", report.CapturedGinkgoWriterOutput))
+ } else {
+ r.emitBlock(r.fi(indent+1, "{{gray}}...{{/}}"))
+ for _, line := range lines[len(lines)-limit-1:] {
+ r.emitBlock(r.fi(indent+1, "%s", line))
+ }
+ }
+ r.emitBlock(r.fi(indent, "{{gray}}<< End Captured GinkgoWriter Output{{/}}"))
+ }
+
+ if !report.SpecGoroutine().IsZero() {
+ r.emit("\n")
+ r.emit(r.fi(indent, "{{bold}}{{underline}}Spec Goroutine{{/}}\n"))
+ r.emitGoroutines(indent, report.SpecGoroutine())
+ }
+
+ if len(report.AdditionalReports) > 0 {
+ r.emit("\n")
+ r.emitBlock(r.fi(indent, "{{gray}}Begin Additional Progress Reports >>{{/}}"))
+ for i, additionalReport := range report.AdditionalReports {
+ r.emit(r.fi(indent+1, additionalReport))
+ if i < len(report.AdditionalReports)-1 {
+ r.emitBlock(r.fi(indent+1, "{{gray}}%s{{/}}", strings.Repeat("-", 10)))
+ }
+ }
+ r.emitBlock(r.fi(indent, "{{gray}}<< End Additional Progress Reports{{/}}"))
+ }
+
+ highlightedGoroutines := report.HighlightedGoroutines()
+ if len(highlightedGoroutines) > 0 {
+ r.emit("\n")
+ r.emit(r.fi(indent, "{{bold}}{{underline}}Goroutines of Interest{{/}}\n"))
+ r.emitGoroutines(indent, highlightedGoroutines...)
+ }
+
+ otherGoroutines := report.OtherGoroutines()
+ if len(otherGoroutines) > 0 {
+ r.emit("\n")
+ r.emit(r.fi(indent, "{{gray}}{{bold}}{{underline}}Other Goroutines{{/}}\n"))
+ r.emitGoroutines(indent, otherGoroutines...)
+ }
+}
+
+func (r *DefaultReporter) EmitReportEntry(entry types.ReportEntry) {
+ if r.conf.Verbosity().LT(types.VerbosityLevelVerbose) || entry.Visibility == types.ReportEntryVisibilityNever {
+ return
+ }
+ r.emitReportEntry(1, entry)
+}
+
+func (r *DefaultReporter) emitReportEntry(indent uint, entry types.ReportEntry) {
+ r.emitBlock(r.fi(indent, "{{bold}}"+entry.Name+"{{gray}} "+fmt.Sprintf("- %s @ %s{{/}}", entry.Location, entry.Time.Format(types.GINKGO_TIME_FORMAT))))
+ if representation := entry.StringRepresentation(); representation != "" {
+ r.emitBlock(r.fi(indent+1, representation))
+ }
+}
+
+func (r *DefaultReporter) EmitSpecEvent(event types.SpecEvent) {
+ v := r.conf.Verbosity()
+ if v.Is(types.VerbosityLevelVeryVerbose) || (v.Is(types.VerbosityLevelVerbose) && (r.conf.ShowNodeEvents || !event.IsOnlyVisibleAtVeryVerbose())) {
+ r.emitSpecEvent(1, event, r.conf.Verbosity().Is(types.VerbosityLevelVeryVerbose))
+ }
+}
+
+func (r *DefaultReporter) emitSpecEvent(indent uint, event types.SpecEvent, includeLocation bool) {
+ location := ""
+ if includeLocation {
+ location = fmt.Sprintf("- %s ", event.CodeLocation.String())
+ }
+ switch event.SpecEventType {
+ case types.SpecEventInvalid:
+ return
+ case types.SpecEventByStart:
+ r.emitBlock(r.fi(indent, "{{bold}}STEP:{{/}} %s {{gray}}%s@ %s{{/}}", event.Message, location, event.TimelineLocation.Time.Format(types.GINKGO_TIME_FORMAT)))
+ case types.SpecEventByEnd:
+ r.emitBlock(r.fi(indent, "{{bold}}END STEP:{{/}} %s {{gray}}%s@ %s (%s){{/}}", event.Message, location, event.TimelineLocation.Time.Format(types.GINKGO_TIME_FORMAT), event.Duration.Round(time.Millisecond)))
+ case types.SpecEventNodeStart:
+ r.emitBlock(r.fi(indent, "> Enter {{bold}}[%s]{{/}} %s {{gray}}%s@ %s{{/}}", event.NodeType.String(), event.Message, location, event.TimelineLocation.Time.Format(types.GINKGO_TIME_FORMAT)))
+ case types.SpecEventNodeEnd:
+ r.emitBlock(r.fi(indent, "< Exit {{bold}}[%s]{{/}} %s {{gray}}%s@ %s (%s){{/}}", event.NodeType.String(), event.Message, location, event.TimelineLocation.Time.Format(types.GINKGO_TIME_FORMAT), event.Duration.Round(time.Millisecond)))
+ case types.SpecEventSpecRepeat:
+ r.emitBlock(r.fi(indent, "\n{{bold}}Attempt #%d {{green}}Passed{{/}}{{bold}}. Repeating %s{{/}} {{gray}}@ %s{{/}}\n\n", event.Attempt, r.retryDenoter, event.TimelineLocation.Time.Format(types.GINKGO_TIME_FORMAT)))
+ case types.SpecEventSpecRetry:
+ r.emitBlock(r.fi(indent, "\n{{bold}}Attempt #%d {{red}}Failed{{/}}{{bold}}. Retrying %s{{/}} {{gray}}@ %s{{/}}\n\n", event.Attempt, r.retryDenoter, event.TimelineLocation.Time.Format(types.GINKGO_TIME_FORMAT)))
+ }
+}
+
+func (r *DefaultReporter) emitGoroutines(indent uint, goroutines ...types.Goroutine) {
+ for idx, g := range goroutines {
+ color := "{{gray}}"
+ if g.HasHighlights() {
+ color = "{{orange}}"
+ }
+ r.emit(r.fi(indent, color+"goroutine %d [%s]{{/}}\n", g.ID, g.State))
+ for _, fc := range g.Stack {
+ if fc.Highlight {
+ r.emit(r.fi(indent, color+"{{bold}}> %s{{/}}\n", fc.Function))
+ r.emit(r.fi(indent+2, color+"{{bold}}%s:%d{{/}}\n", fc.Filename, fc.Line))
+ r.emitSource(indent+3, fc)
+ } else {
+ r.emit(r.fi(indent+1, "{{gray}}%s{{/}}\n", fc.Function))
+ r.emit(r.fi(indent+2, "{{gray}}%s:%d{{/}}\n", fc.Filename, fc.Line))
+ }
+ }
+
+ if idx+1 < len(goroutines) {
+ r.emit("\n")
+ }
+ }
+}
+
+func (r *DefaultReporter) emitSource(indent uint, fc types.FunctionCall) {
+ lines := fc.Source
+ if len(lines) == 0 {
+ return
+ }
+
+ lTrim := 100000
+ for _, line := range lines {
+ lTrimLine := len(line) - len(strings.TrimLeft(line, " \t"))
+ if lTrimLine < lTrim && len(line) > 0 {
+ lTrim = lTrimLine
+ }
+ }
+ if lTrim == 100000 {
+ lTrim = 0
+ }
+
+ for idx, line := range lines {
+ if len(line) > lTrim {
+ line = line[lTrim:]
+ }
+ if idx == fc.SourceHighlight {
+ r.emit(r.fi(indent, "{{bold}}{{orange}}> %s{{/}}\n", line))
+ } else {
+ r.emit(r.fi(indent, "| %s\n", line))
+ }
+ }
+}
+
+/* Emitting to the writer */
+func (r *DefaultReporter) emit(s string) {
+ r._emit(s, false, false)
+}
+
+func (r *DefaultReporter) emitBlock(s string) {
+ r._emit(s, true, false)
+}
+
+func (r *DefaultReporter) emitDelimiter(indent uint) {
+ r._emit(r.fi(indent, "{{gray}}%s{{/}}", strings.Repeat("-", 30)), true, true)
+}
+
+// a bit ugly - but we're trying to minimize locking on this hot codepath
+func (r *DefaultReporter) _emit(s string, block bool, isDelimiter bool) {
+ if len(s) == 0 {
+ return
+ }
+ r.lock.Lock()
+ defer r.lock.Unlock()
+ if isDelimiter && r.lastEmissionWasDelimiter {
+ return
+ }
+ if block && !r.lastCharWasNewline {
+ r.writer.Write([]byte("\n"))
+ }
+ r.lastCharWasNewline = (s[len(s)-1:] == "\n")
+ r.writer.Write([]byte(s))
+ if block && !r.lastCharWasNewline {
+ r.writer.Write([]byte("\n"))
+ r.lastCharWasNewline = true
+ }
+ r.lastEmissionWasDelimiter = isDelimiter
+}
+
+/* Rendering text */
+func (r *DefaultReporter) f(format string, args ...interface{}) string {
+ return r.formatter.F(format, args...)
+}
+
+func (r *DefaultReporter) fi(indentation uint, format string, args ...interface{}) string {
+ return r.formatter.Fi(indentation, format, args...)
+}
+
+func (r *DefaultReporter) cycleJoin(elements []string, joiner string) string {
+ return r.formatter.CycleJoin(elements, joiner, []string{"{{/}}", "{{gray}}"})
+}
+
+func (r *DefaultReporter) codeLocationBlock(report types.SpecReport, highlightColor string, veryVerbose bool, usePreciseFailureLocation bool) string {
+ texts, locations, labels := []string{}, []types.CodeLocation{}, [][]string{}
+ texts, locations, labels = append(texts, report.ContainerHierarchyTexts...), append(locations, report.ContainerHierarchyLocations...), append(labels, report.ContainerHierarchyLabels...)
+
+ if report.LeafNodeType.Is(types.NodeTypesForSuiteLevelNodes) {
+ texts = append(texts, r.f("[%s] %s", report.LeafNodeType, report.LeafNodeText))
+ } else {
+ texts = append(texts, r.f(report.LeafNodeText))
+ }
+ labels = append(labels, report.LeafNodeLabels)
+ locations = append(locations, report.LeafNodeLocation)
+
+ failureLocation := report.Failure.FailureNodeLocation
+ if usePreciseFailureLocation {
+ failureLocation = report.Failure.Location
+ }
+
+ highlightIndex := -1
+ switch report.Failure.FailureNodeContext {
+ case types.FailureNodeAtTopLevel:
+ texts = append([]string{fmt.Sprintf("TOP-LEVEL [%s]", report.Failure.FailureNodeType)}, texts...)
+ locations = append([]types.CodeLocation{failureLocation}, locations...)
+ labels = append([][]string{{}}, labels...)
+ highlightIndex = 0
+ case types.FailureNodeInContainer:
+ i := report.Failure.FailureNodeContainerIndex
+ texts[i] = fmt.Sprintf("%s [%s]", texts[i], report.Failure.FailureNodeType)
+ locations[i] = failureLocation
+ highlightIndex = i
+ case types.FailureNodeIsLeafNode:
+ i := len(texts) - 1
+ texts[i] = fmt.Sprintf("[%s] %s", report.LeafNodeType, report.LeafNodeText)
+ locations[i] = failureLocation
+ highlightIndex = i
+ default:
+ //there is no failure, so we highlight the leaf ndoe
+ highlightIndex = len(texts) - 1
+ }
+
+ out := ""
+ if veryVerbose {
+ for i := range texts {
+ if i == highlightIndex {
+ out += r.fi(uint(i), highlightColor+"{{bold}}%s{{/}}", texts[i])
+ } else {
+ out += r.fi(uint(i), "%s", texts[i])
+ }
+ if len(labels[i]) > 0 {
+ out += r.f(" {{coral}}[%s]{{/}}", strings.Join(labels[i], ", "))
+ }
+ out += "\n"
+ out += r.fi(uint(i), "{{gray}}%s{{/}}\n", locations[i])
+ }
+ } else {
+ for i := range texts {
+ style := "{{/}}"
+ if i%2 == 1 {
+ style = "{{gray}}"
+ }
+ if i == highlightIndex {
+ style = highlightColor + "{{bold}}"
+ }
+ out += r.f(style+"%s", texts[i])
+ if i < len(texts)-1 {
+ out += " "
+ } else {
+ out += r.f("{{/}}")
+ }
+ }
+ flattenedLabels := report.Labels()
+ if len(flattenedLabels) > 0 {
+ out += r.f(" {{coral}}[%s]{{/}}", strings.Join(flattenedLabels, ", "))
+ }
+ out += "\n"
+ if usePreciseFailureLocation {
+ out += r.f("{{gray}}%s{{/}}", failureLocation)
+ } else {
+ leafLocation := locations[len(locations)-1]
+ if (report.Failure.FailureNodeLocation != types.CodeLocation{}) && (report.Failure.FailureNodeLocation != leafLocation) {
+ out += r.fi(1, highlightColor+"[%s]{{/}} {{gray}}%s{{/}}\n", report.Failure.FailureNodeType, report.Failure.FailureNodeLocation)
+ out += r.fi(1, "{{gray}}[%s] %s{{/}}", report.LeafNodeType, leafLocation)
+ } else {
+ out += r.f("{{gray}}%s{{/}}", leafLocation)
+ }
+ }
+
+ }
+ return out
+}
diff --git a/vendor/github.com/onsi/ginkgo/v2/reporters/deprecated_reporter.go b/vendor/github.com/onsi/ginkgo/v2/reporters/deprecated_reporter.go
new file mode 100644
index 000000000..613072ebf
--- /dev/null
+++ b/vendor/github.com/onsi/ginkgo/v2/reporters/deprecated_reporter.go
@@ -0,0 +1,149 @@
+package reporters
+
+import (
+ "github.com/onsi/ginkgo/v2/config"
+ "github.com/onsi/ginkgo/v2/types"
+)
+
+// Deprecated: DeprecatedReporter was how Ginkgo V1 provided support for CustomReporters
+// this has been removed in V2.
+// Please read the documentation at:
+// https://onsi.github.io/ginkgo/MIGRATING_TO_V2#removed-custom-reporters
+// for Ginkgo's new behavior and for a migration path.
+type DeprecatedReporter interface {
+ SuiteWillBegin(config config.GinkgoConfigType, summary *types.SuiteSummary)
+ BeforeSuiteDidRun(setupSummary *types.SetupSummary)
+ SpecWillRun(specSummary *types.SpecSummary)
+ SpecDidComplete(specSummary *types.SpecSummary)
+ AfterSuiteDidRun(setupSummary *types.SetupSummary)
+ SuiteDidEnd(summary *types.SuiteSummary)
+}
+
+// ReportViaDeprecatedReporter takes a V1 custom reporter and a V2 report and
+// calls the custom reporter's methods with appropriately transformed data from the V2 report.
+//
+// ReportViaDeprecatedReporter should be called in a `ReportAfterSuite()`
+//
+// Deprecated: ReportViaDeprecatedReporter method exists to help developer bridge between deprecated V1 functionality and the new
+// reporting support in V2. It will be removed in a future minor version of Ginkgo.
+func ReportViaDeprecatedReporter(reporter DeprecatedReporter, report types.Report) {
+ conf := config.DeprecatedGinkgoConfigType{
+ RandomSeed: report.SuiteConfig.RandomSeed,
+ RandomizeAllSpecs: report.SuiteConfig.RandomizeAllSpecs,
+ FocusStrings: report.SuiteConfig.FocusStrings,
+ SkipStrings: report.SuiteConfig.SkipStrings,
+ FailOnPending: report.SuiteConfig.FailOnPending,
+ FailFast: report.SuiteConfig.FailFast,
+ FlakeAttempts: report.SuiteConfig.FlakeAttempts,
+ EmitSpecProgress: false,
+ DryRun: report.SuiteConfig.DryRun,
+ ParallelNode: report.SuiteConfig.ParallelProcess,
+ ParallelTotal: report.SuiteConfig.ParallelTotal,
+ SyncHost: report.SuiteConfig.ParallelHost,
+ StreamHost: report.SuiteConfig.ParallelHost,
+ }
+
+ summary := &types.DeprecatedSuiteSummary{
+ SuiteDescription: report.SuiteDescription,
+ SuiteID: report.SuitePath,
+
+ NumberOfSpecsBeforeParallelization: report.PreRunStats.TotalSpecs,
+ NumberOfTotalSpecs: report.PreRunStats.TotalSpecs,
+ NumberOfSpecsThatWillBeRun: report.PreRunStats.SpecsThatWillRun,
+ }
+
+ reporter.SuiteWillBegin(conf, summary)
+
+ for _, spec := range report.SpecReports {
+ switch spec.LeafNodeType {
+ case types.NodeTypeBeforeSuite, types.NodeTypeSynchronizedBeforeSuite:
+ setupSummary := &types.DeprecatedSetupSummary{
+ ComponentType: spec.LeafNodeType,
+ CodeLocation: spec.LeafNodeLocation,
+ State: spec.State,
+ RunTime: spec.RunTime,
+ Failure: failureFor(spec),
+ CapturedOutput: spec.CombinedOutput(),
+ SuiteID: report.SuitePath,
+ }
+ reporter.BeforeSuiteDidRun(setupSummary)
+ case types.NodeTypeAfterSuite, types.NodeTypeSynchronizedAfterSuite:
+ setupSummary := &types.DeprecatedSetupSummary{
+ ComponentType: spec.LeafNodeType,
+ CodeLocation: spec.LeafNodeLocation,
+ State: spec.State,
+ RunTime: spec.RunTime,
+ Failure: failureFor(spec),
+ CapturedOutput: spec.CombinedOutput(),
+ SuiteID: report.SuitePath,
+ }
+ reporter.AfterSuiteDidRun(setupSummary)
+ case types.NodeTypeIt:
+ componentTexts, componentCodeLocations := []string{}, []types.CodeLocation{}
+ componentTexts = append(componentTexts, spec.ContainerHierarchyTexts...)
+ componentCodeLocations = append(componentCodeLocations, spec.ContainerHierarchyLocations...)
+ componentTexts = append(componentTexts, spec.LeafNodeText)
+ componentCodeLocations = append(componentCodeLocations, spec.LeafNodeLocation)
+
+ specSummary := &types.DeprecatedSpecSummary{
+ ComponentTexts: componentTexts,
+ ComponentCodeLocations: componentCodeLocations,
+ State: spec.State,
+ RunTime: spec.RunTime,
+ Failure: failureFor(spec),
+ NumberOfSamples: spec.NumAttempts,
+ CapturedOutput: spec.CombinedOutput(),
+ SuiteID: report.SuitePath,
+ }
+ reporter.SpecWillRun(specSummary)
+ reporter.SpecDidComplete(specSummary)
+
+ switch spec.State {
+ case types.SpecStatePending:
+ summary.NumberOfPendingSpecs += 1
+ case types.SpecStateSkipped:
+ summary.NumberOfSkippedSpecs += 1
+ case types.SpecStateFailed, types.SpecStatePanicked, types.SpecStateInterrupted:
+ summary.NumberOfFailedSpecs += 1
+ case types.SpecStatePassed:
+ summary.NumberOfPassedSpecs += 1
+ if spec.NumAttempts > 1 {
+ summary.NumberOfFlakedSpecs += 1
+ }
+ }
+ }
+ }
+
+ summary.SuiteSucceeded = report.SuiteSucceeded
+ summary.RunTime = report.RunTime
+
+ reporter.SuiteDidEnd(summary)
+}
+
+func failureFor(spec types.SpecReport) types.DeprecatedSpecFailure {
+ if spec.Failure.IsZero() {
+ return types.DeprecatedSpecFailure{}
+ }
+
+ index := 0
+ switch spec.Failure.FailureNodeContext {
+ case types.FailureNodeInContainer:
+ index = spec.Failure.FailureNodeContainerIndex
+ case types.FailureNodeAtTopLevel:
+ index = -1
+ case types.FailureNodeIsLeafNode:
+ index = len(spec.ContainerHierarchyTexts) - 1
+ if spec.LeafNodeText != "" {
+ index += 1
+ }
+ }
+
+ return types.DeprecatedSpecFailure{
+ Message: spec.Failure.Message,
+ Location: spec.Failure.Location,
+ ForwardedPanic: spec.Failure.ForwardedPanic,
+ ComponentIndex: index,
+ ComponentType: spec.Failure.FailureNodeType,
+ ComponentCodeLocation: spec.Failure.FailureNodeLocation,
+ }
+}
diff --git a/vendor/github.com/onsi/ginkgo/v2/reporters/json_report.go b/vendor/github.com/onsi/ginkgo/v2/reporters/json_report.go
new file mode 100644
index 000000000..be506f9b4
--- /dev/null
+++ b/vendor/github.com/onsi/ginkgo/v2/reporters/json_report.go
@@ -0,0 +1,67 @@
+package reporters
+
+import (
+ "encoding/json"
+ "fmt"
+ "os"
+ "path"
+
+ "github.com/onsi/ginkgo/v2/types"
+)
+
+// GenerateJSONReport produces a JSON-formatted report at the passed in destination
+func GenerateJSONReport(report types.Report, destination string) error {
+ if err := os.MkdirAll(path.Dir(destination), 0770); err != nil {
+ return err
+ }
+ f, err := os.Create(destination)
+ if err != nil {
+ return err
+ }
+ enc := json.NewEncoder(f)
+ enc.SetIndent("", " ")
+ err = enc.Encode([]types.Report{
+ report,
+ })
+ if err != nil {
+ return err
+ }
+ return f.Close()
+}
+
+// MergeAndCleanupJSONReports produces a single JSON-formatted report at the passed in destination by merging the JSON-formatted reports provided in sources
+// It skips over reports that fail to decode but reports on them via the returned messages []string
+func MergeAndCleanupJSONReports(sources []string, destination string) ([]string, error) {
+ messages := []string{}
+ allReports := []types.Report{}
+ for _, source := range sources {
+ reports := []types.Report{}
+ data, err := os.ReadFile(source)
+ if err != nil {
+ messages = append(messages, fmt.Sprintf("Could not open %s:\n%s", source, err.Error()))
+ continue
+ }
+ err = json.Unmarshal(data, &reports)
+ if err != nil {
+ messages = append(messages, fmt.Sprintf("Could not decode %s:\n%s", source, err.Error()))
+ continue
+ }
+ os.Remove(source)
+ allReports = append(allReports, reports...)
+ }
+
+ if err := os.MkdirAll(path.Dir(destination), 0770); err != nil {
+ return messages, err
+ }
+ f, err := os.Create(destination)
+ if err != nil {
+ return messages, err
+ }
+ enc := json.NewEncoder(f)
+ enc.SetIndent("", " ")
+ err = enc.Encode(allReports)
+ if err != nil {
+ return messages, err
+ }
+ return messages, f.Close()
+}
diff --git a/vendor/github.com/onsi/ginkgo/v2/reporters/junit_report.go b/vendor/github.com/onsi/ginkgo/v2/reporters/junit_report.go
new file mode 100644
index 000000000..816042208
--- /dev/null
+++ b/vendor/github.com/onsi/ginkgo/v2/reporters/junit_report.go
@@ -0,0 +1,376 @@
+/*
+
+JUnit XML Reporter for Ginkgo
+
+For usage instructions: http://onsi.github.io/ginkgo/#generating_junit_xml_output
+
+The schema used for the generated JUnit xml file was adapted from https://llg.cubic.org/docs/junit/
+
+*/
+
+package reporters
+
+import (
+ "encoding/xml"
+ "fmt"
+ "os"
+ "path"
+ "strings"
+
+ "github.com/onsi/ginkgo/v2/config"
+ "github.com/onsi/ginkgo/v2/types"
+)
+
+type JunitReportConfig struct {
+ // Spec States for which no timeline should be emitted for system-err
+ // set this to types.SpecStatePassed|types.SpecStateSkipped|types.SpecStatePending to only match failing specs
+ OmitTimelinesForSpecState types.SpecState
+
+ // Enable OmitFailureMessageAttr to prevent failure messages appearing in the "message" attribute of the Failure and Error tags
+ OmitFailureMessageAttr bool
+
+ //Enable OmitCapturedStdOutErr to prevent captured stdout/stderr appearing in system-out
+ OmitCapturedStdOutErr bool
+
+ // Enable OmitSpecLabels to prevent labels from appearing in the spec name
+ OmitSpecLabels bool
+
+ // Enable OmitLeafNodeType to prevent the spec leaf node type from appearing in the spec name
+ OmitLeafNodeType bool
+
+ // Enable OmitSuiteSetupNodes to prevent the creation of testcase entries for setup nodes
+ OmitSuiteSetupNodes bool
+}
+
+type JUnitTestSuites struct {
+ XMLName xml.Name `xml:"testsuites"`
+ // Tests maps onto the total number of specs in all test suites (this includes any suite nodes such as BeforeSuite)
+ Tests int `xml:"tests,attr"`
+ // Disabled maps onto specs that are pending and/or skipped
+ Disabled int `xml:"disabled,attr"`
+ // Errors maps onto specs that panicked or were interrupted
+ Errors int `xml:"errors,attr"`
+ // Failures maps onto specs that failed
+ Failures int `xml:"failures,attr"`
+ // Time is the time in seconds to execute all test suites
+ Time float64 `xml:"time,attr"`
+
+ //The set of all test suites
+ TestSuites []JUnitTestSuite `xml:"testsuite"`
+}
+
+type JUnitTestSuite struct {
+ // Name maps onto the description of the test suite - maps onto Report.SuiteDescription
+ Name string `xml:"name,attr"`
+ // Package maps onto the absolute path to the test suite - maps onto Report.SuitePath
+ Package string `xml:"package,attr"`
+ // Tests maps onto the total number of specs in the test suite (this includes any suite nodes such as BeforeSuite)
+ Tests int `xml:"tests,attr"`
+ // Disabled maps onto specs that are pending
+ Disabled int `xml:"disabled,attr"`
+ // Skipped maps onto specs that are skipped
+ Skipped int `xml:"skipped,attr"`
+ // Errors maps onto specs that panicked or were interrupted
+ Errors int `xml:"errors,attr"`
+ // Failures maps onto specs that failed
+ Failures int `xml:"failures,attr"`
+ // Time is the time in seconds to execute all the test suite - maps onto Report.RunTime
+ Time float64 `xml:"time,attr"`
+ // Timestamp is the ISO 8601 formatted start-time of the suite - maps onto Report.StartTime
+ Timestamp string `xml:"timestamp,attr"`
+
+ //Properties captures the information stored in the rest of the Report type (including SuiteConfig) as key-value pairs
+ Properties JUnitProperties `xml:"properties"`
+
+ //TestCases capture the individual specs
+ TestCases []JUnitTestCase `xml:"testcase"`
+}
+
+type JUnitProperties struct {
+ Properties []JUnitProperty `xml:"property"`
+}
+
+func (jup JUnitProperties) WithName(name string) string {
+ for _, property := range jup.Properties {
+ if property.Name == name {
+ return property.Value
+ }
+ }
+ return ""
+}
+
+type JUnitProperty struct {
+ Name string `xml:"name,attr"`
+ Value string `xml:"value,attr"`
+}
+
+type JUnitTestCase struct {
+ // Name maps onto the full text of the spec - equivalent to "[SpecReport.LeafNodeType] SpecReport.FullText()"
+ Name string `xml:"name,attr"`
+ // Classname maps onto the name of the test suite - equivalent to Report.SuiteDescription
+ Classname string `xml:"classname,attr"`
+ // Status maps onto the string representation of SpecReport.State
+ Status string `xml:"status,attr"`
+ // Time is the time in seconds to execute the spec - maps onto SpecReport.RunTime
+ Time float64 `xml:"time,attr"`
+ //Skipped is populated with a message if the test was skipped or pending
+ Skipped *JUnitSkipped `xml:"skipped,omitempty"`
+ //Error is populated if the test panicked or was interrupted
+ Error *JUnitError `xml:"error,omitempty"`
+ //Failure is populated if the test failed
+ Failure *JUnitFailure `xml:"failure,omitempty"`
+ //SystemOut maps onto any captured stdout/stderr output - maps onto SpecReport.CapturedStdOutErr
+ SystemOut string `xml:"system-out,omitempty"`
+ //SystemErr maps onto any captured GinkgoWriter output - maps onto SpecReport.CapturedGinkgoWriterOutput
+ SystemErr string `xml:"system-err,omitempty"`
+}
+
+type JUnitSkipped struct {
+ // Message maps onto "pending" if the test was marked pending, "skipped" if the test was marked skipped, and "skipped - REASON" if the user called Skip(REASON)
+ Message string `xml:"message,attr"`
+}
+
+type JUnitError struct {
+ //Message maps onto the panic/exception thrown - equivalent to SpecReport.Failure.ForwardedPanic - or to "interrupted"
+ Message string `xml:"message,attr"`
+ //Type is one of "panicked" or "interrupted"
+ Type string `xml:"type,attr"`
+ //Description maps onto the captured stack trace for a panic, or the failure message for an interrupt which will include the dump of running goroutines
+ Description string `xml:",chardata"`
+}
+
+type JUnitFailure struct {
+ //Message maps onto the failure message - equivalent to SpecReport.Failure.Message
+ Message string `xml:"message,attr"`
+ //Type is "failed"
+ Type string `xml:"type,attr"`
+ //Description maps onto the location and stack trace of the failure
+ Description string `xml:",chardata"`
+}
+
+func GenerateJUnitReport(report types.Report, dst string) error {
+ return GenerateJUnitReportWithConfig(report, dst, JunitReportConfig{})
+}
+
+func GenerateJUnitReportWithConfig(report types.Report, dst string, config JunitReportConfig) error {
+ suite := JUnitTestSuite{
+ Name: report.SuiteDescription,
+ Package: report.SuitePath,
+ Time: report.RunTime.Seconds(),
+ Timestamp: report.StartTime.Format("2006-01-02T15:04:05"),
+ Properties: JUnitProperties{
+ Properties: []JUnitProperty{
+ {"SuiteSucceeded", fmt.Sprintf("%t", report.SuiteSucceeded)},
+ {"SuiteHasProgrammaticFocus", fmt.Sprintf("%t", report.SuiteHasProgrammaticFocus)},
+ {"SpecialSuiteFailureReason", strings.Join(report.SpecialSuiteFailureReasons, ",")},
+ {"SuiteLabels", fmt.Sprintf("[%s]", strings.Join(report.SuiteLabels, ","))},
+ {"RandomSeed", fmt.Sprintf("%d", report.SuiteConfig.RandomSeed)},
+ {"RandomizeAllSpecs", fmt.Sprintf("%t", report.SuiteConfig.RandomizeAllSpecs)},
+ {"LabelFilter", report.SuiteConfig.LabelFilter},
+ {"FocusStrings", strings.Join(report.SuiteConfig.FocusStrings, ",")},
+ {"SkipStrings", strings.Join(report.SuiteConfig.SkipStrings, ",")},
+ {"FocusFiles", strings.Join(report.SuiteConfig.FocusFiles, ";")},
+ {"SkipFiles", strings.Join(report.SuiteConfig.SkipFiles, ";")},
+ {"FailOnPending", fmt.Sprintf("%t", report.SuiteConfig.FailOnPending)},
+ {"FailFast", fmt.Sprintf("%t", report.SuiteConfig.FailFast)},
+ {"FlakeAttempts", fmt.Sprintf("%d", report.SuiteConfig.FlakeAttempts)},
+ {"DryRun", fmt.Sprintf("%t", report.SuiteConfig.DryRun)},
+ {"ParallelTotal", fmt.Sprintf("%d", report.SuiteConfig.ParallelTotal)},
+ {"OutputInterceptorMode", report.SuiteConfig.OutputInterceptorMode},
+ },
+ },
+ }
+ for _, spec := range report.SpecReports {
+ if config.OmitSuiteSetupNodes && spec.LeafNodeType != types.NodeTypeIt {
+ continue
+ }
+ name := fmt.Sprintf("[%s]", spec.LeafNodeType)
+ if config.OmitLeafNodeType {
+ name = ""
+ }
+ if spec.FullText() != "" {
+ name = name + " " + spec.FullText()
+ }
+ labels := spec.Labels()
+ if len(labels) > 0 && !config.OmitSpecLabels {
+ name = name + " [" + strings.Join(labels, ", ") + "]"
+ }
+ name = strings.TrimSpace(name)
+
+ test := JUnitTestCase{
+ Name: name,
+ Classname: report.SuiteDescription,
+ Status: spec.State.String(),
+ Time: spec.RunTime.Seconds(),
+ }
+ if !spec.State.Is(config.OmitTimelinesForSpecState) {
+ test.SystemErr = systemErrForUnstructuredReporters(spec)
+ }
+ if !config.OmitCapturedStdOutErr {
+ test.SystemOut = systemOutForUnstructuredReporters(spec)
+ }
+ suite.Tests += 1
+
+ switch spec.State {
+ case types.SpecStateSkipped:
+ message := "skipped"
+ if spec.Failure.Message != "" {
+ message += " - " + spec.Failure.Message
+ }
+ test.Skipped = &JUnitSkipped{Message: message}
+ suite.Skipped += 1
+ case types.SpecStatePending:
+ test.Skipped = &JUnitSkipped{Message: "pending"}
+ suite.Disabled += 1
+ case types.SpecStateFailed:
+ test.Failure = &JUnitFailure{
+ Message: spec.Failure.Message,
+ Type: "failed",
+ Description: failureDescriptionForUnstructuredReporters(spec),
+ }
+ if config.OmitFailureMessageAttr {
+ test.Failure.Message = ""
+ }
+ suite.Failures += 1
+ case types.SpecStateTimedout:
+ test.Failure = &JUnitFailure{
+ Message: spec.Failure.Message,
+ Type: "timedout",
+ Description: failureDescriptionForUnstructuredReporters(spec),
+ }
+ if config.OmitFailureMessageAttr {
+ test.Failure.Message = ""
+ }
+ suite.Failures += 1
+ case types.SpecStateInterrupted:
+ test.Error = &JUnitError{
+ Message: spec.Failure.Message,
+ Type: "interrupted",
+ Description: failureDescriptionForUnstructuredReporters(spec),
+ }
+ if config.OmitFailureMessageAttr {
+ test.Error.Message = ""
+ }
+ suite.Errors += 1
+ case types.SpecStateAborted:
+ test.Failure = &JUnitFailure{
+ Message: spec.Failure.Message,
+ Type: "aborted",
+ Description: failureDescriptionForUnstructuredReporters(spec),
+ }
+ if config.OmitFailureMessageAttr {
+ test.Failure.Message = ""
+ }
+ suite.Errors += 1
+ case types.SpecStatePanicked:
+ test.Error = &JUnitError{
+ Message: spec.Failure.ForwardedPanic,
+ Type: "panicked",
+ Description: failureDescriptionForUnstructuredReporters(spec),
+ }
+ if config.OmitFailureMessageAttr {
+ test.Error.Message = ""
+ }
+ suite.Errors += 1
+ }
+
+ suite.TestCases = append(suite.TestCases, test)
+ }
+
+ junitReport := JUnitTestSuites{
+ Tests: suite.Tests,
+ Disabled: suite.Disabled + suite.Skipped,
+ Errors: suite.Errors,
+ Failures: suite.Failures,
+ Time: suite.Time,
+ TestSuites: []JUnitTestSuite{suite},
+ }
+
+ if err := os.MkdirAll(path.Dir(dst), 0770); err != nil {
+ return err
+ }
+ f, err := os.Create(dst)
+ if err != nil {
+ return err
+ }
+ f.WriteString(xml.Header)
+ encoder := xml.NewEncoder(f)
+ encoder.Indent(" ", " ")
+ encoder.Encode(junitReport)
+
+ return f.Close()
+}
+
+func MergeAndCleanupJUnitReports(sources []string, dst string) ([]string, error) {
+ messages := []string{}
+ mergedReport := JUnitTestSuites{}
+ for _, source := range sources {
+ report := JUnitTestSuites{}
+ f, err := os.Open(source)
+ if err != nil {
+ messages = append(messages, fmt.Sprintf("Could not open %s:\n%s", source, err.Error()))
+ continue
+ }
+ err = xml.NewDecoder(f).Decode(&report)
+ if err != nil {
+ messages = append(messages, fmt.Sprintf("Could not decode %s:\n%s", source, err.Error()))
+ continue
+ }
+ os.Remove(source)
+
+ mergedReport.Tests += report.Tests
+ mergedReport.Disabled += report.Disabled
+ mergedReport.Errors += report.Errors
+ mergedReport.Failures += report.Failures
+ mergedReport.Time += report.Time
+ mergedReport.TestSuites = append(mergedReport.TestSuites, report.TestSuites...)
+ }
+
+ if err := os.MkdirAll(path.Dir(dst), 0770); err != nil {
+ return messages, err
+ }
+ f, err := os.Create(dst)
+ if err != nil {
+ return messages, err
+ }
+ f.WriteString(xml.Header)
+ encoder := xml.NewEncoder(f)
+ encoder.Indent(" ", " ")
+ encoder.Encode(mergedReport)
+
+ return messages, f.Close()
+}
+
+func failureDescriptionForUnstructuredReporters(spec types.SpecReport) string {
+ out := &strings.Builder{}
+ NewDefaultReporter(types.ReporterConfig{NoColor: true, VeryVerbose: true}, out).emitFailure(0, spec.State, spec.Failure, true)
+ if len(spec.AdditionalFailures) > 0 {
+ out.WriteString("\nThere were additional failures detected after the initial failure. These are visible in the timeline\n")
+ }
+ return out.String()
+}
+
+func systemErrForUnstructuredReporters(spec types.SpecReport) string {
+ return RenderTimeline(spec, true)
+}
+
+func RenderTimeline(spec types.SpecReport, noColor bool) string {
+ out := &strings.Builder{}
+ NewDefaultReporter(types.ReporterConfig{NoColor: noColor, VeryVerbose: true}, out).emitTimeline(0, spec, spec.Timeline())
+ return out.String()
+}
+
+func systemOutForUnstructuredReporters(spec types.SpecReport) string {
+ return spec.CapturedStdOutErr
+}
+
+// Deprecated JUnitReporter (so folks can still compile their suites)
+type JUnitReporter struct{}
+
+func NewJUnitReporter(_ string) *JUnitReporter { return &JUnitReporter{} }
+func (reporter *JUnitReporter) SuiteWillBegin(_ config.GinkgoConfigType, _ *types.SuiteSummary) {}
+func (reporter *JUnitReporter) BeforeSuiteDidRun(_ *types.SetupSummary) {}
+func (reporter *JUnitReporter) SpecWillRun(_ *types.SpecSummary) {}
+func (reporter *JUnitReporter) SpecDidComplete(_ *types.SpecSummary) {}
+func (reporter *JUnitReporter) AfterSuiteDidRun(_ *types.SetupSummary) {}
+func (reporter *JUnitReporter) SuiteDidEnd(_ *types.SuiteSummary) {}
diff --git a/vendor/github.com/onsi/ginkgo/v2/reporters/reporter.go b/vendor/github.com/onsi/ginkgo/v2/reporters/reporter.go
new file mode 100644
index 000000000..5e726c464
--- /dev/null
+++ b/vendor/github.com/onsi/ginkgo/v2/reporters/reporter.go
@@ -0,0 +1,29 @@
+package reporters
+
+import (
+ "github.com/onsi/ginkgo/v2/types"
+)
+
+type Reporter interface {
+ SuiteWillBegin(report types.Report)
+ WillRun(report types.SpecReport)
+ DidRun(report types.SpecReport)
+ SuiteDidEnd(report types.Report)
+
+ //Timeline emission
+ EmitFailure(state types.SpecState, failure types.Failure)
+ EmitProgressReport(progressReport types.ProgressReport)
+ EmitReportEntry(entry types.ReportEntry)
+ EmitSpecEvent(event types.SpecEvent)
+}
+
+type NoopReporter struct{}
+
+func (n NoopReporter) SuiteWillBegin(report types.Report) {}
+func (n NoopReporter) WillRun(report types.SpecReport) {}
+func (n NoopReporter) DidRun(report types.SpecReport) {}
+func (n NoopReporter) SuiteDidEnd(report types.Report) {}
+func (n NoopReporter) EmitFailure(state types.SpecState, failure types.Failure) {}
+func (n NoopReporter) EmitProgressReport(progressReport types.ProgressReport) {}
+func (n NoopReporter) EmitReportEntry(entry types.ReportEntry) {}
+func (n NoopReporter) EmitSpecEvent(event types.SpecEvent) {}
diff --git a/vendor/github.com/onsi/ginkgo/v2/reporters/teamcity_report.go b/vendor/github.com/onsi/ginkgo/v2/reporters/teamcity_report.go
new file mode 100644
index 000000000..e990ad82e
--- /dev/null
+++ b/vendor/github.com/onsi/ginkgo/v2/reporters/teamcity_report.go
@@ -0,0 +1,105 @@
+/*
+
+TeamCity Reporter for Ginkgo
+
+Makes use of TeamCity's support for Service Messages
+http://confluence.jetbrains.com/display/TCD7/Build+Script+Interaction+with+TeamCity#BuildScriptInteractionwithTeamCity-ReportingTests
+*/
+
+package reporters
+
+import (
+ "fmt"
+ "os"
+ "path"
+ "strings"
+
+ "github.com/onsi/ginkgo/v2/types"
+)
+
+func tcEscape(s string) string {
+ s = strings.ReplaceAll(s, "|", "||")
+ s = strings.ReplaceAll(s, "'", "|'")
+ s = strings.ReplaceAll(s, "\n", "|n")
+ s = strings.ReplaceAll(s, "\r", "|r")
+ s = strings.ReplaceAll(s, "[", "|[")
+ s = strings.ReplaceAll(s, "]", "|]")
+ return s
+}
+
+func GenerateTeamcityReport(report types.Report, dst string) error {
+ if err := os.MkdirAll(path.Dir(dst), 0770); err != nil {
+ return err
+ }
+ f, err := os.Create(dst)
+ if err != nil {
+ return err
+ }
+
+ name := report.SuiteDescription
+ labels := report.SuiteLabels
+ if len(labels) > 0 {
+ name = name + " [" + strings.Join(labels, ", ") + "]"
+ }
+ fmt.Fprintf(f, "##teamcity[testSuiteStarted name='%s']\n", tcEscape(name))
+ for _, spec := range report.SpecReports {
+ name := fmt.Sprintf("[%s]", spec.LeafNodeType)
+ if spec.FullText() != "" {
+ name = name + " " + spec.FullText()
+ }
+ labels := spec.Labels()
+ if len(labels) > 0 {
+ name = name + " [" + strings.Join(labels, ", ") + "]"
+ }
+
+ name = tcEscape(name)
+ fmt.Fprintf(f, "##teamcity[testStarted name='%s']\n", name)
+ switch spec.State {
+ case types.SpecStatePending:
+ fmt.Fprintf(f, "##teamcity[testIgnored name='%s' message='pending']\n", name)
+ case types.SpecStateSkipped:
+ message := "skipped"
+ if spec.Failure.Message != "" {
+ message += " - " + spec.Failure.Message
+ }
+ fmt.Fprintf(f, "##teamcity[testIgnored name='%s' message='%s']\n", name, tcEscape(message))
+ case types.SpecStateFailed:
+ details := failureDescriptionForUnstructuredReporters(spec)
+ fmt.Fprintf(f, "##teamcity[testFailed name='%s' message='failed - %s' details='%s']\n", name, tcEscape(spec.Failure.Message), tcEscape(details))
+ case types.SpecStatePanicked:
+ details := failureDescriptionForUnstructuredReporters(spec)
+ fmt.Fprintf(f, "##teamcity[testFailed name='%s' message='panicked - %s' details='%s']\n", name, tcEscape(spec.Failure.ForwardedPanic), tcEscape(details))
+ case types.SpecStateTimedout:
+ details := failureDescriptionForUnstructuredReporters(spec)
+ fmt.Fprintf(f, "##teamcity[testFailed name='%s' message='timedout - %s' details='%s']\n", name, tcEscape(spec.Failure.Message), tcEscape(details))
+ case types.SpecStateInterrupted:
+ details := failureDescriptionForUnstructuredReporters(spec)
+ fmt.Fprintf(f, "##teamcity[testFailed name='%s' message='interrupted - %s' details='%s']\n", name, tcEscape(spec.Failure.Message), tcEscape(details))
+ case types.SpecStateAborted:
+ details := failureDescriptionForUnstructuredReporters(spec)
+ fmt.Fprintf(f, "##teamcity[testFailed name='%s' message='aborted - %s' details='%s']\n", name, tcEscape(spec.Failure.Message), tcEscape(details))
+ }
+
+ fmt.Fprintf(f, "##teamcity[testStdOut name='%s' out='%s']\n", name, tcEscape(systemOutForUnstructuredReporters(spec)))
+ fmt.Fprintf(f, "##teamcity[testStdErr name='%s' out='%s']\n", name, tcEscape(systemErrForUnstructuredReporters(spec)))
+ fmt.Fprintf(f, "##teamcity[testFinished name='%s' duration='%d']\n", name, int(spec.RunTime.Seconds()*1000.0))
+ }
+ fmt.Fprintf(f, "##teamcity[testSuiteFinished name='%s']\n", tcEscape(report.SuiteDescription))
+
+ return f.Close()
+}
+
+func MergeAndCleanupTeamcityReports(sources []string, dst string) ([]string, error) {
+ messages := []string{}
+ merged := []byte{}
+ for _, source := range sources {
+ data, err := os.ReadFile(source)
+ if err != nil {
+ messages = append(messages, fmt.Sprintf("Could not open %s:\n%s", source, err.Error()))
+ continue
+ }
+ os.Remove(source)
+ merged = append(merged, data...)
+ }
+ return messages, os.WriteFile(dst, merged, 0666)
+}
diff --git a/vendor/github.com/onsi/ginkgo/v2/reporting_dsl.go b/vendor/github.com/onsi/ginkgo/v2/reporting_dsl.go
new file mode 100644
index 000000000..f33786a2d
--- /dev/null
+++ b/vendor/github.com/onsi/ginkgo/v2/reporting_dsl.go
@@ -0,0 +1,182 @@
+package ginkgo
+
+import (
+ "fmt"
+ "strings"
+
+ "github.com/onsi/ginkgo/v2/internal"
+ "github.com/onsi/ginkgo/v2/internal/global"
+ "github.com/onsi/ginkgo/v2/reporters"
+ "github.com/onsi/ginkgo/v2/types"
+)
+
+/*
+Report represents the report for a Suite.
+It is documented here: https://pkg.go.dev/github.com/onsi/ginkgo/v2/types#Report
+*/
+type Report = types.Report
+
+/*
+SpecReport represents the report for a Spec.
+It is documented here: https://pkg.go.dev/github.com/onsi/ginkgo/v2/types#SpecReport
+*/
+type SpecReport = types.SpecReport
+
+/*
+CurrentSpecReport returns information about the current running spec.
+The returned object is a types.SpecReport which includes helper methods
+to make extracting information about the spec easier.
+
+You can learn more about SpecReport here: https://pkg.go.dev/github.com/onsi/ginkgo/types#SpecReport
+You can learn more about CurrentSpecReport() here: https://onsi.github.io/ginkgo/#getting-a-report-for-the-current-spec
+*/
+func CurrentSpecReport() SpecReport {
+ return global.Suite.CurrentSpecReport()
+}
+
+/*
+ ReportEntryVisibility governs the visibility of ReportEntries in Ginkgo's console reporter
+
+- ReportEntryVisibilityAlways: the default behavior - the ReportEntry is always emitted.
+- ReportEntryVisibilityFailureOrVerbose: the ReportEntry is only emitted if the spec fails or if the tests are run with -v (similar to GinkgoWriters behavior).
+- ReportEntryVisibilityNever: the ReportEntry is never emitted though it appears in any generated machine-readable reports (e.g. by setting `--json-report`).
+
+You can learn more about Report Entries here: https://onsi.github.io/ginkgo/#attaching-data-to-reports
+*/
+type ReportEntryVisibility = types.ReportEntryVisibility
+
+const ReportEntryVisibilityAlways, ReportEntryVisibilityFailureOrVerbose, ReportEntryVisibilityNever = types.ReportEntryVisibilityAlways, types.ReportEntryVisibilityFailureOrVerbose, types.ReportEntryVisibilityNever
+
+/*
+AddReportEntry generates and adds a new ReportEntry to the current spec's SpecReport.
+It can take any of the following arguments:
+ - A single arbitrary object to attach as the Value of the ReportEntry. This object will be included in any generated reports and will be emitted to the console when the report is emitted.
+ - A ReportEntryVisibility enum to control the visibility of the ReportEntry
+ - An Offset or CodeLocation decoration to control the reported location of the ReportEntry
+
+If the Value object implements `fmt.Stringer`, its `String()` representation is used when emitting to the console.
+
+AddReportEntry() must be called within a Subject or Setup node - not in a Container node.
+
+You can learn more about Report Entries here: https://onsi.github.io/ginkgo/#attaching-data-to-reports
+*/
+func AddReportEntry(name string, args ...interface{}) {
+ cl := types.NewCodeLocation(1)
+ reportEntry, err := internal.NewReportEntry(name, cl, args...)
+ if err != nil {
+ Fail(fmt.Sprintf("Failed to generate Report Entry:\n%s", err.Error()), 1)
+ }
+ err = global.Suite.AddReportEntry(reportEntry)
+ if err != nil {
+ Fail(fmt.Sprintf("Failed to add Report Entry:\n%s", err.Error()), 1)
+ }
+}
+
+/*
+ReportBeforeEach nodes are run for each spec, even if the spec is skipped or pending. ReportBeforeEach nodes take a function that
+receives a SpecReport. They are called before the spec starts.
+
+You cannot nest any other Ginkgo nodes within a ReportBeforeEach node's closure.
+You can learn more about ReportBeforeEach here: https://onsi.github.io/ginkgo/#generating-reports-programmatically
+*/
+func ReportBeforeEach(body func(SpecReport), args ...interface{}) bool {
+ combinedArgs := []interface{}{body}
+ combinedArgs = append(combinedArgs, args...)
+
+ return pushNode(internal.NewNode(deprecationTracker, types.NodeTypeReportBeforeEach, "", combinedArgs...))
+}
+
+/*
+ReportAfterEach nodes are run for each spec, even if the spec is skipped or pending. ReportAfterEach nodes take a function that
+receives a SpecReport. They are called after the spec has completed and receive the final report for the spec.
+
+You cannot nest any other Ginkgo nodes within a ReportAfterEach node's closure.
+You can learn more about ReportAfterEach here: https://onsi.github.io/ginkgo/#generating-reports-programmatically
+*/
+func ReportAfterEach(body func(SpecReport), args ...interface{}) bool {
+ combinedArgs := []interface{}{body}
+ combinedArgs = append(combinedArgs, args...)
+
+ return pushNode(internal.NewNode(deprecationTracker, types.NodeTypeReportAfterEach, "", combinedArgs...))
+}
+
+/*
+ReportBeforeSuite nodes are run at the beginning of the suite. ReportBeforeSuite nodes take a function that receives a suite Report.
+
+They are called at the beginning of the suite, before any specs have run and any BeforeSuite or SynchronizedBeforeSuite nodes, and are passed in the initial report for the suite.
+ReportBeforeSuite nodes must be created at the top-level (i.e. not nested in a Context/Describe/When node)
+
+# When running in parallel, Ginkgo ensures that only one of the parallel nodes runs the ReportBeforeSuite
+
+You cannot nest any other Ginkgo nodes within a ReportBeforeSuite node's closure.
+You can learn more about ReportBeforeSuite here: https://onsi.github.io/ginkgo/#generating-reports-programmatically
+
+You can learn more about Ginkgo's reporting infrastructure, including generating reports with the CLI here: https://onsi.github.io/ginkgo/#generating-machine-readable-reports
+*/
+func ReportBeforeSuite(body func(Report), args ...interface{}) bool {
+ combinedArgs := []interface{}{body}
+ combinedArgs = append(combinedArgs, args...)
+ return pushNode(internal.NewNode(deprecationTracker, types.NodeTypeReportBeforeSuite, "", combinedArgs...))
+}
+
+/*
+ReportAfterSuite nodes are run at the end of the suite. ReportAfterSuite nodes take a function that receives a suite Report.
+
+They are called at the end of the suite, after all specs have run and any AfterSuite or SynchronizedAfterSuite nodes, and are passed in the final report for the suite.
+ReportAfterSuite nodes must be created at the top-level (i.e. not nested in a Context/Describe/When node)
+
+When running in parallel, Ginkgo ensures that only one of the parallel nodes runs the ReportAfterSuite and that it is passed a report that is aggregated across
+all parallel nodes
+
+In addition to using ReportAfterSuite to programmatically generate suite reports, you can also generate JSON, JUnit, and Teamcity formatted reports using the --json-report, --junit-report, and --teamcity-report ginkgo CLI flags.
+
+You cannot nest any other Ginkgo nodes within a ReportAfterSuite node's closure.
+You can learn more about ReportAfterSuite here: https://onsi.github.io/ginkgo/#generating-reports-programmatically
+
+You can learn more about Ginkgo's reporting infrastructure, including generating reports with the CLI here: https://onsi.github.io/ginkgo/#generating-machine-readable-reports
+*/
+func ReportAfterSuite(text string, body func(Report), args ...interface{}) bool {
+ combinedArgs := []interface{}{body}
+ combinedArgs = append(combinedArgs, args...)
+ return pushNode(internal.NewNode(deprecationTracker, types.NodeTypeReportAfterSuite, text, combinedArgs...))
+}
+
+func registerReportAfterSuiteNodeForAutogeneratedReports(reporterConfig types.ReporterConfig) {
+ body := func(report Report) {
+ if reporterConfig.JSONReport != "" {
+ err := reporters.GenerateJSONReport(report, reporterConfig.JSONReport)
+ if err != nil {
+ Fail(fmt.Sprintf("Failed to generate JSON report:\n%s", err.Error()))
+ }
+ }
+ if reporterConfig.JUnitReport != "" {
+ err := reporters.GenerateJUnitReport(report, reporterConfig.JUnitReport)
+ if err != nil {
+ Fail(fmt.Sprintf("Failed to generate JUnit report:\n%s", err.Error()))
+ }
+ }
+ if reporterConfig.TeamcityReport != "" {
+ err := reporters.GenerateTeamcityReport(report, reporterConfig.TeamcityReport)
+ if err != nil {
+ Fail(fmt.Sprintf("Failed to generate Teamcity report:\n%s", err.Error()))
+ }
+ }
+ }
+
+ flags := []string{}
+ if reporterConfig.JSONReport != "" {
+ flags = append(flags, "--json-report")
+ }
+ if reporterConfig.JUnitReport != "" {
+ flags = append(flags, "--junit-report")
+ }
+ if reporterConfig.TeamcityReport != "" {
+ flags = append(flags, "--teamcity-report")
+ }
+ pushNode(internal.NewNode(
+ deprecationTracker, types.NodeTypeReportAfterSuite,
+ fmt.Sprintf("Autogenerated ReportAfterSuite for %s", strings.Join(flags, " ")),
+ body,
+ types.NewCustomCodeLocation("autogenerated by Ginkgo"),
+ ))
+}
diff --git a/vendor/github.com/onsi/ginkgo/v2/table_dsl.go b/vendor/github.com/onsi/ginkgo/v2/table_dsl.go
new file mode 100644
index 000000000..ac9b7abb5
--- /dev/null
+++ b/vendor/github.com/onsi/ginkgo/v2/table_dsl.go
@@ -0,0 +1,309 @@
+package ginkgo
+
+import (
+ "context"
+ "fmt"
+ "reflect"
+ "strings"
+
+ "github.com/onsi/ginkgo/v2/internal"
+ "github.com/onsi/ginkgo/v2/types"
+)
+
+/*
+The EntryDescription decorator allows you to pass a format string to DescribeTable() and Entry(). This format string is used to generate entry names via:
+
+ fmt.Sprintf(formatString, parameters...)
+
+where parameters are the parameters passed into the entry.
+
+When passed into an Entry the EntryDescription is used to generate the name of that entry. When passed to DescribeTable, the EntryDescription is used to generate the names for any entries that have `nil` descriptions.
+
+You can learn more about generating EntryDescriptions here: https://onsi.github.io/ginkgo/#generating-entry-descriptions
+*/
type EntryDescription string

// render produces an entry name by applying the format string to the entry's
// parameters.
func (ed EntryDescription) render(args ...interface{}) string {
	format := string(ed)
	return fmt.Sprintf(format, args...)
}
+
+/*
+DescribeTable describes a table-driven spec.
+
+For example:
+
+ DescribeTable("a simple table",
+ func(x int, y int, expected bool) {
+ Ω(x > y).Should(Equal(expected))
+ },
+ Entry("x > y", 1, 0, true),
+ Entry("x == y", 0, 0, false),
+ Entry("x < y", 0, 1, false),
+ )
+
+You can learn more about DescribeTable here: https://onsi.github.io/ginkgo/#table-specs
+And can explore some Table patterns here: https://onsi.github.io/ginkgo/#table-specs-patterns
+*/
func DescribeTable(description string, args ...interface{}) bool {
	GinkgoHelper()
	// args carry the table body function, the Entries, and any decorators;
	// generateTable parses them and registers a container node with one It
	// per entry.
	generateTable(description, args...)
	return true
}
+
+/*
+You can focus a table with `FDescribeTable`. This is equivalent to `FDescribe`.
+*/
+func FDescribeTable(description string, args ...interface{}) bool {
+ GinkgoHelper()
+ args = append(args, internal.Focus)
+ generateTable(description, args...)
+ return true
+}
+
+/*
+You can mark a table as pending with `PDescribeTable`. This is equivalent to `PDescribe`.
+*/
+func PDescribeTable(description string, args ...interface{}) bool {
+ GinkgoHelper()
+ args = append(args, internal.Pending)
+ generateTable(description, args...)
+ return true
+}
+
+/*
+You can mark a table as pending with `XDescribeTable`. This is equivalent to `XDescribe`.
+*/
+var XDescribeTable = PDescribeTable
+
/*
TableEntry represents an entry in a table test. You generally use the `Entry` constructor
(or its FEntry/PEntry/XEntry variants) rather than building one directly.
*/
type TableEntry struct {
	description interface{} // nil, string, EntryDescription, or func(...) string
	decorations []interface{} // Ginkgo decorators partitioned out of the Entry args
	parameters []interface{} // values passed to the table body function
	codeLocation types.CodeLocation // where the Entry was declared
}
+
+/*
+Entry constructs a TableEntry.
+
+The first argument is a description. This can be a string, a function that accepts the parameters passed to the TableEntry and returns a string, an EntryDescription format string, or nil. If nil is provided then the name of the Entry is derived using the table-level entry description.
+Subsequent arguments accept any Ginkgo decorators. These are filtered out and the remaining arguments are passed into the Spec function associated with the table.
+
+Each Entry ends up generating an individual Ginkgo It. The body of the it is the Table Body function with the Entry parameters passed in.
+
+If you want to generate interruptible specs simply write a Table function that accepts a SpecContext as its first argument. You can then decorate individual Entrys with the NodeTimeout and SpecTimeout decorators.
+
+You can learn more about Entry here: https://onsi.github.io/ginkgo/#table-specs
+*/
+func Entry(description interface{}, args ...interface{}) TableEntry {
+ GinkgoHelper()
+ decorations, parameters := internal.PartitionDecorations(args...)
+ return TableEntry{description: description, decorations: decorations, parameters: parameters, codeLocation: types.NewCodeLocation(0)}
+}
+
+/*
+You can focus a particular entry with FEntry. This is equivalent to FIt.
+*/
+func FEntry(description interface{}, args ...interface{}) TableEntry {
+ GinkgoHelper()
+ decorations, parameters := internal.PartitionDecorations(args...)
+ decorations = append(decorations, internal.Focus)
+ return TableEntry{description: description, decorations: decorations, parameters: parameters, codeLocation: types.NewCodeLocation(0)}
+}
+
+/*
+You can mark a particular entry as pending with PEntry. This is equivalent to PIt.
+*/
+func PEntry(description interface{}, args ...interface{}) TableEntry {
+ GinkgoHelper()
+ decorations, parameters := internal.PartitionDecorations(args...)
+ decorations = append(decorations, internal.Pending)
+ return TableEntry{description: description, decorations: decorations, parameters: parameters, codeLocation: types.NewCodeLocation(0)}
+}
+
+/*
+You can mark a particular entry as pending with XEntry. This is equivalent to XIt.
+*/
+var XEntry = PEntry
+
// Cached reflect.Types used to detect whether a table body function accepts a
// context.Context or Ginkgo SpecContext as its first argument.
var contextType = reflect.TypeOf(new(context.Context)).Elem()
var specContextType = reflect.TypeOf(new(SpecContext)).Elem()
+
+func generateTable(description string, args ...interface{}) {
+ GinkgoHelper()
+ cl := types.NewCodeLocation(0)
+ containerNodeArgs := []interface{}{cl}
+
+ entries := []TableEntry{}
+ var itBody interface{}
+ var itBodyType reflect.Type
+
+ var tableLevelEntryDescription interface{}
+ tableLevelEntryDescription = func(args ...interface{}) string {
+ out := []string{}
+ for _, arg := range args {
+ out = append(out, fmt.Sprint(arg))
+ }
+ return "Entry: " + strings.Join(out, ", ")
+ }
+
+ if len(args) == 1 {
+ exitIfErr(types.GinkgoErrors.MissingParametersForTableFunction(cl))
+ }
+
+ for i, arg := range args {
+ switch t := reflect.TypeOf(arg); {
+ case t == nil:
+ exitIfErr(types.GinkgoErrors.IncorrectParameterTypeForTable(i, "nil", cl))
+ case t == reflect.TypeOf(TableEntry{}):
+ entries = append(entries, arg.(TableEntry))
+ case t == reflect.TypeOf([]TableEntry{}):
+ entries = append(entries, arg.([]TableEntry)...)
+ case t == reflect.TypeOf(EntryDescription("")):
+ tableLevelEntryDescription = arg.(EntryDescription).render
+ case t.Kind() == reflect.Func && t.NumOut() == 1 && t.Out(0) == reflect.TypeOf(""):
+ tableLevelEntryDescription = arg
+ case t.Kind() == reflect.Func:
+ if itBody != nil {
+ exitIfErr(types.GinkgoErrors.MultipleEntryBodyFunctionsForTable(cl))
+ }
+ itBody = arg
+ itBodyType = reflect.TypeOf(itBody)
+ default:
+ containerNodeArgs = append(containerNodeArgs, arg)
+ }
+ }
+
+ containerNodeArgs = append(containerNodeArgs, func() {
+ for _, entry := range entries {
+ var err error
+ entry := entry
+ var description string
+ switch t := reflect.TypeOf(entry.description); {
+ case t == nil:
+ err = validateParameters(tableLevelEntryDescription, entry.parameters, "Entry Description function", entry.codeLocation, false)
+ if err == nil {
+ description = invokeFunction(tableLevelEntryDescription, entry.parameters)[0].String()
+ }
+ case t == reflect.TypeOf(EntryDescription("")):
+ description = entry.description.(EntryDescription).render(entry.parameters...)
+ case t == reflect.TypeOf(""):
+ description = entry.description.(string)
+ case t.Kind() == reflect.Func && t.NumOut() == 1 && t.Out(0) == reflect.TypeOf(""):
+ err = validateParameters(entry.description, entry.parameters, "Entry Description function", entry.codeLocation, false)
+ if err == nil {
+ description = invokeFunction(entry.description, entry.parameters)[0].String()
+ }
+ default:
+ err = types.GinkgoErrors.InvalidEntryDescription(entry.codeLocation)
+ }
+
+ itNodeArgs := []interface{}{entry.codeLocation}
+ itNodeArgs = append(itNodeArgs, entry.decorations...)
+
+ hasContext := false
+ if itBodyType.NumIn() > 0. {
+ if itBodyType.In(0).Implements(specContextType) {
+ hasContext = true
+ } else if itBodyType.In(0).Implements(contextType) && (len(entry.parameters) == 0 || !reflect.TypeOf(entry.parameters[0]).Implements(contextType)) {
+ hasContext = true
+ }
+ }
+
+ if err == nil {
+ err = validateParameters(itBody, entry.parameters, "Table Body function", entry.codeLocation, hasContext)
+ }
+
+ if hasContext {
+ itNodeArgs = append(itNodeArgs, func(c SpecContext) {
+ if err != nil {
+ panic(err)
+ }
+ invokeFunction(itBody, append([]interface{}{c}, entry.parameters...))
+ })
+ } else {
+ itNodeArgs = append(itNodeArgs, func() {
+ if err != nil {
+ panic(err)
+ }
+ invokeFunction(itBody, entry.parameters)
+ })
+ }
+
+ pushNode(internal.NewNode(deprecationTracker, types.NodeTypeIt, description, itNodeArgs...))
+ }
+ })
+
+ pushNode(internal.NewNode(deprecationTracker, types.NodeTypeContainer, description, containerNodeArgs...))
+}
+
+func invokeFunction(function interface{}, parameters []interface{}) []reflect.Value {
+ inValues := make([]reflect.Value, len(parameters))
+
+ funcType := reflect.TypeOf(function)
+ limit := funcType.NumIn()
+ if funcType.IsVariadic() {
+ limit = limit - 1
+ }
+
+ for i := 0; i < limit && i < len(parameters); i++ {
+ inValues[i] = computeValue(parameters[i], funcType.In(i))
+ }
+
+ if funcType.IsVariadic() {
+ variadicType := funcType.In(limit).Elem()
+ for i := limit; i < len(parameters); i++ {
+ inValues[i] = computeValue(parameters[i], variadicType)
+ }
+ }
+
+ return reflect.ValueOf(function).Call(inValues)
+}
+
// validateParameters checks that an entry's parameters are compatible with
// function's signature before invokeFunction calls it via reflection,
// returning a Ginkgo table error (attributed to cl) on an arity or type
// mismatch, or nil when the call is safe.
//
// kind names the function's role (e.g. "Table Body function") for error
// messages. When hasContext is true, function's first input is a
// Ginkgo-supplied (Spec)Context rather than an entry parameter, so expected
// positions are shifted by one.
func validateParameters(function interface{}, parameters []interface{}, kind string, cl types.CodeLocation, hasContext bool) error {
	funcType := reflect.TypeOf(function)
	limit := funcType.NumIn()
	offset := 0
	if hasContext {
		// The context slot is not filled from parameters: it neither counts
		// toward the required count nor lines up with parameters[0].
		limit = limit - 1
		offset = 1
	}
	if funcType.IsVariadic() {
		// The variadic slot may legally receive zero values, so it is not
		// part of the required minimum.
		limit = limit - 1
	}
	if len(parameters) < limit {
		return types.GinkgoErrors.TooFewParametersToTableFunction(limit, len(parameters), kind, cl)
	}
	if len(parameters) > limit && !funcType.IsVariadic() {
		return types.GinkgoErrors.TooManyParametersToTableFunction(limit, len(parameters), kind, cl)
	}
	var i = 0
	for ; i < limit; i++ {
		actual := reflect.TypeOf(parameters[i])
		expected := funcType.In(i + offset)
		// A nil parameter (actual == nil) is accepted for any expected type;
		// invokeFunction/computeValue substitute the type's zero value.
		if !(actual == nil) && !actual.AssignableTo(expected) {
			return types.GinkgoErrors.IncorrectParameterTypeToTableFunction(i+1, expected, actual, kind, cl)
		}
	}
	if funcType.IsVariadic() {
		// Remaining parameters must each be assignable to the variadic
		// element type.
		expected := funcType.In(limit + offset).Elem()
		for ; i < len(parameters); i++ {
			actual := reflect.TypeOf(parameters[i])
			if !(actual == nil) && !actual.AssignableTo(expected) {
				return types.GinkgoErrors.IncorrectVariadicParameterTypeToTableFunction(expected, actual, kind, cl)
			}
		}
	}

	return nil
}
+
+func computeValue(parameter interface{}, t reflect.Type) reflect.Value {
+ if parameter == nil {
+ return reflect.Zero(t)
+ } else {
+ return reflect.ValueOf(parameter)
+ }
+}
diff --git a/vendor/github.com/onsi/ginkgo/v2/types/code_location.go b/vendor/github.com/onsi/ginkgo/v2/types/code_location.go
new file mode 100644
index 000000000..9cd576817
--- /dev/null
+++ b/vendor/github.com/onsi/ginkgo/v2/types/code_location.go
@@ -0,0 +1,159 @@
+package types
+
+import (
+ "fmt"
+ "os"
+ "regexp"
+ "runtime"
+ "runtime/debug"
+ "strings"
+ "sync"
+)
+
// CodeLocation captures a file/line position within the suite, optionally
// carrying a full stack trace or a custom display message.
type CodeLocation struct {
	FileName       string `json:",omitempty"`
	LineNumber     int    `json:",omitempty"`
	FullStackTrace string `json:",omitempty"`
	CustomMessage  string `json:",omitempty"`
}

// String renders the location as "file:line" — unless a CustomMessage was
// set, in which case the message is returned verbatim.
func (codeLocation CodeLocation) String() string {
	if message := codeLocation.CustomMessage; message != "" {
		return message
	}
	return fmt.Sprintf("%s:%d", codeLocation.FileName, codeLocation.LineNumber)
}

// ContentsOfLine returns the source text of the line this location points at.
// It returns "" for custom-message locations, when the file cannot be read,
// or when the line number is beyond the end of the file.
func (codeLocation CodeLocation) ContentsOfLine() string {
	if codeLocation.CustomMessage != "" {
		return ""
	}
	contents, err := os.ReadFile(codeLocation.FileName)
	if err != nil {
		return ""
	}
	lines := strings.Split(string(contents), "\n")
	if codeLocation.LineNumber > len(lines) {
		return ""
	}
	return lines[codeLocation.LineNumber-1]
}
+
// codeLocationLocator tracks which functions have been registered as helpers
// (via MarkAsHelper) so getCodeLocation can skip over them when attributing a
// code location to user code.
type codeLocationLocator struct {
	pcs map[uintptr]bool // program counters already processed by addHelper
	helpers map[string]bool // fully-qualified function names to skip
	lock *sync.Mutex // guards pcs and helpers
}

// addHelper records the function containing pc as a helper. Safe for
// concurrent use.
func (c *codeLocationLocator) addHelper(pc uintptr) {
	c.lock.Lock()
	defer c.lock.Unlock()

	if c.pcs[pc] {
		return
	}
	// Deliberately drop the lock while resolving the function so the maps are
	// not held across the runtime.FuncForPC call; the deferred Unlock pairs
	// with the re-Lock below. Two goroutines may race to resolve the same pc,
	// which is benign: both write the same values.
	c.lock.Unlock()
	f := runtime.FuncForPC(pc)
	c.lock.Lock()
	if f == nil {
		return
	}
	c.helpers[f.Name()] = true
	c.pcs[pc] = true
}

// hasHelper reports whether the named function has been registered as a helper.
func (c *codeLocationLocator) hasHelper(name string) bool {
	c.lock.Lock()
	defer c.lock.Unlock()
	return c.helpers[name]
}

// getCodeLocation walks up the call stack — past skip frames, this function,
// and runtime.Callers itself — and returns the first frame that is not a
// registered helper. It returns the zero CodeLocation when the stack cannot
// be read or every inspected frame is a helper.
func (c *codeLocationLocator) getCodeLocation(skip int) CodeLocation {
	pc := make([]uintptr, 40) // fixed cap on how far up the stack we look
	n := runtime.Callers(skip+2, pc)
	if n == 0 {
		return CodeLocation{}
	}
	pc = pc[:n]
	frames := runtime.CallersFrames(pc)
	for {
		frame, more := frames.Next()
		if !c.hasHelper(frame.Function) {
			return CodeLocation{FileName: frame.File, LineNumber: frame.Line}
		}
		if !more {
			break
		}
	}
	return CodeLocation{}
}

// clLocator is the package-wide helper registry shared by MarkAsHelper and
// the NewCodeLocation constructors below.
var clLocator = &codeLocationLocator{
	pcs: map[uintptr]bool{},
	helpers: map[string]bool{},
	lock: &sync.Mutex{},
}
+
// MarkAsHelper is used by GinkgoHelper to mark the caller (appropriately offset by skip)as a helper. You can use this directly if you need to provide an optional `skip` to mark functions further up the call stack as helpers.
func MarkAsHelper(optionalSkip ...int) {
	// skip starts at 1 so we record our caller, not MarkAsHelper itself.
	skip := 1
	if len(optionalSkip) > 0 {
		skip += optionalSkip[0]
	}
	pc, _, _, ok := runtime.Caller(skip)
	if ok {
		clLocator.addHelper(pc)
	}
}

// NewCustomCodeLocation returns a CodeLocation that renders as message
// instead of a file:line position.
func NewCustomCodeLocation(message string) CodeLocation {
	return CodeLocation{
		CustomMessage: message,
	}
}

// NewCodeLocation returns the CodeLocation of the caller, skipping an
// additional skip frames (and any frames registered as helpers).
func NewCodeLocation(skip int) CodeLocation {
	return clLocator.getCodeLocation(skip + 1)
}

// NewCodeLocationWithStackTrace is like NewCodeLocation but also captures the
// current stack trace, pruned of internal frames via PruneStack.
func NewCodeLocationWithStackTrace(skip int) CodeLocation {
	cl := clLocator.getCodeLocation(skip + 1)
	cl.FullStackTrace = PruneStack(string(debug.Stack()), skip+1)
	return cl
}
+
// PruneStack removes references to functions that are internal to Ginkgo and
// the Go runtime from a stack string, plus a certain number of leading stack
// entries. The input has the format produced by runtime/debug.Stack: an
// optional "goroutine N [...]:" header (always removed if present) followed
// by two lines per entry (function, then tab-indented source location).
// Beware that runtime/debug.Stack adds itself as the first entry, so skip is
// typically >= 1 to remove it. Setting GINKGO_PRUNE_STACK=FALSE disables the
// source-path filtering.
func PruneStack(fullStackTrace string, skip int) string {
	stack := strings.Split(fullStackTrace, "\n")
	// Drop the goroutine header so entries line up as (function, location) pairs.
	if len(stack) > 0 && strings.HasPrefix(stack[0], "goroutine ") {
		stack = stack[1:]
	}
	// Each skipped entry occupies two lines; the "+1" also drops the
	// runtime/debug.Stack entry itself.
	if drop := 2 * (skip + 1); len(stack) > drop {
		stack = stack[drop:]
	}
	if os.Getenv("GINKGO_PRUNE_STACK") == "FALSE" {
		return strings.Join(stack, "\n")
	}
	// Filter pairs whose source-location line points into Ginkgo or the
	// runtime/testing packages.
	re := regexp.MustCompile(`\/ginkgo\/|\/pkg\/testing\/|\/pkg\/runtime\/`)
	prunedStack := []string{}
	for i := 0; i+1 < len(stack); i += 2 {
		if re.MatchString(stack[i+1]) {
			continue
		}
		prunedStack = append(prunedStack, stack[i], stack[i+1])
	}
	return strings.Join(prunedStack, "\n")
}
diff --git a/vendor/github.com/onsi/ginkgo/v2/types/config.go b/vendor/github.com/onsi/ginkgo/v2/types/config.go
new file mode 100644
index 000000000..1014c7b49
--- /dev/null
+++ b/vendor/github.com/onsi/ginkgo/v2/types/config.go
@@ -0,0 +1,757 @@
+/*
+Ginkgo accepts a number of configuration options.
+These are documented [here](http://onsi.github.io/ginkgo/#the-ginkgo-cli)
+*/
+
+package types
+
+import (
+ "flag"
+ "os"
+ "path/filepath"
+ "runtime"
+ "strconv"
+ "strings"
+ "time"
+)
+
// SuiteConfig controls how an individual test suite is run: ordering,
// filtering, failure handling, timeouts, and parallelism.
type SuiteConfig struct {
	RandomSeed            int64
	RandomizeAllSpecs     bool
	FocusStrings          []string
	SkipStrings           []string
	FocusFiles            []string
	SkipFiles             []string
	LabelFilter           string
	FailOnPending         bool
	FailFast              bool
	FlakeAttempts         int
	DryRun                bool
	PollProgressAfter     time.Duration
	PollProgressInterval  time.Duration
	Timeout               time.Duration
	EmitSpecProgress      bool // this is deprecated but its removal is causing compile issue for some users that were setting it manually
	OutputInterceptorMode string
	SourceRoots           []string
	GracePeriod           time.Duration

	ParallelProcess int
	ParallelTotal   int
	ParallelHost    string
}

// NewDefaultSuiteConfig returns a SuiteConfig seeded from the current time,
// with a one-hour suite timeout, a single parallel process, and a 30s
// interrupt grace period. All other fields keep their zero values.
func NewDefaultSuiteConfig() SuiteConfig {
	cfg := SuiteConfig{}
	cfg.RandomSeed = time.Now().Unix()
	cfg.Timeout = time.Hour
	cfg.ParallelProcess = 1
	cfg.ParallelTotal = 1
	cfg.GracePeriod = 30 * time.Second
	return cfg
}
+
// VerbosityLevel expresses how chatty the reporter should be, ordered from
// succinct through very verbose.
type VerbosityLevel uint

const (
	VerbosityLevelSuccinct VerbosityLevel = iota
	VerbosityLevelNormal
	VerbosityLevelVerbose
	VerbosityLevelVeryVerbose
)

// GT reports whether vl is strictly above comp.
func (vl VerbosityLevel) GT(comp VerbosityLevel) bool {
	return comp < vl
}

// GTE reports whether vl is at or above comp.
func (vl VerbosityLevel) GTE(comp VerbosityLevel) bool {
	return comp <= vl
}

// Is reports whether vl equals comp.
func (vl VerbosityLevel) Is(comp VerbosityLevel) bool {
	return comp == vl
}

// LTE reports whether vl is at or below comp.
func (vl VerbosityLevel) LTE(comp VerbosityLevel) bool {
	return !(comp < vl)
}

// LT reports whether vl is strictly below comp.
func (vl VerbosityLevel) LT(comp VerbosityLevel) bool {
	return comp > vl
}

// ReporterConfig configures Ginkgo's reporter: verbosity switches, trace and
// node-event output, and the machine-readable report destinations.
type ReporterConfig struct {
	NoColor        bool
	Succinct       bool
	Verbose        bool
	VeryVerbose    bool
	FullTrace      bool
	ShowNodeEvents bool

	JSONReport     string
	JUnitReport    string
	TeamcityReport string
}

// Verbosity maps the verbosity booleans onto a VerbosityLevel, checking
// Succinct, then Verbose, then VeryVerbose; with none set the level is Normal.
func (rc ReporterConfig) Verbosity() VerbosityLevel {
	switch {
	case rc.Succinct:
		return VerbosityLevelSuccinct
	case rc.Verbose:
		return VerbosityLevelVerbose
	case rc.VeryVerbose:
		return VerbosityLevelVeryVerbose
	default:
		return VerbosityLevelNormal
	}
}

// WillGenerateReport reports whether any machine-readable report destination
// has been configured.
func (rc ReporterConfig) WillGenerateReport() bool {
	for _, destination := range []string{rc.JSONReport, rc.JUnitReport, rc.TeamcityReport} {
		if destination != "" {
			return true
		}
	}
	return false
}

// NewDefaultReporterConfig returns the zero-valued reporter configuration.
func NewDefaultReporterConfig() ReporterConfig {
	return ReporterConfig{}
}
+
// CLIConfig holds configuration for the Ginkgo CLI's build, run, and watch
// commands.
type CLIConfig struct {
	//for build, run, and watch
	Recurse      bool
	SkipPackage  string
	RequireSuite bool
	NumCompilers int

	//for run and watch only
	Procs                     int
	Parallel                  bool
	AfterRunHook              string
	OutputDir                 string
	KeepSeparateCoverprofiles bool
	KeepSeparateReports       bool

	//for run only
	KeepGoing       bool
	UntilItFails    bool
	Repeat          int
	RandomizeSuites bool

	//for watch only
	Depth       int
	WatchRegExp string
}

// NewDefaultCLIConfig returns a CLIConfig with the default watch depth and
// watch file pattern; all other fields keep their zero values.
func NewDefaultCLIConfig() CLIConfig {
	cfg := CLIConfig{}
	cfg.Depth = 1
	cfg.WatchRegExp = `\.go$`
	return cfg
}

// ComputedProcs resolves the number of parallel worker processes: an
// explicitly configured Procs wins; otherwise Parallel mode uses NumCPU
// (minus one on machines with more than four CPUs), and serial runs use one.
func (g CLIConfig) ComputedProcs() int {
	if g.Procs > 0 {
		return g.Procs
	}
	if !g.Parallel {
		return 1
	}
	n := runtime.NumCPU()
	if n > 4 {
		n--
	}
	return n
}

// ComputedNumCompilers resolves the number of concurrent compilers: an
// explicit setting wins, otherwise one per CPU.
func (g CLIConfig) ComputedNumCompilers() int {
	if g.NumCompilers > 0 {
		return g.NumCompilers
	}
	return runtime.NumCPU()
}
+
// Configuration for the Ginkgo CLI capturing available go flags
// A subset of Go flags are exposed by Ginkgo. Some are available at compile time (e.g. ginkgo build) and others only at run time (e.g. ginkgo run - which has both build and run time flags).
// More details can be found at:
// https://docs.google.com/spreadsheets/d/1zkp-DS4hU4sAJl5eHh1UmgwxCPQhf3s5a8fbiOI8tJU/
type GoFlagsConfig struct {
	//build-time flags for code-and-performance analysis
	Race      bool
	Cover     bool
	CoverMode string
	CoverPkg  string
	Vet       string

	//run-time flags for code-and-performance analysis
	BlockProfile         string
	BlockProfileRate     int
	CoverProfile         string
	CPUProfile           string
	MemProfile           string
	MemProfileRate       int
	MutexProfile         string
	MutexProfileFraction int
	Trace                string

	//build-time flags for building
	A             bool
	ASMFlags      string
	BuildMode     string
	Compiler      string
	GCCGoFlags    string
	GCFlags       string
	InstallSuffix string
	LDFlags       string
	LinkShared    bool
	Mod           string
	N             bool
	ModFile       string
	ModCacheRW    bool
	MSan          bool
	PkgDir        string
	Tags          string
	TrimPath      bool
	ToolExec      string
	Work          bool
	X             bool
}

// NewDefaultGoFlagsConfig returns the zero-valued go-flags configuration.
func NewDefaultGoFlagsConfig() GoFlagsConfig {
	var cfg GoFlagsConfig
	return cfg
}

// BinaryMustBePreserved reports whether any profiling output was requested
// that requires keeping the compiled test binary around after the run.
func (g GoFlagsConfig) BinaryMustBePreserved() bool {
	for _, profile := range []string{g.BlockProfile, g.CPUProfile, g.MemProfile, g.MutexProfile} {
		if profile != "" {
			return true
		}
	}
	return false
}
+
// deprecatedConfig collects flag values that were deprecated in 2.0.
// The fields exist so the old flags still parse (they are bound under the
// "D." KeyPath prefix in the flag tables below).
type deprecatedConfig struct {
	DebugParallel bool
	NoisySkippings bool
	NoisyPendings bool
	RegexScansFilePath bool
	SlowSpecThresholdWithFLoatUnits float64
	Stream bool
	Notify bool
	EmitSpecProgress bool
	SlowSpecThreshold time.Duration
	AlwaysEmitGinkgoWriter bool
}
+
// Flags

// Flags sections used by both the CLI and the Ginkgo test process
var FlagSections = GinkgoFlagSections{
	{Key: "multiple-suites", Style: "{{dark-green}}", Heading: "Running Multiple Test Suites"},
	{Key: "order", Style: "{{green}}", Heading: "Controlling Test Order"},
	{Key: "parallel", Style: "{{yellow}}", Heading: "Controlling Test Parallelism"},
	{Key: "low-level-parallel", Style: "{{yellow}}", Heading: "Controlling Test Parallelism",
		Description: "These are set by the Ginkgo CLI, {{red}}{{bold}}do not set them manually{{/}} via go test.\nUse ginkgo -p or ginkgo -procs=N instead."},
	{Key: "filter", Style: "{{cyan}}", Heading: "Filtering Tests"},
	{Key: "failure", Style: "{{red}}", Heading: "Failure Handling"},
	{Key: "output", Style: "{{magenta}}", Heading: "Controlling Output Formatting"},
	{Key: "code-and-coverage-analysis", Style: "{{orange}}", Heading: "Code and Coverage Analysis"},
	{Key: "performance-analysis", Style: "{{coral}}", Heading: "Performance Analysis"},
	{Key: "debug", Style: "{{blue}}", Heading: "Debugging Tests",
		Description: "In addition to these flags, Ginkgo supports a few debugging environment variables. To change the parallel server protocol set {{blue}}GINKGO_PARALLEL_PROTOCOL{{/}} to {{bold}}HTTP{{/}}. To avoid pruning callstacks set {{blue}}GINKGO_PRUNE_STACK{{/}} to {{bold}}FALSE{{/}}."},
	{Key: "watch", Style: "{{light-yellow}}", Heading: "Controlling Ginkgo Watch"},
	{Key: "misc", Style: "{{light-gray}}", Heading: "Miscellaneous"},
	{Key: "go-build", Style: "{{light-gray}}", Heading: "Go Build Flags", Succinct: true,
		Description: "These flags are inherited from go build. Run {{bold}}ginkgo help build{{/}} for more detailed flag documentation."},
}

// SuiteConfigFlags provides flags for the Ginkgo test process, and CLI.
// KeyPath prefixes map to the bindings set up in BuildTestSuiteFlagSet:
// "S." binds into SuiteConfig and "D." into deprecatedConfig.
var SuiteConfigFlags = GinkgoFlags{
	{KeyPath: "S.RandomSeed", Name: "seed", SectionKey: "order", UsageDefaultValue: "randomly generated by Ginkgo",
		Usage: "The seed used to randomize the spec suite."},
	{KeyPath: "S.RandomizeAllSpecs", Name: "randomize-all", SectionKey: "order", DeprecatedName: "randomizeAllSpecs", DeprecatedDocLink: "changed-command-line-flags",
		Usage: "If set, ginkgo will randomize all specs together. By default, ginkgo only randomizes the top level Describe, Context and When containers."},

	{KeyPath: "S.FailOnPending", Name: "fail-on-pending", SectionKey: "failure", DeprecatedName: "failOnPending", DeprecatedDocLink: "changed-command-line-flags",
		Usage: "If set, ginkgo will mark the test suite as failed if any specs are pending."},
	{KeyPath: "S.FailFast", Name: "fail-fast", SectionKey: "failure", DeprecatedName: "failFast", DeprecatedDocLink: "changed-command-line-flags",
		Usage: "If set, ginkgo will stop running a test suite after a failure occurs."},
	{KeyPath: "S.FlakeAttempts", Name: "flake-attempts", SectionKey: "failure", UsageDefaultValue: "0 - failed tests are not retried", DeprecatedName: "flakeAttempts", DeprecatedDocLink: "changed-command-line-flags",
		Usage: "Make up to this many attempts to run each spec. If any of the attempts succeed, the suite will not be failed."},

	{KeyPath: "S.DryRun", Name: "dry-run", SectionKey: "debug", DeprecatedName: "dryRun", DeprecatedDocLink: "changed-command-line-flags",
		Usage: "If set, ginkgo will walk the test hierarchy without actually running anything. Best paired with -v."},
	{KeyPath: "S.PollProgressAfter", Name: "poll-progress-after", SectionKey: "debug", UsageDefaultValue: "0",
		Usage: "Emit node progress reports periodically if node hasn't completed after this duration."},
	{KeyPath: "S.PollProgressInterval", Name: "poll-progress-interval", SectionKey: "debug", UsageDefaultValue: "10s",
		Usage: "The rate at which to emit node progress reports after poll-progress-after has elapsed."},
	{KeyPath: "S.SourceRoots", Name: "source-root", SectionKey: "debug",
		Usage: "The location to look for source code when generating progress reports. You can pass multiple --source-root flags."},
	{KeyPath: "S.Timeout", Name: "timeout", SectionKey: "debug", UsageDefaultValue: "1h",
		Usage: "Test suite fails if it does not complete within the specified timeout."},
	{KeyPath: "S.GracePeriod", Name: "grace-period", SectionKey: "debug", UsageDefaultValue: "30s",
		Usage: "When interrupted, Ginkgo will wait for GracePeriod for the current running node to exit before moving on to the next one."},
	{KeyPath: "S.OutputInterceptorMode", Name: "output-interceptor-mode", SectionKey: "debug", UsageArgument: "dup, swap, or none",
		Usage: "If set, ginkgo will use the specified output interception strategy when running in parallel. Defaults to dup on unix and swap on windows."},

	{KeyPath: "S.LabelFilter", Name: "label-filter", SectionKey: "filter", UsageArgument: "expression",
		Usage: "If set, ginkgo will only run specs with labels that match the label-filter. The passed-in expression can include boolean operations (!, &&, ||, ','), groupings via '()', and regular expressions '/regexp/'. e.g. '(cat || dog) && !fruit'"},
	{KeyPath: "S.FocusStrings", Name: "focus", SectionKey: "filter",
		Usage: "If set, ginkgo will only run specs that match this regular expression. Can be specified multiple times, values are ORed."},
	{KeyPath: "S.SkipStrings", Name: "skip", SectionKey: "filter",
		Usage: "If set, ginkgo will only run specs that do not match this regular expression. Can be specified multiple times, values are ORed."},
	{KeyPath: "S.FocusFiles", Name: "focus-file", SectionKey: "filter", UsageArgument: "file (regexp) | file:line | file:lineA-lineB | file:line,line,line",
		Usage: "If set, ginkgo will only run specs in matching files. Can be specified multiple times, values are ORed."},
	{KeyPath: "S.SkipFiles", Name: "skip-file", SectionKey: "filter", UsageArgument: "file (regexp) | file:line | file:lineA-lineB | file:line,line,line",
		Usage: "If set, ginkgo will skip specs in matching files. Can be specified multiple times, values are ORed."},

	{KeyPath: "D.RegexScansFilePath", DeprecatedName: "regexScansFilePath", DeprecatedDocLink: "removed--regexscansfilepath", DeprecatedVersion: "2.0.0"},
	{KeyPath: "D.DebugParallel", DeprecatedName: "debug", DeprecatedDocLink: "removed--debug", DeprecatedVersion: "2.0.0"},
	{KeyPath: "D.EmitSpecProgress", DeprecatedName: "progress", SectionKey: "debug",
		DeprecatedVersion: "2.5.0", Usage: ". The functionality provided by --progress was confusing and is no longer needed. Use --show-node-events instead to see node entry and exit events included in the timeline of failed and verbose specs. Or you can run with -vv to always see all node events. Lastly, --poll-progress-after and the PollProgressAfter decorator now provide a better mechanism for debugging specs that tend to get stuck."},
}

// ParallelConfigFlags provides flags for the Ginkgo test process (not the CLI).
// These are normally set by the CLI when it launches worker processes.
var ParallelConfigFlags = GinkgoFlags{
	{KeyPath: "S.ParallelProcess", Name: "parallel.process", SectionKey: "low-level-parallel", UsageDefaultValue: "1",
		Usage: "This worker process's (one-indexed) process number. For running specs in parallel."},
	{KeyPath: "S.ParallelTotal", Name: "parallel.total", SectionKey: "low-level-parallel", UsageDefaultValue: "1",
		Usage: "The total number of worker processes. For running specs in parallel."},
	{KeyPath: "S.ParallelHost", Name: "parallel.host", SectionKey: "low-level-parallel", UsageDefaultValue: "set by Ginkgo CLI",
		Usage: "The address for the server that will synchronize the processes."},
}

// ReporterConfigFlags provides flags for the Ginkgo test process, and CLI.
// "R." entries bind into ReporterConfig; "D." entries into deprecatedConfig.
var ReporterConfigFlags = GinkgoFlags{
	{KeyPath: "R.NoColor", Name: "no-color", SectionKey: "output", DeprecatedName: "noColor", DeprecatedDocLink: "changed-command-line-flags",
		Usage: "If set, suppress color output in default reporter."},
	{KeyPath: "R.Verbose", Name: "v", SectionKey: "output",
		Usage: "If set, emits more output including GinkgoWriter contents."},
	{KeyPath: "R.VeryVerbose", Name: "vv", SectionKey: "output",
		Usage: "If set, emits with maximal verbosity - includes skipped and pending tests."},
	{KeyPath: "R.Succinct", Name: "succinct", SectionKey: "output",
		Usage: "If set, default reporter prints out a very succinct report"},
	{KeyPath: "R.FullTrace", Name: "trace", SectionKey: "output",
		Usage: "If set, default reporter prints out the full stack trace when a failure occurs"},
	{KeyPath: "R.ShowNodeEvents", Name: "show-node-events", SectionKey: "output",
		Usage: "If set, default reporter prints node > Enter and < Exit events when specs fail"},

	{KeyPath: "R.JSONReport", Name: "json-report", UsageArgument: "filename.json", SectionKey: "output",
		Usage: "If set, Ginkgo will generate a JSON-formatted test report at the specified location."},
	{KeyPath: "R.JUnitReport", Name: "junit-report", UsageArgument: "filename.xml", SectionKey: "output", DeprecatedName: "reportFile", DeprecatedDocLink: "improved-reporting-infrastructure",
		Usage: "If set, Ginkgo will generate a conformant junit test report in the specified file."},
	{KeyPath: "R.TeamcityReport", Name: "teamcity-report", UsageArgument: "filename", SectionKey: "output",
		Usage: "If set, Ginkgo will generate a Teamcity-formatted test report at the specified location."},

	{KeyPath: "D.SlowSpecThresholdWithFLoatUnits", DeprecatedName: "slowSpecThreshold", DeprecatedDocLink: "changed--slowspecthreshold",
		Usage: "use --slow-spec-threshold instead and pass in a duration string (e.g. '5s', not '5.0')"},
	{KeyPath: "D.NoisyPendings", DeprecatedName: "noisyPendings", DeprecatedDocLink: "removed--noisypendings-and--noisyskippings", DeprecatedVersion: "2.0.0"},
	{KeyPath: "D.NoisySkippings", DeprecatedName: "noisySkippings", DeprecatedDocLink: "removed--noisypendings-and--noisyskippings", DeprecatedVersion: "2.0.0"},
	{KeyPath: "D.SlowSpecThreshold", DeprecatedName: "slow-spec-threshold", SectionKey: "output", Usage: "--slow-spec-threshold has been deprecated and will be removed in a future version of Ginkgo. This feature has proved to be more noisy than useful. You can use --poll-progress-after, instead, to get more actionable feedback about potentially slow specs and understand where they might be getting stuck.", DeprecatedVersion: "2.5.0"},
	{KeyPath: "D.AlwaysEmitGinkgoWriter", DeprecatedName: "always-emit-ginkgo-writer", SectionKey: "output", Usage: " - use -v instead, or one of Ginkgo's machine-readable report formats to get GinkgoWriter output for passing specs."},
}
+
// BuildTestSuiteFlagSet attaches to the CommandLine flagset and provides flags for the Ginkgo test process
func BuildTestSuiteFlagSet(suiteConfig *SuiteConfig, reporterConfig *ReporterConfig) (GinkgoFlagSet, error) {
	flags := SuiteConfigFlags.CopyAppend(ParallelConfigFlags...).CopyAppend(ReporterConfigFlags...)
	flags = flags.WithPrefix("ginkgo")
	// The binding keys match the KeyPath prefixes in the flag tables above:
	// "S" -> SuiteConfig, "R" -> ReporterConfig, "D" -> deprecatedConfig
	// (a throwaway instance: deprecated flags still parse but go nowhere).
	bindings := map[string]interface{}{
		"S": suiteConfig,
		"R": reporterConfig,
		"D": &deprecatedConfig{},
	}
	extraGoFlagsSection := GinkgoFlagSection{Style: "{{gray}}", Heading: "Go test flags"}

	return NewAttachedGinkgoFlagSet(flag.CommandLine, flags, bindings, FlagSections, extraGoFlagsSection)
}
+
// VetConfig validates that the Ginkgo test process' configuration is sound.
// It returns the full list of problems found (empty when the configuration is
// valid) rather than stopping at the first one.
func VetConfig(flagSet GinkgoFlagSet, suiteConfig SuiteConfig, reporterConfig ReporterConfig) []error {
	errors := []error{}

	// go test's -count must be 1 (anything else would re-run the suite
	// outside Ginkgo's control).
	if flagSet.WasSet("count") || flagSet.WasSet("test.count") {
		flag := flagSet.Lookup("count")
		if flag == nil {
			flag = flagSet.Lookup("test.count")
		}
		count, err := strconv.Atoi(flag.Value.String())
		if err != nil || count != 1 {
			errors = append(errors, GinkgoErrors.InvalidGoFlagCount())
		}
	}

	// go test's -parallel flag is rejected outright; Ginkgo manages its own
	// parallelism via the parallel.* flags.
	if flagSet.WasSet("parallel") || flagSet.WasSet("test.parallel") {
		errors = append(errors, GinkgoErrors.InvalidGoFlagParallel())
	}

	// Parallel topology sanity: total >= 1, process index within [1, total],
	// and a sync-server host whenever more than one process is involved.
	if suiteConfig.ParallelTotal < 1 {
		errors = append(errors, GinkgoErrors.InvalidParallelTotalConfiguration())
	}

	if suiteConfig.ParallelProcess > suiteConfig.ParallelTotal || suiteConfig.ParallelProcess < 1 {
		errors = append(errors, GinkgoErrors.InvalidParallelProcessConfiguration())
	}

	if suiteConfig.ParallelTotal > 1 && suiteConfig.ParallelHost == "" {
		errors = append(errors, GinkgoErrors.MissingParallelHostConfiguration())
	}

	if suiteConfig.DryRun && suiteConfig.ParallelTotal > 1 {
		errors = append(errors, GinkgoErrors.DryRunInParallelConfiguration())
	}

	if suiteConfig.GracePeriod <= 0 {
		errors = append(errors, GinkgoErrors.GracePeriodCannotBeZero())
	}

	// File filters and the label filter are parsed here purely to surface
	// syntax errors early; the parsed results are discarded.
	if len(suiteConfig.FocusFiles) > 0 {
		_, err := ParseFileFilters(suiteConfig.FocusFiles)
		if err != nil {
			errors = append(errors, err)
		}
	}

	if len(suiteConfig.SkipFiles) > 0 {
		_, err := ParseFileFilters(suiteConfig.SkipFiles)
		if err != nil {
			errors = append(errors, err)
		}
	}

	if suiteConfig.LabelFilter != "" {
		_, err := ParseLabelFilter(suiteConfig.LabelFilter)
		if err != nil {
			errors = append(errors, err)
		}
	}

	// Only the known interception modes (or empty, meaning the default) are
	// accepted; matching is case-insensitive.
	switch strings.ToLower(suiteConfig.OutputInterceptorMode) {
	case "", "dup", "swap", "none":
	default:
		errors = append(errors, GinkgoErrors.InvalidOutputInterceptorModeConfiguration(suiteConfig.OutputInterceptorMode))
	}

	// At most one of --succinct / -v / -vv may be set.
	numVerbosity := 0
	for _, v := range []bool{reporterConfig.Succinct, reporterConfig.Verbose, reporterConfig.VeryVerbose} {
		if v {
			numVerbosity++
		}
	}
	if numVerbosity > 1 {
		errors = append(errors, GinkgoErrors.ConflictingVerbosityConfiguration())
	}

	return errors
}
+
+// GinkgoCLISharedFlags provides flags shared by the Ginkgo CLI's build, watch, and run commands
+// Each entry's "C." KeyPath binds to a field of the CLIConfig passed to the Build*CommandFlagSet constructors.
+var GinkgoCLISharedFlags = GinkgoFlags{
+ {KeyPath: "C.Recurse", Name: "r", SectionKey: "multiple-suites",
+ Usage: "If set, ginkgo finds and runs test suites under the current directory recursively."},
+ {KeyPath: "C.SkipPackage", Name: "skip-package", SectionKey: "multiple-suites", DeprecatedName: "skipPackage", DeprecatedDocLink: "changed-command-line-flags",
+ UsageArgument: "comma-separated list of packages",
+ Usage: "A comma-separated list of package names to be skipped. If any part of the package's path matches, that package is ignored."},
+ {KeyPath: "C.RequireSuite", Name: "require-suite", SectionKey: "failure", DeprecatedName: "requireSuite", DeprecatedDocLink: "changed-command-line-flags",
+ Usage: "If set, Ginkgo fails if there are ginkgo tests in a directory but no invocation of RunSpecs."},
+ {KeyPath: "C.NumCompilers", Name: "compilers", SectionKey: "multiple-suites", UsageDefaultValue: "0 (will autodetect)",
+ Usage: "When running multiple packages, the number of concurrent compilations to perform."},
+}
+
+// GinkgoCLIRunAndWatchFlags provides flags shared by the Ginkgo CLI's build and watch commands (but not run)
+// The trailing "D." entries are removed v1 flags kept only so old invocations produce a deprecation message.
+var GinkgoCLIRunAndWatchFlags = GinkgoFlags{
+ {KeyPath: "C.Procs", Name: "procs", SectionKey: "parallel", UsageDefaultValue: "1 (run in series)",
+ Usage: "The number of parallel test nodes to run."},
+ {KeyPath: "C.Procs", Name: "nodes", SectionKey: "parallel", UsageDefaultValue: "1 (run in series)",
+ Usage: "--nodes is an alias for --procs"},
+ {KeyPath: "C.Parallel", Name: "p", SectionKey: "parallel",
+ Usage: "If set, ginkgo will run in parallel with an auto-detected number of nodes."},
+ {KeyPath: "C.AfterRunHook", Name: "after-run-hook", SectionKey: "misc", DeprecatedName: "afterSuiteHook", DeprecatedDocLink: "changed-command-line-flags",
+ Usage: "Command to run when a test suite completes."},
+ {KeyPath: "C.OutputDir", Name: "output-dir", SectionKey: "output", UsageArgument: "directory", DeprecatedName: "outputdir", DeprecatedDocLink: "improved-profiling-support",
+ Usage: "A location to place all generated profiles and reports."},
+ {KeyPath: "C.KeepSeparateCoverprofiles", Name: "keep-separate-coverprofiles", SectionKey: "code-and-coverage-analysis",
+ Usage: "If set, Ginkgo does not merge coverprofiles into one monolithic coverprofile. The coverprofiles will remain in their respective package directories or in -output-dir if set."},
+ {KeyPath: "C.KeepSeparateReports", Name: "keep-separate-reports", SectionKey: "output",
+ Usage: "If set, Ginkgo does not merge per-suite reports (e.g. -json-report) into one monolithic report for the entire testrun. The reports will remain in their respective package directories or in -output-dir if set."},
+
+ {KeyPath: "D.Stream", DeprecatedName: "stream", DeprecatedDocLink: "removed--stream", DeprecatedVersion: "2.0.0"},
+ {KeyPath: "D.Notify", DeprecatedName: "notify", DeprecatedDocLink: "removed--notify", DeprecatedVersion: "2.0.0"},
+}
+
+// GinkgoCLIRunFlags provides flags for Ginkgo CLI's run command that aren't shared by any other commands
+var GinkgoCLIRunFlags = GinkgoFlags{
+ {KeyPath: "C.KeepGoing", Name: "keep-going", SectionKey: "multiple-suites", DeprecatedName: "keepGoing", DeprecatedDocLink: "changed-command-line-flags",
+ Usage: "If set, failures from earlier test suites do not prevent later test suites from running."},
+ {KeyPath: "C.UntilItFails", Name: "until-it-fails", SectionKey: "debug", DeprecatedName: "untilItFails", DeprecatedDocLink: "changed-command-line-flags",
+ Usage: "If set, ginkgo will keep rerunning test suites until a failure occurs."},
+ {KeyPath: "C.Repeat", Name: "repeat", SectionKey: "debug", UsageArgument: "n", UsageDefaultValue: "0 - i.e. no repetition, run only once",
+ Usage: "The number of times to re-run a test-suite. Useful for debugging flaky tests. If set to N the suite will be run N+1 times and will be required to pass each time."},
+ {KeyPath: "C.RandomizeSuites", Name: "randomize-suites", SectionKey: "order", DeprecatedName: "randomizeSuites", DeprecatedDocLink: "changed-command-line-flags",
+ Usage: "If set, ginkgo will randomize the order in which test suites run."},
+}
+
+// GinkgoCLIWatchFlags provides flags for Ginkgo CLI's watch command that aren't shared by any other commands
+var GinkgoCLIWatchFlags = GinkgoFlags{
+ {KeyPath: "C.Depth", Name: "depth", SectionKey: "watch",
+ Usage: "Ginkgo will watch dependencies down to this depth in the dependency tree."},
+ {KeyPath: "C.WatchRegExp", Name: "watch-regexp", SectionKey: "watch", DeprecatedName: "watchRegExp", DeprecatedDocLink: "changed-command-line-flags",
+ UsageArgument: "Regular Expression",
+ UsageDefaultValue: `\.go$`,
+ Usage: "Only files matching this regular expression will be watched for changes."},
+}
+
+// GoBuildFlags provides flags for the Ginkgo CLI build, run, and watch commands that capture go's build-time flags. These are passed to go test -c by the ginkgo CLI
+// Each entry's "Go." KeyPath binds to a field of GoFlagsConfig; GenerateGoTestCompileArgs turns the bound values back into go tool arguments.
+var GoBuildFlags = GinkgoFlags{
+ {KeyPath: "Go.Race", Name: "race", SectionKey: "code-and-coverage-analysis",
+ Usage: "enable data race detection. Supported only on linux/amd64, freebsd/amd64, darwin/amd64, windows/amd64, linux/ppc64le and linux/arm64 (only for 48-bit VMA)."},
+ {KeyPath: "Go.Vet", Name: "vet", UsageArgument: "list", SectionKey: "code-and-coverage-analysis",
+ Usage: `Configure the invocation of "go vet" during "go test" to use the comma-separated list of vet checks. If list is empty, "go test" runs "go vet" with a curated list of checks believed to be always worth addressing. If list is "off", "go test" does not run "go vet" at all. Available checks can be found by running 'go doc cmd/vet'`},
+ {KeyPath: "Go.Cover", Name: "cover", SectionKey: "code-and-coverage-analysis",
+ Usage: "Enable coverage analysis. Note that because coverage works by annotating the source code before compilation, compilation and test failures with coverage enabled may report line numbers that don't correspond to the original sources."},
+ {KeyPath: "Go.CoverMode", Name: "covermode", UsageArgument: "set,count,atomic", SectionKey: "code-and-coverage-analysis",
+ Usage: `Set the mode for coverage analysis for the package[s] being tested. 'set': does this statement run? 'count': how many times does this statement run? 'atomic': like count, but correct in multithreaded tests and more expensive (must use atomic with -race). Sets -cover`},
+ {KeyPath: "Go.CoverPkg", Name: "coverpkg", UsageArgument: "pattern1,pattern2,pattern3", SectionKey: "code-and-coverage-analysis",
+ Usage: "Apply coverage analysis in each test to packages matching the patterns. The default is for each test to analyze only the package being tested. See 'go help packages' for a description of package patterns. Sets -cover."},
+
+ {KeyPath: "Go.A", Name: "a", SectionKey: "go-build",
+ Usage: "force rebuilding of packages that are already up-to-date."},
+ {KeyPath: "Go.ASMFlags", Name: "asmflags", UsageArgument: "'[pattern=]arg list'", SectionKey: "go-build",
+ Usage: "arguments to pass on each go tool asm invocation."},
+ {KeyPath: "Go.BuildMode", Name: "buildmode", UsageArgument: "mode", SectionKey: "go-build",
+ Usage: "build mode to use. See 'go help buildmode' for more."},
+ {KeyPath: "Go.Compiler", Name: "compiler", UsageArgument: "name", SectionKey: "go-build",
+ Usage: "name of compiler to use, as in runtime.Compiler (gccgo or gc)."},
+ {KeyPath: "Go.GCCGoFlags", Name: "gccgoflags", UsageArgument: "'[pattern=]arg list'", SectionKey: "go-build",
+ Usage: "arguments to pass on each gccgo compiler/linker invocation."},
+ {KeyPath: "Go.GCFlags", Name: "gcflags", UsageArgument: "'[pattern=]arg list'", SectionKey: "go-build",
+ Usage: "arguments to pass on each go tool compile invocation."},
+ // NOTE(review): the usage text below reads "set to raceor" — it appears to have lost the
+ // formatting of "_race or" somewhere upstream. It is a runtime string, so it is left untouched here.
+ {KeyPath: "Go.InstallSuffix", Name: "installsuffix", SectionKey: "go-build",
+ Usage: "a suffix to use in the name of the package installation directory, in order to keep output separate from default builds. If using the -race flag, the install suffix is automatically set to raceor, if set explicitly, has _race appended to it. Likewise for the -msan flag. Using a -buildmode option that requires non-default compile flags has a similar effect."},
+ {KeyPath: "Go.LDFlags", Name: "ldflags", UsageArgument: "'[pattern=]arg list'", SectionKey: "go-build",
+ Usage: "arguments to pass on each go tool link invocation."},
+ {KeyPath: "Go.LinkShared", Name: "linkshared", SectionKey: "go-build",
+ Usage: "build code that will be linked against shared libraries previously created with -buildmode=shared."},
+ {KeyPath: "Go.Mod", Name: "mod", UsageArgument: "mode (readonly, vendor, or mod)", SectionKey: "go-build",
+ Usage: "module download mode to use: readonly, vendor, or mod. See 'go help modules' for more."},
+ {KeyPath: "Go.ModCacheRW", Name: "modcacherw", SectionKey: "go-build",
+ Usage: "leave newly-created directories in the module cache read-write instead of making them read-only."},
+ {KeyPath: "Go.ModFile", Name: "modfile", UsageArgument: "file", SectionKey: "go-build",
+ Usage: `in module aware mode, read (and possibly write) an alternate go.mod file instead of the one in the module root directory. A file named go.mod must still be present in order to determine the module root directory, but it is not accessed. When -modfile is specified, an alternate go.sum file is also used: its path is derived from the -modfile flag by trimming the ".mod" extension and appending ".sum".`},
+ {KeyPath: "Go.MSan", Name: "msan", SectionKey: "go-build",
+ Usage: "enable interoperation with memory sanitizer. Supported only on linux/amd64, linux/arm64 and only with Clang/LLVM as the host C compiler. On linux/arm64, pie build mode will be used."},
+ {KeyPath: "Go.N", Name: "n", SectionKey: "go-build",
+ Usage: "print the commands but do not run them."},
+ {KeyPath: "Go.PkgDir", Name: "pkgdir", UsageArgument: "dir", SectionKey: "go-build",
+ Usage: "install and load all packages from dir instead of the usual locations. For example, when building with a non-standard configuration, use -pkgdir to keep generated packages in a separate location."},
+ {KeyPath: "Go.Tags", Name: "tags", UsageArgument: "tag,list", SectionKey: "go-build",
+ Usage: "a comma-separated list of build tags to consider satisfied during the build. For more information about build tags, see the description of build constraints in the documentation for the go/build package. (Earlier versions of Go used a space-separated list, and that form is deprecated but still recognized.)"},
+ {KeyPath: "Go.TrimPath", Name: "trimpath", SectionKey: "go-build",
+ Usage: `remove all file system paths from the resulting executable. Instead of absolute file system paths, the recorded file names will begin with either "go" (for the standard library), or a module path@version (when using modules), or a plain import path (when using GOPATH).`},
+ {KeyPath: "Go.ToolExec", Name: "toolexec", UsageArgument: "'cmd args'", SectionKey: "go-build",
+ Usage: "a program to use to invoke toolchain programs like vet and asm. For example, instead of running asm, the go command will run cmd args /path/to/asm '."},
+ {KeyPath: "Go.Work", Name: "work", SectionKey: "go-build",
+ Usage: "print the name of the temporary work directory and do not delete it when exiting."},
+ {KeyPath: "Go.X", Name: "x", SectionKey: "go-build",
+ Usage: "print the commands."},
+}
+
+// GoRunFlags provides flags for the Ginkgo CLI run, and watch commands that capture go's run-time flags. These are passed to the compiled test binary by the ginkgo CLI
+// Note the last entry: Ginkgo exposes go's -trace as --execution-trace (ExportAs: "trace") to avoid clashing with its own flags.
+var GoRunFlags = GinkgoFlags{
+ {KeyPath: "Go.CoverProfile", Name: "coverprofile", UsageArgument: "file", SectionKey: "code-and-coverage-analysis",
+ Usage: `Write a coverage profile to the file after all tests have passed. Sets -cover.`},
+ {KeyPath: "Go.BlockProfile", Name: "blockprofile", UsageArgument: "file", SectionKey: "performance-analysis",
+ Usage: `Write a goroutine blocking profile to the specified file when all tests are complete. Preserves test binary.`},
+ {KeyPath: "Go.BlockProfileRate", Name: "blockprofilerate", UsageArgument: "rate", SectionKey: "performance-analysis",
+ Usage: `Control the detail provided in goroutine blocking profiles by calling runtime.SetBlockProfileRate with rate. See 'go doc runtime.SetBlockProfileRate'. The profiler aims to sample, on average, one blocking event every n nanoseconds the program spends blocked. By default, if -test.blockprofile is set without this flag, all blocking events are recorded, equivalent to -test.blockprofilerate=1.`},
+ {KeyPath: "Go.CPUProfile", Name: "cpuprofile", UsageArgument: "file", SectionKey: "performance-analysis",
+ Usage: `Write a CPU profile to the specified file before exiting. Preserves test binary.`},
+ {KeyPath: "Go.MemProfile", Name: "memprofile", UsageArgument: "file", SectionKey: "performance-analysis",
+ Usage: `Write an allocation profile to the file after all tests have passed. Preserves test binary.`},
+ {KeyPath: "Go.MemProfileRate", Name: "memprofilerate", UsageArgument: "rate", SectionKey: "performance-analysis",
+ Usage: `Enable more precise (and expensive) memory allocation profiles by setting runtime.MemProfileRate. See 'go doc runtime.MemProfileRate'. To profile all memory allocations, use -test.memprofilerate=1.`},
+ {KeyPath: "Go.MutexProfile", Name: "mutexprofile", UsageArgument: "file", SectionKey: "performance-analysis",
+ Usage: `Write a mutex contention profile to the specified file when all tests are complete. Preserves test binary.`},
+ {KeyPath: "Go.MutexProfileFraction", Name: "mutexprofilefraction", UsageArgument: "n", SectionKey: "performance-analysis",
+ Usage: `if >= 0, calls runtime.SetMutexProfileFraction() Sample 1 in n stack traces of goroutines holding a contended mutex.`},
+ {KeyPath: "Go.Trace", Name: "execution-trace", UsageArgument: "file", ExportAs: "trace", SectionKey: "performance-analysis",
+ Usage: `Write an execution trace to the specified file before exiting.`},
+}
+
+// VetAndInitializeCLIAndGoConfig validates that the Ginkgo CLI's configuration is sound
+// It returns a potentially mutated copy of the config that rationalizes the configuration to ensure consistency for downstream consumers
+// Both configs are received and returned by value, so the caller's originals are never mutated.
+func VetAndInitializeCLIAndGoConfig(cliConfig CLIConfig, goFlagsConfig GoFlagsConfig) (CLIConfig, GoFlagsConfig, []error) {
+ errors := []error{}
+
+ // --repeat and --until-it-fails are mutually exclusive.
+ if cliConfig.Repeat > 0 && cliConfig.UntilItFails {
+ errors = append(errors, GinkgoErrors.BothRepeatAndUntilItFails())
+ }
+
+ //initialize the output directory
+ if cliConfig.OutputDir != "" {
+ err := os.MkdirAll(cliConfig.OutputDir, 0777)
+ if err != nil {
+ errors = append(errors, err)
+ }
+ }
+
+ //ensure cover mode is configured appropriately
+ // any coverage-related setting implies Cover, and Cover without a profile gets a default file name
+ if goFlagsConfig.CoverMode != "" || goFlagsConfig.CoverPkg != "" || goFlagsConfig.CoverProfile != "" {
+ goFlagsConfig.Cover = true
+ }
+ if goFlagsConfig.Cover && goFlagsConfig.CoverProfile == "" {
+ goFlagsConfig.CoverProfile = "coverprofile.out"
+ }
+
+ return cliConfig, goFlagsConfig, errors
+}
+
+// GenerateGoTestCompileArgs is used by the Ginkgo CLI to generate command line arguments to pass to the go test -c command when compiling the test
+// destination is the output binary path, packageToBuild the package under test, and pathToInvocationPath
+// the relative path from the package back to where ginkgo was invoked (used to reroot relative -coverpkg entries).
+func GenerateGoTestCompileArgs(goFlagsConfig GoFlagsConfig, destination string, packageToBuild string, pathToInvocationPath string) ([]string, error) {
+ // if the user has set the CoverProfile run-time flag make sure to set the build-time cover flag to make sure
+ // the built test binary can generate a coverprofile
+ if goFlagsConfig.CoverProfile != "" {
+ goFlagsConfig.Cover = true
+ }
+
+ if goFlagsConfig.CoverPkg != "" {
+ coverPkgs := strings.Split(goFlagsConfig.CoverPkg, ",")
+ adjustedCoverPkgs := make([]string, len(coverPkgs))
+ for i, coverPkg := range coverPkgs {
+ coverPkg = strings.Trim(coverPkg, " ")
+ if strings.HasPrefix(coverPkg, "./") {
+ // this is a relative coverPkg - we need to reroot it
+ adjustedCoverPkgs[i] = "./" + filepath.Join(pathToInvocationPath, strings.TrimPrefix(coverPkg, "./"))
+ } else {
+ // this is a package name - don't touch it
+ adjustedCoverPkgs[i] = coverPkg
+ }
+ }
+ goFlagsConfig.CoverPkg = strings.Join(adjustedCoverPkgs, ",")
+ }
+
+ // Base command plus every GoBuildFlags entry whose bound GoFlagsConfig field is non-zero.
+ args := []string{"test", "-c", "-o", destination, packageToBuild}
+ goArgs, err := GenerateFlagArgs(
+ GoBuildFlags,
+ map[string]interface{}{
+ "Go": &goFlagsConfig,
+ },
+ )
+
+ if err != nil {
+ return []string{}, err
+ }
+ args = append(args, goArgs...)
+ return args, nil
+}
+
+// GenerateGinkgoTestRunArgs is used by the Ginkgo CLI to generate command line arguments to pass to the compiled Ginkgo test binary
+// Suite/parallel/reporter flags are emitted as --ginkgo.<flag>; go run-time flags are emitted as --test.<flag>.
+func GenerateGinkgoTestRunArgs(suiteConfig SuiteConfig, reporterConfig ReporterConfig, goFlagsConfig GoFlagsConfig) ([]string, error) {
+ var flags GinkgoFlags
+ flags = SuiteConfigFlags.WithPrefix("ginkgo")
+ flags = flags.CopyAppend(ParallelConfigFlags.WithPrefix("ginkgo")...)
+ flags = flags.CopyAppend(ReporterConfigFlags.WithPrefix("ginkgo")...)
+ flags = flags.CopyAppend(GoRunFlags.WithPrefix("test")...)
+ bindings := map[string]interface{}{
+ "S": &suiteConfig,
+ "R": &reporterConfig,
+ "Go": &goFlagsConfig,
+ }
+
+ return GenerateFlagArgs(flags, bindings)
+}
+
+// GenerateGoTestRunArgs is used by the Ginkgo CLI to generate command line arguments to pass to the compiled non-Ginkgo test binary
+// Only the go run-time flags apply here (prefixed --test.), and --test.v is always appended for verbose output.
+func GenerateGoTestRunArgs(goFlagsConfig GoFlagsConfig) ([]string, error) {
+ flags := GoRunFlags.WithPrefix("test")
+ bindings := map[string]interface{}{
+ "Go": &goFlagsConfig,
+ }
+
+ args, err := GenerateFlagArgs(flags, bindings)
+ if err != nil {
+ return args, err
+ }
+ args = append(args, "--test.v")
+ return args, nil
+}
+
+// BuildRunCommandFlagSet builds the FlagSet for the `ginkgo run` command
+// It aggregates suite, reporter, shared-CLI, run/watch-CLI, run-only-CLI, go build-time, and go run-time flags.
+func BuildRunCommandFlagSet(suiteConfig *SuiteConfig, reporterConfig *ReporterConfig, cliConfig *CLIConfig, goFlagsConfig *GoFlagsConfig) (GinkgoFlagSet, error) {
+ flags := SuiteConfigFlags
+ flags = flags.CopyAppend(ReporterConfigFlags...)
+ flags = flags.CopyAppend(GinkgoCLISharedFlags...)
+ flags = flags.CopyAppend(GinkgoCLIRunAndWatchFlags...)
+ flags = flags.CopyAppend(GinkgoCLIRunFlags...)
+ flags = flags.CopyAppend(GoBuildFlags...)
+ flags = flags.CopyAppend(GoRunFlags...)
+
+ // KeyPath prefixes route each flag to the matching struct; deprecated "D." flags are discarded.
+ bindings := map[string]interface{}{
+ "S": suiteConfig,
+ "R": reporterConfig,
+ "C": cliConfig,
+ "Go": goFlagsConfig,
+ "D": &deprecatedConfig{},
+ }
+
+ return NewGinkgoFlagSet(flags, bindings, FlagSections)
+}
+
+// BuildWatchCommandFlagSet builds the FlagSet for the `ginkgo watch` command
+// Identical to the run command's flag set except it swaps the run-only flags for the watch-only flags.
+func BuildWatchCommandFlagSet(suiteConfig *SuiteConfig, reporterConfig *ReporterConfig, cliConfig *CLIConfig, goFlagsConfig *GoFlagsConfig) (GinkgoFlagSet, error) {
+ flags := SuiteConfigFlags
+ flags = flags.CopyAppend(ReporterConfigFlags...)
+ flags = flags.CopyAppend(GinkgoCLISharedFlags...)
+ flags = flags.CopyAppend(GinkgoCLIRunAndWatchFlags...)
+ flags = flags.CopyAppend(GinkgoCLIWatchFlags...)
+ flags = flags.CopyAppend(GoBuildFlags...)
+ flags = flags.CopyAppend(GoRunFlags...)
+
+ bindings := map[string]interface{}{
+ "S": suiteConfig,
+ "R": reporterConfig,
+ "C": cliConfig,
+ "Go": goFlagsConfig,
+ "D": &deprecatedConfig{},
+ }
+
+ return NewGinkgoFlagSet(flags, bindings, FlagSections)
+}
+
+// BuildBuildCommandFlagSet builds the FlagSet for the `ginkgo build` command
+// Only build-relevant flags apply, and two section headings are retitled for the build context.
+func BuildBuildCommandFlagSet(cliConfig *CLIConfig, goFlagsConfig *GoFlagsConfig) (GinkgoFlagSet, error) {
+ flags := GinkgoCLISharedFlags
+ flags = flags.CopyAppend(GoBuildFlags...)
+
+ bindings := map[string]interface{}{
+ "C": cliConfig,
+ "Go": goFlagsConfig,
+ "D": &deprecatedConfig{},
+ }
+
+ // Work on a copy of FlagSections so the package-level slice is not mutated.
+ flagSections := make(GinkgoFlagSections, len(FlagSections))
+ copy(flagSections, FlagSections)
+ for i := range flagSections {
+ if flagSections[i].Key == "multiple-suites" {
+ flagSections[i].Heading = "Building Multiple Suites"
+ }
+ if flagSections[i].Key == "go-build" {
+ flagSections[i] = GinkgoFlagSection{Key: "go-build", Style: "{{/}}", Heading: "Go Build Flags",
+ Description: "These flags are inherited from go build."}
+ }
+ }
+
+ return NewGinkgoFlagSet(flags, bindings, flagSections)
+}
+
+// BuildLabelsCommandFlagSet builds the FlagSet for the `ginkgo labels` command.
+// Only the -r and --skip-package flags apply, and the multiple-suites heading is retitled.
+func BuildLabelsCommandFlagSet(cliConfig *CLIConfig) (GinkgoFlagSet, error) {
+ flags := GinkgoCLISharedFlags.SubsetWithNames("r", "skip-package")
+
+ bindings := map[string]interface{}{
+ "C": cliConfig,
+ }
+
+ // Work on a copy of FlagSections so the package-level slice is not mutated.
+ flagSections := make(GinkgoFlagSections, len(FlagSections))
+ copy(flagSections, FlagSections)
+ for i := range flagSections {
+ if flagSections[i].Key == "multiple-suites" {
+ flagSections[i].Heading = "Fetching Labels from Multiple Suites"
+ }
+ }
+
+ return NewGinkgoFlagSet(flags, bindings, flagSections)
+}
diff --git a/vendor/github.com/onsi/ginkgo/v2/types/deprecated_types.go b/vendor/github.com/onsi/ginkgo/v2/types/deprecated_types.go
new file mode 100644
index 000000000..17922304b
--- /dev/null
+++ b/vendor/github.com/onsi/ginkgo/v2/types/deprecated_types.go
@@ -0,0 +1,141 @@
+package types
+
+import (
+ "strconv"
+ "time"
+)
+
+/*
+ A set of deprecations to make the transition from v1 to v2 easier for users who have written custom reporters.
+*/
+
+// Type aliases mapping the v1 reporter-facing names onto their v2 Deprecated* equivalents.
+type SuiteSummary = DeprecatedSuiteSummary
+type SetupSummary = DeprecatedSetupSummary
+type SpecSummary = DeprecatedSpecSummary
+type SpecMeasurement = DeprecatedSpecMeasurement
+type SpecComponentType = NodeType
+type SpecFailure = DeprecatedSpecFailure
+
+// v1 SpecComponentType constants, mapped onto the corresponding v2 NodeType values.
+var (
+ SpecComponentTypeInvalid = NodeTypeInvalid
+ SpecComponentTypeContainer = NodeTypeContainer
+ SpecComponentTypeIt = NodeTypeIt
+ SpecComponentTypeBeforeEach = NodeTypeBeforeEach
+ SpecComponentTypeJustBeforeEach = NodeTypeJustBeforeEach
+ SpecComponentTypeAfterEach = NodeTypeAfterEach
+ SpecComponentTypeJustAfterEach = NodeTypeJustAfterEach
+ SpecComponentTypeBeforeSuite = NodeTypeBeforeSuite
+ SpecComponentTypeSynchronizedBeforeSuite = NodeTypeSynchronizedBeforeSuite
+ SpecComponentTypeAfterSuite = NodeTypeAfterSuite
+ SpecComponentTypeSynchronizedAfterSuite = NodeTypeSynchronizedAfterSuite
+)
+
+// DeprecatedSuiteSummary mirrors the v1 SuiteSummary shape, retained to ease migration of custom reporters.
+type DeprecatedSuiteSummary struct {
+ SuiteDescription string
+ SuiteSucceeded bool
+ SuiteID string
+
+ NumberOfSpecsBeforeParallelization int
+ NumberOfTotalSpecs int
+ NumberOfSpecsThatWillBeRun int
+ NumberOfPendingSpecs int
+ NumberOfSkippedSpecs int
+ NumberOfPassedSpecs int
+ NumberOfFailedSpecs int
+ NumberOfFlakedSpecs int
+ RunTime time.Duration
+}
+
+// DeprecatedSetupSummary mirrors the v1 SetupSummary shape, retained to ease migration of custom reporters.
+type DeprecatedSetupSummary struct {
+ ComponentType SpecComponentType
+ CodeLocation CodeLocation
+
+ State SpecState
+ RunTime time.Duration
+ Failure SpecFailure
+
+ CapturedOutput string
+ SuiteID string
+}
+
+// DeprecatedSpecSummary mirrors the v1 SpecSummary shape, retained to ease migration of custom reporters.
+type DeprecatedSpecSummary struct {
+ ComponentTexts []string
+ ComponentCodeLocations []CodeLocation
+
+ State SpecState
+ RunTime time.Duration
+ Failure SpecFailure
+ IsMeasurement bool
+ NumberOfSamples int
+ Measurements map[string]*DeprecatedSpecMeasurement
+
+ CapturedOutput string
+ SuiteID string
+}
+
+// HasFailureState reports whether the spec's state is one of the failure states.
+func (s DeprecatedSpecSummary) HasFailureState() bool {
+ return s.State.Is(SpecStateFailureStates)
+}
+
+// TimedOut always returns false in this v1-compat shim.
+func (s DeprecatedSpecSummary) TimedOut() bool {
+ return false
+}
+
+// Panicked reports whether the spec panicked.
+func (s DeprecatedSpecSummary) Panicked() bool {
+ return s.State == SpecStatePanicked
+}
+
+// Failed reports whether the spec failed (excluding panics and timeouts).
+func (s DeprecatedSpecSummary) Failed() bool {
+ return s.State == SpecStateFailed
+}
+
+// Passed reports whether the spec passed.
+func (s DeprecatedSpecSummary) Passed() bool {
+ return s.State == SpecStatePassed
+}
+
+// Skipped reports whether the spec was skipped.
+func (s DeprecatedSpecSummary) Skipped() bool {
+ return s.State == SpecStateSkipped
+}
+
+// Pending reports whether the spec is pending.
+func (s DeprecatedSpecSummary) Pending() bool {
+ return s.State == SpecStatePending
+}
+
+// DeprecatedSpecFailure mirrors the v1 SpecFailure shape, retained to ease migration of custom reporters.
+type DeprecatedSpecFailure struct {
+ Message string
+ Location CodeLocation
+ ForwardedPanic string
+
+ ComponentIndex int
+ ComponentType SpecComponentType
+ ComponentCodeLocation CodeLocation
+}
+
+// DeprecatedSpecMeasurement mirrors the v1 SpecMeasurement shape, retained to ease migration of custom reporters.
+type DeprecatedSpecMeasurement struct {
+ Name string
+ Info interface{}
+ Order int
+
+ Results []float64
+
+ Smallest float64
+ Largest float64
+ Average float64
+ StdDeviation float64
+
+ SmallestLabel string
+ LargestLabel string
+ AverageLabel string
+ Units string
+ Precision int
+}
+
+// PrecisionFmt returns the printf float verb for this measurement's Precision,
+// e.g. "%.3f" for Precision 3, or plain "%f" when Precision is 0.
+func (s DeprecatedSpecMeasurement) PrecisionFmt() string {
+ if s.Precision == 0 {
+ return "%f"
+ }
+
+ str := strconv.Itoa(s.Precision)
+
+ return "%." + str + "f"
+}
diff --git a/vendor/github.com/onsi/ginkgo/types/deprecation_support.go b/vendor/github.com/onsi/ginkgo/v2/types/deprecation_support.go
similarity index 64%
rename from vendor/github.com/onsi/ginkgo/types/deprecation_support.go
rename to vendor/github.com/onsi/ginkgo/v2/types/deprecation_support.go
index d5a6658f3..e2519f673 100644
--- a/vendor/github.com/onsi/ginkgo/types/deprecation_support.go
+++ b/vendor/github.com/onsi/ginkgo/v2/types/deprecation_support.go
@@ -4,10 +4,10 @@ import (
"os"
"strconv"
"strings"
+ "sync"
"unicode"
- "github.com/onsi/ginkgo/config"
- "github.com/onsi/ginkgo/formatter"
+ "github.com/onsi/ginkgo/v2/formatter"
)
type Deprecation struct {
@@ -22,20 +22,12 @@ var Deprecations = deprecations{}
func (d deprecations) CustomReporter() Deprecation {
return Deprecation{
- Message: "You are using a custom reporter. Support for custom reporters will likely be removed in V2. Most users were using them to generate junit or teamcity reports and this functionality will be merged into the core reporter. In addition, Ginkgo 2.0 will support emitting a JSON-formatted report that users can then manipulate to generate custom reports.\n\n{{red}}{{bold}}If this change will be impactful to you please leave a comment on {{cyan}}{{underline}}https://github.com/onsi/ginkgo/issues/711{{/}}",
+ Message: "Support for custom reporters has been removed in V2. Please read the documentation linked to below for Ginkgo's new behavior and for a migration path:",
DocLink: "removed-custom-reporters",
Version: "1.16.0",
}
}
-func (d deprecations) V1Reporter() Deprecation {
- return Deprecation{
- Message: "You are using a V1 Ginkgo Reporter. Please update your custom reporter to the new V2 Reporter interface.",
- DocLink: "changed-reporter-interface",
- Version: "1.16.0",
- }
-}
-
func (d deprecations) Async() Deprecation {
return Deprecation{
Message: "You are passing a Done channel to a test node to test asynchronous behavior. This is deprecated in Ginkgo V2. Your test will run synchronously and the timeout will be ignored.",
@@ -46,7 +38,7 @@ func (d deprecations) Async() Deprecation {
func (d deprecations) Measure() Deprecation {
return Deprecation{
- Message: "Measure is deprecated and will be removed in Ginkgo V2. Please migrate to gomega/gmeasure.",
+ Message: "Measure is deprecated and has been removed from Ginkgo V2. Any Measure tests in your spec will not run. Please migrate to gomega/gmeasure.",
DocLink: "removed-measure",
Version: "1.16.3",
}
@@ -56,7 +48,15 @@ func (d deprecations) ParallelNode() Deprecation {
return Deprecation{
Message: "GinkgoParallelNode is deprecated and will be removed in Ginkgo V2. Please use GinkgoParallelProcess instead.",
DocLink: "renamed-ginkgoparallelnode",
- Version: "1.16.5",
+ Version: "1.16.4",
+ }
+}
+
+func (d deprecations) CurrentGinkgoTestDescription() Deprecation {
+ return Deprecation{
+ Message: "CurrentGinkgoTestDescription() is deprecated in Ginkgo V2. Use CurrentSpecReport() instead.",
+ DocLink: "changed-currentginkgotestdescription",
+ Version: "1.16.0",
}
}
@@ -75,13 +75,30 @@ func (d deprecations) Blur() Deprecation {
}
}
+// Nodot is the deprecation emitted when the removed `ginkgo nodot` command is invoked.
+func (d deprecations) Nodot() Deprecation {
+ return Deprecation{
+ Message: "The nodot command is deprecated in Ginkgo V2. Please either dot-import Ginkgo or use the package identifier in your code to references objects and types provided by Ginkgo and Gomega.",
+ DocLink: "removed-ginkgo-nodot",
+ Version: "1.16.0",
+ }
+}
+
+// SuppressProgressReporting is the deprecation emitted when the obsolete SuppressProgressReporting option is used.
+// It has no DocLink, so the report omits the "Learn more at" line for it.
+func (d deprecations) SuppressProgressReporting() Deprecation {
+ return Deprecation{
+ Message: "Improvements to how reporters emit timeline information means that SuppressProgressReporting is no longer necessary and has been deprecated.",
+ Version: "2.5.0",
+ }
+}
+
type DeprecationTracker struct {
deprecations map[Deprecation][]CodeLocation
+ lock *sync.Mutex
}
func NewDeprecationTracker() *DeprecationTracker {
return &DeprecationTracker{
deprecations: map[Deprecation][]CodeLocation{},
+ lock: &sync.Mutex{},
}
}
@@ -95,6 +112,8 @@ func (d *DeprecationTracker) TrackDeprecation(deprecation Deprecation, cl ...Cod
}
}
+ d.lock.Lock()
+ defer d.lock.Unlock()
if len(cl) == 1 {
d.deprecations[deprecation] = append(d.deprecations[deprecation], cl[0])
} else {
@@ -103,29 +122,27 @@ func (d *DeprecationTracker) TrackDeprecation(deprecation Deprecation, cl ...Cod
}
func (d *DeprecationTracker) DidTrackDeprecations() bool {
+ d.lock.Lock()
+ defer d.lock.Unlock()
return len(d.deprecations) > 0
}
func (d *DeprecationTracker) DeprecationsReport() string {
- out := formatter.F("\n{{light-yellow}}You're using deprecated Ginkgo functionality:{{/}}\n")
+ d.lock.Lock()
+ defer d.lock.Unlock()
+ out := formatter.F("{{light-yellow}}You're using deprecated Ginkgo functionality:{{/}}\n")
out += formatter.F("{{light-yellow}}============================================={{/}}\n")
- out += formatter.F("{{bold}}{{green}}Ginkgo 2.0{{/}} is under active development and will introduce several new features, improvements, and a small handful of breaking changes.\n")
- out += formatter.F("A release candidate for 2.0 is now available and 2.0 should GA in Fall 2021. {{bold}}Please give the RC a try and send us feedback!{{/}}\n")
- out += formatter.F(" - To learn more, view the migration guide at {{cyan}}{{underline}}https://github.com/onsi/ginkgo/blob/ver2/docs/MIGRATING_TO_V2.md{{/}}\n")
- out += formatter.F(" - For instructions on using the Release Candidate visit {{cyan}}{{underline}}https://github.com/onsi/ginkgo/blob/ver2/docs/MIGRATING_TO_V2.md#using-the-beta{{/}}\n")
- out += formatter.F(" - To comment, chime in at {{cyan}}{{underline}}https://github.com/onsi/ginkgo/issues/711{{/}}\n\n")
-
for deprecation, locations := range d.deprecations {
out += formatter.Fi(1, "{{yellow}}"+deprecation.Message+"{{/}}\n")
if deprecation.DocLink != "" {
- out += formatter.Fi(1, "{{bold}}Learn more at:{{/}} {{cyan}}{{underline}}https://github.com/onsi/ginkgo/blob/ver2/docs/MIGRATING_TO_V2.md#%s{{/}}\n", deprecation.DocLink)
+ out += formatter.Fi(1, "{{bold}}Learn more at:{{/}} {{cyan}}{{underline}}https://onsi.github.io/ginkgo/MIGRATING_TO_V2#%s{{/}}\n", deprecation.DocLink)
}
for _, location := range locations {
out += formatter.Fi(2, "{{gray}}%s{{/}}\n", location)
}
}
out += formatter.F("\n{{gray}}To silence deprecations that can be silenced set the following environment variable:{{/}}\n")
- out += formatter.Fi(1, "{{gray}}ACK_GINKGO_DEPRECATIONS=%s{{/}}\n", config.VERSION)
+ out += formatter.Fi(1, "{{gray}}ACK_GINKGO_DEPRECATIONS=%s{{/}}\n", VERSION)
return out
}
diff --git a/vendor/github.com/onsi/ginkgo/v2/types/enum_support.go b/vendor/github.com/onsi/ginkgo/v2/types/enum_support.go
new file mode 100644
index 000000000..1d96ae028
--- /dev/null
+++ b/vendor/github.com/onsi/ginkgo/v2/types/enum_support.go
@@ -0,0 +1,43 @@
+package types
+
+import "encoding/json"
+
// EnumSupport provides shared String/JSON plumbing for Ginkgo's uint-backed
// enums.  It maintains a bidirectional mapping between enum values and their
// display names, plus the largest known value for range checking.
type EnumSupport struct {
	toString map[uint]string // enum value -> display name
	toEnum   map[string]uint // display name -> enum value
	maxEnum  uint            // largest registered enum value
}

// NewEnumSupport builds an EnumSupport from a value->name table, deriving the
// reverse (name->value) table and the maximum registered value.
func NewEnumSupport(toString map[uint]string) EnumSupport {
	reverse := make(map[string]uint, len(toString))
	var highest uint
	for value, name := range toString {
		reverse[name] = value
		if value > highest {
			highest = value
		}
	}
	return EnumSupport{toString: toString, toEnum: reverse, maxEnum: highest}
}

// String returns the name registered for e; out-of-range values fall back to
// the name of the zero (invalid/unknown) value.
func (es EnumSupport) String(e uint) string {
	if e <= es.maxEnum {
		return es.toString[e]
	}
	return es.toString[0]
}

// UnmarshJSON decodes a JSON string into the corresponding enum value.
// Unrecognized names decode to 0, the designated unknown value.
func (es EnumSupport) UnmarshJSON(b []byte) (uint, error) {
	var name string
	if err := json.Unmarshal(b, &name); err != nil {
		return 0, err
	}
	// A lookup miss yields the zero value, which is exactly the fallback we want.
	return es.toEnum[name], nil
}

// MarshJSON encodes e as its JSON string name; zero and out-of-range values
// are encoded as JSON null.
func (es EnumSupport) MarshJSON(e uint) ([]byte, error) {
	if e >= 1 && e <= es.maxEnum {
		return json.Marshal(es.toString[e])
	}
	return json.Marshal(nil)
}
diff --git a/vendor/github.com/onsi/ginkgo/v2/types/errors.go b/vendor/github.com/onsi/ginkgo/v2/types/errors.go
new file mode 100644
index 000000000..1e0dbfd9d
--- /dev/null
+++ b/vendor/github.com/onsi/ginkgo/v2/types/errors.go
@@ -0,0 +1,630 @@
+package types
+
+import (
+ "fmt"
+ "reflect"
+ "strings"
+
+ "github.com/onsi/ginkgo/v2/formatter"
+)
+
// GinkgoError is the error type used throughout Ginkgo.  Error() renders it
// as a colorized multi-line report; DocLink is an anchor appended to the
// Ginkgo docs URL, and CodeLocation (when non-zero) pinpoints the offending
// source line.
type GinkgoError struct {
	Heading      string
	Message      string
	DocLink      string
	CodeLocation CodeLocation
}
+
+func (g GinkgoError) Error() string {
+ out := formatter.F("{{bold}}{{red}}%s{{/}}\n", g.Heading)
+ if (g.CodeLocation != CodeLocation{}) {
+ contentsOfLine := strings.TrimLeft(g.CodeLocation.ContentsOfLine(), "\t ")
+ if contentsOfLine != "" {
+ out += formatter.F("{{light-gray}}%s{{/}}\n", contentsOfLine)
+ }
+ out += formatter.F("{{gray}}%s{{/}}\n", g.CodeLocation)
+ }
+ if g.Message != "" {
+ out += formatter.Fiw(1, formatter.COLS, g.Message)
+ out += "\n\n"
+ }
+ if g.DocLink != "" {
+ out += formatter.Fiw(1, formatter.COLS, "{{bold}}Learn more at:{{/}} {{cyan}}{{underline}}http://onsi.github.io/ginkgo/#%s{{/}}\n", g.DocLink)
+ }
+
+ return out
+}
+
// ginkgoErrors is a stateless namespace type: every error constructor below
// is a method on it so call sites read as GinkgoErrors.Xyz(...).
type ginkgoErrors struct{}

// GinkgoErrors is the singleton through which all error constructors are reached.
var GinkgoErrors = ginkgoErrors{}

// UncaughtGinkgoPanic describes a Fail()-induced panic that escaped Ginkgo's
// recovery — typically an assertion made in a goroutine without a deferred
// GinkgoRecover, or made outside any leaf node.
func (g ginkgoErrors) UncaughtGinkgoPanic(cl CodeLocation) error {
	return GinkgoError{
		Heading: "Your Test Panicked",
		Message: `When you, or your assertion library, calls Ginkgo's Fail(),
Ginkgo panics to prevent subsequent assertions from running.

Normally Ginkgo rescues this panic so you shouldn't see it.

However, if you make an assertion in a goroutine, Ginkgo can't capture the panic.
To circumvent this, you should call

	defer GinkgoRecover()

at the top of the goroutine that caused this panic.

Alternatively, you may have made an assertion outside of a Ginkgo
leaf node (e.g. in a container node or some out-of-band function) - please move your assertion to
an appropriate Ginkgo node (e.g. a BeforeSuite, BeforeEach, It, etc...).`,
		DocLink:      "mental-model-how-ginkgo-handles-failure",
		CodeLocation: cl,
	}
}

// RerunningSuite is returned when RunSpecs is invoked more than once in a
// single process, which Ginkgo does not support.
func (g ginkgoErrors) RerunningSuite() error {
	return GinkgoError{
		Heading: "Rerunning Suite",
		Message: formatter.F(`It looks like you are calling RunSpecs more than once. Ginkgo does not support rerunning suites. If you want to rerun a suite try {{bold}}ginkgo --repeat=N{{/}} or {{bold}}ginkgo --until-it-fails{{/}}`),
		DocLink: "repeating-spec-runs-and-managing-flaky-specs",
	}
}
+
/* Tree construction errors */

// PushingNodeInRunPhase reports an attempt to add a tree-building node from
// inside a leaf node after the specs have started running.
func (g ginkgoErrors) PushingNodeInRunPhase(nodeType NodeType, cl CodeLocation) error {
	return GinkgoError{
		Heading: "Ginkgo detected an issue with your spec structure",
		Message: formatter.F(
			`It looks like you are trying to add a {{bold}}[%s]{{/}} node
to the Ginkgo spec tree in a leaf node {{bold}}after{{/}} the specs started running.

To enable randomization and parallelization Ginkgo requires the spec tree
to be fully constructed up front. In practice, this means that you can
only create nodes like {{bold}}[%s]{{/}} at the top-level or within the
body of a {{bold}}Describe{{/}}, {{bold}}Context{{/}}, or {{bold}}When{{/}}.`, nodeType, nodeType),
		CodeLocation: cl,
		DocLink:      "mental-model-how-ginkgo-traverses-the-spec-hierarchy",
	}
}

// CaughtPanicDuringABuildPhase reports a panic (usually a failed assertion)
// raised while the spec tree was being constructed, i.e. in a container body.
func (g ginkgoErrors) CaughtPanicDuringABuildPhase(caughtPanic interface{}, cl CodeLocation) error {
	return GinkgoError{
		Heading: "Assertion or Panic detected during tree construction",
		Message: formatter.F(
			`Ginkgo detected a panic while constructing the spec tree.
You may be trying to make an assertion in the body of a container node
(i.e. {{bold}}Describe{{/}}, {{bold}}Context{{/}}, or {{bold}}When{{/}}).

Please ensure all assertions are inside leaf nodes such as {{bold}}BeforeEach{{/}},
{{bold}}It{{/}}, etc.

{{bold}}Here's the content of the panic that was caught:{{/}}
%v`, caughtPanic),
		CodeLocation: cl,
		DocLink:      "no-assertions-in-container-nodes",
	}
}

// SuiteNodeInNestedContext reports a suite-level node (e.g. BeforeSuite)
// declared inside a container rather than at the top level.
func (g ginkgoErrors) SuiteNodeInNestedContext(nodeType NodeType, cl CodeLocation) error {
	docLink := "suite-setup-and-cleanup-beforesuite-and-aftersuite"
	// Reporting suite nodes get their own documentation anchor.
	if nodeType.Is(NodeTypeReportBeforeSuite | NodeTypeReportAfterSuite) {
		docLink = "reporting-nodes---reportbeforesuite-and-reportaftersuite"
	}

	return GinkgoError{
		Heading: "Ginkgo detected an issue with your spec structure",
		Message: formatter.F(
			`It looks like you are trying to add a {{bold}}[%s]{{/}} node within a container node.

{{bold}}%s{{/}} can only be called at the top level.`, nodeType, nodeType),
		CodeLocation: cl,
		DocLink:      docLink,
	}
}

// SuiteNodeDuringRunPhase reports a suite-level node declared from inside a
// leaf node after the suite started running.
func (g ginkgoErrors) SuiteNodeDuringRunPhase(nodeType NodeType, cl CodeLocation) error {
	docLink := "suite-setup-and-cleanup-beforesuite-and-aftersuite"
	if nodeType.Is(NodeTypeReportBeforeSuite | NodeTypeReportAfterSuite) {
		docLink = "reporting-nodes---reportbeforesuite-and-reportaftersuite"
	}

	return GinkgoError{
		Heading: "Ginkgo detected an issue with your spec structure",
		Message: formatter.F(
			`It looks like you are trying to add a {{bold}}[%s]{{/}} node within a leaf node after the spec started running.

{{bold}}%s{{/}} can only be called at the top level.`, nodeType, nodeType),
		CodeLocation: cl,
		DocLink:      docLink,
	}
}

// MultipleBeforeSuiteNodes reports a second suite setup node.
func (g ginkgoErrors) MultipleBeforeSuiteNodes(nodeType NodeType, cl CodeLocation, earlierNodeType NodeType, earlierCodeLocation CodeLocation) error {
	return ginkgoErrorMultipleSuiteNodes("setup", nodeType, cl, earlierNodeType, earlierCodeLocation)
}

// MultipleAfterSuiteNodes reports a second suite teardown node.
func (g ginkgoErrors) MultipleAfterSuiteNodes(nodeType NodeType, cl CodeLocation, earlierNodeType NodeType, earlierCodeLocation CodeLocation) error {
	return ginkgoErrorMultipleSuiteNodes("teardown", nodeType, cl, earlierNodeType, earlierCodeLocation)
}

// ginkgoErrorMultipleSuiteNodes is the shared builder for the two errors
// above; setupOrTeardown ("setup"/"teardown") is interpolated into the message.
func ginkgoErrorMultipleSuiteNodes(setupOrTeardown string, nodeType NodeType, cl CodeLocation, earlierNodeType NodeType, earlierCodeLocation CodeLocation) error {
	return GinkgoError{
		Heading: "Ginkgo detected an issue with your spec structure",
		Message: formatter.F(
			`It looks like you are trying to add a {{bold}}[%s]{{/}} node but
you already have a {{bold}}[%s]{{/}} node defined at: {{gray}}%s{{/}}.

Ginkgo only allows you to define one suite %s node.`, nodeType, earlierNodeType, earlierCodeLocation, setupOrTeardown),
		CodeLocation: cl,
		DocLink:      "suite-setup-and-cleanup-beforesuite-and-aftersuite",
	}
}
+
/* Decorator errors */

// InvalidDecoratorForNodeType reports a decorator applied to a node type that
// does not support it.
func (g ginkgoErrors) InvalidDecoratorForNodeType(cl CodeLocation, nodeType NodeType, decorator string) error {
	return GinkgoError{
		Heading:      "Invalid Decorator",
		Message:      formatter.F(`[%s] node cannot be passed a(n) '%s' decorator`, nodeType, decorator),
		CodeLocation: cl,
		DocLink:      "node-decorators-overview",
	}
}

// InvalidDeclarationOfFocusedAndPending reports a node decorated with the
// mutually exclusive Focus and Pending decorators.
func (g ginkgoErrors) InvalidDeclarationOfFocusedAndPending(cl CodeLocation, nodeType NodeType) error {
	return GinkgoError{
		Heading:      "Invalid Combination of Decorators: Focused and Pending",
		Message:      formatter.F(`[%s] node was decorated with both Focus and Pending. At most one is allowed.`, nodeType),
		CodeLocation: cl,
		DocLink:      "node-decorators-overview",
	}
}

// InvalidDeclarationOfFlakeAttemptsAndMustPassRepeatedly reports a node
// decorated with the mutually exclusive FlakeAttempts and MustPassRepeatedly.
func (g ginkgoErrors) InvalidDeclarationOfFlakeAttemptsAndMustPassRepeatedly(cl CodeLocation, nodeType NodeType) error {
	return GinkgoError{
		Heading:      "Invalid Combination of Decorators: FlakeAttempts and MustPassRepeatedly",
		Message:      formatter.F(`[%s] node was decorated with both FlakeAttempts and MustPassRepeatedly. At most one is allowed.`, nodeType),
		CodeLocation: cl,
		DocLink:      "node-decorators-overview",
	}
}

// UnknownDecorator reports an argument that is not a recognized decorator.
func (g ginkgoErrors) UnknownDecorator(cl CodeLocation, nodeType NodeType, decorator interface{}) error {
	return GinkgoError{
		Heading:      "Unknown Decorator",
		Message:      formatter.F(`[%s] node was passed an unknown decorator: '%#v'`, nodeType, decorator),
		CodeLocation: cl,
		DocLink:      "node-decorators-overview",
	}
}

// InvalidBodyTypeForContainer reports a container node whose body is not a
// plain func().
func (g ginkgoErrors) InvalidBodyTypeForContainer(t reflect.Type, cl CodeLocation, nodeType NodeType) error {
	return GinkgoError{
		Heading:      "Invalid Function",
		Message:      formatter.F(`[%s] node must be passed {{bold}}func(){{/}} - i.e. functions that take nothing and return nothing. You passed {{bold}}%s{{/}} instead.`, nodeType, t),
		CodeLocation: cl,
		DocLink:      "node-decorators-overview",
	}
}

// InvalidBodyType reports a node body with an unsupported signature; the
// allowed signatures depend on whether the node is a container.
func (g ginkgoErrors) InvalidBodyType(t reflect.Type, cl CodeLocation, nodeType NodeType) error {
	mustGet := "{{bold}}func(){{/}}, {{bold}}func(ctx SpecContext){{/}}, or {{bold}}func(ctx context.Context){{/}}"
	if nodeType.Is(NodeTypeContainer) {
		mustGet = "{{bold}}func(){{/}}"
	}
	return GinkgoError{
		Heading: "Invalid Function",
		Message: formatter.F(`[%s] node must be passed `+mustGet+`.
You passed {{bold}}%s{{/}} instead.`, nodeType, t),
		CodeLocation: cl,
		DocLink:      "node-decorators-overview",
	}
}

// InvalidBodyTypeForSynchronizedBeforeSuiteProc1 reports an unsupported
// signature for SynchronizedBeforeSuite's first (process-1-only) function.
func (g ginkgoErrors) InvalidBodyTypeForSynchronizedBeforeSuiteProc1(t reflect.Type, cl CodeLocation) error {
	mustGet := "{{bold}}func() []byte{{/}}, {{bold}}func(ctx SpecContext) []byte{{/}}, or {{bold}}func(ctx context.Context) []byte{{/}}, {{bold}}func(){{/}}, {{bold}}func(ctx SpecContext){{/}}, or {{bold}}func(ctx context.Context){{/}}"
	return GinkgoError{
		Heading: "Invalid Function",
		Message: formatter.F(`[SynchronizedBeforeSuite] node must be passed `+mustGet+` for its first function.
You passed {{bold}}%s{{/}} instead.`, t),
		CodeLocation: cl,
		DocLink:      "node-decorators-overview",
	}
}

// InvalidBodyTypeForSynchronizedBeforeSuiteAllProcs reports an unsupported
// signature for SynchronizedBeforeSuite's second (all-processes) function.
func (g ginkgoErrors) InvalidBodyTypeForSynchronizedBeforeSuiteAllProcs(t reflect.Type, cl CodeLocation) error {
	mustGet := "{{bold}}func(){{/}}, {{bold}}func(ctx SpecContext){{/}}, or {{bold}}func(ctx context.Context){{/}}, {{bold}}func([]byte){{/}}, {{bold}}func(ctx SpecContext, []byte){{/}}, or {{bold}}func(ctx context.Context, []byte){{/}}"
	return GinkgoError{
		Heading: "Invalid Function",
		Message: formatter.F(`[SynchronizedBeforeSuite] node must be passed `+mustGet+` for its second function.
You passed {{bold}}%s{{/}} instead.`, t),
		CodeLocation: cl,
		DocLink:      "node-decorators-overview",
	}
}

// MultipleBodyFunctions reports a node constructed with more than one body function.
func (g ginkgoErrors) MultipleBodyFunctions(cl CodeLocation, nodeType NodeType) error {
	return GinkgoError{
		Heading:      "Multiple Functions",
		Message:      formatter.F(`[%s] node must be passed a single function - but more than one was passed in.`, nodeType),
		CodeLocation: cl,
		DocLink:      "node-decorators-overview",
	}
}

// MissingBodyFunction reports a node constructed with no body function.
func (g ginkgoErrors) MissingBodyFunction(cl CodeLocation, nodeType NodeType) error {
	return GinkgoError{
		Heading:      "Missing Functions",
		Message:      formatter.F(`[%s] node must be passed a single function - but none was passed in.`, nodeType),
		CodeLocation: cl,
		DocLink:      "node-decorators-overview",
	}
}

// InvalidTimeoutOrGracePeriodForNonContextNode reports timeout/grace-period
// decorators on a node whose callback takes no context.
func (g ginkgoErrors) InvalidTimeoutOrGracePeriodForNonContextNode(cl CodeLocation, nodeType NodeType) error {
	return GinkgoError{
		Heading:      "Invalid NodeTimeout SpecTimeout, or GracePeriod",
		Message:      formatter.F(`[%s] was passed NodeTimeout, SpecTimeout, or GracePeriod but does not have a callback that accepts a {{bold}}SpecContext{{/}} or {{bold}}context.Context{{/}}. You must accept a context to enable timeouts and grace periods`, nodeType),
		CodeLocation: cl,
		DocLink:      "spec-timeouts-and-interruptible-nodes",
	}
}

// InvalidTimeoutOrGracePeriodForNonContextCleanupNode is the DeferCleanup
// variant of the error above.
func (g ginkgoErrors) InvalidTimeoutOrGracePeriodForNonContextCleanupNode(cl CodeLocation) error {
	return GinkgoError{
		Heading:      "Invalid NodeTimeout SpecTimeout, or GracePeriod",
		Message:      formatter.F(`[DeferCleanup] was passed NodeTimeout or GracePeriod but does not have a callback that accepts a {{bold}}SpecContext{{/}} or {{bold}}context.Context{{/}}. You must accept a context to enable timeouts and grace periods`),
		CodeLocation: cl,
		DocLink:      "spec-timeouts-and-interruptible-nodes",
	}
}
+
/* Ordered Container errors */

// InvalidSerialNodeInNonSerialOrderedContainer reports a Serial node inside
// an Ordered container that is not itself marked Serial.
func (g ginkgoErrors) InvalidSerialNodeInNonSerialOrderedContainer(cl CodeLocation, nodeType NodeType) error {
	return GinkgoError{
		Heading:      "Invalid Serial Node in Non-Serial Ordered Container",
		Message:      formatter.F(`[%s] node was decorated with Serial but occurs in an Ordered container that is not marked Serial. Move the Serial decorator to the outer-most Ordered container to mark all ordered specs within the container as serial.`, nodeType),
		CodeLocation: cl,
		DocLink:      "node-decorators-overview",
	}
}

// SetupNodeNotInOrderedContainer reports an ordered-only setup node (e.g.
// BeforeAll) that is nested below the Ordered container itself.
func (g ginkgoErrors) SetupNodeNotInOrderedContainer(cl CodeLocation, nodeType NodeType) error {
	return GinkgoError{
		Heading:      "Setup Node not in Ordered Container",
		Message:      fmt.Sprintf("[%s] setup nodes must appear inside an Ordered container. They cannot be nested within other containers, even containers in an ordered container.", nodeType),
		CodeLocation: cl,
		DocLink:      "ordered-containers",
	}
}

// InvalidContinueOnFailureDecoration reports ContinueOnFailure applied
// anywhere other than an outermost Ordered container.
func (g ginkgoErrors) InvalidContinueOnFailureDecoration(cl CodeLocation) error {
	return GinkgoError{
		Heading:      "ContinueOnFailure not decorating an outermost Ordered Container",
		Message:      "ContinueOnFailure can only decorate an Ordered container, and this Ordered container must be the outermost Ordered container.",
		CodeLocation: cl,
		DocLink:      "ordered-containers",
	}
}

/* DeferCleanup errors */

// DeferCleanupInvalidFunction reports a DeferCleanup argument that is not an
// invocable function with an acceptable signature.
func (g ginkgoErrors) DeferCleanupInvalidFunction(cl CodeLocation) error {
	return GinkgoError{
		Heading:      "DeferCleanup requires a valid function",
		Message:      "You must pass DeferCleanup a function to invoke. This function must return zero or one values - if it does return, it must return an error. The function can take arbitrarily many arguments and you should provide these to DeferCleanup to pass along to the function.",
		CodeLocation: cl,
		DocLink:      "cleaning-up-our-cleanup-code-defercleanup",
	}
}

// PushingCleanupNodeDuringTreeConstruction reports DeferCleanup called while
// the spec tree is still being built (i.e. from a container body or top level).
func (g ginkgoErrors) PushingCleanupNodeDuringTreeConstruction(cl CodeLocation) error {
	return GinkgoError{
		Heading:      "DeferCleanup must be called inside a setup or subject node",
		Message:      "You must call DeferCleanup inside a setup node (e.g. BeforeEach, BeforeSuite, AfterAll...) or a subject node (i.e. It). You can't call DeferCleanup at the top-level or in a container node - use the After* family of setup nodes instead.",
		CodeLocation: cl,
		DocLink:      "cleaning-up-our-cleanup-code-defercleanup",
	}
}

// PushingCleanupInReportingNode reports DeferCleanup called from a reporting node.
func (g ginkgoErrors) PushingCleanupInReportingNode(cl CodeLocation, nodeType NodeType) error {
	return GinkgoError{
		Heading:      fmt.Sprintf("DeferCleanup cannot be called in %s", nodeType),
		Message:      "Please inline your cleanup code - Ginkgo won't run cleanup code after a Reporting node.",
		CodeLocation: cl,
		DocLink:      "cleaning-up-our-cleanup-code-defercleanup",
	}
}

// PushingCleanupInCleanupNode reports DeferCleanup called from within a
// DeferCleanup callback, which is not allowed.
func (g ginkgoErrors) PushingCleanupInCleanupNode(cl CodeLocation) error {
	return GinkgoError{
		Heading:      "DeferCleanup cannot be called in a DeferCleanup callback",
		Message:      "Please inline your cleanup code - Ginkgo doesn't let you call DeferCleanup from within DeferCleanup",
		CodeLocation: cl,
		DocLink:      "cleaning-up-our-cleanup-code-defercleanup",
	}
}
+
/* ReportEntry errors */

// TooManyReportEntryValues reports more than one value argument passed to a
// report-entry call.
func (g ginkgoErrors) TooManyReportEntryValues(cl CodeLocation, arg interface{}) error {
	return GinkgoError{
		Heading:      "Too Many ReportEntry Values",
		Message:      formatter.F(`{{bold}}AddGinkgoReport{{/}} can only be given one value. Got unexpected value: %#v`, arg),
		CodeLocation: cl,
		DocLink:      "attaching-data-to-reports",
	}
}

// AddReportEntryNotDuringRunPhase reports AddGinkgoReport called outside a
// running spec (e.g. from a container body).
func (g ginkgoErrors) AddReportEntryNotDuringRunPhase(cl CodeLocation) error {
	return GinkgoError{
		Heading:      "Ginkgo detected an issue with your spec structure",
		Message:      formatter.F(`It looks like you are calling {{bold}}AddGinkgoReport{{/}} outside of a running spec. Make sure you call {{bold}}AddGinkgoReport{{/}} inside a runnable node such as It or BeforeEach and not inside the body of a container such as Describe or Context.`),
		CodeLocation: cl,
		DocLink:      "attaching-data-to-reports",
	}
}

/* By errors */

// ByNotDuringRunPhase reports By called outside a running spec.
func (g ginkgoErrors) ByNotDuringRunPhase(cl CodeLocation) error {
	return GinkgoError{
		Heading:      "Ginkgo detected an issue with your spec structure",
		Message:      formatter.F(`It looks like you are calling {{bold}}By{{/}} outside of a running spec. Make sure you call {{bold}}By{{/}} inside a runnable node such as It or BeforeEach and not inside the body of a container such as Describe or Context.`),
		CodeLocation: cl,
		DocLink:      "documenting-complex-specs-by",
	}
}

/* FileFilter and SkipFilter errors */

// InvalidFileFilter reports a malformed file-filter expression (see
// ParseFileFilters for the accepted grammar).
func (g ginkgoErrors) InvalidFileFilter(filter string) error {
	return GinkgoError{
		Heading: "Invalid File Filter",
		Message: fmt.Sprintf(`The provided file filter: "%s" is invalid. File filters must have the format "file", "file:lines" where "file" is a regular expression that will match against the file path and lines is a comma-separated list of integers (e.g. file:1,5,7) or line-ranges (e.g. file:1-3,5-9) or both (e.g. file:1,5-9)`, filter),
		DocLink: "filtering-specs",
	}
}

// InvalidFileFilterRegularExpression reports a file filter whose file
// component failed to compile as a regular expression.
func (g ginkgoErrors) InvalidFileFilterRegularExpression(filter string, err error) error {
	return GinkgoError{
		Heading: "Invalid File Filter Regular Expression",
		Message: fmt.Sprintf(`The provided file filter: "%s" included an invalid regular expression. regexp.Compile error: %s`, filter, err),
		DocLink: "filtering-specs",
	}
}
+
+/* Label Errors */
+func (g ginkgoErrors) SyntaxErrorParsingLabelFilter(input string, location int, error string) error {
+ var message string
+ if location >= 0 {
+ for i, r := range input {
+ if i == location {
+ message += "{{red}}{{bold}}{{underline}}"
+ }
+ message += string(r)
+ if i == location {
+ message += "{{/}}"
+ }
+ }
+ } else {
+ message = input
+ }
+ message += "\n" + error
+ return GinkgoError{
+ Heading: "Syntax Error Parsing Label Filter",
+ Message: message,
+ DocLink: "spec-labels",
+ }
+}
+
+func (g ginkgoErrors) InvalidLabel(label string, cl CodeLocation) error {
+ return GinkgoError{
+ Heading: "Invalid Label",
+ Message: fmt.Sprintf("'%s' is an invalid label. Labels cannot contain of the following characters: '&|!,()/'", label),
+ CodeLocation: cl,
+ DocLink: "spec-labels",
+ }
+}
+
// InvalidEmptyLabel reports an empty-string label.
func (g ginkgoErrors) InvalidEmptyLabel(cl CodeLocation) error {
	return GinkgoError{
		Heading:      "Invalid Empty Label",
		Message:      "Labels cannot be empty",
		CodeLocation: cl,
		DocLink:      "spec-labels",
	}
}

/* Table errors */

// MultipleEntryBodyFunctionsForTable reports a DescribeTable given more than
// one body function.
func (g ginkgoErrors) MultipleEntryBodyFunctionsForTable(cl CodeLocation) error {
	return GinkgoError{
		Heading:      "DescribeTable passed multiple functions",
		Message:      "It looks like you are passing multiple functions into DescribeTable. Only one function can be passed in. This function will be called for each Entry in the table.",
		CodeLocation: cl,
		DocLink:      "table-specs",
	}
}

// InvalidEntryDescription reports an Entry description of an unsupported type.
func (g ginkgoErrors) InvalidEntryDescription(cl CodeLocation) error {
	return GinkgoError{
		Heading:      "Invalid Entry description",
		Message:      "Entry description functions must be a string, a function that accepts the entry parameters and returns a string, or nil.",
		CodeLocation: cl,
		DocLink:      "table-specs",
	}
}
+
+func (g ginkgoErrors) MissingParametersForTableFunction(cl CodeLocation) error {
+ return GinkgoError{
+ Heading: fmt.Sprintf("No parameters have been passed to the Table Function"),
+ Message: fmt.Sprintf("The Table Function expected at least 1 parameter"),
+ CodeLocation: cl,
+ DocLink: "table-specs",
+ }
+}
+
// IncorrectParameterTypeForTable reports a DescribeTable argument that is
// neither an Entry nor another supported decoration.
func (g ginkgoErrors) IncorrectParameterTypeForTable(i int, name string, cl CodeLocation) error {
	return GinkgoError{
		Heading:      "DescribeTable passed incorrect parameter type",
		Message:      fmt.Sprintf("Parameter #%d passed to DescribeTable is of incorrect type <%s>", i, name),
		CodeLocation: cl,
		DocLink:      "table-specs",
	}
}

// TooFewParametersToTableFunction reports an Entry supplying fewer arguments
// than the table function accepts.
func (g ginkgoErrors) TooFewParametersToTableFunction(expected, actual int, kind string, cl CodeLocation) error {
	return GinkgoError{
		Heading:      fmt.Sprintf("Too few parameters passed in to %s", kind),
		Message:      fmt.Sprintf("The %s expected %d parameters but you passed in %d", kind, expected, actual),
		CodeLocation: cl,
		DocLink:      "table-specs",
	}
}

// TooManyParametersToTableFunction reports an Entry supplying more arguments
// than the table function accepts.
func (g ginkgoErrors) TooManyParametersToTableFunction(expected, actual int, kind string, cl CodeLocation) error {
	return GinkgoError{
		Heading:      fmt.Sprintf("Too many parameters passed in to %s", kind),
		Message:      fmt.Sprintf("The %s expected %d parameters but you passed in %d", kind, expected, actual),
		CodeLocation: cl,
		DocLink:      "table-specs",
	}
}

// IncorrectParameterTypeToTableFunction reports an Entry argument whose type
// does not match the table function's corresponding parameter.
func (g ginkgoErrors) IncorrectParameterTypeToTableFunction(i int, expected, actual reflect.Type, kind string, cl CodeLocation) error {
	return GinkgoError{
		Heading:      fmt.Sprintf("Incorrect parameters type passed to %s", kind),
		Message:      fmt.Sprintf("The %s expected parameter #%d to be of type <%s> but you passed in <%s>", kind, i, expected, actual),
		CodeLocation: cl,
		DocLink:      "table-specs",
	}
}

// IncorrectVariadicParameterTypeToTableFunction reports Entry arguments whose
// types do not match the table function's variadic parameter element type.
func (g ginkgoErrors) IncorrectVariadicParameterTypeToTableFunction(expected, actual reflect.Type, kind string, cl CodeLocation) error {
	return GinkgoError{
		Heading:      fmt.Sprintf("Incorrect parameters type passed to %s", kind),
		Message:      fmt.Sprintf("The %s expected its variadic parameters to be of type <%s> but you passed in <%s>", kind, expected, actual),
		CodeLocation: cl,
		DocLink:      "table-specs",
	}
}
+
/* Parallel Synchronization errors */

// AggregatedReportUnavailableDueToNodeDisappearing reports that a parallel
// process vanished before the aggregated suite report could be assembled.
func (g ginkgoErrors) AggregatedReportUnavailableDueToNodeDisappearing() error {
	return GinkgoError{
		Heading: "Test Report unavailable because a Ginkgo parallel process disappeared",
		Message: "The aggregated report could not be fetched for a ReportAfterSuite node. A Ginkgo parallel process disappeared before it could finish reporting.",
	}
}

// SynchronizedBeforeSuiteFailedOnProc1 reports a failed SynchronizedBeforeSuite
// on process #1; the whole suite aborts in response.
func (g ginkgoErrors) SynchronizedBeforeSuiteFailedOnProc1() error {
	return GinkgoError{
		Heading: "SynchronizedBeforeSuite failed on Ginkgo parallel process #1",
		Message: "The first SynchronizedBeforeSuite function running on Ginkgo parallel process #1 failed. This suite will now abort.",
	}
}

// SynchronizedBeforeSuiteDisappearedOnProc1 reports process #1 dying before
// its SynchronizedBeforeSuite function reported back.
func (g ginkgoErrors) SynchronizedBeforeSuiteDisappearedOnProc1() error {
	return GinkgoError{
		Heading: "Process #1 disappeared before SynchronizedBeforeSuite could report back",
		Message: "Ginkgo parallel process #1 disappeared before the first SynchronizedBeforeSuite function completed. This suite will now abort.",
	}
}

/* Configuration errors */

// UnknownTypePassedToRunSpecs reports an unsupported argument type given to RunSpecs.
func (g ginkgoErrors) UnknownTypePassedToRunSpecs(value interface{}) error {
	return GinkgoError{
		Heading: "Unknown Type passed to RunSpecs",
		Message: fmt.Sprintf("RunSpecs() accepts labels, and configuration of type types.SuiteConfig and/or types.ReporterConfig.\n You passed in: %v", value),
	}
}

// sharedParallelErrorMessage is the common body for the parallel-flag
// misconfiguration errors below.
var sharedParallelErrorMessage = "It looks like you are trying to run specs in parallel with go test.\nThis is unsupported and you should use the ginkgo CLI instead."

// InvalidParallelTotalConfiguration reports -ginkgo.parallel.total < 1.
func (g ginkgoErrors) InvalidParallelTotalConfiguration() error {
	return GinkgoError{
		Heading: "-ginkgo.parallel.total must be >= 1",
		Message: sharedParallelErrorMessage,
		DocLink: "spec-parallelization",
	}
}

// InvalidParallelProcessConfiguration reports an out-of-range
// -ginkgo.parallel.process value.
func (g ginkgoErrors) InvalidParallelProcessConfiguration() error {
	return GinkgoError{
		Heading: "-ginkgo.parallel.process is one-indexed and must be <= ginkgo.parallel.total",
		Message: sharedParallelErrorMessage,
		DocLink: "spec-parallelization",
	}
}

// MissingParallelHostConfiguration reports an unset -ginkgo.parallel.host.
func (g ginkgoErrors) MissingParallelHostConfiguration() error {
	return GinkgoError{
		Heading: "-ginkgo.parallel.host is missing",
		Message: sharedParallelErrorMessage,
		DocLink: "spec-parallelization",
	}
}

// UnreachableParallelHost reports a parallel host that could not be contacted.
func (g ginkgoErrors) UnreachableParallelHost(host string) error {
	return GinkgoError{
		Heading: "Could not reach ginkgo.parallel.host:" + host,
		Message: sharedParallelErrorMessage,
		DocLink: "spec-parallelization",
	}
}
+
// DryRunInParallelConfiguration reports -dryRun combined with parallel execution.
func (g ginkgoErrors) DryRunInParallelConfiguration() error {
	return GinkgoError{
		Heading: "Ginkgo only performs -dryRun in serial mode.",
		Message: "Please try running ginkgo -dryRun again, but without -p or -procs to ensure the suite is running in series.",
	}
}

// GracePeriodCannotBeZero reports a non-positive --grace-period setting.
func (g ginkgoErrors) GracePeriodCannotBeZero() error {
	return GinkgoError{
		Heading: "Ginkgo requires a positive --grace-period.",
		Message: "Please set --grace-period to a positive duration. The default is 30s.",
	}
}

// ConflictingVerbosityConfiguration reports more than one of -v/-vv/--succinct.
func (g ginkgoErrors) ConflictingVerbosityConfiguration() error {
	return GinkgoError{
		Heading: "Conflicting reporter verbosity settings.",
		Message: "You can't set more than one of -v, -vv and --succinct. Please pick one!",
	}
}

// InvalidOutputInterceptorModeConfiguration reports an unrecognized
// --output-interceptor-mode value.
func (g ginkgoErrors) InvalidOutputInterceptorModeConfiguration(value string) error {
	return GinkgoError{
		Heading: fmt.Sprintf("Invalid value '%s' for --output-interceptor-mode.", value),
		Message: "You must choose one of 'dup', 'swap', or 'none'.",
	}
}

// InvalidGoFlagCount reports go test's -count flag set to anything but 1.
func (g ginkgoErrors) InvalidGoFlagCount() error {
	return GinkgoError{
		Heading: "Use of go test -count",
		Message: "Ginkgo does not support using go test -count to rerun suites. Only -count=1 is allowed. To repeat suite runs, please use the ginkgo cli and `ginkgo -until-it-fails` or `ginkgo -repeat=N`.",
	}
}

// InvalidGoFlagParallel reports use of go test's -parallel flag.
func (g ginkgoErrors) InvalidGoFlagParallel() error {
	return GinkgoError{
		Heading: "Use of go test -parallel",
		Message: "Go test's implementation of parallelization does not actually parallelize Ginkgo specs. Please use the ginkgo cli and `ginkgo -p` or `ginkgo -procs=N` instead.",
	}
}

// BothRepeatAndUntilItFails reports the mutually exclusive --repeat and
// --until-it-fails flags being set together.
func (g ginkgoErrors) BothRepeatAndUntilItFails() error {
	return GinkgoError{
		Heading: "--repeat and --until-it-fails are both set",
		Message: "--until-it-fails directs Ginkgo to rerun specs indefinitely until they fail. --repeat directs Ginkgo to rerun specs a set number of times. You can't set both... which would you like?",
	}
}

/* Stack-Trace parsing errors */

// FailedToParseStackTrace wraps a raw stack-trace parsing failure message.
func (g ginkgoErrors) FailedToParseStackTrace(message string) error {
	return GinkgoError{
		Heading: "Failed to Parse Stack Trace",
		Message: message,
	}
}
diff --git a/vendor/github.com/onsi/ginkgo/v2/types/file_filter.go b/vendor/github.com/onsi/ginkgo/v2/types/file_filter.go
new file mode 100644
index 000000000..cc21df71e
--- /dev/null
+++ b/vendor/github.com/onsi/ginkgo/v2/types/file_filter.go
@@ -0,0 +1,106 @@
+package types
+
+import (
+ "regexp"
+ "strconv"
+ "strings"
+)
+
+func ParseFileFilters(filters []string) (FileFilters, error) {
+ ffs := FileFilters{}
+ for _, filter := range filters {
+ ff := FileFilter{}
+ if filter == "" {
+ return nil, GinkgoErrors.InvalidFileFilter(filter)
+ }
+ components := strings.Split(filter, ":")
+ if !(len(components) == 1 || len(components) == 2) {
+ return nil, GinkgoErrors.InvalidFileFilter(filter)
+ }
+
+ var err error
+ ff.Filename, err = regexp.Compile(components[0])
+ if err != nil {
+ return nil, err
+ }
+ if len(components) == 2 {
+ lineFilters := strings.Split(components[1], ",")
+ for _, lineFilter := range lineFilters {
+ components := strings.Split(lineFilter, "-")
+ if len(components) == 1 {
+ line, err := strconv.Atoi(strings.TrimSpace(components[0]))
+ if err != nil {
+ return nil, GinkgoErrors.InvalidFileFilter(filter)
+ }
+ ff.LineFilters = append(ff.LineFilters, LineFilter{line, line + 1})
+ } else if len(components) == 2 {
+ line1, err := strconv.Atoi(strings.TrimSpace(components[0]))
+ if err != nil {
+ return nil, GinkgoErrors.InvalidFileFilter(filter)
+ }
+ line2, err := strconv.Atoi(strings.TrimSpace(components[1]))
+ if err != nil {
+ return nil, GinkgoErrors.InvalidFileFilter(filter)
+ }
+ ff.LineFilters = append(ff.LineFilters, LineFilter{line1, line2})
+ } else {
+ return nil, GinkgoErrors.InvalidFileFilter(filter)
+ }
+ }
+ }
+ ffs = append(ffs, ff)
+ }
+ return ffs, nil
+}
+
// FileFilter pairs a file-path regular expression with an optional set of
// line-number filters (empty LineFilters matches every line).
type FileFilter struct {
	Filename    *regexp.Regexp
	LineFilters LineFilters
}
+
+func (f FileFilter) Matches(locations []CodeLocation) bool {
+ for _, location := range locations {
+ if f.Filename.MatchString(location.FileName) &&
+ f.LineFilters.Matches(location.LineNumber) {
+ return true
+ }
+
+ }
+ return false
+}
+
+type FileFilters []FileFilter
+
+func (ffs FileFilters) Matches(locations []CodeLocation) bool {
+ for _, ff := range ffs {
+ if ff.Matches(locations) {
+ return true
+ }
+ }
+
+ return false
+}
+
// LineFilter is a half-open line-number interval: it accepts lines l with
// Min <= l < Max.
type LineFilter struct {
	Min int
	Max int
}

// Matches reports whether line falls inside the half-open interval [Min, Max).
func (lf LineFilter) Matches(line int) bool {
	return line >= lf.Min && line < lf.Max
}

// LineFilters is a set of LineFilter intervals.
type LineFilters []LineFilter

// Matches reports whether line is accepted: an empty set accepts every line;
// otherwise at least one interval must contain it.
func (lfs LineFilters) Matches(line int) bool {
	if len(lfs) == 0 {
		return true
	}
	for _, interval := range lfs {
		if interval.Matches(line) {
			return true
		}
	}
	return false
}
diff --git a/vendor/github.com/onsi/ginkgo/v2/types/flags.go b/vendor/github.com/onsi/ginkgo/v2/types/flags.go
new file mode 100644
index 000000000..9186ae873
--- /dev/null
+++ b/vendor/github.com/onsi/ginkgo/v2/types/flags.go
@@ -0,0 +1,489 @@
+package types
+
+import (
+ "flag"
+ "fmt"
+ "io"
+ "reflect"
+ "strings"
+ "time"
+
+ "github.com/onsi/ginkgo/v2/formatter"
+)
+
+type GinkgoFlag struct {
+ Name string
+ KeyPath string
+ SectionKey string
+
+ Usage string
+ UsageArgument string
+ UsageDefaultValue string
+
+ DeprecatedName string
+ DeprecatedDocLink string
+ DeprecatedVersion string
+
+ ExportAs string
+}
+
+type GinkgoFlags []GinkgoFlag
+
+func (f GinkgoFlags) CopyAppend(flags ...GinkgoFlag) GinkgoFlags {
+ out := GinkgoFlags{}
+ out = append(out, f...)
+ out = append(out, flags...)
+ return out
+}
+
+func (f GinkgoFlags) WithPrefix(prefix string) GinkgoFlags {
+ if prefix == "" {
+ return f
+ }
+ out := GinkgoFlags{}
+ for _, flag := range f {
+ if flag.Name != "" {
+ flag.Name = prefix + "." + flag.Name
+ }
+ if flag.DeprecatedName != "" {
+ flag.DeprecatedName = prefix + "." + flag.DeprecatedName
+ }
+ if flag.ExportAs != "" {
+ flag.ExportAs = prefix + "." + flag.ExportAs
+ }
+ out = append(out, flag)
+ }
+ return out
+}
+
+func (f GinkgoFlags) SubsetWithNames(names ...string) GinkgoFlags {
+ out := GinkgoFlags{}
+ for _, flag := range f {
+ for _, name := range names {
+ if flag.Name == name {
+ out = append(out, flag)
+ break
+ }
+ }
+ }
+ return out
+}
+
+type GinkgoFlagSection struct {
+ Key string
+ Style string
+ Succinct bool
+ Heading string
+ Description string
+}
+
+type GinkgoFlagSections []GinkgoFlagSection
+
+func (gfs GinkgoFlagSections) Lookup(key string) (GinkgoFlagSection, bool) {
+ for _, section := range gfs {
+ if section.Key == key {
+ return section, true
+ }
+ }
+
+ return GinkgoFlagSection{}, false
+}
+
+type GinkgoFlagSet struct {
+ flags GinkgoFlags
+ bindings interface{}
+
+ sections GinkgoFlagSections
+ extraGoFlagsSection GinkgoFlagSection
+
+ flagSet *flag.FlagSet
+}
+
+// Call NewGinkgoFlagSet to create GinkgoFlagSet that creates and binds to it's own *flag.FlagSet
+func NewGinkgoFlagSet(flags GinkgoFlags, bindings interface{}, sections GinkgoFlagSections) (GinkgoFlagSet, error) {
+ return bindFlagSet(GinkgoFlagSet{
+ flags: flags,
+ bindings: bindings,
+ sections: sections,
+ }, nil)
+}
+
+// Call NewGinkgoFlagSet to create GinkgoFlagSet that extends an existing *flag.FlagSet
+func NewAttachedGinkgoFlagSet(flagSet *flag.FlagSet, flags GinkgoFlags, bindings interface{}, sections GinkgoFlagSections, extraGoFlagsSection GinkgoFlagSection) (GinkgoFlagSet, error) {
+ return bindFlagSet(GinkgoFlagSet{
+ flags: flags,
+ bindings: bindings,
+ sections: sections,
+ extraGoFlagsSection: extraGoFlagsSection,
+ }, flagSet)
+}
+
+func bindFlagSet(f GinkgoFlagSet, flagSet *flag.FlagSet) (GinkgoFlagSet, error) {
+ if flagSet == nil {
+ f.flagSet = flag.NewFlagSet("", flag.ContinueOnError)
+ //suppress all output as Ginkgo is responsible for formatting usage
+ f.flagSet.SetOutput(io.Discard)
+ } else {
+ f.flagSet = flagSet
+ //we're piggybacking on an existing flagset (typically go test) so we have limited control
+ //on user feedback
+ f.flagSet.Usage = f.substituteUsage
+ }
+
+ for _, flag := range f.flags {
+ name := flag.Name
+
+ deprecatedUsage := "[DEPRECATED]"
+ deprecatedName := flag.DeprecatedName
+ if name != "" {
+ deprecatedUsage = fmt.Sprintf("[DEPRECATED] use --%s instead", name)
+ } else if flag.Usage != "" {
+ deprecatedUsage += " " + flag.Usage
+ }
+
+ value, ok := valueAtKeyPath(f.bindings, flag.KeyPath)
+ if !ok {
+ return GinkgoFlagSet{}, fmt.Errorf("could not load KeyPath: %s", flag.KeyPath)
+ }
+
+ iface, addr := value.Interface(), value.Addr().Interface()
+
+ switch value.Type() {
+ case reflect.TypeOf(string("")):
+ if name != "" {
+ f.flagSet.StringVar(addr.(*string), name, iface.(string), flag.Usage)
+ }
+ if deprecatedName != "" {
+ f.flagSet.StringVar(addr.(*string), deprecatedName, iface.(string), deprecatedUsage)
+ }
+ case reflect.TypeOf(int64(0)):
+ if name != "" {
+ f.flagSet.Int64Var(addr.(*int64), name, iface.(int64), flag.Usage)
+ }
+ if deprecatedName != "" {
+ f.flagSet.Int64Var(addr.(*int64), deprecatedName, iface.(int64), deprecatedUsage)
+ }
+ case reflect.TypeOf(float64(0)):
+ if name != "" {
+ f.flagSet.Float64Var(addr.(*float64), name, iface.(float64), flag.Usage)
+ }
+ if deprecatedName != "" {
+ f.flagSet.Float64Var(addr.(*float64), deprecatedName, iface.(float64), deprecatedUsage)
+ }
+ case reflect.TypeOf(int(0)):
+ if name != "" {
+ f.flagSet.IntVar(addr.(*int), name, iface.(int), flag.Usage)
+ }
+ if deprecatedName != "" {
+ f.flagSet.IntVar(addr.(*int), deprecatedName, iface.(int), deprecatedUsage)
+ }
+ case reflect.TypeOf(bool(true)):
+ if name != "" {
+ f.flagSet.BoolVar(addr.(*bool), name, iface.(bool), flag.Usage)
+ }
+ if deprecatedName != "" {
+ f.flagSet.BoolVar(addr.(*bool), deprecatedName, iface.(bool), deprecatedUsage)
+ }
+ case reflect.TypeOf(time.Duration(0)):
+ if name != "" {
+ f.flagSet.DurationVar(addr.(*time.Duration), name, iface.(time.Duration), flag.Usage)
+ }
+ if deprecatedName != "" {
+ f.flagSet.DurationVar(addr.(*time.Duration), deprecatedName, iface.(time.Duration), deprecatedUsage)
+ }
+
+ case reflect.TypeOf([]string{}):
+ if name != "" {
+ f.flagSet.Var(stringSliceVar{value}, name, flag.Usage)
+ }
+ if deprecatedName != "" {
+ f.flagSet.Var(stringSliceVar{value}, deprecatedName, deprecatedUsage)
+ }
+ default:
+ return GinkgoFlagSet{}, fmt.Errorf("unsupported type %T", iface)
+ }
+ }
+
+ return f, nil
+}
+
+func (f GinkgoFlagSet) IsZero() bool {
+ return f.flagSet == nil
+}
+
+func (f GinkgoFlagSet) WasSet(name string) bool {
+ found := false
+ f.flagSet.Visit(func(f *flag.Flag) {
+ if f.Name == name {
+ found = true
+ }
+ })
+
+ return found
+}
+
+func (f GinkgoFlagSet) Lookup(name string) *flag.Flag {
+ return f.flagSet.Lookup(name)
+}
+
+func (f GinkgoFlagSet) Parse(args []string) ([]string, error) {
+ if f.IsZero() {
+ return args, nil
+ }
+ err := f.flagSet.Parse(args)
+ if err != nil {
+ return []string{}, err
+ }
+ return f.flagSet.Args(), nil
+}
+
+func (f GinkgoFlagSet) ValidateDeprecations(deprecationTracker *DeprecationTracker) {
+ if f.IsZero() {
+ return
+ }
+ f.flagSet.Visit(func(flag *flag.Flag) {
+ for _, ginkgoFlag := range f.flags {
+ if ginkgoFlag.DeprecatedName != "" && strings.HasSuffix(flag.Name, ginkgoFlag.DeprecatedName) {
+ message := fmt.Sprintf("--%s is deprecated", ginkgoFlag.DeprecatedName)
+ if ginkgoFlag.Name != "" {
+ message = fmt.Sprintf("--%s is deprecated, use --%s instead", ginkgoFlag.DeprecatedName, ginkgoFlag.Name)
+ } else if ginkgoFlag.Usage != "" {
+ message += " " + ginkgoFlag.Usage
+ }
+
+ deprecationTracker.TrackDeprecation(Deprecation{
+ Message: message,
+ DocLink: ginkgoFlag.DeprecatedDocLink,
+ Version: ginkgoFlag.DeprecatedVersion,
+ })
+ }
+ }
+ })
+}
+
+func (f GinkgoFlagSet) Usage() string {
+ if f.IsZero() {
+ return ""
+ }
+ groupedFlags := map[GinkgoFlagSection]GinkgoFlags{}
+ ungroupedFlags := GinkgoFlags{}
+ managedFlags := map[string]bool{}
+ extraGoFlags := []*flag.Flag{}
+
+ for _, flag := range f.flags {
+ managedFlags[flag.Name] = true
+ managedFlags[flag.DeprecatedName] = true
+
+ if flag.Name == "" {
+ continue
+ }
+
+ section, ok := f.sections.Lookup(flag.SectionKey)
+ if ok {
+ groupedFlags[section] = append(groupedFlags[section], flag)
+ } else {
+ ungroupedFlags = append(ungroupedFlags, flag)
+ }
+ }
+
+ f.flagSet.VisitAll(func(flag *flag.Flag) {
+ if !managedFlags[flag.Name] {
+ extraGoFlags = append(extraGoFlags, flag)
+ }
+ })
+
+ out := ""
+ for _, section := range f.sections {
+ flags := groupedFlags[section]
+ if len(flags) == 0 {
+ continue
+ }
+ out += f.usageForSection(section)
+ if section.Succinct {
+ succinctFlags := []string{}
+ for _, flag := range flags {
+ if flag.Name != "" {
+ succinctFlags = append(succinctFlags, fmt.Sprintf("--%s", flag.Name))
+ }
+ }
+ out += formatter.Fiw(1, formatter.COLS, section.Style+strings.Join(succinctFlags, ", ")+"{{/}}\n")
+ } else {
+ for _, flag := range flags {
+ out += f.usageForFlag(flag, section.Style)
+ }
+ }
+ out += "\n"
+ }
+ if len(ungroupedFlags) > 0 {
+ for _, flag := range ungroupedFlags {
+ out += f.usageForFlag(flag, "")
+ }
+ out += "\n"
+ }
+ if len(extraGoFlags) > 0 {
+ out += f.usageForSection(f.extraGoFlagsSection)
+ for _, goFlag := range extraGoFlags {
+ out += f.usageForGoFlag(goFlag)
+ }
+ }
+
+ return out
+}
+
+func (f GinkgoFlagSet) substituteUsage() {
+ fmt.Fprintln(f.flagSet.Output(), f.Usage())
+}
+
+func valueAtKeyPath(root interface{}, keyPath string) (reflect.Value, bool) {
+ if len(keyPath) == 0 {
+ return reflect.Value{}, false
+ }
+
+ val := reflect.ValueOf(root)
+ components := strings.Split(keyPath, ".")
+ for _, component := range components {
+ val = reflect.Indirect(val)
+ switch val.Kind() {
+ case reflect.Map:
+ val = val.MapIndex(reflect.ValueOf(component))
+ if val.Kind() == reflect.Interface {
+ val = reflect.ValueOf(val.Interface())
+ }
+ case reflect.Struct:
+ val = val.FieldByName(component)
+ default:
+ return reflect.Value{}, false
+ }
+ if (val == reflect.Value{}) {
+ return reflect.Value{}, false
+ }
+ }
+
+ return val, true
+}
+
+func (f GinkgoFlagSet) usageForSection(section GinkgoFlagSection) string {
+ out := formatter.F(section.Style + "{{bold}}{{underline}}" + section.Heading + "{{/}}\n")
+ if section.Description != "" {
+ out += formatter.Fiw(0, formatter.COLS, section.Description+"\n")
+ }
+ return out
+}
+
+func (f GinkgoFlagSet) usageForFlag(flag GinkgoFlag, style string) string {
+ argument := flag.UsageArgument
+ defValue := flag.UsageDefaultValue
+ if argument == "" {
+ value, _ := valueAtKeyPath(f.bindings, flag.KeyPath)
+ switch value.Type() {
+ case reflect.TypeOf(string("")):
+ argument = "string"
+ case reflect.TypeOf(int64(0)), reflect.TypeOf(int(0)):
+ argument = "int"
+ case reflect.TypeOf(time.Duration(0)):
+ argument = "duration"
+ case reflect.TypeOf(float64(0)):
+ argument = "float"
+ case reflect.TypeOf([]string{}):
+ argument = "string"
+ }
+ }
+ if argument != "" {
+ argument = "[" + argument + "] "
+ }
+ if defValue != "" {
+ defValue = fmt.Sprintf("(default: %s)", defValue)
+ }
+ hyphens := "--"
+ if len(flag.Name) == 1 {
+ hyphens = "-"
+ }
+
+ out := formatter.Fi(1, style+"%s%s{{/}} %s{{gray}}%s{{/}}\n", hyphens, flag.Name, argument, defValue)
+ out += formatter.Fiw(2, formatter.COLS, "{{light-gray}}%s{{/}}\n", flag.Usage)
+ return out
+}
+
+func (f GinkgoFlagSet) usageForGoFlag(goFlag *flag.Flag) string {
+ //Taken directly from the flag package
+ out := fmt.Sprintf(" -%s", goFlag.Name)
+ name, usage := flag.UnquoteUsage(goFlag)
+ if len(name) > 0 {
+ out += " " + name
+ }
+ if len(out) <= 4 {
+ out += "\t"
+ } else {
+ out += "\n \t"
+ }
+ out += strings.ReplaceAll(usage, "\n", "\n \t")
+ out += "\n"
+ return out
+}
+
+type stringSliceVar struct {
+ slice reflect.Value
+}
+
+func (ssv stringSliceVar) String() string { return "" }
+func (ssv stringSliceVar) Set(s string) error {
+ ssv.slice.Set(reflect.AppendSlice(ssv.slice, reflect.ValueOf([]string{s})))
+ return nil
+}
+
+//given a set of GinkgoFlags and bindings, generate flag arguments suitable to be passed to an application with that set of flags configured.
+func GenerateFlagArgs(flags GinkgoFlags, bindings interface{}) ([]string, error) {
+ result := []string{}
+ for _, flag := range flags {
+ name := flag.ExportAs
+ if name == "" {
+ name = flag.Name
+ }
+ if name == "" {
+ continue
+ }
+
+ value, ok := valueAtKeyPath(bindings, flag.KeyPath)
+ if !ok {
+ return []string{}, fmt.Errorf("could not load KeyPath: %s", flag.KeyPath)
+ }
+
+ iface := value.Interface()
+ switch value.Type() {
+ case reflect.TypeOf(string("")):
+ if iface.(string) != "" {
+ result = append(result, fmt.Sprintf("--%s=%s", name, iface))
+ }
+ case reflect.TypeOf(int64(0)):
+ if iface.(int64) != 0 {
+ result = append(result, fmt.Sprintf("--%s=%d", name, iface))
+ }
+ case reflect.TypeOf(float64(0)):
+ if iface.(float64) != 0 {
+ result = append(result, fmt.Sprintf("--%s=%f", name, iface))
+ }
+ case reflect.TypeOf(int(0)):
+ if iface.(int) != 0 {
+ result = append(result, fmt.Sprintf("--%s=%d", name, iface))
+ }
+ case reflect.TypeOf(bool(true)):
+ if iface.(bool) {
+ result = append(result, fmt.Sprintf("--%s", name))
+ }
+ case reflect.TypeOf(time.Duration(0)):
+ if iface.(time.Duration) != time.Duration(0) {
+ result = append(result, fmt.Sprintf("--%s=%s", name, iface))
+ }
+
+ case reflect.TypeOf([]string{}):
+ strings := iface.([]string)
+ for _, s := range strings {
+ result = append(result, fmt.Sprintf("--%s=%s", name, s))
+ }
+ default:
+ return []string{}, fmt.Errorf("unsupported type %T", iface)
+ }
+ }
+
+ return result, nil
+}
diff --git a/vendor/github.com/onsi/ginkgo/v2/types/label_filter.go b/vendor/github.com/onsi/ginkgo/v2/types/label_filter.go
new file mode 100644
index 000000000..b0d3b651e
--- /dev/null
+++ b/vendor/github.com/onsi/ginkgo/v2/types/label_filter.go
@@ -0,0 +1,358 @@
+package types
+
+import (
+ "fmt"
+ "regexp"
+ "strings"
+)
+
+var DEBUG_LABEL_FILTER_PARSING = false
+
+type LabelFilter func([]string) bool
+
+func matchLabelAction(label string) LabelFilter {
+ expected := strings.ToLower(label)
+ return func(labels []string) bool {
+ for i := range labels {
+ if strings.ToLower(labels[i]) == expected {
+ return true
+ }
+ }
+ return false
+ }
+}
+
+func matchLabelRegexAction(regex *regexp.Regexp) LabelFilter {
+ return func(labels []string) bool {
+ for i := range labels {
+ if regex.MatchString(labels[i]) {
+ return true
+ }
+ }
+ return false
+ }
+}
+
+func notAction(filter LabelFilter) LabelFilter {
+ return func(labels []string) bool { return !filter(labels) }
+}
+
+func andAction(a, b LabelFilter) LabelFilter {
+ return func(labels []string) bool { return a(labels) && b(labels) }
+}
+
+func orAction(a, b LabelFilter) LabelFilter {
+ return func(labels []string) bool { return a(labels) || b(labels) }
+}
+
+type lfToken uint
+
+const (
+ lfTokenInvalid lfToken = iota
+
+ lfTokenRoot
+ lfTokenOpenGroup
+ lfTokenCloseGroup
+ lfTokenNot
+ lfTokenAnd
+ lfTokenOr
+ lfTokenRegexp
+ lfTokenLabel
+ lfTokenEOF
+)
+
+func (l lfToken) Precedence() int {
+ switch l {
+ case lfTokenRoot, lfTokenOpenGroup:
+ return 0
+ case lfTokenOr:
+ return 1
+ case lfTokenAnd:
+ return 2
+ case lfTokenNot:
+ return 3
+ }
+ return -1
+}
+
+func (l lfToken) String() string {
+ switch l {
+ case lfTokenRoot:
+ return "ROOT"
+ case lfTokenOpenGroup:
+ return "("
+ case lfTokenCloseGroup:
+ return ")"
+ case lfTokenNot:
+ return "!"
+ case lfTokenAnd:
+ return "&&"
+ case lfTokenOr:
+ return "||"
+ case lfTokenRegexp:
+ return "/regexp/"
+ case lfTokenLabel:
+ return "label"
+ case lfTokenEOF:
+ return "EOF"
+ }
+ return "INVALID"
+}
+
+type treeNode struct {
+ token lfToken
+ location int
+ value string
+
+ parent *treeNode
+ leftNode *treeNode
+ rightNode *treeNode
+}
+
+func (tn *treeNode) setRightNode(node *treeNode) {
+ tn.rightNode = node
+ node.parent = tn
+}
+
+func (tn *treeNode) setLeftNode(node *treeNode) {
+ tn.leftNode = node
+ node.parent = tn
+}
+
+func (tn *treeNode) firstAncestorWithPrecedenceLEQ(precedence int) *treeNode {
+ if tn.token.Precedence() <= precedence {
+ return tn
+ }
+ return tn.parent.firstAncestorWithPrecedenceLEQ(precedence)
+}
+
+func (tn *treeNode) firstUnmatchedOpenNode() *treeNode {
+ if tn.token == lfTokenOpenGroup {
+ return tn
+ }
+ if tn.parent == nil {
+ return nil
+ }
+ return tn.parent.firstUnmatchedOpenNode()
+}
+
+func (tn *treeNode) constructLabelFilter(input string) (LabelFilter, error) {
+ switch tn.token {
+ case lfTokenOpenGroup:
+ return nil, GinkgoErrors.SyntaxErrorParsingLabelFilter(input, tn.location, "Mismatched '(' - could not find matching ')'.")
+ case lfTokenLabel:
+ return matchLabelAction(tn.value), nil
+ case lfTokenRegexp:
+ re, err := regexp.Compile(tn.value)
+ if err != nil {
+ return nil, GinkgoErrors.SyntaxErrorParsingLabelFilter(input, tn.location, fmt.Sprintf("RegExp compilation error: %s", err))
+ }
+ return matchLabelRegexAction(re), nil
+ }
+
+ if tn.rightNode == nil {
+ return nil, GinkgoErrors.SyntaxErrorParsingLabelFilter(input, -1, "Unexpected EOF.")
+ }
+ rightLF, err := tn.rightNode.constructLabelFilter(input)
+ if err != nil {
+ return nil, err
+ }
+
+ switch tn.token {
+ case lfTokenRoot, lfTokenCloseGroup:
+ return rightLF, nil
+ case lfTokenNot:
+ return notAction(rightLF), nil
+ }
+
+ if tn.leftNode == nil {
+ return nil, GinkgoErrors.SyntaxErrorParsingLabelFilter(input, tn.location, fmt.Sprintf("Malformed tree - '%s' is missing left operand.", tn.token))
+ }
+ leftLF, err := tn.leftNode.constructLabelFilter(input)
+ if err != nil {
+ return nil, err
+ }
+
+ switch tn.token {
+ case lfTokenAnd:
+ return andAction(leftLF, rightLF), nil
+ case lfTokenOr:
+ return orAction(leftLF, rightLF), nil
+ }
+
+ return nil, GinkgoErrors.SyntaxErrorParsingLabelFilter(input, tn.location, fmt.Sprintf("Invalid token '%s'.", tn.token))
+}
+
+func (tn *treeNode) tokenString() string {
+ out := fmt.Sprintf("<%s", tn.token)
+ if tn.value != "" {
+ out += " | " + tn.value
+ }
+ out += ">"
+ return out
+}
+
+func (tn *treeNode) toString(indent int) string {
+ out := tn.tokenString() + "\n"
+ if tn.leftNode != nil {
+ out += fmt.Sprintf("%s |_(L)_%s", strings.Repeat(" ", indent), tn.leftNode.toString(indent+1))
+ }
+ if tn.rightNode != nil {
+ out += fmt.Sprintf("%s |_(R)_%s", strings.Repeat(" ", indent), tn.rightNode.toString(indent+1))
+ }
+ return out
+}
+
+func tokenize(input string) func() (*treeNode, error) {
+ runes, i := []rune(input), 0
+
+ peekIs := func(r rune) bool {
+ if i+1 < len(runes) {
+ return runes[i+1] == r
+ }
+ return false
+ }
+
+ consumeUntil := func(cutset string) (string, int) {
+ j := i
+ for ; j < len(runes); j++ {
+ if strings.IndexRune(cutset, runes[j]) >= 0 {
+ break
+ }
+ }
+ return string(runes[i:j]), j - i
+ }
+
+ return func() (*treeNode, error) {
+ for i < len(runes) && runes[i] == ' ' {
+ i += 1
+ }
+
+ if i >= len(runes) {
+ return &treeNode{token: lfTokenEOF}, nil
+ }
+
+ node := &treeNode{location: i}
+ switch runes[i] {
+ case '&':
+ if !peekIs('&') {
+ return &treeNode{}, GinkgoErrors.SyntaxErrorParsingLabelFilter(input, i, "Invalid token '&'. Did you mean '&&'?")
+ }
+ i += 2
+ node.token = lfTokenAnd
+ case '|':
+ if !peekIs('|') {
+ return &treeNode{}, GinkgoErrors.SyntaxErrorParsingLabelFilter(input, i, "Invalid token '|'. Did you mean '||'?")
+ }
+ i += 2
+ node.token = lfTokenOr
+ case '!':
+ i += 1
+ node.token = lfTokenNot
+ case ',':
+ i += 1
+ node.token = lfTokenOr
+ case '(':
+ i += 1
+ node.token = lfTokenOpenGroup
+ case ')':
+ i += 1
+ node.token = lfTokenCloseGroup
+ case '/':
+ i += 1
+ value, n := consumeUntil("/")
+ i += n + 1
+ node.token, node.value = lfTokenRegexp, value
+ default:
+ value, n := consumeUntil("&|!,()/")
+ i += n
+ node.token, node.value = lfTokenLabel, strings.TrimSpace(value)
+ }
+ return node, nil
+ }
+}
+
+func MustParseLabelFilter(input string) LabelFilter {
+ filter, err := ParseLabelFilter(input)
+ if err != nil {
+ panic(err)
+ }
+ return filter
+}
+
+func ParseLabelFilter(input string) (LabelFilter, error) {
+ if DEBUG_LABEL_FILTER_PARSING {
+ fmt.Println("\n==============")
+ fmt.Println("Input: ", input)
+ fmt.Print("Tokens: ")
+ }
+ if input == "" {
+ return func(_ []string) bool { return true }, nil
+ }
+ nextToken := tokenize(input)
+
+ root := &treeNode{token: lfTokenRoot}
+ current := root
+LOOP:
+ for {
+ node, err := nextToken()
+ if err != nil {
+ return nil, err
+ }
+
+ if DEBUG_LABEL_FILTER_PARSING {
+ fmt.Print(node.tokenString() + " ")
+ }
+
+ switch node.token {
+ case lfTokenEOF:
+ break LOOP
+ case lfTokenLabel, lfTokenRegexp:
+ if current.rightNode != nil {
+ return nil, GinkgoErrors.SyntaxErrorParsingLabelFilter(input, node.location, "Found two adjacent labels. You need an operator between them.")
+ }
+ current.setRightNode(node)
+ case lfTokenNot, lfTokenOpenGroup:
+ if current.rightNode != nil {
+ return nil, GinkgoErrors.SyntaxErrorParsingLabelFilter(input, node.location, fmt.Sprintf("Invalid token '%s'.", node.token))
+ }
+ current.setRightNode(node)
+ current = node
+ case lfTokenAnd, lfTokenOr:
+ if current.rightNode == nil {
+ return nil, GinkgoErrors.SyntaxErrorParsingLabelFilter(input, node.location, fmt.Sprintf("Operator '%s' missing left hand operand.", node.token))
+ }
+ nodeToStealFrom := current.firstAncestorWithPrecedenceLEQ(node.token.Precedence())
+ node.setLeftNode(nodeToStealFrom.rightNode)
+ nodeToStealFrom.setRightNode(node)
+ current = node
+ case lfTokenCloseGroup:
+ firstUnmatchedOpenNode := current.firstUnmatchedOpenNode()
+ if firstUnmatchedOpenNode == nil {
+ return nil, GinkgoErrors.SyntaxErrorParsingLabelFilter(input, node.location, "Mismatched ')' - could not find matching '('.")
+ }
+ if firstUnmatchedOpenNode == current && current.rightNode == nil {
+ return nil, GinkgoErrors.SyntaxErrorParsingLabelFilter(input, node.location, "Found empty '()' group.")
+ }
+ firstUnmatchedOpenNode.token = lfTokenCloseGroup //signify the group is now closed
+ current = firstUnmatchedOpenNode.parent
+ default:
+ return nil, GinkgoErrors.SyntaxErrorParsingLabelFilter(input, node.location, fmt.Sprintf("Unknown token '%s'.", node.token))
+ }
+ }
+ if DEBUG_LABEL_FILTER_PARSING {
+ fmt.Printf("\n Tree:\n%s", root.toString(0))
+ }
+ return root.constructLabelFilter(input)
+}
+
+func ValidateAndCleanupLabel(label string, cl CodeLocation) (string, error) {
+ out := strings.TrimSpace(label)
+ if out == "" {
+ return "", GinkgoErrors.InvalidEmptyLabel(cl)
+ }
+ if strings.ContainsAny(out, "&|!,()/") {
+ return "", GinkgoErrors.InvalidLabel(label, cl)
+ }
+ return out, nil
+}
diff --git a/vendor/github.com/onsi/ginkgo/v2/types/report_entry.go b/vendor/github.com/onsi/ginkgo/v2/types/report_entry.go
new file mode 100644
index 000000000..7b1524b52
--- /dev/null
+++ b/vendor/github.com/onsi/ginkgo/v2/types/report_entry.go
@@ -0,0 +1,190 @@
+package types
+
+import (
+ "encoding/json"
+ "fmt"
+ "time"
+)
+
+// ReportEntryValue wraps a report entry's value ensuring it can be encoded and decoded safely into reports
+// and across the network connection when running in parallel
+type ReportEntryValue struct {
+ raw interface{} //unexported to prevent gob from freaking out about unregistered structs
+ AsJSON string
+ Representation string
+}
+
+func WrapEntryValue(value interface{}) ReportEntryValue {
+ return ReportEntryValue{
+ raw: value,
+ }
+}
+
+func (rev ReportEntryValue) GetRawValue() interface{} {
+ return rev.raw
+}
+
+func (rev ReportEntryValue) String() string {
+ if rev.raw == nil {
+ return ""
+ }
+ if colorableStringer, ok := rev.raw.(ColorableStringer); ok {
+ return colorableStringer.ColorableString()
+ }
+
+ if stringer, ok := rev.raw.(fmt.Stringer); ok {
+ return stringer.String()
+ }
+ if rev.Representation != "" {
+ return rev.Representation
+ }
+ return fmt.Sprintf("%+v", rev.raw)
+}
+
+func (rev ReportEntryValue) MarshalJSON() ([]byte, error) {
+ //All this to capture the representation at encoding-time, not creating time
+ //This way users can Report on pointers and get their final values at reporting-time
+ out := struct {
+ AsJSON string
+ Representation string
+ }{
+ Representation: rev.String(),
+ }
+ asJSON, err := json.Marshal(rev.raw)
+ if err != nil {
+ return nil, err
+ }
+ out.AsJSON = string(asJSON)
+
+ return json.Marshal(out)
+}
+
+func (rev *ReportEntryValue) UnmarshalJSON(data []byte) error {
+ in := struct {
+ AsJSON string
+ Representation string
+ }{}
+ err := json.Unmarshal(data, &in)
+ if err != nil {
+ return err
+ }
+ rev.AsJSON = in.AsJSON
+ rev.Representation = in.Representation
+ return json.Unmarshal([]byte(in.AsJSON), &(rev.raw))
+}
+
+func (rev ReportEntryValue) GobEncode() ([]byte, error) {
+ return rev.MarshalJSON()
+}
+
+func (rev *ReportEntryValue) GobDecode(data []byte) error {
+ return rev.UnmarshalJSON(data)
+}
+
+// ReportEntry captures information attached to `SpecReport` via `AddReportEntry`
+type ReportEntry struct {
+ // Visibility captures the visibility policy for this ReportEntry
+ Visibility ReportEntryVisibility
+ // Location captures the location of the AddReportEntry call
+ Location CodeLocation
+
+ Time time.Time //need this for backwards compatibility
+ TimelineLocation TimelineLocation
+
+ // Name captures the name of this report
+ Name string
+ // Value captures the (optional) object passed into AddReportEntry - this can be
+ // anything the user wants. The value passed to AddReportEntry is wrapped in a ReportEntryValue to make
+ // encoding/decoding the value easier. To access the raw value call entry.GetRawValue()
+ Value ReportEntryValue
+}
+
+// ColorableStringer is an interface that ReportEntry values can satisfy. If they do then ColorableString() is used to generate their representation.
+type ColorableStringer interface {
+ ColorableString() string
+}
+
+// StringRepresentation() returns the string representation of the value associated with the ReportEntry --
+// if value is nil, empty string is returned
+// if value is a `ColorableStringer` then `Value.ColorableString()` is returned
+// if value is a `fmt.Stringer` then `Value.String()` is returned
+// otherwise the value is formatted with "%+v"
+func (entry ReportEntry) StringRepresentation() string {
+ return entry.Value.String()
+}
+
+// GetRawValue returns the Value object that was passed to AddReportEntry
+// If called in-process this will be the same object that was passed into AddReportEntry.
+// If used from a rehydrated JSON file _or_ in a ReportAfterSuite when running in parallel this will be
+// a JSON-decoded {}interface. If you want to reconstitute your original object you can decode the entry.Value.AsJSON
+// field yourself.
+func (entry ReportEntry) GetRawValue() interface{} {
+ return entry.Value.GetRawValue()
+}
+
+func (entry ReportEntry) GetTimelineLocation() TimelineLocation {
+ return entry.TimelineLocation
+}
+
+type ReportEntries []ReportEntry
+
+func (re ReportEntries) HasVisibility(visibilities ...ReportEntryVisibility) bool {
+ for _, entry := range re {
+ if entry.Visibility.Is(visibilities...) {
+ return true
+ }
+ }
+ return false
+}
+
+func (re ReportEntries) WithVisibility(visibilities ...ReportEntryVisibility) ReportEntries {
+ out := ReportEntries{}
+
+ for _, entry := range re {
+ if entry.Visibility.Is(visibilities...) {
+ out = append(out, entry)
+ }
+ }
+
+ return out
+}
+
+// ReportEntryVisibility governs the visibility of ReportEntries in Ginkgo's console reporter
+type ReportEntryVisibility uint
+
+const (
+ // Always print out this ReportEntry
+ ReportEntryVisibilityAlways ReportEntryVisibility = iota
+ // Only print out this ReportEntry if the spec fails or if the test is run with -v
+ ReportEntryVisibilityFailureOrVerbose
+ // Never print out this ReportEntry (note that ReportEntrys are always encoded in machine readable reports (e.g. JSON, JUnit, etc.))
+ ReportEntryVisibilityNever
+)
+
+var revEnumSupport = NewEnumSupport(map[uint]string{
+ uint(ReportEntryVisibilityAlways): "always",
+ uint(ReportEntryVisibilityFailureOrVerbose): "failure-or-verbose",
+ uint(ReportEntryVisibilityNever): "never",
+})
+
+func (rev ReportEntryVisibility) String() string {
+ return revEnumSupport.String(uint(rev))
+}
+func (rev *ReportEntryVisibility) UnmarshalJSON(b []byte) error {
+ out, err := revEnumSupport.UnmarshJSON(b)
+ *rev = ReportEntryVisibility(out)
+ return err
+}
+func (rev ReportEntryVisibility) MarshalJSON() ([]byte, error) {
+ return revEnumSupport.MarshJSON(uint(rev))
+}
+
+func (v ReportEntryVisibility) Is(visibilities ...ReportEntryVisibility) bool {
+ for _, visibility := range visibilities {
+ if v == visibility {
+ return true
+ }
+ }
+
+ return false
+}
diff --git a/vendor/github.com/onsi/ginkgo/v2/types/types.go b/vendor/github.com/onsi/ginkgo/v2/types/types.go
new file mode 100644
index 000000000..d048a8ada
--- /dev/null
+++ b/vendor/github.com/onsi/ginkgo/v2/types/types.go
@@ -0,0 +1,916 @@
+package types
+
+import (
+ "encoding/json"
+ "fmt"
+ "sort"
+ "strings"
+ "time"
+)
+
+const GINKGO_FOCUS_EXIT_CODE = 197
+const GINKGO_TIME_FORMAT = "01/02/06 15:04:05.999"
+
+// Report captures information about a Ginkgo test run
+type Report struct {
+ //SuitePath captures the absolute path to the test suite
+ SuitePath string
+
+ //SuiteDescription captures the description string passed to the DSL's RunSpecs() function
+ SuiteDescription string
+
+ //SuiteLabels captures any labels attached to the suite by the DSL's RunSpecs() function
+ SuiteLabels []string
+
+ //SuiteSucceeded captures the success or failure status of the test run
+ //If true, the test run is considered successful.
+ //If false, the test run is considered unsuccessful
+ SuiteSucceeded bool
+
+ //SuiteHasProgrammaticFocus captures whether the test suite has a test or set of tests that are programmatically focused
+ //(i.e an `FIt` or an `FDescribe`
+ SuiteHasProgrammaticFocus bool
+
+ //SpecialSuiteFailureReasons may contain special failure reasons
+ //For example, a test suite might be considered "failed" even if none of the individual specs
+ //have a failure state. For example, if the user has configured --fail-on-pending the test suite
+ //will have failed if there are pending tests even though all non-pending tests may have passed. In such
+ //cases, Ginkgo populates SpecialSuiteFailureReasons with a clear message indicating the reason for the failure.
+ //SpecialSuiteFailureReasons is also populated if the test suite is interrupted by the user.
+ //Since multiple special failure reasons can occur, this field is a slice.
+ SpecialSuiteFailureReasons []string
+
+ //PreRunStats contains a set of stats captured before the test run begins. This is primarily used
+ //by Ginkgo's reporter to tell the user how many specs are in the current suite (PreRunStats.TotalSpecs)
+ //and how many it intends to run (PreRunStats.SpecsThatWillRun) after applying any relevant focus or skip filters.
+ PreRunStats PreRunStats
+
+ //StartTime and EndTime capture the start and end time of the test run
+ StartTime time.Time
+ EndTime time.Time
+
+ //RunTime captures the duration of the test run
+ RunTime time.Duration
+
+ //SuiteConfig captures the Ginkgo configuration governing this test run
+ //SuiteConfig includes information necessary for reproducing an identical test run,
+ //such as the random seed and any filters applied during the test run
+ SuiteConfig SuiteConfig
+
+ //SpecReports is a list of all SpecReports generated by this test run
+ //It is empty when the SuiteReport is provided to ReportBeforeSuite
+ SpecReports SpecReports
+}
+
+// PreRunStats contains a set of stats captured before the test run begins. This is primarily used
+// by Ginkgo's reporter to tell the user how many specs are in the current suite (PreRunStats.TotalSpecs)
+// and how many it intends to run (PreRunStats.SpecsThatWillRun) after applying any relevant focus or skip filters.
+type PreRunStats struct {
+ TotalSpecs int
+ SpecsThatWillRun int
+}
+
+// Add is used by Ginkgo's parallel aggregation mechanisms to combine test run reports form individual parallel processes
+// to form a complete final report.
+func (report Report) Add(other Report) Report {
+ report.SuiteSucceeded = report.SuiteSucceeded && other.SuiteSucceeded
+
+ if other.StartTime.Before(report.StartTime) {
+ report.StartTime = other.StartTime
+ }
+
+ if other.EndTime.After(report.EndTime) {
+ report.EndTime = other.EndTime
+ }
+
+ specialSuiteFailureReasons := []string{}
+ reasonsLookup := map[string]bool{}
+ for _, reasons := range [][]string{report.SpecialSuiteFailureReasons, other.SpecialSuiteFailureReasons} {
+ for _, reason := range reasons {
+ if !reasonsLookup[reason] {
+ reasonsLookup[reason] = true
+ specialSuiteFailureReasons = append(specialSuiteFailureReasons, reason)
+ }
+ }
+ }
+ report.SpecialSuiteFailureReasons = specialSuiteFailureReasons
+ report.RunTime = report.EndTime.Sub(report.StartTime)
+
+ reports := make(SpecReports, len(report.SpecReports)+len(other.SpecReports))
+ for i := range report.SpecReports {
+ reports[i] = report.SpecReports[i]
+ }
+ offset := len(report.SpecReports)
+ for i := range other.SpecReports {
+ reports[i+offset] = other.SpecReports[i]
+ }
+
+ report.SpecReports = reports
+ return report
+}
+
+// SpecReport captures information about a Ginkgo spec.
+type SpecReport struct {
+ // ContainerHierarchyTexts is a slice containing the text strings of
+ // all Describe/Context/When containers in this spec's hierarchy.
+ ContainerHierarchyTexts []string
+
+ // ContainerHierarchyLocations is a slice containing the CodeLocations of
+ // all Describe/Context/When containers in this spec's hierarchy.
+ ContainerHierarchyLocations []CodeLocation
+
+ // ContainerHierarchyLabels is a slice containing the labels of
+ // all Describe/Context/When containers in this spec's hierarchy
+ ContainerHierarchyLabels [][]string
+
+ // LeafNodeType, LeadNodeLocation, LeafNodeLabels and LeafNodeText capture the NodeType, CodeLocation, and text
+ // of the Ginkgo node being tested (typically an NodeTypeIt node, though this can also be
+ // one of the NodeTypesForSuiteLevelNodes node types)
+ LeafNodeType NodeType
+ LeafNodeLocation CodeLocation
+ LeafNodeLabels []string
+ LeafNodeText string
+
+ // State captures whether the spec has passed, failed, etc.
+ State SpecState
+
+ // IsSerial captures whether the spec has the Serial decorator
+ IsSerial bool
+
+ // IsInOrderedContainer captures whether the spec appears in an Ordered container
+ IsInOrderedContainer bool
+
+ // StartTime and EndTime capture the start and end time of the spec
+ StartTime time.Time
+ EndTime time.Time
+
+ // RunTime captures the duration of the spec
+ RunTime time.Duration
+
+ // ParallelProcess captures the parallel process that this spec ran on
+ ParallelProcess int
+
+ // RunningInParallel captures whether this spec is part of a suite that ran in parallel
+ RunningInParallel bool
+
+ //Failure is populated if a spec has failed, panicked, been interrupted, or skipped by the user (e.g. calling Skip())
+ //It includes detailed information about the Failure
+ Failure Failure
+
+ // NumAttempts captures the number of times this Spec was run.
+ // Flakey specs can be retried with ginkgo --flake-attempts=N or the use of the FlakeAttempts decorator.
+ // Repeated specs can be retried with the use of the MustPassRepeatedly decorator
+ NumAttempts int
+
+ // MaxFlakeAttempts captures whether the spec has been retried with ginkgo --flake-attempts=N or the use of the FlakeAttempts decorator.
+ MaxFlakeAttempts int
+
+ // MaxMustPassRepeatedly captures whether the spec has the MustPassRepeatedly decorator
+ MaxMustPassRepeatedly int
+
+ // CapturedGinkgoWriterOutput contains text printed to the GinkgoWriter
+ CapturedGinkgoWriterOutput string
+
+ // CapturedStdOutErr contains text printed to stdout/stderr (when running in parallel)
+ // This is always empty when running in series or calling CurrentSpecReport()
+ // It is used internally by Ginkgo's reporter
+ CapturedStdOutErr string
+
+ // ReportEntries contains any reports added via `AddReportEntry`
+ ReportEntries ReportEntries
+
+ // ProgressReports contains any progress reports generated during this spec. These can either be manually triggered, or automatically generated by Ginkgo via the PollProgressAfter() decorator
+ ProgressReports []ProgressReport
+
+ // AdditionalFailures contains any failures that occurred after the initial spec failure. These typically occur in cleanup nodes after the initial failure and are only emitted when running in verbose mode.
+ AdditionalFailures []AdditionalFailure
+
+ // SpecEvents capture additional events that occur during the spec run
+ SpecEvents SpecEvents
+}
+
+func (report SpecReport) MarshalJSON() ([]byte, error) {
+ //All this to avoid emitting an empty Failure struct in the JSON
+ out := struct {
+ ContainerHierarchyTexts []string
+ ContainerHierarchyLocations []CodeLocation
+ ContainerHierarchyLabels [][]string
+ LeafNodeType NodeType
+ LeafNodeLocation CodeLocation
+ LeafNodeLabels []string
+ LeafNodeText string
+ State SpecState
+ StartTime time.Time
+ EndTime time.Time
+ RunTime time.Duration
+ ParallelProcess int
+ Failure *Failure `json:",omitempty"`
+ NumAttempts int
+ MaxFlakeAttempts int
+ MaxMustPassRepeatedly int
+ CapturedGinkgoWriterOutput string `json:",omitempty"`
+ CapturedStdOutErr string `json:",omitempty"`
+ ReportEntries ReportEntries `json:",omitempty"`
+ ProgressReports []ProgressReport `json:",omitempty"`
+ AdditionalFailures []AdditionalFailure `json:",omitempty"`
+ SpecEvents SpecEvents `json:",omitempty"`
+ }{
+ ContainerHierarchyTexts: report.ContainerHierarchyTexts,
+ ContainerHierarchyLocations: report.ContainerHierarchyLocations,
+ ContainerHierarchyLabels: report.ContainerHierarchyLabels,
+ LeafNodeType: report.LeafNodeType,
+ LeafNodeLocation: report.LeafNodeLocation,
+ LeafNodeLabels: report.LeafNodeLabels,
+ LeafNodeText: report.LeafNodeText,
+ State: report.State,
+ StartTime: report.StartTime,
+ EndTime: report.EndTime,
+ RunTime: report.RunTime,
+ ParallelProcess: report.ParallelProcess,
+ Failure: nil,
+ ReportEntries: nil,
+ NumAttempts: report.NumAttempts,
+ MaxFlakeAttempts: report.MaxFlakeAttempts,
+ MaxMustPassRepeatedly: report.MaxMustPassRepeatedly,
+ CapturedGinkgoWriterOutput: report.CapturedGinkgoWriterOutput,
+ CapturedStdOutErr: report.CapturedStdOutErr,
+ }
+
+ if !report.Failure.IsZero() {
+ out.Failure = &(report.Failure)
+ }
+ if len(report.ReportEntries) > 0 {
+ out.ReportEntries = report.ReportEntries
+ }
+ if len(report.ProgressReports) > 0 {
+ out.ProgressReports = report.ProgressReports
+ }
+ if len(report.AdditionalFailures) > 0 {
+ out.AdditionalFailures = report.AdditionalFailures
+ }
+ if len(report.SpecEvents) > 0 {
+ out.SpecEvents = report.SpecEvents
+ }
+
+ return json.Marshal(out)
+}
+
+// CombinedOutput returns a single string representation of both CapturedStdOutErr and CapturedGinkgoWriterOutput
+// Note that both are empty when using CurrentSpecReport() so CurrentSpecReport().CombinedOutput() will always be empty.
+// CombinedOutput() is used internally by Ginkgo's reporter.
+func (report SpecReport) CombinedOutput() string {
+ if report.CapturedStdOutErr == "" {
+ return report.CapturedGinkgoWriterOutput
+ }
+ if report.CapturedGinkgoWriterOutput == "" {
+ return report.CapturedStdOutErr
+ }
+ return report.CapturedStdOutErr + "\n" + report.CapturedGinkgoWriterOutput
+}
+
+// Failed returns true if report.State is one of the SpecStateFailureStates
+// (SpecStateFailed, SpecStatePanicked, SpecStateinterrupted, SpecStateAborted)
+func (report SpecReport) Failed() bool {
+ return report.State.Is(SpecStateFailureStates)
+}
+
+// FullText returns a concatenation of all the report.ContainerHierarchyTexts and report.LeafNodeText
+func (report SpecReport) FullText() string {
+ texts := []string{}
+ texts = append(texts, report.ContainerHierarchyTexts...)
+ if report.LeafNodeText != "" {
+ texts = append(texts, report.LeafNodeText)
+ }
+ return strings.Join(texts, " ")
+}
+
+// Labels returns a deduped set of all the spec's Labels.
+func (report SpecReport) Labels() []string {
+ out := []string{}
+ seen := map[string]bool{}
+ for _, labels := range report.ContainerHierarchyLabels {
+ for _, label := range labels {
+ if !seen[label] {
+ seen[label] = true
+ out = append(out, label)
+ }
+ }
+ }
+ for _, label := range report.LeafNodeLabels {
+ if !seen[label] {
+ seen[label] = true
+ out = append(out, label)
+ }
+ }
+
+ return out
+}
+
+// MatchesLabelFilter returns true if the spec satisfies the passed in label filter query
+func (report SpecReport) MatchesLabelFilter(query string) (bool, error) {
+ filter, err := ParseLabelFilter(query)
+ if err != nil {
+ return false, err
+ }
+ return filter(report.Labels()), nil
+}
+
+// FileName() returns the name of the file containing the spec
+func (report SpecReport) FileName() string {
+ return report.LeafNodeLocation.FileName
+}
+
+// LineNumber() returns the line number of the leaf node
+func (report SpecReport) LineNumber() int {
+ return report.LeafNodeLocation.LineNumber
+}
+
+// FailureMessage() returns the failure message (or empty string if the test hasn't failed)
+func (report SpecReport) FailureMessage() string {
+ return report.Failure.Message
+}
+
+// FailureLocation() returns the location of the failure (or an empty CodeLocation if the test hasn't failed)
+func (report SpecReport) FailureLocation() CodeLocation {
+ return report.Failure.Location
+}
+
+// Timeline() returns a timeline view of the report
+func (report SpecReport) Timeline() Timeline {
+ timeline := Timeline{}
+ if !report.Failure.IsZero() {
+ timeline = append(timeline, report.Failure)
+ if report.Failure.AdditionalFailure != nil {
+ timeline = append(timeline, *(report.Failure.AdditionalFailure))
+ }
+ }
+ for _, additionalFailure := range report.AdditionalFailures {
+ timeline = append(timeline, additionalFailure)
+ }
+ for _, reportEntry := range report.ReportEntries {
+ timeline = append(timeline, reportEntry)
+ }
+ for _, progressReport := range report.ProgressReports {
+ timeline = append(timeline, progressReport)
+ }
+ for _, specEvent := range report.SpecEvents {
+ timeline = append(timeline, specEvent)
+ }
+ sort.Sort(timeline)
+ return timeline
+}
+
+type SpecReports []SpecReport
+
+// WithLeafNodeType returns the subset of SpecReports with LeafNodeType matching one of the requested NodeTypes
+func (reports SpecReports) WithLeafNodeType(nodeTypes NodeType) SpecReports {
+ count := 0
+ for i := range reports {
+ if reports[i].LeafNodeType.Is(nodeTypes) {
+ count++
+ }
+ }
+
+ out := make(SpecReports, count)
+ j := 0
+ for i := range reports {
+ if reports[i].LeafNodeType.Is(nodeTypes) {
+ out[j] = reports[i]
+ j++
+ }
+ }
+ return out
+}
+
+// WithState returns the subset of SpecReports with State matching one of the requested SpecStates
+func (reports SpecReports) WithState(states SpecState) SpecReports {
+ count := 0
+ for i := range reports {
+ if reports[i].State.Is(states) {
+ count++
+ }
+ }
+
+ out, j := make(SpecReports, count), 0
+ for i := range reports {
+ if reports[i].State.Is(states) {
+ out[j] = reports[i]
+ j++
+ }
+ }
+ return out
+}
+
+// CountWithState returns the number of SpecReports with State matching one of the requested SpecStates
+func (reports SpecReports) CountWithState(states SpecState) int {
+ n := 0
+ for i := range reports {
+ if reports[i].State.Is(states) {
+ n += 1
+ }
+ }
+ return n
+}
+
+// If the Spec passes, CountOfFlakedSpecs returns the number of SpecReports that failed after multiple attempts.
+func (reports SpecReports) CountOfFlakedSpecs() int {
+ n := 0
+ for i := range reports {
+ if reports[i].MaxFlakeAttempts > 1 && reports[i].State.Is(SpecStatePassed) && reports[i].NumAttempts > 1 {
+ n += 1
+ }
+ }
+ return n
+}
+
+// If the Spec fails, CountOfRepeatedSpecs returns the number of SpecReports that passed after multiple attempts
+func (reports SpecReports) CountOfRepeatedSpecs() int {
+ n := 0
+ for i := range reports {
+ if reports[i].MaxMustPassRepeatedly > 1 && reports[i].State.Is(SpecStateFailureStates) && reports[i].NumAttempts > 1 {
+ n += 1
+ }
+ }
+ return n
+}
+
+// TimelineLocation captures the location of an event in the spec's timeline
+type TimelineLocation struct {
+ //Offset is the offset (in bytes) of the event relative to the GinkgoWriter stream
+ Offset int `json:",omitempty"`
+
+ //Order is the order of the event with respect to other events. The absolute value of Order
+ //is irrelevant. All that matters is that an event with a lower Order occurs before ane vent with a higher Order
+ Order int `json:",omitempty"`
+
+ Time time.Time
+}
+
+// TimelineEvent represent an event on the timeline
+// consumers of Timeline will need to check the concrete type of each entry to determine how to handle it
+type TimelineEvent interface {
+ GetTimelineLocation() TimelineLocation
+}
+
+type Timeline []TimelineEvent
+
+func (t Timeline) Len() int { return len(t) }
+func (t Timeline) Less(i, j int) bool {
+ return t[i].GetTimelineLocation().Order < t[j].GetTimelineLocation().Order
+}
+func (t Timeline) Swap(i, j int) { t[i], t[j] = t[j], t[i] }
+func (t Timeline) WithoutHiddenReportEntries() Timeline {
+ out := Timeline{}
+ for _, event := range t {
+ if reportEntry, isReportEntry := event.(ReportEntry); isReportEntry && reportEntry.Visibility == ReportEntryVisibilityNever {
+ continue
+ }
+ out = append(out, event)
+ }
+ return out
+}
+
+func (t Timeline) WithoutVeryVerboseSpecEvents() Timeline {
+ out := Timeline{}
+ for _, event := range t {
+ if specEvent, isSpecEvent := event.(SpecEvent); isSpecEvent && specEvent.IsOnlyVisibleAtVeryVerbose() {
+ continue
+ }
+ out = append(out, event)
+ }
+ return out
+}
+
+// Failure captures failure information for an individual test
+type Failure struct {
+ // Message - the failure message passed into Fail(...). When using a matcher library
+ // like Gomega, this will contain the failure message generated by Gomega.
+ //
+ // Message is also populated if the user has called Skip(...).
+ Message string
+
+ // Location - the CodeLocation where the failure occurred
+ // This CodeLocation will include a fully-populated StackTrace
+ Location CodeLocation
+
+ TimelineLocation TimelineLocation
+
+ // ForwardedPanic - if the failure represents a captured panic (i.e. Summary.State == SpecStatePanicked)
+ // then ForwardedPanic will be populated with a string representation of the captured panic.
+ ForwardedPanic string `json:",omitempty"`
+
+ // FailureNodeContext - one of three contexts describing the node in which the failure occurred:
+ // FailureNodeIsLeafNode means the failure occurred in the leaf node of the associated SpecReport. None of the other FailureNode fields will be populated
+ // FailureNodeAtTopLevel means the failure occurred in a non-leaf node that is defined at the top-level of the spec (i.e. not in a container). FailureNodeType and FailureNodeLocation will be populated.
+ // FailureNodeInContainer means the failure occurred in a non-leaf node that is defined within a container. FailureNodeType, FailureNodeLocation, and FailureNodeContainerIndex will be populated.
+ //
+ // FailureNodeType will contain the NodeType of the node in which the failure occurred.
+ // FailureNodeLocation will contain the CodeLocation of the node in which the failure occurred.
+ // If populated, FailureNodeContainerIndex will be the index into SpecReport.ContainerHierarchyTexts and SpecReport.ContainerHierarchyLocations that represents the parent container of the node in which the failure occurred.
+ FailureNodeContext FailureNodeContext `json:",omitempty"`
+
+ FailureNodeType NodeType `json:",omitempty"`
+
+ FailureNodeLocation CodeLocation `json:",omitempty"`
+
+ FailureNodeContainerIndex int `json:",omitempty"`
+
+ //ProgressReport is populated if the spec was interrupted or timed out
+ ProgressReport ProgressReport `json:",omitempty"`
+
+ //AdditionalFailure is non-nil if a follow-on failure occurred within the same node after the primary failure. This only happens when a node has timed out or been interrupted. In such cases the AdditionalFailure can include information about where/why the spec was stuck.
+ AdditionalFailure *AdditionalFailure `json:",omitempty"`
+}
+
+func (f Failure) IsZero() bool {
+ return f.Message == "" && (f.Location == CodeLocation{})
+}
+
+func (f Failure) GetTimelineLocation() TimelineLocation {
+ return f.TimelineLocation
+}
+
+// FailureNodeContext captures the location context for the node containing the failing line of code
+type FailureNodeContext uint
+
+const (
+ FailureNodeContextInvalid FailureNodeContext = iota
+
+ FailureNodeIsLeafNode
+ FailureNodeAtTopLevel
+ FailureNodeInContainer
+)
+
+var fncEnumSupport = NewEnumSupport(map[uint]string{
+ uint(FailureNodeContextInvalid): "INVALID FAILURE NODE CONTEXT",
+ uint(FailureNodeIsLeafNode): "leaf-node",
+ uint(FailureNodeAtTopLevel): "top-level",
+ uint(FailureNodeInContainer): "in-container",
+})
+
+func (fnc FailureNodeContext) String() string {
+ return fncEnumSupport.String(uint(fnc))
+}
+func (fnc *FailureNodeContext) UnmarshalJSON(b []byte) error {
+ out, err := fncEnumSupport.UnmarshJSON(b)
+ *fnc = FailureNodeContext(out)
+ return err
+}
+func (fnc FailureNodeContext) MarshalJSON() ([]byte, error) {
+ return fncEnumSupport.MarshJSON(uint(fnc))
+}
+
+// AdditionalFailure capturs any additional failures that occur after the initial failure of a psec
+// these typically occur in clean up nodes after the spec has failed.
+// We can't simply use Failure as we want to track the SpecState to know what kind of failure this is
+type AdditionalFailure struct {
+ State SpecState
+ Failure Failure
+}
+
+func (f AdditionalFailure) GetTimelineLocation() TimelineLocation {
+ return f.Failure.TimelineLocation
+}
+
+// SpecState captures the state of a spec
+// To determine if a given `state` represents a failure state, use `state.Is(SpecStateFailureStates)`
+type SpecState uint
+
+const (
+ SpecStateInvalid SpecState = 0
+
+ SpecStatePending SpecState = 1 << iota
+ SpecStateSkipped
+ SpecStatePassed
+ SpecStateFailed
+ SpecStateAborted
+ SpecStatePanicked
+ SpecStateInterrupted
+ SpecStateTimedout
+)
+
+var ssEnumSupport = NewEnumSupport(map[uint]string{
+ uint(SpecStateInvalid): "INVALID SPEC STATE",
+ uint(SpecStatePending): "pending",
+ uint(SpecStateSkipped): "skipped",
+ uint(SpecStatePassed): "passed",
+ uint(SpecStateFailed): "failed",
+ uint(SpecStateAborted): "aborted",
+ uint(SpecStatePanicked): "panicked",
+ uint(SpecStateInterrupted): "interrupted",
+ uint(SpecStateTimedout): "timedout",
+})
+
+func (ss SpecState) String() string {
+ return ssEnumSupport.String(uint(ss))
+}
+func (ss SpecState) GomegaString() string {
+ return ssEnumSupport.String(uint(ss))
+}
+func (ss *SpecState) UnmarshalJSON(b []byte) error {
+ out, err := ssEnumSupport.UnmarshJSON(b)
+ *ss = SpecState(out)
+ return err
+}
+func (ss SpecState) MarshalJSON() ([]byte, error) {
+ return ssEnumSupport.MarshJSON(uint(ss))
+}
+
+var SpecStateFailureStates = SpecStateFailed | SpecStateTimedout | SpecStateAborted | SpecStatePanicked | SpecStateInterrupted
+
+func (ss SpecState) Is(states SpecState) bool {
+ return ss&states != 0
+}
+
+// ProgressReport captures the progress of the current spec. It is, effectively, a structured Ginkgo-aware stack trace
+type ProgressReport struct {
+ Message string `json:",omitempty"`
+ ParallelProcess int `json:",omitempty"`
+ RunningInParallel bool `json:",omitempty"`
+
+ ContainerHierarchyTexts []string `json:",omitempty"`
+ LeafNodeText string `json:",omitempty"`
+ LeafNodeLocation CodeLocation `json:",omitempty"`
+ SpecStartTime time.Time `json:",omitempty"`
+
+ CurrentNodeType NodeType `json:",omitempty"`
+ CurrentNodeText string `json:",omitempty"`
+ CurrentNodeLocation CodeLocation `json:",omitempty"`
+ CurrentNodeStartTime time.Time `json:",omitempty"`
+
+ CurrentStepText string `json:",omitempty"`
+ CurrentStepLocation CodeLocation `json:",omitempty"`
+ CurrentStepStartTime time.Time `json:",omitempty"`
+
+ AdditionalReports []string `json:",omitempty"`
+
+ CapturedGinkgoWriterOutput string `json:",omitempty"`
+ TimelineLocation TimelineLocation `json:",omitempty"`
+
+ Goroutines []Goroutine `json:",omitempty"`
+}
+
+func (pr ProgressReport) IsZero() bool {
+ return pr.CurrentNodeType == NodeTypeInvalid
+}
+
+func (pr ProgressReport) Time() time.Time {
+ return pr.TimelineLocation.Time
+}
+
+func (pr ProgressReport) SpecGoroutine() Goroutine {
+ for _, goroutine := range pr.Goroutines {
+ if goroutine.IsSpecGoroutine {
+ return goroutine
+ }
+ }
+ return Goroutine{}
+}
+
+func (pr ProgressReport) HighlightedGoroutines() []Goroutine {
+ out := []Goroutine{}
+ for _, goroutine := range pr.Goroutines {
+ if goroutine.IsSpecGoroutine || !goroutine.HasHighlights() {
+ continue
+ }
+ out = append(out, goroutine)
+ }
+ return out
+}
+
+func (pr ProgressReport) OtherGoroutines() []Goroutine {
+ out := []Goroutine{}
+ for _, goroutine := range pr.Goroutines {
+ if goroutine.IsSpecGoroutine || goroutine.HasHighlights() {
+ continue
+ }
+ out = append(out, goroutine)
+ }
+ return out
+}
+
+func (pr ProgressReport) WithoutCapturedGinkgoWriterOutput() ProgressReport {
+ out := pr
+ out.CapturedGinkgoWriterOutput = ""
+ return out
+}
+
+func (pr ProgressReport) WithoutOtherGoroutines() ProgressReport {
+ out := pr
+ filteredGoroutines := []Goroutine{}
+ for _, goroutine := range pr.Goroutines {
+ if goroutine.IsSpecGoroutine || goroutine.HasHighlights() {
+ filteredGoroutines = append(filteredGoroutines, goroutine)
+ }
+ }
+ out.Goroutines = filteredGoroutines
+ return out
+}
+
+func (pr ProgressReport) GetTimelineLocation() TimelineLocation {
+ return pr.TimelineLocation
+}
+
+type Goroutine struct {
+ ID uint64
+ State string
+ Stack []FunctionCall
+ IsSpecGoroutine bool
+}
+
+func (g Goroutine) IsZero() bool {
+ return g.ID == 0
+}
+
+func (g Goroutine) HasHighlights() bool {
+ for _, fc := range g.Stack {
+ if fc.Highlight {
+ return true
+ }
+ }
+
+ return false
+}
+
+type FunctionCall struct {
+ Function string
+ Filename string
+ Line int
+ Highlight bool `json:",omitempty"`
+ Source []string `json:",omitempty"`
+ SourceHighlight int `json:",omitempty"`
+}
+
+// NodeType captures the type of a given Ginkgo Node
+type NodeType uint
+
+const (
+ NodeTypeInvalid NodeType = 0
+
+ NodeTypeContainer NodeType = 1 << iota
+ NodeTypeIt
+
+ NodeTypeBeforeEach
+ NodeTypeJustBeforeEach
+ NodeTypeAfterEach
+ NodeTypeJustAfterEach
+
+ NodeTypeBeforeAll
+ NodeTypeAfterAll
+
+ NodeTypeBeforeSuite
+ NodeTypeSynchronizedBeforeSuite
+ NodeTypeAfterSuite
+ NodeTypeSynchronizedAfterSuite
+
+ NodeTypeReportBeforeEach
+ NodeTypeReportAfterEach
+ NodeTypeReportBeforeSuite
+ NodeTypeReportAfterSuite
+
+ NodeTypeCleanupInvalid
+ NodeTypeCleanupAfterEach
+ NodeTypeCleanupAfterAll
+ NodeTypeCleanupAfterSuite
+)
+
+var NodeTypesForContainerAndIt = NodeTypeContainer | NodeTypeIt
+var NodeTypesForSuiteLevelNodes = NodeTypeBeforeSuite | NodeTypeSynchronizedBeforeSuite | NodeTypeAfterSuite | NodeTypeSynchronizedAfterSuite | NodeTypeReportBeforeSuite | NodeTypeReportAfterSuite | NodeTypeCleanupAfterSuite
+var NodeTypesAllowedDuringCleanupInterrupt = NodeTypeAfterEach | NodeTypeJustAfterEach | NodeTypeAfterAll | NodeTypeAfterSuite | NodeTypeSynchronizedAfterSuite | NodeTypeCleanupAfterEach | NodeTypeCleanupAfterAll | NodeTypeCleanupAfterSuite
+var NodeTypesAllowedDuringReportInterrupt = NodeTypeReportBeforeEach | NodeTypeReportAfterEach | NodeTypeReportBeforeSuite | NodeTypeReportAfterSuite
+
+var ntEnumSupport = NewEnumSupport(map[uint]string{
+ uint(NodeTypeInvalid): "INVALID NODE TYPE",
+ uint(NodeTypeContainer): "Container",
+ uint(NodeTypeIt): "It",
+ uint(NodeTypeBeforeEach): "BeforeEach",
+ uint(NodeTypeJustBeforeEach): "JustBeforeEach",
+ uint(NodeTypeAfterEach): "AfterEach",
+ uint(NodeTypeJustAfterEach): "JustAfterEach",
+ uint(NodeTypeBeforeAll): "BeforeAll",
+ uint(NodeTypeAfterAll): "AfterAll",
+ uint(NodeTypeBeforeSuite): "BeforeSuite",
+ uint(NodeTypeSynchronizedBeforeSuite): "SynchronizedBeforeSuite",
+ uint(NodeTypeAfterSuite): "AfterSuite",
+ uint(NodeTypeSynchronizedAfterSuite): "SynchronizedAfterSuite",
+ uint(NodeTypeReportBeforeEach): "ReportBeforeEach",
+ uint(NodeTypeReportAfterEach): "ReportAfterEach",
+ uint(NodeTypeReportBeforeSuite): "ReportBeforeSuite",
+ uint(NodeTypeReportAfterSuite): "ReportAfterSuite",
+ uint(NodeTypeCleanupInvalid): "DeferCleanup",
+ uint(NodeTypeCleanupAfterEach): "DeferCleanup (Each)",
+ uint(NodeTypeCleanupAfterAll): "DeferCleanup (All)",
+ uint(NodeTypeCleanupAfterSuite): "DeferCleanup (Suite)",
+})
+
+func (nt NodeType) String() string {
+ return ntEnumSupport.String(uint(nt))
+}
+func (nt *NodeType) UnmarshalJSON(b []byte) error {
+ out, err := ntEnumSupport.UnmarshJSON(b)
+ *nt = NodeType(out)
+ return err
+}
+func (nt NodeType) MarshalJSON() ([]byte, error) {
+ return ntEnumSupport.MarshJSON(uint(nt))
+}
+
+func (nt NodeType) Is(nodeTypes NodeType) bool {
+ return nt&nodeTypes != 0
+}
+
+/*
+SpecEvent captures a vareity of events that can occur when specs run. See SpecEventType for the list of available events.
+*/
+type SpecEvent struct {
+ SpecEventType SpecEventType
+
+ CodeLocation CodeLocation
+ TimelineLocation TimelineLocation
+
+ Message string `json:",omitempty"`
+ Duration time.Duration `json:",omitempty"`
+ NodeType NodeType `json:",omitempty"`
+ Attempt int `json:",omitempty"`
+}
+
+func (se SpecEvent) GetTimelineLocation() TimelineLocation {
+ return se.TimelineLocation
+}
+
+func (se SpecEvent) IsOnlyVisibleAtVeryVerbose() bool {
+ return se.SpecEventType.Is(SpecEventByEnd | SpecEventNodeStart | SpecEventNodeEnd)
+}
+
+func (se SpecEvent) GomegaString() string {
+ out := &strings.Builder{}
+ out.WriteString("[" + se.SpecEventType.String() + " SpecEvent] ")
+ if se.Message != "" {
+ out.WriteString("Message=")
+ out.WriteString(`"` + se.Message + `",`)
+ }
+ if se.Duration != 0 {
+ out.WriteString("Duration=" + se.Duration.String() + ",")
+ }
+ if se.NodeType != NodeTypeInvalid {
+ out.WriteString("NodeType=" + se.NodeType.String() + ",")
+ }
+ if se.Attempt != 0 {
+ out.WriteString(fmt.Sprintf("Attempt=%d", se.Attempt) + ",")
+ }
+ out.WriteString("CL=" + se.CodeLocation.String() + ",")
+ out.WriteString(fmt.Sprintf("TL.Offset=%d", se.TimelineLocation.Offset))
+
+ return out.String()
+}
+
+type SpecEvents []SpecEvent
+
+func (se SpecEvents) WithType(seType SpecEventType) SpecEvents {
+ out := SpecEvents{}
+ for _, event := range se {
+ if event.SpecEventType.Is(seType) {
+ out = append(out, event)
+ }
+ }
+ return out
+}
+
+type SpecEventType uint
+
+const (
+ SpecEventInvalid SpecEventType = 0
+
+ SpecEventByStart SpecEventType = 1 << iota
+ SpecEventByEnd
+ SpecEventNodeStart
+ SpecEventNodeEnd
+ SpecEventSpecRepeat
+ SpecEventSpecRetry
+)
+
+var seEnumSupport = NewEnumSupport(map[uint]string{
+ uint(SpecEventInvalid): "INVALID SPEC EVENT",
+ uint(SpecEventByStart): "By",
+ uint(SpecEventByEnd): "By (End)",
+ uint(SpecEventNodeStart): "Node",
+ uint(SpecEventNodeEnd): "Node (End)",
+ uint(SpecEventSpecRepeat): "Repeat",
+ uint(SpecEventSpecRetry): "Retry",
+})
+
+func (se SpecEventType) String() string {
+ return seEnumSupport.String(uint(se))
+}
+func (se *SpecEventType) UnmarshalJSON(b []byte) error {
+ out, err := seEnumSupport.UnmarshJSON(b)
+ *se = SpecEventType(out)
+ return err
+}
+func (se SpecEventType) MarshalJSON() ([]byte, error) {
+ return seEnumSupport.MarshJSON(uint(se))
+}
+
+func (se SpecEventType) Is(specEventTypes SpecEventType) bool {
+ return se&specEventTypes != 0
+}
diff --git a/vendor/github.com/onsi/ginkgo/v2/types/version.go b/vendor/github.com/onsi/ginkgo/v2/types/version.go
new file mode 100644
index 000000000..6bc46150e
--- /dev/null
+++ b/vendor/github.com/onsi/ginkgo/v2/types/version.go
@@ -0,0 +1,3 @@
+package types
+
+const VERSION = "2.9.7"
diff --git a/vendor/github.com/onsi/gomega/.gitignore b/vendor/github.com/onsi/gomega/.gitignore
index 720c13cba..425d0a509 100644
--- a/vendor/github.com/onsi/gomega/.gitignore
+++ b/vendor/github.com/onsi/gomega/.gitignore
@@ -3,3 +3,5 @@
.
.idea
gomega.iml
+TODO
+.vscode
\ No newline at end of file
diff --git a/vendor/github.com/onsi/gomega/.travis.yml b/vendor/github.com/onsi/gomega/.travis.yml
deleted file mode 100644
index 6543dc553..000000000
--- a/vendor/github.com/onsi/gomega/.travis.yml
+++ /dev/null
@@ -1,18 +0,0 @@
-language: go
-arch:
- - amd64
- - ppc64le
-
-go:
- - gotip
- - 1.16.x
- - 1.15.x
-
-env:
- - GO111MODULE=on
-
-install: skip
-
-script:
- - go mod tidy && git diff --exit-code go.mod go.sum
- - make test
diff --git a/vendor/github.com/onsi/gomega/CHANGELOG.md b/vendor/github.com/onsi/gomega/CHANGELOG.md
index 18190e8b9..07a3a5a82 100644
--- a/vendor/github.com/onsi/gomega/CHANGELOG.md
+++ b/vendor/github.com/onsi/gomega/CHANGELOG.md
@@ -1,3 +1,307 @@
+## 1.27.7
+
+### Fixes
+- fix: gcustom.MakeMatcher accepts nil as actual value (#666) [57054d5]
+
+### Maintenance
+- update gitignore [05c1bc6]
+- Bump github.com/onsi/ginkgo/v2 from 2.9.4 to 2.9.5 (#663) [7cadcf6]
+- Bump golang.org/x/net from 0.9.0 to 0.10.0 (#662) [b524839]
+- Bump github.com/onsi/ginkgo/v2 from 2.9.2 to 2.9.4 (#661) [5f44694]
+- Bump commonmarker from 0.23.8 to 0.23.9 in /docs (#657) [05dc99a]
+- Bump nokogiri from 1.14.1 to 1.14.3 in /docs (#658) [3a033d1]
+- Replace deprecated NewGomegaWithT with NewWithT (#659) [a19238f]
+- Bump golang.org/x/net from 0.8.0 to 0.9.0 (#656) [29ed041]
+- Bump actions/setup-go from 3 to 4 (#651) [11b2080]
+
+## 1.27.6
+
+### Fixes
+- Allow collections matchers to work correctly when expected has nil elements [60e7cf3]
+
+### Maintenance
+- updates MatchError godoc comment to also accept a Gomega matcher (#654) [67b869d]
+
+## 1.27.5
+
+### Maintenance
+- Bump github.com/onsi/ginkgo/v2 from 2.9.1 to 2.9.2 (#653) [a215021]
+- Bump github.com/go-task/slim-sprig (#652) [a26fed8]
+
+## 1.27.4
+
+### Fixes
+- improve error formatting and remove duplication of error message in Eventually/Consistently [854f075]
+
+### Maintenance
+- Bump github.com/onsi/ginkgo/v2 from 2.9.0 to 2.9.1 (#650) [ccebd9b]
+
+## 1.27.3
+
+### Fixes
+- format.Object now always includes err.Error() when passed an error [86d97ef]
+- Fix HaveExactElements to work inside ContainElement or other collection matchers (#648) [636757e]
+
+### Maintenance
+- Bump github.com/golang/protobuf from 1.5.2 to 1.5.3 (#649) [cc16689]
+- Bump github.com/onsi/ginkgo/v2 from 2.8.4 to 2.9.0 (#646) [e783366]
+
+## 1.27.2
+
+### Fixes
+- improve poll progress message when polling a consistently that has been passing [28a319b]
+
+### Maintenance
+- bump ginkgo
+- remove tools.go hack as Ginkgo 2.8.2 automatically pulls in the cli dependencies [81443b3]
+
+## 1.27.1
+
+### Maintenance
+
+- Bump golang.org/x/net from 0.6.0 to 0.7.0 (#640) [bc686cd]
+
+## 1.27.0
+
+### Features
+- Add HaveExactElements matcher (#634) [9d50783]
+- update Gomega docs to discuss GinkgoHelper() [be32774]
+
+### Maintenance
+- Bump github.com/onsi/ginkgo/v2 from 2.8.0 to 2.8.1 (#639) [296a68b]
+- Bump golang.org/x/net from 0.5.0 to 0.6.0 (#638) [c2b098b]
+- Bump github-pages from 227 to 228 in /docs (#636) [a9069ab]
+- test: update matrix for Go 1.20 (#635) [6bd25c8]
+- Bump github.com/onsi/ginkgo/v2 from 2.7.0 to 2.8.0 (#631) [5445f8b]
+- Bump webrick from 1.7.0 to 1.8.1 in /docs (#630) [03e93bb]
+- codeql: add ruby language (#626) [63c7d21]
+- dependabot: add bundler package-ecosystem for docs (#625) [d92f963]
+
+## 1.26.0
+
+### Features
+- When a polled function returns an error, keep track of the actual and report on the matcher state of the last non-errored actual [21f3090]
+- improve eventually failure message output [c530fb3]
+
+### Fixes
+- fix several documentation spelling issues [e2eff1f]
+
+
+## 1.25.0
+
+### Features
+- add `MustPassRepeatedly(int)` to asyncAssertion (#619) [4509f72]
+- compare unwrapped errors using DeepEqual (#617) [aaeaa5d]
+
+### Maintenance
+- Bump golang.org/x/net from 0.4.0 to 0.5.0 (#614) [c7cfea4]
+- Bump github.com/onsi/ginkgo/v2 from 2.6.1 to 2.7.0 (#615) [71b8adb]
+- Docs: Fix typo "MUltiple" -> "Multiple" (#616) [9351dda]
+- clean up go.sum [cd1dc1d]
+
+## 1.24.2
+
+### Fixes
+- Correctly handle assertion failure panics for eventually/consistnetly "g Gomega"s in a goroutine [78f1660]
+- docs:Fix typo "you an" -> "you can" (#607) [3187c1f]
+- fixes issue #600 (#606) [808d192]
+
+### Maintenance
+- Bump golang.org/x/net from 0.2.0 to 0.4.0 (#611) [6ebc0bf]
+- Bump nokogiri from 1.13.9 to 1.13.10 in /docs (#612) [258cfc8]
+- Bump github.com/onsi/ginkgo/v2 from 2.5.0 to 2.5.1 (#609) [e6c3eb9]
+
+## 1.24.1
+
+### Fixes
+- maintain backward compatibility for Eventually and Consisntetly's signatures [4c7df5e]
+- fix small typo (#601) [ea0ebe6]
+
+### Maintenance
+- Bump golang.org/x/net from 0.1.0 to 0.2.0 (#603) [1ba8372]
+- Bump github.com/onsi/ginkgo/v2 from 2.4.0 to 2.5.0 (#602) [f9426cb]
+- fix label-filter in test.yml [d795db6]
+- stop running flakey tests and rely on external network dependencies in CI [7133290]
+
+## 1.24.0
+
+### Features
+
+Introducting [gcustom](https://onsi.github.io/gomega/#gcustom-a-convenient-mechanism-for-buildling-custom-matchers) - a convenient mechanism for building custom matchers.
+
+This is an RC release for `gcustom`. The external API may be tweaked in response to feedback however it is expected to remain mostly stable.
+
+### Maintenance
+
+- Update BeComparableTo documentation [756eaa0]
+
+## 1.23.0
+
+### Features
+- Custom formatting on a per-type basis can be provided using `format.RegisterCustomFormatter()` -- see the docs [here](https://onsi.github.io/gomega/#adjusting-output)
+
+- Substantial improvement have been made to `StopTrying()`:
+ - Users can now use `StopTrying().Wrap(err)` to wrap errors and `StopTrying().Attach(description, object)` to attach arbitrary objects to the `StopTrying()` error
+ - `StopTrying()` is now always interpreted as a failure. If you are an early adopter of `StopTrying()` you may need to change your code as the prior version would match against the returned value even if `StopTrying()` was returned. Going forward the `StopTrying()` api should remain stable.
+ - `StopTrying()` and `StopTrying().Now()` can both be used in matchers - not just polled functions.
+
+- `TryAgainAfter(duration)` is used like `StopTrying()` but instructs `Eventually` and `Consistently` that the poll should be tried again after the specified duration. This allows you to dynamically adjust the polling duration.
+
+- `ctx` can now be passed-in as the first argument to `Eventually` and `Consistently`.
+
+## Maintenance
+
+- Bump github.com/onsi/ginkgo/v2 from 2.3.0 to 2.3.1 (#597) [afed901]
+- Bump nokogiri from 1.13.8 to 1.13.9 in /docs (#599) [7c691b3]
+- Bump github.com/google/go-cmp from 0.5.8 to 0.5.9 (#587) [ff22665]
+
+## 1.22.1
+
+## Fixes
+- When passed a context and no explicit timeout, Eventually will only timeout when the context is cancelled [e5105cf]
+- Allow StopTrying() to be wrapped [bf3cba9]
+
+## Maintenance
+- bump to ginkgo v2.3.0 [c5d5c39]
+
+## 1.22.0
+
+### Features
+
+Several improvements have been made to `Eventually` and `Consistently` in this and the most recent releases:
+
+- Eventually and Consistently can take a context.Context [65c01bc]
+ This enables integration with Ginkgo 2.3.0's interruptible nodes and node timeouts.
+- Eventually and Consistently that are passed a SpecContext can provide reports when an interrupt occurs [0d063c9]
+- Eventually/Consistently will forward an attached context to functions that ask for one [e2091c5]
+- Eventually/Consistently supports passing arguments to functions via WithArguments() [a2dc7c3]
+- Eventually and Consistently can now be stopped early with StopTrying(message) and StopTrying(message).Now() [52976bb]
+
+These improvements are all documented in [Gomega's docs](https://onsi.github.io/gomega/#making-asynchronous-assertions)
+
+## Fixes
+
+## Maintenance
+
+## 1.21.1
+
+### Features
+- Eventually and Consistently that are passed a SpecContext can provide reports when an interrupt occurs [0d063c9]
+
+## 1.21.0
+
+### Features
+- Eventually and Consistently can take a context.Context [65c01bc]
+ This enables integration with Ginkgo 2.3.0's interruptible nodes and node timeouts.
+- Introduces Eventually.Within.ProbeEvery with tests and documentation (#591) [f633800]
+- New BeKeyOf matcher with documentation and unit tests (#590) [fb586b3]
+
+## Fixes
+- Cover the entire gmeasure suite with leak detection [8c54344]
+- Fix gmeasure leak [119d4ce]
+- Ignore new Ginkgo ProgressSignal goroutine in gleak [ba548e2]
+
+## Maintenance
+
+- Fixes crashes on newer Ruby 3 installations by upgrading github-pages gem dependency (#596) [12469a0]
+
+
+## 1.20.2
+
+## Fixes
+- label specs that rely on remote access; bump timeout on short-circuit test to make it less flaky [35eeadf]
+- gexec: allow more headroom for SIGABRT-related unit tests (#581) [5b78f40]
+- Enable reading from a closed gbytes.Buffer (#575) [061fd26]
+
+## Maintenance
+- Bump github.com/onsi/ginkgo/v2 from 2.1.5 to 2.1.6 (#583) [55d895b]
+- Bump github.com/onsi/ginkgo/v2 from 2.1.4 to 2.1.5 (#582) [346de7c]
+
+## 1.20.1
+
+## Fixes
+- fix false positive gleaks when using ginkgo -p (#577) [cb46517]
+- Fix typos in gomega_dsl.go (#569) [5f71ed2]
+- don't panic on Eventually(nil), fixing #555 (#567) [9d1186f]
+- vet optional description args in assertions, fixing #560 (#566) [8e37808]
+
+## Maintenance
+- test: add new Go 1.19 to test matrix (#571) [40d7efe]
+- Bump tzinfo from 1.2.9 to 1.2.10 in /docs (#564) [5f26371]
+
+## 1.20.0
+
+## Features
+- New [`gleak`](https://onsi.github.io/gomega/#codegleakcode-finding-leaked-goroutines) experimental goroutine leak detection package! (#538) [85ba7bc]
+- New `BeComparableTo` matcher(#546) that uses `gocmp` to make comparisons [e77ea75]
+- New `HaveExistingField` matcher (#553) [fd130e1]
+- Document how to wrap Gomega (#539) [56714a4]
+
+## Fixes
+- Support pointer receivers in HaveField; fixes #543 (#544) [8dab36e]
+
+## Maintenance
+- Bump various dependencies:
+ - Upgrade to yaml.v3 (#556) [f5a83b1]
+ - Bump github/codeql-action from 1 to 2 (#549) [52f5adf]
+ - Bump github.com/google/go-cmp from 0.5.7 to 0.5.8 (#551) [5f3942d]
+ - Bump nokogiri from 1.13.4 to 1.13.6 in /docs (#554) [eb4b4c2]
+ - Use latest ginkgo (#535) [1c29028]
+ - Bump nokogiri from 1.13.3 to 1.13.4 in /docs (#541) [1ce84d5]
+ - Bump actions/setup-go from 2 to 3 (#540) [755485e]
+ - Bump nokogiri from 1.12.5 to 1.13.3 in /docs (#522) [4fbb0dc]
+ - Bump actions/checkout from 2 to 3 (#526) [ac49202]
+
+## 1.19.0
+
+## Features
+- New [`HaveEach`](https://onsi.github.io/gomega/#haveeachelement-interface) matcher to ensure that each and every element in an `array`, `slice`, or `map` satisfies the passed in matcher. (#523) [9fc2ae2] (#524) [c8ba582]
+- Users can now wrap the `Gomega` interface to implement custom behavior on each assertion. (#521) [1f2e714]
+- [`ContainElement`](https://onsi.github.io/gomega/#containelementelement-interface) now accepts an additional pointer argument. Elements that satisfy the matcher are stored in the pointer enabling developers to easily add subsequent, more detailed, assertions against the matching element. (#527) [1a4e27f]
+
+## Fixes
+- update RELEASING instructions to match ginkgo [0917cde]
+- Bump github.com/onsi/ginkgo/v2 from 2.0.0 to 2.1.3 (#519) [49ab4b0]
+- Fix CVE-2021-38561 (#534) [f1b4456]
+- Fix max number of samples in experiments on non-64-bit systems. (#528) [1c84497]
+- Remove dependency on ginkgo v1.16.4 (#530) [4dea8d5]
+- Fix for Go 1.18 (#532) [56d2a29]
+- Document precendence of timeouts (#533) [b607941]
+
+## 1.18.1
+
+## Fixes
+- Add pointer support to HaveField matcher (#495) [79e41a3]
+
+## 1.18.0
+
+## Features
+- Docs now live on the master branch in the docs folder which will make for easier PRs. The docs also use Ginkgo 2.0's new docs html/css/js. [2570272]
+- New HaveValue matcher can handle actuals that are either values (in which case they are passed on unscathed) or pointers (in which case they are indirected). [Docs here.](https://onsi.github.io/gomega/#working-with-values) (#485) [bdc087c]
+- Gmeasure has been declared GA [360db9d]
+
+## Fixes
+- Gomega now uses ioutil for Go 1.15 and lower (#492) - official support is only for the most recent two major versions of Go but this will unblock users who need to stay on older unsupported versions of Go. [c29c1c0]
+
+## Maintenace
+- Remove Travis workflow (#491) [72e6040]
+- Upgrade to Ginkgo 2.0.0 GA [f383637]
+- chore: fix description of HaveField matcher (#487) [2b4b2c0]
+- use tools.go to ensure Ginkgo cli dependencies are included [f58a52b]
+- remove dockerfile and simplify github actions to match ginkgo's actions [3f8160d]
+
+## 1.17.0
+
+### Features
+- Add HaveField matcher [3a26311]
+- add Error() assertions on the final error value of multi-return values (#480) [2f96943]
+- separate out offsets and timeouts (#478) [18a4723]
+- fix transformation error reporting (#479) [e001fab]
+- allow transform functions to report errors (#472) [bf93408]
+
+### Fixes
+Stop using deprecated ioutil package (#467) [07f405d]
+
## 1.16.0
### Features
diff --git a/vendor/github.com/onsi/gomega/Dockerfile b/vendor/github.com/onsi/gomega/Dockerfile
deleted file mode 100644
index 11c7e63e7..000000000
--- a/vendor/github.com/onsi/gomega/Dockerfile
+++ /dev/null
@@ -1 +0,0 @@
-FROM golang:1.15
diff --git a/vendor/github.com/onsi/gomega/Makefile b/vendor/github.com/onsi/gomega/Makefile
deleted file mode 100644
index 1c6d107e1..000000000
--- a/vendor/github.com/onsi/gomega/Makefile
+++ /dev/null
@@ -1,33 +0,0 @@
-###### Help ###################################################################
-
-.DEFAULT_GOAL = help
-
-.PHONY: help
-
-help: ## list Makefile targets
- @grep -E '^[a-zA-Z_-]+:.*?## .*$$' $(MAKEFILE_LIST) | sort | awk 'BEGIN {FS = ":.*?## "}; {printf "\033[36m%-30s\033[0m %s\n", $$1, $$2}'
-
-###### Targets ################################################################
-
-test: version download fmt vet ginkgo ## Runs all build, static analysis, and test steps
-
-download: ## Download dependencies
- go mod download
-
-vet: ## Run static code analysis
- go vet ./...
-
-ginkgo: ## Run tests using Ginkgo
- go run github.com/onsi/ginkgo/ginkgo -p -r --randomizeAllSpecs --failOnPending --randomizeSuites --race
-
-fmt: ## Checks that the code is formatted correcty
- @@if [ -n "$$(gofmt -s -e -l -d .)" ]; then \
- echo "gofmt check failed: run 'gofmt -s -e -l -w .'"; \
- exit 1; \
- fi
-
-docker_test: ## Run tests in a container via docker-compose
- docker-compose build test && docker-compose run --rm test make test
-
-version: ## Display the version of Go
- @@go version
diff --git a/vendor/github.com/onsi/gomega/RELEASING.md b/vendor/github.com/onsi/gomega/RELEASING.md
index 998d64ee7..9973fff49 100644
--- a/vendor/github.com/onsi/gomega/RELEASING.md
+++ b/vendor/github.com/onsi/gomega/RELEASING.md
@@ -1,12 +1,23 @@
A Gomega release is a tagged sha and a GitHub release. To cut a release:
1. Ensure CHANGELOG.md is up to date.
- - Use `git log --pretty=format:'- %s [%h]' HEAD...vX.X.X` to list all the commits since the last release
+ - Use
+ ```bash
+ LAST_VERSION=$(git tag --sort=version:refname | tail -n1)
+ CHANGES=$(git log --pretty=format:'- %s [%h]' HEAD...$LAST_VERSION)
+ echo -e "## NEXT\n\n$CHANGES\n\n### Features\n\n### Fixes\n\n### Maintenance\n\n$(cat CHANGELOG.md)" > CHANGELOG.md
+ ```
+ to update the changelog
- Categorize the changes into
- Breaking Changes (requires a major version)
- New Features (minor version)
- Fixes (fix version)
- Maintenance (which in general should not be mentioned in `CHANGELOG.md` as they have no user impact)
-2. Update GOMEGA_VERSION in `gomega_dsl.go`
-3. Push a commit with the version number as the commit message (e.g. `v1.3.0`)
-4. Create a new [GitHub release](https://help.github.com/articles/creating-releases/) with the version number as the tag (e.g. `v1.3.0`). List the key changes in the release notes.
+1. Update GOMEGA_VERSION in `gomega_dsl.go`
+1. Commit, push, and release:
+ ```
+ git commit -m "vM.m.p"
+ git push
+ gh release create "vM.m.p"
+ git fetch --tags origin master
+ ```
\ No newline at end of file
diff --git a/vendor/github.com/onsi/gomega/docker-compose.yaml b/vendor/github.com/onsi/gomega/docker-compose.yaml
deleted file mode 100644
index f37496143..000000000
--- a/vendor/github.com/onsi/gomega/docker-compose.yaml
+++ /dev/null
@@ -1,10 +0,0 @@
-version: '3.0'
-
-services:
- test:
- build:
- dockerfile: Dockerfile
- context: .
- working_dir: /app
- volumes:
- - ${PWD}:/app
diff --git a/vendor/github.com/onsi/gomega/format/format.go b/vendor/github.com/onsi/gomega/format/format.go
index 6e78c391d..56bdd053b 100644
--- a/vendor/github.com/onsi/gomega/format/format.go
+++ b/vendor/github.com/onsi/gomega/format/format.go
@@ -52,7 +52,7 @@ var CharactersAroundMismatchToInclude uint = 5
var contextType = reflect.TypeOf((*context.Context)(nil)).Elem()
var timeType = reflect.TypeOf(time.Time{})
-//The default indentation string emitted by the format package
+// The default indentation string emitted by the format package
var Indent = " "
var longFormThreshold = 20
@@ -65,6 +65,52 @@ type GomegaStringer interface {
GomegaString() string
}
+/*
+CustomFormatters can be registered with Gomega via RegisterCustomFormatter()
+Any value to be rendered by Gomega is passed to each registered CustomFormatters.
+The CustomFormatter signals that it will handle formatting the value by returning (formatted-string, true)
+If the CustomFormatter does not want to handle the object it should return ("", false)
+
+Strings returned by CustomFormatters are not truncated
+*/
+type CustomFormatter func(value interface{}) (string, bool)
+type CustomFormatterKey uint
+
+var customFormatterKey CustomFormatterKey = 1
+
+type customFormatterKeyPair struct {
+ CustomFormatter
+ CustomFormatterKey
+}
+
+/*
+RegisterCustomFormatter registers a CustomFormatter and returns a CustomFormatterKey
+
+You can call UnregisterCustomFormatter with the returned key to unregister the associated CustomFormatter
+*/
+func RegisterCustomFormatter(customFormatter CustomFormatter) CustomFormatterKey {
+ key := customFormatterKey
+ customFormatterKey += 1
+ customFormatters = append(customFormatters, customFormatterKeyPair{customFormatter, key})
+ return key
+}
+
+/*
+UnregisterCustomFormatter unregisters a previously registered CustomFormatter. You should pass in the key returned by RegisterCustomFormatter
+*/
+func UnregisterCustomFormatter(key CustomFormatterKey) {
+ formatters := []customFormatterKeyPair{}
+ for _, f := range customFormatters {
+ if f.CustomFormatterKey == key {
+ continue
+ }
+ formatters = append(formatters, f)
+ }
+ customFormatters = formatters
+}
+
+var customFormatters = []customFormatterKeyPair{}
+
/*
Generates a formatted matcher success/failure message of the form:
@@ -212,24 +258,35 @@ Set PrintContextObjects to true to print the content of objects implementing con
func Object(object interface{}, indentation uint) string {
indent := strings.Repeat(Indent, int(indentation))
value := reflect.ValueOf(object)
- return fmt.Sprintf("%s<%s>: %s", indent, formatType(value), formatValue(value, indentation))
+ commonRepresentation := ""
+ if err, ok := object.(error); ok {
+ commonRepresentation += "\n" + IndentString(err.Error(), indentation) + "\n" + indent
+ }
+ return fmt.Sprintf("%s<%s>: %s%s", indent, formatType(value), commonRepresentation, formatValue(value, indentation))
}
/*
IndentString takes a string and indents each line by the specified amount.
*/
func IndentString(s string, indentation uint) string {
+ return indentString(s, indentation, true)
+}
+
+func indentString(s string, indentation uint, indentFirstLine bool) string {
+ result := &strings.Builder{}
components := strings.Split(s, "\n")
- result := ""
indent := strings.Repeat(Indent, int(indentation))
for i, component := range components {
- result += indent + component
+ if i > 0 || indentFirstLine {
+ result.WriteString(indent)
+ }
+ result.WriteString(component)
if i < len(components)-1 {
- result += "\n"
+ result.WriteString("\n")
}
}
- return result
+ return result.String()
}
func formatType(v reflect.Value) string {
@@ -261,18 +318,27 @@ func formatValue(value reflect.Value, indentation uint) string {
if value.CanInterface() {
obj := value.Interface()
+ // if a CustomFormatter handles this values, we'll go with that
+ for _, customFormatter := range customFormatters {
+ formatted, handled := customFormatter.CustomFormatter(obj)
+ // do not truncate a user-provided CustomFormatter()
+ if handled {
+ return indentString(formatted, indentation+1, false)
+ }
+ }
+
// GomegaStringer will take precedence to other representations and disregards UseStringerRepresentation
if x, ok := obj.(GomegaStringer); ok {
- // do not truncate a user-defined GoMegaString() value
- return x.GomegaString()
+ // do not truncate a user-defined GomegaString() value
+ return indentString(x.GomegaString(), indentation+1, false)
}
if UseStringerRepresentation {
switch x := obj.(type) {
case fmt.GoStringer:
- return truncateLongStrings(x.GoString())
+ return indentString(truncateLongStrings(x.GoString()), indentation+1, false)
case fmt.Stringer:
- return truncateLongStrings(x.String())
+ return indentString(truncateLongStrings(x.String()), indentation+1, false)
}
}
}
diff --git a/vendor/github.com/onsi/gomega/gomega_dsl.go b/vendor/github.com/onsi/gomega/gomega_dsl.go
index 84775142c..82ef52445 100644
--- a/vendor/github.com/onsi/gomega/gomega_dsl.go
+++ b/vendor/github.com/onsi/gomega/gomega_dsl.go
@@ -22,7 +22,7 @@ import (
"github.com/onsi/gomega/types"
)
-const GOMEGA_VERSION = "1.16.0"
+const GOMEGA_VERSION = "1.27.7"
const nilGomegaPanic = `You are trying to make an assertion, but haven't registered Gomega's fail handler.
If you're using Ginkgo then you probably forgot to put your assertion in an It().
@@ -34,7 +34,7 @@ Depending on your vendoring solution you may be inadvertently importing gomega a
// to abstract between the standard package-level function implementations
// and alternatives like *WithT.
//
-// The types in the top-level DSL have gotten a bit messy due to earlier depracations that avoid stuttering
+// The types in the top-level DSL have gotten a bit messy due to earlier deprecations that avoid stuttering
// and due to an accidental use of a concrete type (*WithT) in an earlier release.
//
// As of 1.15 both the WithT and Ginkgo variants of Gomega are implemented by the same underlying object
@@ -52,7 +52,7 @@ var Default = Gomega(internal.NewGomega(internal.FetchDefaultDurationBundle()))
// rich ecosystem of matchers without causing a test to fail. For example, to aggregate a series of potential failures
// or for use in a non-test setting.
func NewGomega(fail types.GomegaFailHandler) Gomega {
- return internal.NewGomega(Default.(*internal.Gomega).DurationBundle).ConfigureWithFailHandler(fail)
+ return internal.NewGomega(internalGomega(Default).DurationBundle).ConfigureWithFailHandler(fail)
}
// WithT wraps a *testing.T and provides `Expect`, `Eventually`, and `Consistently` methods. This allows you to leverage
@@ -69,17 +69,31 @@ type WithT = internal.Gomega
// GomegaWithT is deprecated in favor of gomega.WithT, which does not stutter.
type GomegaWithT = WithT
-// NewWithT takes a *testing.T and returngs a `gomega.WithT` allowing you to use `Expect`, `Eventually`, and `Consistently` along with
+// inner is an interface that allows users to provide a wrapper around Default. The wrapper
+// must implement the inner interface and return either the original Default or the result of
+// a call to NewGomega().
+type inner interface {
+ Inner() Gomega
+}
+
+func internalGomega(g Gomega) *internal.Gomega {
+ if v, ok := g.(inner); ok {
+ return v.Inner().(*internal.Gomega)
+ }
+ return g.(*internal.Gomega)
+}
+
+// NewWithT takes a *testing.T and returns a `gomega.WithT` allowing you to use `Expect`, `Eventually`, and `Consistently` along with
// Gomega's rich ecosystem of matchers in standard `testing` test suits.
//
-// func TestFarmHasCow(t *testing.T) {
-// g := gomega.NewWithT(t)
+// func TestFarmHasCow(t *testing.T) {
+// g := gomega.NewWithT(t)
//
-// f := farm.New([]string{"Cow", "Horse"})
-// g.Expect(f.HasCow()).To(BeTrue(), "Farm should have cow")
-// }
+// f := farm.New([]string{"Cow", "Horse"})
+// g.Expect(f.HasCow()).To(BeTrue(), "Farm should have cow")
+// }
func NewWithT(t types.GomegaTestingT) *WithT {
- return internal.NewGomega(Default.(*internal.Gomega).DurationBundle).ConfigureWithT(t)
+ return internal.NewGomega(internalGomega(Default).DurationBundle).ConfigureWithT(t)
}
// NewGomegaWithT is deprecated in favor of gomega.NewWithT, which does not stutter.
@@ -88,37 +102,37 @@ var NewGomegaWithT = NewWithT
// RegisterFailHandler connects Ginkgo to Gomega. When a matcher fails
// the fail handler passed into RegisterFailHandler is called.
func RegisterFailHandler(fail types.GomegaFailHandler) {
- Default.(*internal.Gomega).ConfigureWithFailHandler(fail)
+ internalGomega(Default).ConfigureWithFailHandler(fail)
}
// RegisterFailHandlerWithT is deprecated and will be removed in a future release.
// users should use RegisterFailHandler, or RegisterTestingT
func RegisterFailHandlerWithT(_ types.GomegaTestingT, fail types.GomegaFailHandler) {
fmt.Println("RegisterFailHandlerWithT is deprecated. Please use RegisterFailHandler or RegisterTestingT instead.")
- Default.(*internal.Gomega).ConfigureWithFailHandler(fail)
+ internalGomega(Default).ConfigureWithFailHandler(fail)
}
// RegisterTestingT connects Gomega to Golang's XUnit style
// Testing.T tests. It is now deprecated and you should use NewWithT() instead to get a fresh instance of Gomega for each test.
func RegisterTestingT(t types.GomegaTestingT) {
- Default.(*internal.Gomega).ConfigureWithT(t)
+ internalGomega(Default).ConfigureWithT(t)
}
// InterceptGomegaFailures runs a given callback and returns an array of
// failure messages generated by any Gomega assertions within the callback.
-// Exeuction continues after the first failure allowing users to collect all failures
+// Execution continues after the first failure allowing users to collect all failures
// in the callback.
//
// This is most useful when testing custom matchers, but can also be used to check
// on a value using a Gomega assertion without causing a test failure.
func InterceptGomegaFailures(f func()) []string {
- originalHandler := Default.(*internal.Gomega).Fail
+ originalHandler := internalGomega(Default).Fail
failures := []string{}
- Default.(*internal.Gomega).Fail = func(message string, callerSkip ...int) {
+ internalGomega(Default).Fail = func(message string, callerSkip ...int) {
failures = append(failures, message)
}
defer func() {
- Default.(*internal.Gomega).Fail = originalHandler
+ internalGomega(Default).Fail = originalHandler
}()
f()
return failures
@@ -131,14 +145,14 @@ func InterceptGomegaFailures(f func()) []string {
// does not register a failure with the FailHandler registered via RegisterFailHandler - it is up
// to the user to decide what to do with the returned error
func InterceptGomegaFailure(f func()) (err error) {
- originalHandler := Default.(*internal.Gomega).Fail
- Default.(*internal.Gomega).Fail = func(message string, callerSkip ...int) {
+ originalHandler := internalGomega(Default).Fail
+ internalGomega(Default).Fail = func(message string, callerSkip ...int) {
err = errors.New(message)
panic("stop execution")
}
defer func() {
- Default.(*internal.Gomega).Fail = originalHandler
+ internalGomega(Default).Fail = originalHandler
if e := recover(); e != nil {
if err == nil {
panic(e)
@@ -151,13 +165,14 @@ func InterceptGomegaFailure(f func()) (err error) {
}
func ensureDefaultGomegaIsConfigured() {
- if !Default.(*internal.Gomega).IsConfigured() {
+ if !internalGomega(Default).IsConfigured() {
panic(nilGomegaPanic)
}
}
// Ω wraps an actual value allowing assertions to be made on it:
-// Ω("foo").Should(Equal("foo"))
+//
+// Ω("foo").Should(Equal("foo"))
//
// If Ω is passed more than one argument it will pass the *first* argument to the matcher.
// All subsequent arguments will be required to be nil/zero.
@@ -166,10 +181,13 @@ func ensureDefaultGomegaIsConfigured() {
// a value and an error - a common patter in Go.
//
// For example, given a function with signature:
-// func MyAmazingThing() (int, error)
+//
+// func MyAmazingThing() (int, error)
//
// Then:
-// Ω(MyAmazingThing()).Should(Equal(3))
+//
+// Ω(MyAmazingThing()).Should(Equal(3))
+//
// Will succeed only if `MyAmazingThing()` returns `(3, nil)`
//
// Ω and Expect are identical
@@ -179,19 +197,23 @@ func Ω(actual interface{}, extra ...interface{}) Assertion {
}
// Expect wraps an actual value allowing assertions to be made on it:
-// Expect("foo").To(Equal("foo"))
+//
+// Expect("foo").To(Equal("foo"))
//
// If Expect is passed more than one argument it will pass the *first* argument to the matcher.
// All subsequent arguments will be required to be nil/zero.
//
// This is convenient if you want to make an assertion on a method/function that returns
-// a value and an error - a common patter in Go.
+// a value and an error - a common pattern in Go.
//
// For example, given a function with signature:
-// func MyAmazingThing() (int, error)
+//
+// func MyAmazingThing() (int, error)
//
// Then:
-// Expect(MyAmazingThing()).Should(Equal(3))
+//
+// Expect(MyAmazingThing()).Should(Equal(3))
+//
// Will succeed only if `MyAmazingThing()` returns `(3, nil)`
//
// Expect and Ω are identical
@@ -201,10 +223,12 @@ func Expect(actual interface{}, extra ...interface{}) Assertion {
}
// ExpectWithOffset wraps an actual value allowing assertions to be made on it:
-// ExpectWithOffset(1, "foo").To(Equal("foo"))
+//
+// ExpectWithOffset(1, "foo").To(Equal("foo"))
//
// Unlike `Expect` and `Ω`, `ExpectWithOffset` takes an additional integer argument
-// that is used to modify the call-stack offset when computing line numbers.
+// that is used to modify the call-stack offset when computing line numbers. It is
+// the same as `Expect(...).WithOffset`.
//
// This is most useful in helper functions that make assertions. If you want Gomega's
// error message to refer to the calling line in the test (as opposed to the line in the helper function)
@@ -218,7 +242,7 @@ func ExpectWithOffset(offset int, actual interface{}, extra ...interface{}) Asse
Eventually enables making assertions on asynchronous behavior.
Eventually checks that an assertion *eventually* passes. Eventually blocks when called and attempts an assertion periodically until it passes or a timeout occurs. Both the timeout and polling interval are configurable as optional arguments.
-The first optional argument is the timeout (which defaults to 1s), the second is the polling interval (which defaults to 10ms). Both intervals can be specified as time.Duration, parsable duration strings or floats/integers (in which case they are interpreted as seconds).
+The first optional argument is the timeout (which defaults to 1s), the second is the polling interval (which defaults to 10ms). Both intervals can be specified as time.Duration, parsable duration strings or floats/integers (in which case they are interpreted as seconds). In addition an optional context.Context can be passed in - Eventually will keep trying until either the timeout epxires or the context is cancelled, whichever comes first.
Eventually works with any Gomega compatible matcher and supports making assertions against three categories of actual value:
@@ -226,15 +250,15 @@ Eventually works with any Gomega compatible matcher and supports making assertio
There are several examples of values that can change over time. These can be passed in to Eventually and will be passed to the matcher repeatedly until a match occurs. For example:
- c := make(chan bool)
- go DoStuff(c)
- Eventually(c, "50ms").Should(BeClosed())
+ c := make(chan bool)
+ go DoStuff(c)
+ Eventually(c, "50ms").Should(BeClosed())
will poll the channel repeatedly until it is closed. In this example `Eventually` will block until either the specified timeout of 50ms has elapsed or the channel is closed, whichever comes first.
-Several Gomega libraries allow you to use Eventually in this way. For example, the gomega/gexec package allows you to block until a *gexec.Session exits successfuly via:
+Several Gomega libraries allow you to use Eventually in this way. For example, the gomega/gexec package allows you to block until a *gexec.Session exits successfully via:
- Eventually(session).Should(gexec.Exit(0))
+ Eventually(session).Should(gexec.Exit(0))
And the gomega/gbytes package allows you to monitor a streaming *gbytes.Buffer until a given string is seen:
@@ -251,33 +275,57 @@ this will trigger Go's race detector as the goroutine polling via Eventually wil
**Category 2: Make Eventually assertions on functions**
-Eventually can be passed functions that **take no arguments** and **return at least one value**. When configured this way, Eventually will poll the function repeatedly and pass the first returned value to the matcher.
+Eventually can be passed functions that **return at least one value**. When configured this way, Eventually will poll the function repeatedly and pass the first returned value to the matcher.
For example:
- Eventually(func() int {
- return client.FetchCount()
- }).Should(BeNumerically(">=", 17))
+ Eventually(func() int {
+ return client.FetchCount()
+ }).Should(BeNumerically(">=", 17))
- will repeatedly poll client.FetchCount until the BeNumerically matcher is satisfied. (Note that this example could have been written as Eventually(client.FetchCount).Should(BeNumerically(">=", 17)))
+ will repeatedly poll client.FetchCount until the BeNumerically matcher is satisfied. (Note that this example could have been written as Eventually(client.FetchCount).Should(BeNumerically(">=", 17)))
-If multple values are returned by the function, Eventually will pass the first value to the matcher and require that all others are zero-valued. This allows you to pass Eventually a function that returns a value and an error - a common patternin Go.
+If multiple values are returned by the function, Eventually will pass the first value to the matcher and require that all others are zero-valued. This allows you to pass Eventually a function that returns a value and an error - a common pattern in Go.
For example, consider a method that returns a value and an error:
- func FetchFromDB() (string, error)
+
+ func FetchFromDB() (string, error)
Then
- Eventually(FetchFromDB).Should(Equal("got it"))
+
+ Eventually(FetchFromDB).Should(Equal("got it"))
will pass only if and when the returned error is nil *and* the returned string satisfies the matcher.
-It is important to note that the function passed into Eventually is invoked *synchronously* when polled. Eventually does not (in fact, it cannot) kill the function if it takes longer to return than Eventually's configured timeout. You should design your functions with this in mind.
+Eventually can also accept functions that take arguments, however you must provide those arguments using .WithArguments(). For example, consider a function that takes a user-id and makes a network request to fetch a full name:
+
+ func FetchFullName(userId int) (string, error)
+
+You can poll this function like so:
+
+ Eventually(FetchFullName).WithArguments(1138).Should(Equal("Wookie"))
+
+It is important to note that the function passed into Eventually is invoked *synchronously* when polled. Eventually does not (in fact, it cannot) kill the function if it takes longer to return than Eventually's configured timeout. A common practice here is to use a context. Here's an example that combines Ginkgo's spec timeout support with Eventually:
+
+ It("fetches the correct count", func(ctx SpecContext) {
+ Eventually(ctx, func() int {
+ return client.FetchCount(ctx, "/users")
+ }).Should(BeNumerically(">=", 17))
+ }, SpecTimeout(time.Second))
+
+You can also use Eventually().WithContext(ctx) to pass in the context. Passed-in contexts play nicely with passed-in arguments as long as the context appears first. You can rewrite the above example as:
+
+ It("fetches the correct count", func(ctx SpecContext) {
+ Eventually(client.FetchCount).WithContext(ctx).WithArguments("/users").Should(BeNumerically(">=", 17))
+ }, SpecTimeout(time.Second))
+
+Either way the context passed to Eventually is also passed to the underlying function. Now, when Ginkgo cancels the context both the FetchCount client and Gomega will be informed and can exit.
**Category 3: Making assertions _in_ the function passed into Eventually**
When testing complex systems it can be valuable to assert that a _set_ of assertions passes Eventually. Eventually supports this by accepting functions that take a single Gomega argument and return zero or more values.
-Here's an example that makes some asssertions and returns a value and error:
+Here's an example that makes some assertions and returns a value and error:
Eventually(func(g Gomega) (Widget, error) {
ids, err := client.FetchIDs()
@@ -291,27 +339,62 @@ will pass only if all the assertions in the polled function pass and the return
Eventually also supports a special case polling function that takes a single Gomega argument and returns no values. Eventually assumes such a function is making assertions and is designed to work with the Succeed matcher to validate that all assertions have passed.
For example:
- Eventually(func(g Gomega) {
- model, err := client.Find(1138)
- g.Expect(err).NotTo(HaveOccurred())
- g.Expect(model.Reticulate()).To(Succeed())
- g.Expect(model.IsReticulated()).To(BeTrue())
- g.Expect(model.Save()).To(Succeed())
- }).Should(Succeed())
+ Eventually(func(g Gomega) {
+ model, err := client.Find(1138)
+ g.Expect(err).NotTo(HaveOccurred())
+ g.Expect(model.Reticulate()).To(Succeed())
+ g.Expect(model.IsReticulated()).To(BeTrue())
+ g.Expect(model.Save()).To(Succeed())
+ }).Should(Succeed())
will rerun the function until all assertions pass.
+
+You can also pass additional arguments to functions that take a Gomega. The only rule is that the Gomega argument must be first. If you also want to pass the context attached to Eventually you must ensure that is the second argument. For example:
+
+ Eventually(func(g Gomega, ctx context.Context, path string, expected ...string){
+ tok, err := client.GetToken(ctx)
+ g.Expect(err).NotTo(HaveOccurred())
+
+ elements, err := client.Fetch(ctx, tok, path)
+ g.Expect(err).NotTo(HaveOccurred())
+ g.Expect(elements).To(ConsistOf(expected))
+ }).WithContext(ctx).WithArguments("/names", "Joe", "Jane", "Sam").Should(Succeed())
+
+You can ensure that you get a number of consecutive successful tries before succeeding using `MustPassRepeatedly(int)`. For Example:
+
+	count := 0
+ Eventually(func() bool {
+ count++
+ return count > 2
+ }).MustPassRepeatedly(2).Should(BeTrue())
+ // Because we had to wait for 2 calls that returned true
+ Expect(count).To(Equal(3))
+
+Finally, in addition to passing timeouts and a context to Eventually you can be more explicit with Eventually's chaining configuration methods:
+
+ Eventually(..., "1s", "2s", ctx).Should(...)
+
+is equivalent to
+
+ Eventually(...).WithTimeout(time.Second).WithPolling(2*time.Second).WithContext(ctx).Should(...)
*/
-func Eventually(actual interface{}, intervals ...interface{}) AsyncAssertion {
+func Eventually(actualOrCtx interface{}, args ...interface{}) AsyncAssertion {
ensureDefaultGomegaIsConfigured()
- return Default.Eventually(actual, intervals...)
+ return Default.Eventually(actualOrCtx, args...)
}
// EventuallyWithOffset operates like Eventually but takes an additional
// initial argument to indicate an offset in the call stack. This is useful when building helper
// functions that contain matchers. To learn more, read about `ExpectWithOffset`.
-func EventuallyWithOffset(offset int, actual interface{}, intervals ...interface{}) AsyncAssertion {
+//
+// `EventuallyWithOffset` is the same as `Eventually(...).WithOffset`.
+//
+// `EventuallyWithOffset` specifying a timeout interval (and an optional polling interval) are
+// the same as `Eventually(...).WithOffset(...).WithTimeout` or
+// `Eventually(...).WithOffset(...).WithTimeout(...).WithPolling`.
+func EventuallyWithOffset(offset int, actualOrCtx interface{}, args ...interface{}) AsyncAssertion {
ensureDefaultGomegaIsConfigured()
- return Default.EventuallyWithOffset(offset, actual, intervals...)
+ return Default.EventuallyWithOffset(offset, actualOrCtx, args...)
}
/*
@@ -319,29 +402,75 @@ Consistently, like Eventually, enables making assertions on asynchronous behavio
Consistently blocks when called for a specified duration. During that duration Consistently repeatedly polls its matcher and ensures that it is satisfied. If the matcher is consistently satisfied, then Consistently will pass. Otherwise Consistently will fail.
-Both the total waiting duration and the polling interval are configurable as optional arguments. The first optional arugment is the duration that Consistently will run for (defaults to 100ms), and the second argument is the polling interval (defaults to 10ms). As with Eventually, these intervals can be passed in as time.Duration, parsable duration strings or an integer or float number of seconds.
+Both the total waiting duration and the polling interval are configurable as optional arguments. The first optional argument is the duration that Consistently will run for (defaults to 100ms), and the second argument is the polling interval (defaults to 10ms). As with Eventually, these intervals can be passed in as time.Duration, parsable duration strings or an integer or float number of seconds. You can also pass in an optional context.Context - Consistently will exit early (with a failure) if the context is cancelled before the waiting duration expires.
Consistently accepts the same three categories of actual as Eventually, check the Eventually docs to learn more.
Consistently is useful in cases where you want to assert that something *does not happen* for a period of time. For example, you may want to assert that a goroutine does *not* send data down a channel. In this case you could write:
- Consistently(channel, "200ms").ShouldNot(Receive())
+ Consistently(channel, "200ms").ShouldNot(Receive())
This will block for 200 milliseconds and repeatedly check the channel and ensure nothing has been received.
*/
-func Consistently(actual interface{}, intervals ...interface{}) AsyncAssertion {
+func Consistently(actualOrCtx interface{}, args ...interface{}) AsyncAssertion {
ensureDefaultGomegaIsConfigured()
- return Default.Consistently(actual, intervals...)
+ return Default.Consistently(actualOrCtx, args...)
}
// ConsistentlyWithOffset operates like Consistently but takes an additional
// initial argument to indicate an offset in the call stack. This is useful when building helper
// functions that contain matchers. To learn more, read about `ExpectWithOffset`.
-func ConsistentlyWithOffset(offset int, actual interface{}, intervals ...interface{}) AsyncAssertion {
+//
+// `ConsistentlyWithOffset` is the same as `Consistently(...).WithOffset` and
+// optional `WithTimeout` and `WithPolling`.
+func ConsistentlyWithOffset(offset int, actualOrCtx interface{}, args ...interface{}) AsyncAssertion {
ensureDefaultGomegaIsConfigured()
- return Default.ConsistentlyWithOffset(offset, actual, intervals...)
+ return Default.ConsistentlyWithOffset(offset, actualOrCtx, args...)
}
+/*
+StopTrying can be used to signal to Eventually and Consistently that they should abort and stop trying. This always results in a failure of the assertion - and the failure message is the content of the StopTrying signal.
+
+You can send the StopTrying signal by either returning StopTrying("message") as an error from your passed-in function _or_ by calling StopTrying("message").Now() to trigger a panic and end execution.
+
+You can also wrap StopTrying around an error with `StopTrying("message").Wrap(err)` and can attach additional objects via `StopTrying("message").Attach("description", object)`. When rendered, the signal will include the wrapped error and any attached objects rendered using Gomega's default formatting.
+
+Here are a couple of examples. This is how you might use StopTrying() as an error to signal that Eventually should stop:
+
+ playerIndex, numPlayers := 0, 11
+ Eventually(func() (string, error) {
+ if playerIndex == numPlayers {
+ return "", StopTrying("no more players left")
+ }
+ name := client.FetchPlayer(playerIndex)
+ playerIndex += 1
+ return name, nil
+ }).Should(Equal("Patrick Mahomes"))
+
+And here's an example where `StopTrying().Now()` is called to halt execution immediately:
+
+ Eventually(func() []string {
+ names, err := client.FetchAllPlayers()
+ if err == client.IRRECOVERABLE_ERROR {
+ StopTrying("Irrecoverable error occurred").Wrap(err).Now()
+ }
+ return names
+ }).Should(ContainElement("Patrick Mahomes"))
+*/
+var StopTrying = internal.StopTrying
+
+/*
+TryAgainAfter() allows you to adjust the polling interval for the _next_ iteration of `Eventually` or `Consistently`. Like `StopTrying` you can either return `TryAgainAfter` as an error or trigger it immediately with `.Now()`
+
+When `TryAgainAfter()` is triggered `Eventually` and `Consistently` will wait for that duration. If a timeout occurs before the next poll is triggered both `Eventually` and `Consistently` will always fail with the content of the TryAgainAfter message. As with StopTrying you can `.Wrap()` an error and `.Attach()` additional objects to `TryAgainAfter`.
+*/
+var TryAgainAfter = internal.TryAgainAfter
+
+/*
+PollingSignalError is the error returned by StopTrying() and TryAgainAfter()
+*/
+type PollingSignalError = internal.PollingSignalError
+
// SetDefaultEventuallyTimeout sets the default timeout duration for Eventually. Eventually will repeatedly poll your condition until it succeeds, or until this timeout elapses.
func SetDefaultEventuallyTimeout(t time.Duration) {
Default.SetDefaultEventuallyTimeout(t)
@@ -375,8 +504,8 @@ func SetDefaultConsistentlyPollingInterval(t time.Duration) {
//
// Example:
//
-// Eventually(myChannel).Should(Receive(), "Something should have come down the pipe.")
-// Consistently(myChannel).ShouldNot(Receive(), func() string { return "Nothing should have come down the pipe." })
+// Eventually(myChannel).Should(Receive(), "Something should have come down the pipe.")
+// Consistently(myChannel).ShouldNot(Receive(), func() string { return "Nothing should have come down the pipe." })
type AsyncAssertion = types.AsyncAssertion
// GomegaAsyncAssertion is deprecated in favor of AsyncAssertion, which does not stutter.
@@ -398,7 +527,7 @@ type GomegaAsyncAssertion = types.AsyncAssertion
//
// Example:
//
-// Ω(farm.HasCow()).Should(BeTrue(), "Farm %v should have a cow", farm)
+// Ω(farm.HasCow()).Should(BeTrue(), "Farm %v should have a cow", farm)
type Assertion = types.Assertion
// GomegaAssertion is deprecated in favor of Assertion, which does not stutter.
diff --git a/vendor/github.com/onsi/gomega/internal/assertion.go b/vendor/github.com/onsi/gomega/internal/assertion.go
index 36b0e8345..08356a610 100644
--- a/vendor/github.com/onsi/gomega/internal/assertion.go
+++ b/vendor/github.com/onsi/gomega/internal/assertion.go
@@ -4,48 +4,74 @@ import (
"fmt"
"reflect"
+ "github.com/onsi/gomega/format"
"github.com/onsi/gomega/types"
)
type Assertion struct {
- actualInput interface{}
+ actuals []interface{} // actual value plus all extra values
+ actualIndex int // value to pass to the matcher
+ vet vetinari // the vet to call before calling Gomega matcher
offset int
- extra []interface{}
g *Gomega
}
+// ...obligatory discworld reference, as "vetineer" doesn't sound ... quite right.
+type vetinari func(assertion *Assertion, optionalDescription ...interface{}) bool
+
func NewAssertion(actualInput interface{}, g *Gomega, offset int, extra ...interface{}) *Assertion {
return &Assertion{
- actualInput: actualInput,
+ actuals: append([]interface{}{actualInput}, extra...),
+ actualIndex: 0,
+ vet: (*Assertion).vetActuals,
offset: offset,
- extra: extra,
g: g,
}
}
+func (assertion *Assertion) WithOffset(offset int) types.Assertion {
+ assertion.offset = offset
+ return assertion
+}
+
+func (assertion *Assertion) Error() types.Assertion {
+ return &Assertion{
+ actuals: assertion.actuals,
+ actualIndex: len(assertion.actuals) - 1,
+ vet: (*Assertion).vetError,
+ offset: assertion.offset,
+ g: assertion.g,
+ }
+}
+
func (assertion *Assertion) Should(matcher types.GomegaMatcher, optionalDescription ...interface{}) bool {
assertion.g.THelper()
- return assertion.vetExtras(optionalDescription...) && assertion.match(matcher, true, optionalDescription...)
+ vetOptionalDescription("Assertion", optionalDescription...)
+ return assertion.vet(assertion, optionalDescription...) && assertion.match(matcher, true, optionalDescription...)
}
func (assertion *Assertion) ShouldNot(matcher types.GomegaMatcher, optionalDescription ...interface{}) bool {
assertion.g.THelper()
- return assertion.vetExtras(optionalDescription...) && assertion.match(matcher, false, optionalDescription...)
+ vetOptionalDescription("Assertion", optionalDescription...)
+ return assertion.vet(assertion, optionalDescription...) && assertion.match(matcher, false, optionalDescription...)
}
func (assertion *Assertion) To(matcher types.GomegaMatcher, optionalDescription ...interface{}) bool {
assertion.g.THelper()
- return assertion.vetExtras(optionalDescription...) && assertion.match(matcher, true, optionalDescription...)
+ vetOptionalDescription("Assertion", optionalDescription...)
+ return assertion.vet(assertion, optionalDescription...) && assertion.match(matcher, true, optionalDescription...)
}
func (assertion *Assertion) ToNot(matcher types.GomegaMatcher, optionalDescription ...interface{}) bool {
assertion.g.THelper()
- return assertion.vetExtras(optionalDescription...) && assertion.match(matcher, false, optionalDescription...)
+ vetOptionalDescription("Assertion", optionalDescription...)
+ return assertion.vet(assertion, optionalDescription...) && assertion.match(matcher, false, optionalDescription...)
}
func (assertion *Assertion) NotTo(matcher types.GomegaMatcher, optionalDescription ...interface{}) bool {
assertion.g.THelper()
- return assertion.vetExtras(optionalDescription...) && assertion.match(matcher, false, optionalDescription...)
+ vetOptionalDescription("Assertion", optionalDescription...)
+ return assertion.vet(assertion, optionalDescription...) && assertion.match(matcher, false, optionalDescription...)
}
func (assertion *Assertion) buildDescription(optionalDescription ...interface{}) string {
@@ -61,7 +87,8 @@ func (assertion *Assertion) buildDescription(optionalDescription ...interface{})
}
func (assertion *Assertion) match(matcher types.GomegaMatcher, desiredMatch bool, optionalDescription ...interface{}) bool {
- matches, err := matcher.Match(assertion.actualInput)
+ actualInput := assertion.actuals[assertion.actualIndex]
+ matches, err := matcher.Match(actualInput)
assertion.g.THelper()
if err != nil {
description := assertion.buildDescription(optionalDescription...)
@@ -71,9 +98,9 @@ func (assertion *Assertion) match(matcher types.GomegaMatcher, desiredMatch bool
if matches != desiredMatch {
var message string
if desiredMatch {
- message = matcher.FailureMessage(assertion.actualInput)
+ message = matcher.FailureMessage(actualInput)
} else {
- message = matcher.NegatedFailureMessage(assertion.actualInput)
+ message = matcher.NegatedFailureMessage(actualInput)
}
description := assertion.buildDescription(optionalDescription...)
assertion.g.Fail(description+message, 2+assertion.offset)
@@ -83,8 +110,11 @@ func (assertion *Assertion) match(matcher types.GomegaMatcher, desiredMatch bool
return true
}
-func (assertion *Assertion) vetExtras(optionalDescription ...interface{}) bool {
- success, message := vetExtras(assertion.extra)
+// vetActuals vets the actual values, with the (optional) exception of a
+// specific value, such as the first value in case non-error assertions, or the
+// last value in case of Error()-based assertions.
+func (assertion *Assertion) vetActuals(optionalDescription ...interface{}) bool {
+ success, message := vetActuals(assertion.actuals, assertion.actualIndex)
if success {
return true
}
@@ -95,12 +125,34 @@ func (assertion *Assertion) vetExtras(optionalDescription ...interface{}) bool {
return false
}
-func vetExtras(extras []interface{}) (bool, string) {
- for i, extra := range extras {
- if extra != nil {
- zeroValue := reflect.Zero(reflect.TypeOf(extra)).Interface()
- if !reflect.DeepEqual(zeroValue, extra) {
- message := fmt.Sprintf("Unexpected non-nil/non-zero extra argument at index %d:\n\t<%T>: %#v", i+1, extra, extra)
+// vetError vets the actual values, except for the final error value, in case
+// the final error value is non-zero. Otherwise, it doesn't vet the actual
+// values, as these are allowed to take on any values unless there is a non-zero
+// error value.
+func (assertion *Assertion) vetError(optionalDescription ...interface{}) bool {
+ if err := assertion.actuals[assertion.actualIndex]; err != nil {
+ // Go error result idiom: all other actual values must be zero values.
+ return assertion.vetActuals(optionalDescription...)
+ }
+ return true
+}
+
+// vetActuals vets a slice of actual values, optionally skipping a particular
+// value slice element, such as the first or last value slice element.
+func vetActuals(actuals []interface{}, skipIndex int) (bool, string) {
+ for i, actual := range actuals {
+ if i == skipIndex {
+ continue
+ }
+ if actual != nil {
+ zeroValue := reflect.Zero(reflect.TypeOf(actual)).Interface()
+ if !reflect.DeepEqual(zeroValue, actual) {
+ var message string
+ if err, ok := actual.(error); ok {
+ message = fmt.Sprintf("Unexpected error: %s\n%s", err, format.Object(err, 1))
+ } else {
+ message = fmt.Sprintf("Unexpected non-nil/non-zero argument at index %d:\n\t<%T>: %#v", i, actual, actual)
+ }
return false, message
}
}
diff --git a/vendor/github.com/onsi/gomega/internal/async_assertion.go b/vendor/github.com/onsi/gomega/internal/async_assertion.go
index ae20c14b8..1188b0bce 100644
--- a/vendor/github.com/onsi/gomega/internal/async_assertion.go
+++ b/vendor/github.com/onsi/gomega/internal/async_assertion.go
@@ -1,15 +1,53 @@
package internal
import (
+ "context"
"errors"
"fmt"
"reflect"
"runtime"
+ "sync"
"time"
+ "github.com/onsi/gomega/format"
"github.com/onsi/gomega/types"
)
+var errInterface = reflect.TypeOf((*error)(nil)).Elem()
+var gomegaType = reflect.TypeOf((*types.Gomega)(nil)).Elem()
+var contextType = reflect.TypeOf(new(context.Context)).Elem()
+
+type formattedGomegaError interface {
+ FormattedGomegaError() string
+}
+
+type asyncPolledActualError struct {
+ message string
+}
+
+func (err *asyncPolledActualError) Error() string {
+ return err.message
+}
+
+func (err *asyncPolledActualError) FormattedGomegaError() string {
+ return err.message
+}
+
+type contextWithAttachProgressReporter interface {
+ AttachProgressReporter(func() string) func()
+}
+
+type asyncGomegaHaltExecutionError struct{}
+
+func (a asyncGomegaHaltExecutionError) GinkgoRecoverShouldIgnoreThisPanic() {}
+func (a asyncGomegaHaltExecutionError) Error() string {
+ return `An assertion has failed in a goroutine. You should call
+
+ defer GinkgoRecover()
+
+at the top of the goroutine that caused this panic. This will allow Ginkgo and Gomega to correctly capture and manage this panic.`
+}
+
type AsyncAssertionType uint
const (
@@ -17,83 +55,99 @@ const (
AsyncAssertionTypeConsistently
)
+func (at AsyncAssertionType) String() string {
+ switch at {
+ case AsyncAssertionTypeEventually:
+ return "Eventually"
+ case AsyncAssertionTypeConsistently:
+ return "Consistently"
+ }
+ return "INVALID ASYNC ASSERTION TYPE"
+}
+
type AsyncAssertion struct {
asyncType AsyncAssertionType
- actualIsFunc bool
- actualValue interface{}
- actualFunc func() ([]reflect.Value, error)
+ actualIsFunc bool
+ actual interface{}
+ argsToForward []interface{}
- timeoutInterval time.Duration
- pollingInterval time.Duration
- offset int
- g *Gomega
+ timeoutInterval time.Duration
+ pollingInterval time.Duration
+ mustPassRepeatedly int
+ ctx context.Context
+ offset int
+ g *Gomega
}
-func NewAsyncAssertion(asyncType AsyncAssertionType, actualInput interface{}, g *Gomega, timeoutInterval time.Duration, pollingInterval time.Duration, offset int) *AsyncAssertion {
+func NewAsyncAssertion(asyncType AsyncAssertionType, actualInput interface{}, g *Gomega, timeoutInterval time.Duration, pollingInterval time.Duration, mustPassRepeatedly int, ctx context.Context, offset int) *AsyncAssertion {
out := &AsyncAssertion{
- asyncType: asyncType,
- timeoutInterval: timeoutInterval,
- pollingInterval: pollingInterval,
- offset: offset,
- g: g,
+ asyncType: asyncType,
+ timeoutInterval: timeoutInterval,
+ pollingInterval: pollingInterval,
+ mustPassRepeatedly: mustPassRepeatedly,
+ offset: offset,
+ ctx: ctx,
+ g: g,
}
- switch actualType := reflect.TypeOf(actualInput); {
- case actualType.Kind() != reflect.Func:
- out.actualValue = actualInput
- case actualType.NumIn() == 0 && actualType.NumOut() > 0:
+ out.actual = actualInput
+ if actualInput != nil && reflect.TypeOf(actualInput).Kind() == reflect.Func {
out.actualIsFunc = true
- out.actualFunc = func() ([]reflect.Value, error) {
- return reflect.ValueOf(actualInput).Call([]reflect.Value{}), nil
- }
- case actualType.NumIn() == 1 && actualType.In(0).Implements(reflect.TypeOf((*types.Gomega)(nil)).Elem()):
- out.actualIsFunc = true
- out.actualFunc = func() (values []reflect.Value, err error) {
- var assertionFailure error
- assertionCapturingGomega := NewGomega(g.DurationBundle).ConfigureWithFailHandler(func(message string, callerSkip ...int) {
- skip := 0
- if len(callerSkip) > 0 {
- skip = callerSkip[0]
- }
- _, file, line, _ := runtime.Caller(skip + 1)
- assertionFailure = fmt.Errorf("Assertion in callback at %s:%d failed:\n%s", file, line, message)
- panic("stop execution")
- })
-
- defer func() {
- if actualType.NumOut() == 0 {
- if assertionFailure == nil {
- values = []reflect.Value{reflect.Zero(reflect.TypeOf((*error)(nil)).Elem())}
- } else {
- values = []reflect.Value{reflect.ValueOf(assertionFailure)}
- }
- } else {
- err = assertionFailure
- }
- if e := recover(); e != nil && assertionFailure == nil {
- panic(e)
- }
- }()
-
- values = reflect.ValueOf(actualInput).Call([]reflect.Value{reflect.ValueOf(assertionCapturingGomega)})
- return
- }
- default:
- msg := fmt.Sprintf("The function passed to Gomega's async assertions should either take no arguments and return values, or take a single Gomega interface that it can use to make assertions within the body of the function. When taking a Gomega interface the function can optionally return values or return nothing. The function you passed takes %d arguments and returns %d values.", actualType.NumIn(), actualType.NumOut())
- g.Fail(msg, offset+4)
}
return out
}
+func (assertion *AsyncAssertion) WithOffset(offset int) types.AsyncAssertion {
+ assertion.offset = offset
+ return assertion
+}
+
+func (assertion *AsyncAssertion) WithTimeout(interval time.Duration) types.AsyncAssertion {
+ assertion.timeoutInterval = interval
+ return assertion
+}
+
+func (assertion *AsyncAssertion) WithPolling(interval time.Duration) types.AsyncAssertion {
+ assertion.pollingInterval = interval
+ return assertion
+}
+
+func (assertion *AsyncAssertion) Within(timeout time.Duration) types.AsyncAssertion {
+ assertion.timeoutInterval = timeout
+ return assertion
+}
+
+func (assertion *AsyncAssertion) ProbeEvery(interval time.Duration) types.AsyncAssertion {
+ assertion.pollingInterval = interval
+ return assertion
+}
+
+func (assertion *AsyncAssertion) WithContext(ctx context.Context) types.AsyncAssertion {
+ assertion.ctx = ctx
+ return assertion
+}
+
+func (assertion *AsyncAssertion) WithArguments(argsToForward ...interface{}) types.AsyncAssertion {
+ assertion.argsToForward = argsToForward
+ return assertion
+}
+
+func (assertion *AsyncAssertion) MustPassRepeatedly(count int) types.AsyncAssertion {
+ assertion.mustPassRepeatedly = count
+ return assertion
+}
+
func (assertion *AsyncAssertion) Should(matcher types.GomegaMatcher, optionalDescription ...interface{}) bool {
assertion.g.THelper()
+ vetOptionalDescription("Asynchronous assertion", optionalDescription...)
return assertion.match(matcher, true, optionalDescription...)
}
func (assertion *AsyncAssertion) ShouldNot(matcher types.GomegaMatcher, optionalDescription ...interface{}) bool {
assertion.g.THelper()
+ vetOptionalDescription("Asynchronous assertion", optionalDescription...)
return assertion.match(matcher, false, optionalDescription...)
}
@@ -109,112 +163,409 @@ func (assertion *AsyncAssertion) buildDescription(optionalDescription ...interfa
return fmt.Sprintf(optionalDescription[0].(string), optionalDescription[1:]...) + "\n"
}
-func (assertion *AsyncAssertion) pollActual() (interface{}, error) {
+func (assertion *AsyncAssertion) processReturnValues(values []reflect.Value) (interface{}, error) {
+ if len(values) == 0 {
+ return nil, &asyncPolledActualError{
+ message: fmt.Sprintf("The function passed to %s did not return any values", assertion.asyncType),
+ }
+ }
+
+ actual := values[0].Interface()
+ if _, ok := AsPollingSignalError(actual); ok {
+ return actual, actual.(error)
+ }
+
+ var err error
+ for i, extraValue := range values[1:] {
+ extra := extraValue.Interface()
+ if extra == nil {
+ continue
+ }
+ if _, ok := AsPollingSignalError(extra); ok {
+ return actual, extra.(error)
+ }
+ extraType := reflect.TypeOf(extra)
+ zero := reflect.Zero(extraType).Interface()
+ if reflect.DeepEqual(extra, zero) {
+ continue
+ }
+ if i == len(values)-2 && extraType.Implements(errInterface) {
+ err = extra.(error)
+ }
+ if err == nil {
+ err = &asyncPolledActualError{
+ message: fmt.Sprintf("The function passed to %s had an unexpected non-nil/non-zero return value at index %d:\n%s", assertion.asyncType, i+1, format.Object(extra, 1)),
+ }
+ }
+ }
+
+ return actual, err
+}
+
+func (assertion *AsyncAssertion) invalidFunctionError(t reflect.Type) error {
+ return fmt.Errorf(`The function passed to %s had an invalid signature of %s. Functions passed to %s must either:
+
+ (a) have return values or
+ (b) take a Gomega interface as their first argument and use that Gomega instance to make assertions.
+
+You can learn more at https://onsi.github.io/gomega/#eventually
+`, assertion.asyncType, t, assertion.asyncType)
+}
+
+func (assertion *AsyncAssertion) noConfiguredContextForFunctionError() error {
+ return fmt.Errorf(`The function passed to %s requested a context.Context, but no context has been provided. Please pass one in using %s().WithContext().
+
+You can learn more at https://onsi.github.io/gomega/#eventually
+`, assertion.asyncType, assertion.asyncType)
+}
+
+func (assertion *AsyncAssertion) argumentMismatchError(t reflect.Type, numProvided int) error {
+ have := "have"
+ if numProvided == 1 {
+ have = "has"
+ }
+ return fmt.Errorf(`The function passed to %s has signature %s takes %d arguments but %d %s been provided. Please use %s().WithArguments() to pass the corect set of arguments.
+
+You can learn more at https://onsi.github.io/gomega/#eventually
+`, assertion.asyncType, t, t.NumIn(), numProvided, have, assertion.asyncType)
+}
+
+func (assertion *AsyncAssertion) invalidMustPassRepeatedlyError(reason string) error {
+ return fmt.Errorf(`Invalid use of MustPassRepeatedly with %s %s
+
+You can learn more at https://onsi.github.io/gomega/#eventually
+`, assertion.asyncType, reason)
+}
+
+func (assertion *AsyncAssertion) buildActualPoller() (func() (interface{}, error), error) {
if !assertion.actualIsFunc {
- return assertion.actualValue, nil
+ return func() (interface{}, error) { return assertion.actual, nil }, nil
+ }
+ actualValue := reflect.ValueOf(assertion.actual)
+ actualType := reflect.TypeOf(assertion.actual)
+ numIn, numOut, isVariadic := actualType.NumIn(), actualType.NumOut(), actualType.IsVariadic()
+
+ if numIn == 0 && numOut == 0 {
+ return nil, assertion.invalidFunctionError(actualType)
+ }
+ takesGomega, takesContext := false, false
+ if numIn > 0 {
+ takesGomega, takesContext = actualType.In(0).Implements(gomegaType), actualType.In(0).Implements(contextType)
+ }
+ if takesGomega && numIn > 1 && actualType.In(1).Implements(contextType) {
+ takesContext = true
+ }
+ if takesContext && len(assertion.argsToForward) > 0 && reflect.TypeOf(assertion.argsToForward[0]).Implements(contextType) {
+ takesContext = false
+ }
+ if !takesGomega && numOut == 0 {
+ return nil, assertion.invalidFunctionError(actualType)
+ }
+ if takesContext && assertion.ctx == nil {
+ return nil, assertion.noConfiguredContextForFunctionError()
+ }
+
+ var assertionFailure error
+ inValues := []reflect.Value{}
+ if takesGomega {
+ inValues = append(inValues, reflect.ValueOf(NewGomega(assertion.g.DurationBundle).ConfigureWithFailHandler(func(message string, callerSkip ...int) {
+ skip := 0
+ if len(callerSkip) > 0 {
+ skip = callerSkip[0]
+ }
+ _, file, line, _ := runtime.Caller(skip + 1)
+ assertionFailure = &asyncPolledActualError{
+ message: fmt.Sprintf("The function passed to %s failed at %s:%d with:\n%s", assertion.asyncType, file, line, message),
+ }
+ // we throw an asyncGomegaHaltExecutionError so that defer GinkgoRecover() can catch this error if the user makes an assertion in a goroutine
+ panic(asyncGomegaHaltExecutionError{})
+ })))
+ }
+ if takesContext {
+ inValues = append(inValues, reflect.ValueOf(assertion.ctx))
+ }
+ for _, arg := range assertion.argsToForward {
+ inValues = append(inValues, reflect.ValueOf(arg))
+ }
+
+ if !isVariadic && numIn != len(inValues) {
+ return nil, assertion.argumentMismatchError(actualType, len(inValues))
+ } else if isVariadic && len(inValues) < numIn-1 {
+ return nil, assertion.argumentMismatchError(actualType, len(inValues))
}
- values, err := assertion.actualFunc()
- if err != nil {
- return nil, err
+ if assertion.mustPassRepeatedly != 1 && assertion.asyncType != AsyncAssertionTypeEventually {
+ return nil, assertion.invalidMustPassRepeatedlyError("it can only be used with Eventually")
+ }
+ if assertion.mustPassRepeatedly < 1 {
+ return nil, assertion.invalidMustPassRepeatedlyError("parameter can't be < 1")
}
- extras := []interface{}{}
- for _, value := range values[1:] {
- extras = append(extras, value.Interface())
+
+ return func() (actual interface{}, err error) {
+ var values []reflect.Value
+ assertionFailure = nil
+ defer func() {
+ if numOut == 0 && takesGomega {
+ actual = assertionFailure
+ } else {
+ actual, err = assertion.processReturnValues(values)
+ _, isAsyncError := AsPollingSignalError(err)
+ if assertionFailure != nil && !isAsyncError {
+ err = assertionFailure
+ }
+ }
+ if e := recover(); e != nil {
+ if _, isAsyncError := AsPollingSignalError(e); isAsyncError {
+ err = e.(error)
+ } else if assertionFailure == nil {
+ panic(e)
+ }
+ }
+ }()
+ values = actualValue.Call(inValues)
+ return
+ }, nil
+}
+
+func (assertion *AsyncAssertion) afterTimeout() <-chan time.Time {
+ if assertion.timeoutInterval >= 0 {
+ return time.After(assertion.timeoutInterval)
}
- success, message := vetExtras(extras)
- if !success {
- return nil, errors.New(message)
+
+ if assertion.asyncType == AsyncAssertionTypeConsistently {
+ return time.After(assertion.g.DurationBundle.ConsistentlyDuration)
+ } else {
+ if assertion.ctx == nil {
+ return time.After(assertion.g.DurationBundle.EventuallyTimeout)
+ } else {
+ return nil
+ }
}
+}
- return values[0].Interface(), nil
+func (assertion *AsyncAssertion) afterPolling() <-chan time.Time {
+ if assertion.pollingInterval >= 0 {
+ return time.After(assertion.pollingInterval)
+ }
+ if assertion.asyncType == AsyncAssertionTypeConsistently {
+ return time.After(assertion.g.DurationBundle.ConsistentlyPollingInterval)
+ } else {
+ return time.After(assertion.g.DurationBundle.EventuallyPollingInterval)
+ }
}
-func (assertion *AsyncAssertion) matcherMayChange(matcher types.GomegaMatcher, value interface{}) bool {
- if assertion.actualIsFunc {
- return true
+func (assertion *AsyncAssertion) matcherSaysStopTrying(matcher types.GomegaMatcher, value interface{}) bool {
+ if assertion.actualIsFunc || types.MatchMayChangeInTheFuture(matcher, value) {
+ return false
}
- return types.MatchMayChangeInTheFuture(matcher, value)
+ return true
+}
+
+func (assertion *AsyncAssertion) pollMatcher(matcher types.GomegaMatcher, value interface{}) (matches bool, err error) {
+ defer func() {
+ if e := recover(); e != nil {
+ if _, isAsyncError := AsPollingSignalError(e); isAsyncError {
+ err = e.(error)
+ } else {
+ panic(e)
+ }
+ }
+ }()
+
+ matches, err = matcher.Match(value)
+
+ return
}
func (assertion *AsyncAssertion) match(matcher types.GomegaMatcher, desiredMatch bool, optionalDescription ...interface{}) bool {
timer := time.Now()
- timeout := time.After(assertion.timeoutInterval)
+ timeout := assertion.afterTimeout()
+ lock := sync.Mutex{}
- var matches bool
- var err error
- mayChange := true
- value, err := assertion.pollActual()
- if err == nil {
- mayChange = assertion.matcherMayChange(matcher, value)
- matches, err = matcher.Match(value)
- }
+ var matches, hasLastValidActual bool
+ var actual, lastValidActual interface{}
+ var actualErr, matcherErr error
+ var oracleMatcherSaysStop bool
assertion.g.THelper()
- fail := func(preamble string) {
- errMsg := ""
+ pollActual, buildActualPollerErr := assertion.buildActualPoller()
+ if buildActualPollerErr != nil {
+ assertion.g.Fail(buildActualPollerErr.Error(), 2+assertion.offset)
+ return false
+ }
+
+ actual, actualErr = pollActual()
+ if actualErr == nil {
+ lastValidActual = actual
+ hasLastValidActual = true
+ oracleMatcherSaysStop = assertion.matcherSaysStopTrying(matcher, actual)
+ matches, matcherErr = assertion.pollMatcher(matcher, actual)
+ }
+
+ renderError := func(preamble string, err error) string {
message := ""
- if err != nil {
- errMsg = "Error: " + err.Error()
+ if pollingSignalErr, ok := AsPollingSignalError(err); ok {
+ message = err.Error()
+ for _, attachment := range pollingSignalErr.Attachments {
+ message += fmt.Sprintf("\n%s:\n", attachment.Description)
+ message += format.Object(attachment.Object, 1)
+ }
+ } else {
+ message = preamble + "\n" + format.Object(err, 1)
+ }
+ return message
+ }
+
+ messageGenerator := func() string {
+ // can be called out of band by Ginkgo if the user requests a progress report
+ lock.Lock()
+ defer lock.Unlock()
+ message := ""
+
+ if actualErr == nil {
+ if matcherErr == nil {
+ if desiredMatch != matches {
+ if desiredMatch {
+ message += matcher.FailureMessage(actual)
+ } else {
+ message += matcher.NegatedFailureMessage(actual)
+ }
+ } else {
+ if assertion.asyncType == AsyncAssertionTypeConsistently {
+ message += "There is no failure as the matcher passed to Consistently has not yet failed"
+ } else {
+ message += "There is no failure as the matcher passed to Eventually succeeded on its most recent iteration"
+ }
+ }
+ } else {
+ var fgErr formattedGomegaError
+ if errors.As(actualErr, &fgErr) {
+ message += fgErr.FormattedGomegaError() + "\n"
+ } else {
+ message += renderError(fmt.Sprintf("The matcher passed to %s returned the following error:", assertion.asyncType), matcherErr)
+ }
+ }
} else {
- if desiredMatch {
- message = matcher.FailureMessage(value)
+ var fgErr formattedGomegaError
+ if errors.As(actualErr, &fgErr) {
+ message += fgErr.FormattedGomegaError() + "\n"
} else {
- message = matcher.NegatedFailureMessage(value)
+ message += renderError(fmt.Sprintf("The function passed to %s returned the following error:", assertion.asyncType), actualErr)
+ }
+ if hasLastValidActual {
+ message += fmt.Sprintf("\nAt one point, however, the function did return successfully.\nYet, %s failed because", assertion.asyncType)
+ _, e := matcher.Match(lastValidActual)
+ if e != nil {
+ message += renderError(" the matcher returned the following error:", e)
+ } else {
+ message += " the matcher was not satisfied:\n"
+ if desiredMatch {
+ message += matcher.FailureMessage(lastValidActual)
+ } else {
+ message += matcher.NegatedFailureMessage(lastValidActual)
+ }
+ }
}
}
- assertion.g.THelper()
+
description := assertion.buildDescription(optionalDescription...)
- assertion.g.Fail(fmt.Sprintf("%s after %.3fs.\n%s%s%s", preamble, time.Since(timer).Seconds(), description, message, errMsg), 3+assertion.offset)
+ return fmt.Sprintf("%s%s", description, message)
}
- if assertion.asyncType == AsyncAssertionTypeEventually {
- for {
- if err == nil && matches == desiredMatch {
- return true
- }
+ fail := func(preamble string) {
+ assertion.g.THelper()
+ assertion.g.Fail(fmt.Sprintf("%s after %.3fs.\n%s", preamble, time.Since(timer).Seconds(), messageGenerator()), 3+assertion.offset)
+ }
- if !mayChange {
- fail("No future change is possible. Bailing out early")
- return false
- }
+ var contextDone <-chan struct{}
+ if assertion.ctx != nil {
+ contextDone = assertion.ctx.Done()
+ if v, ok := assertion.ctx.Value("GINKGO_SPEC_CONTEXT").(contextWithAttachProgressReporter); ok {
+ detach := v.AttachProgressReporter(messageGenerator)
+ defer detach()
+ }
+ }
- select {
- case <-time.After(assertion.pollingInterval):
- value, err = assertion.pollActual()
- if err == nil {
- mayChange = assertion.matcherMayChange(matcher, value)
- matches, err = matcher.Match(value)
+ // Used to count the number of times in a row a step passed
+ passedRepeatedlyCount := 0
+ for {
+ var nextPoll <-chan time.Time = nil
+ var isTryAgainAfterError = false
+
+ for _, err := range []error{actualErr, matcherErr} {
+ if pollingSignalErr, ok := AsPollingSignalError(err); ok {
+ if pollingSignalErr.IsStopTrying() {
+ fail("Told to stop trying")
+ return false
+ }
+ if pollingSignalErr.IsTryAgainAfter() {
+ nextPoll = time.After(pollingSignalErr.TryAgainDuration())
+ isTryAgainAfterError = true
}
- case <-timeout:
- fail("Timed out")
- return false
}
}
- } else if assertion.asyncType == AsyncAssertionTypeConsistently {
- for {
- if !(err == nil && matches == desiredMatch) {
+
+ if actualErr == nil && matcherErr == nil && matches == desiredMatch {
+ if assertion.asyncType == AsyncAssertionTypeEventually {
+ passedRepeatedlyCount += 1
+ if passedRepeatedlyCount == assertion.mustPassRepeatedly {
+ return true
+ }
+ }
+ } else if !isTryAgainAfterError {
+ if assertion.asyncType == AsyncAssertionTypeConsistently {
fail("Failed")
return false
}
+ // Reset the consecutive pass count
+ passedRepeatedlyCount = 0
+ }
- if !mayChange {
+ if oracleMatcherSaysStop {
+ if assertion.asyncType == AsyncAssertionTypeEventually {
+ fail("No future change is possible. Bailing out early")
+ return false
+ } else {
return true
}
+ }
+
+ if nextPoll == nil {
+ nextPoll = assertion.afterPolling()
+ }
- select {
- case <-time.After(assertion.pollingInterval):
- value, err = assertion.pollActual()
- if err == nil {
- mayChange = assertion.matcherMayChange(matcher, value)
- matches, err = matcher.Match(value)
+ select {
+ case <-nextPoll:
+ a, e := pollActual()
+ lock.Lock()
+ actual, actualErr = a, e
+ lock.Unlock()
+ if actualErr == nil {
+ lock.Lock()
+ lastValidActual = actual
+ hasLastValidActual = true
+ lock.Unlock()
+ oracleMatcherSaysStop = assertion.matcherSaysStopTrying(matcher, actual)
+ m, e := assertion.pollMatcher(matcher, actual)
+ lock.Lock()
+ matches, matcherErr = m, e
+ lock.Unlock()
+ }
+ case <-contextDone:
+ fail("Context was cancelled")
+ return false
+ case <-timeout:
+ if assertion.asyncType == AsyncAssertionTypeEventually {
+ fail("Timed out")
+ return false
+ } else {
+ if isTryAgainAfterError {
+ fail("Timed out while waiting on TryAgainAfter")
+ return false
}
- case <-timeout:
return true
}
}
}
-
- return false
}
diff --git a/vendor/github.com/onsi/gomega/internal/duration_bundle.go b/vendor/github.com/onsi/gomega/internal/duration_bundle.go
index af8d989fa..6e0d90d3a 100644
--- a/vendor/github.com/onsi/gomega/internal/duration_bundle.go
+++ b/vendor/github.com/onsi/gomega/internal/duration_bundle.go
@@ -44,28 +44,28 @@ func durationFromEnv(key string, defaultDuration time.Duration) time.Duration {
return duration
}
-func toDuration(input interface{}) time.Duration {
+func toDuration(input interface{}) (time.Duration, error) {
duration, ok := input.(time.Duration)
if ok {
- return duration
+ return duration, nil
}
value := reflect.ValueOf(input)
kind := reflect.TypeOf(input).Kind()
if reflect.Int <= kind && kind <= reflect.Int64 {
- return time.Duration(value.Int()) * time.Second
+ return time.Duration(value.Int()) * time.Second, nil
} else if reflect.Uint <= kind && kind <= reflect.Uint64 {
- return time.Duration(value.Uint()) * time.Second
+ return time.Duration(value.Uint()) * time.Second, nil
} else if reflect.Float32 <= kind && kind <= reflect.Float64 {
- return time.Duration(value.Float() * float64(time.Second))
+ return time.Duration(value.Float() * float64(time.Second)), nil
} else if reflect.String == kind {
duration, err := time.ParseDuration(value.String())
if err != nil {
- panic(fmt.Sprintf("%#v is not a valid parsable duration string.", input))
+ return 0, fmt.Errorf("%#v is not a valid parsable duration string: %w", input, err)
}
- return duration
+ return duration, nil
}
- panic(fmt.Sprintf("%v is not a valid interval. Must be time.Duration, parsable duration string or a number.", input))
+ return 0, fmt.Errorf("%#v is not a valid interval. Must be a time.Duration, a parsable duration string, or a number.", input)
}
diff --git a/vendor/github.com/onsi/gomega/internal/gomega.go b/vendor/github.com/onsi/gomega/internal/gomega.go
index f5b5c6b7a..de1f4f336 100644
--- a/vendor/github.com/onsi/gomega/internal/gomega.go
+++ b/vendor/github.com/onsi/gomega/internal/gomega.go
@@ -1,6 +1,7 @@
package internal
import (
+ "context"
"time"
"github.com/onsi/gomega/types"
@@ -39,50 +40,76 @@ func (g *Gomega) ConfigureWithT(t types.GomegaTestingT) *Gomega {
return g
}
-func (g *Gomega) Ω(atual interface{}, extra ...interface{}) types.Assertion {
- return g.ExpectWithOffset(0, atual, extra...)
+func (g *Gomega) Ω(actual interface{}, extra ...interface{}) types.Assertion {
+ return g.ExpectWithOffset(0, actual, extra...)
}
-func (g *Gomega) Expect(atual interface{}, extra ...interface{}) types.Assertion {
- return g.ExpectWithOffset(0, atual, extra...)
+func (g *Gomega) Expect(actual interface{}, extra ...interface{}) types.Assertion {
+ return g.ExpectWithOffset(0, actual, extra...)
}
func (g *Gomega) ExpectWithOffset(offset int, actual interface{}, extra ...interface{}) types.Assertion {
return NewAssertion(actual, g, offset, extra...)
}
-func (g *Gomega) Eventually(actual interface{}, intervals ...interface{}) types.AsyncAssertion {
- return g.EventuallyWithOffset(0, actual, intervals...)
+func (g *Gomega) Eventually(actualOrCtx interface{}, args ...interface{}) types.AsyncAssertion {
+ return g.makeAsyncAssertion(AsyncAssertionTypeEventually, 0, actualOrCtx, args...)
}
-func (g *Gomega) EventuallyWithOffset(offset int, actual interface{}, intervals ...interface{}) types.AsyncAssertion {
- timeoutInterval := g.DurationBundle.EventuallyTimeout
- pollingInterval := g.DurationBundle.EventuallyPollingInterval
- if len(intervals) > 0 {
- timeoutInterval = toDuration(intervals[0])
- }
- if len(intervals) > 1 {
- pollingInterval = toDuration(intervals[1])
- }
+func (g *Gomega) EventuallyWithOffset(offset int, actualOrCtx interface{}, args ...interface{}) types.AsyncAssertion {
+ return g.makeAsyncAssertion(AsyncAssertionTypeEventually, offset, actualOrCtx, args...)
+}
- return NewAsyncAssertion(AsyncAssertionTypeEventually, actual, g, timeoutInterval, pollingInterval, offset)
+func (g *Gomega) Consistently(actualOrCtx interface{}, args ...interface{}) types.AsyncAssertion {
+ return g.makeAsyncAssertion(AsyncAssertionTypeConsistently, 0, actualOrCtx, args...)
}
-func (g *Gomega) Consistently(actual interface{}, intervals ...interface{}) types.AsyncAssertion {
- return g.ConsistentlyWithOffset(0, actual, intervals...)
+func (g *Gomega) ConsistentlyWithOffset(offset int, actualOrCtx interface{}, args ...interface{}) types.AsyncAssertion {
+ return g.makeAsyncAssertion(AsyncAssertionTypeConsistently, offset, actualOrCtx, args...)
}
-func (g *Gomega) ConsistentlyWithOffset(offset int, actual interface{}, intervals ...interface{}) types.AsyncAssertion {
- timeoutInterval := g.DurationBundle.ConsistentlyDuration
- pollingInterval := g.DurationBundle.ConsistentlyPollingInterval
+func (g *Gomega) makeAsyncAssertion(asyncAssertionType AsyncAssertionType, offset int, actualOrCtx interface{}, args ...interface{}) types.AsyncAssertion {
+ baseOffset := 3
+ timeoutInterval := -time.Duration(1)
+ pollingInterval := -time.Duration(1)
+ intervals := []interface{}{}
+ var ctx context.Context
+
+ actual := actualOrCtx
+ startingIndex := 0
+ if _, isCtx := actualOrCtx.(context.Context); isCtx && len(args) > 0 {
+ // the first argument is a context, we should accept it as the context _only if_ it is **not** the only argumnent **and** the second argument is not a parseable duration
+ // this is due to an unfortunate ambiguity in early version of Gomega in which multi-type durations are allowed after the actual
+ if _, err := toDuration(args[0]); err != nil {
+ ctx = actualOrCtx.(context.Context)
+ actual = args[0]
+ startingIndex = 1
+ }
+ }
+
+ for _, arg := range args[startingIndex:] {
+ switch v := arg.(type) {
+ case context.Context:
+ ctx = v
+ default:
+ intervals = append(intervals, arg)
+ }
+ }
+ var err error
if len(intervals) > 0 {
- timeoutInterval = toDuration(intervals[0])
+ timeoutInterval, err = toDuration(intervals[0])
+ if err != nil {
+ g.Fail(err.Error(), offset+baseOffset)
+ }
}
if len(intervals) > 1 {
- pollingInterval = toDuration(intervals[1])
+ pollingInterval, err = toDuration(intervals[1])
+ if err != nil {
+ g.Fail(err.Error(), offset+baseOffset)
+ }
}
- return NewAsyncAssertion(AsyncAssertionTypeConsistently, actual, g, timeoutInterval, pollingInterval, offset)
+ return NewAsyncAssertion(asyncAssertionType, actual, g, timeoutInterval, pollingInterval, 1, ctx, offset)
}
func (g *Gomega) SetDefaultEventuallyTimeout(t time.Duration) {
diff --git a/vendor/github.com/onsi/gomega/internal/gutil/post_ioutil.go b/vendor/github.com/onsi/gomega/internal/gutil/post_ioutil.go
new file mode 100644
index 000000000..6864055a5
--- /dev/null
+++ b/vendor/github.com/onsi/gomega/internal/gutil/post_ioutil.go
@@ -0,0 +1,48 @@
+//go:build go1.16
+// +build go1.16
+
+// Package gutil is a replacement for ioutil, which should not be used in new
+// code as of Go 1.16. With Go 1.16 and higher, this implementation
+// uses the ioutil replacement functions in "io" and "os" with some
+// Gomega specifics. This means that we should not get deprecation warnings
+// for ioutil when they are added.
+package gutil
+
+import (
+ "io"
+ "os"
+)
+
+func NopCloser(r io.Reader) io.ReadCloser {
+ return io.NopCloser(r)
+}
+
+func ReadAll(r io.Reader) ([]byte, error) {
+ return io.ReadAll(r)
+}
+
+func ReadDir(dirname string) ([]string, error) {
+ entries, err := os.ReadDir(dirname)
+ if err != nil {
+ return nil, err
+ }
+
+ var names []string
+ for _, entry := range entries {
+ names = append(names, entry.Name())
+ }
+
+ return names, nil
+}
+
+func ReadFile(filename string) ([]byte, error) {
+ return os.ReadFile(filename)
+}
+
+func MkdirTemp(dir, pattern string) (string, error) {
+ return os.MkdirTemp(dir, pattern)
+}
+
+func WriteFile(filename string, data []byte) error {
+ return os.WriteFile(filename, data, 0644)
+}
diff --git a/vendor/github.com/onsi/gomega/internal/gutil/using_ioutil.go b/vendor/github.com/onsi/gomega/internal/gutil/using_ioutil.go
new file mode 100644
index 000000000..5c0ce1ee3
--- /dev/null
+++ b/vendor/github.com/onsi/gomega/internal/gutil/using_ioutil.go
@@ -0,0 +1,47 @@
+//go:build !go1.16
+// +build !go1.16
+
+// Package gutil is a replacement for ioutil, which should not be used in new
+// code as of Go 1.16. With Go 1.15 and lower, this implementation
+// uses the ioutil functions, meaning that although Gomega is not officially
+// supported on these versions, it is still likely to work.
+package gutil
+
+import (
+ "io"
+ "io/ioutil"
+)
+
+func NopCloser(r io.Reader) io.ReadCloser {
+ return ioutil.NopCloser(r)
+}
+
+func ReadAll(r io.Reader) ([]byte, error) {
+ return ioutil.ReadAll(r)
+}
+
+func ReadDir(dirname string) ([]string, error) {
+ files, err := ioutil.ReadDir(dirname)
+ if err != nil {
+ return nil, err
+ }
+
+ var names []string
+ for _, file := range files {
+ names = append(names, file.Name())
+ }
+
+ return names, nil
+}
+
+func ReadFile(filename string) ([]byte, error) {
+ return ioutil.ReadFile(filename)
+}
+
+func MkdirTemp(dir, pattern string) (string, error) {
+ return ioutil.TempDir(dir, pattern)
+}
+
+func WriteFile(filename string, data []byte) error {
+ return ioutil.WriteFile(filename, data, 0644)
+}
diff --git a/vendor/github.com/onsi/gomega/internal/polling_signal_error.go b/vendor/github.com/onsi/gomega/internal/polling_signal_error.go
new file mode 100644
index 000000000..83b04b1a4
--- /dev/null
+++ b/vendor/github.com/onsi/gomega/internal/polling_signal_error.go
@@ -0,0 +1,106 @@
+package internal
+
+import (
+ "errors"
+ "fmt"
+ "time"
+)
+
+type PollingSignalErrorType int
+
+const (
+ PollingSignalErrorTypeStopTrying PollingSignalErrorType = iota
+ PollingSignalErrorTypeTryAgainAfter
+)
+
+type PollingSignalError interface {
+ error
+ Wrap(err error) PollingSignalError
+ Attach(description string, obj any) PollingSignalError
+ Now()
+}
+
+var StopTrying = func(message string) PollingSignalError {
+ return &PollingSignalErrorImpl{
+ message: message,
+ pollingSignalErrorType: PollingSignalErrorTypeStopTrying,
+ }
+}
+
+var TryAgainAfter = func(duration time.Duration) PollingSignalError {
+ return &PollingSignalErrorImpl{
+ message: fmt.Sprintf("told to try again after %s", duration),
+ duration: duration,
+ pollingSignalErrorType: PollingSignalErrorTypeTryAgainAfter,
+ }
+}
+
+type PollingSignalErrorAttachment struct {
+ Description string
+ Object any
+}
+
+type PollingSignalErrorImpl struct {
+ message string
+ wrappedErr error
+ pollingSignalErrorType PollingSignalErrorType
+ duration time.Duration
+ Attachments []PollingSignalErrorAttachment
+}
+
+func (s *PollingSignalErrorImpl) Wrap(err error) PollingSignalError {
+ s.wrappedErr = err
+ return s
+}
+
+func (s *PollingSignalErrorImpl) Attach(description string, obj any) PollingSignalError {
+ s.Attachments = append(s.Attachments, PollingSignalErrorAttachment{description, obj})
+ return s
+}
+
+func (s *PollingSignalErrorImpl) Error() string {
+ if s.wrappedErr == nil {
+ return s.message
+ } else {
+ return s.message + ": " + s.wrappedErr.Error()
+ }
+}
+
+func (s *PollingSignalErrorImpl) Unwrap() error {
+ if s == nil {
+ return nil
+ }
+ return s.wrappedErr
+}
+
+func (s *PollingSignalErrorImpl) Now() {
+ panic(s)
+}
+
+func (s *PollingSignalErrorImpl) IsStopTrying() bool {
+ return s.pollingSignalErrorType == PollingSignalErrorTypeStopTrying
+}
+
+func (s *PollingSignalErrorImpl) IsTryAgainAfter() bool {
+ return s.pollingSignalErrorType == PollingSignalErrorTypeTryAgainAfter
+}
+
+func (s *PollingSignalErrorImpl) TryAgainDuration() time.Duration {
+ return s.duration
+}
+
+func AsPollingSignalError(actual interface{}) (*PollingSignalErrorImpl, bool) {
+ if actual == nil {
+ return nil, false
+ }
+ if actualErr, ok := actual.(error); ok {
+ var target *PollingSignalErrorImpl
+ if errors.As(actualErr, &target) {
+ return target, true
+ } else {
+ return nil, false
+ }
+ }
+
+ return nil, false
+}
diff --git a/vendor/github.com/onsi/gomega/internal/vetoptdesc.go b/vendor/github.com/onsi/gomega/internal/vetoptdesc.go
new file mode 100644
index 000000000..f29587641
--- /dev/null
+++ b/vendor/github.com/onsi/gomega/internal/vetoptdesc.go
@@ -0,0 +1,22 @@
+package internal
+
+import (
+ "fmt"
+
+ "github.com/onsi/gomega/types"
+)
+
+// vetOptionalDescription vets the optional description args: if it finds any
+// Gomega matcher at the beginning it panics. This allows for rendering Gomega
+// matchers as part of an optional Description, as long as they're not in the
+// first slot.
+func vetOptionalDescription(assertion string, optionalDescription ...interface{}) {
+ if len(optionalDescription) == 0 {
+ return
+ }
+ if _, isGomegaMatcher := optionalDescription[0].(types.GomegaMatcher); isGomegaMatcher {
+ panic(fmt.Sprintf("%s has a GomegaMatcher as the first element of optionalDescription.\n\t"+
+ "Do you mean to use And/Or/SatisfyAll/SatisfyAny to combine multiple matchers?",
+ assertion))
+ }
+}
diff --git a/vendor/github.com/onsi/gomega/matchers.go b/vendor/github.com/onsi/gomega/matchers.go
index 223f6ef53..b832f3dba 100644
--- a/vendor/github.com/onsi/gomega/matchers.go
+++ b/vendor/github.com/onsi/gomega/matchers.go
@@ -3,139 +3,161 @@ package gomega
import (
"time"
+ "github.com/google/go-cmp/cmp"
"github.com/onsi/gomega/matchers"
"github.com/onsi/gomega/types"
)
-//Equal uses reflect.DeepEqual to compare actual with expected. Equal is strict about
-//types when performing comparisons.
-//It is an error for both actual and expected to be nil. Use BeNil() instead.
+// Equal uses reflect.DeepEqual to compare actual with expected. Equal is strict about
+// types when performing comparisons.
+// It is an error for both actual and expected to be nil. Use BeNil() instead.
func Equal(expected interface{}) types.GomegaMatcher {
return &matchers.EqualMatcher{
Expected: expected,
}
}
-//BeEquivalentTo is more lax than Equal, allowing equality between different types.
-//This is done by converting actual to have the type of expected before
-//attempting equality with reflect.DeepEqual.
-//It is an error for actual and expected to be nil. Use BeNil() instead.
+// BeEquivalentTo is more lax than Equal, allowing equality between different types.
+// This is done by converting actual to have the type of expected before
+// attempting equality with reflect.DeepEqual.
+// It is an error for actual and expected to be nil. Use BeNil() instead.
func BeEquivalentTo(expected interface{}) types.GomegaMatcher {
return &matchers.BeEquivalentToMatcher{
Expected: expected,
}
}
-//BeIdenticalTo uses the == operator to compare actual with expected.
-//BeIdenticalTo is strict about types when performing comparisons.
-//It is an error for both actual and expected to be nil. Use BeNil() instead.
+// BeComparableTo uses gocmp.Equal from github.com/google/go-cmp (instead of reflect.DeepEqual) to perform a deep comparison.
+// You can pass cmp.Option as options.
+// It is an error for actual and expected to be nil. Use BeNil() instead.
+func BeComparableTo(expected interface{}, opts ...cmp.Option) types.GomegaMatcher {
+ return &matchers.BeComparableToMatcher{
+ Expected: expected,
+ Options: opts,
+ }
+}
+
+// BeIdenticalTo uses the == operator to compare actual with expected.
+// BeIdenticalTo is strict about types when performing comparisons.
+// It is an error for both actual and expected to be nil. Use BeNil() instead.
func BeIdenticalTo(expected interface{}) types.GomegaMatcher {
return &matchers.BeIdenticalToMatcher{
Expected: expected,
}
}
-//BeNil succeeds if actual is nil
+// BeNil succeeds if actual is nil
func BeNil() types.GomegaMatcher {
return &matchers.BeNilMatcher{}
}
-//BeTrue succeeds if actual is true
+// BeTrue succeeds if actual is true
func BeTrue() types.GomegaMatcher {
return &matchers.BeTrueMatcher{}
}
-//BeFalse succeeds if actual is false
+// BeFalse succeeds if actual is false
func BeFalse() types.GomegaMatcher {
return &matchers.BeFalseMatcher{}
}
-//HaveOccurred succeeds if actual is a non-nil error
-//The typical Go error checking pattern looks like:
-// err := SomethingThatMightFail()
-// Expect(err).ShouldNot(HaveOccurred())
+// HaveOccurred succeeds if actual is a non-nil error
+// The typical Go error checking pattern looks like:
+//
+// err := SomethingThatMightFail()
+// Expect(err).ShouldNot(HaveOccurred())
func HaveOccurred() types.GomegaMatcher {
return &matchers.HaveOccurredMatcher{}
}
-//Succeed passes if actual is a nil error
-//Succeed is intended to be used with functions that return a single error value. Instead of
-// err := SomethingThatMightFail()
-// Expect(err).ShouldNot(HaveOccurred())
+// Succeed passes if actual is a nil error
+// Succeed is intended to be used with functions that return a single error value. Instead of
+//
+// err := SomethingThatMightFail()
+// Expect(err).ShouldNot(HaveOccurred())
//
-//You can write:
-// Expect(SomethingThatMightFail()).Should(Succeed())
+// You can write:
//
-//It is a mistake to use Succeed with a function that has multiple return values. Gomega's Ω and Expect
-//functions automatically trigger failure if any return values after the first return value are non-zero/non-nil.
-//This means that Ω(MultiReturnFunc()).ShouldNot(Succeed()) can never pass.
+// Expect(SomethingThatMightFail()).Should(Succeed())
+//
+// It is a mistake to use Succeed with a function that has multiple return values. Gomega's Ω and Expect
+// functions automatically trigger failure if any return values after the first return value are non-zero/non-nil.
+// This means that Ω(MultiReturnFunc()).ShouldNot(Succeed()) can never pass.
func Succeed() types.GomegaMatcher {
return &matchers.SucceedMatcher{}
}
-//MatchError succeeds if actual is a non-nil error that matches the passed in string/error.
+// MatchError succeeds if actual is a non-nil error that matches the passed in
+// string, error, or matcher.
+//
+// These are valid use-cases:
//
-//These are valid use-cases:
// Expect(err).Should(MatchError("an error")) //asserts that err.Error() == "an error"
// Expect(err).Should(MatchError(SomeError)) //asserts that err == SomeError (via reflect.DeepEqual)
+// Expect(err).Should(MatchError(ContainSubstring("sprocket not found"))) // asserts that err.Error() contains substring "sprocket not found"
//
-//It is an error for err to be nil or an object that does not implement the Error interface
+// It is an error for err to be nil or an object that does not implement the
+// Error interface
func MatchError(expected interface{}) types.GomegaMatcher {
return &matchers.MatchErrorMatcher{
Expected: expected,
}
}
-//BeClosed succeeds if actual is a closed channel.
-//It is an error to pass a non-channel to BeClosed, it is also an error to pass nil
+// BeClosed succeeds if actual is a closed channel.
+// It is an error to pass a non-channel to BeClosed, it is also an error to pass nil
//
-//In order to check whether or not the channel is closed, Gomega must try to read from the channel
-//(even in the `ShouldNot(BeClosed())` case). You should keep this in mind if you wish to make subsequent assertions about
-//values coming down the channel.
+// In order to check whether or not the channel is closed, Gomega must try to read from the channel
+// (even in the `ShouldNot(BeClosed())` case). You should keep this in mind if you wish to make subsequent assertions about
+// values coming down the channel.
//
-//Also, if you are testing that a *buffered* channel is closed you must first read all values out of the channel before
-//asserting that it is closed (it is not possible to detect that a buffered-channel has been closed until all its buffered values are read).
+// Also, if you are testing that a *buffered* channel is closed you must first read all values out of the channel before
+// asserting that it is closed (it is not possible to detect that a buffered-channel has been closed until all its buffered values are read).
//
-//Finally, as a corollary: it is an error to check whether or not a send-only channel is closed.
+// Finally, as a corollary: it is an error to check whether or not a send-only channel is closed.
func BeClosed() types.GomegaMatcher {
return &matchers.BeClosedMatcher{}
}
-//Receive succeeds if there is a value to be received on actual.
-//Actual must be a channel (and cannot be a send-only channel) -- anything else is an error.
+// Receive succeeds if there is a value to be received on actual.
+// Actual must be a channel (and cannot be a send-only channel) -- anything else is an error.
//
-//Receive returns immediately and never blocks:
+// Receive returns immediately and never blocks:
//
-//- If there is nothing on the channel `c` then Expect(c).Should(Receive()) will fail and Ω(c).ShouldNot(Receive()) will pass.
+// - If there is nothing on the channel `c` then Expect(c).Should(Receive()) will fail and Ω(c).ShouldNot(Receive()) will pass.
//
-//- If the channel `c` is closed then Expect(c).Should(Receive()) will fail and Ω(c).ShouldNot(Receive()) will pass.
+// - If the channel `c` is closed then Expect(c).Should(Receive()) will fail and Ω(c).ShouldNot(Receive()) will pass.
//
-//- If there is something on the channel `c` ready to be read, then Expect(c).Should(Receive()) will pass and Ω(c).ShouldNot(Receive()) will fail.
+// - If there is something on the channel `c` ready to be read, then Expect(c).Should(Receive()) will pass and Ω(c).ShouldNot(Receive()) will fail.
//
-//If you have a go-routine running in the background that will write to channel `c` you can:
-// Eventually(c).Should(Receive())
+// If you have a go-routine running in the background that will write to channel `c` you can:
//
-//This will timeout if nothing gets sent to `c` (you can modify the timeout interval as you normally do with `Eventually`)
+// Eventually(c).Should(Receive())
//
-//A similar use-case is to assert that no go-routine writes to a channel (for a period of time). You can do this with `Consistently`:
-// Consistently(c).ShouldNot(Receive())
+// This will timeout if nothing gets sent to `c` (you can modify the timeout interval as you normally do with `Eventually`)
//
-//You can pass `Receive` a matcher. If you do so, it will match the received object against the matcher. For example:
-// Expect(c).Should(Receive(Equal("foo")))
+// A similar use-case is to assert that no go-routine writes to a channel (for a period of time). You can do this with `Consistently`:
//
-//When given a matcher, `Receive` will always fail if there is nothing to be received on the channel.
+// Consistently(c).ShouldNot(Receive())
//
-//Passing Receive a matcher is especially useful when paired with Eventually:
+// You can pass `Receive` a matcher. If you do so, it will match the received object against the matcher. For example:
//
-// Eventually(c).Should(Receive(ContainSubstring("bar")))
+// Expect(c).Should(Receive(Equal("foo")))
//
-//will repeatedly attempt to pull values out of `c` until a value matching "bar" is received.
+// When given a matcher, `Receive` will always fail if there is nothing to be received on the channel.
//
-//Finally, if you want to have a reference to the value *sent* to the channel you can pass the `Receive` matcher a pointer to a variable of the appropriate type:
-// var myThing thing
-// Eventually(thingChan).Should(Receive(&myThing))
-// Expect(myThing.Sprocket).Should(Equal("foo"))
-// Expect(myThing.IsValid()).Should(BeTrue())
+// Passing Receive a matcher is especially useful when paired with Eventually:
+//
+// Eventually(c).Should(Receive(ContainSubstring("bar")))
+//
+// will repeatedly attempt to pull values out of `c` until a value matching "bar" is received.
+//
+// Finally, if you want to have a reference to the value *sent* to the channel you can pass the `Receive` matcher a pointer to a variable of the appropriate type:
+//
+// var myThing thing
+// Eventually(thingChan).Should(Receive(&myThing))
+// Expect(myThing.Sprocket).Should(Equal("foo"))
+// Expect(myThing.IsValid()).Should(BeTrue())
func Receive(args ...interface{}) types.GomegaMatcher {
var arg interface{}
if len(args) > 0 {
@@ -147,27 +169,27 @@ func Receive(args ...interface{}) types.GomegaMatcher {
}
}
-//BeSent succeeds if a value can be sent to actual.
-//Actual must be a channel (and cannot be a receive-only channel) that can sent the type of the value passed into BeSent -- anything else is an error.
-//In addition, actual must not be closed.
+// BeSent succeeds if a value can be sent to actual.
+// Actual must be a channel (and cannot be a receive-only channel) that can send the type of the value passed into BeSent -- anything else is an error.
+// In addition, actual must not be closed.
//
-//BeSent never blocks:
+// BeSent never blocks:
//
-//- If the channel `c` is not ready to receive then Expect(c).Should(BeSent("foo")) will fail immediately
-//- If the channel `c` is eventually ready to receive then Eventually(c).Should(BeSent("foo")) will succeed.. presuming the channel becomes ready to receive before Eventually's timeout
-//- If the channel `c` is closed then Expect(c).Should(BeSent("foo")) and Ω(c).ShouldNot(BeSent("foo")) will both fail immediately
+// - If the channel `c` is not ready to receive then Expect(c).Should(BeSent("foo")) will fail immediately
+// - If the channel `c` is eventually ready to receive then Eventually(c).Should(BeSent("foo")) will succeed.. presuming the channel becomes ready to receive before Eventually's timeout
+// - If the channel `c` is closed then Expect(c).Should(BeSent("foo")) and Ω(c).ShouldNot(BeSent("foo")) will both fail immediately
//
-//Of course, the value is actually sent to the channel. The point of `BeSent` is less to make an assertion about the availability of the channel (which is typically an implementation detail that your test should not be concerned with).
-//Rather, the point of `BeSent` is to make it possible to easily and expressively write tests that can timeout on blocked channel sends.
+// Of course, the value is actually sent to the channel. The point of `BeSent` is less to make an assertion about the availability of the channel (which is typically an implementation detail that your test should not be concerned with).
+// Rather, the point of `BeSent` is to make it possible to easily and expressively write tests that can timeout on blocked channel sends.
func BeSent(arg interface{}) types.GomegaMatcher {
return &matchers.BeSentMatcher{
Arg: arg,
}
}
-//MatchRegexp succeeds if actual is a string or stringer that matches the
-//passed-in regexp. Optional arguments can be provided to construct a regexp
-//via fmt.Sprintf().
+// MatchRegexp succeeds if actual is a string or stringer that matches the
+// passed-in regexp. Optional arguments can be provided to construct a regexp
+// via fmt.Sprintf().
func MatchRegexp(regexp string, args ...interface{}) types.GomegaMatcher {
return &matchers.MatchRegexpMatcher{
Regexp: regexp,
@@ -175,9 +197,9 @@ func MatchRegexp(regexp string, args ...interface{}) types.GomegaMatcher {
}
}
-//ContainSubstring succeeds if actual is a string or stringer that contains the
-//passed-in substring. Optional arguments can be provided to construct the substring
-//via fmt.Sprintf().
+// ContainSubstring succeeds if actual is a string or stringer that contains the
+// passed-in substring. Optional arguments can be provided to construct the substring
+// via fmt.Sprintf().
func ContainSubstring(substr string, args ...interface{}) types.GomegaMatcher {
return &matchers.ContainSubstringMatcher{
Substr: substr,
@@ -185,9 +207,9 @@ func ContainSubstring(substr string, args ...interface{}) types.GomegaMatcher {
}
}
-//HavePrefix succeeds if actual is a string or stringer that contains the
-//passed-in string as a prefix. Optional arguments can be provided to construct
-//via fmt.Sprintf().
+// HavePrefix succeeds if actual is a string or stringer that contains the
+// passed-in string as a prefix. Optional arguments can be provided to construct
+// via fmt.Sprintf().
func HavePrefix(prefix string, args ...interface{}) types.GomegaMatcher {
return &matchers.HavePrefixMatcher{
Prefix: prefix,
@@ -195,9 +217,9 @@ func HavePrefix(prefix string, args ...interface{}) types.GomegaMatcher {
}
}
-//HaveSuffix succeeds if actual is a string or stringer that contains the
-//passed-in string as a suffix. Optional arguments can be provided to construct
-//via fmt.Sprintf().
+// HaveSuffix succeeds if actual is a string or stringer that contains the
+// passed-in string as a suffix. Optional arguments can be provided to construct
+// via fmt.Sprintf().
func HaveSuffix(suffix string, args ...interface{}) types.GomegaMatcher {
return &matchers.HaveSuffixMatcher{
Suffix: suffix,
@@ -205,136 +227,191 @@ func HaveSuffix(suffix string, args ...interface{}) types.GomegaMatcher {
}
}
-//MatchJSON succeeds if actual is a string or stringer of JSON that matches
-//the expected JSON. The JSONs are decoded and the resulting objects are compared via
-//reflect.DeepEqual so things like key-ordering and whitespace shouldn't matter.
+// MatchJSON succeeds if actual is a string or stringer of JSON that matches
+// the expected JSON. The JSONs are decoded and the resulting objects are compared via
+// reflect.DeepEqual so things like key-ordering and whitespace shouldn't matter.
func MatchJSON(json interface{}) types.GomegaMatcher {
return &matchers.MatchJSONMatcher{
JSONToMatch: json,
}
}
-//MatchXML succeeds if actual is a string or stringer of XML that matches
-//the expected XML. The XMLs are decoded and the resulting objects are compared via
-//reflect.DeepEqual so things like whitespaces shouldn't matter.
+// MatchXML succeeds if actual is a string or stringer of XML that matches
+// the expected XML. The XMLs are decoded and the resulting objects are compared via
+// reflect.DeepEqual so things like whitespaces shouldn't matter.
func MatchXML(xml interface{}) types.GomegaMatcher {
return &matchers.MatchXMLMatcher{
XMLToMatch: xml,
}
}
-//MatchYAML succeeds if actual is a string or stringer of YAML that matches
-//the expected YAML. The YAML's are decoded and the resulting objects are compared via
-//reflect.DeepEqual so things like key-ordering and whitespace shouldn't matter.
+// MatchYAML succeeds if actual is a string or stringer of YAML that matches
+// the expected YAML. The YAML's are decoded and the resulting objects are compared via
+// reflect.DeepEqual so things like key-ordering and whitespace shouldn't matter.
func MatchYAML(yaml interface{}) types.GomegaMatcher {
return &matchers.MatchYAMLMatcher{
YAMLToMatch: yaml,
}
}
-//BeEmpty succeeds if actual is empty. Actual must be of type string, array, map, chan, or slice.
+// BeEmpty succeeds if actual is empty. Actual must be of type string, array, map, chan, or slice.
func BeEmpty() types.GomegaMatcher {
return &matchers.BeEmptyMatcher{}
}
-//HaveLen succeeds if actual has the passed-in length. Actual must be of type string, array, map, chan, or slice.
+// HaveLen succeeds if actual has the passed-in length. Actual must be of type string, array, map, chan, or slice.
func HaveLen(count int) types.GomegaMatcher {
return &matchers.HaveLenMatcher{
Count: count,
}
}
-//HaveCap succeeds if actual has the passed-in capacity. Actual must be of type array, chan, or slice.
+// HaveCap succeeds if actual has the passed-in capacity. Actual must be of type array, chan, or slice.
func HaveCap(count int) types.GomegaMatcher {
return &matchers.HaveCapMatcher{
Count: count,
}
}
-//BeZero succeeds if actual is the zero value for its type or if actual is nil.
+// BeZero succeeds if actual is the zero value for its type or if actual is nil.
func BeZero() types.GomegaMatcher {
return &matchers.BeZeroMatcher{}
}
-//ContainElement succeeds if actual contains the passed in element.
-//By default ContainElement() uses Equal() to perform the match, however a
-//matcher can be passed in instead:
-// Expect([]string{"Foo", "FooBar"}).Should(ContainElement(ContainSubstring("Bar")))
+// ContainElement succeeds if actual contains the passed in element. By default
+// ContainElement() uses Equal() to perform the match, however a matcher can be
+// passed in instead:
//
-//Actual must be an array, slice or map.
-//For maps, ContainElement searches through the map's values.
-func ContainElement(element interface{}) types.GomegaMatcher {
+// Expect([]string{"Foo", "FooBar"}).Should(ContainElement(ContainSubstring("Bar")))
+//
+// Actual must be an array, slice or map. For maps, ContainElement searches
+// through the map's values.
+//
+// If you want to have a copy of the matching element(s) found you can pass a
+// pointer to a variable of the appropriate type. If the variable isn't a slice
+// or map, then exactly one match will be expected and returned. If the variable
+// is a slice or map, then at least one match is expected and all matches will be
+// stored in the variable.
+//
+// var findings []string
+// Expect([]string{"Foo", "FooBar"}).Should(ContainElement(ContainSubstring("Bar"), &findings))
+func ContainElement(element interface{}, result ...interface{}) types.GomegaMatcher {
return &matchers.ContainElementMatcher{
Element: element,
+ Result: result,
}
}
-//BeElementOf succeeds if actual is contained in the passed in elements.
-//BeElementOf() always uses Equal() to perform the match.
-//When the passed in elements are comprised of a single element that is either an Array or Slice, BeElementOf() behaves
-//as the reverse of ContainElement() that operates with Equal() to perform the match.
-// Expect(2).Should(BeElementOf([]int{1, 2}))
-// Expect(2).Should(BeElementOf([2]int{1, 2}))
-//Otherwise, BeElementOf() provides a syntactic sugar for Or(Equal(_), Equal(_), ...):
-// Expect(2).Should(BeElementOf(1, 2))
+// BeElementOf succeeds if actual is contained in the passed in elements.
+// BeElementOf() always uses Equal() to perform the match.
+// When the passed in elements are comprised of a single element that is either an Array or Slice, BeElementOf() behaves
+// as the reverse of ContainElement() that operates with Equal() to perform the match.
+//
+// Expect(2).Should(BeElementOf([]int{1, 2}))
+// Expect(2).Should(BeElementOf([2]int{1, 2}))
//
-//Actual must be typed.
+// Otherwise, BeElementOf() provides a syntactic sugar for Or(Equal(_), Equal(_), ...):
+//
+// Expect(2).Should(BeElementOf(1, 2))
+//
+// Actual must be typed.
func BeElementOf(elements ...interface{}) types.GomegaMatcher {
return &matchers.BeElementOfMatcher{
Elements: elements,
}
}
-//ConsistOf succeeds if actual contains precisely the elements passed into the matcher. The ordering of the elements does not matter.
-//By default ConsistOf() uses Equal() to match the elements, however custom matchers can be passed in instead. Here are some examples:
+// BeKeyOf succeeds if actual is contained in the keys of the passed in map.
+// BeKeyOf() always uses Equal() to perform the match between actual and the map keys.
+//
+// Expect("foo").Should(BeKeyOf(map[string]bool{"foo": true, "bar": false}))
+func BeKeyOf(element interface{}) types.GomegaMatcher {
+ return &matchers.BeKeyOfMatcher{
+ Map: element,
+ }
+}
+
+// ConsistOf succeeds if actual contains precisely the elements passed into the matcher. The ordering of the elements does not matter.
+// By default ConsistOf() uses Equal() to match the elements, however custom matchers can be passed in instead. Here are some examples:
//
-// Expect([]string{"Foo", "FooBar"}).Should(ConsistOf("FooBar", "Foo"))
-// Expect([]string{"Foo", "FooBar"}).Should(ConsistOf(ContainSubstring("Bar"), "Foo"))
-// Expect([]string{"Foo", "FooBar"}).Should(ConsistOf(ContainSubstring("Foo"), ContainSubstring("Foo")))
+// Expect([]string{"Foo", "FooBar"}).Should(ConsistOf("FooBar", "Foo"))
+// Expect([]string{"Foo", "FooBar"}).Should(ConsistOf(ContainSubstring("Bar"), "Foo"))
+// Expect([]string{"Foo", "FooBar"}).Should(ConsistOf(ContainSubstring("Foo"), ContainSubstring("Foo")))
//
-//Actual must be an array, slice or map. For maps, ConsistOf matches against the map's values.
+// Actual must be an array, slice or map. For maps, ConsistOf matches against the map's values.
//
-//You typically pass variadic arguments to ConsistOf (as in the examples above). However, if you need to pass in a slice you can provided that it
-//is the only element passed in to ConsistOf:
+// You typically pass variadic arguments to ConsistOf (as in the examples above). However, if you need to pass in a slice you can provided that it
+// is the only element passed in to ConsistOf:
//
-// Expect([]string{"Foo", "FooBar"}).Should(ConsistOf([]string{"FooBar", "Foo"}))
+// Expect([]string{"Foo", "FooBar"}).Should(ConsistOf([]string{"FooBar", "Foo"}))
//
-//Note that Go's type system does not allow you to write this as ConsistOf([]string{"FooBar", "Foo"}...) as []string and []interface{} are different types - hence the need for this special rule.
+// Note that Go's type system does not allow you to write this as ConsistOf([]string{"FooBar", "Foo"}...) as []string and []interface{} are different types - hence the need for this special rule.
func ConsistOf(elements ...interface{}) types.GomegaMatcher {
return &matchers.ConsistOfMatcher{
Elements: elements,
}
}
-//ContainElements succeeds if actual contains the passed in elements. The ordering of the elements does not matter.
-//By default ContainElements() uses Equal() to match the elements, however custom matchers can be passed in instead. Here are some examples:
+// HaveExactElements succeeds if actual contains elements that precisely match the elements passed into the matcher. The ordering of the elements does matter.
+// By default HaveExactElements() uses Equal() to match the elements, however custom matchers can be passed in instead. Here are some examples:
+//
+// Expect([]string{"Foo", "FooBar"}).Should(HaveExactElements("Foo", "FooBar"))
+// Expect([]string{"Foo", "FooBar"}).Should(HaveExactElements("Foo", ContainSubstring("Bar")))
+// Expect([]string{"Foo", "FooBar"}).Should(HaveExactElements(ContainSubstring("Foo"), ContainSubstring("Foo")))
+//
+// Actual must be an array or slice.
+func HaveExactElements(elements ...interface{}) types.GomegaMatcher {
+ return &matchers.HaveExactElementsMatcher{
+ Elements: elements,
+ }
+}
+
+// ContainElements succeeds if actual contains the passed in elements. The ordering of the elements does not matter.
+// By default ContainElements() uses Equal() to match the elements, however custom matchers can be passed in instead. Here are some examples:
//
-// Expect([]string{"Foo", "FooBar"}).Should(ContainElements("FooBar"))
-// Expect([]string{"Foo", "FooBar"}).Should(ContainElements(ContainSubstring("Bar"), "Foo"))
+// Expect([]string{"Foo", "FooBar"}).Should(ContainElements("FooBar"))
+// Expect([]string{"Foo", "FooBar"}).Should(ContainElements(ContainSubstring("Bar"), "Foo"))
//
-//Actual must be an array, slice or map.
-//For maps, ContainElements searches through the map's values.
+// Actual must be an array, slice or map.
+// For maps, ContainElements searches through the map's values.
func ContainElements(elements ...interface{}) types.GomegaMatcher {
return &matchers.ContainElementsMatcher{
Elements: elements,
}
}
-//HaveKey succeeds if actual is a map with the passed in key.
-//By default HaveKey uses Equal() to perform the match, however a
-//matcher can be passed in instead:
-// Expect(map[string]string{"Foo": "Bar", "BazFoo": "Duck"}).Should(HaveKey(MatchRegexp(`.+Foo$`)))
+// HaveEach succeeds if actual solely contains elements that match the passed in element.
+// Please note that if actual is empty, HaveEach always will succeed.
+// By default HaveEach() uses Equal() to perform the match, however a
+// matcher can be passed in instead:
+//
+// Expect([]string{"Foo", "FooBar"}).Should(HaveEach(ContainSubstring("Foo")))
+//
+// Actual must be an array, slice or map.
+// For maps, HaveEach searches through the map's values.
+func HaveEach(element interface{}) types.GomegaMatcher {
+ return &matchers.HaveEachMatcher{
+ Element: element,
+ }
+}
+
+// HaveKey succeeds if actual is a map with the passed in key.
+// By default HaveKey uses Equal() to perform the match, however a
+// matcher can be passed in instead:
+//
+// Expect(map[string]string{"Foo": "Bar", "BazFoo": "Duck"}).Should(HaveKey(MatchRegexp(`.+Foo$`)))
func HaveKey(key interface{}) types.GomegaMatcher {
return &matchers.HaveKeyMatcher{
Key: key,
}
}
-//HaveKeyWithValue succeeds if actual is a map with the passed in key and value.
-//By default HaveKeyWithValue uses Equal() to perform the match, however a
-//matcher can be passed in instead:
-// Expect(map[string]string{"Foo": "Bar", "BazFoo": "Duck"}).Should(HaveKeyWithValue("Foo", "Bar"))
-// Expect(map[string]string{"Foo": "Bar", "BazFoo": "Duck"}).Should(HaveKeyWithValue(MatchRegexp(`.+Foo$`), "Bar"))
+// HaveKeyWithValue succeeds if actual is a map with the passed in key and value.
+// By default HaveKeyWithValue uses Equal() to perform the match, however a
+// matcher can be passed in instead:
+//
+// Expect(map[string]string{"Foo": "Bar", "BazFoo": "Duck"}).Should(HaveKeyWithValue("Foo", "Bar"))
+// Expect(map[string]string{"Foo": "Bar", "BazFoo": "Duck"}).Should(HaveKeyWithValue(MatchRegexp(`.+Foo$`), "Bar"))
func HaveKeyWithValue(key interface{}, value interface{}) types.GomegaMatcher {
return &matchers.HaveKeyWithValueMatcher{
Key: key,
@@ -342,17 +419,79 @@ func HaveKeyWithValue(key interface{}, value interface{}) types.GomegaMatcher {
}
}
-//BeNumerically performs numerical assertions in a type-agnostic way.
-//Actual and expected should be numbers, though the specific type of
-//number is irrelevant (float32, float64, uint8, etc...).
+// HaveField succeeds if actual is a struct and the value at the passed in field
+// matches the passed in matcher. By default HaveField uses Equal() to perform the match,
+// however a matcher can be passed in instead.
+//
+// The field must be a string that resolves to the name of a field in the struct. Structs can be traversed
+// using the '.' delimiter. If the field ends with '()' a method named field is assumed to exist on the struct and is invoked.
+// Such methods must take no arguments and return a single value:
+//
+// type Book struct {
+// Title string
+// Author Person
+// }
+// type Person struct {
+// FirstName string
+// LastName string
+// DOB time.Time
+// }
+// Expect(book).To(HaveField("Title", "Les Miserables"))
+// Expect(book).To(HaveField("Title", ContainSubstring("Les"))
+// Expect(book).To(HaveField("Author.FirstName", Equal("Victor"))
+// Expect(book).To(HaveField("Author.DOB.Year()", BeNumerically("<", 1900))
+func HaveField(field string, expected interface{}) types.GomegaMatcher {
+ return &matchers.HaveFieldMatcher{
+ Field: field,
+ Expected: expected,
+ }
+}
+
+// HaveExistingField succeeds if actual is a struct and the specified field
+// exists.
+//
+// HaveExistingField can be combined with HaveField in order to cover use cases
+// with optional fields. HaveField alone would trigger an error in such situations.
+//
+// Expect(MrHarmless).NotTo(And(HaveExistingField("Title"), HaveField("Title", "Supervillain")))
+func HaveExistingField(field string) types.GomegaMatcher {
+ return &matchers.HaveExistingFieldMatcher{
+ Field: field,
+ }
+}
+
+// HaveValue applies the given matcher to the value of actual, optionally and
+// repeatedly dereferencing pointers or taking the concrete value of interfaces.
+// Thus, the matcher will always be applied to non-pointer and non-interface
+// values only. HaveValue will fail with an error if a pointer or interface is
+// nil. It will also fail for more than 31 pointer or interface dereferences to
+// guard against mistakenly applying it to arbitrarily deep linked pointers.
+//
+// HaveValue differs from gstruct.PointTo in that it does not expect actual to
+// be a pointer (as gstruct.PointTo does) but instead also accepts non-pointer
+// and even interface values.
+//
+// actual := 42
+// Expect(actual).To(HaveValue(42))
+// Expect(&actual).To(HaveValue(42))
+func HaveValue(matcher types.GomegaMatcher) types.GomegaMatcher {
+ return &matchers.HaveValueMatcher{
+ Matcher: matcher,
+ }
+}
+
+// BeNumerically performs numerical assertions in a type-agnostic way.
+// Actual and expected should be numbers, though the specific type of
+// number is irrelevant (float32, float64, uint8, etc...).
//
-//There are six, self-explanatory, supported comparators:
-// Expect(1.0).Should(BeNumerically("==", 1))
-// Expect(1.0).Should(BeNumerically("~", 0.999, 0.01))
-// Expect(1.0).Should(BeNumerically(">", 0.9))
-// Expect(1.0).Should(BeNumerically(">=", 1.0))
-// Expect(1.0).Should(BeNumerically("<", 3))
-// Expect(1.0).Should(BeNumerically("<=", 1.0))
+// There are six, self-explanatory, supported comparators:
+//
+// Expect(1.0).Should(BeNumerically("==", 1))
+// Expect(1.0).Should(BeNumerically("~", 0.999, 0.01))
+// Expect(1.0).Should(BeNumerically(">", 0.9))
+// Expect(1.0).Should(BeNumerically(">=", 1.0))
+// Expect(1.0).Should(BeNumerically("<", 3))
+// Expect(1.0).Should(BeNumerically("<=", 1.0))
func BeNumerically(comparator string, compareTo ...interface{}) types.GomegaMatcher {
return &matchers.BeNumericallyMatcher{
Comparator: comparator,
@@ -360,10 +499,11 @@ func BeNumerically(comparator string, compareTo ...interface{}) types.GomegaMatc
}
}
-//BeTemporally compares time.Time's like BeNumerically
-//Actual and expected must be time.Time. The comparators are the same as for BeNumerically
-// Expect(time.Now()).Should(BeTemporally(">", time.Time{}))
-// Expect(time.Now()).Should(BeTemporally("~", time.Now(), time.Second))
+// BeTemporally compares time.Time's like BeNumerically
+// Actual and expected must be time.Time. The comparators are the same as for BeNumerically
+//
+// Expect(time.Now()).Should(BeTemporally(">", time.Time{}))
+// Expect(time.Now()).Should(BeTemporally("~", time.Now(), time.Second))
func BeTemporally(comparator string, compareTo time.Time, threshold ...time.Duration) types.GomegaMatcher {
return &matchers.BeTemporallyMatcher{
Comparator: comparator,
@@ -372,58 +512,61 @@ func BeTemporally(comparator string, compareTo time.Time, threshold ...time.Dura
}
}
-//BeAssignableToTypeOf succeeds if actual is assignable to the type of expected.
-//It will return an error when one of the values is nil.
-// Expect(0).Should(BeAssignableToTypeOf(0)) // Same values
-// Expect(5).Should(BeAssignableToTypeOf(-1)) // different values same type
-// Expect("foo").Should(BeAssignableToTypeOf("bar")) // different values same type
-// Expect(struct{ Foo string }{}).Should(BeAssignableToTypeOf(struct{ Foo string }{}))
+// BeAssignableToTypeOf succeeds if actual is assignable to the type of expected.
+// It will return an error when one of the values is nil.
+//
+// Expect(0).Should(BeAssignableToTypeOf(0)) // Same values
+// Expect(5).Should(BeAssignableToTypeOf(-1)) // different values same type
+// Expect("foo").Should(BeAssignableToTypeOf("bar")) // different values same type
+// Expect(struct{ Foo string }{}).Should(BeAssignableToTypeOf(struct{ Foo string }{}))
func BeAssignableToTypeOf(expected interface{}) types.GomegaMatcher {
return &matchers.AssignableToTypeOfMatcher{
Expected: expected,
}
}
-//Panic succeeds if actual is a function that, when invoked, panics.
-//Actual must be a function that takes no arguments and returns no results.
+// Panic succeeds if actual is a function that, when invoked, panics.
+// Actual must be a function that takes no arguments and returns no results.
func Panic() types.GomegaMatcher {
return &matchers.PanicMatcher{}
}
-//PanicWith succeeds if actual is a function that, when invoked, panics with a specific value.
-//Actual must be a function that takes no arguments and returns no results.
+// PanicWith succeeds if actual is a function that, when invoked, panics with a specific value.
+// Actual must be a function that takes no arguments and returns no results.
+//
+// By default PanicWith uses Equal() to perform the match, however a
+// matcher can be passed in instead:
//
-//By default PanicWith uses Equal() to perform the match, however a
-//matcher can be passed in instead:
-// Expect(fn).Should(PanicWith(MatchRegexp(`.+Foo$`)))
+// Expect(fn).Should(PanicWith(MatchRegexp(`.+Foo$`)))
func PanicWith(expected interface{}) types.GomegaMatcher {
return &matchers.PanicMatcher{Expected: expected}
}
-//BeAnExistingFile succeeds if a file exists.
-//Actual must be a string representing the abs path to the file being checked.
+// BeAnExistingFile succeeds if a file exists.
+// Actual must be a string representing the abs path to the file being checked.
func BeAnExistingFile() types.GomegaMatcher {
return &matchers.BeAnExistingFileMatcher{}
}
-//BeARegularFile succeeds if a file exists and is a regular file.
-//Actual must be a string representing the abs path to the file being checked.
+// BeARegularFile succeeds if a file exists and is a regular file.
+// Actual must be a string representing the abs path to the file being checked.
func BeARegularFile() types.GomegaMatcher {
return &matchers.BeARegularFileMatcher{}
}
-//BeADirectory succeeds if a file exists and is a directory.
-//Actual must be a string representing the abs path to the file being checked.
+// BeADirectory succeeds if a file exists and is a directory.
+// Actual must be a string representing the abs path to the file being checked.
func BeADirectory() types.GomegaMatcher {
return &matchers.BeADirectoryMatcher{}
}
-//HaveHTTPStatus succeeds if the Status or StatusCode field of an HTTP response matches.
-//Actual must be either a *http.Response or *httptest.ResponseRecorder.
-//Expected must be either an int or a string.
-// Expect(resp).Should(HaveHTTPStatus(http.StatusOK)) // asserts that resp.StatusCode == 200
-// Expect(resp).Should(HaveHTTPStatus("404 Not Found")) // asserts that resp.Status == "404 Not Found"
-// Expect(resp).Should(HaveHTTPStatus(http.StatusOK, http.StatusNoContent)) // asserts that resp.StatusCode == 200 || resp.StatusCode == 204
+// HaveHTTPStatus succeeds if the Status or StatusCode field of an HTTP response matches.
+// Actual must be either a *http.Response or *httptest.ResponseRecorder.
+// Expected must be either an int or a string.
+//
+// Expect(resp).Should(HaveHTTPStatus(http.StatusOK)) // asserts that resp.StatusCode == 200
+// Expect(resp).Should(HaveHTTPStatus("404 Not Found")) // asserts that resp.Status == "404 Not Found"
+// Expect(resp).Should(HaveHTTPStatus(http.StatusOK, http.StatusNoContent)) // asserts that resp.StatusCode == 200 || resp.StatusCode == 204
func HaveHTTPStatus(expected ...interface{}) types.GomegaMatcher {
return &matchers.HaveHTTPStatusMatcher{Expected: expected}
}
@@ -446,58 +589,70 @@ func HaveHTTPBody(expected interface{}) types.GomegaMatcher {
return &matchers.HaveHTTPBodyMatcher{Expected: expected}
}
-//And succeeds only if all of the given matchers succeed.
-//The matchers are tried in order, and will fail-fast if one doesn't succeed.
-// Expect("hi").To(And(HaveLen(2), Equal("hi"))
+// And succeeds only if all of the given matchers succeed.
+// The matchers are tried in order, and will fail-fast if one doesn't succeed.
//
-//And(), Or(), Not() and WithTransform() allow matchers to be composed into complex expressions.
+// Expect("hi").To(And(HaveLen(2), Equal("hi"))
+//
+// And(), Or(), Not() and WithTransform() allow matchers to be composed into complex expressions.
func And(ms ...types.GomegaMatcher) types.GomegaMatcher {
return &matchers.AndMatcher{Matchers: ms}
}
-//SatisfyAll is an alias for And().
-// Expect("hi").Should(SatisfyAll(HaveLen(2), Equal("hi")))
+// SatisfyAll is an alias for And().
+//
+// Expect("hi").Should(SatisfyAll(HaveLen(2), Equal("hi")))
func SatisfyAll(matchers ...types.GomegaMatcher) types.GomegaMatcher {
return And(matchers...)
}
-//Or succeeds if any of the given matchers succeed.
-//The matchers are tried in order and will return immediately upon the first successful match.
-// Expect("hi").To(Or(HaveLen(3), HaveLen(2))
+// Or succeeds if any of the given matchers succeed.
+// The matchers are tried in order and will return immediately upon the first successful match.
//
-//And(), Or(), Not() and WithTransform() allow matchers to be composed into complex expressions.
+// Expect("hi").To(Or(HaveLen(3), HaveLen(2))
+//
+// And(), Or(), Not() and WithTransform() allow matchers to be composed into complex expressions.
func Or(ms ...types.GomegaMatcher) types.GomegaMatcher {
return &matchers.OrMatcher{Matchers: ms}
}
-//SatisfyAny is an alias for Or().
-// Expect("hi").SatisfyAny(Or(HaveLen(3), HaveLen(2))
+// SatisfyAny is an alias for Or().
+//
+// Expect("hi").SatisfyAny(Or(HaveLen(3), HaveLen(2))
func SatisfyAny(matchers ...types.GomegaMatcher) types.GomegaMatcher {
return Or(matchers...)
}
-//Not negates the given matcher; it succeeds if the given matcher fails.
-// Expect(1).To(Not(Equal(2))
+// Not negates the given matcher; it succeeds if the given matcher fails.
//
-//And(), Or(), Not() and WithTransform() allow matchers to be composed into complex expressions.
+// Expect(1).To(Not(Equal(2))
+//
+// And(), Or(), Not() and WithTransform() allow matchers to be composed into complex expressions.
func Not(matcher types.GomegaMatcher) types.GomegaMatcher {
return &matchers.NotMatcher{Matcher: matcher}
}
-//WithTransform applies the `transform` to the actual value and matches it against `matcher`.
-//The given transform must be a function of one parameter that returns one value.
-// var plus1 = func(i int) int { return i + 1 }
-// Expect(1).To(WithTransform(plus1, Equal(2))
+// WithTransform applies the `transform` to the actual value and matches it against `matcher`.
+// The given transform must be either a function of one parameter that returns one value or a
+// function of one parameter that returns two values, where the second value must be of the
+// error type.
+//
+// var plus1 = func(i int) int { return i + 1 }
+// Expect(1).To(WithTransform(plus1, Equal(2))
//
-//And(), Or(), Not() and WithTransform() allow matchers to be composed into complex expressions.
+// var failingplus1 = func(i int) (int, error) { return 42, "this does not compute" }
+// Expect(1).To(WithTransform(failingplus1, Equal(2)))
+//
+// And(), Or(), Not() and WithTransform() allow matchers to be composed into complex expressions.
func WithTransform(transform interface{}, matcher types.GomegaMatcher) types.GomegaMatcher {
return matchers.NewWithTransformMatcher(transform, matcher)
}
-//Satisfy matches the actual value against the `predicate` function.
-//The given predicate must be a function of one paramter that returns bool.
-// var isEven = func(i int) bool { return i%2 == 0 }
-// Expect(2).To(Satisfy(isEven))
+// Satisfy matches the actual value against the `predicate` function.
+// The given predicate must be a function of one parameter that returns bool.
+//
+// var isEven = func(i int) bool { return i%2 == 0 }
+// Expect(2).To(Satisfy(isEven))
func Satisfy(predicate interface{}) types.GomegaMatcher {
return matchers.NewSatisfyMatcher(predicate)
}
diff --git a/vendor/github.com/onsi/gomega/matchers/be_comparable_to_matcher.go b/vendor/github.com/onsi/gomega/matchers/be_comparable_to_matcher.go
new file mode 100644
index 000000000..8ab4bb919
--- /dev/null
+++ b/vendor/github.com/onsi/gomega/matchers/be_comparable_to_matcher.go
@@ -0,0 +1,49 @@
+package matchers
+
+import (
+ "bytes"
+ "fmt"
+
+ "github.com/google/go-cmp/cmp"
+ "github.com/onsi/gomega/format"
+)
+
+type BeComparableToMatcher struct {
+ Expected interface{}
+ Options cmp.Options
+}
+
+func (matcher *BeComparableToMatcher) Match(actual interface{}) (success bool, matchErr error) {
+ if actual == nil && matcher.Expected == nil {
+ return false, fmt.Errorf("Refusing to compare <nil> to <nil>.\nBe explicit and use BeNil() instead. This is to avoid mistakes where both sides of an assertion are erroneously uninitialized.")
+ }
+ // Shortcut for byte slices.
+ // Comparing long byte slices with reflect.DeepEqual is very slow,
+ // so use bytes.Equal if actual and expected are both byte slices.
+ if actualByteSlice, ok := actual.([]byte); ok {
+ if expectedByteSlice, ok := matcher.Expected.([]byte); ok {
+ return bytes.Equal(actualByteSlice, expectedByteSlice), nil
+ }
+ }
+
+ defer func() {
+ if r := recover(); r != nil {
+ success = false
+ if err, ok := r.(error); ok {
+ matchErr = err
+ } else if errMsg, ok := r.(string); ok {
+ matchErr = fmt.Errorf(errMsg)
+ }
+ }
+ }()
+
+ return cmp.Equal(actual, matcher.Expected, matcher.Options...), nil
+}
+
+func (matcher *BeComparableToMatcher) FailureMessage(actual interface{}) (message string) {
+ return cmp.Diff(matcher.Expected, actual, matcher.Options)
+}
+
+func (matcher *BeComparableToMatcher) NegatedFailureMessage(actual interface{}) (message string) {
+ return format.Message(actual, "not to equal", matcher.Expected)
+}
diff --git a/vendor/github.com/onsi/gomega/matchers/be_key_of_matcher.go b/vendor/github.com/onsi/gomega/matchers/be_key_of_matcher.go
new file mode 100644
index 000000000..449a291ef
--- /dev/null
+++ b/vendor/github.com/onsi/gomega/matchers/be_key_of_matcher.go
@@ -0,0 +1,45 @@
+package matchers
+
+import (
+ "fmt"
+ "reflect"
+
+ "github.com/onsi/gomega/format"
+)
+
+type BeKeyOfMatcher struct {
+ Map interface{}
+}
+
+func (matcher *BeKeyOfMatcher) Match(actual interface{}) (success bool, err error) {
+ if !isMap(matcher.Map) {
+ return false, fmt.Errorf("BeKeyOf matcher needs expected to be a map type")
+ }
+
+ if reflect.TypeOf(actual) == nil {
+ return false, fmt.Errorf("BeKeyOf matcher expects actual to be typed")
+ }
+
+ var lastError error
+ for _, key := range reflect.ValueOf(matcher.Map).MapKeys() {
+ matcher := &EqualMatcher{Expected: key.Interface()}
+ success, err := matcher.Match(actual)
+ if err != nil {
+ lastError = err
+ continue
+ }
+ if success {
+ return true, nil
+ }
+ }
+
+ return false, lastError
+}
+
+func (matcher *BeKeyOfMatcher) FailureMessage(actual interface{}) (message string) {
+ return format.Message(actual, "to be a key of", presentable(valuesOf(matcher.Map)))
+}
+
+func (matcher *BeKeyOfMatcher) NegatedFailureMessage(actual interface{}) (message string) {
+ return format.Message(actual, "not to be a key of", presentable(valuesOf(matcher.Map)))
+}
diff --git a/vendor/github.com/onsi/gomega/matchers/consist_of.go b/vendor/github.com/onsi/gomega/matchers/consist_of.go
index e8ef0dee1..f69037a4f 100644
--- a/vendor/github.com/onsi/gomega/matchers/consist_of.go
+++ b/vendor/github.com/onsi/gomega/matchers/consist_of.go
@@ -48,11 +48,13 @@ func neighbours(value, matcher interface{}) (bool, error) {
func equalMatchersToElements(matchers []interface{}) (elements []interface{}) {
for _, matcher := range matchers {
- equalMatcher, ok := matcher.(*EqualMatcher)
- if ok {
- matcher = equalMatcher.Expected
+ if equalMatcher, ok := matcher.(*EqualMatcher); ok {
+ elements = append(elements, equalMatcher.Expected)
+ } else if _, ok := matcher.(*BeNilMatcher); ok {
+ elements = append(elements, nil)
+ } else {
+ elements = append(elements, matcher)
}
- elements = append(elements, matcher)
}
return
}
@@ -72,11 +74,13 @@ func flatten(elems []interface{}) []interface{} {
func matchers(expectedElems []interface{}) (matchers []interface{}) {
for _, e := range flatten(expectedElems) {
- matcher, isMatcher := e.(omegaMatcher)
- if !isMatcher {
- matcher = &EqualMatcher{Expected: e}
+ if e == nil {
+ matchers = append(matchers, &BeNilMatcher{})
+ } else if matcher, isMatcher := e.(omegaMatcher); isMatcher {
+ matchers = append(matchers, matcher)
+ } else {
+ matchers = append(matchers, &EqualMatcher{Expected: e})
}
- matchers = append(matchers, matcher)
}
return
}
@@ -89,9 +93,14 @@ func presentable(elems []interface{}) interface{} {
}
sv := reflect.ValueOf(elems)
- tt := sv.Index(0).Elem().Type()
+ firstEl := sv.Index(0)
+ if firstEl.IsNil() {
+ return elems
+ }
+ tt := firstEl.Elem().Type()
for i := 1; i < sv.Len(); i++ {
- if sv.Index(i).Elem().Type() != tt {
+ el := sv.Index(i)
+ if el.IsNil() || (sv.Index(i).Elem().Type() != tt) {
return elems
}
}
diff --git a/vendor/github.com/onsi/gomega/matchers/contain_element_matcher.go b/vendor/github.com/onsi/gomega/matchers/contain_element_matcher.go
index 8d6c44c7a..3d45c9ebc 100644
--- a/vendor/github.com/onsi/gomega/matchers/contain_element_matcher.go
+++ b/vendor/github.com/onsi/gomega/matchers/contain_element_matcher.go
@@ -3,6 +3,7 @@
package matchers
import (
+ "errors"
"fmt"
"reflect"
@@ -11,6 +12,7 @@ import (
type ContainElementMatcher struct {
Element interface{}
+ Result []interface{}
}
func (matcher *ContainElementMatcher) Match(actual interface{}) (success bool, err error) {
@@ -18,6 +20,49 @@ func (matcher *ContainElementMatcher) Match(actual interface{}) (success bool, e
return false, fmt.Errorf("ContainElement matcher expects an array/slice/map. Got:\n%s", format.Object(actual, 1))
}
+ var actualT reflect.Type
+ var result reflect.Value
+ switch l := len(matcher.Result); {
+ case l > 1:
+ return false, errors.New("ContainElement matcher expects at most a single optional pointer to store its findings at")
+ case l == 1:
+ if reflect.ValueOf(matcher.Result[0]).Kind() != reflect.Ptr {
+ return false, fmt.Errorf("ContainElement matcher expects a non-nil pointer to store its findings at. Got\n%s",
+ format.Object(matcher.Result[0], 1))
+ }
+ actualT = reflect.TypeOf(actual)
+ resultReference := matcher.Result[0]
+ result = reflect.ValueOf(resultReference).Elem() // what ResultReference points to, to stash away our findings
+ switch result.Kind() {
+ case reflect.Array:
+ return false, fmt.Errorf("ContainElement cannot return findings. Need *%s, got *%s",
+ reflect.SliceOf(actualT.Elem()).String(), result.Type().String())
+ case reflect.Slice:
+ if !isArrayOrSlice(actual) {
+ return false, fmt.Errorf("ContainElement cannot return findings. Need *%s, got *%s",
+ reflect.MapOf(actualT.Key(), actualT.Elem()).String(), result.Type().String())
+ }
+ if !actualT.Elem().AssignableTo(result.Type().Elem()) {
+ return false, fmt.Errorf("ContainElement cannot return findings. Need *%s, got *%s",
+ actualT.String(), result.Type().String())
+ }
+ case reflect.Map:
+ if !isMap(actual) {
+ return false, fmt.Errorf("ContainElement cannot return findings. Need *%s, got *%s",
+ actualT.String(), result.Type().String())
+ }
+ if !actualT.AssignableTo(result.Type()) {
+ return false, fmt.Errorf("ContainElement cannot return findings. Need *%s, got *%s",
+ actualT.String(), result.Type().String())
+ }
+ default:
+ if !actualT.Elem().AssignableTo(result.Type()) {
+ return false, fmt.Errorf("ContainElement cannot return findings. Need *%s, got *%s",
+ actualT.Elem().String(), result.Type().String())
+ }
+ }
+ }
+
elemMatcher, elementIsMatcher := matcher.Element.(omegaMatcher)
if !elementIsMatcher {
elemMatcher = &EqualMatcher{Expected: matcher.Element}
@@ -25,30 +70,99 @@ func (matcher *ContainElementMatcher) Match(actual interface{}) (success bool, e
value := reflect.ValueOf(actual)
var valueAt func(int) interface{}
+
+ var getFindings func() reflect.Value
+ var foundAt func(int)
+
if isMap(actual) {
keys := value.MapKeys()
valueAt = func(i int) interface{} {
return value.MapIndex(keys[i]).Interface()
}
+ if result.Kind() != reflect.Invalid {
+ fm := reflect.MakeMap(actualT)
+ getFindings = func() reflect.Value {
+ return fm
+ }
+ foundAt = func(i int) {
+ fm.SetMapIndex(keys[i], value.MapIndex(keys[i]))
+ }
+ }
} else {
valueAt = func(i int) interface{} {
return value.Index(i).Interface()
}
+ if result.Kind() != reflect.Invalid {
+ var f reflect.Value
+ if result.Kind() == reflect.Slice {
+ f = reflect.MakeSlice(result.Type(), 0, 0)
+ } else {
+ f = reflect.MakeSlice(reflect.SliceOf(result.Type()), 0, 0)
+ }
+ getFindings = func() reflect.Value {
+ return f
+ }
+ foundAt = func(i int) {
+ f = reflect.Append(f, value.Index(i))
+ }
+ }
}
var lastError error
for i := 0; i < value.Len(); i++ {
- success, err := elemMatcher.Match(valueAt(i))
+ elem := valueAt(i)
+ success, err := elemMatcher.Match(elem)
if err != nil {
lastError = err
continue
}
if success {
- return true, nil
+ if result.Kind() == reflect.Invalid {
+ return true, nil
+ }
+ foundAt(i)
}
}
- return false, lastError
+ // when the expectation isn't interested in the findings except for success
+ // or non-success, then we're done here and return the last matcher error
+ // seen, if any, as well as non-success.
+ if result.Kind() == reflect.Invalid {
+ return false, lastError
+ }
+
+ // pick up any findings the test is interested in as it specified a non-nil
+ // result reference. However, the expectation always is that there are at
+ // least one or multiple findings. So, if a result is expected, but we had
+ // no findings, then this is an error.
+ findings := getFindings()
+ if findings.Len() == 0 {
+ return false, lastError
+ }
+
+ // there's just a single finding and the result is neither a slice nor a map
+ // (so it's a scalar): pick the one and only finding and return it in the
+ // place the reference points to.
+ if findings.Len() == 1 && !isArrayOrSlice(result.Interface()) && !isMap(result.Interface()) {
+ if isMap(actual) {
+ miter := findings.MapRange()
+ miter.Next()
+ result.Set(miter.Value())
+ } else {
+ result.Set(findings.Index(0))
+ }
+ return true, nil
+ }
+
+ // at least one or even multiple findings and the result references a
+ // slice or a map, so all we need to do is to store our findings where the
+ // reference points to.
+ if !findings.Type().AssignableTo(result.Type()) {
+ return false, fmt.Errorf("ContainElement cannot return multiple findings. Need *%s, got *%s",
+ findings.Type().String(), result.Type().String())
+ }
+ result.Set(findings)
+ return true, nil
}
func (matcher *ContainElementMatcher) FailureMessage(actual interface{}) (message string) {
diff --git a/vendor/github.com/onsi/gomega/matchers/have_each_matcher.go b/vendor/github.com/onsi/gomega/matchers/have_each_matcher.go
new file mode 100644
index 000000000..025b6e1ac
--- /dev/null
+++ b/vendor/github.com/onsi/gomega/matchers/have_each_matcher.go
@@ -0,0 +1,65 @@
+package matchers
+
+import (
+ "fmt"
+ "reflect"
+
+ "github.com/onsi/gomega/format"
+)
+
+type HaveEachMatcher struct {
+ Element interface{}
+}
+
+func (matcher *HaveEachMatcher) Match(actual interface{}) (success bool, err error) {
+ if !isArrayOrSlice(actual) && !isMap(actual) {
+ return false, fmt.Errorf("HaveEach matcher expects an array/slice/map. Got:\n%s",
+ format.Object(actual, 1))
+ }
+
+ elemMatcher, elementIsMatcher := matcher.Element.(omegaMatcher)
+ if !elementIsMatcher {
+ elemMatcher = &EqualMatcher{Expected: matcher.Element}
+ }
+
+ value := reflect.ValueOf(actual)
+ if value.Len() == 0 {
+ return false, fmt.Errorf("HaveEach matcher expects a non-empty array/slice/map. Got:\n%s",
+ format.Object(actual, 1))
+ }
+
+ var valueAt func(int) interface{}
+ if isMap(actual) {
+ keys := value.MapKeys()
+ valueAt = func(i int) interface{} {
+ return value.MapIndex(keys[i]).Interface()
+ }
+ } else {
+ valueAt = func(i int) interface{} {
+ return value.Index(i).Interface()
+ }
+ }
+
+ // if there are no elements, then HaveEach will match.
+ for i := 0; i < value.Len(); i++ {
+ success, err := elemMatcher.Match(valueAt(i))
+ if err != nil {
+ return false, err
+ }
+ if !success {
+ return false, nil
+ }
+ }
+
+ return true, nil
+}
+
+// FailureMessage returns a suitable failure message.
+func (matcher *HaveEachMatcher) FailureMessage(actual interface{}) (message string) {
+ return format.Message(actual, "to contain element matching", matcher.Element)
+}
+
+// NegatedFailureMessage returns a suitable negated failure message.
+func (matcher *HaveEachMatcher) NegatedFailureMessage(actual interface{}) (message string) {
+ return format.Message(actual, "not to contain element matching", matcher.Element)
+}
diff --git a/vendor/github.com/onsi/gomega/matchers/have_exact_elements.go b/vendor/github.com/onsi/gomega/matchers/have_exact_elements.go
new file mode 100644
index 000000000..7cce776c1
--- /dev/null
+++ b/vendor/github.com/onsi/gomega/matchers/have_exact_elements.go
@@ -0,0 +1,83 @@
+package matchers
+
+import (
+ "fmt"
+
+ "github.com/onsi/gomega/format"
+)
+
+type mismatchFailure struct {
+ failure string
+ index int
+}
+
+type HaveExactElementsMatcher struct {
+ Elements []interface{}
+ mismatchFailures []mismatchFailure
+ missingIndex int
+ extraIndex int
+}
+
+func (matcher *HaveExactElementsMatcher) Match(actual interface{}) (success bool, err error) {
+ matcher.resetState()
+
+ if isMap(actual) {
+ return false, fmt.Errorf("error")
+ }
+
+ matchers := matchers(matcher.Elements)
+ values := valuesOf(actual)
+
+ lenMatchers := len(matchers)
+ lenValues := len(values)
+
+ for i := 0; i < lenMatchers || i < lenValues; i++ {
+ if i >= lenMatchers {
+ matcher.extraIndex = i
+ continue
+ }
+
+ if i >= lenValues {
+ matcher.missingIndex = i
+ return
+ }
+
+ elemMatcher := matchers[i].(omegaMatcher)
+ match, err := elemMatcher.Match(values[i])
+ if err != nil || !match {
+ matcher.mismatchFailures = append(matcher.mismatchFailures, mismatchFailure{
+ index: i,
+ failure: elemMatcher.FailureMessage(values[i]),
+ })
+ }
+ }
+
+ return matcher.missingIndex+matcher.extraIndex+len(matcher.mismatchFailures) == 0, nil
+}
+
+func (matcher *HaveExactElementsMatcher) FailureMessage(actual interface{}) (message string) {
+ message = format.Message(actual, "to have exact elements with", presentable(matcher.Elements))
+ if matcher.missingIndex > 0 {
+ message = fmt.Sprintf("%s\nthe missing elements start from index %d", message, matcher.missingIndex)
+ }
+ if matcher.extraIndex > 0 {
+ message = fmt.Sprintf("%s\nthe extra elements start from index %d", message, matcher.extraIndex)
+ }
+ if len(matcher.mismatchFailures) != 0 {
+ message = fmt.Sprintf("%s\nthe mismatch indexes were:", message)
+ }
+ for _, mismatch := range matcher.mismatchFailures {
+ message = fmt.Sprintf("%s\n%d: %s", message, mismatch.index, mismatch.failure)
+ }
+ return
+}
+
+func (matcher *HaveExactElementsMatcher) NegatedFailureMessage(actual interface{}) (message string) {
+ return format.Message(actual, "not to contain elements", presentable(matcher.Elements))
+}
+
+func (matcher *HaveExactElementsMatcher) resetState() {
+ matcher.mismatchFailures = nil
+ matcher.missingIndex = 0
+ matcher.extraIndex = 0
+}
diff --git a/vendor/github.com/onsi/gomega/matchers/have_existing_field_matcher.go b/vendor/github.com/onsi/gomega/matchers/have_existing_field_matcher.go
new file mode 100644
index 000000000..b57018745
--- /dev/null
+++ b/vendor/github.com/onsi/gomega/matchers/have_existing_field_matcher.go
@@ -0,0 +1,36 @@
+package matchers
+
+import (
+ "errors"
+ "fmt"
+
+ "github.com/onsi/gomega/format"
+)
+
+type HaveExistingFieldMatcher struct {
+ Field string
+}
+
+func (matcher *HaveExistingFieldMatcher) Match(actual interface{}) (success bool, err error) {
+ // we don't care about the field's actual value, just about any error in
+ // trying to find the field (or method).
+ _, err = extractField(actual, matcher.Field, "HaveExistingField")
+ if err == nil {
+ return true, nil
+ }
+ var mferr missingFieldError
+ if errors.As(err, &mferr) {
+ // missing field errors aren't errors in this context, but instead
+ // unsuccessful matches.
+ return false, nil
+ }
+ return false, err
+}
+
+func (matcher *HaveExistingFieldMatcher) FailureMessage(actual interface{}) (message string) {
+ return fmt.Sprintf("Expected\n%s\nto have field '%s'", format.Object(actual, 1), matcher.Field)
+}
+
+func (matcher *HaveExistingFieldMatcher) NegatedFailureMessage(actual interface{}) (message string) {
+ return fmt.Sprintf("Expected\n%s\nnot to have field '%s'", format.Object(actual, 1), matcher.Field)
+}
diff --git a/vendor/github.com/onsi/gomega/matchers/have_field.go b/vendor/github.com/onsi/gomega/matchers/have_field.go
new file mode 100644
index 000000000..6989f78c4
--- /dev/null
+++ b/vendor/github.com/onsi/gomega/matchers/have_field.go
@@ -0,0 +1,99 @@
+package matchers
+
+import (
+ "fmt"
+ "reflect"
+ "strings"
+
+ "github.com/onsi/gomega/format"
+)
+
+// missingFieldError represents a missing field extraction error that
+// HaveExistingFieldMatcher can ignore, as opposed to other, severe field
+// extraction errors, such as nil pointers, et cetera.
+type missingFieldError string
+
+func (e missingFieldError) Error() string {
+ return string(e)
+}
+
+func extractField(actual interface{}, field string, matchername string) (interface{}, error) {
+ fields := strings.SplitN(field, ".", 2)
+ actualValue := reflect.ValueOf(actual)
+
+ if actualValue.Kind() == reflect.Ptr {
+ actualValue = actualValue.Elem()
+ }
+ if actualValue == (reflect.Value{}) {
+ return nil, fmt.Errorf("%s encountered nil while dereferencing a pointer of type %T.", matchername, actual)
+ }
+
+ if actualValue.Kind() != reflect.Struct {
+ return nil, fmt.Errorf("%s encountered:\n%s\nWhich is not a struct.", matchername, format.Object(actual, 1))
+ }
+
+ var extractedValue reflect.Value
+
+ if strings.HasSuffix(fields[0], "()") {
+ extractedValue = actualValue.MethodByName(strings.TrimSuffix(fields[0], "()"))
+ if extractedValue == (reflect.Value{}) && actualValue.CanAddr() {
+ extractedValue = actualValue.Addr().MethodByName(strings.TrimSuffix(fields[0], "()"))
+ }
+ if extractedValue == (reflect.Value{}) {
+ return nil, missingFieldError(fmt.Sprintf("%s could not find method named '%s' in struct of type %T.", matchername, fields[0], actual))
+ }
+ t := extractedValue.Type()
+ if t.NumIn() != 0 || t.NumOut() != 1 {
+ return nil, fmt.Errorf("%s found an invalid method named '%s' in struct of type %T.\nMethods must take no arguments and return exactly one value.", matchername, fields[0], actual)
+ }
+ extractedValue = extractedValue.Call([]reflect.Value{})[0]
+ } else {
+ extractedValue = actualValue.FieldByName(fields[0])
+ if extractedValue == (reflect.Value{}) {
+ return nil, missingFieldError(fmt.Sprintf("%s could not find field named '%s' in struct:\n%s", matchername, fields[0], format.Object(actual, 1)))
+ }
+ }
+
+ if len(fields) == 1 {
+ return extractedValue.Interface(), nil
+ } else {
+ return extractField(extractedValue.Interface(), fields[1], matchername)
+ }
+}
+
+type HaveFieldMatcher struct {
+ Field string
+ Expected interface{}
+
+ extractedField interface{}
+ expectedMatcher omegaMatcher
+}
+
+func (matcher *HaveFieldMatcher) Match(actual interface{}) (success bool, err error) {
+ matcher.extractedField, err = extractField(actual, matcher.Field, "HaveField")
+ if err != nil {
+ return false, err
+ }
+
+ var isMatcher bool
+ matcher.expectedMatcher, isMatcher = matcher.Expected.(omegaMatcher)
+ if !isMatcher {
+ matcher.expectedMatcher = &EqualMatcher{Expected: matcher.Expected}
+ }
+
+ return matcher.expectedMatcher.Match(matcher.extractedField)
+}
+
+func (matcher *HaveFieldMatcher) FailureMessage(actual interface{}) (message string) {
+ message = fmt.Sprintf("Value for field '%s' failed to satisfy matcher.\n", matcher.Field)
+ message += matcher.expectedMatcher.FailureMessage(matcher.extractedField)
+
+ return message
+}
+
+func (matcher *HaveFieldMatcher) NegatedFailureMessage(actual interface{}) (message string) {
+ message = fmt.Sprintf("Value for field '%s' satisfied matcher, but should not have.\n", matcher.Field)
+ message += matcher.expectedMatcher.NegatedFailureMessage(matcher.extractedField)
+
+ return message
+}
diff --git a/vendor/github.com/onsi/gomega/matchers/have_http_body_matcher.go b/vendor/github.com/onsi/gomega/matchers/have_http_body_matcher.go
index 66cbb254a..6a3dcdc35 100644
--- a/vendor/github.com/onsi/gomega/matchers/have_http_body_matcher.go
+++ b/vendor/github.com/onsi/gomega/matchers/have_http_body_matcher.go
@@ -2,11 +2,11 @@ package matchers
import (
"fmt"
- "io/ioutil"
"net/http"
"net/http/httptest"
"github.com/onsi/gomega/format"
+ "github.com/onsi/gomega/internal/gutil"
"github.com/onsi/gomega/types"
)
@@ -81,7 +81,7 @@ func (matcher *HaveHTTPBodyMatcher) body(actual interface{}) ([]byte, error) {
if a.Body != nil {
defer a.Body.Close()
var err error
- matcher.cachedBody, err = ioutil.ReadAll(a.Body)
+ matcher.cachedBody, err = gutil.ReadAll(a.Body)
if err != nil {
return nil, fmt.Errorf("error reading response body: %w", err)
}
diff --git a/vendor/github.com/onsi/gomega/matchers/have_http_status_matcher.go b/vendor/github.com/onsi/gomega/matchers/have_http_status_matcher.go
index 70f54899a..0f66e46ec 100644
--- a/vendor/github.com/onsi/gomega/matchers/have_http_status_matcher.go
+++ b/vendor/github.com/onsi/gomega/matchers/have_http_status_matcher.go
@@ -2,13 +2,13 @@ package matchers
import (
"fmt"
- "io/ioutil"
"net/http"
"net/http/httptest"
"reflect"
"strings"
"github.com/onsi/gomega/format"
+ "github.com/onsi/gomega/internal/gutil"
)
type HaveHTTPStatusMatcher struct {
@@ -78,7 +78,7 @@ func formatHttpResponse(input interface{}) string {
body := ""
if resp.Body != nil {
defer resp.Body.Close()
- data, err := ioutil.ReadAll(resp.Body)
+ data, err := gutil.ReadAll(resp.Body)
if err != nil {
data = []byte("")
}
diff --git a/vendor/github.com/onsi/gomega/matchers/have_occurred_matcher.go b/vendor/github.com/onsi/gomega/matchers/have_occurred_matcher.go
index 5bcfdd2ad..22a1b6730 100644
--- a/vendor/github.com/onsi/gomega/matchers/have_occurred_matcher.go
+++ b/vendor/github.com/onsi/gomega/matchers/have_occurred_matcher.go
@@ -31,5 +31,5 @@ func (matcher *HaveOccurredMatcher) FailureMessage(actual interface{}) (message
}
func (matcher *HaveOccurredMatcher) NegatedFailureMessage(actual interface{}) (message string) {
- return fmt.Sprintf("Unexpected error:\n%s\n%s\n%s", format.Object(actual, 1), format.IndentString(actual.(error).Error(), 1), "occurred")
+ return fmt.Sprintf("Unexpected error:\n%s\n%s", format.Object(actual, 1), "occurred")
}
diff --git a/vendor/github.com/onsi/gomega/matchers/have_value.go b/vendor/github.com/onsi/gomega/matchers/have_value.go
new file mode 100644
index 000000000..f67252835
--- /dev/null
+++ b/vendor/github.com/onsi/gomega/matchers/have_value.go
@@ -0,0 +1,54 @@
+package matchers
+
+import (
+ "errors"
+ "reflect"
+
+ "github.com/onsi/gomega/format"
+ "github.com/onsi/gomega/types"
+)
+
+const maxIndirections = 31
+
+type HaveValueMatcher struct {
+ Matcher types.GomegaMatcher // the matcher to apply to the "resolved" actual value.
+ resolvedActual interface{} // the ("resolved") value.
+}
+
+func (m *HaveValueMatcher) Match(actual interface{}) (bool, error) {
+ val := reflect.ValueOf(actual)
+ for allowedIndirs := maxIndirections; allowedIndirs > 0; allowedIndirs-- {
+ // return an error if value isn't valid. Please note that we cannot
+ // check for nil here, as we might not deal with a pointer or interface
+ // at this point.
+ if !val.IsValid() {
+ return false, errors.New(format.Message(
+ actual, "not to be <nil>"))
+ }
+ switch val.Kind() {
+ case reflect.Ptr, reflect.Interface:
+ // resolve pointers and interfaces to their values, then rinse and
+ // repeat.
+ if val.IsNil() {
+ return false, errors.New(format.Message(
+ actual, "not to be <nil>"))
+ }
+ val = val.Elem()
+ continue
+ default:
+ // forward the final value to the specified matcher.
+ m.resolvedActual = val.Interface()
+ return m.Matcher.Match(m.resolvedActual)
+ }
+ }
+ // too many indirections: extreme star gazing, indeed...?
+ return false, errors.New(format.Message(actual, "too many indirections"))
+}
+
+func (m *HaveValueMatcher) FailureMessage(_ interface{}) (message string) {
+ return m.Matcher.FailureMessage(m.resolvedActual)
+}
+
+func (m *HaveValueMatcher) NegatedFailureMessage(_ interface{}) (message string) {
+ return m.Matcher.NegatedFailureMessage(m.resolvedActual)
+}
diff --git a/vendor/github.com/onsi/gomega/matchers/match_error_matcher.go b/vendor/github.com/onsi/gomega/matchers/match_error_matcher.go
index c8993a86d..827475ea5 100644
--- a/vendor/github.com/onsi/gomega/matchers/match_error_matcher.go
+++ b/vendor/github.com/onsi/gomega/matchers/match_error_matcher.go
@@ -25,7 +25,17 @@ func (matcher *MatchErrorMatcher) Match(actual interface{}) (success bool, err e
expected := matcher.Expected
if isError(expected) {
- return reflect.DeepEqual(actualErr, expected) || errors.Is(actualErr, expected.(error)), nil
+ // first try the built-in errors.Is
+ if errors.Is(actualErr, expected.(error)) {
+ return true, nil
+ }
+ // if not, try DeepEqual along the error chain
+ for unwrapped := actualErr; unwrapped != nil; unwrapped = errors.Unwrap(unwrapped) {
+ if reflect.DeepEqual(unwrapped, expected) {
+ return true, nil
+ }
+ }
+ return false, nil
}
if isString(expected) {
diff --git a/vendor/github.com/onsi/gomega/matchers/match_yaml_matcher.go b/vendor/github.com/onsi/gomega/matchers/match_yaml_matcher.go
index 0c83c2b63..2cb6b47db 100644
--- a/vendor/github.com/onsi/gomega/matchers/match_yaml_matcher.go
+++ b/vendor/github.com/onsi/gomega/matchers/match_yaml_matcher.go
@@ -5,7 +5,7 @@ import (
"strings"
"github.com/onsi/gomega/format"
- "gopkg.in/yaml.v2"
+ "gopkg.in/yaml.v3"
)
type MatchYAMLMatcher struct {
diff --git a/vendor/github.com/onsi/gomega/matchers/succeed_matcher.go b/vendor/github.com/onsi/gomega/matchers/succeed_matcher.go
index 721ed5529..327350f7b 100644
--- a/vendor/github.com/onsi/gomega/matchers/succeed_matcher.go
+++ b/vendor/github.com/onsi/gomega/matchers/succeed_matcher.go
@@ -1,11 +1,16 @@
package matchers
import (
+ "errors"
"fmt"
"github.com/onsi/gomega/format"
)
+type formattedGomegaError interface {
+ FormattedGomegaError() string
+}
+
type SucceedMatcher struct {
}
@@ -25,7 +30,11 @@ func (matcher *SucceedMatcher) Match(actual interface{}) (success bool, err erro
}
func (matcher *SucceedMatcher) FailureMessage(actual interface{}) (message string) {
- return fmt.Sprintf("Expected success, but got an error:\n%s\n%s", format.Object(actual, 1), format.IndentString(actual.(error).Error(), 1))
+ var fgErr formattedGomegaError
+ if errors.As(actual.(error), &fgErr) {
+ return fgErr.FormattedGomegaError()
+ }
+ return fmt.Sprintf("Expected success, but got an error:\n%s", format.Object(actual, 1))
}
func (matcher *SucceedMatcher) NegatedFailureMessage(actual interface{}) (message string) {
diff --git a/vendor/github.com/onsi/gomega/matchers/with_transform.go b/vendor/github.com/onsi/gomega/matchers/with_transform.go
index 8a06bd384..6f743b1b3 100644
--- a/vendor/github.com/onsi/gomega/matchers/with_transform.go
+++ b/vendor/github.com/onsi/gomega/matchers/with_transform.go
@@ -9,7 +9,7 @@ import (
type WithTransformMatcher struct {
// input
- Transform interface{} // must be a function of one parameter that returns one value
+ Transform interface{} // must be a function of one parameter that returns one value and an optional error
Matcher types.GomegaMatcher
// cached value
@@ -19,6 +19,9 @@ type WithTransformMatcher struct {
transformedValue interface{}
}
+// reflect.Type for error
+var errorT = reflect.TypeOf((*error)(nil)).Elem()
+
func NewWithTransformMatcher(transform interface{}, matcher types.GomegaMatcher) *WithTransformMatcher {
if transform == nil {
panic("transform function cannot be nil")
@@ -27,8 +30,10 @@ func NewWithTransformMatcher(transform interface{}, matcher types.GomegaMatcher)
if txType.NumIn() != 1 {
panic("transform function must have 1 argument")
}
- if txType.NumOut() != 1 {
- panic("transform function must have 1 return value")
+ if numout := txType.NumOut(); numout != 1 {
+ if numout != 2 || !txType.Out(1).AssignableTo(errorT) {
+ panic("transform function must either have 1 return value, or 1 return value plus 1 error value")
+ }
}
return &WithTransformMatcher{
@@ -57,6 +62,11 @@ func (m *WithTransformMatcher) Match(actual interface{}) (bool, error) {
// call the Transform function with `actual`
fn := reflect.ValueOf(m.Transform)
result := fn.Call([]reflect.Value{param})
+ if len(result) == 2 {
+ if !result[1].IsNil() {
+ return false, fmt.Errorf("Transform function failed: %s", result[1].Interface().(error).Error())
+ }
+ }
m.transformedValue = result[0].Interface() // expect exactly one value
return m.Matcher.Match(m.transformedValue)
diff --git a/vendor/github.com/onsi/gomega/types/types.go b/vendor/github.com/onsi/gomega/types/types.go
index c75fcb3cc..7c7adb941 100644
--- a/vendor/github.com/onsi/gomega/types/types.go
+++ b/vendor/github.com/onsi/gomega/types/types.go
@@ -1,12 +1,13 @@
package types
import (
+ "context"
"time"
)
type GomegaFailHandler func(message string, callerSkip ...int)
-//A simple *testing.T interface wrapper
+// A simple *testing.T interface wrapper
type GomegaTestingT interface {
Helper()
Fatalf(format string, args ...interface{})
@@ -18,11 +19,11 @@ type Gomega interface {
Expect(actual interface{}, extra ...interface{}) Assertion
ExpectWithOffset(offset int, actual interface{}, extra ...interface{}) Assertion
- Eventually(actual interface{}, intervals ...interface{}) AsyncAssertion
- EventuallyWithOffset(offset int, actual interface{}, intervals ...interface{}) AsyncAssertion
+ Eventually(actualOrCtx interface{}, args ...interface{}) AsyncAssertion
+ EventuallyWithOffset(offset int, actualOrCtx interface{}, args ...interface{}) AsyncAssertion
- Consistently(actual interface{}, intervals ...interface{}) AsyncAssertion
- ConsistentlyWithOffset(offset int, actual interface{}, intervals ...interface{}) AsyncAssertion
+ Consistently(actualOrCtx interface{}, args ...interface{}) AsyncAssertion
+ ConsistentlyWithOffset(offset int, actualOrCtx interface{}, args ...interface{}) AsyncAssertion
SetDefaultEventuallyTimeout(time.Duration)
SetDefaultEventuallyPollingInterval(time.Duration)
@@ -30,9 +31,9 @@ type Gomega interface {
SetDefaultConsistentlyPollingInterval(time.Duration)
}
-//All Gomega matchers must implement the GomegaMatcher interface
+// All Gomega matchers must implement the GomegaMatcher interface
//
-//For details on writing custom matchers, check out: http://onsi.github.io/gomega/#adding-your-own-matchers
+// For details on writing custom matchers, check out: http://onsi.github.io/gomega/#adding-your-own-matchers
type GomegaMatcher interface {
Match(actual interface{}) (success bool, err error)
FailureMessage(actual interface{}) (message string)
@@ -66,6 +67,15 @@ func MatchMayChangeInTheFuture(matcher GomegaMatcher, value interface{}) bool {
type AsyncAssertion interface {
Should(matcher GomegaMatcher, optionalDescription ...interface{}) bool
ShouldNot(matcher GomegaMatcher, optionalDescription ...interface{}) bool
+
+ WithOffset(offset int) AsyncAssertion
+ WithTimeout(interval time.Duration) AsyncAssertion
+ WithPolling(interval time.Duration) AsyncAssertion
+ Within(timeout time.Duration) AsyncAssertion
+ ProbeEvery(interval time.Duration) AsyncAssertion
+ WithContext(ctx context.Context) AsyncAssertion
+ WithArguments(argsToForward ...interface{}) AsyncAssertion
+ MustPassRepeatedly(count int) AsyncAssertion
}
// Assertions are returned by Ω and Expect and enable assertions against Gomega matchers
@@ -76,4 +86,8 @@ type Assertion interface {
To(matcher GomegaMatcher, optionalDescription ...interface{}) bool
ToNot(matcher GomegaMatcher, optionalDescription ...interface{}) bool
NotTo(matcher GomegaMatcher, optionalDescription ...interface{}) bool
+
+ WithOffset(offset int) Assertion
+
+ Error() Assertion
}
diff --git a/vendor/github.com/prometheus/client_golang/prometheus/counter.go b/vendor/github.com/prometheus/client_golang/prometheus/counter.go
index de30de6da..a912b75a0 100644
--- a/vendor/github.com/prometheus/client_golang/prometheus/counter.go
+++ b/vendor/github.com/prometheus/client_golang/prometheus/counter.go
@@ -140,12 +140,13 @@ func (c *counter) get() float64 {
}
func (c *counter) Write(out *dto.Metric) error {
- val := c.get()
-
+ // Read the Exemplar first and the value second. This is to avoid a race condition
+ // where users see an exemplar for a not-yet-existing observation.
var exemplar *dto.Exemplar
if e := c.exemplar.Load(); e != nil {
exemplar = e.(*dto.Exemplar)
}
+ val := c.get()
return populateMetric(CounterValue, val, c.labelPairs, exemplar, out)
}
@@ -245,7 +246,8 @@ func (v *CounterVec) GetMetricWith(labels Labels) (Counter, error) {
// WithLabelValues works as GetMetricWithLabelValues, but panics where
// GetMetricWithLabelValues would have returned an error. Not returning an
// error allows shortcuts like
-// myVec.WithLabelValues("404", "GET").Add(42)
+//
+// myVec.WithLabelValues("404", "GET").Add(42)
func (v *CounterVec) WithLabelValues(lvs ...string) Counter {
c, err := v.GetMetricWithLabelValues(lvs...)
if err != nil {
@@ -256,7 +258,8 @@ func (v *CounterVec) WithLabelValues(lvs ...string) Counter {
// With works as GetMetricWith, but panics where GetMetricWithLabels would have
// returned an error. Not returning an error allows shortcuts like
-// myVec.With(prometheus.Labels{"code": "404", "method": "GET"}).Add(42)
+//
+// myVec.With(prometheus.Labels{"code": "404", "method": "GET"}).Add(42)
func (v *CounterVec) With(labels Labels) Counter {
c, err := v.GetMetricWith(labels)
if err != nil {
diff --git a/vendor/github.com/prometheus/client_golang/prometheus/doc.go b/vendor/github.com/prometheus/client_golang/prometheus/doc.go
index 98450125d..811072cbd 100644
--- a/vendor/github.com/prometheus/client_golang/prometheus/doc.go
+++ b/vendor/github.com/prometheus/client_golang/prometheus/doc.go
@@ -21,55 +21,66 @@
// All exported functions and methods are safe to be used concurrently unless
// specified otherwise.
//
-// A Basic Example
+// # A Basic Example
//
// As a starting point, a very basic usage example:
//
-// package main
-//
-// import (
-// "log"
-// "net/http"
-//
-// "github.com/prometheus/client_golang/prometheus"
-// "github.com/prometheus/client_golang/prometheus/promhttp"
-// )
-//
-// var (
-// cpuTemp = prometheus.NewGauge(prometheus.GaugeOpts{
-// Name: "cpu_temperature_celsius",
-// Help: "Current temperature of the CPU.",
-// })
-// hdFailures = prometheus.NewCounterVec(
-// prometheus.CounterOpts{
-// Name: "hd_errors_total",
-// Help: "Number of hard-disk errors.",
-// },
-// []string{"device"},
-// )
-// )
-//
-// func init() {
-// // Metrics have to be registered to be exposed:
-// prometheus.MustRegister(cpuTemp)
-// prometheus.MustRegister(hdFailures)
-// }
-//
-// func main() {
-// cpuTemp.Set(65.3)
-// hdFailures.With(prometheus.Labels{"device":"/dev/sda"}).Inc()
-//
-// // The Handler function provides a default handler to expose metrics
-// // via an HTTP server. "/metrics" is the usual endpoint for that.
-// http.Handle("/metrics", promhttp.Handler())
-// log.Fatal(http.ListenAndServe(":8080", nil))
-// }
-//
+// package main
+//
+// import (
+// "log"
+// "net/http"
+//
+// "github.com/prometheus/client_golang/prometheus"
+// "github.com/prometheus/client_golang/prometheus/promhttp"
+// )
+//
+// type metrics struct {
+// cpuTemp prometheus.Gauge
+// hdFailures *prometheus.CounterVec
+// }
+//
+// func NewMetrics(reg prometheus.Registerer) *metrics {
+// m := &metrics{
+// cpuTemp: prometheus.NewGauge(prometheus.GaugeOpts{
+// Name: "cpu_temperature_celsius",
+// Help: "Current temperature of the CPU.",
+// }),
+// hdFailures: prometheus.NewCounterVec(
+// prometheus.CounterOpts{
+// Name: "hd_errors_total",
+// Help: "Number of hard-disk errors.",
+// },
+// []string{"device"},
+// ),
+// }
+// reg.MustRegister(m.cpuTemp)
+// reg.MustRegister(m.hdFailures)
+// return m
+// }
+//
+// func main() {
+// // Create a non-global registry.
+// reg := prometheus.NewRegistry()
+//
+// // Create new metrics and register them using the custom registry.
+// m := NewMetrics(reg)
+// // Set values for the new created metrics.
+// m.cpuTemp.Set(65.3)
+// m.hdFailures.With(prometheus.Labels{"device":"/dev/sda"}).Inc()
+//
+// // Expose metrics and custom registry via an HTTP server
+// // using the HandleFor function. "/metrics" is the usual endpoint for that.
+// http.Handle("/metrics", promhttp.HandlerFor(reg, promhttp.HandlerOpts{Registry: reg}))
+// log.Fatal(http.ListenAndServe(":8080", nil))
+// }
//
// This is a complete program that exports two metrics, a Gauge and a Counter,
// the latter with a label attached to turn it into a (one-dimensional) vector.
+// It register the metrics using a custom registry and exposes them via an HTTP server
+// on the /metrics endpoint.
//
-// Metrics
+// # Metrics
//
// The number of exported identifiers in this package might appear a bit
// overwhelming. However, in addition to the basic plumbing shown in the example
@@ -100,7 +111,7 @@
// To create instances of Metrics and their vector versions, you need a suitable
// …Opts struct, i.e. GaugeOpts, CounterOpts, SummaryOpts, or HistogramOpts.
//
-// Custom Collectors and constant Metrics
+// # Custom Collectors and constant Metrics
//
// While you could create your own implementations of Metric, most likely you
// will only ever implement the Collector interface on your own. At a first
@@ -141,7 +152,7 @@
// a metric, GaugeFunc, CounterFunc, or UntypedFunc might be interesting
// shortcuts.
//
-// Advanced Uses of the Registry
+// # Advanced Uses of the Registry
//
// While MustRegister is the by far most common way of registering a Collector,
// sometimes you might want to handle the errors the registration might cause.
@@ -176,23 +187,23 @@
// NewProcessCollector). With a custom registry, you are in control and decide
// yourself about the Collectors to register.
//
-// HTTP Exposition
+// # HTTP Exposition
//
// The Registry implements the Gatherer interface. The caller of the Gather
// method can then expose the gathered metrics in some way. Usually, the metrics
// are served via HTTP on the /metrics endpoint. That's happening in the example
// above. The tools to expose metrics via HTTP are in the promhttp sub-package.
//
-// Pushing to the Pushgateway
+// # Pushing to the Pushgateway
//
// Function for pushing to the Pushgateway can be found in the push sub-package.
//
-// Graphite Bridge
+// # Graphite Bridge
//
// Functions and examples to push metrics from a Gatherer to Graphite can be
// found in the graphite sub-package.
//
-// Other Means of Exposition
+// # Other Means of Exposition
//
// More ways of exposing metrics can easily be added by following the approaches
// of the existing implementations.
diff --git a/vendor/github.com/prometheus/client_golang/prometheus/gauge.go b/vendor/github.com/prometheus/client_golang/prometheus/gauge.go
index bd0733d6a..21271a5bb 100644
--- a/vendor/github.com/prometheus/client_golang/prometheus/gauge.go
+++ b/vendor/github.com/prometheus/client_golang/prometheus/gauge.go
@@ -210,7 +210,8 @@ func (v *GaugeVec) GetMetricWith(labels Labels) (Gauge, error) {
// WithLabelValues works as GetMetricWithLabelValues, but panics where
// GetMetricWithLabelValues would have returned an error. Not returning an
// error allows shortcuts like
-// myVec.WithLabelValues("404", "GET").Add(42)
+//
+// myVec.WithLabelValues("404", "GET").Add(42)
func (v *GaugeVec) WithLabelValues(lvs ...string) Gauge {
g, err := v.GetMetricWithLabelValues(lvs...)
if err != nil {
@@ -221,7 +222,8 @@ func (v *GaugeVec) WithLabelValues(lvs ...string) Gauge {
// With works as GetMetricWith, but panics where GetMetricWithLabels would have
// returned an error. Not returning an error allows shortcuts like
-// myVec.With(prometheus.Labels{"code": "404", "method": "GET"}).Add(42)
+//
+// myVec.With(prometheus.Labels{"code": "404", "method": "GET"}).Add(42)
func (v *GaugeVec) With(labels Labels) Gauge {
g, err := v.GetMetricWith(labels)
if err != nil {
diff --git a/vendor/github.com/prometheus/client_golang/prometheus/histogram.go b/vendor/github.com/prometheus/client_golang/prometheus/histogram.go
index 0d47fecdc..4c873a01c 100644
--- a/vendor/github.com/prometheus/client_golang/prometheus/histogram.go
+++ b/vendor/github.com/prometheus/client_golang/prometheus/histogram.go
@@ -28,19 +28,216 @@ import (
dto "github.com/prometheus/client_model/go"
)
+// nativeHistogramBounds for the frac of observed values. Only relevant for
+// schema > 0. The position in the slice is the schema. (0 is never used, just
+// here for convenience of using the schema directly as the index.)
+//
+// TODO(beorn7): Currently, we do a binary search into these slices. There are
+// ways to turn it into a small number of simple array lookups. It probably only
+// matters for schema 5 and beyond, but should be investigated. See this comment
+// as a starting point:
+// https://github.com/open-telemetry/opentelemetry-specification/issues/1776#issuecomment-870164310
+var nativeHistogramBounds = [][]float64{
+ // Schema "0":
+ {0.5},
+ // Schema 1:
+ {0.5, 0.7071067811865475},
+ // Schema 2:
+ {0.5, 0.5946035575013605, 0.7071067811865475, 0.8408964152537144},
+ // Schema 3:
+ {
+ 0.5, 0.5452538663326288, 0.5946035575013605, 0.6484197773255048,
+ 0.7071067811865475, 0.7711054127039704, 0.8408964152537144, 0.9170040432046711,
+ },
+ // Schema 4:
+ {
+ 0.5, 0.5221368912137069, 0.5452538663326288, 0.5693943173783458,
+ 0.5946035575013605, 0.620928906036742, 0.6484197773255048, 0.6771277734684463,
+ 0.7071067811865475, 0.7384130729697496, 0.7711054127039704, 0.805245165974627,
+ 0.8408964152537144, 0.8781260801866495, 0.9170040432046711, 0.9576032806985735,
+ },
+ // Schema 5:
+ {
+ 0.5, 0.5109485743270583, 0.5221368912137069, 0.5335702003384117,
+ 0.5452538663326288, 0.5571933712979462, 0.5693943173783458, 0.5818624293887887,
+ 0.5946035575013605, 0.6076236799902344, 0.620928906036742, 0.6345254785958666,
+ 0.6484197773255048, 0.6626183215798706, 0.6771277734684463, 0.6919549409819159,
+ 0.7071067811865475, 0.7225904034885232, 0.7384130729697496, 0.7545822137967112,
+ 0.7711054127039704, 0.7879904225539431, 0.805245165974627, 0.8228777390769823,
+ 0.8408964152537144, 0.8593096490612387, 0.8781260801866495, 0.8973545375015533,
+ 0.9170040432046711, 0.9370838170551498, 0.9576032806985735, 0.9785720620876999,
+ },
+ // Schema 6:
+ {
+ 0.5, 0.5054446430258502, 0.5109485743270583, 0.5165124395106142,
+ 0.5221368912137069, 0.5278225891802786, 0.5335702003384117, 0.5393803988785598,
+ 0.5452538663326288, 0.5511912916539204, 0.5571933712979462, 0.5632608093041209,
+ 0.5693943173783458, 0.5755946149764913, 0.5818624293887887, 0.5881984958251406,
+ 0.5946035575013605, 0.6010783657263515, 0.6076236799902344, 0.6142402680534349,
+ 0.620928906036742, 0.6276903785123455, 0.6345254785958666, 0.6414350080393891,
+ 0.6484197773255048, 0.6554806057623822, 0.6626183215798706, 0.6698337620266515,
+ 0.6771277734684463, 0.6845012114872953, 0.6919549409819159, 0.6994898362691555,
+ 0.7071067811865475, 0.7148066691959849, 0.7225904034885232, 0.7304588970903234,
+ 0.7384130729697496, 0.7464538641456323, 0.7545822137967112, 0.762799075372269,
+ 0.7711054127039704, 0.7795022001189185, 0.7879904225539431, 0.7965710756711334,
+ 0.805245165974627, 0.8140137109286738, 0.8228777390769823, 0.8318382901633681,
+ 0.8408964152537144, 0.8500531768592616, 0.8593096490612387, 0.8686669176368529,
+ 0.8781260801866495, 0.8876882462632604, 0.8973545375015533, 0.9071260877501991,
+ 0.9170040432046711, 0.9269895625416926, 0.9370838170551498, 0.9472879907934827,
+ 0.9576032806985735, 0.9680308967461471, 0.9785720620876999, 0.9892280131939752,
+ },
+ // Schema 7:
+ {
+ 0.5, 0.5027149505564014, 0.5054446430258502, 0.5081891574554764,
+ 0.5109485743270583, 0.5137229745593818, 0.5165124395106142, 0.5193170509806894,
+ 0.5221368912137069, 0.5249720429003435, 0.5278225891802786, 0.5306886136446309,
+ 0.5335702003384117, 0.5364674337629877, 0.5393803988785598, 0.5423091811066545,
+ 0.5452538663326288, 0.5482145409081883, 0.5511912916539204, 0.5541842058618393,
+ 0.5571933712979462, 0.5602188762048033, 0.5632608093041209, 0.5663192597993595,
+ 0.5693943173783458, 0.572486072215902, 0.5755946149764913, 0.5787200368168754,
+ 0.5818624293887887, 0.585021884841625, 0.5881984958251406, 0.5913923554921704,
+ 0.5946035575013605, 0.5978321960199137, 0.6010783657263515, 0.6043421618132907,
+ 0.6076236799902344, 0.6109230164863786, 0.6142402680534349, 0.6175755319684665,
+ 0.620928906036742, 0.6243004885946023, 0.6276903785123455, 0.6310986751971253,
+ 0.6345254785958666, 0.637970889198196, 0.6414350080393891, 0.6449179367033329,
+ 0.6484197773255048, 0.6519406325959679, 0.6554806057623822, 0.659039800633032,
+ 0.6626183215798706, 0.6662162735415805, 0.6698337620266515, 0.6734708931164728,
+ 0.6771277734684463, 0.6808045103191123, 0.6845012114872953, 0.688217985377265,
+ 0.6919549409819159, 0.6957121878859629, 0.6994898362691555, 0.7032879969095076,
+ 0.7071067811865475, 0.7109463010845827, 0.7148066691959849, 0.718687998724491,
+ 0.7225904034885232, 0.7265139979245261, 0.7304588970903234, 0.7344252166684908,
+ 0.7384130729697496, 0.7424225829363761, 0.7464538641456323, 0.7505070348132126,
+ 0.7545822137967112, 0.7586795205991071, 0.762799075372269, 0.7669409989204777,
+ 0.7711054127039704, 0.7752924388424999, 0.7795022001189185, 0.7837348199827764,
+ 0.7879904225539431, 0.7922691326262467, 0.7965710756711334, 0.8008963778413465,
+ 0.805245165974627, 0.8096175675974316, 0.8140137109286738, 0.8184337248834821,
+ 0.8228777390769823, 0.8273458838280969, 0.8318382901633681, 0.8363550898207981,
+ 0.8408964152537144, 0.8454623996346523, 0.8500531768592616, 0.8546688815502312,
+ 0.8593096490612387, 0.8639756154809185, 0.8686669176368529, 0.8733836930995842,
+ 0.8781260801866495, 0.8828942179666361, 0.8876882462632604, 0.8925083056594671,
+ 0.8973545375015533, 0.9022270839033115, 0.9071260877501991, 0.9120516927035263,
+ 0.9170040432046711, 0.9219832844793128, 0.9269895625416926, 0.9320230241988943,
+ 0.9370838170551498, 0.9421720895161669, 0.9472879907934827, 0.9524316709088368,
+ 0.9576032806985735, 0.9628029718180622, 0.9680308967461471, 0.9732872087896164,
+ 0.9785720620876999, 0.9838856116165875, 0.9892280131939752, 0.9945994234836328,
+ },
+ // Schema 8:
+ {
+ 0.5, 0.5013556375251013, 0.5027149505564014, 0.5040779490592088,
+ 0.5054446430258502, 0.5068150424757447, 0.5081891574554764, 0.509566998038869,
+ 0.5109485743270583, 0.5123338964485679, 0.5137229745593818, 0.5151158188430205,
+ 0.5165124395106142, 0.5179128468009786, 0.5193170509806894, 0.520725062344158,
+ 0.5221368912137069, 0.5235525479396449, 0.5249720429003435, 0.526395386502313,
+ 0.5278225891802786, 0.5292536613972564, 0.5306886136446309, 0.5321274564422321,
+ 0.5335702003384117, 0.5350168559101208, 0.5364674337629877, 0.5379219445313954,
+ 0.5393803988785598, 0.5408428074966075, 0.5423091811066545, 0.5437795304588847,
+ 0.5452538663326288, 0.5467321995364429, 0.5482145409081883, 0.549700901315111,
+ 0.5511912916539204, 0.5526857228508706, 0.5541842058618393, 0.5556867516724088,
+ 0.5571933712979462, 0.5587040757836845, 0.5602188762048033, 0.5617377836665098,
+ 0.5632608093041209, 0.564787964283144, 0.5663192597993595, 0.5678547070789026,
+ 0.5693943173783458, 0.5709381019847808, 0.572486072215902, 0.5740382394200894,
+ 0.5755946149764913, 0.5771552102951081, 0.5787200368168754, 0.5802891060137493,
+ 0.5818624293887887, 0.5834400184762408, 0.585021884841625, 0.5866080400818185,
+ 0.5881984958251406, 0.5897932637314379, 0.5913923554921704, 0.5929957828304968,
+ 0.5946035575013605, 0.5962156912915756, 0.5978321960199137, 0.5994530835371903,
+ 0.6010783657263515, 0.6027080545025619, 0.6043421618132907, 0.6059806996384005,
+ 0.6076236799902344, 0.6092711149137041, 0.6109230164863786, 0.6125793968185725,
+ 0.6142402680534349, 0.6159056423670379, 0.6175755319684665, 0.6192499490999082,
+ 0.620928906036742, 0.622612415087629, 0.6243004885946023, 0.6259931389331581,
+ 0.6276903785123455, 0.6293922197748583, 0.6310986751971253, 0.6328097572894031,
+ 0.6345254785958666, 0.6362458516947014, 0.637970889198196, 0.6397006037528346,
+ 0.6414350080393891, 0.6431741147730128, 0.6449179367033329, 0.6466664866145447,
+ 0.6484197773255048, 0.6501778216898253, 0.6519406325959679, 0.6537082229673385,
+ 0.6554806057623822, 0.6572577939746774, 0.659039800633032, 0.6608266388015788,
+ 0.6626183215798706, 0.6644148621029772, 0.6662162735415805, 0.6680225691020727,
+ 0.6698337620266515, 0.6716498655934177, 0.6734708931164728, 0.6752968579460171,
+ 0.6771277734684463, 0.6789636531064505, 0.6808045103191123, 0.6826503586020058,
+ 0.6845012114872953, 0.6863570825438342, 0.688217985377265, 0.690083933630119,
+ 0.6919549409819159, 0.6938310211492645, 0.6957121878859629, 0.6975984549830999,
+ 0.6994898362691555, 0.7013863456101023, 0.7032879969095076, 0.7051948041086352,
+ 0.7071067811865475, 0.7090239421602076, 0.7109463010845827, 0.7128738720527471,
+ 0.7148066691959849, 0.7167447066838943, 0.718687998724491, 0.7206365595643126,
+ 0.7225904034885232, 0.7245495448210174, 0.7265139979245261, 0.7284837772007218,
+ 0.7304588970903234, 0.7324393720732029, 0.7344252166684908, 0.7364164454346837,
+ 0.7384130729697496, 0.7404151139112358, 0.7424225829363761, 0.7444354947621984,
+ 0.7464538641456323, 0.7484777058836176, 0.7505070348132126, 0.7525418658117031,
+ 0.7545822137967112, 0.7566280937263048, 0.7586795205991071, 0.7607365094544071,
+ 0.762799075372269, 0.7648672334736434, 0.7669409989204777, 0.7690203869158282,
+ 0.7711054127039704, 0.7731960915705107, 0.7752924388424999, 0.7773944698885442,
+ 0.7795022001189185, 0.7816156449856788, 0.7837348199827764, 0.7858597406461707,
+ 0.7879904225539431, 0.7901268813264122, 0.7922691326262467, 0.7944171921585818,
+ 0.7965710756711334, 0.7987307989543135, 0.8008963778413465, 0.8030678282083853,
+ 0.805245165974627, 0.8074284071024302, 0.8096175675974316, 0.8118126635086642,
+ 0.8140137109286738, 0.8162207259936375, 0.8184337248834821, 0.820652723822003,
+ 0.8228777390769823, 0.8251087869603088, 0.8273458838280969, 0.8295890460808079,
+ 0.8318382901633681, 0.8340936325652911, 0.8363550898207981, 0.8386226785089391,
+ 0.8408964152537144, 0.8431763167241966, 0.8454623996346523, 0.8477546807446661,
+ 0.8500531768592616, 0.8523579048290255, 0.8546688815502312, 0.8569861239649629,
+ 0.8593096490612387, 0.8616394738731368, 0.8639756154809185, 0.8663180910111553,
+ 0.8686669176368529, 0.871022112577578, 0.8733836930995842, 0.8757516765159389,
+ 0.8781260801866495, 0.8805069215187917, 0.8828942179666361, 0.8852879870317771,
+ 0.8876882462632604, 0.890095013257712, 0.8925083056594671, 0.8949281411607002,
+ 0.8973545375015533, 0.8997875124702672, 0.9022270839033115, 0.9046732696855155,
+ 0.9071260877501991, 0.909585556079304, 0.9120516927035263, 0.9145245157024483,
+ 0.9170040432046711, 0.9194902933879467, 0.9219832844793128, 0.9244830347552253,
+ 0.9269895625416926, 0.92950288621441, 0.9320230241988943, 0.9345499949706191,
+ 0.9370838170551498, 0.93962450902828, 0.9421720895161669, 0.9447265771954693,
+ 0.9472879907934827, 0.9498563490882775, 0.9524316709088368, 0.9550139751351947,
+ 0.9576032806985735, 0.9601996065815236, 0.9628029718180622, 0.9654133954938133,
+ 0.9680308967461471, 0.9706554947643201, 0.9732872087896164, 0.9759260581154889,
+ 0.9785720620876999, 0.9812252401044634, 0.9838856116165875, 0.9865531961276168,
+ 0.9892280131939752, 0.9919100824251095, 0.9945994234836328, 0.9972960560854698,
+ },
+}
+
+// The nativeHistogramBounds above can be generated with the code below.
+//
+// TODO(beorn7): It's tempting to actually use `go generate` to generate the
+// code above. However, this could lead to slightly different numbers on
+// different architectures. We still need to come to terms if we are fine with
+// that, or if we might prefer to specify precise numbers in the standard.
+//
+// var nativeHistogramBounds [][]float64 = make([][]float64, 9)
+//
+// func init() {
+// // Populate nativeHistogramBounds.
+// numBuckets := 1
+// for i := range nativeHistogramBounds {
+// bounds := []float64{0.5}
+// factor := math.Exp2(math.Exp2(float64(-i)))
+// for j := 0; j < numBuckets-1; j++ {
+// var bound float64
+// if (j+1)%2 == 0 {
+// // Use previously calculated value for increased precision.
+// bound = nativeHistogramBounds[i-1][j/2+1]
+// } else {
+// bound = bounds[j] * factor
+// }
+// bounds = append(bounds, bound)
+// }
+// numBuckets *= 2
+// nativeHistogramBounds[i] = bounds
+// }
+// }
+
// A Histogram counts individual observations from an event or sample stream in
-// configurable buckets. Similar to a summary, it also provides a sum of
-// observations and an observation count.
+// configurable static buckets (or in dynamic sparse buckets as part of the
+// experimental Native Histograms, see below for more details). Similar to a
+// Summary, it also provides a sum of observations and an observation count.
//
// On the Prometheus server, quantiles can be calculated from a Histogram using
-// the histogram_quantile function in the query language.
+// the histogram_quantile PromQL function.
+//
+// Note that Histograms, in contrast to Summaries, can be aggregated in PromQL
+// (see the documentation for detailed procedures). However, Histograms require
+// the user to pre-define suitable buckets, and they are in general less
+// accurate. (Both problems are addressed by the experimental Native
+// Histograms. To use them, configure a NativeHistogramBucketFactor in the
+// HistogramOpts. They also require a Prometheus server v2.40+ with the
+// corresponding feature flag enabled.)
//
-// Note that Histograms, in contrast to Summaries, can be aggregated with the
-// Prometheus query language (see the documentation for detailed
-// procedures). However, Histograms require the user to pre-define suitable
-// buckets, and they are in general less accurate. The Observe method of a
-// Histogram has a very low performance overhead in comparison with the Observe
-// method of a Summary.
+// The Observe method of a Histogram has a very low performance overhead in
+// comparison with the Observe method of a Summary.
//
// To create Histogram instances, use NewHistogram.
type Histogram interface {
@@ -50,7 +247,8 @@ type Histogram interface {
// Observe adds a single observation to the histogram. Observations are
// usually positive or zero. Negative observations are accepted but
// prevent current versions of Prometheus from properly detecting
- // counter resets in the sum of observations. See
+ // counter resets in the sum of observations. (The experimental Native
+ // Histograms handle negative observations properly.) See
// https://prometheus.io/docs/practices/histograms/#count-and-sum-of-observations
// for details.
Observe(float64)
@@ -64,18 +262,28 @@ const bucketLabel = "le"
// tailored to broadly measure the response time (in seconds) of a network
// service. Most likely, however, you will be required to define buckets
// customized to your use case.
-var (
- DefBuckets = []float64{.005, .01, .025, .05, .1, .25, .5, 1, 2.5, 5, 10}
+var DefBuckets = []float64{.005, .01, .025, .05, .1, .25, .5, 1, 2.5, 5, 10}
- errBucketLabelNotAllowed = fmt.Errorf(
- "%q is not allowed as label name in histograms", bucketLabel,
- )
+// DefNativeHistogramZeroThreshold is the default value for
+// NativeHistogramZeroThreshold in the HistogramOpts.
+//
+// The value is 2^-128 (or 0.5*2^-127 in the actual IEEE 754 representation),
+// which is a bucket boundary at all possible resolutions.
+const DefNativeHistogramZeroThreshold = 2.938735877055719e-39
+
+// NativeHistogramZeroThresholdZero can be used as NativeHistogramZeroThreshold
+// in the HistogramOpts to create a zero bucket of width zero, i.e. a zero
+// bucket that only receives observations of precisely zero.
+const NativeHistogramZeroThresholdZero = -1
+
+var errBucketLabelNotAllowed = fmt.Errorf(
+ "%q is not allowed as label name in histograms", bucketLabel,
)
-// LinearBuckets creates 'count' buckets, each 'width' wide, where the lowest
-// bucket has an upper bound of 'start'. The final +Inf bucket is not counted
-// and not included in the returned slice. The returned slice is meant to be
-// used for the Buckets field of HistogramOpts.
+// LinearBuckets creates 'count' regular buckets, each 'width' wide, where the
+// lowest bucket has an upper bound of 'start'. The final +Inf bucket is not
+// counted and not included in the returned slice. The returned slice is meant
+// to be used for the Buckets field of HistogramOpts.
//
// The function panics if 'count' is zero or negative.
func LinearBuckets(start, width float64, count int) []float64 {
@@ -90,11 +298,11 @@ func LinearBuckets(start, width float64, count int) []float64 {
return buckets
}
-// ExponentialBuckets creates 'count' buckets, where the lowest bucket has an
-// upper bound of 'start' and each following bucket's upper bound is 'factor'
-// times the previous bucket's upper bound. The final +Inf bucket is not counted
-// and not included in the returned slice. The returned slice is meant to be
-// used for the Buckets field of HistogramOpts.
+// ExponentialBuckets creates 'count' regular buckets, where the lowest bucket
+// has an upper bound of 'start' and each following bucket's upper bound is
+// 'factor' times the previous bucket's upper bound. The final +Inf bucket is
+// not counted and not included in the returned slice. The returned slice is
+// meant to be used for the Buckets field of HistogramOpts.
//
// The function panics if 'count' is 0 or negative, if 'start' is 0 or negative,
// or if 'factor' is less than or equal 1.
@@ -180,8 +388,85 @@ type HistogramOpts struct {
// element in the slice is the upper inclusive bound of a bucket. The
// values must be sorted in strictly increasing order. There is no need
// to add a highest bucket with +Inf bound, it will be added
- // implicitly. The default value is DefBuckets.
+ // implicitly. If Buckets is left as nil or set to a slice of length
+ // zero, it is replaced by default buckets. The default buckets are
+ // DefBuckets if no buckets for a native histogram (see below) are used,
+ // otherwise the default is no buckets. (In other words, if you want to
+ // use both reguler buckets and buckets for a native histogram, you have
+ // to define the regular buckets here explicitly.)
Buckets []float64
+
+ // If NativeHistogramBucketFactor is greater than one, so-called sparse
+ // buckets are used (in addition to the regular buckets, if defined
+ // above). A Histogram with sparse buckets will be ingested as a Native
+ // Histogram by a Prometheus server with that feature enabled (requires
+ // Prometheus v2.40+). Sparse buckets are exponential buckets covering
+ // the whole float64 range (with the exception of the “zero” bucket, see
+ // SparseBucketsZeroThreshold below). From any one bucket to the next,
+ // the width of the bucket grows by a constant
+ // factor. NativeHistogramBucketFactor provides an upper bound for this
+ // factor (exception see below). The smaller
+ // NativeHistogramBucketFactor, the more buckets will be used and thus
+ // the more costly the histogram will become. A generally good trade-off
+ // between cost and accuracy is a value of 1.1 (each bucket is at most
+ // 10% wider than the previous one), which will result in each power of
+ // two divided into 8 buckets (e.g. there will be 8 buckets between 1
+ // and 2, same as between 2 and 4, and 4 and 8, etc.).
+ //
+ // Details about the actually used factor: The factor is calculated as
+ // 2^(2^n), where n is an integer number between (and including) -8 and
+ // 4. n is chosen so that the resulting factor is the largest that is
+ // still smaller or equal to NativeHistogramBucketFactor. Note that the
+ // smallest possible factor is therefore approx. 1.00271 (i.e. 2^(2^-8)
+ // ). If NativeHistogramBucketFactor is greater than 1 but smaller than
+ // 2^(2^-8), then the actually used factor is still 2^(2^-8) even though
+ // it is larger than the provided NativeHistogramBucketFactor.
+ //
+ // NOTE: Native Histograms are still an experimental feature. Their
+ // behavior might still change without a major version
+ // bump. Subsequently, all NativeHistogram... options here might still
+ // change their behavior or name (or might completely disappear) without
+ // a major version bump.
+ NativeHistogramBucketFactor float64
+ // All observations with an absolute value of less or equal
+ // NativeHistogramZeroThreshold are accumulated into a “zero”
+ // bucket. For best results, this should be close to a bucket
+ // boundary. This is usually the case if picking a power of two. If
+ // NativeHistogramZeroThreshold is left at zero,
+ // DefSparseBucketsZeroThreshold is used as the threshold. To configure
+ // a zero bucket with an actual threshold of zero (i.e. only
+ // observations of precisely zero will go into the zero bucket), set
+ // NativeHistogramZeroThreshold to the NativeHistogramZeroThresholdZero
+ // constant (or any negative float value).
+ NativeHistogramZeroThreshold float64
+
+ // The remaining fields define a strategy to limit the number of
+ // populated sparse buckets. If NativeHistogramMaxBucketNumber is left
+ // at zero, the number of buckets is not limited. (Note that this might
+ // lead to unbounded memory consumption if the values observed by the
+ // Histogram are sufficiently wide-spread. In particular, this could be
+ // used as a DoS attack vector. Where the observed values depend on
+ // external inputs, it is highly recommended to set a
+ // NativeHistogramMaxBucketNumber.) Once the set
+ // NativeHistogramMaxBucketNumber is exceeded, the following strategy is
+ // enacted: First, if the last reset (or the creation) of the histogram
+ // is at least NativeHistogramMinResetDuration ago, then the whole
+ // histogram is reset to its initial state (including regular
+ // buckets). If less time has passed, or if
+ // NativeHistogramMinResetDuration is zero, no reset is
+ // performed. Instead, the zero threshold is increased sufficiently to
+ // reduce the number of buckets to or below
+ // NativeHistogramMaxBucketNumber, but not to more than
+ // NativeHistogramMaxZeroThreshold. Thus, if
+ // NativeHistogramMaxZeroThreshold is already at or below the current
+ // zero threshold, nothing happens at this step. After that, if the
+ // number of buckets still exceeds NativeHistogramMaxBucketNumber, the
+ // resolution of the histogram is reduced by doubling the width of the
+ // sparse buckets (up to a growth factor between one bucket to the next
+ // of 2^(2^4) = 65536, see above).
+ NativeHistogramMaxBucketNumber uint32
+ NativeHistogramMinResetDuration time.Duration
+ NativeHistogramMaxZeroThreshold float64
}
// NewHistogram creates a new Histogram based on the provided HistogramOpts. It
@@ -218,16 +503,29 @@ func newHistogram(desc *Desc, opts HistogramOpts, labelValues ...string) Histogr
}
}
- if len(opts.Buckets) == 0 {
- opts.Buckets = DefBuckets
- }
-
h := &histogram{
- desc: desc,
- upperBounds: opts.Buckets,
- labelPairs: MakeLabelPairs(desc, labelValues),
- counts: [2]*histogramCounts{{}, {}},
- now: time.Now,
+ desc: desc,
+ upperBounds: opts.Buckets,
+ labelPairs: MakeLabelPairs(desc, labelValues),
+ nativeHistogramMaxBuckets: opts.NativeHistogramMaxBucketNumber,
+ nativeHistogramMaxZeroThreshold: opts.NativeHistogramMaxZeroThreshold,
+ nativeHistogramMinResetDuration: opts.NativeHistogramMinResetDuration,
+ lastResetTime: time.Now(),
+ now: time.Now,
+ }
+ if len(h.upperBounds) == 0 && opts.NativeHistogramBucketFactor <= 1 {
+ h.upperBounds = DefBuckets
+ }
+ if opts.NativeHistogramBucketFactor <= 1 {
+ h.nativeHistogramSchema = math.MinInt32 // To mark that there are no sparse buckets.
+ } else {
+ switch {
+ case opts.NativeHistogramZeroThreshold > 0:
+ h.nativeHistogramZeroThreshold = opts.NativeHistogramZeroThreshold
+ case opts.NativeHistogramZeroThreshold == 0:
+ h.nativeHistogramZeroThreshold = DefNativeHistogramZeroThreshold
+ } // Leave h.nativeHistogramZeroThreshold at 0 otherwise.
+ h.nativeHistogramSchema = pickSchema(opts.NativeHistogramBucketFactor)
}
for i, upperBound := range h.upperBounds {
if i < len(h.upperBounds)-1 {
@@ -246,8 +544,16 @@ func newHistogram(desc *Desc, opts HistogramOpts, labelValues ...string) Histogr
}
// Finally we know the final length of h.upperBounds and can make buckets
// for both counts as well as exemplars:
- h.counts[0].buckets = make([]uint64, len(h.upperBounds))
- h.counts[1].buckets = make([]uint64, len(h.upperBounds))
+ h.counts[0] = &histogramCounts{
+ buckets: make([]uint64, len(h.upperBounds)),
+ nativeHistogramZeroThresholdBits: math.Float64bits(h.nativeHistogramZeroThreshold),
+ nativeHistogramSchema: h.nativeHistogramSchema,
+ }
+ h.counts[1] = &histogramCounts{
+ buckets: make([]uint64, len(h.upperBounds)),
+ nativeHistogramZeroThresholdBits: math.Float64bits(h.nativeHistogramZeroThreshold),
+ nativeHistogramSchema: h.nativeHistogramSchema,
+ }
h.exemplars = make([]atomic.Value, len(h.upperBounds)+1)
h.init(h) // Init self-collection.
@@ -255,13 +561,98 @@ func newHistogram(desc *Desc, opts HistogramOpts, labelValues ...string) Histogr
}
type histogramCounts struct {
+ // Order in this struct matters for the alignment required by atomic
+ // operations, see http://golang.org/pkg/sync/atomic/#pkg-note-BUG
+
// sumBits contains the bits of the float64 representing the sum of all
- // observations. sumBits and count have to go first in the struct to
- // guarantee alignment for atomic operations.
- // http://golang.org/pkg/sync/atomic/#pkg-note-BUG
+ // observations.
sumBits uint64
count uint64
+
+ // nativeHistogramZeroBucket counts all (positive and negative)
+ // observations in the zero bucket (with an absolute value less or equal
+ // the current threshold, see next field.
+ nativeHistogramZeroBucket uint64
+ // nativeHistogramZeroThresholdBits is the bit pattern of the current
+ // threshold for the zero bucket. It's initially equal to
+ // nativeHistogramZeroThreshold but may change according to the bucket
+ // count limitation strategy.
+ nativeHistogramZeroThresholdBits uint64
+ // nativeHistogramSchema may change over time according to the bucket
+ // count limitation strategy and therefore has to be saved here.
+ nativeHistogramSchema int32
+ // Number of (positive and negative) sparse buckets.
+ nativeHistogramBucketsNumber uint32
+
+ // Regular buckets.
buckets []uint64
+
+ // The sparse buckets for native histograms are implemented with a
+ // sync.Map for now. A dedicated data structure will likely be more
+ // efficient. There are separate maps for negative and positive
+ // observations. The map's value is an *int64, counting observations in
+ // that bucket. (Note that we don't use uint64 as an int64 won't
+ // overflow in practice, and working with signed numbers from the
+ // beginning simplifies the handling of deltas.) The map's key is the
+ // index of the bucket according to the used
+ // nativeHistogramSchema. Index 0 is for an upper bound of 1.
+ nativeHistogramBucketsPositive, nativeHistogramBucketsNegative sync.Map
+}
+
+// observe manages the parts of observe that only affects
+// histogramCounts. doSparse is true if sparse buckets should be done,
+// too.
+func (hc *histogramCounts) observe(v float64, bucket int, doSparse bool) {
+ if bucket < len(hc.buckets) {
+ atomic.AddUint64(&hc.buckets[bucket], 1)
+ }
+ atomicAddFloat(&hc.sumBits, v)
+ if doSparse && !math.IsNaN(v) {
+ var (
+ key int
+ schema = atomic.LoadInt32(&hc.nativeHistogramSchema)
+ zeroThreshold = math.Float64frombits(atomic.LoadUint64(&hc.nativeHistogramZeroThresholdBits))
+ bucketCreated, isInf bool
+ )
+ if math.IsInf(v, 0) {
+ // Pretend v is MaxFloat64 but later increment key by one.
+ if math.IsInf(v, +1) {
+ v = math.MaxFloat64
+ } else {
+ v = -math.MaxFloat64
+ }
+ isInf = true
+ }
+ frac, exp := math.Frexp(math.Abs(v))
+ if schema > 0 {
+ bounds := nativeHistogramBounds[schema]
+ key = sort.SearchFloat64s(bounds, frac) + (exp-1)*len(bounds)
+ } else {
+ key = exp
+ if frac == 0.5 {
+ key--
+ }
+ div := 1 << -schema
+ key = (key + div - 1) / div
+ }
+ if isInf {
+ key++
+ }
+ switch {
+ case v > zeroThreshold:
+ bucketCreated = addToBucket(&hc.nativeHistogramBucketsPositive, key, 1)
+ case v < -zeroThreshold:
+ bucketCreated = addToBucket(&hc.nativeHistogramBucketsNegative, key, 1)
+ default:
+ atomic.AddUint64(&hc.nativeHistogramZeroBucket, 1)
+ }
+ if bucketCreated {
+ atomic.AddUint32(&hc.nativeHistogramBucketsNumber, 1)
+ }
+ }
+ // Increment count last as we take it as a signal that the observation
+ // is complete.
+ atomic.AddUint64(&hc.count, 1)
}
type histogram struct {
@@ -276,7 +667,7 @@ type histogram struct {
// perspective of the histogram) swap the hot–cold under the writeMtx
// lock. A cooldown is awaited (while locked) by comparing the number of
// observations with the initiation count. Once they match, then the
- // last observation on the now cool one has completed. All cool fields must
+ // last observation on the now cool one has completed. All cold fields must
// be merged into the new hot before releasing writeMtx.
//
// Fields with atomic access first! See alignment constraint:
@@ -284,8 +675,10 @@ type histogram struct {
countAndHotIdx uint64
selfCollector
- desc *Desc
- writeMtx sync.Mutex // Only used in the Write method.
+ desc *Desc
+
+ // Only used in the Write method and for sparse bucket management.
+ mtx sync.Mutex
// Two counts, one is "hot" for lock-free observations, the other is
// "cold" for writing out a dto.Metric. It has to be an array of
@@ -293,9 +686,15 @@ type histogram struct {
// http://golang.org/pkg/sync/atomic/#pkg-note-BUG.
counts [2]*histogramCounts
- upperBounds []float64
- labelPairs []*dto.LabelPair
- exemplars []atomic.Value // One more than buckets (to include +Inf), each a *dto.Exemplar.
+ upperBounds []float64
+ labelPairs []*dto.LabelPair
+ exemplars []atomic.Value // One more than buckets (to include +Inf), each a *dto.Exemplar.
+ nativeHistogramSchema int32 // The initial schema. Set to math.MinInt32 if no sparse buckets are used.
+ nativeHistogramZeroThreshold float64 // The initial zero threshold.
+ nativeHistogramMaxZeroThreshold float64
+ nativeHistogramMaxBuckets uint32
+ nativeHistogramMinResetDuration time.Duration
+ lastResetTime time.Time // Protected by mtx.
now func() time.Time // To mock out time.Now() for testing.
}
@@ -319,8 +718,8 @@ func (h *histogram) Write(out *dto.Metric) error {
// the hot path, i.e. Observe is called much more often than Write. The
// complication of making Write lock-free isn't worth it, if possible at
// all.
- h.writeMtx.Lock()
- defer h.writeMtx.Unlock()
+ h.mtx.Lock()
+ defer h.mtx.Unlock()
// Adding 1<<63 switches the hot index (from 0 to 1 or from 1 to 0)
// without touching the count bits. See the struct comments for a full
@@ -333,16 +732,16 @@ func (h *histogram) Write(out *dto.Metric) error {
hotCounts := h.counts[n>>63]
coldCounts := h.counts[(^n)>>63]
- // Await cooldown.
- for count != atomic.LoadUint64(&coldCounts.count) {
- runtime.Gosched() // Let observations get work done.
- }
+ waitForCooldown(count, coldCounts)
his := &dto.Histogram{
Bucket: make([]*dto.Bucket, len(h.upperBounds)),
SampleCount: proto.Uint64(count),
SampleSum: proto.Float64(math.Float64frombits(atomic.LoadUint64(&coldCounts.sumBits))),
}
+ out.Histogram = his
+ out.Label = h.labelPairs
+
var cumCount uint64
for i, upperBound := range h.upperBounds {
cumCount += atomic.LoadUint64(&coldCounts.buckets[i])
@@ -363,25 +762,21 @@ func (h *histogram) Write(out *dto.Metric) error {
}
his.Bucket = append(his.Bucket, b)
}
-
- out.Histogram = his
- out.Label = h.labelPairs
-
- // Finally add all the cold counts to the new hot counts and reset the cold counts.
- atomic.AddUint64(&hotCounts.count, count)
- atomic.StoreUint64(&coldCounts.count, 0)
- for {
- oldBits := atomic.LoadUint64(&hotCounts.sumBits)
- newBits := math.Float64bits(math.Float64frombits(oldBits) + his.GetSampleSum())
- if atomic.CompareAndSwapUint64(&hotCounts.sumBits, oldBits, newBits) {
- atomic.StoreUint64(&coldCounts.sumBits, 0)
- break
- }
- }
- for i := range h.upperBounds {
- atomic.AddUint64(&hotCounts.buckets[i], atomic.LoadUint64(&coldCounts.buckets[i]))
- atomic.StoreUint64(&coldCounts.buckets[i], 0)
+ if h.nativeHistogramSchema > math.MinInt32 {
+ his.ZeroThreshold = proto.Float64(math.Float64frombits(atomic.LoadUint64(&coldCounts.nativeHistogramZeroThresholdBits)))
+ his.Schema = proto.Int32(atomic.LoadInt32(&coldCounts.nativeHistogramSchema))
+ zeroBucket := atomic.LoadUint64(&coldCounts.nativeHistogramZeroBucket)
+
+ defer func() {
+ coldCounts.nativeHistogramBucketsPositive.Range(addAndReset(&hotCounts.nativeHistogramBucketsPositive, &hotCounts.nativeHistogramBucketsNumber))
+ coldCounts.nativeHistogramBucketsNegative.Range(addAndReset(&hotCounts.nativeHistogramBucketsNegative, &hotCounts.nativeHistogramBucketsNumber))
+ }()
+
+ his.ZeroCount = proto.Uint64(zeroBucket)
+ his.NegativeSpan, his.NegativeDelta = makeBuckets(&coldCounts.nativeHistogramBucketsNegative)
+ his.PositiveSpan, his.PositiveDelta = makeBuckets(&coldCounts.nativeHistogramBucketsPositive)
}
+ addAndResetCounts(hotCounts, coldCounts)
return nil
}
@@ -402,25 +797,216 @@ func (h *histogram) findBucket(v float64) int {
// observe is the implementation for Observe without the findBucket part.
func (h *histogram) observe(v float64, bucket int) {
+ // Do not add to sparse buckets for NaN observations.
+ doSparse := h.nativeHistogramSchema > math.MinInt32 && !math.IsNaN(v)
// We increment h.countAndHotIdx so that the counter in the lower
// 63 bits gets incremented. At the same time, we get the new value
// back, which we can use to find the currently-hot counts.
n := atomic.AddUint64(&h.countAndHotIdx, 1)
hotCounts := h.counts[n>>63]
+ hotCounts.observe(v, bucket, doSparse)
+ if doSparse {
+ h.limitBuckets(hotCounts, v, bucket)
+ }
+}
- if bucket < len(h.upperBounds) {
- atomic.AddUint64(&hotCounts.buckets[bucket], 1)
+// limitSparsebuckets applies a strategy to limit the number of populated sparse
+// buckets. It's generally best effort, and there are situations where the
+// number can go higher (if even the lowest resolution isn't enough to reduce
+// the number sufficiently, or if the provided counts aren't fully updated yet
+// by a concurrently happening Write call).
+func (h *histogram) limitBuckets(counts *histogramCounts, value float64, bucket int) {
+ if h.nativeHistogramMaxBuckets == 0 {
+ return // No limit configured.
}
- for {
- oldBits := atomic.LoadUint64(&hotCounts.sumBits)
- newBits := math.Float64bits(math.Float64frombits(oldBits) + v)
- if atomic.CompareAndSwapUint64(&hotCounts.sumBits, oldBits, newBits) {
- break
+ if h.nativeHistogramMaxBuckets >= atomic.LoadUint32(&counts.nativeHistogramBucketsNumber) {
+ return // Bucket limit not exceeded yet.
+ }
+
+ h.mtx.Lock()
+ defer h.mtx.Unlock()
+
+ // The hot counts might have been swapped just before we acquired the
+ // lock. Re-fetch the hot counts first...
+ n := atomic.LoadUint64(&h.countAndHotIdx)
+ hotIdx := n >> 63
+ coldIdx := (^n) >> 63
+ hotCounts := h.counts[hotIdx]
+ coldCounts := h.counts[coldIdx]
+ // ...and then check again if we really have to reduce the bucket count.
+ if h.nativeHistogramMaxBuckets >= atomic.LoadUint32(&hotCounts.nativeHistogramBucketsNumber) {
+ return // Bucket limit not exceeded after all.
+ }
+ // Try the various strategies in order.
+ if h.maybeReset(hotCounts, coldCounts, coldIdx, value, bucket) {
+ return
+ }
+ if h.maybeWidenZeroBucket(hotCounts, coldCounts) {
+ return
+ }
+ h.doubleBucketWidth(hotCounts, coldCounts)
+}
+
+// maybeReset resests the whole histogram if at least h.nativeHistogramMinResetDuration
+// has been passed. It returns true if the histogram has been reset. The caller
+// must have locked h.mtx.
+func (h *histogram) maybeReset(hot, cold *histogramCounts, coldIdx uint64, value float64, bucket int) bool {
+ // We are using the possibly mocked h.now() rather than
+ // time.Since(h.lastResetTime) to enable testing.
+ if h.nativeHistogramMinResetDuration == 0 || h.now().Sub(h.lastResetTime) < h.nativeHistogramMinResetDuration {
+ return false
+ }
+ // Completely reset coldCounts.
+ h.resetCounts(cold)
+ // Repeat the latest observation to not lose it completely.
+ cold.observe(value, bucket, true)
+ // Make coldCounts the new hot counts while ressetting countAndHotIdx.
+ n := atomic.SwapUint64(&h.countAndHotIdx, (coldIdx<<63)+1)
+ count := n & ((1 << 63) - 1)
+ waitForCooldown(count, hot)
+ // Finally, reset the formerly hot counts, too.
+ h.resetCounts(hot)
+ h.lastResetTime = h.now()
+ return true
+}
+
+// maybeWidenZeroBucket widens the zero bucket until it includes the existing
+// buckets closest to the zero bucket (which could be two, if an equidistant
+// negative and a positive bucket exists, but usually it's only one bucket to be
+// merged into the new wider zero bucket). h.nativeHistogramMaxZeroThreshold
+// limits how far the zero bucket can be extended, and if that's not enough to
+// include an existing bucket, the method returns false. The caller must have
+// locked h.mtx.
+func (h *histogram) maybeWidenZeroBucket(hot, cold *histogramCounts) bool {
+ currentZeroThreshold := math.Float64frombits(atomic.LoadUint64(&hot.nativeHistogramZeroThresholdBits))
+ if currentZeroThreshold >= h.nativeHistogramMaxZeroThreshold {
+ return false
+ }
+ // Find the key of the bucket closest to zero.
+ smallestKey := findSmallestKey(&hot.nativeHistogramBucketsPositive)
+ smallestNegativeKey := findSmallestKey(&hot.nativeHistogramBucketsNegative)
+ if smallestNegativeKey < smallestKey {
+ smallestKey = smallestNegativeKey
+ }
+ if smallestKey == math.MaxInt32 {
+ return false
+ }
+ newZeroThreshold := getLe(smallestKey, atomic.LoadInt32(&hot.nativeHistogramSchema))
+ if newZeroThreshold > h.nativeHistogramMaxZeroThreshold {
+ return false // New threshold would exceed the max threshold.
+ }
+ atomic.StoreUint64(&cold.nativeHistogramZeroThresholdBits, math.Float64bits(newZeroThreshold))
+ // Remove applicable buckets.
+ if _, loaded := cold.nativeHistogramBucketsNegative.LoadAndDelete(smallestKey); loaded {
+ atomicDecUint32(&cold.nativeHistogramBucketsNumber)
+ }
+ if _, loaded := cold.nativeHistogramBucketsPositive.LoadAndDelete(smallestKey); loaded {
+ atomicDecUint32(&cold.nativeHistogramBucketsNumber)
+ }
+ // Make cold counts the new hot counts.
+ n := atomic.AddUint64(&h.countAndHotIdx, 1<<63)
+ count := n & ((1 << 63) - 1)
+ // Swap the pointer names to represent the new roles and make
+ // the rest less confusing.
+ hot, cold = cold, hot
+ waitForCooldown(count, cold)
+ // Add all the now cold counts to the new hot counts...
+ addAndResetCounts(hot, cold)
+ // ...adjust the new zero threshold in the cold counts, too...
+ atomic.StoreUint64(&cold.nativeHistogramZeroThresholdBits, math.Float64bits(newZeroThreshold))
+ // ...and then merge the newly deleted buckets into the wider zero
+ // bucket.
+ mergeAndDeleteOrAddAndReset := func(hotBuckets, coldBuckets *sync.Map) func(k, v interface{}) bool {
+ return func(k, v interface{}) bool {
+ key := k.(int)
+ bucket := v.(*int64)
+ if key == smallestKey {
+ // Merge into hot zero bucket...
+ atomic.AddUint64(&hot.nativeHistogramZeroBucket, uint64(atomic.LoadInt64(bucket)))
+ // ...and delete from cold counts.
+ coldBuckets.Delete(key)
+ atomicDecUint32(&cold.nativeHistogramBucketsNumber)
+ } else {
+ // Add to corresponding hot bucket...
+ if addToBucket(hotBuckets, key, atomic.LoadInt64(bucket)) {
+ atomic.AddUint32(&hot.nativeHistogramBucketsNumber, 1)
+ }
+ // ...and reset cold bucket.
+ atomic.StoreInt64(bucket, 0)
+ }
+ return true
}
}
- // Increment count last as we take it as a signal that the observation
- // is complete.
- atomic.AddUint64(&hotCounts.count, 1)
+
+ cold.nativeHistogramBucketsPositive.Range(mergeAndDeleteOrAddAndReset(&hot.nativeHistogramBucketsPositive, &cold.nativeHistogramBucketsPositive))
+ cold.nativeHistogramBucketsNegative.Range(mergeAndDeleteOrAddAndReset(&hot.nativeHistogramBucketsNegative, &cold.nativeHistogramBucketsNegative))
+ return true
+}
+
+// doubleBucketWidth doubles the bucket width (by decrementing the schema
+// number). Note that very sparse buckets could lead to a low reduction of the
+// bucket count (or even no reduction at all). The method does nothing if the
+// schema is already -4.
+func (h *histogram) doubleBucketWidth(hot, cold *histogramCounts) {
+ coldSchema := atomic.LoadInt32(&cold.nativeHistogramSchema)
+ if coldSchema == -4 {
+ return // Already at lowest resolution.
+ }
+ coldSchema--
+ atomic.StoreInt32(&cold.nativeHistogramSchema, coldSchema)
+ // Play it simple and just delete all cold buckets.
+ atomic.StoreUint32(&cold.nativeHistogramBucketsNumber, 0)
+ deleteSyncMap(&cold.nativeHistogramBucketsNegative)
+ deleteSyncMap(&cold.nativeHistogramBucketsPositive)
+ // Make coldCounts the new hot counts.
+ n := atomic.AddUint64(&h.countAndHotIdx, 1<<63)
+ count := n & ((1 << 63) - 1)
+ // Swap the pointer names to represent the new roles and make
+ // the rest less confusing.
+ hot, cold = cold, hot
+ waitForCooldown(count, cold)
+ // Add all the now cold counts to the new hot counts...
+ addAndResetCounts(hot, cold)
+ // ...adjust the schema in the cold counts, too...
+ atomic.StoreInt32(&cold.nativeHistogramSchema, coldSchema)
+ // ...and then merge the cold buckets into the wider hot buckets.
+ merge := func(hotBuckets *sync.Map) func(k, v interface{}) bool {
+ return func(k, v interface{}) bool {
+ key := k.(int)
+ bucket := v.(*int64)
+ // Adjust key to match the bucket to merge into.
+ if key > 0 {
+ key++
+ }
+ key /= 2
+ // Add to corresponding hot bucket.
+ if addToBucket(hotBuckets, key, atomic.LoadInt64(bucket)) {
+ atomic.AddUint32(&hot.nativeHistogramBucketsNumber, 1)
+ }
+ return true
+ }
+ }
+
+ cold.nativeHistogramBucketsPositive.Range(merge(&hot.nativeHistogramBucketsPositive))
+ cold.nativeHistogramBucketsNegative.Range(merge(&hot.nativeHistogramBucketsNegative))
+ // Play it simple again and just delete all cold buckets.
+ atomic.StoreUint32(&cold.nativeHistogramBucketsNumber, 0)
+ deleteSyncMap(&cold.nativeHistogramBucketsNegative)
+ deleteSyncMap(&cold.nativeHistogramBucketsPositive)
+}
+
+func (h *histogram) resetCounts(counts *histogramCounts) {
+ atomic.StoreUint64(&counts.sumBits, 0)
+ atomic.StoreUint64(&counts.count, 0)
+ atomic.StoreUint64(&counts.nativeHistogramZeroBucket, 0)
+ atomic.StoreUint64(&counts.nativeHistogramZeroThresholdBits, math.Float64bits(h.nativeHistogramZeroThreshold))
+ atomic.StoreInt32(&counts.nativeHistogramSchema, h.nativeHistogramSchema)
+ atomic.StoreUint32(&counts.nativeHistogramBucketsNumber, 0)
+ for i := range h.upperBounds {
+ atomic.StoreUint64(&counts.buckets[i], 0)
+ }
+ deleteSyncMap(&counts.nativeHistogramBucketsNegative)
+ deleteSyncMap(&counts.nativeHistogramBucketsPositive)
}
// updateExemplar replaces the exemplar for the provided bucket. With empty
@@ -516,7 +1102,8 @@ func (v *HistogramVec) GetMetricWith(labels Labels) (Observer, error) {
// WithLabelValues works as GetMetricWithLabelValues, but panics where
// GetMetricWithLabelValues would have returned an error. Not returning an
// error allows shortcuts like
-// myVec.WithLabelValues("404", "GET").Observe(42.21)
+//
+// myVec.WithLabelValues("404", "GET").Observe(42.21)
func (v *HistogramVec) WithLabelValues(lvs ...string) Observer {
h, err := v.GetMetricWithLabelValues(lvs...)
if err != nil {
@@ -527,7 +1114,8 @@ func (v *HistogramVec) WithLabelValues(lvs ...string) Observer {
// With works as GetMetricWith but panics where GetMetricWithLabels would have
// returned an error. Not returning an error allows shortcuts like
-// myVec.With(prometheus.Labels{"code": "404", "method": "GET"}).Observe(42.21)
+//
+// myVec.With(prometheus.Labels{"code": "404", "method": "GET"}).Observe(42.21)
func (v *HistogramVec) With(labels Labels) Observer {
h, err := v.GetMetricWith(labels)
if err != nil {
@@ -613,7 +1201,7 @@ func (h *constHistogram) Write(out *dto.Metric) error {
// to send it to Prometheus in the Collect method.
//
// buckets is a map of upper bounds to cumulative counts, excluding the +Inf
-// bucket.
+// bucket. The +Inf bucket is implicit, and its value is equal to the provided count.
//
// NewConstHistogram returns an error if the length of labelValues is not
// consistent with the variable labels in Desc or if Desc is invalid.
@@ -668,3 +1256,229 @@ func (s buckSort) Swap(i, j int) {
func (s buckSort) Less(i, j int) bool {
return s[i].GetUpperBound() < s[j].GetUpperBound()
}
+
+// pickSchema returns the largest number n between -4 and 8 such that
+// 2^(2^-n) is less or equal the provided bucketFactor.
+//
+// Special cases:
+// - bucketFactor <= 1: panics.
+// - bucketFactor < 2^(2^-8) (but > 1): still returns 8.
+func pickSchema(bucketFactor float64) int32 {
+ if bucketFactor <= 1 {
+ panic(fmt.Errorf("bucketFactor %f is <=1", bucketFactor))
+ }
+ floor := math.Floor(math.Log2(math.Log2(bucketFactor)))
+ switch {
+ case floor <= -8:
+ return 8
+ case floor >= 4:
+ return -4
+ default:
+ return -int32(floor)
+ }
+}
+
+func makeBuckets(buckets *sync.Map) ([]*dto.BucketSpan, []int64) {
+ var ii []int
+ buckets.Range(func(k, v interface{}) bool {
+ ii = append(ii, k.(int))
+ return true
+ })
+ sort.Ints(ii)
+
+ if len(ii) == 0 {
+ return nil, nil
+ }
+
+ var (
+ spans []*dto.BucketSpan
+ deltas []int64
+ prevCount int64
+ nextI int
+ )
+
+ appendDelta := func(count int64) {
+ *spans[len(spans)-1].Length++
+ deltas = append(deltas, count-prevCount)
+ prevCount = count
+ }
+
+ for n, i := range ii {
+ v, _ := buckets.Load(i)
+ count := atomic.LoadInt64(v.(*int64))
+ // Multiple spans with only small gaps in between are probably
+ // encoded more efficiently as one larger span with a few empty
+ // buckets. Needs some research to find the sweet spot. For now,
+ // we assume that gaps of one ore two buckets should not create
+ // a new span.
+ iDelta := int32(i - nextI)
+ if n == 0 || iDelta > 2 {
+ // We have to create a new span, either because we are
+ // at the very beginning, or because we have found a gap
+ // of more than two buckets.
+ spans = append(spans, &dto.BucketSpan{
+ Offset: proto.Int32(iDelta),
+ Length: proto.Uint32(0),
+ })
+ } else {
+ // We have found a small gap (or no gap at all).
+ // Insert empty buckets as needed.
+ for j := int32(0); j < iDelta; j++ {
+ appendDelta(0)
+ }
+ }
+ appendDelta(count)
+ nextI = i + 1
+ }
+ return spans, deltas
+}
+
+// addToBucket increments the sparse bucket at key by the provided amount. It
+// returns true if a new sparse bucket had to be created for that.
+func addToBucket(buckets *sync.Map, key int, increment int64) bool {
+ if existingBucket, ok := buckets.Load(key); ok {
+ // Fast path without allocation.
+ atomic.AddInt64(existingBucket.(*int64), increment)
+ return false
+ }
+ // Bucket doesn't exist yet. Slow path allocating new counter.
+ newBucket := increment // TODO(beorn7): Check if this is sufficient to not let increment escape.
+ if actualBucket, loaded := buckets.LoadOrStore(key, &newBucket); loaded {
+ // The bucket was created concurrently in another goroutine.
+ // Have to increment after all.
+ atomic.AddInt64(actualBucket.(*int64), increment)
+ return false
+ }
+ return true
+}
+
+// addAndReset returns a function to be used with sync.Map.Range of spare
+// buckets in coldCounts. It increments the buckets in the provided hotBuckets
+// according to the buckets ranged through. It then resets all buckets ranged
+// through to 0 (but leaves them in place so that they don't need to get
+// recreated on the next scrape).
+func addAndReset(hotBuckets *sync.Map, bucketNumber *uint32) func(k, v interface{}) bool {
+ return func(k, v interface{}) bool {
+ bucket := v.(*int64)
+ if addToBucket(hotBuckets, k.(int), atomic.LoadInt64(bucket)) {
+ atomic.AddUint32(bucketNumber, 1)
+ }
+ atomic.StoreInt64(bucket, 0)
+ return true
+ }
+}
+
+func deleteSyncMap(m *sync.Map) {
+ m.Range(func(k, v interface{}) bool {
+ m.Delete(k)
+ return true
+ })
+}
+
+func findSmallestKey(m *sync.Map) int {
+ result := math.MaxInt32
+ m.Range(func(k, v interface{}) bool {
+ key := k.(int)
+ if key < result {
+ result = key
+ }
+ return true
+ })
+ return result
+}
+
+func getLe(key int, schema int32) float64 {
+ // Here a bit of context about the behavior for the last bucket counting
+ // regular numbers (called simply "last bucket" below) and the bucket
+ // counting observations of ±Inf (called "inf bucket" below, with a key
+ // one higher than that of the "last bucket"):
+ //
+ // If we apply the usual formula to the last bucket, its upper bound
+ // would be calculated as +Inf. The reason is that the max possible
+ // regular float64 number (math.MaxFloat64) doesn't coincide with one of
+ // the calculated bucket boundaries. So the calculated boundary has to
+ // be larger than math.MaxFloat64, and the only float64 larger than
+ // math.MaxFloat64 is +Inf. However, we want to count actual
+ // observations of ±Inf in the inf bucket. Therefore, we have to treat
+ // the upper bound of the last bucket specially and set it to
+ // math.MaxFloat64. (The upper bound of the inf bucket, with its key
+ // being one higher than that of the last bucket, naturally comes out as
+ // +Inf by the usual formula. So that's fine.)
+ //
+ // math.MaxFloat64 has a frac of 0.9999999999999999 and an exp of
+ // 1024. If there were a float64 number following math.MaxFloat64, it
+ // would have a frac of 1.0 and an exp of 1024, or equivalently a frac
+ // of 0.5 and an exp of 1025. However, since frac must be smaller than
+ // 1, and exp must be smaller than 1025, either representation overflows
+ // a float64. (Which, in turn, is the reason that math.MaxFloat64 is the
+ // largest possible float64. Q.E.D.) However, the formula for
+ // calculating the upper bound from the idx and schema of the last
+ // bucket results in precisely that. It is either frac=1.0 & exp=1024
+ // (for schema < 0) or frac=0.5 & exp=1025 (for schema >=0). (This is,
+ // by the way, a power of two where the exponent itself is a power of
+ // two, 2¹⁰ in fact, which coinicides with a bucket boundary in all
+ // schemas.) So these are the special cases we have to catch below.
+ if schema < 0 {
+ exp := key << -schema
+ if exp == 1024 {
+ // This is the last bucket before the overflow bucket
+ // (for ±Inf observations). Return math.MaxFloat64 as
+ // explained above.
+ return math.MaxFloat64
+ }
+ return math.Ldexp(1, exp)
+ }
+
+ fracIdx := key & ((1 << schema) - 1)
+ frac := nativeHistogramBounds[schema][fracIdx]
+ exp := (key >> schema) + 1
+ if frac == 0.5 && exp == 1025 {
+ // This is the last bucket before the overflow bucket (for ±Inf
+ // observations). Return math.MaxFloat64 as explained above.
+ return math.MaxFloat64
+ }
+ return math.Ldexp(frac, exp)
+}
+
+// waitForCooldown returns after the count field in the provided histogramCounts
+// has reached the provided count value.
+func waitForCooldown(count uint64, counts *histogramCounts) {
+ for count != atomic.LoadUint64(&counts.count) {
+ runtime.Gosched() // Let observations get work done.
+ }
+}
+
+// atomicAddFloat adds the provided float atomically to another float
+// represented by the bit pattern the bits pointer is pointing to.
+func atomicAddFloat(bits *uint64, v float64) {
+ for {
+ loadedBits := atomic.LoadUint64(bits)
+ newBits := math.Float64bits(math.Float64frombits(loadedBits) + v)
+ if atomic.CompareAndSwapUint64(bits, loadedBits, newBits) {
+ break
+ }
+ }
+}
+
+// atomicDecUint32 atomically decrements the uint32 p points to. See
+// https://pkg.go.dev/sync/atomic#AddUint32 to understand how this is done.
+func atomicDecUint32(p *uint32) {
+ atomic.AddUint32(p, ^uint32(0))
+}
+
+// addAndResetCounts adds certain fields (count, sum, conventional buckets, zero
+// bucket) from the cold counts to the corresponding fields in the hot
+// counts. Those fields are then reset to 0 in the cold counts.
+func addAndResetCounts(hot, cold *histogramCounts) {
+ atomic.AddUint64(&hot.count, atomic.LoadUint64(&cold.count))
+ atomic.StoreUint64(&cold.count, 0)
+ coldSum := math.Float64frombits(atomic.LoadUint64(&cold.sumBits))
+ atomicAddFloat(&hot.sumBits, coldSum)
+ atomic.StoreUint64(&cold.sumBits, 0)
+ for i := range hot.buckets {
+ atomic.AddUint64(&hot.buckets[i], atomic.LoadUint64(&cold.buckets[i]))
+ atomic.StoreUint64(&cold.buckets[i], 0)
+ }
+ atomic.AddUint64(&hot.nativeHistogramZeroBucket, atomic.LoadUint64(&cold.nativeHistogramZeroBucket))
+ atomic.StoreUint64(&cold.nativeHistogramZeroBucket, 0)
+}
diff --git a/vendor/github.com/prometheus/client_golang/prometheus/internal/almost_equal.go b/vendor/github.com/prometheus/client_golang/prometheus/internal/almost_equal.go
new file mode 100644
index 000000000..1ed5abe74
--- /dev/null
+++ b/vendor/github.com/prometheus/client_golang/prometheus/internal/almost_equal.go
@@ -0,0 +1,60 @@
+// Copyright (c) 2015 Björn Rabenstein
+//
+// Permission is hereby granted, free of charge, to any person obtaining a copy
+// of this software and associated documentation files (the "Software"), to deal
+// in the Software without restriction, including without limitation the rights
+// to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
+// copies of the Software, and to permit persons to whom the Software is
+// furnished to do so, subject to the following conditions:
+//
+// The above copyright notice and this permission notice shall be included in all
+// copies or substantial portions of the Software.
+//
+// THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+// IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+// FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
+// AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
+// LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
+// OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
+// SOFTWARE.
+//
+// The code in this package is copy/paste to avoid a dependency. Hence this file
+// carries the copyright of the original repo.
+// https://github.com/beorn7/floats
+package internal
+
+import (
+ "math"
+)
+
+// minNormalFloat64 is the smallest positive normal value of type float64.
+var minNormalFloat64 = math.Float64frombits(0x0010000000000000)
+
+// AlmostEqualFloat64 returns true if a and b are equal within a relative error
+// of epsilon. See http://floating-point-gui.de/errors/comparison/ for the
+// details of the applied method.
+func AlmostEqualFloat64(a, b, epsilon float64) bool {
+ if a == b {
+ return true
+ }
+ absA := math.Abs(a)
+ absB := math.Abs(b)
+ diff := math.Abs(a - b)
+ if a == 0 || b == 0 || absA+absB < minNormalFloat64 {
+ return diff < epsilon*minNormalFloat64
+ }
+ return diff/math.Min(absA+absB, math.MaxFloat64) < epsilon
+}
+
+// AlmostEqualFloat64s is the slice form of AlmostEqualFloat64.
+func AlmostEqualFloat64s(a, b []float64, epsilon float64) bool {
+ if len(a) != len(b) {
+ return false
+ }
+ for i := range a {
+ if !AlmostEqualFloat64(a[i], b[i], epsilon) {
+ return false
+ }
+ }
+ return true
+}
diff --git a/vendor/github.com/prometheus/client_golang/prometheus/internal/difflib.go b/vendor/github.com/prometheus/client_golang/prometheus/internal/difflib.go
index fd45cadc0..fd0750f2c 100644
--- a/vendor/github.com/prometheus/client_golang/prometheus/internal/difflib.go
+++ b/vendor/github.com/prometheus/client_golang/prometheus/internal/difflib.go
@@ -201,12 +201,15 @@ func (m *SequenceMatcher) isBJunk(s string) bool {
// If IsJunk is not defined:
//
// Return (i,j,k) such that a[i:i+k] is equal to b[j:j+k], where
-// alo <= i <= i+k <= ahi
-// blo <= j <= j+k <= bhi
+//
+// alo <= i <= i+k <= ahi
+// blo <= j <= j+k <= bhi
+//
// and for all (i',j',k') meeting those conditions,
-// k >= k'
-// i <= i'
-// and if i == i', j <= j'
+//
+// k >= k'
+// i <= i'
+// and if i == i', j <= j'
//
// In other words, of all maximal matching blocks, return one that
// starts earliest in a, and of all those maximal matching blocks that
diff --git a/vendor/github.com/prometheus/client_golang/prometheus/labels.go b/vendor/github.com/prometheus/client_golang/prometheus/labels.go
index 6eee198fe..c1b8fad36 100644
--- a/vendor/github.com/prometheus/client_golang/prometheus/labels.go
+++ b/vendor/github.com/prometheus/client_golang/prometheus/labels.go
@@ -25,7 +25,8 @@ import (
// Labels represents a collection of label name -> value mappings. This type is
// commonly used with the With(Labels) and GetMetricWith(Labels) methods of
// metric vector Collectors, e.g.:
-// myVec.With(Labels{"code": "404", "method": "GET"}).Add(42)
+//
+// myVec.With(Labels{"code": "404", "method": "GET"}).Add(42)
//
// The other use-case is the specification of constant label pairs in Opts or to
// create a Desc.
diff --git a/vendor/github.com/prometheus/client_golang/prometheus/metric.go b/vendor/github.com/prometheus/client_golang/prometheus/metric.go
index f0941f6f0..b5119c504 100644
--- a/vendor/github.com/prometheus/client_golang/prometheus/metric.go
+++ b/vendor/github.com/prometheus/client_golang/prometheus/metric.go
@@ -187,7 +187,7 @@ func (m *withExemplarsMetric) Write(pb *dto.Metric) error {
} else {
// The +Inf bucket should be explicitly added if there is an exemplar for it, similar to non-const histogram logic in https://github.com/prometheus/client_golang/blob/main/prometheus/histogram.go#L357-L365.
b := &dto.Bucket{
- CumulativeCount: proto.Uint64(pb.Histogram.Bucket[len(pb.Histogram.GetBucket())-1].GetCumulativeCount()),
+ CumulativeCount: proto.Uint64(pb.Histogram.GetSampleCount()),
UpperBound: proto.Float64(math.Inf(1)),
Exemplar: e,
}
diff --git a/vendor/github.com/prometheus/client_golang/prometheus/promauto/auto.go b/vendor/github.com/prometheus/client_golang/prometheus/promauto/auto.go
index f8d50d1f9..8031e8704 100644
--- a/vendor/github.com/prometheus/client_golang/prometheus/promauto/auto.go
+++ b/vendor/github.com/prometheus/client_golang/prometheus/promauto/auto.go
@@ -14,114 +14,114 @@
// Package promauto provides alternative constructors for the fundamental
// Prometheus metric types and their …Vec and …Func variants. The difference to
// their counterparts in the prometheus package is that the promauto
-// constructors return Collectors that are already registered with a
-// registry. There are two sets of constructors. The constructors in the first
-// set are top-level functions, while the constructors in the other set are
-// methods of the Factory type. The top-level function return Collectors
-// registered with the global registry (prometheus.DefaultRegisterer), while the
-// methods return Collectors registered with the registry the Factory was
-// constructed with. All constructors panic if the registration fails.
+// constructors register the Collectors with a registry before returning them.
+// There are two sets of constructors. The constructors in the first set are
+// top-level functions, while the constructors in the other set are methods of
+// the Factory type. The top-level function return Collectors registered with
+// the global registry (prometheus.DefaultRegisterer), while the methods return
+// Collectors registered with the registry the Factory was constructed with. All
+// constructors panic if the registration fails.
//
// The following example is a complete program to create a histogram of normally
// distributed random numbers from the math/rand package:
//
-// package main
+// package main
//
-// import (
-// "math/rand"
-// "net/http"
+// import (
+// "math/rand"
+// "net/http"
//
-// "github.com/prometheus/client_golang/prometheus"
-// "github.com/prometheus/client_golang/prometheus/promauto"
-// "github.com/prometheus/client_golang/prometheus/promhttp"
-// )
+// "github.com/prometheus/client_golang/prometheus"
+// "github.com/prometheus/client_golang/prometheus/promauto"
+// "github.com/prometheus/client_golang/prometheus/promhttp"
+// )
//
-// var histogram = promauto.NewHistogram(prometheus.HistogramOpts{
-// Name: "random_numbers",
-// Help: "A histogram of normally distributed random numbers.",
-// Buckets: prometheus.LinearBuckets(-3, .1, 61),
-// })
+// var histogram = promauto.NewHistogram(prometheus.HistogramOpts{
+// Name: "random_numbers",
+// Help: "A histogram of normally distributed random numbers.",
+// Buckets: prometheus.LinearBuckets(-3, .1, 61),
+// })
//
-// func Random() {
-// for {
-// histogram.Observe(rand.NormFloat64())
-// }
-// }
+// func Random() {
+// for {
+// histogram.Observe(rand.NormFloat64())
+// }
+// }
//
-// func main() {
-// go Random()
-// http.Handle("/metrics", promhttp.Handler())
-// http.ListenAndServe(":1971", nil)
-// }
+// func main() {
+// go Random()
+// http.Handle("/metrics", promhttp.Handler())
+// http.ListenAndServe(":1971", nil)
+// }
//
// Prometheus's version of a minimal hello-world program:
//
-// package main
+// package main
//
-// import (
-// "fmt"
-// "net/http"
+// import (
+// "fmt"
+// "net/http"
//
-// "github.com/prometheus/client_golang/prometheus"
-// "github.com/prometheus/client_golang/prometheus/promauto"
-// "github.com/prometheus/client_golang/prometheus/promhttp"
-// )
+// "github.com/prometheus/client_golang/prometheus"
+// "github.com/prometheus/client_golang/prometheus/promauto"
+// "github.com/prometheus/client_golang/prometheus/promhttp"
+// )
//
-// func main() {
-// http.Handle("/", promhttp.InstrumentHandlerCounter(
-// promauto.NewCounterVec(
-// prometheus.CounterOpts{
-// Name: "hello_requests_total",
-// Help: "Total number of hello-world requests by HTTP code.",
-// },
-// []string{"code"},
-// ),
-// http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) {
-// fmt.Fprint(w, "Hello, world!")
-// }),
-// ))
-// http.Handle("/metrics", promhttp.Handler())
-// http.ListenAndServe(":1971", nil)
-// }
+// func main() {
+// http.Handle("/", promhttp.InstrumentHandlerCounter(
+// promauto.NewCounterVec(
+// prometheus.CounterOpts{
+// Name: "hello_requests_total",
+// Help: "Total number of hello-world requests by HTTP code.",
+// },
+// []string{"code"},
+// ),
+// http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) {
+// fmt.Fprint(w, "Hello, world!")
+// }),
+// ))
+// http.Handle("/metrics", promhttp.Handler())
+// http.ListenAndServe(":1971", nil)
+// }
//
// A Factory is created with the With(prometheus.Registerer) function, which
// enables two usage pattern. With(prometheus.Registerer) can be called once per
// line:
//
-// var (
-// reg = prometheus.NewRegistry()
-// randomNumbers = promauto.With(reg).NewHistogram(prometheus.HistogramOpts{
-// Name: "random_numbers",
-// Help: "A histogram of normally distributed random numbers.",
-// Buckets: prometheus.LinearBuckets(-3, .1, 61),
-// })
-// requestCount = promauto.With(reg).NewCounterVec(
-// prometheus.CounterOpts{
-// Name: "http_requests_total",
-// Help: "Total number of HTTP requests by status code and method.",
-// },
-// []string{"code", "method"},
-// )
-// )
+// var (
+// reg = prometheus.NewRegistry()
+// randomNumbers = promauto.With(reg).NewHistogram(prometheus.HistogramOpts{
+// Name: "random_numbers",
+// Help: "A histogram of normally distributed random numbers.",
+// Buckets: prometheus.LinearBuckets(-3, .1, 61),
+// })
+// requestCount = promauto.With(reg).NewCounterVec(
+// prometheus.CounterOpts{
+// Name: "http_requests_total",
+// Help: "Total number of HTTP requests by status code and method.",
+// },
+// []string{"code", "method"},
+// )
+// )
//
// Or it can be used to create a Factory once to be used multiple times:
//
-// var (
-// reg = prometheus.NewRegistry()
-// factory = promauto.With(reg)
-// randomNumbers = factory.NewHistogram(prometheus.HistogramOpts{
-// Name: "random_numbers",
-// Help: "A histogram of normally distributed random numbers.",
-// Buckets: prometheus.LinearBuckets(-3, .1, 61),
-// })
-// requestCount = factory.NewCounterVec(
-// prometheus.CounterOpts{
-// Name: "http_requests_total",
-// Help: "Total number of HTTP requests by status code and method.",
-// },
-// []string{"code", "method"},
-// )
-// )
+// var (
+// reg = prometheus.NewRegistry()
+// factory = promauto.With(reg)
+// randomNumbers = factory.NewHistogram(prometheus.HistogramOpts{
+// Name: "random_numbers",
+// Help: "A histogram of normally distributed random numbers.",
+// Buckets: prometheus.LinearBuckets(-3, .1, 61),
+// })
+// requestCount = factory.NewCounterVec(
+// prometheus.CounterOpts{
+// Name: "http_requests_total",
+// Help: "Total number of HTTP requests by status code and method.",
+// },
+// []string{"code", "method"},
+// )
+// )
//
// This appears very handy. So why are these constructors locked away in a
// separate package?
diff --git a/vendor/github.com/prometheus/client_golang/prometheus/promhttp/instrument_client.go b/vendor/github.com/prometheus/client_golang/prometheus/promhttp/instrument_client.go
index 097aff2df..210867816 100644
--- a/vendor/github.com/prometheus/client_golang/prometheus/promhttp/instrument_client.go
+++ b/vendor/github.com/prometheus/client_golang/prometheus/promhttp/instrument_client.go
@@ -73,12 +73,11 @@ func InstrumentRoundTripperCounter(counter *prometheus.CounterVec, next http.Rou
return func(r *http.Request) (*http.Response, error) {
resp, err := next.RoundTrip(r)
if err == nil {
- exemplarAdd(
+ addWithExemplar(
counter.With(labels(code, method, r.Method, resp.StatusCode, rtOpts.extraMethods...)),
1,
rtOpts.getExemplarFn(r.Context()),
)
- counter.With(labels(code, method, r.Method, resp.StatusCode, rtOpts.extraMethods...)).Inc()
}
return resp, err
}
@@ -117,7 +116,7 @@ func InstrumentRoundTripperDuration(obs prometheus.ObserverVec, next http.RoundT
start := time.Now()
resp, err := next.RoundTrip(r)
if err == nil {
- exemplarObserve(
+ observeWithExemplar(
obs.With(labels(code, method, r.Method, resp.StatusCode, rtOpts.extraMethods...)),
time.Since(start).Seconds(),
rtOpts.getExemplarFn(r.Context()),
diff --git a/vendor/github.com/prometheus/client_golang/prometheus/promhttp/instrument_server.go b/vendor/github.com/prometheus/client_golang/prometheus/promhttp/instrument_server.go
index bfe500987..cca67a78a 100644
--- a/vendor/github.com/prometheus/client_golang/prometheus/promhttp/instrument_server.go
+++ b/vendor/github.com/prometheus/client_golang/prometheus/promhttp/instrument_server.go
@@ -28,7 +28,9 @@ import (
// magicString is used for the hacky label test in checkLabels. Remove once fixed.
const magicString = "zZgWfBxLqvG8kc8IMv3POi2Bb0tZI3vAnBx+gBaFi9FyPzB/CzKUer1yufDa"
-func exemplarObserve(obs prometheus.Observer, val float64, labels map[string]string) {
+// observeWithExemplar is a wrapper for [prometheus.ExemplarAdder.ExemplarObserver],
+// which falls back to [prometheus.Observer.Observe] if no labels are provided.
+func observeWithExemplar(obs prometheus.Observer, val float64, labels map[string]string) {
if labels == nil {
obs.Observe(val)
return
@@ -36,7 +38,9 @@ func exemplarObserve(obs prometheus.Observer, val float64, labels map[string]str
obs.(prometheus.ExemplarObserver).ObserveWithExemplar(val, labels)
}
-func exemplarAdd(obs prometheus.Counter, val float64, labels map[string]string) {
+// addWithExemplar is a wrapper for [prometheus.ExemplarAdder.AddWithExemplar],
+// which falls back to [prometheus.Counter.Add] if no labels are provided.
+func addWithExemplar(obs prometheus.Counter, val float64, labels map[string]string) {
if labels == nil {
obs.Add(val)
return
@@ -91,7 +95,7 @@ func InstrumentHandlerDuration(obs prometheus.ObserverVec, next http.Handler, op
d := newDelegator(w, nil)
next.ServeHTTP(d, r)
- exemplarObserve(
+ observeWithExemplar(
obs.With(labels(code, method, r.Method, d.Status(), hOpts.extraMethods...)),
time.Since(now).Seconds(),
hOpts.getExemplarFn(r.Context()),
@@ -103,7 +107,7 @@ func InstrumentHandlerDuration(obs prometheus.ObserverVec, next http.Handler, op
now := time.Now()
next.ServeHTTP(w, r)
- exemplarObserve(
+ observeWithExemplar(
obs.With(labels(code, method, r.Method, 0, hOpts.extraMethods...)),
time.Since(now).Seconds(),
hOpts.getExemplarFn(r.Context()),
@@ -141,7 +145,7 @@ func InstrumentHandlerCounter(counter *prometheus.CounterVec, next http.Handler,
d := newDelegator(w, nil)
next.ServeHTTP(d, r)
- exemplarAdd(
+ addWithExemplar(
counter.With(labels(code, method, r.Method, d.Status(), hOpts.extraMethods...)),
1,
hOpts.getExemplarFn(r.Context()),
@@ -151,7 +155,7 @@ func InstrumentHandlerCounter(counter *prometheus.CounterVec, next http.Handler,
return func(w http.ResponseWriter, r *http.Request) {
next.ServeHTTP(w, r)
- exemplarAdd(
+ addWithExemplar(
counter.With(labels(code, method, r.Method, 0, hOpts.extraMethods...)),
1,
hOpts.getExemplarFn(r.Context()),
@@ -192,7 +196,7 @@ func InstrumentHandlerTimeToWriteHeader(obs prometheus.ObserverVec, next http.Ha
return func(w http.ResponseWriter, r *http.Request) {
now := time.Now()
d := newDelegator(w, func(status int) {
- exemplarObserve(
+ observeWithExemplar(
obs.With(labels(code, method, r.Method, status, hOpts.extraMethods...)),
time.Since(now).Seconds(),
hOpts.getExemplarFn(r.Context()),
@@ -233,7 +237,7 @@ func InstrumentHandlerRequestSize(obs prometheus.ObserverVec, next http.Handler,
d := newDelegator(w, nil)
next.ServeHTTP(d, r)
size := computeApproximateRequestSize(r)
- exemplarObserve(
+ observeWithExemplar(
obs.With(labels(code, method, r.Method, d.Status(), hOpts.extraMethods...)),
float64(size),
hOpts.getExemplarFn(r.Context()),
@@ -244,7 +248,7 @@ func InstrumentHandlerRequestSize(obs prometheus.ObserverVec, next http.Handler,
return func(w http.ResponseWriter, r *http.Request) {
next.ServeHTTP(w, r)
size := computeApproximateRequestSize(r)
- exemplarObserve(
+ observeWithExemplar(
obs.With(labels(code, method, r.Method, 0, hOpts.extraMethods...)),
float64(size),
hOpts.getExemplarFn(r.Context()),
@@ -282,7 +286,7 @@ func InstrumentHandlerResponseSize(obs prometheus.ObserverVec, next http.Handler
return http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) {
d := newDelegator(w, nil)
next.ServeHTTP(d, r)
- exemplarObserve(
+ observeWithExemplar(
obs.With(labels(code, method, r.Method, d.Status(), hOpts.extraMethods...)),
float64(d.Written()),
hOpts.getExemplarFn(r.Context()),
diff --git a/vendor/github.com/prometheus/client_golang/prometheus/registry.go b/vendor/github.com/prometheus/client_golang/prometheus/registry.go
index 325f665ff..09e34d307 100644
--- a/vendor/github.com/prometheus/client_golang/prometheus/registry.go
+++ b/vendor/github.com/prometheus/client_golang/prometheus/registry.go
@@ -252,9 +252,12 @@ func (errs MultiError) MaybeUnwrap() error {
}
// Registry registers Prometheus collectors, collects their metrics, and gathers
-// them into MetricFamilies for exposition. It implements both Registerer and
-// Gatherer. The zero value is not usable. Create instances with NewRegistry or
-// NewPedanticRegistry.
+// them into MetricFamilies for exposition. It implements Registerer, Gatherer,
+// and Collector. The zero value is not usable. Create instances with
+// NewRegistry or NewPedanticRegistry.
+//
+// Registry implements Collector to allow it to be used for creating groups of
+// metrics. See the Grouping example for how this can be done.
type Registry struct {
mtx sync.RWMutex
collectorsByID map[uint64]Collector // ID is a hash of the descIDs.
@@ -556,6 +559,31 @@ func (r *Registry) Gather() ([]*dto.MetricFamily, error) {
return internal.NormalizeMetricFamilies(metricFamiliesByName), errs.MaybeUnwrap()
}
+// Describe implements Collector.
+func (r *Registry) Describe(ch chan<- *Desc) {
+ r.mtx.RLock()
+ defer r.mtx.RUnlock()
+
+ // Only report the checked Collectors; unchecked collectors don't report any
+ // Desc.
+ for _, c := range r.collectorsByID {
+ c.Describe(ch)
+ }
+}
+
+// Collect implements Collector.
+func (r *Registry) Collect(ch chan<- Metric) {
+ r.mtx.RLock()
+ defer r.mtx.RUnlock()
+
+ for _, c := range r.collectorsByID {
+ c.Collect(ch)
+ }
+ for _, c := range r.uncheckedCollectors {
+ c.Collect(ch)
+ }
+}
+
// WriteToTextfile calls Gather on the provided Gatherer, encodes the result in the
// Prometheus text format, and writes it to a temporary file. Upon success, the
// temporary file is renamed to the provided filename.
diff --git a/vendor/github.com/prometheus/client_golang/prometheus/summary.go b/vendor/github.com/prometheus/client_golang/prometheus/summary.go
index c5fa8ed7c..7bc448a89 100644
--- a/vendor/github.com/prometheus/client_golang/prometheus/summary.go
+++ b/vendor/github.com/prometheus/client_golang/prometheus/summary.go
@@ -603,7 +603,8 @@ func (v *SummaryVec) GetMetricWith(labels Labels) (Observer, error) {
// WithLabelValues works as GetMetricWithLabelValues, but panics where
// GetMetricWithLabelValues would have returned an error. Not returning an
// error allows shortcuts like
-// myVec.WithLabelValues("404", "GET").Observe(42.21)
+//
+// myVec.WithLabelValues("404", "GET").Observe(42.21)
func (v *SummaryVec) WithLabelValues(lvs ...string) Observer {
s, err := v.GetMetricWithLabelValues(lvs...)
if err != nil {
@@ -614,7 +615,8 @@ func (v *SummaryVec) WithLabelValues(lvs ...string) Observer {
// With works as GetMetricWith, but panics where GetMetricWithLabels would have
// returned an error. Not returning an error allows shortcuts like
-// myVec.With(prometheus.Labels{"code": "404", "method": "GET"}).Observe(42.21)
+//
+// myVec.With(prometheus.Labels{"code": "404", "method": "GET"}).Observe(42.21)
func (v *SummaryVec) With(labels Labels) Observer {
s, err := v.GetMetricWith(labels)
if err != nil {
@@ -701,7 +703,8 @@ func (s *constSummary) Write(out *dto.Metric) error {
//
// quantiles maps ranks to quantile values. For example, a median latency of
// 0.23s and a 99th percentile latency of 0.56s would be expressed as:
-// map[float64]float64{0.5: 0.23, 0.99: 0.56}
+//
+// map[float64]float64{0.5: 0.23, 0.99: 0.56}
//
// NewConstSummary returns an error if the length of labelValues is not
// consistent with the variable labels in Desc or if Desc is invalid.
diff --git a/vendor/github.com/prometheus/client_golang/prometheus/timer.go b/vendor/github.com/prometheus/client_golang/prometheus/timer.go
index 8d5f10523..f28a76f3a 100644
--- a/vendor/github.com/prometheus/client_golang/prometheus/timer.go
+++ b/vendor/github.com/prometheus/client_golang/prometheus/timer.go
@@ -25,11 +25,12 @@ type Timer struct {
// NewTimer creates a new Timer. The provided Observer is used to observe a
// duration in seconds. Timer is usually used to time a function call in the
// following way:
-// func TimeMe() {
-// timer := NewTimer(myHistogram)
-// defer timer.ObserveDuration()
-// // Do actual work.
-// }
+//
+// func TimeMe() {
+// timer := NewTimer(myHistogram)
+// defer timer.ObserveDuration()
+// // Do actual work.
+// }
func NewTimer(o Observer) *Timer {
return &Timer{
begin: time.Now(),
diff --git a/vendor/github.com/prometheus/client_model/go/metrics.pb.go b/vendor/github.com/prometheus/client_model/go/metrics.pb.go
index 2f4930d9d..2b5bca4b9 100644
--- a/vendor/github.com/prometheus/client_model/go/metrics.pb.go
+++ b/vendor/github.com/prometheus/client_model/go/metrics.pb.go
@@ -1,51 +1,75 @@
+// Copyright 2013 Prometheus Team
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
// Code generated by protoc-gen-go. DO NOT EDIT.
-// source: metrics.proto
+// versions:
+// protoc-gen-go v1.30.0
+// protoc v3.20.3
+// source: io/prometheus/client/metrics.proto
package io_prometheus_client
import (
- fmt "fmt"
- proto "github.com/golang/protobuf/proto"
- timestamp "github.com/golang/protobuf/ptypes/timestamp"
- math "math"
+ protoreflect "google.golang.org/protobuf/reflect/protoreflect"
+ protoimpl "google.golang.org/protobuf/runtime/protoimpl"
+ timestamppb "google.golang.org/protobuf/types/known/timestamppb"
+ reflect "reflect"
+ sync "sync"
)
-// Reference imports to suppress errors if they are not otherwise used.
-var _ = proto.Marshal
-var _ = fmt.Errorf
-var _ = math.Inf
-
-// This is a compile-time assertion to ensure that this generated file
-// is compatible with the proto package it is being compiled against.
-// A compilation error at this line likely means your copy of the
-// proto package needs to be updated.
-const _ = proto.ProtoPackageIsVersion3 // please upgrade the proto package
+const (
+ // Verify that this generated code is sufficiently up-to-date.
+ _ = protoimpl.EnforceVersion(20 - protoimpl.MinVersion)
+ // Verify that runtime/protoimpl is sufficiently up-to-date.
+ _ = protoimpl.EnforceVersion(protoimpl.MaxVersion - 20)
+)
type MetricType int32
const (
- MetricType_COUNTER MetricType = 0
- MetricType_GAUGE MetricType = 1
- MetricType_SUMMARY MetricType = 2
- MetricType_UNTYPED MetricType = 3
+ // COUNTER must use the Metric field "counter".
+ MetricType_COUNTER MetricType = 0
+ // GAUGE must use the Metric field "gauge".
+ MetricType_GAUGE MetricType = 1
+ // SUMMARY must use the Metric field "summary".
+ MetricType_SUMMARY MetricType = 2
+ // UNTYPED must use the Metric field "untyped".
+ MetricType_UNTYPED MetricType = 3
+ // HISTOGRAM must use the Metric field "histogram".
MetricType_HISTOGRAM MetricType = 4
+ // GAUGE_HISTOGRAM must use the Metric field "histogram".
+ MetricType_GAUGE_HISTOGRAM MetricType = 5
)
-var MetricType_name = map[int32]string{
- 0: "COUNTER",
- 1: "GAUGE",
- 2: "SUMMARY",
- 3: "UNTYPED",
- 4: "HISTOGRAM",
-}
-
-var MetricType_value = map[string]int32{
- "COUNTER": 0,
- "GAUGE": 1,
- "SUMMARY": 2,
- "UNTYPED": 3,
- "HISTOGRAM": 4,
-}
+// Enum value maps for MetricType.
+var (
+ MetricType_name = map[int32]string{
+ 0: "COUNTER",
+ 1: "GAUGE",
+ 2: "SUMMARY",
+ 3: "UNTYPED",
+ 4: "HISTOGRAM",
+ 5: "GAUGE_HISTOGRAM",
+ }
+ MetricType_value = map[string]int32{
+ "COUNTER": 0,
+ "GAUGE": 1,
+ "SUMMARY": 2,
+ "UNTYPED": 3,
+ "HISTOGRAM": 4,
+ "GAUGE_HISTOGRAM": 5,
+ }
+)
func (x MetricType) Enum() *MetricType {
p := new(MetricType)
@@ -54,670 +78,1255 @@ func (x MetricType) Enum() *MetricType {
}
func (x MetricType) String() string {
- return proto.EnumName(MetricType_name, int32(x))
+ return protoimpl.X.EnumStringOf(x.Descriptor(), protoreflect.EnumNumber(x))
+}
+
+func (MetricType) Descriptor() protoreflect.EnumDescriptor {
+ return file_io_prometheus_client_metrics_proto_enumTypes[0].Descriptor()
}
-func (x *MetricType) UnmarshalJSON(data []byte) error {
- value, err := proto.UnmarshalJSONEnum(MetricType_value, data, "MetricType")
+func (MetricType) Type() protoreflect.EnumType {
+ return &file_io_prometheus_client_metrics_proto_enumTypes[0]
+}
+
+func (x MetricType) Number() protoreflect.EnumNumber {
+ return protoreflect.EnumNumber(x)
+}
+
+// Deprecated: Do not use.
+func (x *MetricType) UnmarshalJSON(b []byte) error {
+ num, err := protoimpl.X.UnmarshalJSONEnum(x.Descriptor(), b)
if err != nil {
return err
}
- *x = MetricType(value)
+ *x = MetricType(num)
return nil
}
+// Deprecated: Use MetricType.Descriptor instead.
func (MetricType) EnumDescriptor() ([]byte, []int) {
- return fileDescriptor_6039342a2ba47b72, []int{0}
+ return file_io_prometheus_client_metrics_proto_rawDescGZIP(), []int{0}
}
type LabelPair struct {
- Name *string `protobuf:"bytes,1,opt,name=name" json:"name,omitempty"`
- Value *string `protobuf:"bytes,2,opt,name=value" json:"value,omitempty"`
- XXX_NoUnkeyedLiteral struct{} `json:"-"`
- XXX_unrecognized []byte `json:"-"`
- XXX_sizecache int32 `json:"-"`
-}
+ state protoimpl.MessageState
+ sizeCache protoimpl.SizeCache
+ unknownFields protoimpl.UnknownFields
-func (m *LabelPair) Reset() { *m = LabelPair{} }
-func (m *LabelPair) String() string { return proto.CompactTextString(m) }
-func (*LabelPair) ProtoMessage() {}
-func (*LabelPair) Descriptor() ([]byte, []int) {
- return fileDescriptor_6039342a2ba47b72, []int{0}
+ Name *string `protobuf:"bytes,1,opt,name=name" json:"name,omitempty"`
+ Value *string `protobuf:"bytes,2,opt,name=value" json:"value,omitempty"`
}
-func (m *LabelPair) XXX_Unmarshal(b []byte) error {
- return xxx_messageInfo_LabelPair.Unmarshal(m, b)
-}
-func (m *LabelPair) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) {
- return xxx_messageInfo_LabelPair.Marshal(b, m, deterministic)
-}
-func (m *LabelPair) XXX_Merge(src proto.Message) {
- xxx_messageInfo_LabelPair.Merge(m, src)
+func (x *LabelPair) Reset() {
+ *x = LabelPair{}
+ if protoimpl.UnsafeEnabled {
+ mi := &file_io_prometheus_client_metrics_proto_msgTypes[0]
+ ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
+ ms.StoreMessageInfo(mi)
+ }
}
-func (m *LabelPair) XXX_Size() int {
- return xxx_messageInfo_LabelPair.Size(m)
+
+func (x *LabelPair) String() string {
+ return protoimpl.X.MessageStringOf(x)
}
-func (m *LabelPair) XXX_DiscardUnknown() {
- xxx_messageInfo_LabelPair.DiscardUnknown(m)
+
+func (*LabelPair) ProtoMessage() {}
+
+func (x *LabelPair) ProtoReflect() protoreflect.Message {
+ mi := &file_io_prometheus_client_metrics_proto_msgTypes[0]
+ if protoimpl.UnsafeEnabled && x != nil {
+ ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
+ if ms.LoadMessageInfo() == nil {
+ ms.StoreMessageInfo(mi)
+ }
+ return ms
+ }
+ return mi.MessageOf(x)
}
-var xxx_messageInfo_LabelPair proto.InternalMessageInfo
+// Deprecated: Use LabelPair.ProtoReflect.Descriptor instead.
+func (*LabelPair) Descriptor() ([]byte, []int) {
+ return file_io_prometheus_client_metrics_proto_rawDescGZIP(), []int{0}
+}
-func (m *LabelPair) GetName() string {
- if m != nil && m.Name != nil {
- return *m.Name
+func (x *LabelPair) GetName() string {
+ if x != nil && x.Name != nil {
+ return *x.Name
}
return ""
}
-func (m *LabelPair) GetValue() string {
- if m != nil && m.Value != nil {
- return *m.Value
+func (x *LabelPair) GetValue() string {
+ if x != nil && x.Value != nil {
+ return *x.Value
}
return ""
}
type Gauge struct {
- Value *float64 `protobuf:"fixed64,1,opt,name=value" json:"value,omitempty"`
- XXX_NoUnkeyedLiteral struct{} `json:"-"`
- XXX_unrecognized []byte `json:"-"`
- XXX_sizecache int32 `json:"-"`
-}
+ state protoimpl.MessageState
+ sizeCache protoimpl.SizeCache
+ unknownFields protoimpl.UnknownFields
-func (m *Gauge) Reset() { *m = Gauge{} }
-func (m *Gauge) String() string { return proto.CompactTextString(m) }
-func (*Gauge) ProtoMessage() {}
-func (*Gauge) Descriptor() ([]byte, []int) {
- return fileDescriptor_6039342a2ba47b72, []int{1}
+ Value *float64 `protobuf:"fixed64,1,opt,name=value" json:"value,omitempty"`
}
-func (m *Gauge) XXX_Unmarshal(b []byte) error {
- return xxx_messageInfo_Gauge.Unmarshal(m, b)
-}
-func (m *Gauge) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) {
- return xxx_messageInfo_Gauge.Marshal(b, m, deterministic)
-}
-func (m *Gauge) XXX_Merge(src proto.Message) {
- xxx_messageInfo_Gauge.Merge(m, src)
+func (x *Gauge) Reset() {
+ *x = Gauge{}
+ if protoimpl.UnsafeEnabled {
+ mi := &file_io_prometheus_client_metrics_proto_msgTypes[1]
+ ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
+ ms.StoreMessageInfo(mi)
+ }
}
-func (m *Gauge) XXX_Size() int {
- return xxx_messageInfo_Gauge.Size(m)
+
+func (x *Gauge) String() string {
+ return protoimpl.X.MessageStringOf(x)
}
-func (m *Gauge) XXX_DiscardUnknown() {
- xxx_messageInfo_Gauge.DiscardUnknown(m)
+
+func (*Gauge) ProtoMessage() {}
+
+func (x *Gauge) ProtoReflect() protoreflect.Message {
+ mi := &file_io_prometheus_client_metrics_proto_msgTypes[1]
+ if protoimpl.UnsafeEnabled && x != nil {
+ ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
+ if ms.LoadMessageInfo() == nil {
+ ms.StoreMessageInfo(mi)
+ }
+ return ms
+ }
+ return mi.MessageOf(x)
}
-var xxx_messageInfo_Gauge proto.InternalMessageInfo
+// Deprecated: Use Gauge.ProtoReflect.Descriptor instead.
+func (*Gauge) Descriptor() ([]byte, []int) {
+ return file_io_prometheus_client_metrics_proto_rawDescGZIP(), []int{1}
+}
-func (m *Gauge) GetValue() float64 {
- if m != nil && m.Value != nil {
- return *m.Value
+func (x *Gauge) GetValue() float64 {
+ if x != nil && x.Value != nil {
+ return *x.Value
}
return 0
}
type Counter struct {
- Value *float64 `protobuf:"fixed64,1,opt,name=value" json:"value,omitempty"`
- Exemplar *Exemplar `protobuf:"bytes,2,opt,name=exemplar" json:"exemplar,omitempty"`
- XXX_NoUnkeyedLiteral struct{} `json:"-"`
- XXX_unrecognized []byte `json:"-"`
- XXX_sizecache int32 `json:"-"`
-}
+ state protoimpl.MessageState
+ sizeCache protoimpl.SizeCache
+ unknownFields protoimpl.UnknownFields
-func (m *Counter) Reset() { *m = Counter{} }
-func (m *Counter) String() string { return proto.CompactTextString(m) }
-func (*Counter) ProtoMessage() {}
-func (*Counter) Descriptor() ([]byte, []int) {
- return fileDescriptor_6039342a2ba47b72, []int{2}
+ Value *float64 `protobuf:"fixed64,1,opt,name=value" json:"value,omitempty"`
+ Exemplar *Exemplar `protobuf:"bytes,2,opt,name=exemplar" json:"exemplar,omitempty"`
}
-func (m *Counter) XXX_Unmarshal(b []byte) error {
- return xxx_messageInfo_Counter.Unmarshal(m, b)
-}
-func (m *Counter) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) {
- return xxx_messageInfo_Counter.Marshal(b, m, deterministic)
-}
-func (m *Counter) XXX_Merge(src proto.Message) {
- xxx_messageInfo_Counter.Merge(m, src)
+func (x *Counter) Reset() {
+ *x = Counter{}
+ if protoimpl.UnsafeEnabled {
+ mi := &file_io_prometheus_client_metrics_proto_msgTypes[2]
+ ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
+ ms.StoreMessageInfo(mi)
+ }
}
-func (m *Counter) XXX_Size() int {
- return xxx_messageInfo_Counter.Size(m)
+
+func (x *Counter) String() string {
+ return protoimpl.X.MessageStringOf(x)
}
-func (m *Counter) XXX_DiscardUnknown() {
- xxx_messageInfo_Counter.DiscardUnknown(m)
+
+func (*Counter) ProtoMessage() {}
+
+func (x *Counter) ProtoReflect() protoreflect.Message {
+ mi := &file_io_prometheus_client_metrics_proto_msgTypes[2]
+ if protoimpl.UnsafeEnabled && x != nil {
+ ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
+ if ms.LoadMessageInfo() == nil {
+ ms.StoreMessageInfo(mi)
+ }
+ return ms
+ }
+ return mi.MessageOf(x)
}
-var xxx_messageInfo_Counter proto.InternalMessageInfo
+// Deprecated: Use Counter.ProtoReflect.Descriptor instead.
+func (*Counter) Descriptor() ([]byte, []int) {
+ return file_io_prometheus_client_metrics_proto_rawDescGZIP(), []int{2}
+}
-func (m *Counter) GetValue() float64 {
- if m != nil && m.Value != nil {
- return *m.Value
+func (x *Counter) GetValue() float64 {
+ if x != nil && x.Value != nil {
+ return *x.Value
}
return 0
}
-func (m *Counter) GetExemplar() *Exemplar {
- if m != nil {
- return m.Exemplar
+func (x *Counter) GetExemplar() *Exemplar {
+ if x != nil {
+ return x.Exemplar
}
return nil
}
type Quantile struct {
- Quantile *float64 `protobuf:"fixed64,1,opt,name=quantile" json:"quantile,omitempty"`
- Value *float64 `protobuf:"fixed64,2,opt,name=value" json:"value,omitempty"`
- XXX_NoUnkeyedLiteral struct{} `json:"-"`
- XXX_unrecognized []byte `json:"-"`
- XXX_sizecache int32 `json:"-"`
-}
+ state protoimpl.MessageState
+ sizeCache protoimpl.SizeCache
+ unknownFields protoimpl.UnknownFields
-func (m *Quantile) Reset() { *m = Quantile{} }
-func (m *Quantile) String() string { return proto.CompactTextString(m) }
-func (*Quantile) ProtoMessage() {}
-func (*Quantile) Descriptor() ([]byte, []int) {
- return fileDescriptor_6039342a2ba47b72, []int{3}
+ Quantile *float64 `protobuf:"fixed64,1,opt,name=quantile" json:"quantile,omitempty"`
+ Value *float64 `protobuf:"fixed64,2,opt,name=value" json:"value,omitempty"`
}
-func (m *Quantile) XXX_Unmarshal(b []byte) error {
- return xxx_messageInfo_Quantile.Unmarshal(m, b)
-}
-func (m *Quantile) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) {
- return xxx_messageInfo_Quantile.Marshal(b, m, deterministic)
-}
-func (m *Quantile) XXX_Merge(src proto.Message) {
- xxx_messageInfo_Quantile.Merge(m, src)
+func (x *Quantile) Reset() {
+ *x = Quantile{}
+ if protoimpl.UnsafeEnabled {
+ mi := &file_io_prometheus_client_metrics_proto_msgTypes[3]
+ ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
+ ms.StoreMessageInfo(mi)
+ }
}
-func (m *Quantile) XXX_Size() int {
- return xxx_messageInfo_Quantile.Size(m)
+
+func (x *Quantile) String() string {
+ return protoimpl.X.MessageStringOf(x)
}
-func (m *Quantile) XXX_DiscardUnknown() {
- xxx_messageInfo_Quantile.DiscardUnknown(m)
+
+func (*Quantile) ProtoMessage() {}
+
+func (x *Quantile) ProtoReflect() protoreflect.Message {
+ mi := &file_io_prometheus_client_metrics_proto_msgTypes[3]
+ if protoimpl.UnsafeEnabled && x != nil {
+ ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
+ if ms.LoadMessageInfo() == nil {
+ ms.StoreMessageInfo(mi)
+ }
+ return ms
+ }
+ return mi.MessageOf(x)
}
-var xxx_messageInfo_Quantile proto.InternalMessageInfo
+// Deprecated: Use Quantile.ProtoReflect.Descriptor instead.
+func (*Quantile) Descriptor() ([]byte, []int) {
+ return file_io_prometheus_client_metrics_proto_rawDescGZIP(), []int{3}
+}
-func (m *Quantile) GetQuantile() float64 {
- if m != nil && m.Quantile != nil {
- return *m.Quantile
+func (x *Quantile) GetQuantile() float64 {
+ if x != nil && x.Quantile != nil {
+ return *x.Quantile
}
return 0
}
-func (m *Quantile) GetValue() float64 {
- if m != nil && m.Value != nil {
- return *m.Value
+func (x *Quantile) GetValue() float64 {
+ if x != nil && x.Value != nil {
+ return *x.Value
}
return 0
}
type Summary struct {
- SampleCount *uint64 `protobuf:"varint,1,opt,name=sample_count,json=sampleCount" json:"sample_count,omitempty"`
- SampleSum *float64 `protobuf:"fixed64,2,opt,name=sample_sum,json=sampleSum" json:"sample_sum,omitempty"`
- Quantile []*Quantile `protobuf:"bytes,3,rep,name=quantile" json:"quantile,omitempty"`
- XXX_NoUnkeyedLiteral struct{} `json:"-"`
- XXX_unrecognized []byte `json:"-"`
- XXX_sizecache int32 `json:"-"`
-}
+ state protoimpl.MessageState
+ sizeCache protoimpl.SizeCache
+ unknownFields protoimpl.UnknownFields
-func (m *Summary) Reset() { *m = Summary{} }
-func (m *Summary) String() string { return proto.CompactTextString(m) }
-func (*Summary) ProtoMessage() {}
-func (*Summary) Descriptor() ([]byte, []int) {
- return fileDescriptor_6039342a2ba47b72, []int{4}
+ SampleCount *uint64 `protobuf:"varint,1,opt,name=sample_count,json=sampleCount" json:"sample_count,omitempty"`
+ SampleSum *float64 `protobuf:"fixed64,2,opt,name=sample_sum,json=sampleSum" json:"sample_sum,omitempty"`
+ Quantile []*Quantile `protobuf:"bytes,3,rep,name=quantile" json:"quantile,omitempty"`
}
-func (m *Summary) XXX_Unmarshal(b []byte) error {
- return xxx_messageInfo_Summary.Unmarshal(m, b)
-}
-func (m *Summary) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) {
- return xxx_messageInfo_Summary.Marshal(b, m, deterministic)
-}
-func (m *Summary) XXX_Merge(src proto.Message) {
- xxx_messageInfo_Summary.Merge(m, src)
+func (x *Summary) Reset() {
+ *x = Summary{}
+ if protoimpl.UnsafeEnabled {
+ mi := &file_io_prometheus_client_metrics_proto_msgTypes[4]
+ ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
+ ms.StoreMessageInfo(mi)
+ }
}
-func (m *Summary) XXX_Size() int {
- return xxx_messageInfo_Summary.Size(m)
+
+func (x *Summary) String() string {
+ return protoimpl.X.MessageStringOf(x)
}
-func (m *Summary) XXX_DiscardUnknown() {
- xxx_messageInfo_Summary.DiscardUnknown(m)
+
+func (*Summary) ProtoMessage() {}
+
+func (x *Summary) ProtoReflect() protoreflect.Message {
+ mi := &file_io_prometheus_client_metrics_proto_msgTypes[4]
+ if protoimpl.UnsafeEnabled && x != nil {
+ ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
+ if ms.LoadMessageInfo() == nil {
+ ms.StoreMessageInfo(mi)
+ }
+ return ms
+ }
+ return mi.MessageOf(x)
}
-var xxx_messageInfo_Summary proto.InternalMessageInfo
+// Deprecated: Use Summary.ProtoReflect.Descriptor instead.
+func (*Summary) Descriptor() ([]byte, []int) {
+ return file_io_prometheus_client_metrics_proto_rawDescGZIP(), []int{4}
+}
-func (m *Summary) GetSampleCount() uint64 {
- if m != nil && m.SampleCount != nil {
- return *m.SampleCount
+func (x *Summary) GetSampleCount() uint64 {
+ if x != nil && x.SampleCount != nil {
+ return *x.SampleCount
}
return 0
}
-func (m *Summary) GetSampleSum() float64 {
- if m != nil && m.SampleSum != nil {
- return *m.SampleSum
+func (x *Summary) GetSampleSum() float64 {
+ if x != nil && x.SampleSum != nil {
+ return *x.SampleSum
}
return 0
}
-func (m *Summary) GetQuantile() []*Quantile {
- if m != nil {
- return m.Quantile
+func (x *Summary) GetQuantile() []*Quantile {
+ if x != nil {
+ return x.Quantile
}
return nil
}
type Untyped struct {
- Value *float64 `protobuf:"fixed64,1,opt,name=value" json:"value,omitempty"`
- XXX_NoUnkeyedLiteral struct{} `json:"-"`
- XXX_unrecognized []byte `json:"-"`
- XXX_sizecache int32 `json:"-"`
-}
+ state protoimpl.MessageState
+ sizeCache protoimpl.SizeCache
+ unknownFields protoimpl.UnknownFields
-func (m *Untyped) Reset() { *m = Untyped{} }
-func (m *Untyped) String() string { return proto.CompactTextString(m) }
-func (*Untyped) ProtoMessage() {}
-func (*Untyped) Descriptor() ([]byte, []int) {
- return fileDescriptor_6039342a2ba47b72, []int{5}
+ Value *float64 `protobuf:"fixed64,1,opt,name=value" json:"value,omitempty"`
}
-func (m *Untyped) XXX_Unmarshal(b []byte) error {
- return xxx_messageInfo_Untyped.Unmarshal(m, b)
-}
-func (m *Untyped) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) {
- return xxx_messageInfo_Untyped.Marshal(b, m, deterministic)
-}
-func (m *Untyped) XXX_Merge(src proto.Message) {
- xxx_messageInfo_Untyped.Merge(m, src)
+func (x *Untyped) Reset() {
+ *x = Untyped{}
+ if protoimpl.UnsafeEnabled {
+ mi := &file_io_prometheus_client_metrics_proto_msgTypes[5]
+ ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
+ ms.StoreMessageInfo(mi)
+ }
}
-func (m *Untyped) XXX_Size() int {
- return xxx_messageInfo_Untyped.Size(m)
+
+func (x *Untyped) String() string {
+ return protoimpl.X.MessageStringOf(x)
}
-func (m *Untyped) XXX_DiscardUnknown() {
- xxx_messageInfo_Untyped.DiscardUnknown(m)
+
+func (*Untyped) ProtoMessage() {}
+
+func (x *Untyped) ProtoReflect() protoreflect.Message {
+ mi := &file_io_prometheus_client_metrics_proto_msgTypes[5]
+ if protoimpl.UnsafeEnabled && x != nil {
+ ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
+ if ms.LoadMessageInfo() == nil {
+ ms.StoreMessageInfo(mi)
+ }
+ return ms
+ }
+ return mi.MessageOf(x)
}
-var xxx_messageInfo_Untyped proto.InternalMessageInfo
+// Deprecated: Use Untyped.ProtoReflect.Descriptor instead.
+func (*Untyped) Descriptor() ([]byte, []int) {
+ return file_io_prometheus_client_metrics_proto_rawDescGZIP(), []int{5}
+}
-func (m *Untyped) GetValue() float64 {
- if m != nil && m.Value != nil {
- return *m.Value
+func (x *Untyped) GetValue() float64 {
+ if x != nil && x.Value != nil {
+ return *x.Value
}
return 0
}
type Histogram struct {
- SampleCount *uint64 `protobuf:"varint,1,opt,name=sample_count,json=sampleCount" json:"sample_count,omitempty"`
- SampleSum *float64 `protobuf:"fixed64,2,opt,name=sample_sum,json=sampleSum" json:"sample_sum,omitempty"`
- Bucket []*Bucket `protobuf:"bytes,3,rep,name=bucket" json:"bucket,omitempty"`
- XXX_NoUnkeyedLiteral struct{} `json:"-"`
- XXX_unrecognized []byte `json:"-"`
- XXX_sizecache int32 `json:"-"`
+ state protoimpl.MessageState
+ sizeCache protoimpl.SizeCache
+ unknownFields protoimpl.UnknownFields
+
+ SampleCount *uint64 `protobuf:"varint,1,opt,name=sample_count,json=sampleCount" json:"sample_count,omitempty"`
+ SampleCountFloat *float64 `protobuf:"fixed64,4,opt,name=sample_count_float,json=sampleCountFloat" json:"sample_count_float,omitempty"` // Overrides sample_count if > 0.
+ SampleSum *float64 `protobuf:"fixed64,2,opt,name=sample_sum,json=sampleSum" json:"sample_sum,omitempty"`
+ // Buckets for the conventional histogram.
+ Bucket []*Bucket `protobuf:"bytes,3,rep,name=bucket" json:"bucket,omitempty"` // Ordered in increasing order of upper_bound, +Inf bucket is optional.
+ // schema defines the bucket schema. Currently, valid numbers are -4 <= n <= 8.
+ // They are all for base-2 bucket schemas, where 1 is a bucket boundary in each case, and
+ // then each power of two is divided into 2^n logarithmic buckets.
+ // Or in other words, each bucket boundary is the previous boundary times 2^(2^-n).
+ // In the future, more bucket schemas may be added using numbers < -4 or > 8.
+ Schema *int32 `protobuf:"zigzag32,5,opt,name=schema" json:"schema,omitempty"`
+ ZeroThreshold *float64 `protobuf:"fixed64,6,opt,name=zero_threshold,json=zeroThreshold" json:"zero_threshold,omitempty"` // Breadth of the zero bucket.
+ ZeroCount *uint64 `protobuf:"varint,7,opt,name=zero_count,json=zeroCount" json:"zero_count,omitempty"` // Count in zero bucket.
+ ZeroCountFloat *float64 `protobuf:"fixed64,8,opt,name=zero_count_float,json=zeroCountFloat" json:"zero_count_float,omitempty"` // Overrides sb_zero_count if > 0.
+ // Negative buckets for the native histogram.
+ NegativeSpan []*BucketSpan `protobuf:"bytes,9,rep,name=negative_span,json=negativeSpan" json:"negative_span,omitempty"`
+ // Use either "negative_delta" or "negative_count", the former for
+ // regular histograms with integer counts, the latter for float
+ // histograms.
+ NegativeDelta []int64 `protobuf:"zigzag64,10,rep,name=negative_delta,json=negativeDelta" json:"negative_delta,omitempty"` // Count delta of each bucket compared to previous one (or to zero for 1st bucket).
+ NegativeCount []float64 `protobuf:"fixed64,11,rep,name=negative_count,json=negativeCount" json:"negative_count,omitempty"` // Absolute count of each bucket.
+ // Positive buckets for the native histogram.
+ PositiveSpan []*BucketSpan `protobuf:"bytes,12,rep,name=positive_span,json=positiveSpan" json:"positive_span,omitempty"`
+ // Use either "positive_delta" or "positive_count", the former for
+ // regular histograms with integer counts, the latter for float
+ // histograms.
+ PositiveDelta []int64 `protobuf:"zigzag64,13,rep,name=positive_delta,json=positiveDelta" json:"positive_delta,omitempty"` // Count delta of each bucket compared to previous one (or to zero for 1st bucket).
+ PositiveCount []float64 `protobuf:"fixed64,14,rep,name=positive_count,json=positiveCount" json:"positive_count,omitempty"` // Absolute count of each bucket.
+}
+
+func (x *Histogram) Reset() {
+ *x = Histogram{}
+ if protoimpl.UnsafeEnabled {
+ mi := &file_io_prometheus_client_metrics_proto_msgTypes[6]
+ ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
+ ms.StoreMessageInfo(mi)
+ }
+}
+
+func (x *Histogram) String() string {
+ return protoimpl.X.MessageStringOf(x)
+}
+
+func (*Histogram) ProtoMessage() {}
+
+func (x *Histogram) ProtoReflect() protoreflect.Message {
+ mi := &file_io_prometheus_client_metrics_proto_msgTypes[6]
+ if protoimpl.UnsafeEnabled && x != nil {
+ ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
+ if ms.LoadMessageInfo() == nil {
+ ms.StoreMessageInfo(mi)
+ }
+ return ms
+ }
+ return mi.MessageOf(x)
}
-func (m *Histogram) Reset() { *m = Histogram{} }
-func (m *Histogram) String() string { return proto.CompactTextString(m) }
-func (*Histogram) ProtoMessage() {}
+// Deprecated: Use Histogram.ProtoReflect.Descriptor instead.
func (*Histogram) Descriptor() ([]byte, []int) {
- return fileDescriptor_6039342a2ba47b72, []int{6}
+ return file_io_prometheus_client_metrics_proto_rawDescGZIP(), []int{6}
}
-func (m *Histogram) XXX_Unmarshal(b []byte) error {
- return xxx_messageInfo_Histogram.Unmarshal(m, b)
+func (x *Histogram) GetSampleCount() uint64 {
+ if x != nil && x.SampleCount != nil {
+ return *x.SampleCount
+ }
+ return 0
}
-func (m *Histogram) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) {
- return xxx_messageInfo_Histogram.Marshal(b, m, deterministic)
+
+func (x *Histogram) GetSampleCountFloat() float64 {
+ if x != nil && x.SampleCountFloat != nil {
+ return *x.SampleCountFloat
+ }
+ return 0
}
-func (m *Histogram) XXX_Merge(src proto.Message) {
- xxx_messageInfo_Histogram.Merge(m, src)
+
+func (x *Histogram) GetSampleSum() float64 {
+ if x != nil && x.SampleSum != nil {
+ return *x.SampleSum
+ }
+ return 0
}
-func (m *Histogram) XXX_Size() int {
- return xxx_messageInfo_Histogram.Size(m)
+
+func (x *Histogram) GetBucket() []*Bucket {
+ if x != nil {
+ return x.Bucket
+ }
+ return nil
}
-func (m *Histogram) XXX_DiscardUnknown() {
- xxx_messageInfo_Histogram.DiscardUnknown(m)
+
+func (x *Histogram) GetSchema() int32 {
+ if x != nil && x.Schema != nil {
+ return *x.Schema
+ }
+ return 0
}
-var xxx_messageInfo_Histogram proto.InternalMessageInfo
+func (x *Histogram) GetZeroThreshold() float64 {
+ if x != nil && x.ZeroThreshold != nil {
+ return *x.ZeroThreshold
+ }
+ return 0
+}
-func (m *Histogram) GetSampleCount() uint64 {
- if m != nil && m.SampleCount != nil {
- return *m.SampleCount
+func (x *Histogram) GetZeroCount() uint64 {
+ if x != nil && x.ZeroCount != nil {
+ return *x.ZeroCount
}
return 0
}
-func (m *Histogram) GetSampleSum() float64 {
- if m != nil && m.SampleSum != nil {
- return *m.SampleSum
+func (x *Histogram) GetZeroCountFloat() float64 {
+ if x != nil && x.ZeroCountFloat != nil {
+ return *x.ZeroCountFloat
}
return 0
}
-func (m *Histogram) GetBucket() []*Bucket {
- if m != nil {
- return m.Bucket
+func (x *Histogram) GetNegativeSpan() []*BucketSpan {
+ if x != nil {
+ return x.NegativeSpan
}
return nil
}
-type Bucket struct {
- CumulativeCount *uint64 `protobuf:"varint,1,opt,name=cumulative_count,json=cumulativeCount" json:"cumulative_count,omitempty"`
- UpperBound *float64 `protobuf:"fixed64,2,opt,name=upper_bound,json=upperBound" json:"upper_bound,omitempty"`
- Exemplar *Exemplar `protobuf:"bytes,3,opt,name=exemplar" json:"exemplar,omitempty"`
- XXX_NoUnkeyedLiteral struct{} `json:"-"`
- XXX_unrecognized []byte `json:"-"`
- XXX_sizecache int32 `json:"-"`
+func (x *Histogram) GetNegativeDelta() []int64 {
+ if x != nil {
+ return x.NegativeDelta
+ }
+ return nil
}
-func (m *Bucket) Reset() { *m = Bucket{} }
-func (m *Bucket) String() string { return proto.CompactTextString(m) }
-func (*Bucket) ProtoMessage() {}
-func (*Bucket) Descriptor() ([]byte, []int) {
- return fileDescriptor_6039342a2ba47b72, []int{7}
+func (x *Histogram) GetNegativeCount() []float64 {
+ if x != nil {
+ return x.NegativeCount
+ }
+ return nil
+}
+
+func (x *Histogram) GetPositiveSpan() []*BucketSpan {
+ if x != nil {
+ return x.PositiveSpan
+ }
+ return nil
+}
+
+func (x *Histogram) GetPositiveDelta() []int64 {
+ if x != nil {
+ return x.PositiveDelta
+ }
+ return nil
+}
+
+func (x *Histogram) GetPositiveCount() []float64 {
+ if x != nil {
+ return x.PositiveCount
+ }
+ return nil
}
-func (m *Bucket) XXX_Unmarshal(b []byte) error {
- return xxx_messageInfo_Bucket.Unmarshal(m, b)
+// A Bucket of a conventional histogram, each of which is treated as
+// an individual counter-like time series by Prometheus.
+type Bucket struct {
+ state protoimpl.MessageState
+ sizeCache protoimpl.SizeCache
+ unknownFields protoimpl.UnknownFields
+
+ CumulativeCount *uint64 `protobuf:"varint,1,opt,name=cumulative_count,json=cumulativeCount" json:"cumulative_count,omitempty"` // Cumulative in increasing order.
+ CumulativeCountFloat *float64 `protobuf:"fixed64,4,opt,name=cumulative_count_float,json=cumulativeCountFloat" json:"cumulative_count_float,omitempty"` // Overrides cumulative_count if > 0.
+ UpperBound *float64 `protobuf:"fixed64,2,opt,name=upper_bound,json=upperBound" json:"upper_bound,omitempty"` // Inclusive.
+ Exemplar *Exemplar `protobuf:"bytes,3,opt,name=exemplar" json:"exemplar,omitempty"`
}
-func (m *Bucket) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) {
- return xxx_messageInfo_Bucket.Marshal(b, m, deterministic)
+
+func (x *Bucket) Reset() {
+ *x = Bucket{}
+ if protoimpl.UnsafeEnabled {
+ mi := &file_io_prometheus_client_metrics_proto_msgTypes[7]
+ ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
+ ms.StoreMessageInfo(mi)
+ }
}
-func (m *Bucket) XXX_Merge(src proto.Message) {
- xxx_messageInfo_Bucket.Merge(m, src)
+
+func (x *Bucket) String() string {
+ return protoimpl.X.MessageStringOf(x)
}
-func (m *Bucket) XXX_Size() int {
- return xxx_messageInfo_Bucket.Size(m)
+
+func (*Bucket) ProtoMessage() {}
+
+func (x *Bucket) ProtoReflect() protoreflect.Message {
+ mi := &file_io_prometheus_client_metrics_proto_msgTypes[7]
+ if protoimpl.UnsafeEnabled && x != nil {
+ ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
+ if ms.LoadMessageInfo() == nil {
+ ms.StoreMessageInfo(mi)
+ }
+ return ms
+ }
+ return mi.MessageOf(x)
}
-func (m *Bucket) XXX_DiscardUnknown() {
- xxx_messageInfo_Bucket.DiscardUnknown(m)
+
+// Deprecated: Use Bucket.ProtoReflect.Descriptor instead.
+func (*Bucket) Descriptor() ([]byte, []int) {
+ return file_io_prometheus_client_metrics_proto_rawDescGZIP(), []int{7}
}
-var xxx_messageInfo_Bucket proto.InternalMessageInfo
+func (x *Bucket) GetCumulativeCount() uint64 {
+ if x != nil && x.CumulativeCount != nil {
+ return *x.CumulativeCount
+ }
+ return 0
+}
-func (m *Bucket) GetCumulativeCount() uint64 {
- if m != nil && m.CumulativeCount != nil {
- return *m.CumulativeCount
+func (x *Bucket) GetCumulativeCountFloat() float64 {
+ if x != nil && x.CumulativeCountFloat != nil {
+ return *x.CumulativeCountFloat
}
return 0
}
-func (m *Bucket) GetUpperBound() float64 {
- if m != nil && m.UpperBound != nil {
- return *m.UpperBound
+func (x *Bucket) GetUpperBound() float64 {
+ if x != nil && x.UpperBound != nil {
+ return *x.UpperBound
}
return 0
}
-func (m *Bucket) GetExemplar() *Exemplar {
- if m != nil {
- return m.Exemplar
+func (x *Bucket) GetExemplar() *Exemplar {
+ if x != nil {
+ return x.Exemplar
}
return nil
}
-type Exemplar struct {
- Label []*LabelPair `protobuf:"bytes,1,rep,name=label" json:"label,omitempty"`
- Value *float64 `protobuf:"fixed64,2,opt,name=value" json:"value,omitempty"`
- Timestamp *timestamp.Timestamp `protobuf:"bytes,3,opt,name=timestamp" json:"timestamp,omitempty"`
- XXX_NoUnkeyedLiteral struct{} `json:"-"`
- XXX_unrecognized []byte `json:"-"`
- XXX_sizecache int32 `json:"-"`
+// A BucketSpan defines a number of consecutive buckets in a native
+// histogram with their offset. Logically, it would be more
+// straightforward to include the bucket counts in the Span. However,
+// the protobuf representation is more compact in the way the data is
+// structured here (with all the buckets in a single array separate
+// from the Spans).
+type BucketSpan struct {
+ state protoimpl.MessageState
+ sizeCache protoimpl.SizeCache
+ unknownFields protoimpl.UnknownFields
+
+ Offset *int32 `protobuf:"zigzag32,1,opt,name=offset" json:"offset,omitempty"` // Gap to previous span, or starting point for 1st span (which can be negative).
+ Length *uint32 `protobuf:"varint,2,opt,name=length" json:"length,omitempty"` // Length of consecutive buckets.
}
-func (m *Exemplar) Reset() { *m = Exemplar{} }
-func (m *Exemplar) String() string { return proto.CompactTextString(m) }
-func (*Exemplar) ProtoMessage() {}
-func (*Exemplar) Descriptor() ([]byte, []int) {
- return fileDescriptor_6039342a2ba47b72, []int{8}
+func (x *BucketSpan) Reset() {
+ *x = BucketSpan{}
+ if protoimpl.UnsafeEnabled {
+ mi := &file_io_prometheus_client_metrics_proto_msgTypes[8]
+ ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
+ ms.StoreMessageInfo(mi)
+ }
+}
+
+func (x *BucketSpan) String() string {
+ return protoimpl.X.MessageStringOf(x)
+}
+
+func (*BucketSpan) ProtoMessage() {}
+
+func (x *BucketSpan) ProtoReflect() protoreflect.Message {
+ mi := &file_io_prometheus_client_metrics_proto_msgTypes[8]
+ if protoimpl.UnsafeEnabled && x != nil {
+ ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
+ if ms.LoadMessageInfo() == nil {
+ ms.StoreMessageInfo(mi)
+ }
+ return ms
+ }
+ return mi.MessageOf(x)
+}
+
+// Deprecated: Use BucketSpan.ProtoReflect.Descriptor instead.
+func (*BucketSpan) Descriptor() ([]byte, []int) {
+ return file_io_prometheus_client_metrics_proto_rawDescGZIP(), []int{8}
+}
+
+func (x *BucketSpan) GetOffset() int32 {
+ if x != nil && x.Offset != nil {
+ return *x.Offset
+ }
+ return 0
}
-func (m *Exemplar) XXX_Unmarshal(b []byte) error {
- return xxx_messageInfo_Exemplar.Unmarshal(m, b)
+func (x *BucketSpan) GetLength() uint32 {
+ if x != nil && x.Length != nil {
+ return *x.Length
+ }
+ return 0
}
-func (m *Exemplar) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) {
- return xxx_messageInfo_Exemplar.Marshal(b, m, deterministic)
+
+type Exemplar struct {
+ state protoimpl.MessageState
+ sizeCache protoimpl.SizeCache
+ unknownFields protoimpl.UnknownFields
+
+ Label []*LabelPair `protobuf:"bytes,1,rep,name=label" json:"label,omitempty"`
+ Value *float64 `protobuf:"fixed64,2,opt,name=value" json:"value,omitempty"`
+ Timestamp *timestamppb.Timestamp `protobuf:"bytes,3,opt,name=timestamp" json:"timestamp,omitempty"` // OpenMetrics-style.
}
-func (m *Exemplar) XXX_Merge(src proto.Message) {
- xxx_messageInfo_Exemplar.Merge(m, src)
+
+func (x *Exemplar) Reset() {
+ *x = Exemplar{}
+ if protoimpl.UnsafeEnabled {
+ mi := &file_io_prometheus_client_metrics_proto_msgTypes[9]
+ ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
+ ms.StoreMessageInfo(mi)
+ }
}
-func (m *Exemplar) XXX_Size() int {
- return xxx_messageInfo_Exemplar.Size(m)
+
+func (x *Exemplar) String() string {
+ return protoimpl.X.MessageStringOf(x)
}
-func (m *Exemplar) XXX_DiscardUnknown() {
- xxx_messageInfo_Exemplar.DiscardUnknown(m)
+
+func (*Exemplar) ProtoMessage() {}
+
+func (x *Exemplar) ProtoReflect() protoreflect.Message {
+ mi := &file_io_prometheus_client_metrics_proto_msgTypes[9]
+ if protoimpl.UnsafeEnabled && x != nil {
+ ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
+ if ms.LoadMessageInfo() == nil {
+ ms.StoreMessageInfo(mi)
+ }
+ return ms
+ }
+ return mi.MessageOf(x)
}
-var xxx_messageInfo_Exemplar proto.InternalMessageInfo
+// Deprecated: Use Exemplar.ProtoReflect.Descriptor instead.
+func (*Exemplar) Descriptor() ([]byte, []int) {
+ return file_io_prometheus_client_metrics_proto_rawDescGZIP(), []int{9}
+}
-func (m *Exemplar) GetLabel() []*LabelPair {
- if m != nil {
- return m.Label
+func (x *Exemplar) GetLabel() []*LabelPair {
+ if x != nil {
+ return x.Label
}
return nil
}
-func (m *Exemplar) GetValue() float64 {
- if m != nil && m.Value != nil {
- return *m.Value
+func (x *Exemplar) GetValue() float64 {
+ if x != nil && x.Value != nil {
+ return *x.Value
}
return 0
}
-func (m *Exemplar) GetTimestamp() *timestamp.Timestamp {
- if m != nil {
- return m.Timestamp
+func (x *Exemplar) GetTimestamp() *timestamppb.Timestamp {
+ if x != nil {
+ return x.Timestamp
}
return nil
}
type Metric struct {
- Label []*LabelPair `protobuf:"bytes,1,rep,name=label" json:"label,omitempty"`
- Gauge *Gauge `protobuf:"bytes,2,opt,name=gauge" json:"gauge,omitempty"`
- Counter *Counter `protobuf:"bytes,3,opt,name=counter" json:"counter,omitempty"`
- Summary *Summary `protobuf:"bytes,4,opt,name=summary" json:"summary,omitempty"`
- Untyped *Untyped `protobuf:"bytes,5,opt,name=untyped" json:"untyped,omitempty"`
- Histogram *Histogram `protobuf:"bytes,7,opt,name=histogram" json:"histogram,omitempty"`
- TimestampMs *int64 `protobuf:"varint,6,opt,name=timestamp_ms,json=timestampMs" json:"timestamp_ms,omitempty"`
- XXX_NoUnkeyedLiteral struct{} `json:"-"`
- XXX_unrecognized []byte `json:"-"`
- XXX_sizecache int32 `json:"-"`
-}
-
-func (m *Metric) Reset() { *m = Metric{} }
-func (m *Metric) String() string { return proto.CompactTextString(m) }
-func (*Metric) ProtoMessage() {}
-func (*Metric) Descriptor() ([]byte, []int) {
- return fileDescriptor_6039342a2ba47b72, []int{9}
+ state protoimpl.MessageState
+ sizeCache protoimpl.SizeCache
+ unknownFields protoimpl.UnknownFields
+
+ Label []*LabelPair `protobuf:"bytes,1,rep,name=label" json:"label,omitempty"`
+ Gauge *Gauge `protobuf:"bytes,2,opt,name=gauge" json:"gauge,omitempty"`
+ Counter *Counter `protobuf:"bytes,3,opt,name=counter" json:"counter,omitempty"`
+ Summary *Summary `protobuf:"bytes,4,opt,name=summary" json:"summary,omitempty"`
+ Untyped *Untyped `protobuf:"bytes,5,opt,name=untyped" json:"untyped,omitempty"`
+ Histogram *Histogram `protobuf:"bytes,7,opt,name=histogram" json:"histogram,omitempty"`
+ TimestampMs *int64 `protobuf:"varint,6,opt,name=timestamp_ms,json=timestampMs" json:"timestamp_ms,omitempty"`
+}
+
+func (x *Metric) Reset() {
+ *x = Metric{}
+ if protoimpl.UnsafeEnabled {
+ mi := &file_io_prometheus_client_metrics_proto_msgTypes[10]
+ ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
+ ms.StoreMessageInfo(mi)
+ }
}
-func (m *Metric) XXX_Unmarshal(b []byte) error {
- return xxx_messageInfo_Metric.Unmarshal(m, b)
-}
-func (m *Metric) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) {
- return xxx_messageInfo_Metric.Marshal(b, m, deterministic)
+func (x *Metric) String() string {
+ return protoimpl.X.MessageStringOf(x)
}
-func (m *Metric) XXX_Merge(src proto.Message) {
- xxx_messageInfo_Metric.Merge(m, src)
-}
-func (m *Metric) XXX_Size() int {
- return xxx_messageInfo_Metric.Size(m)
-}
-func (m *Metric) XXX_DiscardUnknown() {
- xxx_messageInfo_Metric.DiscardUnknown(m)
+
+func (*Metric) ProtoMessage() {}
+
+func (x *Metric) ProtoReflect() protoreflect.Message {
+ mi := &file_io_prometheus_client_metrics_proto_msgTypes[10]
+ if protoimpl.UnsafeEnabled && x != nil {
+ ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
+ if ms.LoadMessageInfo() == nil {
+ ms.StoreMessageInfo(mi)
+ }
+ return ms
+ }
+ return mi.MessageOf(x)
}
-var xxx_messageInfo_Metric proto.InternalMessageInfo
+// Deprecated: Use Metric.ProtoReflect.Descriptor instead.
+func (*Metric) Descriptor() ([]byte, []int) {
+ return file_io_prometheus_client_metrics_proto_rawDescGZIP(), []int{10}
+}
-func (m *Metric) GetLabel() []*LabelPair {
- if m != nil {
- return m.Label
+func (x *Metric) GetLabel() []*LabelPair {
+ if x != nil {
+ return x.Label
}
return nil
}
-func (m *Metric) GetGauge() *Gauge {
- if m != nil {
- return m.Gauge
+func (x *Metric) GetGauge() *Gauge {
+ if x != nil {
+ return x.Gauge
}
return nil
}
-func (m *Metric) GetCounter() *Counter {
- if m != nil {
- return m.Counter
+func (x *Metric) GetCounter() *Counter {
+ if x != nil {
+ return x.Counter
}
return nil
}
-func (m *Metric) GetSummary() *Summary {
- if m != nil {
- return m.Summary
+func (x *Metric) GetSummary() *Summary {
+ if x != nil {
+ return x.Summary
}
return nil
}
-func (m *Metric) GetUntyped() *Untyped {
- if m != nil {
- return m.Untyped
+func (x *Metric) GetUntyped() *Untyped {
+ if x != nil {
+ return x.Untyped
}
return nil
}
-func (m *Metric) GetHistogram() *Histogram {
- if m != nil {
- return m.Histogram
+func (x *Metric) GetHistogram() *Histogram {
+ if x != nil {
+ return x.Histogram
}
return nil
}
-func (m *Metric) GetTimestampMs() int64 {
- if m != nil && m.TimestampMs != nil {
- return *m.TimestampMs
+func (x *Metric) GetTimestampMs() int64 {
+ if x != nil && x.TimestampMs != nil {
+ return *x.TimestampMs
}
return 0
}
type MetricFamily struct {
- Name *string `protobuf:"bytes,1,opt,name=name" json:"name,omitempty"`
- Help *string `protobuf:"bytes,2,opt,name=help" json:"help,omitempty"`
- Type *MetricType `protobuf:"varint,3,opt,name=type,enum=io.prometheus.client.MetricType" json:"type,omitempty"`
- Metric []*Metric `protobuf:"bytes,4,rep,name=metric" json:"metric,omitempty"`
- XXX_NoUnkeyedLiteral struct{} `json:"-"`
- XXX_unrecognized []byte `json:"-"`
- XXX_sizecache int32 `json:"-"`
-}
-
-func (m *MetricFamily) Reset() { *m = MetricFamily{} }
-func (m *MetricFamily) String() string { return proto.CompactTextString(m) }
-func (*MetricFamily) ProtoMessage() {}
-func (*MetricFamily) Descriptor() ([]byte, []int) {
- return fileDescriptor_6039342a2ba47b72, []int{10}
+ state protoimpl.MessageState
+ sizeCache protoimpl.SizeCache
+ unknownFields protoimpl.UnknownFields
+
+ Name *string `protobuf:"bytes,1,opt,name=name" json:"name,omitempty"`
+ Help *string `protobuf:"bytes,2,opt,name=help" json:"help,omitempty"`
+ Type *MetricType `protobuf:"varint,3,opt,name=type,enum=io.prometheus.client.MetricType" json:"type,omitempty"`
+ Metric []*Metric `protobuf:"bytes,4,rep,name=metric" json:"metric,omitempty"`
+}
+
+func (x *MetricFamily) Reset() {
+ *x = MetricFamily{}
+ if protoimpl.UnsafeEnabled {
+ mi := &file_io_prometheus_client_metrics_proto_msgTypes[11]
+ ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
+ ms.StoreMessageInfo(mi)
+ }
}
-func (m *MetricFamily) XXX_Unmarshal(b []byte) error {
- return xxx_messageInfo_MetricFamily.Unmarshal(m, b)
+func (x *MetricFamily) String() string {
+ return protoimpl.X.MessageStringOf(x)
}
-func (m *MetricFamily) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) {
- return xxx_messageInfo_MetricFamily.Marshal(b, m, deterministic)
-}
-func (m *MetricFamily) XXX_Merge(src proto.Message) {
- xxx_messageInfo_MetricFamily.Merge(m, src)
-}
-func (m *MetricFamily) XXX_Size() int {
- return xxx_messageInfo_MetricFamily.Size(m)
-}
-func (m *MetricFamily) XXX_DiscardUnknown() {
- xxx_messageInfo_MetricFamily.DiscardUnknown(m)
+
+func (*MetricFamily) ProtoMessage() {}
+
+func (x *MetricFamily) ProtoReflect() protoreflect.Message {
+ mi := &file_io_prometheus_client_metrics_proto_msgTypes[11]
+ if protoimpl.UnsafeEnabled && x != nil {
+ ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
+ if ms.LoadMessageInfo() == nil {
+ ms.StoreMessageInfo(mi)
+ }
+ return ms
+ }
+ return mi.MessageOf(x)
}
-var xxx_messageInfo_MetricFamily proto.InternalMessageInfo
+// Deprecated: Use MetricFamily.ProtoReflect.Descriptor instead.
+func (*MetricFamily) Descriptor() ([]byte, []int) {
+ return file_io_prometheus_client_metrics_proto_rawDescGZIP(), []int{11}
+}
-func (m *MetricFamily) GetName() string {
- if m != nil && m.Name != nil {
- return *m.Name
+func (x *MetricFamily) GetName() string {
+ if x != nil && x.Name != nil {
+ return *x.Name
}
return ""
}
-func (m *MetricFamily) GetHelp() string {
- if m != nil && m.Help != nil {
- return *m.Help
+func (x *MetricFamily) GetHelp() string {
+ if x != nil && x.Help != nil {
+ return *x.Help
}
return ""
}
-func (m *MetricFamily) GetType() MetricType {
- if m != nil && m.Type != nil {
- return *m.Type
+func (x *MetricFamily) GetType() MetricType {
+ if x != nil && x.Type != nil {
+ return *x.Type
}
return MetricType_COUNTER
}
-func (m *MetricFamily) GetMetric() []*Metric {
- if m != nil {
- return m.Metric
+func (x *MetricFamily) GetMetric() []*Metric {
+ if x != nil {
+ return x.Metric
}
return nil
}
-func init() {
- proto.RegisterEnum("io.prometheus.client.MetricType", MetricType_name, MetricType_value)
- proto.RegisterType((*LabelPair)(nil), "io.prometheus.client.LabelPair")
- proto.RegisterType((*Gauge)(nil), "io.prometheus.client.Gauge")
- proto.RegisterType((*Counter)(nil), "io.prometheus.client.Counter")
- proto.RegisterType((*Quantile)(nil), "io.prometheus.client.Quantile")
- proto.RegisterType((*Summary)(nil), "io.prometheus.client.Summary")
- proto.RegisterType((*Untyped)(nil), "io.prometheus.client.Untyped")
- proto.RegisterType((*Histogram)(nil), "io.prometheus.client.Histogram")
- proto.RegisterType((*Bucket)(nil), "io.prometheus.client.Bucket")
- proto.RegisterType((*Exemplar)(nil), "io.prometheus.client.Exemplar")
- proto.RegisterType((*Metric)(nil), "io.prometheus.client.Metric")
- proto.RegisterType((*MetricFamily)(nil), "io.prometheus.client.MetricFamily")
-}
-
-func init() { proto.RegisterFile("metrics.proto", fileDescriptor_6039342a2ba47b72) }
-
-var fileDescriptor_6039342a2ba47b72 = []byte{
- // 665 bytes of a gzipped FileDescriptorProto
- 0x1f, 0x8b, 0x08, 0x00, 0x00, 0x00, 0x00, 0x00, 0x02, 0xff, 0xac, 0x54, 0xcd, 0x6e, 0xd3, 0x4c,
- 0x14, 0xfd, 0xdc, 0x38, 0x3f, 0xbe, 0x69, 0x3f, 0xa2, 0x51, 0x17, 0x56, 0xa1, 0x24, 0x78, 0x55,
- 0x58, 0x38, 0xa2, 0x6a, 0x05, 0x2a, 0xb0, 0x68, 0x4b, 0x48, 0x91, 0x48, 0x5b, 0x26, 0xc9, 0xa2,
- 0xb0, 0x88, 0x1c, 0x77, 0x70, 0x2c, 0x3c, 0xb1, 0xb1, 0x67, 0x2a, 0xb2, 0x66, 0xc1, 0x16, 0x5e,
- 0x81, 0x17, 0x05, 0xcd, 0x8f, 0x6d, 0x2a, 0xb9, 0x95, 0x40, 0xec, 0x66, 0xee, 0x3d, 0xe7, 0xfa,
- 0xcc, 0xf8, 0x9c, 0x81, 0x0d, 0x4a, 0x58, 0x1a, 0xfa, 0x99, 0x9b, 0xa4, 0x31, 0x8b, 0xd1, 0x66,
- 0x18, 0x8b, 0x15, 0x25, 0x6c, 0x41, 0x78, 0xe6, 0xfa, 0x51, 0x48, 0x96, 0x6c, 0xab, 0x1b, 0xc4,
- 0x71, 0x10, 0x91, 0xbe, 0xc4, 0xcc, 0xf9, 0x87, 0x3e, 0x0b, 0x29, 0xc9, 0x98, 0x47, 0x13, 0x45,
- 0x73, 0xf6, 0xc1, 0x7a, 0xe3, 0xcd, 0x49, 0x74, 0xee, 0x85, 0x29, 0x42, 0x60, 0x2e, 0x3d, 0x4a,
- 0x6c, 0xa3, 0x67, 0xec, 0x58, 0x58, 0xae, 0xd1, 0x26, 0xd4, 0xaf, 0xbc, 0x88, 0x13, 0x7b, 0x4d,
- 0x16, 0xd5, 0xc6, 0xd9, 0x86, 0xfa, 0xd0, 0xe3, 0xc1, 0x6f, 0x6d, 0xc1, 0x31, 0xf2, 0xf6, 0x7b,
- 0x68, 0x1e, 0xc7, 0x7c, 0xc9, 0x48, 0x5a, 0x0d, 0x40, 0x07, 0xd0, 0x22, 0x9f, 0x09, 0x4d, 0x22,
- 0x2f, 0x95, 0x83, 0xdb, 0xbb, 0xf7, 0xdd, 0xaa, 0x03, 0xb8, 0x03, 0x8d, 0xc2, 0x05, 0xde, 0x79,
- 0x0e, 0xad, 0xb7, 0xdc, 0x5b, 0xb2, 0x30, 0x22, 0x68, 0x0b, 0x5a, 0x9f, 0xf4, 0x5a, 0x7f, 0xa0,
- 0xd8, 0x5f, 0x57, 0x5e, 0x48, 0xfb, 0x6a, 0x40, 0x73, 0xcc, 0x29, 0xf5, 0xd2, 0x15, 0x7a, 0x00,
- 0xeb, 0x99, 0x47, 0x93, 0x88, 0xcc, 0x7c, 0xa1, 0x56, 0x4e, 0x30, 0x71, 0x5b, 0xd5, 0xe4, 0x01,
- 0xd0, 0x36, 0x80, 0x86, 0x64, 0x9c, 0xea, 0x49, 0x96, 0xaa, 0x8c, 0x39, 0x15, 0xe7, 0x28, 0xbe,
- 0x5f, 0xeb, 0xd5, 0x6e, 0x3e, 0x47, 0xae, 0xb8, 0xd4, 0xe7, 0x74, 0xa1, 0x39, 0x5d, 0xb2, 0x55,
- 0x42, 0x2e, 0x6f, 0xb8, 0xc5, 0x2f, 0x06, 0x58, 0x27, 0x61, 0xc6, 0xe2, 0x20, 0xf5, 0xe8, 0x3f,
- 0x10, 0xbb, 0x07, 0x8d, 0x39, 0xf7, 0x3f, 0x12, 0xa6, 0xa5, 0xde, 0xab, 0x96, 0x7a, 0x24, 0x31,
- 0x58, 0x63, 0x9d, 0x6f, 0x06, 0x34, 0x54, 0x09, 0x3d, 0x84, 0x8e, 0xcf, 0x29, 0x8f, 0x3c, 0x16,
- 0x5e, 0x5d, 0x97, 0x71, 0xa7, 0xac, 0x2b, 0x29, 0x5d, 0x68, 0xf3, 0x24, 0x21, 0xe9, 0x6c, 0x1e,
- 0xf3, 0xe5, 0xa5, 0xd6, 0x02, 0xb2, 0x74, 0x24, 0x2a, 0xd7, 0x1c, 0x50, 0xfb, 0x43, 0x07, 0x7c,
- 0x37, 0xa0, 0x95, 0x97, 0xd1, 0x3e, 0xd4, 0x23, 0xe1, 0x60, 0xdb, 0x90, 0x87, 0xea, 0x56, 0x4f,
- 0x29, 0x4c, 0x8e, 0x15, 0xba, 0xda, 0x1d, 0xe8, 0x29, 0x58, 0x45, 0x42, 0xb4, 0xac, 0x2d, 0x57,
- 0x65, 0xc8, 0xcd, 0x33, 0xe4, 0x4e, 0x72, 0x04, 0x2e, 0xc1, 0xce, 0xcf, 0x35, 0x68, 0x8c, 0x64,
- 0x22, 0xff, 0x56, 0xd1, 0x63, 0xa8, 0x07, 0x22, 0x53, 0x3a, 0x10, 0x77, 0xab, 0x69, 0x32, 0x76,
- 0x58, 0x21, 0xd1, 0x13, 0x68, 0xfa, 0x2a, 0x67, 0x5a, 0xec, 0x76, 0x35, 0x49, 0x87, 0x11, 0xe7,
- 0x68, 0x41, 0xcc, 0x54, 0x08, 0x6c, 0xf3, 0x36, 0xa2, 0x4e, 0x0a, 0xce, 0xd1, 0x82, 0xc8, 0x95,
- 0x69, 0xed, 0xfa, 0x6d, 0x44, 0xed, 0x6c, 0x9c, 0xa3, 0xd1, 0x0b, 0xb0, 0x16, 0xb9, 0x97, 0xed,
- 0xa6, 0xa4, 0xde, 0x70, 0x31, 0x85, 0xe5, 0x71, 0xc9, 0x10, 0xee, 0x2f, 0xee, 0x7a, 0x46, 0x33,
- 0xbb, 0xd1, 0x33, 0x76, 0x6a, 0xb8, 0x5d, 0xd4, 0x46, 0x99, 0xf3, 0xc3, 0x80, 0x75, 0xf5, 0x07,
- 0x5e, 0x79, 0x34, 0x8c, 0x56, 0x95, 0xcf, 0x19, 0x02, 0x73, 0x41, 0xa2, 0x44, 0xbf, 0x66, 0x72,
- 0x8d, 0xf6, 0xc0, 0x14, 0x1a, 0xe5, 0x15, 0xfe, 0xbf, 0xdb, 0xab, 0x56, 0xa5, 0x26, 0x4f, 0x56,
- 0x09, 0xc1, 0x12, 0x2d, 0xd2, 0xa4, 0x5e, 0x60, 0xdb, 0xbc, 0x2d, 0x4d, 0x8a, 0x87, 0x35, 0xf6,
- 0xd1, 0x08, 0xa0, 0x9c, 0x84, 0xda, 0xd0, 0x3c, 0x3e, 0x9b, 0x9e, 0x4e, 0x06, 0xb8, 0xf3, 0x1f,
- 0xb2, 0xa0, 0x3e, 0x3c, 0x9c, 0x0e, 0x07, 0x1d, 0x43, 0xd4, 0xc7, 0xd3, 0xd1, 0xe8, 0x10, 0x5f,
- 0x74, 0xd6, 0xc4, 0x66, 0x7a, 0x3a, 0xb9, 0x38, 0x1f, 0xbc, 0xec, 0xd4, 0xd0, 0x06, 0x58, 0x27,
- 0xaf, 0xc7, 0x93, 0xb3, 0x21, 0x3e, 0x1c, 0x75, 0xcc, 0x23, 0x0c, 0x95, 0xef, 0xfe, 0xbb, 0x83,
- 0x20, 0x64, 0x0b, 0x3e, 0x77, 0xfd, 0x98, 0xf6, 0xcb, 0x6e, 0x5f, 0x75, 0x67, 0x34, 0xbe, 0x24,
- 0x51, 0x3f, 0x88, 0x9f, 0x85, 0xf1, 0xac, 0xec, 0xce, 0x54, 0xf7, 0x57, 0x00, 0x00, 0x00, 0xff,
- 0xff, 0xd0, 0x84, 0x91, 0x73, 0x59, 0x06, 0x00, 0x00,
+var File_io_prometheus_client_metrics_proto protoreflect.FileDescriptor
+
+var file_io_prometheus_client_metrics_proto_rawDesc = []byte{
+ 0x0a, 0x22, 0x69, 0x6f, 0x2f, 0x70, 0x72, 0x6f, 0x6d, 0x65, 0x74, 0x68, 0x65, 0x75, 0x73, 0x2f,
+ 0x63, 0x6c, 0x69, 0x65, 0x6e, 0x74, 0x2f, 0x6d, 0x65, 0x74, 0x72, 0x69, 0x63, 0x73, 0x2e, 0x70,
+ 0x72, 0x6f, 0x74, 0x6f, 0x12, 0x14, 0x69, 0x6f, 0x2e, 0x70, 0x72, 0x6f, 0x6d, 0x65, 0x74, 0x68,
+ 0x65, 0x75, 0x73, 0x2e, 0x63, 0x6c, 0x69, 0x65, 0x6e, 0x74, 0x1a, 0x1f, 0x67, 0x6f, 0x6f, 0x67,
+ 0x6c, 0x65, 0x2f, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x62, 0x75, 0x66, 0x2f, 0x74, 0x69, 0x6d, 0x65,
+ 0x73, 0x74, 0x61, 0x6d, 0x70, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x22, 0x35, 0x0a, 0x09, 0x4c,
+ 0x61, 0x62, 0x65, 0x6c, 0x50, 0x61, 0x69, 0x72, 0x12, 0x12, 0x0a, 0x04, 0x6e, 0x61, 0x6d, 0x65,
+ 0x18, 0x01, 0x20, 0x01, 0x28, 0x09, 0x52, 0x04, 0x6e, 0x61, 0x6d, 0x65, 0x12, 0x14, 0x0a, 0x05,
+ 0x76, 0x61, 0x6c, 0x75, 0x65, 0x18, 0x02, 0x20, 0x01, 0x28, 0x09, 0x52, 0x05, 0x76, 0x61, 0x6c,
+ 0x75, 0x65, 0x22, 0x1d, 0x0a, 0x05, 0x47, 0x61, 0x75, 0x67, 0x65, 0x12, 0x14, 0x0a, 0x05, 0x76,
+ 0x61, 0x6c, 0x75, 0x65, 0x18, 0x01, 0x20, 0x01, 0x28, 0x01, 0x52, 0x05, 0x76, 0x61, 0x6c, 0x75,
+ 0x65, 0x22, 0x5b, 0x0a, 0x07, 0x43, 0x6f, 0x75, 0x6e, 0x74, 0x65, 0x72, 0x12, 0x14, 0x0a, 0x05,
+ 0x76, 0x61, 0x6c, 0x75, 0x65, 0x18, 0x01, 0x20, 0x01, 0x28, 0x01, 0x52, 0x05, 0x76, 0x61, 0x6c,
+ 0x75, 0x65, 0x12, 0x3a, 0x0a, 0x08, 0x65, 0x78, 0x65, 0x6d, 0x70, 0x6c, 0x61, 0x72, 0x18, 0x02,
+ 0x20, 0x01, 0x28, 0x0b, 0x32, 0x1e, 0x2e, 0x69, 0x6f, 0x2e, 0x70, 0x72, 0x6f, 0x6d, 0x65, 0x74,
+ 0x68, 0x65, 0x75, 0x73, 0x2e, 0x63, 0x6c, 0x69, 0x65, 0x6e, 0x74, 0x2e, 0x45, 0x78, 0x65, 0x6d,
+ 0x70, 0x6c, 0x61, 0x72, 0x52, 0x08, 0x65, 0x78, 0x65, 0x6d, 0x70, 0x6c, 0x61, 0x72, 0x22, 0x3c,
+ 0x0a, 0x08, 0x51, 0x75, 0x61, 0x6e, 0x74, 0x69, 0x6c, 0x65, 0x12, 0x1a, 0x0a, 0x08, 0x71, 0x75,
+ 0x61, 0x6e, 0x74, 0x69, 0x6c, 0x65, 0x18, 0x01, 0x20, 0x01, 0x28, 0x01, 0x52, 0x08, 0x71, 0x75,
+ 0x61, 0x6e, 0x74, 0x69, 0x6c, 0x65, 0x12, 0x14, 0x0a, 0x05, 0x76, 0x61, 0x6c, 0x75, 0x65, 0x18,
+ 0x02, 0x20, 0x01, 0x28, 0x01, 0x52, 0x05, 0x76, 0x61, 0x6c, 0x75, 0x65, 0x22, 0x87, 0x01, 0x0a,
+ 0x07, 0x53, 0x75, 0x6d, 0x6d, 0x61, 0x72, 0x79, 0x12, 0x21, 0x0a, 0x0c, 0x73, 0x61, 0x6d, 0x70,
+ 0x6c, 0x65, 0x5f, 0x63, 0x6f, 0x75, 0x6e, 0x74, 0x18, 0x01, 0x20, 0x01, 0x28, 0x04, 0x52, 0x0b,
+ 0x73, 0x61, 0x6d, 0x70, 0x6c, 0x65, 0x43, 0x6f, 0x75, 0x6e, 0x74, 0x12, 0x1d, 0x0a, 0x0a, 0x73,
+ 0x61, 0x6d, 0x70, 0x6c, 0x65, 0x5f, 0x73, 0x75, 0x6d, 0x18, 0x02, 0x20, 0x01, 0x28, 0x01, 0x52,
+ 0x09, 0x73, 0x61, 0x6d, 0x70, 0x6c, 0x65, 0x53, 0x75, 0x6d, 0x12, 0x3a, 0x0a, 0x08, 0x71, 0x75,
+ 0x61, 0x6e, 0x74, 0x69, 0x6c, 0x65, 0x18, 0x03, 0x20, 0x03, 0x28, 0x0b, 0x32, 0x1e, 0x2e, 0x69,
+ 0x6f, 0x2e, 0x70, 0x72, 0x6f, 0x6d, 0x65, 0x74, 0x68, 0x65, 0x75, 0x73, 0x2e, 0x63, 0x6c, 0x69,
+ 0x65, 0x6e, 0x74, 0x2e, 0x51, 0x75, 0x61, 0x6e, 0x74, 0x69, 0x6c, 0x65, 0x52, 0x08, 0x71, 0x75,
+ 0x61, 0x6e, 0x74, 0x69, 0x6c, 0x65, 0x22, 0x1f, 0x0a, 0x07, 0x55, 0x6e, 0x74, 0x79, 0x70, 0x65,
+ 0x64, 0x12, 0x14, 0x0a, 0x05, 0x76, 0x61, 0x6c, 0x75, 0x65, 0x18, 0x01, 0x20, 0x01, 0x28, 0x01,
+ 0x52, 0x05, 0x76, 0x61, 0x6c, 0x75, 0x65, 0x22, 0xe3, 0x04, 0x0a, 0x09, 0x48, 0x69, 0x73, 0x74,
+ 0x6f, 0x67, 0x72, 0x61, 0x6d, 0x12, 0x21, 0x0a, 0x0c, 0x73, 0x61, 0x6d, 0x70, 0x6c, 0x65, 0x5f,
+ 0x63, 0x6f, 0x75, 0x6e, 0x74, 0x18, 0x01, 0x20, 0x01, 0x28, 0x04, 0x52, 0x0b, 0x73, 0x61, 0x6d,
+ 0x70, 0x6c, 0x65, 0x43, 0x6f, 0x75, 0x6e, 0x74, 0x12, 0x2c, 0x0a, 0x12, 0x73, 0x61, 0x6d, 0x70,
+ 0x6c, 0x65, 0x5f, 0x63, 0x6f, 0x75, 0x6e, 0x74, 0x5f, 0x66, 0x6c, 0x6f, 0x61, 0x74, 0x18, 0x04,
+ 0x20, 0x01, 0x28, 0x01, 0x52, 0x10, 0x73, 0x61, 0x6d, 0x70, 0x6c, 0x65, 0x43, 0x6f, 0x75, 0x6e,
+ 0x74, 0x46, 0x6c, 0x6f, 0x61, 0x74, 0x12, 0x1d, 0x0a, 0x0a, 0x73, 0x61, 0x6d, 0x70, 0x6c, 0x65,
+ 0x5f, 0x73, 0x75, 0x6d, 0x18, 0x02, 0x20, 0x01, 0x28, 0x01, 0x52, 0x09, 0x73, 0x61, 0x6d, 0x70,
+ 0x6c, 0x65, 0x53, 0x75, 0x6d, 0x12, 0x34, 0x0a, 0x06, 0x62, 0x75, 0x63, 0x6b, 0x65, 0x74, 0x18,
+ 0x03, 0x20, 0x03, 0x28, 0x0b, 0x32, 0x1c, 0x2e, 0x69, 0x6f, 0x2e, 0x70, 0x72, 0x6f, 0x6d, 0x65,
+ 0x74, 0x68, 0x65, 0x75, 0x73, 0x2e, 0x63, 0x6c, 0x69, 0x65, 0x6e, 0x74, 0x2e, 0x42, 0x75, 0x63,
+ 0x6b, 0x65, 0x74, 0x52, 0x06, 0x62, 0x75, 0x63, 0x6b, 0x65, 0x74, 0x12, 0x16, 0x0a, 0x06, 0x73,
+ 0x63, 0x68, 0x65, 0x6d, 0x61, 0x18, 0x05, 0x20, 0x01, 0x28, 0x11, 0x52, 0x06, 0x73, 0x63, 0x68,
+ 0x65, 0x6d, 0x61, 0x12, 0x25, 0x0a, 0x0e, 0x7a, 0x65, 0x72, 0x6f, 0x5f, 0x74, 0x68, 0x72, 0x65,
+ 0x73, 0x68, 0x6f, 0x6c, 0x64, 0x18, 0x06, 0x20, 0x01, 0x28, 0x01, 0x52, 0x0d, 0x7a, 0x65, 0x72,
+ 0x6f, 0x54, 0x68, 0x72, 0x65, 0x73, 0x68, 0x6f, 0x6c, 0x64, 0x12, 0x1d, 0x0a, 0x0a, 0x7a, 0x65,
+ 0x72, 0x6f, 0x5f, 0x63, 0x6f, 0x75, 0x6e, 0x74, 0x18, 0x07, 0x20, 0x01, 0x28, 0x04, 0x52, 0x09,
+ 0x7a, 0x65, 0x72, 0x6f, 0x43, 0x6f, 0x75, 0x6e, 0x74, 0x12, 0x28, 0x0a, 0x10, 0x7a, 0x65, 0x72,
+ 0x6f, 0x5f, 0x63, 0x6f, 0x75, 0x6e, 0x74, 0x5f, 0x66, 0x6c, 0x6f, 0x61, 0x74, 0x18, 0x08, 0x20,
+ 0x01, 0x28, 0x01, 0x52, 0x0e, 0x7a, 0x65, 0x72, 0x6f, 0x43, 0x6f, 0x75, 0x6e, 0x74, 0x46, 0x6c,
+ 0x6f, 0x61, 0x74, 0x12, 0x45, 0x0a, 0x0d, 0x6e, 0x65, 0x67, 0x61, 0x74, 0x69, 0x76, 0x65, 0x5f,
+ 0x73, 0x70, 0x61, 0x6e, 0x18, 0x09, 0x20, 0x03, 0x28, 0x0b, 0x32, 0x20, 0x2e, 0x69, 0x6f, 0x2e,
+ 0x70, 0x72, 0x6f, 0x6d, 0x65, 0x74, 0x68, 0x65, 0x75, 0x73, 0x2e, 0x63, 0x6c, 0x69, 0x65, 0x6e,
+ 0x74, 0x2e, 0x42, 0x75, 0x63, 0x6b, 0x65, 0x74, 0x53, 0x70, 0x61, 0x6e, 0x52, 0x0c, 0x6e, 0x65,
+ 0x67, 0x61, 0x74, 0x69, 0x76, 0x65, 0x53, 0x70, 0x61, 0x6e, 0x12, 0x25, 0x0a, 0x0e, 0x6e, 0x65,
+ 0x67, 0x61, 0x74, 0x69, 0x76, 0x65, 0x5f, 0x64, 0x65, 0x6c, 0x74, 0x61, 0x18, 0x0a, 0x20, 0x03,
+ 0x28, 0x12, 0x52, 0x0d, 0x6e, 0x65, 0x67, 0x61, 0x74, 0x69, 0x76, 0x65, 0x44, 0x65, 0x6c, 0x74,
+ 0x61, 0x12, 0x25, 0x0a, 0x0e, 0x6e, 0x65, 0x67, 0x61, 0x74, 0x69, 0x76, 0x65, 0x5f, 0x63, 0x6f,
+ 0x75, 0x6e, 0x74, 0x18, 0x0b, 0x20, 0x03, 0x28, 0x01, 0x52, 0x0d, 0x6e, 0x65, 0x67, 0x61, 0x74,
+ 0x69, 0x76, 0x65, 0x43, 0x6f, 0x75, 0x6e, 0x74, 0x12, 0x45, 0x0a, 0x0d, 0x70, 0x6f, 0x73, 0x69,
+ 0x74, 0x69, 0x76, 0x65, 0x5f, 0x73, 0x70, 0x61, 0x6e, 0x18, 0x0c, 0x20, 0x03, 0x28, 0x0b, 0x32,
+ 0x20, 0x2e, 0x69, 0x6f, 0x2e, 0x70, 0x72, 0x6f, 0x6d, 0x65, 0x74, 0x68, 0x65, 0x75, 0x73, 0x2e,
+ 0x63, 0x6c, 0x69, 0x65, 0x6e, 0x74, 0x2e, 0x42, 0x75, 0x63, 0x6b, 0x65, 0x74, 0x53, 0x70, 0x61,
+ 0x6e, 0x52, 0x0c, 0x70, 0x6f, 0x73, 0x69, 0x74, 0x69, 0x76, 0x65, 0x53, 0x70, 0x61, 0x6e, 0x12,
+ 0x25, 0x0a, 0x0e, 0x70, 0x6f, 0x73, 0x69, 0x74, 0x69, 0x76, 0x65, 0x5f, 0x64, 0x65, 0x6c, 0x74,
+ 0x61, 0x18, 0x0d, 0x20, 0x03, 0x28, 0x12, 0x52, 0x0d, 0x70, 0x6f, 0x73, 0x69, 0x74, 0x69, 0x76,
+ 0x65, 0x44, 0x65, 0x6c, 0x74, 0x61, 0x12, 0x25, 0x0a, 0x0e, 0x70, 0x6f, 0x73, 0x69, 0x74, 0x69,
+ 0x76, 0x65, 0x5f, 0x63, 0x6f, 0x75, 0x6e, 0x74, 0x18, 0x0e, 0x20, 0x03, 0x28, 0x01, 0x52, 0x0d,
+ 0x70, 0x6f, 0x73, 0x69, 0x74, 0x69, 0x76, 0x65, 0x43, 0x6f, 0x75, 0x6e, 0x74, 0x22, 0xc6, 0x01,
+ 0x0a, 0x06, 0x42, 0x75, 0x63, 0x6b, 0x65, 0x74, 0x12, 0x29, 0x0a, 0x10, 0x63, 0x75, 0x6d, 0x75,
+ 0x6c, 0x61, 0x74, 0x69, 0x76, 0x65, 0x5f, 0x63, 0x6f, 0x75, 0x6e, 0x74, 0x18, 0x01, 0x20, 0x01,
+ 0x28, 0x04, 0x52, 0x0f, 0x63, 0x75, 0x6d, 0x75, 0x6c, 0x61, 0x74, 0x69, 0x76, 0x65, 0x43, 0x6f,
+ 0x75, 0x6e, 0x74, 0x12, 0x34, 0x0a, 0x16, 0x63, 0x75, 0x6d, 0x75, 0x6c, 0x61, 0x74, 0x69, 0x76,
+ 0x65, 0x5f, 0x63, 0x6f, 0x75, 0x6e, 0x74, 0x5f, 0x66, 0x6c, 0x6f, 0x61, 0x74, 0x18, 0x04, 0x20,
+ 0x01, 0x28, 0x01, 0x52, 0x14, 0x63, 0x75, 0x6d, 0x75, 0x6c, 0x61, 0x74, 0x69, 0x76, 0x65, 0x43,
+ 0x6f, 0x75, 0x6e, 0x74, 0x46, 0x6c, 0x6f, 0x61, 0x74, 0x12, 0x1f, 0x0a, 0x0b, 0x75, 0x70, 0x70,
+ 0x65, 0x72, 0x5f, 0x62, 0x6f, 0x75, 0x6e, 0x64, 0x18, 0x02, 0x20, 0x01, 0x28, 0x01, 0x52, 0x0a,
+ 0x75, 0x70, 0x70, 0x65, 0x72, 0x42, 0x6f, 0x75, 0x6e, 0x64, 0x12, 0x3a, 0x0a, 0x08, 0x65, 0x78,
+ 0x65, 0x6d, 0x70, 0x6c, 0x61, 0x72, 0x18, 0x03, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x1e, 0x2e, 0x69,
+ 0x6f, 0x2e, 0x70, 0x72, 0x6f, 0x6d, 0x65, 0x74, 0x68, 0x65, 0x75, 0x73, 0x2e, 0x63, 0x6c, 0x69,
+ 0x65, 0x6e, 0x74, 0x2e, 0x45, 0x78, 0x65, 0x6d, 0x70, 0x6c, 0x61, 0x72, 0x52, 0x08, 0x65, 0x78,
+ 0x65, 0x6d, 0x70, 0x6c, 0x61, 0x72, 0x22, 0x3c, 0x0a, 0x0a, 0x42, 0x75, 0x63, 0x6b, 0x65, 0x74,
+ 0x53, 0x70, 0x61, 0x6e, 0x12, 0x16, 0x0a, 0x06, 0x6f, 0x66, 0x66, 0x73, 0x65, 0x74, 0x18, 0x01,
+ 0x20, 0x01, 0x28, 0x11, 0x52, 0x06, 0x6f, 0x66, 0x66, 0x73, 0x65, 0x74, 0x12, 0x16, 0x0a, 0x06,
+ 0x6c, 0x65, 0x6e, 0x67, 0x74, 0x68, 0x18, 0x02, 0x20, 0x01, 0x28, 0x0d, 0x52, 0x06, 0x6c, 0x65,
+ 0x6e, 0x67, 0x74, 0x68, 0x22, 0x91, 0x01, 0x0a, 0x08, 0x45, 0x78, 0x65, 0x6d, 0x70, 0x6c, 0x61,
+ 0x72, 0x12, 0x35, 0x0a, 0x05, 0x6c, 0x61, 0x62, 0x65, 0x6c, 0x18, 0x01, 0x20, 0x03, 0x28, 0x0b,
+ 0x32, 0x1f, 0x2e, 0x69, 0x6f, 0x2e, 0x70, 0x72, 0x6f, 0x6d, 0x65, 0x74, 0x68, 0x65, 0x75, 0x73,
+ 0x2e, 0x63, 0x6c, 0x69, 0x65, 0x6e, 0x74, 0x2e, 0x4c, 0x61, 0x62, 0x65, 0x6c, 0x50, 0x61, 0x69,
+ 0x72, 0x52, 0x05, 0x6c, 0x61, 0x62, 0x65, 0x6c, 0x12, 0x14, 0x0a, 0x05, 0x76, 0x61, 0x6c, 0x75,
+ 0x65, 0x18, 0x02, 0x20, 0x01, 0x28, 0x01, 0x52, 0x05, 0x76, 0x61, 0x6c, 0x75, 0x65, 0x12, 0x38,
+ 0x0a, 0x09, 0x74, 0x69, 0x6d, 0x65, 0x73, 0x74, 0x61, 0x6d, 0x70, 0x18, 0x03, 0x20, 0x01, 0x28,
+ 0x0b, 0x32, 0x1a, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f,
+ 0x62, 0x75, 0x66, 0x2e, 0x54, 0x69, 0x6d, 0x65, 0x73, 0x74, 0x61, 0x6d, 0x70, 0x52, 0x09, 0x74,
+ 0x69, 0x6d, 0x65, 0x73, 0x74, 0x61, 0x6d, 0x70, 0x22, 0xff, 0x02, 0x0a, 0x06, 0x4d, 0x65, 0x74,
+ 0x72, 0x69, 0x63, 0x12, 0x35, 0x0a, 0x05, 0x6c, 0x61, 0x62, 0x65, 0x6c, 0x18, 0x01, 0x20, 0x03,
+ 0x28, 0x0b, 0x32, 0x1f, 0x2e, 0x69, 0x6f, 0x2e, 0x70, 0x72, 0x6f, 0x6d, 0x65, 0x74, 0x68, 0x65,
+ 0x75, 0x73, 0x2e, 0x63, 0x6c, 0x69, 0x65, 0x6e, 0x74, 0x2e, 0x4c, 0x61, 0x62, 0x65, 0x6c, 0x50,
+ 0x61, 0x69, 0x72, 0x52, 0x05, 0x6c, 0x61, 0x62, 0x65, 0x6c, 0x12, 0x31, 0x0a, 0x05, 0x67, 0x61,
+ 0x75, 0x67, 0x65, 0x18, 0x02, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x1b, 0x2e, 0x69, 0x6f, 0x2e, 0x70,
+ 0x72, 0x6f, 0x6d, 0x65, 0x74, 0x68, 0x65, 0x75, 0x73, 0x2e, 0x63, 0x6c, 0x69, 0x65, 0x6e, 0x74,
+ 0x2e, 0x47, 0x61, 0x75, 0x67, 0x65, 0x52, 0x05, 0x67, 0x61, 0x75, 0x67, 0x65, 0x12, 0x37, 0x0a,
+ 0x07, 0x63, 0x6f, 0x75, 0x6e, 0x74, 0x65, 0x72, 0x18, 0x03, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x1d,
+ 0x2e, 0x69, 0x6f, 0x2e, 0x70, 0x72, 0x6f, 0x6d, 0x65, 0x74, 0x68, 0x65, 0x75, 0x73, 0x2e, 0x63,
+ 0x6c, 0x69, 0x65, 0x6e, 0x74, 0x2e, 0x43, 0x6f, 0x75, 0x6e, 0x74, 0x65, 0x72, 0x52, 0x07, 0x63,
+ 0x6f, 0x75, 0x6e, 0x74, 0x65, 0x72, 0x12, 0x37, 0x0a, 0x07, 0x73, 0x75, 0x6d, 0x6d, 0x61, 0x72,
+ 0x79, 0x18, 0x04, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x1d, 0x2e, 0x69, 0x6f, 0x2e, 0x70, 0x72, 0x6f,
+ 0x6d, 0x65, 0x74, 0x68, 0x65, 0x75, 0x73, 0x2e, 0x63, 0x6c, 0x69, 0x65, 0x6e, 0x74, 0x2e, 0x53,
+ 0x75, 0x6d, 0x6d, 0x61, 0x72, 0x79, 0x52, 0x07, 0x73, 0x75, 0x6d, 0x6d, 0x61, 0x72, 0x79, 0x12,
+ 0x37, 0x0a, 0x07, 0x75, 0x6e, 0x74, 0x79, 0x70, 0x65, 0x64, 0x18, 0x05, 0x20, 0x01, 0x28, 0x0b,
+ 0x32, 0x1d, 0x2e, 0x69, 0x6f, 0x2e, 0x70, 0x72, 0x6f, 0x6d, 0x65, 0x74, 0x68, 0x65, 0x75, 0x73,
+ 0x2e, 0x63, 0x6c, 0x69, 0x65, 0x6e, 0x74, 0x2e, 0x55, 0x6e, 0x74, 0x79, 0x70, 0x65, 0x64, 0x52,
+ 0x07, 0x75, 0x6e, 0x74, 0x79, 0x70, 0x65, 0x64, 0x12, 0x3d, 0x0a, 0x09, 0x68, 0x69, 0x73, 0x74,
+ 0x6f, 0x67, 0x72, 0x61, 0x6d, 0x18, 0x07, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x1f, 0x2e, 0x69, 0x6f,
+ 0x2e, 0x70, 0x72, 0x6f, 0x6d, 0x65, 0x74, 0x68, 0x65, 0x75, 0x73, 0x2e, 0x63, 0x6c, 0x69, 0x65,
+ 0x6e, 0x74, 0x2e, 0x48, 0x69, 0x73, 0x74, 0x6f, 0x67, 0x72, 0x61, 0x6d, 0x52, 0x09, 0x68, 0x69,
+ 0x73, 0x74, 0x6f, 0x67, 0x72, 0x61, 0x6d, 0x12, 0x21, 0x0a, 0x0c, 0x74, 0x69, 0x6d, 0x65, 0x73,
+ 0x74, 0x61, 0x6d, 0x70, 0x5f, 0x6d, 0x73, 0x18, 0x06, 0x20, 0x01, 0x28, 0x03, 0x52, 0x0b, 0x74,
+ 0x69, 0x6d, 0x65, 0x73, 0x74, 0x61, 0x6d, 0x70, 0x4d, 0x73, 0x22, 0xa2, 0x01, 0x0a, 0x0c, 0x4d,
+ 0x65, 0x74, 0x72, 0x69, 0x63, 0x46, 0x61, 0x6d, 0x69, 0x6c, 0x79, 0x12, 0x12, 0x0a, 0x04, 0x6e,
+ 0x61, 0x6d, 0x65, 0x18, 0x01, 0x20, 0x01, 0x28, 0x09, 0x52, 0x04, 0x6e, 0x61, 0x6d, 0x65, 0x12,
+ 0x12, 0x0a, 0x04, 0x68, 0x65, 0x6c, 0x70, 0x18, 0x02, 0x20, 0x01, 0x28, 0x09, 0x52, 0x04, 0x68,
+ 0x65, 0x6c, 0x70, 0x12, 0x34, 0x0a, 0x04, 0x74, 0x79, 0x70, 0x65, 0x18, 0x03, 0x20, 0x01, 0x28,
+ 0x0e, 0x32, 0x20, 0x2e, 0x69, 0x6f, 0x2e, 0x70, 0x72, 0x6f, 0x6d, 0x65, 0x74, 0x68, 0x65, 0x75,
+ 0x73, 0x2e, 0x63, 0x6c, 0x69, 0x65, 0x6e, 0x74, 0x2e, 0x4d, 0x65, 0x74, 0x72, 0x69, 0x63, 0x54,
+ 0x79, 0x70, 0x65, 0x52, 0x04, 0x74, 0x79, 0x70, 0x65, 0x12, 0x34, 0x0a, 0x06, 0x6d, 0x65, 0x74,
+ 0x72, 0x69, 0x63, 0x18, 0x04, 0x20, 0x03, 0x28, 0x0b, 0x32, 0x1c, 0x2e, 0x69, 0x6f, 0x2e, 0x70,
+ 0x72, 0x6f, 0x6d, 0x65, 0x74, 0x68, 0x65, 0x75, 0x73, 0x2e, 0x63, 0x6c, 0x69, 0x65, 0x6e, 0x74,
+ 0x2e, 0x4d, 0x65, 0x74, 0x72, 0x69, 0x63, 0x52, 0x06, 0x6d, 0x65, 0x74, 0x72, 0x69, 0x63, 0x2a,
+ 0x62, 0x0a, 0x0a, 0x4d, 0x65, 0x74, 0x72, 0x69, 0x63, 0x54, 0x79, 0x70, 0x65, 0x12, 0x0b, 0x0a,
+ 0x07, 0x43, 0x4f, 0x55, 0x4e, 0x54, 0x45, 0x52, 0x10, 0x00, 0x12, 0x09, 0x0a, 0x05, 0x47, 0x41,
+ 0x55, 0x47, 0x45, 0x10, 0x01, 0x12, 0x0b, 0x0a, 0x07, 0x53, 0x55, 0x4d, 0x4d, 0x41, 0x52, 0x59,
+ 0x10, 0x02, 0x12, 0x0b, 0x0a, 0x07, 0x55, 0x4e, 0x54, 0x59, 0x50, 0x45, 0x44, 0x10, 0x03, 0x12,
+ 0x0d, 0x0a, 0x09, 0x48, 0x49, 0x53, 0x54, 0x4f, 0x47, 0x52, 0x41, 0x4d, 0x10, 0x04, 0x12, 0x13,
+ 0x0a, 0x0f, 0x47, 0x41, 0x55, 0x47, 0x45, 0x5f, 0x48, 0x49, 0x53, 0x54, 0x4f, 0x47, 0x52, 0x41,
+ 0x4d, 0x10, 0x05, 0x42, 0x52, 0x0a, 0x14, 0x69, 0x6f, 0x2e, 0x70, 0x72, 0x6f, 0x6d, 0x65, 0x74,
+ 0x68, 0x65, 0x75, 0x73, 0x2e, 0x63, 0x6c, 0x69, 0x65, 0x6e, 0x74, 0x5a, 0x3a, 0x67, 0x69, 0x74,
+ 0x68, 0x75, 0x62, 0x2e, 0x63, 0x6f, 0x6d, 0x2f, 0x70, 0x72, 0x6f, 0x6d, 0x65, 0x74, 0x68, 0x65,
+ 0x75, 0x73, 0x2f, 0x63, 0x6c, 0x69, 0x65, 0x6e, 0x74, 0x5f, 0x6d, 0x6f, 0x64, 0x65, 0x6c, 0x2f,
+ 0x67, 0x6f, 0x3b, 0x69, 0x6f, 0x5f, 0x70, 0x72, 0x6f, 0x6d, 0x65, 0x74, 0x68, 0x65, 0x75, 0x73,
+ 0x5f, 0x63, 0x6c, 0x69, 0x65, 0x6e, 0x74,
+}
+
+var (
+ file_io_prometheus_client_metrics_proto_rawDescOnce sync.Once
+ file_io_prometheus_client_metrics_proto_rawDescData = file_io_prometheus_client_metrics_proto_rawDesc
+)
+
+func file_io_prometheus_client_metrics_proto_rawDescGZIP() []byte {
+ file_io_prometheus_client_metrics_proto_rawDescOnce.Do(func() {
+ file_io_prometheus_client_metrics_proto_rawDescData = protoimpl.X.CompressGZIP(file_io_prometheus_client_metrics_proto_rawDescData)
+ })
+ return file_io_prometheus_client_metrics_proto_rawDescData
+}
+
+var file_io_prometheus_client_metrics_proto_enumTypes = make([]protoimpl.EnumInfo, 1)
+var file_io_prometheus_client_metrics_proto_msgTypes = make([]protoimpl.MessageInfo, 12)
+var file_io_prometheus_client_metrics_proto_goTypes = []interface{}{
+ (MetricType)(0), // 0: io.prometheus.client.MetricType
+ (*LabelPair)(nil), // 1: io.prometheus.client.LabelPair
+ (*Gauge)(nil), // 2: io.prometheus.client.Gauge
+ (*Counter)(nil), // 3: io.prometheus.client.Counter
+ (*Quantile)(nil), // 4: io.prometheus.client.Quantile
+ (*Summary)(nil), // 5: io.prometheus.client.Summary
+ (*Untyped)(nil), // 6: io.prometheus.client.Untyped
+ (*Histogram)(nil), // 7: io.prometheus.client.Histogram
+ (*Bucket)(nil), // 8: io.prometheus.client.Bucket
+ (*BucketSpan)(nil), // 9: io.prometheus.client.BucketSpan
+ (*Exemplar)(nil), // 10: io.prometheus.client.Exemplar
+ (*Metric)(nil), // 11: io.prometheus.client.Metric
+ (*MetricFamily)(nil), // 12: io.prometheus.client.MetricFamily
+ (*timestamppb.Timestamp)(nil), // 13: google.protobuf.Timestamp
+}
+var file_io_prometheus_client_metrics_proto_depIdxs = []int32{
+ 10, // 0: io.prometheus.client.Counter.exemplar:type_name -> io.prometheus.client.Exemplar
+ 4, // 1: io.prometheus.client.Summary.quantile:type_name -> io.prometheus.client.Quantile
+ 8, // 2: io.prometheus.client.Histogram.bucket:type_name -> io.prometheus.client.Bucket
+ 9, // 3: io.prometheus.client.Histogram.negative_span:type_name -> io.prometheus.client.BucketSpan
+ 9, // 4: io.prometheus.client.Histogram.positive_span:type_name -> io.prometheus.client.BucketSpan
+ 10, // 5: io.prometheus.client.Bucket.exemplar:type_name -> io.prometheus.client.Exemplar
+ 1, // 6: io.prometheus.client.Exemplar.label:type_name -> io.prometheus.client.LabelPair
+ 13, // 7: io.prometheus.client.Exemplar.timestamp:type_name -> google.protobuf.Timestamp
+ 1, // 8: io.prometheus.client.Metric.label:type_name -> io.prometheus.client.LabelPair
+ 2, // 9: io.prometheus.client.Metric.gauge:type_name -> io.prometheus.client.Gauge
+ 3, // 10: io.prometheus.client.Metric.counter:type_name -> io.prometheus.client.Counter
+ 5, // 11: io.prometheus.client.Metric.summary:type_name -> io.prometheus.client.Summary
+ 6, // 12: io.prometheus.client.Metric.untyped:type_name -> io.prometheus.client.Untyped
+ 7, // 13: io.prometheus.client.Metric.histogram:type_name -> io.prometheus.client.Histogram
+ 0, // 14: io.prometheus.client.MetricFamily.type:type_name -> io.prometheus.client.MetricType
+ 11, // 15: io.prometheus.client.MetricFamily.metric:type_name -> io.prometheus.client.Metric
+ 16, // [16:16] is the sub-list for method output_type
+ 16, // [16:16] is the sub-list for method input_type
+ 16, // [16:16] is the sub-list for extension type_name
+ 16, // [16:16] is the sub-list for extension extendee
+ 0, // [0:16] is the sub-list for field type_name
+}
+
+func init() { file_io_prometheus_client_metrics_proto_init() }
+func file_io_prometheus_client_metrics_proto_init() {
+ if File_io_prometheus_client_metrics_proto != nil {
+ return
+ }
+ if !protoimpl.UnsafeEnabled {
+ file_io_prometheus_client_metrics_proto_msgTypes[0].Exporter = func(v interface{}, i int) interface{} {
+ switch v := v.(*LabelPair); i {
+ case 0:
+ return &v.state
+ case 1:
+ return &v.sizeCache
+ case 2:
+ return &v.unknownFields
+ default:
+ return nil
+ }
+ }
+ file_io_prometheus_client_metrics_proto_msgTypes[1].Exporter = func(v interface{}, i int) interface{} {
+ switch v := v.(*Gauge); i {
+ case 0:
+ return &v.state
+ case 1:
+ return &v.sizeCache
+ case 2:
+ return &v.unknownFields
+ default:
+ return nil
+ }
+ }
+ file_io_prometheus_client_metrics_proto_msgTypes[2].Exporter = func(v interface{}, i int) interface{} {
+ switch v := v.(*Counter); i {
+ case 0:
+ return &v.state
+ case 1:
+ return &v.sizeCache
+ case 2:
+ return &v.unknownFields
+ default:
+ return nil
+ }
+ }
+ file_io_prometheus_client_metrics_proto_msgTypes[3].Exporter = func(v interface{}, i int) interface{} {
+ switch v := v.(*Quantile); i {
+ case 0:
+ return &v.state
+ case 1:
+ return &v.sizeCache
+ case 2:
+ return &v.unknownFields
+ default:
+ return nil
+ }
+ }
+ file_io_prometheus_client_metrics_proto_msgTypes[4].Exporter = func(v interface{}, i int) interface{} {
+ switch v := v.(*Summary); i {
+ case 0:
+ return &v.state
+ case 1:
+ return &v.sizeCache
+ case 2:
+ return &v.unknownFields
+ default:
+ return nil
+ }
+ }
+ file_io_prometheus_client_metrics_proto_msgTypes[5].Exporter = func(v interface{}, i int) interface{} {
+ switch v := v.(*Untyped); i {
+ case 0:
+ return &v.state
+ case 1:
+ return &v.sizeCache
+ case 2:
+ return &v.unknownFields
+ default:
+ return nil
+ }
+ }
+ file_io_prometheus_client_metrics_proto_msgTypes[6].Exporter = func(v interface{}, i int) interface{} {
+ switch v := v.(*Histogram); i {
+ case 0:
+ return &v.state
+ case 1:
+ return &v.sizeCache
+ case 2:
+ return &v.unknownFields
+ default:
+ return nil
+ }
+ }
+ file_io_prometheus_client_metrics_proto_msgTypes[7].Exporter = func(v interface{}, i int) interface{} {
+ switch v := v.(*Bucket); i {
+ case 0:
+ return &v.state
+ case 1:
+ return &v.sizeCache
+ case 2:
+ return &v.unknownFields
+ default:
+ return nil
+ }
+ }
+ file_io_prometheus_client_metrics_proto_msgTypes[8].Exporter = func(v interface{}, i int) interface{} {
+ switch v := v.(*BucketSpan); i {
+ case 0:
+ return &v.state
+ case 1:
+ return &v.sizeCache
+ case 2:
+ return &v.unknownFields
+ default:
+ return nil
+ }
+ }
+ file_io_prometheus_client_metrics_proto_msgTypes[9].Exporter = func(v interface{}, i int) interface{} {
+ switch v := v.(*Exemplar); i {
+ case 0:
+ return &v.state
+ case 1:
+ return &v.sizeCache
+ case 2:
+ return &v.unknownFields
+ default:
+ return nil
+ }
+ }
+ file_io_prometheus_client_metrics_proto_msgTypes[10].Exporter = func(v interface{}, i int) interface{} {
+ switch v := v.(*Metric); i {
+ case 0:
+ return &v.state
+ case 1:
+ return &v.sizeCache
+ case 2:
+ return &v.unknownFields
+ default:
+ return nil
+ }
+ }
+ file_io_prometheus_client_metrics_proto_msgTypes[11].Exporter = func(v interface{}, i int) interface{} {
+ switch v := v.(*MetricFamily); i {
+ case 0:
+ return &v.state
+ case 1:
+ return &v.sizeCache
+ case 2:
+ return &v.unknownFields
+ default:
+ return nil
+ }
+ }
+ }
+ type x struct{}
+ out := protoimpl.TypeBuilder{
+ File: protoimpl.DescBuilder{
+ GoPackagePath: reflect.TypeOf(x{}).PkgPath(),
+ RawDescriptor: file_io_prometheus_client_metrics_proto_rawDesc,
+ NumEnums: 1,
+ NumMessages: 12,
+ NumExtensions: 0,
+ NumServices: 0,
+ },
+ GoTypes: file_io_prometheus_client_metrics_proto_goTypes,
+ DependencyIndexes: file_io_prometheus_client_metrics_proto_depIdxs,
+ EnumInfos: file_io_prometheus_client_metrics_proto_enumTypes,
+ MessageInfos: file_io_prometheus_client_metrics_proto_msgTypes,
+ }.Build()
+ File_io_prometheus_client_metrics_proto = out.File
+ file_io_prometheus_client_metrics_proto_rawDesc = nil
+ file_io_prometheus_client_metrics_proto_goTypes = nil
+ file_io_prometheus_client_metrics_proto_depIdxs = nil
}
diff --git a/vendor/github.com/quic-go/qpack/.codecov.yml b/vendor/github.com/quic-go/qpack/.codecov.yml
new file mode 100644
index 000000000..00064af33
--- /dev/null
+++ b/vendor/github.com/quic-go/qpack/.codecov.yml
@@ -0,0 +1,7 @@
+coverage:
+ round: nearest
+ status:
+ project:
+ default:
+ threshold: 1
+ patch: false
diff --git a/vendor/github.com/quic-go/qpack/.gitignore b/vendor/github.com/quic-go/qpack/.gitignore
new file mode 100644
index 000000000..66c189a09
--- /dev/null
+++ b/vendor/github.com/quic-go/qpack/.gitignore
@@ -0,0 +1,6 @@
+fuzzing/*.zip
+fuzzing/coverprofile
+fuzzing/crashers
+fuzzing/sonarprofile
+fuzzing/suppressions
+fuzzing/corpus/
diff --git a/vendor/github.com/quic-go/qpack/.gitmodules b/vendor/github.com/quic-go/qpack/.gitmodules
new file mode 100644
index 000000000..5ac16f084
--- /dev/null
+++ b/vendor/github.com/quic-go/qpack/.gitmodules
@@ -0,0 +1,3 @@
+[submodule "integrationtests/interop/qifs"]
+ path = integrationtests/interop/qifs
+ url = https://github.com/qpackers/qifs.git
diff --git a/vendor/github.com/quic-go/qpack/.golangci.yml b/vendor/github.com/quic-go/qpack/.golangci.yml
new file mode 100644
index 000000000..4a91adc77
--- /dev/null
+++ b/vendor/github.com/quic-go/qpack/.golangci.yml
@@ -0,0 +1,27 @@
+run:
+linters-settings:
+linters:
+ disable-all: true
+ enable:
+ - asciicheck
+ - deadcode
+ - exhaustive
+ - exportloopref
+ - goconst
+ - gofmt # redundant, since gofmt *should* be a no-op after gofumpt
+ - gofumpt
+ - goimports
+ - gosimple
+ - ineffassign
+ - misspell
+ - prealloc
+ - scopelint
+ - staticcheck
+ - stylecheck
+ - structcheck
+ - unconvert
+ - unparam
+ - unused
+ - varcheck
+ - vet
+
diff --git a/vendor/github.com/nxadm/tail/ratelimiter/Licence b/vendor/github.com/quic-go/qpack/LICENSE.md
similarity index 97%
rename from vendor/github.com/nxadm/tail/ratelimiter/Licence
rename to vendor/github.com/quic-go/qpack/LICENSE.md
index 434aab19f..1ac5a2d9a 100644
--- a/vendor/github.com/nxadm/tail/ratelimiter/Licence
+++ b/vendor/github.com/quic-go/qpack/LICENSE.md
@@ -1,4 +1,4 @@
-Copyright (C) 2013 99designs
+Copyright 2019 Marten Seemann
Permission is hereby granted, free of charge, to any person obtaining a copy of this software and associated documentation files (the "Software"), to deal in the Software without restriction, including without limitation the rights to use, copy, modify, merge, publish, distribute, sublicense, and/or sell copies of the Software, and to permit persons to whom the Software is furnished to do so, subject to the following conditions:
diff --git a/vendor/github.com/quic-go/qpack/README.md b/vendor/github.com/quic-go/qpack/README.md
new file mode 100644
index 000000000..6ba4bad4a
--- /dev/null
+++ b/vendor/github.com/quic-go/qpack/README.md
@@ -0,0 +1,20 @@
+# QPACK
+
+[![Godoc Reference](https://img.shields.io/badge/godoc-reference-blue.svg?style=flat-square)](https://godoc.org/github.com/marten-seemann/qpack)
+[![Code Coverage](https://img.shields.io/codecov/c/github/marten-seemann/qpack/master.svg?style=flat-square)](https://codecov.io/gh/marten-seemann/qpack)
+
+This is a minimal QPACK ([RFC 9204](https://datatracker.ietf.org/doc/html/rfc9204)) implementation in Go. It is minimal in the sense that it doesn't use the dynamic table at all, but just the static table and (Huffman encoded) string literals. Wherever possible, it reuses code from the [HPACK implementation in the Go standard library](https://github.com/golang/net/tree/master/http2/hpack).
+
+It should be able to interoperate with other QPACK implemetations (both encoders and decoders), however it won't achieve a high compression efficiency.
+
+## Running the interop tests
+
+Install the [QPACK interop files](https://github.com/qpackers/qifs/) by running
+```bash
+git submodule update --init --recursive
+```
+
+Then run the tests:
+```bash
+ginkgo -r integrationtests
+```
diff --git a/vendor/github.com/quic-go/qpack/decoder.go b/vendor/github.com/quic-go/qpack/decoder.go
new file mode 100644
index 000000000..c90019413
--- /dev/null
+++ b/vendor/github.com/quic-go/qpack/decoder.go
@@ -0,0 +1,271 @@
+package qpack
+
+import (
+ "bytes"
+ "errors"
+ "fmt"
+ "sync"
+
+ "golang.org/x/net/http2/hpack"
+)
+
+// A decodingError is something the spec defines as a decoding error.
+type decodingError struct {
+ err error
+}
+
+func (de decodingError) Error() string {
+ return fmt.Sprintf("decoding error: %v", de.err)
+}
+
+// An invalidIndexError is returned when an encoder references a table
+// entry before the static table or after the end of the dynamic table.
+type invalidIndexError int
+
+func (e invalidIndexError) Error() string {
+ return fmt.Sprintf("invalid indexed representation index %d", int(e))
+}
+
+var errNoDynamicTable = decodingError{errors.New("no dynamic table")}
+
+// errNeedMore is an internal sentinel error value that means the
+// buffer is truncated and we need to read more data before we can
+// continue parsing.
+var errNeedMore = errors.New("need more data")
+
+// A Decoder is the decoding context for incremental processing of
+// header blocks.
+type Decoder struct {
+ mutex sync.Mutex
+
+ emitFunc func(f HeaderField)
+
+ readRequiredInsertCount bool
+ readDeltaBase bool
+
+ // buf is the unparsed buffer. It's only written to
+ // saveBuf if it was truncated in the middle of a header
+ // block. Because it's usually not owned, we can only
+ // process it under Write.
+ buf []byte // not owned; only valid during Write
+
+ // saveBuf is previous data passed to Write which we weren't able
+ // to fully parse before. Unlike buf, we own this data.
+ saveBuf bytes.Buffer
+}
+
+// NewDecoder returns a new decoder
+// The emitFunc will be called for each valid field parsed,
+// in the same goroutine as calls to Write, before Write returns.
+func NewDecoder(emitFunc func(f HeaderField)) *Decoder {
+ return &Decoder{emitFunc: emitFunc}
+}
+
+func (d *Decoder) Write(p []byte) (int, error) {
+ if len(p) == 0 {
+ return 0, nil
+ }
+
+ d.mutex.Lock()
+ n, err := d.writeLocked(p)
+ d.mutex.Unlock()
+ return n, err
+}
+
+func (d *Decoder) writeLocked(p []byte) (int, error) {
+ // Only copy the data if we have to. Optimistically assume
+ // that p will contain a complete header block.
+ if d.saveBuf.Len() == 0 {
+ d.buf = p
+ } else {
+ d.saveBuf.Write(p)
+ d.buf = d.saveBuf.Bytes()
+ d.saveBuf.Reset()
+ }
+
+ if err := d.decode(); err != nil {
+ if err != errNeedMore {
+ return 0, err
+ }
+ // TODO: limit the size of the buffer
+ d.saveBuf.Write(d.buf)
+ }
+ return len(p), nil
+}
+
+// DecodeFull decodes an entire block.
+func (d *Decoder) DecodeFull(p []byte) ([]HeaderField, error) {
+ if len(p) == 0 {
+ return []HeaderField{}, nil
+ }
+
+ d.mutex.Lock()
+ defer d.mutex.Unlock()
+
+ saveFunc := d.emitFunc
+ defer func() { d.emitFunc = saveFunc }()
+
+ var hf []HeaderField
+ d.emitFunc = func(f HeaderField) { hf = append(hf, f) }
+ if _, err := d.writeLocked(p); err != nil {
+ return nil, err
+ }
+ if err := d.Close(); err != nil {
+ return nil, err
+ }
+ return hf, nil
+}
+
+// Close declares that the decoding is complete and resets the Decoder
+// to be reused again for a new header block. If there is any remaining
+// data in the decoder's buffer, Close returns an error.
+func (d *Decoder) Close() error {
+ if d.saveBuf.Len() > 0 {
+ d.saveBuf.Reset()
+ return decodingError{errors.New("truncated headers")}
+ }
+ d.readRequiredInsertCount = false
+ d.readDeltaBase = false
+ return nil
+}
+
+func (d *Decoder) decode() error {
+ if !d.readRequiredInsertCount {
+ requiredInsertCount, rest, err := readVarInt(8, d.buf)
+ if err != nil {
+ return err
+ }
+ d.readRequiredInsertCount = true
+ if requiredInsertCount != 0 {
+ return decodingError{errors.New("expected Required Insert Count to be zero")}
+ }
+ d.buf = rest
+ }
+ if !d.readDeltaBase {
+ base, rest, err := readVarInt(7, d.buf)
+ if err != nil {
+ return err
+ }
+ d.readDeltaBase = true
+ if base != 0 {
+ return decodingError{errors.New("expected Base to be zero")}
+ }
+ d.buf = rest
+ }
+ if len(d.buf) == 0 {
+ return errNeedMore
+ }
+
+ for len(d.buf) > 0 {
+ b := d.buf[0]
+ var err error
+ switch {
+ case b&0x80 > 0: // 1xxxxxxx
+ err = d.parseIndexedHeaderField()
+ case b&0xc0 == 0x40: // 01xxxxxx
+ err = d.parseLiteralHeaderField()
+ case b&0xe0 == 0x20: // 001xxxxx
+ err = d.parseLiteralHeaderFieldWithoutNameReference()
+ default:
+ err = fmt.Errorf("unexpected type byte: %#x", b)
+ }
+ if err != nil {
+ return err
+ }
+ }
+ return nil
+}
+
+func (d *Decoder) parseIndexedHeaderField() error {
+ buf := d.buf
+ if buf[0]&0x40 == 0 {
+ return errNoDynamicTable
+ }
+ index, buf, err := readVarInt(6, buf)
+ if err != nil {
+ return err
+ }
+ hf, ok := d.at(index)
+ if !ok {
+ return decodingError{invalidIndexError(index)}
+ }
+ d.emitFunc(hf)
+ d.buf = buf
+ return nil
+}
+
+func (d *Decoder) parseLiteralHeaderField() error {
+ buf := d.buf
+ if buf[0]&0x20 > 0 || buf[0]&0x10 == 0 {
+ return errNoDynamicTable
+ }
+ index, buf, err := readVarInt(4, buf)
+ if err != nil {
+ return err
+ }
+ hf, ok := d.at(index)
+ if !ok {
+ return decodingError{invalidIndexError(index)}
+ }
+ if len(buf) == 0 {
+ return errNeedMore
+ }
+ usesHuffman := buf[0]&0x80 > 0
+ val, buf, err := d.readString(buf, 7, usesHuffman)
+ if err != nil {
+ return err
+ }
+ hf.Value = val
+ d.emitFunc(hf)
+ d.buf = buf
+ return nil
+}
+
+func (d *Decoder) parseLiteralHeaderFieldWithoutNameReference() error {
+ buf := d.buf
+ usesHuffmanForName := buf[0]&0x8 > 0
+ name, buf, err := d.readString(buf, 3, usesHuffmanForName)
+ if err != nil {
+ return err
+ }
+ if len(buf) == 0 {
+ return errNeedMore
+ }
+ usesHuffmanForVal := buf[0]&0x80 > 0
+ val, buf, err := d.readString(buf, 7, usesHuffmanForVal)
+ if err != nil {
+ return err
+ }
+ d.emitFunc(HeaderField{Name: name, Value: val})
+ d.buf = buf
+ return nil
+}
+
+func (d *Decoder) readString(buf []byte, n uint8, usesHuffman bool) (string, []byte, error) {
+ l, buf, err := readVarInt(n, buf)
+ if err != nil {
+ return "", nil, err
+ }
+ if uint64(len(buf)) < l {
+ return "", nil, errNeedMore
+ }
+ var val string
+ if usesHuffman {
+ var err error
+ val, err = hpack.HuffmanDecodeToString(buf[:l])
+ if err != nil {
+ return "", nil, err
+ }
+ } else {
+ val = string(buf[:l])
+ }
+ buf = buf[l:]
+ return val, buf, nil
+}
+
+func (d *Decoder) at(i uint64) (hf HeaderField, ok bool) {
+ if i >= uint64(len(staticTableEntries)) {
+ return
+ }
+ return staticTableEntries[i], true
+}
diff --git a/vendor/github.com/quic-go/qpack/encoder.go b/vendor/github.com/quic-go/qpack/encoder.go
new file mode 100644
index 000000000..ad6953537
--- /dev/null
+++ b/vendor/github.com/quic-go/qpack/encoder.go
@@ -0,0 +1,95 @@
+package qpack
+
+import (
+ "io"
+
+ "golang.org/x/net/http2/hpack"
+)
+
+// An Encoder performs QPACK encoding.
+type Encoder struct {
+ wrotePrefix bool
+
+ w io.Writer
+ buf []byte
+}
+
+// NewEncoder returns a new Encoder which performs QPACK encoding. An
+// encoded data is written to w.
+func NewEncoder(w io.Writer) *Encoder {
+ return &Encoder{w: w}
+}
+
+// WriteField encodes f into a single Write to e's underlying Writer.
+// This function may also produce bytes for the Header Block Prefix
+// if necessary. If produced, it is done before encoding f.
+func (e *Encoder) WriteField(f HeaderField) error {
+ // write the Header Block Prefix
+ if !e.wrotePrefix {
+ e.buf = appendVarInt(e.buf, 8, 0)
+ e.buf = appendVarInt(e.buf, 7, 0)
+ e.wrotePrefix = true
+ }
+
+ idxAndVals, nameFound := encoderMap[f.Name]
+ if nameFound {
+ if idxAndVals.values == nil {
+ if len(f.Value) == 0 {
+ e.writeIndexedField(idxAndVals.idx)
+ } else {
+ e.writeLiteralFieldWithNameReference(&f, idxAndVals.idx)
+ }
+ } else {
+ valIdx, valueFound := idxAndVals.values[f.Value]
+ if valueFound {
+ e.writeIndexedField(valIdx)
+ } else {
+ e.writeLiteralFieldWithNameReference(&f, idxAndVals.idx)
+ }
+ }
+ } else {
+ e.writeLiteralFieldWithoutNameReference(f)
+ }
+
+ _, err := e.w.Write(e.buf)
+ e.buf = e.buf[:0]
+ return err
+}
+
+// Close declares that the encoding is complete and resets the Encoder
+// to be reused again for a new header block.
+func (e *Encoder) Close() error {
+ e.wrotePrefix = false
+ return nil
+}
+
+func (e *Encoder) writeLiteralFieldWithoutNameReference(f HeaderField) {
+ offset := len(e.buf)
+ e.buf = appendVarInt(e.buf, 3, hpack.HuffmanEncodeLength(f.Name))
+ e.buf[offset] ^= 0x20 ^ 0x8
+ e.buf = hpack.AppendHuffmanString(e.buf, f.Name)
+ offset = len(e.buf)
+ e.buf = appendVarInt(e.buf, 7, hpack.HuffmanEncodeLength(f.Value))
+ e.buf[offset] ^= 0x80
+ e.buf = hpack.AppendHuffmanString(e.buf, f.Value)
+}
+
+// Encodes a header field whose name is present in one of the tables.
+func (e *Encoder) writeLiteralFieldWithNameReference(f *HeaderField, id uint8) {
+ offset := len(e.buf)
+ e.buf = appendVarInt(e.buf, 4, uint64(id))
+ // Set the 01NTxxxx pattern, forcing N to 0 and T to 1
+ e.buf[offset] ^= 0x50
+ offset = len(e.buf)
+ e.buf = appendVarInt(e.buf, 7, hpack.HuffmanEncodeLength(f.Value))
+ e.buf[offset] ^= 0x80
+ e.buf = hpack.AppendHuffmanString(e.buf, f.Value)
+}
+
+// Encodes an indexed field, meaning it's entirely defined in one of the tables.
+func (e *Encoder) writeIndexedField(id uint8) {
+ offset := len(e.buf)
+ e.buf = appendVarInt(e.buf, 6, uint64(id))
+ // Set the 1Txxxxxx pattern, forcing T to 1
+ e.buf[offset] ^= 0xc0
+}
diff --git a/vendor/github.com/quic-go/qpack/header_field.go b/vendor/github.com/quic-go/qpack/header_field.go
new file mode 100644
index 000000000..4c043a992
--- /dev/null
+++ b/vendor/github.com/quic-go/qpack/header_field.go
@@ -0,0 +1,16 @@
+package qpack
+
+// A HeaderField is a name-value pair. Both the name and value are
+// treated as opaque sequences of octets.
+type HeaderField struct {
+ Name string
+ Value string
+}
+
+// IsPseudo reports whether the header field is an HTTP3 pseudo header.
+// That is, it reports whether it starts with a colon.
+// It is not otherwise guaranteed to be a valid pseudo header field,
+// though.
+func (hf HeaderField) IsPseudo() bool {
+ return len(hf.Name) != 0 && hf.Name[0] == ':'
+}
diff --git a/vendor/github.com/quic-go/qpack/static_table.go b/vendor/github.com/quic-go/qpack/static_table.go
new file mode 100644
index 000000000..73c365e1f
--- /dev/null
+++ b/vendor/github.com/quic-go/qpack/static_table.go
@@ -0,0 +1,255 @@
+package qpack
+
+var staticTableEntries = [...]HeaderField{
+ {Name: ":authority"},
+ {Name: ":path", Value: "/"},
+ {Name: "age", Value: "0"},
+ {Name: "content-disposition"},
+ {Name: "content-length", Value: "0"},
+ {Name: "cookie"},
+ {Name: "date"},
+ {Name: "etag"},
+ {Name: "if-modified-since"},
+ {Name: "if-none-match"},
+ {Name: "last-modified"},
+ {Name: "link"},
+ {Name: "location"},
+ {Name: "referer"},
+ {Name: "set-cookie"},
+ {Name: ":method", Value: "CONNECT"},
+ {Name: ":method", Value: "DELETE"},
+ {Name: ":method", Value: "GET"},
+ {Name: ":method", Value: "HEAD"},
+ {Name: ":method", Value: "OPTIONS"},
+ {Name: ":method", Value: "POST"},
+ {Name: ":method", Value: "PUT"},
+ {Name: ":scheme", Value: "http"},
+ {Name: ":scheme", Value: "https"},
+ {Name: ":status", Value: "103"},
+ {Name: ":status", Value: "200"},
+ {Name: ":status", Value: "304"},
+ {Name: ":status", Value: "404"},
+ {Name: ":status", Value: "503"},
+ {Name: "accept", Value: "*/*"},
+ {Name: "accept", Value: "application/dns-message"},
+ {Name: "accept-encoding", Value: "gzip, deflate, br"},
+ {Name: "accept-ranges", Value: "bytes"},
+ {Name: "access-control-allow-headers", Value: "cache-control"},
+ {Name: "access-control-allow-headers", Value: "content-type"},
+ {Name: "access-control-allow-origin", Value: "*"},
+ {Name: "cache-control", Value: "max-age=0"},
+ {Name: "cache-control", Value: "max-age=2592000"},
+ {Name: "cache-control", Value: "max-age=604800"},
+ {Name: "cache-control", Value: "no-cache"},
+ {Name: "cache-control", Value: "no-store"},
+ {Name: "cache-control", Value: "public, max-age=31536000"},
+ {Name: "content-encoding", Value: "br"},
+ {Name: "content-encoding", Value: "gzip"},
+ {Name: "content-type", Value: "application/dns-message"},
+ {Name: "content-type", Value: "application/javascript"},
+ {Name: "content-type", Value: "application/json"},
+ {Name: "content-type", Value: "application/x-www-form-urlencoded"},
+ {Name: "content-type", Value: "image/gif"},
+ {Name: "content-type", Value: "image/jpeg"},
+ {Name: "content-type", Value: "image/png"},
+ {Name: "content-type", Value: "text/css"},
+ {Name: "content-type", Value: "text/html; charset=utf-8"},
+ {Name: "content-type", Value: "text/plain"},
+ {Name: "content-type", Value: "text/plain;charset=utf-8"},
+ {Name: "range", Value: "bytes=0-"},
+ {Name: "strict-transport-security", Value: "max-age=31536000"},
+ {Name: "strict-transport-security", Value: "max-age=31536000; includesubdomains"},
+ {Name: "strict-transport-security", Value: "max-age=31536000; includesubdomains; preload"},
+ {Name: "vary", Value: "accept-encoding"},
+ {Name: "vary", Value: "origin"},
+ {Name: "x-content-type-options", Value: "nosniff"},
+ {Name: "x-xss-protection", Value: "1; mode=block"},
+ {Name: ":status", Value: "100"},
+ {Name: ":status", Value: "204"},
+ {Name: ":status", Value: "206"},
+ {Name: ":status", Value: "302"},
+ {Name: ":status", Value: "400"},
+ {Name: ":status", Value: "403"},
+ {Name: ":status", Value: "421"},
+ {Name: ":status", Value: "425"},
+ {Name: ":status", Value: "500"},
+ {Name: "accept-language"},
+ {Name: "access-control-allow-credentials", Value: "FALSE"},
+ {Name: "access-control-allow-credentials", Value: "TRUE"},
+ {Name: "access-control-allow-headers", Value: "*"},
+ {Name: "access-control-allow-methods", Value: "get"},
+ {Name: "access-control-allow-methods", Value: "get, post, options"},
+ {Name: "access-control-allow-methods", Value: "options"},
+ {Name: "access-control-expose-headers", Value: "content-length"},
+ {Name: "access-control-request-headers", Value: "content-type"},
+ {Name: "access-control-request-method", Value: "get"},
+ {Name: "access-control-request-method", Value: "post"},
+ {Name: "alt-svc", Value: "clear"},
+ {Name: "authorization"},
+ {Name: "content-security-policy", Value: "script-src 'none'; object-src 'none'; base-uri 'none'"},
+ {Name: "early-data", Value: "1"},
+ {Name: "expect-ct"},
+ {Name: "forwarded"},
+ {Name: "if-range"},
+ {Name: "origin"},
+ {Name: "purpose", Value: "prefetch"},
+ {Name: "server"},
+ {Name: "timing-allow-origin", Value: "*"},
+ {Name: "upgrade-insecure-requests", Value: "1"},
+ {Name: "user-agent"},
+ {Name: "x-forwarded-for"},
+ {Name: "x-frame-options", Value: "deny"},
+ {Name: "x-frame-options", Value: "sameorigin"},
+}
+
+// Only needed for tests.
+// use go:linkname to retrieve the static table.
+//
+//nolint:deadcode,unused
+func getStaticTable() []HeaderField {
+ return staticTableEntries[:]
+}
+
+type indexAndValues struct {
+ idx uint8
+ values map[string]uint8
+}
+
+// A map of the header names from the static table to their index in the table.
+// This is used by the encoder to quickly find if a header is in the static table
+// and what value should be used to encode it.
+// There's a second level of mapping for the headers that have some predefined
+// values in the static table.
+var encoderMap = map[string]indexAndValues{
+ ":authority": {0, nil},
+ ":path": {1, map[string]uint8{"/": 1}},
+ "age": {2, map[string]uint8{"0": 2}},
+ "content-disposition": {3, nil},
+ "content-length": {4, map[string]uint8{"0": 4}},
+ "cookie": {5, nil},
+ "date": {6, nil},
+ "etag": {7, nil},
+ "if-modified-since": {8, nil},
+ "if-none-match": {9, nil},
+ "last-modified": {10, nil},
+ "link": {11, nil},
+ "location": {12, nil},
+ "referer": {13, nil},
+ "set-cookie": {14, nil},
+ ":method": {15, map[string]uint8{
+ "CONNECT": 15,
+ "DELETE": 16,
+ "GET": 17,
+ "HEAD": 18,
+ "OPTIONS": 19,
+ "POST": 20,
+ "PUT": 21,
+ }},
+ ":scheme": {22, map[string]uint8{
+ "http": 22,
+ "https": 23,
+ }},
+ ":status": {24, map[string]uint8{
+ "103": 24,
+ "200": 25,
+ "304": 26,
+ "404": 27,
+ "503": 28,
+ "100": 63,
+ "204": 64,
+ "206": 65,
+ "302": 66,
+ "400": 67,
+ "403": 68,
+ "421": 69,
+ "425": 70,
+ "500": 71,
+ }},
+ "accept": {29, map[string]uint8{
+ "*/*": 29,
+ "application/dns-message": 30,
+ }},
+ "accept-encoding": {31, map[string]uint8{"gzip, deflate, br": 31}},
+ "accept-ranges": {32, map[string]uint8{"bytes": 32}},
+ "access-control-allow-headers": {33, map[string]uint8{
+ "cache-control": 33,
+ "content-type": 34,
+ "*": 75,
+ }},
+ "access-control-allow-origin": {35, map[string]uint8{"*": 35}},
+ "cache-control": {36, map[string]uint8{
+ "max-age=0": 36,
+ "max-age=2592000": 37,
+ "max-age=604800": 38,
+ "no-cache": 39,
+ "no-store": 40,
+ "public, max-age=31536000": 41,
+ }},
+ "content-encoding": {42, map[string]uint8{
+ "br": 42,
+ "gzip": 43,
+ }},
+ "content-type": {44, map[string]uint8{
+ "application/dns-message": 44,
+ "application/javascript": 45,
+ "application/json": 46,
+ "application/x-www-form-urlencoded": 47,
+ "image/gif": 48,
+ "image/jpeg": 49,
+ "image/png": 50,
+ "text/css": 51,
+ "text/html; charset=utf-8": 52,
+ "text/plain": 53,
+ "text/plain;charset=utf-8": 54,
+ }},
+ "range": {55, map[string]uint8{"bytes=0-": 55}},
+ "strict-transport-security": {56, map[string]uint8{
+ "max-age=31536000": 56,
+ "max-age=31536000; includesubdomains": 57,
+ "max-age=31536000; includesubdomains; preload": 58,
+ }},
+ "vary": {59, map[string]uint8{
+ "accept-encoding": 59,
+ "origin": 60,
+ }},
+ "x-content-type-options": {61, map[string]uint8{"nosniff": 61}},
+ "x-xss-protection": {62, map[string]uint8{"1; mode=block": 62}},
+ // ":status" is duplicated and takes index 63 to 71
+ "accept-language": {72, nil},
+ "access-control-allow-credentials": {73, map[string]uint8{
+ "FALSE": 73,
+ "TRUE": 74,
+ }},
+ // "access-control-allow-headers" is duplicated and takes index 75
+ "access-control-allow-methods": {76, map[string]uint8{
+ "get": 76,
+ "get, post, options": 77,
+ "options": 78,
+ }},
+ "access-control-expose-headers": {79, map[string]uint8{"content-length": 79}},
+ "access-control-request-headers": {80, map[string]uint8{"content-type": 80}},
+ "access-control-request-method": {81, map[string]uint8{
+ "get": 81,
+ "post": 82,
+ }},
+ "alt-svc": {83, map[string]uint8{"clear": 83}},
+ "authorization": {84, nil},
+ "content-security-policy": {85, map[string]uint8{
+ "script-src 'none'; object-src 'none'; base-uri 'none'": 85,
+ }},
+ "early-data": {86, map[string]uint8{"1": 86}},
+ "expect-ct": {87, nil},
+ "forwarded": {88, nil},
+ "if-range": {89, nil},
+ "origin": {90, nil},
+ "purpose": {91, map[string]uint8{"prefetch": 91}},
+ "server": {92, nil},
+ "timing-allow-origin": {93, map[string]uint8{"*": 93}},
+ "upgrade-insecure-requests": {94, map[string]uint8{"1": 94}},
+ "user-agent": {95, nil},
+ "x-forwarded-for": {96, nil},
+ "x-frame-options": {97, map[string]uint8{
+ "deny": 97,
+ "sameorigin": 98,
+ }},
+}
diff --git a/vendor/github.com/quic-go/qpack/tools.go b/vendor/github.com/quic-go/qpack/tools.go
new file mode 100644
index 000000000..8f71eea26
--- /dev/null
+++ b/vendor/github.com/quic-go/qpack/tools.go
@@ -0,0 +1,5 @@
+//go:build tools
+
+package qpack
+
+import _ "github.com/onsi/ginkgo/v2/ginkgo"
diff --git a/vendor/github.com/quic-go/qpack/varint.go b/vendor/github.com/quic-go/qpack/varint.go
new file mode 100644
index 000000000..28d71122e
--- /dev/null
+++ b/vendor/github.com/quic-go/qpack/varint.go
@@ -0,0 +1,66 @@
+package qpack
+
+// copied from the Go standard library HPACK implementation
+
+import "errors"
+
+var errVarintOverflow = errors.New("varint integer overflow")
+
+// appendVarInt appends i, as encoded in variable integer form using n
+// bit prefix, to dst and returns the extended buffer.
+//
+// See
+// http://http2.github.io/http2-spec/compression.html#integer.representation
+func appendVarInt(dst []byte, n byte, i uint64) []byte {
+ k := uint64((1 << n) - 1)
+ if i < k {
+ return append(dst, byte(i))
+ }
+ dst = append(dst, byte(k))
+ i -= k
+ for ; i >= 128; i >>= 7 {
+ dst = append(dst, byte(0x80|(i&0x7f)))
+ }
+ return append(dst, byte(i))
+}
+
+// readVarInt reads an unsigned variable length integer off the
+// beginning of p. n is the parameter as described in
+// http://http2.github.io/http2-spec/compression.html#rfc.section.5.1.
+//
+// n must always be between 1 and 8.
+//
+// The returned remain buffer is either a smaller suffix of p, or err != nil.
+// The error is errNeedMore if p doesn't contain a complete integer.
+func readVarInt(n byte, p []byte) (i uint64, remain []byte, err error) {
+ if n < 1 || n > 8 {
+ panic("bad n")
+ }
+ if len(p) == 0 {
+ return 0, p, errNeedMore
+ }
+ i = uint64(p[0])
+ if n < 8 {
+ i &= (1 << uint64(n)) - 1
+ }
+ if i < (1<<uint64(n))-1 {
+ return i, p, nil
+ }
+
+ origP := p
+ p = p[1:]
+ var m uint64
+ for len(p) > 0 {
+ b := p[0]
+ p = p[1:]
+ i += uint64(b&127) << m
+ if b&128 == 0 {
+ return i, p, nil
+ }
+ m += 7
+ if m >= 63 { // TODO: proper overflow check. making this up.
+ return 0, origP, errVarintOverflow
+ }
+ }
+ return 0, origP, errNeedMore
+}
diff --git a/vendor/github.com/marten-seemann/qtls-go1-16/LICENSE b/vendor/github.com/quic-go/qtls-go1-19/LICENSE
similarity index 100%
rename from vendor/github.com/marten-seemann/qtls-go1-16/LICENSE
rename to vendor/github.com/quic-go/qtls-go1-19/LICENSE
diff --git a/vendor/github.com/quic-go/qtls-go1-19/README.md b/vendor/github.com/quic-go/qtls-go1-19/README.md
new file mode 100644
index 000000000..bf41f1c5f
--- /dev/null
+++ b/vendor/github.com/quic-go/qtls-go1-19/README.md
@@ -0,0 +1,6 @@
+# qtls
+
+[![Go Reference](https://pkg.go.dev/badge/github.com/quic-go/qtls-go1-19.svg)](https://pkg.go.dev/github.com/quic-go/qtls-go1-19)
+[![.github/workflows/go-test.yml](https://github.com/quic-go/qtls-go1-19/actions/workflows/go-test.yml/badge.svg)](https://github.com/quic-go/qtls-go1-19/actions/workflows/go-test.yml)
+
+This repository contains a modified version of the standard library's TLS implementation, modified for the QUIC protocol. It is used by [quic-go](https://github.com/lucas-clemente/quic-go).
diff --git a/vendor/github.com/marten-seemann/qtls-go1-16/alert.go b/vendor/github.com/quic-go/qtls-go1-19/alert.go
similarity index 100%
rename from vendor/github.com/marten-seemann/qtls-go1-16/alert.go
rename to vendor/github.com/quic-go/qtls-go1-19/alert.go
diff --git a/vendor/github.com/marten-seemann/qtls-go1-19/auth.go b/vendor/github.com/quic-go/qtls-go1-19/auth.go
similarity index 100%
rename from vendor/github.com/marten-seemann/qtls-go1-19/auth.go
rename to vendor/github.com/quic-go/qtls-go1-19/auth.go
diff --git a/vendor/github.com/marten-seemann/qtls-go1-19/cipher_suites.go b/vendor/github.com/quic-go/qtls-go1-19/cipher_suites.go
similarity index 100%
rename from vendor/github.com/marten-seemann/qtls-go1-19/cipher_suites.go
rename to vendor/github.com/quic-go/qtls-go1-19/cipher_suites.go
diff --git a/vendor/github.com/marten-seemann/qtls-go1-19/common.go b/vendor/github.com/quic-go/qtls-go1-19/common.go
similarity index 99%
rename from vendor/github.com/marten-seemann/qtls-go1-19/common.go
rename to vendor/github.com/quic-go/qtls-go1-19/common.go
index 6670ce2f7..63e391bf6 100644
--- a/vendor/github.com/marten-seemann/qtls-go1-19/common.go
+++ b/vendor/github.com/quic-go/qtls-go1-19/common.go
@@ -345,7 +345,8 @@ type clientSessionState struct {
// goroutines. Up to TLS 1.2, only ticket-based resumption is supported, not
// SessionID-based resumption. In TLS 1.3 they were merged into PSK modes, which
// are supported via this interface.
-//go:generate sh -c "mockgen -package qtls -destination mock_client_session_cache_test.go github.com/marten-seemann/qtls-go1-17 ClientSessionCache"
+//
+//go:generate sh -c "mockgen -package qtls -destination mock_client_session_cache_test.go github.com/quic-go/qtls-go1-19 ClientSessionCache"
type ClientSessionCache = tls.ClientSessionCache
// SignatureScheme is a tls.SignatureScheme
@@ -1411,7 +1412,7 @@ func leafCertificate(c *Certificate) (*x509.Certificate, error) {
}
type handshakeMessage interface {
- marshal() []byte
+ marshal() ([]byte, error)
unmarshal([]byte) bool
}
diff --git a/vendor/github.com/marten-seemann/qtls-go1-19/conn.go b/vendor/github.com/quic-go/qtls-go1-19/conn.go
similarity index 96%
rename from vendor/github.com/marten-seemann/qtls-go1-19/conn.go
rename to vendor/github.com/quic-go/qtls-go1-19/conn.go
index 1b275a9f2..19f24e95f 100644
--- a/vendor/github.com/marten-seemann/qtls-go1-19/conn.go
+++ b/vendor/github.com/quic-go/qtls-go1-19/conn.go
@@ -125,6 +125,9 @@ type Conn struct {
used0RTT bool
tmp [16]byte
+
+ connStateMutex sync.Mutex
+ connState ConnectionStateWith0RTT
}
// Access to net.Conn methods.
@@ -1038,25 +1041,46 @@ func (c *Conn) writeRecordLocked(typ recordType, data []byte) (int, error) {
return n, nil
}
-// writeRecord writes a TLS record with the given type and payload to the
-// connection and updates the record layer state.
-func (c *Conn) writeRecord(typ recordType, data []byte) (int, error) {
+// writeHandshakeRecord writes a handshake message to the connection and updates
+// the record layer state. If transcript is non-nil the marshalled message is
+// written to it.
+func (c *Conn) writeHandshakeRecord(msg handshakeMessage, transcript transcriptHash) (int, error) {
+ data, err := msg.marshal()
+ if err != nil {
+ return 0, err
+ }
+
+ c.out.Lock()
+ defer c.out.Unlock()
+
+ if transcript != nil {
+ transcript.Write(data)
+ }
+
if c.extraConfig != nil && c.extraConfig.AlternativeRecordLayer != nil {
- if typ == recordTypeChangeCipherSpec {
- return len(data), nil
- }
return c.extraConfig.AlternativeRecordLayer.WriteRecord(data)
}
+ return c.writeRecordLocked(recordTypeHandshake, data)
+}
+
+// writeChangeCipherRecord writes a ChangeCipherSpec message to the connection and
+// updates the record layer state.
+func (c *Conn) writeChangeCipherRecord() error {
+ if c.extraConfig != nil && c.extraConfig.AlternativeRecordLayer != nil {
+ return nil
+ }
+
c.out.Lock()
defer c.out.Unlock()
-
- return c.writeRecordLocked(typ, data)
+ _, err := c.writeRecordLocked(recordTypeChangeCipherSpec, []byte{1})
+ return err
}
// readHandshake reads the next handshake message from
-// the record layer.
-func (c *Conn) readHandshake() (any, error) {
+// the record layer. If transcript is non-nil, the message
+// is written to the passed transcriptHash.
+func (c *Conn) readHandshake(transcript transcriptHash) (any, error) {
var data []byte
if c.extraConfig != nil && c.extraConfig.AlternativeRecordLayer != nil {
var err error
@@ -1144,6 +1168,11 @@ func (c *Conn) readHandshake() (any, error) {
if !m.unmarshal(data) {
return nil, c.in.setErrorLocked(c.sendAlert(alertUnexpectedMessage))
}
+
+ if transcript != nil {
+ transcript.Write(data)
+ }
+
return m, nil
}
@@ -1219,7 +1248,7 @@ func (c *Conn) handleRenegotiation() error {
return errors.New("tls: internal error: unexpected renegotiation")
}
- msg, err := c.readHandshake()
+ msg, err := c.readHandshake(nil)
if err != nil {
return err
}
@@ -1269,7 +1298,7 @@ func (c *Conn) handlePostHandshakeMessage() error {
return c.handleRenegotiation()
}
- msg, err := c.readHandshake()
+ msg, err := c.readHandshake(nil)
if err != nil {
return err
}
@@ -1305,7 +1334,11 @@ func (c *Conn) handleKeyUpdate(keyUpdate *keyUpdateMsg) error {
defer c.out.Unlock()
msg := &keyUpdateMsg{}
- _, err := c.writeRecordLocked(recordTypeHandshake, msg.marshal())
+ msgBytes, err := msg.marshal()
+ if err != nil {
+ return err
+ }
+ _, err = c.writeRecordLocked(recordTypeHandshake, msgBytes)
if err != nil {
// Surface the error at the next write.
c.out.setErrorLocked(err)
@@ -1535,19 +1568,16 @@ func (c *Conn) handshakeContext(ctx context.Context) (ret error) {
// ConnectionState returns basic TLS details about the connection.
func (c *Conn) ConnectionState() ConnectionState {
- c.handshakeMutex.Lock()
- defer c.handshakeMutex.Unlock()
- return c.connectionStateLocked()
+ c.connStateMutex.Lock()
+ defer c.connStateMutex.Unlock()
+ return c.connState.ConnectionState
}
// ConnectionStateWith0RTT returns basic TLS details (incl. 0-RTT status) about the connection.
func (c *Conn) ConnectionStateWith0RTT() ConnectionStateWith0RTT {
- c.handshakeMutex.Lock()
- defer c.handshakeMutex.Unlock()
- return ConnectionStateWith0RTT{
- ConnectionState: c.connectionStateLocked(),
- Used0RTT: c.used0RTT,
- }
+ c.connStateMutex.Lock()
+ defer c.connStateMutex.Unlock()
+ return c.connState
}
func (c *Conn) connectionStateLocked() ConnectionState {
@@ -1578,6 +1608,15 @@ func (c *Conn) connectionStateLocked() ConnectionState {
return toConnectionState(state)
}
+func (c *Conn) updateConnectionState() {
+ c.connStateMutex.Lock()
+ defer c.connStateMutex.Unlock()
+ c.connState = ConnectionStateWith0RTT{
+ Used0RTT: c.used0RTT,
+ ConnectionState: c.connectionStateLocked(),
+ }
+}
+
// OCSPResponse returns the stapled OCSP response from the TLS server, if
// any. (Only valid for client connections.)
func (c *Conn) OCSPResponse() []byte {
diff --git a/vendor/github.com/marten-seemann/qtls-go1-17/cpu.go b/vendor/github.com/quic-go/qtls-go1-19/cpu.go
similarity index 100%
rename from vendor/github.com/marten-seemann/qtls-go1-17/cpu.go
rename to vendor/github.com/quic-go/qtls-go1-19/cpu.go
diff --git a/vendor/github.com/marten-seemann/qtls-go1-17/cpu_other.go b/vendor/github.com/quic-go/qtls-go1-19/cpu_other.go
similarity index 100%
rename from vendor/github.com/marten-seemann/qtls-go1-17/cpu_other.go
rename to vendor/github.com/quic-go/qtls-go1-19/cpu_other.go
diff --git a/vendor/github.com/marten-seemann/qtls-go1-19/handshake_client.go b/vendor/github.com/quic-go/qtls-go1-19/handshake_client.go
similarity index 91%
rename from vendor/github.com/marten-seemann/qtls-go1-19/handshake_client.go
rename to vendor/github.com/quic-go/qtls-go1-19/handshake_client.go
index 4407683a9..8d1fae015 100644
--- a/vendor/github.com/marten-seemann/qtls-go1-19/handshake_client.go
+++ b/vendor/github.com/quic-go/qtls-go1-19/handshake_client.go
@@ -144,22 +144,13 @@ func (c *Conn) makeClientHello() (*clientHelloMsg, ecdheParameters, error) {
var params ecdheParameters
if hello.supportedVersions[0] == VersionTLS13 {
- var suites []uint16
- for _, suiteID := range configCipherSuites {
- for _, suite := range cipherSuitesTLS13 {
- if suite.id == suiteID {
- suites = append(suites, suiteID)
- }
- }
+ if len(hello.supportedVersions) == 1 {
+ hello.cipherSuites = hello.cipherSuites[:0]
}
- if len(suites) > 0 {
- hello.cipherSuites = suites
+ if hasAESGCMHardwareSupport {
+ hello.cipherSuites = append(hello.cipherSuites, defaultCipherSuitesTLS13...)
} else {
- if hasAESGCMHardwareSupport {
- hello.cipherSuites = append(hello.cipherSuites, defaultCipherSuitesTLS13...)
- } else {
- hello.cipherSuites = append(hello.cipherSuites, defaultCipherSuitesTLS13NoAES...)
- }
+ hello.cipherSuites = append(hello.cipherSuites, defaultCipherSuitesTLS13NoAES...)
}
curveID := config.curvePreferences()[0]
@@ -196,7 +187,10 @@ func (c *Conn) clientHandshake(ctx context.Context) (err error) {
}
c.serverName = hello.serverName
- cacheKey, session, earlySecret, binderKey := c.loadSession(hello)
+ cacheKey, session, earlySecret, binderKey, err := c.loadSession(hello)
+ if err != nil {
+ return err
+ }
if cacheKey != "" && session != nil {
var deletedTicket bool
if session.vers == VersionTLS13 && hello.earlyData && c.extraConfig != nil && c.extraConfig.Enable0RTT {
@@ -206,11 +200,14 @@ func (c *Conn) clientHandshake(ctx context.Context) (err error) {
if suite := cipherSuiteTLS13ByID(session.cipherSuite); suite != nil {
h := suite.hash.New()
- h.Write(hello.marshal())
+ helloBytes, err := hello.marshal()
+ if err != nil {
+ return err
+ }
+ h.Write(helloBytes)
clientEarlySecret := suite.deriveSecret(earlySecret, "c e traffic", h)
c.out.exportKey(Encryption0RTT, suite, clientEarlySecret)
if err := c.config.writeKeyLog(keyLogLabelEarlyTraffic, hello.random, clientEarlySecret); err != nil {
- c.sendAlert(alertInternalError)
return err
}
}
@@ -230,11 +227,12 @@ func (c *Conn) clientHandshake(ctx context.Context) (err error) {
}
}
- if _, err := c.writeRecord(recordTypeHandshake, hello.marshal()); err != nil {
+ if _, err := c.writeHandshakeRecord(hello, nil); err != nil {
return err
}
- msg, err := c.readHandshake()
+ // serverHelloMsg is not included in the transcript
+ msg, err := c.readHandshake(nil)
if err != nil {
return err
}
@@ -295,6 +293,7 @@ func (c *Conn) clientHandshake(ctx context.Context) (err error) {
c.config.ClientSessionCache.Put(cacheKey, toClientSessionState(hs.session))
}
+ c.updateConnectionState()
return nil
}
@@ -326,9 +325,9 @@ func (c *Conn) decodeSessionState(session *clientSessionState) (uint32 /* max ea
}
func (c *Conn) loadSession(hello *clientHelloMsg) (cacheKey string,
- session *clientSessionState, earlySecret, binderKey []byte) {
+ session *clientSessionState, earlySecret, binderKey []byte, err error) {
if c.config.SessionTicketsDisabled || c.config.ClientSessionCache == nil {
- return "", nil, nil, nil
+ return "", nil, nil, nil, nil
}
hello.ticketSupported = true
@@ -343,14 +342,14 @@ func (c *Conn) loadSession(hello *clientHelloMsg) (cacheKey string,
// renegotiation is primarily used to allow a client to send a client
// certificate, which would be skipped if session resumption occurred.
if c.handshakes != 0 {
- return "", nil, nil, nil
+ return "", nil, nil, nil, nil
}
// Try to resume a previously negotiated TLS session, if available.
cacheKey = clientSessionCacheKey(c.conn.RemoteAddr(), c.config)
sess, ok := c.config.ClientSessionCache.Get(cacheKey)
if !ok || sess == nil {
- return cacheKey, nil, nil, nil
+ return cacheKey, nil, nil, nil, nil
}
session = fromClientSessionState(sess)
@@ -361,7 +360,7 @@ func (c *Conn) loadSession(hello *clientHelloMsg) (cacheKey string,
maxEarlyData, appData, ok = c.decodeSessionState(session)
if !ok { // delete it, if parsing failed
c.config.ClientSessionCache.Put(cacheKey, nil)
- return cacheKey, nil, nil, nil
+ return cacheKey, nil, nil, nil, nil
}
}
@@ -374,7 +373,7 @@ func (c *Conn) loadSession(hello *clientHelloMsg) (cacheKey string,
}
}
if !versOk {
- return cacheKey, nil, nil, nil
+ return cacheKey, nil, nil, nil, nil
}
// Check that the cached server certificate is not expired, and that it's
@@ -383,16 +382,16 @@ func (c *Conn) loadSession(hello *clientHelloMsg) (cacheKey string,
if !c.config.InsecureSkipVerify {
if len(session.verifiedChains) == 0 {
// The original connection had InsecureSkipVerify, while this doesn't.
- return cacheKey, nil, nil, nil
+ return cacheKey, nil, nil, nil, nil
}
serverCert := session.serverCertificates[0]
if c.config.time().After(serverCert.NotAfter) {
// Expired certificate, delete the entry.
c.config.ClientSessionCache.Put(cacheKey, nil)
- return cacheKey, nil, nil, nil
+ return cacheKey, nil, nil, nil, nil
}
if err := serverCert.VerifyHostname(c.config.ServerName); err != nil {
- return cacheKey, nil, nil, nil
+ return cacheKey, nil, nil, nil, nil
}
}
@@ -400,7 +399,7 @@ func (c *Conn) loadSession(hello *clientHelloMsg) (cacheKey string,
// In TLS 1.2 the cipher suite must match the resumed session. Ensure we
// are still offering it.
if mutualCipherSuite(hello.cipherSuites, session.cipherSuite) == nil {
- return cacheKey, nil, nil, nil
+ return cacheKey, nil, nil, nil, nil
}
hello.sessionTicket = session.sessionTicket
@@ -410,14 +409,14 @@ func (c *Conn) loadSession(hello *clientHelloMsg) (cacheKey string,
// Check that the session ticket is not expired.
if c.config.time().After(session.useBy) {
c.config.ClientSessionCache.Put(cacheKey, nil)
- return cacheKey, nil, nil, nil
+ return cacheKey, nil, nil, nil, nil
}
// In TLS 1.3 the KDF hash must match the resumed session. Ensure we
// offer at least one cipher suite with that hash.
cipherSuite := cipherSuiteTLS13ByID(session.cipherSuite)
if cipherSuite == nil {
- return cacheKey, nil, nil, nil
+ return cacheKey, nil, nil, nil, nil
}
cipherSuiteOk := false
for _, offeredID := range hello.cipherSuites {
@@ -428,7 +427,7 @@ func (c *Conn) loadSession(hello *clientHelloMsg) (cacheKey string,
}
}
if !cipherSuiteOk {
- return cacheKey, nil, nil, nil
+ return cacheKey, nil, nil, nil, nil
}
// Set the pre_shared_key extension. See RFC 8446, Section 4.2.11.1.
@@ -449,9 +448,15 @@ func (c *Conn) loadSession(hello *clientHelloMsg) (cacheKey string,
hello.earlyData = c.extraConfig.Enable0RTT && maxEarlyData > 0
}
transcript := cipherSuite.hash.New()
- transcript.Write(hello.marshalWithoutBinders())
+ helloBytes, err := hello.marshalWithoutBinders()
+ if err != nil {
+ return "", nil, nil, nil, err
+ }
+ transcript.Write(helloBytes)
pskBinders := [][]byte{cipherSuite.finishedHash(binderKey, transcript)}
- hello.updateBinders(pskBinders)
+ if err := hello.updateBinders(pskBinders); err != nil {
+ return "", nil, nil, nil, err
+ }
if session.vers == VersionTLS13 && c.extraConfig != nil && c.extraConfig.SetAppDataFromSessionState != nil {
c.extraConfig.SetAppDataFromSessionState(appData)
@@ -499,8 +504,12 @@ func (hs *clientHandshakeState) handshake() error {
hs.finishedHash.discardHandshakeBuffer()
}
- hs.finishedHash.Write(hs.hello.marshal())
- hs.finishedHash.Write(hs.serverHello.marshal())
+ if err := transcriptMsg(hs.hello, &hs.finishedHash); err != nil {
+ return err
+ }
+ if err := transcriptMsg(hs.serverHello, &hs.finishedHash); err != nil {
+ return err
+ }
c.buffering = true
c.didResume = isResume
@@ -571,7 +580,7 @@ func (hs *clientHandshakeState) pickCipherSuite() error {
func (hs *clientHandshakeState) doFullHandshake() error {
c := hs.c
- msg, err := c.readHandshake()
+ msg, err := c.readHandshake(&hs.finishedHash)
if err != nil {
return err
}
@@ -580,9 +589,8 @@ func (hs *clientHandshakeState) doFullHandshake() error {
c.sendAlert(alertUnexpectedMessage)
return unexpectedMessageError(certMsg, msg)
}
- hs.finishedHash.Write(certMsg.marshal())
- msg, err = c.readHandshake()
+ msg, err = c.readHandshake(&hs.finishedHash)
if err != nil {
return err
}
@@ -600,11 +608,10 @@ func (hs *clientHandshakeState) doFullHandshake() error {
c.sendAlert(alertUnexpectedMessage)
return errors.New("tls: received unexpected CertificateStatus message")
}
- hs.finishedHash.Write(cs.marshal())
c.ocspResponse = cs.response
- msg, err = c.readHandshake()
+ msg, err = c.readHandshake(&hs.finishedHash)
if err != nil {
return err
}
@@ -633,14 +640,13 @@ func (hs *clientHandshakeState) doFullHandshake() error {
skx, ok := msg.(*serverKeyExchangeMsg)
if ok {
- hs.finishedHash.Write(skx.marshal())
err = keyAgreement.processServerKeyExchange(c.config, hs.hello, hs.serverHello, c.peerCertificates[0], skx)
if err != nil {
c.sendAlert(alertUnexpectedMessage)
return err
}
- msg, err = c.readHandshake()
+ msg, err = c.readHandshake(&hs.finishedHash)
if err != nil {
return err
}
@@ -651,7 +657,6 @@ func (hs *clientHandshakeState) doFullHandshake() error {
certReq, ok := msg.(*certificateRequestMsg)
if ok {
certRequested = true
- hs.finishedHash.Write(certReq.marshal())
cri := certificateRequestInfoFromMsg(hs.ctx, c.vers, certReq)
if chainToSend, err = c.getClientCertificate(cri); err != nil {
@@ -659,7 +664,7 @@ func (hs *clientHandshakeState) doFullHandshake() error {
return err
}
- msg, err = c.readHandshake()
+ msg, err = c.readHandshake(&hs.finishedHash)
if err != nil {
return err
}
@@ -670,7 +675,6 @@ func (hs *clientHandshakeState) doFullHandshake() error {
c.sendAlert(alertUnexpectedMessage)
return unexpectedMessageError(shd, msg)
}
- hs.finishedHash.Write(shd.marshal())
// If the server requested a certificate then we have to send a
// Certificate message, even if it's empty because we don't have a
@@ -678,8 +682,7 @@ func (hs *clientHandshakeState) doFullHandshake() error {
if certRequested {
certMsg = new(certificateMsg)
certMsg.certificates = chainToSend.Certificate
- hs.finishedHash.Write(certMsg.marshal())
- if _, err := c.writeRecord(recordTypeHandshake, certMsg.marshal()); err != nil {
+ if _, err := hs.c.writeHandshakeRecord(certMsg, &hs.finishedHash); err != nil {
return err
}
}
@@ -690,8 +693,7 @@ func (hs *clientHandshakeState) doFullHandshake() error {
return err
}
if ckx != nil {
- hs.finishedHash.Write(ckx.marshal())
- if _, err := c.writeRecord(recordTypeHandshake, ckx.marshal()); err != nil {
+ if _, err := hs.c.writeHandshakeRecord(ckx, &hs.finishedHash); err != nil {
return err
}
}
@@ -738,8 +740,7 @@ func (hs *clientHandshakeState) doFullHandshake() error {
return err
}
- hs.finishedHash.Write(certVerify.marshal())
- if _, err := c.writeRecord(recordTypeHandshake, certVerify.marshal()); err != nil {
+ if _, err := hs.c.writeHandshakeRecord(certVerify, &hs.finishedHash); err != nil {
return err
}
}
@@ -874,7 +875,10 @@ func (hs *clientHandshakeState) readFinished(out []byte) error {
return err
}
- msg, err := c.readHandshake()
+ // finishedMsg is included in the transcript, but not until after we
+ // check the client version, since the state before this message was
+ // sent is used during verification.
+ msg, err := c.readHandshake(nil)
if err != nil {
return err
}
@@ -890,7 +894,11 @@ func (hs *clientHandshakeState) readFinished(out []byte) error {
c.sendAlert(alertHandshakeFailure)
return errors.New("tls: server's Finished message was incorrect")
}
- hs.finishedHash.Write(serverFinished.marshal())
+
+ if err := transcriptMsg(serverFinished, &hs.finishedHash); err != nil {
+ return err
+ }
+
copy(out, verify)
return nil
}
@@ -901,7 +909,7 @@ func (hs *clientHandshakeState) readSessionTicket() error {
}
c := hs.c
- msg, err := c.readHandshake()
+ msg, err := c.readHandshake(&hs.finishedHash)
if err != nil {
return err
}
@@ -910,7 +918,6 @@ func (hs *clientHandshakeState) readSessionTicket() error {
c.sendAlert(alertUnexpectedMessage)
return unexpectedMessageError(sessionTicketMsg, msg)
}
- hs.finishedHash.Write(sessionTicketMsg.marshal())
hs.session = &clientSessionState{
sessionTicket: sessionTicketMsg.ticket,
@@ -930,20 +937,23 @@ func (hs *clientHandshakeState) readSessionTicket() error {
func (hs *clientHandshakeState) sendFinished(out []byte) error {
c := hs.c
- if _, err := c.writeRecord(recordTypeChangeCipherSpec, []byte{1}); err != nil {
+ if err := c.writeChangeCipherRecord(); err != nil {
return err
}
finished := new(finishedMsg)
finished.verifyData = hs.finishedHash.clientSum(hs.masterSecret)
- hs.finishedHash.Write(finished.marshal())
- if _, err := c.writeRecord(recordTypeHandshake, finished.marshal()); err != nil {
+ if _, err := hs.c.writeHandshakeRecord(finished, &hs.finishedHash); err != nil {
return err
}
copy(out, finished.verifyData)
return nil
}
+// maxRSAKeySize is the maximum RSA key size in bits that we are willing
+// to verify the signatures of during a TLS handshake.
+const maxRSAKeySize = 8192
+
// verifyServerCertificate parses and verifies the provided chain, setting
// c.verifiedChains and c.peerCertificates or sending the appropriate alert.
func (c *Conn) verifyServerCertificate(certificates [][]byte) error {
@@ -954,6 +964,10 @@ func (c *Conn) verifyServerCertificate(certificates [][]byte) error {
c.sendAlert(alertBadCertificate)
return errors.New("tls: failed to parse certificate from server: " + err.Error())
}
+ if cert.PublicKeyAlgorithm == x509.RSA && cert.PublicKey.(*rsa.PublicKey).N.BitLen() > maxRSAKeySize {
+ c.sendAlert(alertBadCertificate)
+ return fmt.Errorf("tls: server sent certificate containing RSA key larger than %d bits", maxRSAKeySize)
+ }
certs[i] = cert
}
diff --git a/vendor/github.com/marten-seemann/qtls-go1-19/handshake_client_tls13.go b/vendor/github.com/quic-go/qtls-go1-19/handshake_client_tls13.go
similarity index 92%
rename from vendor/github.com/marten-seemann/qtls-go1-19/handshake_client_tls13.go
rename to vendor/github.com/quic-go/qtls-go1-19/handshake_client_tls13.go
index 7f05f2c6c..05ca1333b 100644
--- a/vendor/github.com/marten-seemann/qtls-go1-19/handshake_client_tls13.go
+++ b/vendor/github.com/quic-go/qtls-go1-19/handshake_client_tls13.go
@@ -65,7 +65,10 @@ func (hs *clientHandshakeStateTLS13) handshake() error {
}
hs.transcript = hs.suite.hash.New()
- hs.transcript.Write(hs.hello.marshal())
+
+ if err := transcriptMsg(hs.hello, hs.transcript); err != nil {
+ return err
+ }
if bytes.Equal(hs.serverHello.random, helloRetryRequestRandom) {
if err := hs.sendDummyChangeCipherSpec(); err != nil {
@@ -76,12 +79,15 @@ func (hs *clientHandshakeStateTLS13) handshake() error {
}
}
- hs.transcript.Write(hs.serverHello.marshal())
+ if err := transcriptMsg(hs.serverHello, hs.transcript); err != nil {
+ return err
+ }
c.buffering = true
if err := hs.processServerHello(); err != nil {
return err
}
+ c.updateConnectionState()
if err := hs.sendDummyChangeCipherSpec(); err != nil {
return err
}
@@ -94,6 +100,7 @@ func (hs *clientHandshakeStateTLS13) handshake() error {
if err := hs.readServerCertificate(); err != nil {
return err
}
+ c.updateConnectionState()
if err := hs.readServerFinished(); err != nil {
return err
}
@@ -108,7 +115,7 @@ func (hs *clientHandshakeStateTLS13) handshake() error {
}
atomic.StoreUint32(&c.handshakeStatus, 1)
-
+ c.updateConnectionState()
return nil
}
@@ -175,8 +182,7 @@ func (hs *clientHandshakeStateTLS13) sendDummyChangeCipherSpec() error {
}
hs.sentDummyCCS = true
- _, err := hs.c.writeRecord(recordTypeChangeCipherSpec, []byte{1})
- return err
+ return hs.c.writeChangeCipherRecord()
}
// processHelloRetryRequest handles the HRR in hs.serverHello, modifies and
@@ -191,7 +197,9 @@ func (hs *clientHandshakeStateTLS13) processHelloRetryRequest() error {
hs.transcript.Reset()
hs.transcript.Write([]byte{typeMessageHash, 0, 0, uint8(len(chHash))})
hs.transcript.Write(chHash)
- hs.transcript.Write(hs.serverHello.marshal())
+ if err := transcriptMsg(hs.serverHello, hs.transcript); err != nil {
+ return err
+ }
// The only HelloRetryRequest extensions we support are key_share and
// cookie, and clients must abort the handshake if the HRR would not result
@@ -256,10 +264,18 @@ func (hs *clientHandshakeStateTLS13) processHelloRetryRequest() error {
transcript := hs.suite.hash.New()
transcript.Write([]byte{typeMessageHash, 0, 0, uint8(len(chHash))})
transcript.Write(chHash)
- transcript.Write(hs.serverHello.marshal())
- transcript.Write(hs.hello.marshalWithoutBinders())
+ if err := transcriptMsg(hs.serverHello, hs.transcript); err != nil {
+ return err
+ }
+ helloBytes, err := hs.hello.marshalWithoutBinders()
+ if err != nil {
+ return err
+ }
+ transcript.Write(helloBytes)
pskBinders := [][]byte{hs.suite.finishedHash(hs.binderKey, transcript)}
- hs.hello.updateBinders(pskBinders)
+ if err := hs.hello.updateBinders(pskBinders); err != nil {
+ return err
+ }
} else {
// Server selected a cipher suite incompatible with the PSK.
hs.hello.pskIdentities = nil
@@ -271,13 +287,12 @@ func (hs *clientHandshakeStateTLS13) processHelloRetryRequest() error {
c.extraConfig.Rejected0RTT()
}
hs.hello.earlyData = false // disable 0-RTT
-
- hs.transcript.Write(hs.hello.marshal())
- if _, err := c.writeRecord(recordTypeHandshake, hs.hello.marshal()); err != nil {
+ if _, err := hs.c.writeHandshakeRecord(hs.hello, hs.transcript); err != nil {
return err
}
- msg, err := c.readHandshake()
+ // serverHelloMsg is not included in the transcript
+ msg, err := c.readHandshake(nil)
if err != nil {
return err
}
@@ -366,6 +381,7 @@ func (hs *clientHandshakeStateTLS13) establishHandshakeKeys() error {
if !hs.usingPSK {
earlySecret = hs.suite.extract(nil, nil)
}
+
handshakeSecret := hs.suite.extract(sharedKey,
hs.suite.deriveSecret(earlySecret, "derived", nil))
@@ -398,7 +414,7 @@ func (hs *clientHandshakeStateTLS13) establishHandshakeKeys() error {
func (hs *clientHandshakeStateTLS13) readServerParameters() error {
c := hs.c
- msg, err := c.readHandshake()
+ msg, err := c.readHandshake(hs.transcript)
if err != nil {
return err
}
@@ -416,7 +432,6 @@ func (hs *clientHandshakeStateTLS13) readServerParameters() error {
if hs.c.extraConfig != nil && hs.c.extraConfig.ReceivedExtensions != nil {
hs.c.extraConfig.ReceivedExtensions(typeEncryptedExtensions, encryptedExtensions.additionalExtensions)
}
- hs.transcript.Write(encryptedExtensions.marshal())
if err := checkALPN(hs.hello.alpnProtocols, encryptedExtensions.alpnProtocol); err != nil {
c.sendAlert(alertUnsupportedExtension)
@@ -452,18 +467,16 @@ func (hs *clientHandshakeStateTLS13) readServerCertificate() error {
return nil
}
- msg, err := c.readHandshake()
+ msg, err := c.readHandshake(hs.transcript)
if err != nil {
return err
}
certReq, ok := msg.(*certificateRequestMsgTLS13)
if ok {
- hs.transcript.Write(certReq.marshal())
-
hs.certReq = certReq
- msg, err = c.readHandshake()
+ msg, err = c.readHandshake(hs.transcript)
if err != nil {
return err
}
@@ -478,7 +491,6 @@ func (hs *clientHandshakeStateTLS13) readServerCertificate() error {
c.sendAlert(alertDecodeError)
return errors.New("tls: received empty certificates message")
}
- hs.transcript.Write(certMsg.marshal())
c.scts = certMsg.certificate.SignedCertificateTimestamps
c.ocspResponse = certMsg.certificate.OCSPStaple
@@ -487,7 +499,10 @@ func (hs *clientHandshakeStateTLS13) readServerCertificate() error {
return err
}
- msg, err = c.readHandshake()
+ // certificateVerifyMsg is included in the transcript, but not until
+ // after we verify the handshake signature, since the state before
+ // this message was sent is used.
+ msg, err = c.readHandshake(nil)
if err != nil {
return err
}
@@ -518,7 +533,9 @@ func (hs *clientHandshakeStateTLS13) readServerCertificate() error {
return errors.New("tls: invalid signature by the server certificate: " + err.Error())
}
- hs.transcript.Write(certVerify.marshal())
+ if err := transcriptMsg(certVerify, hs.transcript); err != nil {
+ return err
+ }
return nil
}
@@ -526,7 +543,10 @@ func (hs *clientHandshakeStateTLS13) readServerCertificate() error {
func (hs *clientHandshakeStateTLS13) readServerFinished() error {
c := hs.c
- msg, err := c.readHandshake()
+ // finishedMsg is included in the transcript, but not until after we
+ // check the client version, since the state before this message was
+ // sent is used during verification.
+ msg, err := c.readHandshake(nil)
if err != nil {
return err
}
@@ -543,7 +563,9 @@ func (hs *clientHandshakeStateTLS13) readServerFinished() error {
return errors.New("tls: invalid server finished hash")
}
- hs.transcript.Write(finished.marshal())
+ if err := transcriptMsg(finished, hs.transcript); err != nil {
+ return err
+ }
// Derive secrets that take context through the server Finished.
@@ -593,8 +615,7 @@ func (hs *clientHandshakeStateTLS13) sendClientCertificate() error {
certMsg.scts = hs.certReq.scts && len(cert.SignedCertificateTimestamps) > 0
certMsg.ocspStapling = hs.certReq.ocspStapling && len(cert.OCSPStaple) > 0
- hs.transcript.Write(certMsg.marshal())
- if _, err := c.writeRecord(recordTypeHandshake, certMsg.marshal()); err != nil {
+ if _, err := hs.c.writeHandshakeRecord(certMsg, hs.transcript); err != nil {
return err
}
@@ -631,8 +652,7 @@ func (hs *clientHandshakeStateTLS13) sendClientCertificate() error {
}
certVerifyMsg.signature = sig
- hs.transcript.Write(certVerifyMsg.marshal())
- if _, err := c.writeRecord(recordTypeHandshake, certVerifyMsg.marshal()); err != nil {
+ if _, err := hs.c.writeHandshakeRecord(certVerifyMsg, hs.transcript); err != nil {
return err
}
@@ -646,8 +666,7 @@ func (hs *clientHandshakeStateTLS13) sendClientFinished() error {
verifyData: hs.suite.finishedHash(c.out.trafficSecret, hs.transcript),
}
- hs.transcript.Write(finished.marshal())
- if _, err := c.writeRecord(recordTypeHandshake, finished.marshal()); err != nil {
+ if _, err := hs.c.writeHandshakeRecord(finished, hs.transcript); err != nil {
return err
}
diff --git a/vendor/github.com/marten-seemann/qtls-go1-19/handshake_messages.go b/vendor/github.com/quic-go/qtls-go1-19/handshake_messages.go
similarity index 76%
rename from vendor/github.com/marten-seemann/qtls-go1-19/handshake_messages.go
rename to vendor/github.com/quic-go/qtls-go1-19/handshake_messages.go
index 07193c8ef..c69fcefda 100644
--- a/vendor/github.com/marten-seemann/qtls-go1-19/handshake_messages.go
+++ b/vendor/github.com/quic-go/qtls-go1-19/handshake_messages.go
@@ -5,6 +5,7 @@
package qtls
import (
+ "errors"
"fmt"
"strings"
@@ -95,9 +96,187 @@ type clientHelloMsg struct {
additionalExtensions []Extension
}
-func (m *clientHelloMsg) marshal() []byte {
+func (m *clientHelloMsg) marshal() ([]byte, error) {
if m.raw != nil {
- return m.raw
+ return m.raw, nil
+ }
+
+ var exts cryptobyte.Builder
+ if len(m.serverName) > 0 {
+ // RFC 6066, Section 3
+ exts.AddUint16(extensionServerName)
+ exts.AddUint16LengthPrefixed(func(exts *cryptobyte.Builder) {
+ exts.AddUint16LengthPrefixed(func(exts *cryptobyte.Builder) {
+ exts.AddUint8(0) // name_type = host_name
+ exts.AddUint16LengthPrefixed(func(exts *cryptobyte.Builder) {
+ exts.AddBytes([]byte(m.serverName))
+ })
+ })
+ })
+ }
+ if m.ocspStapling {
+ // RFC 4366, Section 3.6
+ exts.AddUint16(extensionStatusRequest)
+ exts.AddUint16LengthPrefixed(func(exts *cryptobyte.Builder) {
+ exts.AddUint8(1) // status_type = ocsp
+ exts.AddUint16(0) // empty responder_id_list
+ exts.AddUint16(0) // empty request_extensions
+ })
+ }
+ if len(m.supportedCurves) > 0 {
+ // RFC 4492, sections 5.1.1 and RFC 8446, Section 4.2.7
+ exts.AddUint16(extensionSupportedCurves)
+ exts.AddUint16LengthPrefixed(func(exts *cryptobyte.Builder) {
+ exts.AddUint16LengthPrefixed(func(exts *cryptobyte.Builder) {
+ for _, curve := range m.supportedCurves {
+ exts.AddUint16(uint16(curve))
+ }
+ })
+ })
+ }
+ if len(m.supportedPoints) > 0 {
+ // RFC 4492, Section 5.1.2
+ exts.AddUint16(extensionSupportedPoints)
+ exts.AddUint16LengthPrefixed(func(exts *cryptobyte.Builder) {
+ exts.AddUint8LengthPrefixed(func(exts *cryptobyte.Builder) {
+ exts.AddBytes(m.supportedPoints)
+ })
+ })
+ }
+ if m.ticketSupported {
+ // RFC 5077, Section 3.2
+ exts.AddUint16(extensionSessionTicket)
+ exts.AddUint16LengthPrefixed(func(exts *cryptobyte.Builder) {
+ exts.AddBytes(m.sessionTicket)
+ })
+ }
+ if len(m.supportedSignatureAlgorithms) > 0 {
+ // RFC 5246, Section 7.4.1.4.1
+ exts.AddUint16(extensionSignatureAlgorithms)
+ exts.AddUint16LengthPrefixed(func(exts *cryptobyte.Builder) {
+ exts.AddUint16LengthPrefixed(func(exts *cryptobyte.Builder) {
+ for _, sigAlgo := range m.supportedSignatureAlgorithms {
+ exts.AddUint16(uint16(sigAlgo))
+ }
+ })
+ })
+ }
+ if len(m.supportedSignatureAlgorithmsCert) > 0 {
+ // RFC 8446, Section 4.2.3
+ exts.AddUint16(extensionSignatureAlgorithmsCert)
+ exts.AddUint16LengthPrefixed(func(exts *cryptobyte.Builder) {
+ exts.AddUint16LengthPrefixed(func(exts *cryptobyte.Builder) {
+ for _, sigAlgo := range m.supportedSignatureAlgorithmsCert {
+ exts.AddUint16(uint16(sigAlgo))
+ }
+ })
+ })
+ }
+ if m.secureRenegotiationSupported {
+ // RFC 5746, Section 3.2
+ exts.AddUint16(extensionRenegotiationInfo)
+ exts.AddUint16LengthPrefixed(func(exts *cryptobyte.Builder) {
+ exts.AddUint8LengthPrefixed(func(exts *cryptobyte.Builder) {
+ exts.AddBytes(m.secureRenegotiation)
+ })
+ })
+ }
+ if len(m.alpnProtocols) > 0 {
+ // RFC 7301, Section 3.1
+ exts.AddUint16(extensionALPN)
+ exts.AddUint16LengthPrefixed(func(exts *cryptobyte.Builder) {
+ exts.AddUint16LengthPrefixed(func(exts *cryptobyte.Builder) {
+ for _, proto := range m.alpnProtocols {
+ exts.AddUint8LengthPrefixed(func(exts *cryptobyte.Builder) {
+ exts.AddBytes([]byte(proto))
+ })
+ }
+ })
+ })
+ }
+ if m.scts {
+ // RFC 6962, Section 3.3.1
+ exts.AddUint16(extensionSCT)
+ exts.AddUint16(0) // empty extension_data
+ }
+ if len(m.supportedVersions) > 0 {
+ // RFC 8446, Section 4.2.1
+ exts.AddUint16(extensionSupportedVersions)
+ exts.AddUint16LengthPrefixed(func(exts *cryptobyte.Builder) {
+ exts.AddUint8LengthPrefixed(func(exts *cryptobyte.Builder) {
+ for _, vers := range m.supportedVersions {
+ exts.AddUint16(vers)
+ }
+ })
+ })
+ }
+ if len(m.cookie) > 0 {
+ // RFC 8446, Section 4.2.2
+ exts.AddUint16(extensionCookie)
+ exts.AddUint16LengthPrefixed(func(exts *cryptobyte.Builder) {
+ exts.AddUint16LengthPrefixed(func(exts *cryptobyte.Builder) {
+ exts.AddBytes(m.cookie)
+ })
+ })
+ }
+ if len(m.keyShares) > 0 {
+ // RFC 8446, Section 4.2.8
+ exts.AddUint16(extensionKeyShare)
+ exts.AddUint16LengthPrefixed(func(exts *cryptobyte.Builder) {
+ exts.AddUint16LengthPrefixed(func(exts *cryptobyte.Builder) {
+ for _, ks := range m.keyShares {
+ exts.AddUint16(uint16(ks.group))
+ exts.AddUint16LengthPrefixed(func(exts *cryptobyte.Builder) {
+ exts.AddBytes(ks.data)
+ })
+ }
+ })
+ })
+ }
+ if m.earlyData {
+ // RFC 8446, Section 4.2.10
+ exts.AddUint16(extensionEarlyData)
+ exts.AddUint16(0) // empty extension_data
+ }
+ if len(m.pskModes) > 0 {
+ // RFC 8446, Section 4.2.9
+ exts.AddUint16(extensionPSKModes)
+ exts.AddUint16LengthPrefixed(func(exts *cryptobyte.Builder) {
+ exts.AddUint8LengthPrefixed(func(exts *cryptobyte.Builder) {
+ exts.AddBytes(m.pskModes)
+ })
+ })
+ }
+ for _, ext := range m.additionalExtensions {
+ exts.AddUint16(ext.Type)
+ exts.AddUint16LengthPrefixed(func(exts *cryptobyte.Builder) {
+ exts.AddBytes(ext.Data)
+ })
+ }
+ if len(m.pskIdentities) > 0 { // pre_shared_key must be the last extension
+ // RFC 8446, Section 4.2.11
+ exts.AddUint16(extensionPreSharedKey)
+ exts.AddUint16LengthPrefixed(func(exts *cryptobyte.Builder) {
+ exts.AddUint16LengthPrefixed(func(exts *cryptobyte.Builder) {
+ for _, psk := range m.pskIdentities {
+ exts.AddUint16LengthPrefixed(func(exts *cryptobyte.Builder) {
+ exts.AddBytes(psk.label)
+ })
+ exts.AddUint32(psk.obfuscatedTicketAge)
+ }
+ })
+ exts.AddUint16LengthPrefixed(func(exts *cryptobyte.Builder) {
+ for _, binder := range m.pskBinders {
+ exts.AddUint8LengthPrefixed(func(exts *cryptobyte.Builder) {
+ exts.AddBytes(binder)
+ })
+ }
+ })
+ })
+ }
+ extBytes, err := exts.Bytes()
+ if err != nil {
+ return nil, err
}
var b cryptobyte.Builder
@@ -117,225 +296,53 @@ func (m *clientHelloMsg) marshal() []byte {
b.AddBytes(m.compressionMethods)
})
- // If extensions aren't present, omit them.
- var extensionsPresent bool
- bWithoutExtensions := *b
-
- b.AddUint16LengthPrefixed(func(b *cryptobyte.Builder) {
- if len(m.serverName) > 0 {
- // RFC 6066, Section 3
- b.AddUint16(extensionServerName)
- b.AddUint16LengthPrefixed(func(b *cryptobyte.Builder) {
- b.AddUint16LengthPrefixed(func(b *cryptobyte.Builder) {
- b.AddUint8(0) // name_type = host_name
- b.AddUint16LengthPrefixed(func(b *cryptobyte.Builder) {
- b.AddBytes([]byte(m.serverName))
- })
- })
- })
- }
- if m.ocspStapling {
- // RFC 4366, Section 3.6
- b.AddUint16(extensionStatusRequest)
- b.AddUint16LengthPrefixed(func(b *cryptobyte.Builder) {
- b.AddUint8(1) // status_type = ocsp
- b.AddUint16(0) // empty responder_id_list
- b.AddUint16(0) // empty request_extensions
- })
- }
- if len(m.supportedCurves) > 0 {
- // RFC 4492, sections 5.1.1 and RFC 8446, Section 4.2.7
- b.AddUint16(extensionSupportedCurves)
- b.AddUint16LengthPrefixed(func(b *cryptobyte.Builder) {
- b.AddUint16LengthPrefixed(func(b *cryptobyte.Builder) {
- for _, curve := range m.supportedCurves {
- b.AddUint16(uint16(curve))
- }
- })
- })
- }
- if len(m.supportedPoints) > 0 {
- // RFC 4492, Section 5.1.2
- b.AddUint16(extensionSupportedPoints)
- b.AddUint16LengthPrefixed(func(b *cryptobyte.Builder) {
- b.AddUint8LengthPrefixed(func(b *cryptobyte.Builder) {
- b.AddBytes(m.supportedPoints)
- })
- })
- }
- if m.ticketSupported {
- // RFC 5077, Section 3.2
- b.AddUint16(extensionSessionTicket)
- b.AddUint16LengthPrefixed(func(b *cryptobyte.Builder) {
- b.AddBytes(m.sessionTicket)
- })
- }
- if len(m.supportedSignatureAlgorithms) > 0 {
- // RFC 5246, Section 7.4.1.4.1
- b.AddUint16(extensionSignatureAlgorithms)
- b.AddUint16LengthPrefixed(func(b *cryptobyte.Builder) {
- b.AddUint16LengthPrefixed(func(b *cryptobyte.Builder) {
- for _, sigAlgo := range m.supportedSignatureAlgorithms {
- b.AddUint16(uint16(sigAlgo))
- }
- })
- })
- }
- if len(m.supportedSignatureAlgorithmsCert) > 0 {
- // RFC 8446, Section 4.2.3
- b.AddUint16(extensionSignatureAlgorithmsCert)
- b.AddUint16LengthPrefixed(func(b *cryptobyte.Builder) {
- b.AddUint16LengthPrefixed(func(b *cryptobyte.Builder) {
- for _, sigAlgo := range m.supportedSignatureAlgorithmsCert {
- b.AddUint16(uint16(sigAlgo))
- }
- })
- })
- }
- if m.secureRenegotiationSupported {
- // RFC 5746, Section 3.2
- b.AddUint16(extensionRenegotiationInfo)
- b.AddUint16LengthPrefixed(func(b *cryptobyte.Builder) {
- b.AddUint8LengthPrefixed(func(b *cryptobyte.Builder) {
- b.AddBytes(m.secureRenegotiation)
- })
- })
- }
- if len(m.alpnProtocols) > 0 {
- // RFC 7301, Section 3.1
- b.AddUint16(extensionALPN)
- b.AddUint16LengthPrefixed(func(b *cryptobyte.Builder) {
- b.AddUint16LengthPrefixed(func(b *cryptobyte.Builder) {
- for _, proto := range m.alpnProtocols {
- b.AddUint8LengthPrefixed(func(b *cryptobyte.Builder) {
- b.AddBytes([]byte(proto))
- })
- }
- })
- })
- }
- if m.scts {
- // RFC 6962, Section 3.3.1
- b.AddUint16(extensionSCT)
- b.AddUint16(0) // empty extension_data
- }
- if len(m.supportedVersions) > 0 {
- // RFC 8446, Section 4.2.1
- b.AddUint16(extensionSupportedVersions)
- b.AddUint16LengthPrefixed(func(b *cryptobyte.Builder) {
- b.AddUint8LengthPrefixed(func(b *cryptobyte.Builder) {
- for _, vers := range m.supportedVersions {
- b.AddUint16(vers)
- }
- })
- })
- }
- if len(m.cookie) > 0 {
- // RFC 8446, Section 4.2.2
- b.AddUint16(extensionCookie)
- b.AddUint16LengthPrefixed(func(b *cryptobyte.Builder) {
- b.AddUint16LengthPrefixed(func(b *cryptobyte.Builder) {
- b.AddBytes(m.cookie)
- })
- })
- }
- if len(m.keyShares) > 0 {
- // RFC 8446, Section 4.2.8
- b.AddUint16(extensionKeyShare)
- b.AddUint16LengthPrefixed(func(b *cryptobyte.Builder) {
- b.AddUint16LengthPrefixed(func(b *cryptobyte.Builder) {
- for _, ks := range m.keyShares {
- b.AddUint16(uint16(ks.group))
- b.AddUint16LengthPrefixed(func(b *cryptobyte.Builder) {
- b.AddBytes(ks.data)
- })
- }
- })
- })
- }
- if m.earlyData {
- // RFC 8446, Section 4.2.10
- b.AddUint16(extensionEarlyData)
- b.AddUint16(0) // empty extension_data
- }
- if len(m.pskModes) > 0 {
- // RFC 8446, Section 4.2.9
- b.AddUint16(extensionPSKModes)
- b.AddUint16LengthPrefixed(func(b *cryptobyte.Builder) {
- b.AddUint8LengthPrefixed(func(b *cryptobyte.Builder) {
- b.AddBytes(m.pskModes)
- })
- })
- }
- for _, ext := range m.additionalExtensions {
- b.AddUint16(ext.Type)
- b.AddUint16LengthPrefixed(func(b *cryptobyte.Builder) {
- b.AddBytes(ext.Data)
- })
- }
- if len(m.pskIdentities) > 0 { // pre_shared_key must be the last extension
- // RFC 8446, Section 4.2.11
- b.AddUint16(extensionPreSharedKey)
- b.AddUint16LengthPrefixed(func(b *cryptobyte.Builder) {
- b.AddUint16LengthPrefixed(func(b *cryptobyte.Builder) {
- for _, psk := range m.pskIdentities {
- b.AddUint16LengthPrefixed(func(b *cryptobyte.Builder) {
- b.AddBytes(psk.label)
- })
- b.AddUint32(psk.obfuscatedTicketAge)
- }
- })
- b.AddUint16LengthPrefixed(func(b *cryptobyte.Builder) {
- for _, binder := range m.pskBinders {
- b.AddUint8LengthPrefixed(func(b *cryptobyte.Builder) {
- b.AddBytes(binder)
- })
- }
- })
- })
- }
-
- extensionsPresent = len(b.BytesOrPanic()) > 2
- })
-
- if !extensionsPresent {
- *b = bWithoutExtensions
+ if len(extBytes) > 0 {
+ b.AddUint16LengthPrefixed(func(b *cryptobyte.Builder) {
+ b.AddBytes(extBytes)
+ })
}
})
- m.raw = b.BytesOrPanic()
- return m.raw
+ m.raw, err = b.Bytes()
+ return m.raw, err
}
// marshalWithoutBinders returns the ClientHello through the
// PreSharedKeyExtension.identities field, according to RFC 8446, Section
// 4.2.11.2. Note that m.pskBinders must be set to slices of the correct length.
-func (m *clientHelloMsg) marshalWithoutBinders() []byte {
+func (m *clientHelloMsg) marshalWithoutBinders() ([]byte, error) {
bindersLen := 2 // uint16 length prefix
for _, binder := range m.pskBinders {
bindersLen += 1 // uint8 length prefix
bindersLen += len(binder)
}
- fullMessage := m.marshal()
- return fullMessage[:len(fullMessage)-bindersLen]
+ fullMessage, err := m.marshal()
+ if err != nil {
+ return nil, err
+ }
+ return fullMessage[:len(fullMessage)-bindersLen], nil
}
// updateBinders updates the m.pskBinders field, if necessary updating the
// cached marshaled representation. The supplied binders must have the same
// length as the current m.pskBinders.
-func (m *clientHelloMsg) updateBinders(pskBinders [][]byte) {
+func (m *clientHelloMsg) updateBinders(pskBinders [][]byte) error {
if len(pskBinders) != len(m.pskBinders) {
- panic("tls: internal error: pskBinders length mismatch")
+ return errors.New("tls: internal error: pskBinders length mismatch")
}
for i := range m.pskBinders {
if len(pskBinders[i]) != len(m.pskBinders[i]) {
- panic("tls: internal error: pskBinders length mismatch")
+ return errors.New("tls: internal error: pskBinders length mismatch")
}
}
m.pskBinders = pskBinders
if m.raw != nil {
- lenWithoutBinders := len(m.marshalWithoutBinders())
+ helloBytes, err := m.marshalWithoutBinders()
+ if err != nil {
+ return err
+ }
+ lenWithoutBinders := len(helloBytes)
b := cryptobyte.NewFixedBuilder(m.raw[:lenWithoutBinders])
b.AddUint16LengthPrefixed(func(b *cryptobyte.Builder) {
for _, binder := range m.pskBinders {
@@ -345,9 +352,11 @@ func (m *clientHelloMsg) updateBinders(pskBinders [][]byte) {
}
})
if out, err := b.Bytes(); err != nil || len(out) != len(m.raw) {
- panic("tls: internal error: failed to update binders")
+ return errors.New("tls: internal error: failed to update binders")
}
}
+
+ return nil
}
func (m *clientHelloMsg) unmarshal(data []byte) bool {
@@ -625,9 +634,98 @@ type serverHelloMsg struct {
selectedGroup CurveID
}
-func (m *serverHelloMsg) marshal() []byte {
+func (m *serverHelloMsg) marshal() ([]byte, error) {
if m.raw != nil {
- return m.raw
+ return m.raw, nil
+ }
+
+ var exts cryptobyte.Builder
+ if m.ocspStapling {
+ exts.AddUint16(extensionStatusRequest)
+ exts.AddUint16(0) // empty extension_data
+ }
+ if m.ticketSupported {
+ exts.AddUint16(extensionSessionTicket)
+ exts.AddUint16(0) // empty extension_data
+ }
+ if m.secureRenegotiationSupported {
+ exts.AddUint16(extensionRenegotiationInfo)
+ exts.AddUint16LengthPrefixed(func(exts *cryptobyte.Builder) {
+ exts.AddUint8LengthPrefixed(func(exts *cryptobyte.Builder) {
+ exts.AddBytes(m.secureRenegotiation)
+ })
+ })
+ }
+ if len(m.alpnProtocol) > 0 {
+ exts.AddUint16(extensionALPN)
+ exts.AddUint16LengthPrefixed(func(exts *cryptobyte.Builder) {
+ exts.AddUint16LengthPrefixed(func(exts *cryptobyte.Builder) {
+ exts.AddUint8LengthPrefixed(func(exts *cryptobyte.Builder) {
+ exts.AddBytes([]byte(m.alpnProtocol))
+ })
+ })
+ })
+ }
+ if len(m.scts) > 0 {
+ exts.AddUint16(extensionSCT)
+ exts.AddUint16LengthPrefixed(func(exts *cryptobyte.Builder) {
+ exts.AddUint16LengthPrefixed(func(exts *cryptobyte.Builder) {
+ for _, sct := range m.scts {
+ exts.AddUint16LengthPrefixed(func(exts *cryptobyte.Builder) {
+ exts.AddBytes(sct)
+ })
+ }
+ })
+ })
+ }
+ if m.supportedVersion != 0 {
+ exts.AddUint16(extensionSupportedVersions)
+ exts.AddUint16LengthPrefixed(func(exts *cryptobyte.Builder) {
+ exts.AddUint16(m.supportedVersion)
+ })
+ }
+ if m.serverShare.group != 0 {
+ exts.AddUint16(extensionKeyShare)
+ exts.AddUint16LengthPrefixed(func(exts *cryptobyte.Builder) {
+ exts.AddUint16(uint16(m.serverShare.group))
+ exts.AddUint16LengthPrefixed(func(exts *cryptobyte.Builder) {
+ exts.AddBytes(m.serverShare.data)
+ })
+ })
+ }
+ if m.selectedIdentityPresent {
+ exts.AddUint16(extensionPreSharedKey)
+ exts.AddUint16LengthPrefixed(func(exts *cryptobyte.Builder) {
+ exts.AddUint16(m.selectedIdentity)
+ })
+ }
+
+ if len(m.cookie) > 0 {
+ exts.AddUint16(extensionCookie)
+ exts.AddUint16LengthPrefixed(func(exts *cryptobyte.Builder) {
+ exts.AddUint16LengthPrefixed(func(exts *cryptobyte.Builder) {
+ exts.AddBytes(m.cookie)
+ })
+ })
+ }
+ if m.selectedGroup != 0 {
+ exts.AddUint16(extensionKeyShare)
+ exts.AddUint16LengthPrefixed(func(exts *cryptobyte.Builder) {
+ exts.AddUint16(uint16(m.selectedGroup))
+ })
+ }
+ if len(m.supportedPoints) > 0 {
+ exts.AddUint16(extensionSupportedPoints)
+ exts.AddUint16LengthPrefixed(func(exts *cryptobyte.Builder) {
+ exts.AddUint8LengthPrefixed(func(exts *cryptobyte.Builder) {
+ exts.AddBytes(m.supportedPoints)
+ })
+ })
+ }
+
+ extBytes, err := exts.Bytes()
+ if err != nil {
+ return nil, err
}
var b cryptobyte.Builder
@@ -641,104 +739,15 @@ func (m *serverHelloMsg) marshal() []byte {
b.AddUint16(m.cipherSuite)
b.AddUint8(m.compressionMethod)
- // If extensions aren't present, omit them.
- var extensionsPresent bool
- bWithoutExtensions := *b
-
- b.AddUint16LengthPrefixed(func(b *cryptobyte.Builder) {
- if m.ocspStapling {
- b.AddUint16(extensionStatusRequest)
- b.AddUint16(0) // empty extension_data
- }
- if m.ticketSupported {
- b.AddUint16(extensionSessionTicket)
- b.AddUint16(0) // empty extension_data
- }
- if m.secureRenegotiationSupported {
- b.AddUint16(extensionRenegotiationInfo)
- b.AddUint16LengthPrefixed(func(b *cryptobyte.Builder) {
- b.AddUint8LengthPrefixed(func(b *cryptobyte.Builder) {
- b.AddBytes(m.secureRenegotiation)
- })
- })
- }
- if len(m.alpnProtocol) > 0 {
- b.AddUint16(extensionALPN)
- b.AddUint16LengthPrefixed(func(b *cryptobyte.Builder) {
- b.AddUint16LengthPrefixed(func(b *cryptobyte.Builder) {
- b.AddUint8LengthPrefixed(func(b *cryptobyte.Builder) {
- b.AddBytes([]byte(m.alpnProtocol))
- })
- })
- })
- }
- if len(m.scts) > 0 {
- b.AddUint16(extensionSCT)
- b.AddUint16LengthPrefixed(func(b *cryptobyte.Builder) {
- b.AddUint16LengthPrefixed(func(b *cryptobyte.Builder) {
- for _, sct := range m.scts {
- b.AddUint16LengthPrefixed(func(b *cryptobyte.Builder) {
- b.AddBytes(sct)
- })
- }
- })
- })
- }
- if m.supportedVersion != 0 {
- b.AddUint16(extensionSupportedVersions)
- b.AddUint16LengthPrefixed(func(b *cryptobyte.Builder) {
- b.AddUint16(m.supportedVersion)
- })
- }
- if m.serverShare.group != 0 {
- b.AddUint16(extensionKeyShare)
- b.AddUint16LengthPrefixed(func(b *cryptobyte.Builder) {
- b.AddUint16(uint16(m.serverShare.group))
- b.AddUint16LengthPrefixed(func(b *cryptobyte.Builder) {
- b.AddBytes(m.serverShare.data)
- })
- })
- }
- if m.selectedIdentityPresent {
- b.AddUint16(extensionPreSharedKey)
- b.AddUint16LengthPrefixed(func(b *cryptobyte.Builder) {
- b.AddUint16(m.selectedIdentity)
- })
- }
-
- if len(m.cookie) > 0 {
- b.AddUint16(extensionCookie)
- b.AddUint16LengthPrefixed(func(b *cryptobyte.Builder) {
- b.AddUint16LengthPrefixed(func(b *cryptobyte.Builder) {
- b.AddBytes(m.cookie)
- })
- })
- }
- if m.selectedGroup != 0 {
- b.AddUint16(extensionKeyShare)
- b.AddUint16LengthPrefixed(func(b *cryptobyte.Builder) {
- b.AddUint16(uint16(m.selectedGroup))
- })
- }
- if len(m.supportedPoints) > 0 {
- b.AddUint16(extensionSupportedPoints)
- b.AddUint16LengthPrefixed(func(b *cryptobyte.Builder) {
- b.AddUint8LengthPrefixed(func(b *cryptobyte.Builder) {
- b.AddBytes(m.supportedPoints)
- })
- })
- }
-
- extensionsPresent = len(b.BytesOrPanic()) > 2
- })
-
- if !extensionsPresent {
- *b = bWithoutExtensions
+ if len(extBytes) > 0 {
+ b.AddUint16LengthPrefixed(func(b *cryptobyte.Builder) {
+ b.AddBytes(extBytes)
+ })
}
})
- m.raw = b.BytesOrPanic()
- return m.raw
+ m.raw, err = b.Bytes()
+ return m.raw, err
}
func (m *serverHelloMsg) unmarshal(data []byte) bool {
@@ -865,9 +874,9 @@ type encryptedExtensionsMsg struct {
additionalExtensions []Extension
}
-func (m *encryptedExtensionsMsg) marshal() []byte {
+func (m *encryptedExtensionsMsg) marshal() ([]byte, error) {
if m.raw != nil {
- return m.raw
+ return m.raw, nil
}
var b cryptobyte.Builder
@@ -898,8 +907,9 @@ func (m *encryptedExtensionsMsg) marshal() []byte {
})
})
- m.raw = b.BytesOrPanic()
- return m.raw
+ var err error
+ m.raw, err = b.Bytes()
+ return m.raw, err
}
func (m *encryptedExtensionsMsg) unmarshal(data []byte) bool {
@@ -949,10 +959,10 @@ func (m *encryptedExtensionsMsg) unmarshal(data []byte) bool {
type endOfEarlyDataMsg struct{}
-func (m *endOfEarlyDataMsg) marshal() []byte {
+func (m *endOfEarlyDataMsg) marshal() ([]byte, error) {
x := make([]byte, 4)
x[0] = typeEndOfEarlyData
- return x
+ return x, nil
}
func (m *endOfEarlyDataMsg) unmarshal(data []byte) bool {
@@ -964,9 +974,9 @@ type keyUpdateMsg struct {
updateRequested bool
}
-func (m *keyUpdateMsg) marshal() []byte {
+func (m *keyUpdateMsg) marshal() ([]byte, error) {
if m.raw != nil {
- return m.raw
+ return m.raw, nil
}
var b cryptobyte.Builder
@@ -979,8 +989,9 @@ func (m *keyUpdateMsg) marshal() []byte {
}
})
- m.raw = b.BytesOrPanic()
- return m.raw
+ var err error
+ m.raw, err = b.Bytes()
+ return m.raw, err
}
func (m *keyUpdateMsg) unmarshal(data []byte) bool {
@@ -1012,9 +1023,9 @@ type newSessionTicketMsgTLS13 struct {
maxEarlyData uint32
}
-func (m *newSessionTicketMsgTLS13) marshal() []byte {
+func (m *newSessionTicketMsgTLS13) marshal() ([]byte, error) {
if m.raw != nil {
- return m.raw
+ return m.raw, nil
}
var b cryptobyte.Builder
@@ -1039,8 +1050,9 @@ func (m *newSessionTicketMsgTLS13) marshal() []byte {
})
})
- m.raw = b.BytesOrPanic()
- return m.raw
+ var err error
+ m.raw, err = b.Bytes()
+ return m.raw, err
}
func (m *newSessionTicketMsgTLS13) unmarshal(data []byte) bool {
@@ -1093,9 +1105,9 @@ type certificateRequestMsgTLS13 struct {
certificateAuthorities [][]byte
}
-func (m *certificateRequestMsgTLS13) marshal() []byte {
+func (m *certificateRequestMsgTLS13) marshal() ([]byte, error) {
if m.raw != nil {
- return m.raw
+ return m.raw, nil
}
var b cryptobyte.Builder
@@ -1154,8 +1166,9 @@ func (m *certificateRequestMsgTLS13) marshal() []byte {
})
})
- m.raw = b.BytesOrPanic()
- return m.raw
+ var err error
+ m.raw, err = b.Bytes()
+ return m.raw, err
}
func (m *certificateRequestMsgTLS13) unmarshal(data []byte) bool {
@@ -1239,9 +1252,9 @@ type certificateMsg struct {
certificates [][]byte
}
-func (m *certificateMsg) marshal() (x []byte) {
+func (m *certificateMsg) marshal() ([]byte, error) {
if m.raw != nil {
- return m.raw
+ return m.raw, nil
}
var i int
@@ -1250,7 +1263,7 @@ func (m *certificateMsg) marshal() (x []byte) {
}
length := 3 + 3*len(m.certificates) + i
- x = make([]byte, 4+length)
+ x := make([]byte, 4+length)
x[0] = typeCertificate
x[1] = uint8(length >> 16)
x[2] = uint8(length >> 8)
@@ -1271,7 +1284,7 @@ func (m *certificateMsg) marshal() (x []byte) {
}
m.raw = x
- return
+ return m.raw, nil
}
func (m *certificateMsg) unmarshal(data []byte) bool {
@@ -1318,9 +1331,9 @@ type certificateMsgTLS13 struct {
scts bool
}
-func (m *certificateMsgTLS13) marshal() []byte {
+func (m *certificateMsgTLS13) marshal() ([]byte, error) {
if m.raw != nil {
- return m.raw
+ return m.raw, nil
}
var b cryptobyte.Builder
@@ -1338,8 +1351,9 @@ func (m *certificateMsgTLS13) marshal() []byte {
marshalCertificate(b, certificate)
})
- m.raw = b.BytesOrPanic()
- return m.raw
+ var err error
+ m.raw, err = b.Bytes()
+ return m.raw, err
}
func marshalCertificate(b *cryptobyte.Builder, certificate Certificate) {
@@ -1462,9 +1476,9 @@ type serverKeyExchangeMsg struct {
key []byte
}
-func (m *serverKeyExchangeMsg) marshal() []byte {
+func (m *serverKeyExchangeMsg) marshal() ([]byte, error) {
if m.raw != nil {
- return m.raw
+ return m.raw, nil
}
length := len(m.key)
x := make([]byte, length+4)
@@ -1475,7 +1489,7 @@ func (m *serverKeyExchangeMsg) marshal() []byte {
copy(x[4:], m.key)
m.raw = x
- return x
+ return x, nil
}
func (m *serverKeyExchangeMsg) unmarshal(data []byte) bool {
@@ -1492,9 +1506,9 @@ type certificateStatusMsg struct {
response []byte
}
-func (m *certificateStatusMsg) marshal() []byte {
+func (m *certificateStatusMsg) marshal() ([]byte, error) {
if m.raw != nil {
- return m.raw
+ return m.raw, nil
}
var b cryptobyte.Builder
@@ -1506,8 +1520,9 @@ func (m *certificateStatusMsg) marshal() []byte {
})
})
- m.raw = b.BytesOrPanic()
- return m.raw
+ var err error
+ m.raw, err = b.Bytes()
+ return m.raw, err
}
func (m *certificateStatusMsg) unmarshal(data []byte) bool {
@@ -1526,10 +1541,10 @@ func (m *certificateStatusMsg) unmarshal(data []byte) bool {
type serverHelloDoneMsg struct{}
-func (m *serverHelloDoneMsg) marshal() []byte {
+func (m *serverHelloDoneMsg) marshal() ([]byte, error) {
x := make([]byte, 4)
x[0] = typeServerHelloDone
- return x
+ return x, nil
}
func (m *serverHelloDoneMsg) unmarshal(data []byte) bool {
@@ -1541,9 +1556,9 @@ type clientKeyExchangeMsg struct {
ciphertext []byte
}
-func (m *clientKeyExchangeMsg) marshal() []byte {
+func (m *clientKeyExchangeMsg) marshal() ([]byte, error) {
if m.raw != nil {
- return m.raw
+ return m.raw, nil
}
length := len(m.ciphertext)
x := make([]byte, length+4)
@@ -1554,7 +1569,7 @@ func (m *clientKeyExchangeMsg) marshal() []byte {
copy(x[4:], m.ciphertext)
m.raw = x
- return x
+ return x, nil
}
func (m *clientKeyExchangeMsg) unmarshal(data []byte) bool {
@@ -1575,9 +1590,9 @@ type finishedMsg struct {
verifyData []byte
}
-func (m *finishedMsg) marshal() []byte {
+func (m *finishedMsg) marshal() ([]byte, error) {
if m.raw != nil {
- return m.raw
+ return m.raw, nil
}
var b cryptobyte.Builder
@@ -1586,8 +1601,9 @@ func (m *finishedMsg) marshal() []byte {
b.AddBytes(m.verifyData)
})
- m.raw = b.BytesOrPanic()
- return m.raw
+ var err error
+ m.raw, err = b.Bytes()
+ return m.raw, err
}
func (m *finishedMsg) unmarshal(data []byte) bool {
@@ -1609,9 +1625,9 @@ type certificateRequestMsg struct {
certificateAuthorities [][]byte
}
-func (m *certificateRequestMsg) marshal() (x []byte) {
+func (m *certificateRequestMsg) marshal() ([]byte, error) {
if m.raw != nil {
- return m.raw
+ return m.raw, nil
}
// See RFC 4346, Section 7.4.4.
@@ -1626,7 +1642,7 @@ func (m *certificateRequestMsg) marshal() (x []byte) {
length += 2 + 2*len(m.supportedSignatureAlgorithms)
}
- x = make([]byte, 4+length)
+ x := make([]byte, 4+length)
x[0] = typeCertificateRequest
x[1] = uint8(length >> 16)
x[2] = uint8(length >> 8)
@@ -1661,7 +1677,7 @@ func (m *certificateRequestMsg) marshal() (x []byte) {
}
m.raw = x
- return
+ return m.raw, nil
}
func (m *certificateRequestMsg) unmarshal(data []byte) bool {
@@ -1747,9 +1763,9 @@ type certificateVerifyMsg struct {
signature []byte
}
-func (m *certificateVerifyMsg) marshal() (x []byte) {
+func (m *certificateVerifyMsg) marshal() ([]byte, error) {
if m.raw != nil {
- return m.raw
+ return m.raw, nil
}
var b cryptobyte.Builder
@@ -1763,8 +1779,9 @@ func (m *certificateVerifyMsg) marshal() (x []byte) {
})
})
- m.raw = b.BytesOrPanic()
- return m.raw
+ var err error
+ m.raw, err = b.Bytes()
+ return m.raw, err
}
func (m *certificateVerifyMsg) unmarshal(data []byte) bool {
@@ -1787,15 +1804,15 @@ type newSessionTicketMsg struct {
ticket []byte
}
-func (m *newSessionTicketMsg) marshal() (x []byte) {
+func (m *newSessionTicketMsg) marshal() ([]byte, error) {
if m.raw != nil {
- return m.raw
+ return m.raw, nil
}
// See RFC 5077, Section 3.3.
ticketLen := len(m.ticket)
length := 2 + 4 + ticketLen
- x = make([]byte, 4+length)
+ x := make([]byte, 4+length)
x[0] = typeNewSessionTicket
x[1] = uint8(length >> 16)
x[2] = uint8(length >> 8)
@@ -1806,7 +1823,7 @@ func (m *newSessionTicketMsg) marshal() (x []byte) {
m.raw = x
- return
+ return m.raw, nil
}
func (m *newSessionTicketMsg) unmarshal(data []byte) bool {
@@ -1834,10 +1851,25 @@ func (m *newSessionTicketMsg) unmarshal(data []byte) bool {
type helloRequestMsg struct {
}
-func (*helloRequestMsg) marshal() []byte {
- return []byte{typeHelloRequest, 0, 0, 0}
+func (*helloRequestMsg) marshal() ([]byte, error) {
+ return []byte{typeHelloRequest, 0, 0, 0}, nil
}
func (*helloRequestMsg) unmarshal(data []byte) bool {
return len(data) == 4
}
+
+type transcriptHash interface {
+ Write([]byte) (int, error)
+}
+
+// transcriptMsg is a helper used to marshal and hash messages which typically
+// are not written to the wire, and as such aren't hashed during Conn.writeRecord.
+func transcriptMsg(msg handshakeMessage, h transcriptHash) error {
+ data, err := msg.marshal()
+ if err != nil {
+ return err
+ }
+ h.Write(data)
+ return nil
+}
diff --git a/vendor/github.com/marten-seemann/qtls-go1-19/handshake_server.go b/vendor/github.com/quic-go/qtls-go1-19/handshake_server.go
similarity index 90%
rename from vendor/github.com/marten-seemann/qtls-go1-19/handshake_server.go
rename to vendor/github.com/quic-go/qtls-go1-19/handshake_server.go
index e93874af6..738fc9471 100644
--- a/vendor/github.com/marten-seemann/qtls-go1-19/handshake_server.go
+++ b/vendor/github.com/quic-go/qtls-go1-19/handshake_server.go
@@ -132,12 +132,15 @@ func (hs *serverHandshakeState) handshake() error {
c.ekm = ekmFromMasterSecret(c.vers, hs.suite, hs.masterSecret, hs.clientHello.random, hs.hello.random)
atomic.StoreUint32(&c.handshakeStatus, 1)
+ c.updateConnectionState()
return nil
}
// readClientHello reads a ClientHello message and selects the protocol version.
func (c *Conn) readClientHello(ctx context.Context) (*clientHelloMsg, error) {
- msg, err := c.readHandshake()
+ // clientHelloMsg is included in the transcript, but we haven't initialized
+ // it yet. The respective handshake functions will record it themselves.
+ msg, err := c.readHandshake(nil)
if err != nil {
return nil, err
}
@@ -270,7 +273,7 @@ func (hs *serverHandshakeState) processClientHello() error {
hs.ecdheOk = supportsECDHE(c.config, hs.clientHello.supportedCurves, hs.clientHello.supportedPoints)
- if hs.ecdheOk {
+ if hs.ecdheOk && len(hs.clientHello.supportedPoints) > 0 {
// Although omitting the ec_point_formats extension is permitted, some
// old OpenSSL version will refuse to handshake if not present.
//
@@ -351,6 +354,13 @@ func supportsECDHE(c *config, supportedCurves []CurveID, supportedPoints []uint8
break
}
}
+ // Per RFC 8422, Section 5.1.2, if the Supported Point Formats extension is
+ // missing, uncompressed points are supported. If supportedPoints is empty,
+ // the extension must be missing, as an empty extension body is rejected by
+ // the parser. See https://go.dev/issue/49126.
+ if len(supportedPoints) == 0 {
+ supportsPointFormat = true
+ }
return supportsCurve && supportsPointFormat
}
@@ -486,9 +496,10 @@ func (hs *serverHandshakeState) doResumeHandshake() error {
hs.hello.ticketSupported = hs.sessionState.usedOldKey
hs.finishedHash = newFinishedHash(c.vers, hs.suite)
hs.finishedHash.discardHandshakeBuffer()
- hs.finishedHash.Write(hs.clientHello.marshal())
- hs.finishedHash.Write(hs.hello.marshal())
- if _, err := c.writeRecord(recordTypeHandshake, hs.hello.marshal()); err != nil {
+ if err := transcriptMsg(hs.clientHello, &hs.finishedHash); err != nil {
+ return err
+ }
+ if _, err := hs.c.writeHandshakeRecord(hs.hello, &hs.finishedHash); err != nil {
return err
}
@@ -526,24 +537,23 @@ func (hs *serverHandshakeState) doFullHandshake() error {
// certificates won't be used.
hs.finishedHash.discardHandshakeBuffer()
}
- hs.finishedHash.Write(hs.clientHello.marshal())
- hs.finishedHash.Write(hs.hello.marshal())
- if _, err := c.writeRecord(recordTypeHandshake, hs.hello.marshal()); err != nil {
+ if err := transcriptMsg(hs.clientHello, &hs.finishedHash); err != nil {
+ return err
+ }
+ if _, err := hs.c.writeHandshakeRecord(hs.hello, &hs.finishedHash); err != nil {
return err
}
certMsg := new(certificateMsg)
certMsg.certificates = hs.cert.Certificate
- hs.finishedHash.Write(certMsg.marshal())
- if _, err := c.writeRecord(recordTypeHandshake, certMsg.marshal()); err != nil {
+ if _, err := hs.c.writeHandshakeRecord(certMsg, &hs.finishedHash); err != nil {
return err
}
if hs.hello.ocspStapling {
certStatus := new(certificateStatusMsg)
certStatus.response = hs.cert.OCSPStaple
- hs.finishedHash.Write(certStatus.marshal())
- if _, err := c.writeRecord(recordTypeHandshake, certStatus.marshal()); err != nil {
+ if _, err := hs.c.writeHandshakeRecord(certStatus, &hs.finishedHash); err != nil {
return err
}
}
@@ -555,8 +565,7 @@ func (hs *serverHandshakeState) doFullHandshake() error {
return err
}
if skx != nil {
- hs.finishedHash.Write(skx.marshal())
- if _, err := c.writeRecord(recordTypeHandshake, skx.marshal()); err != nil {
+ if _, err := hs.c.writeHandshakeRecord(skx, &hs.finishedHash); err != nil {
return err
}
}
@@ -582,15 +591,13 @@ func (hs *serverHandshakeState) doFullHandshake() error {
if c.config.ClientCAs != nil {
certReq.certificateAuthorities = c.config.ClientCAs.Subjects()
}
- hs.finishedHash.Write(certReq.marshal())
- if _, err := c.writeRecord(recordTypeHandshake, certReq.marshal()); err != nil {
+ if _, err := hs.c.writeHandshakeRecord(certReq, &hs.finishedHash); err != nil {
return err
}
}
helloDone := new(serverHelloDoneMsg)
- hs.finishedHash.Write(helloDone.marshal())
- if _, err := c.writeRecord(recordTypeHandshake, helloDone.marshal()); err != nil {
+ if _, err := hs.c.writeHandshakeRecord(helloDone, &hs.finishedHash); err != nil {
return err
}
@@ -600,7 +607,7 @@ func (hs *serverHandshakeState) doFullHandshake() error {
var pub crypto.PublicKey // public key for client auth, if any
- msg, err := c.readHandshake()
+ msg, err := c.readHandshake(&hs.finishedHash)
if err != nil {
return err
}
@@ -613,7 +620,6 @@ func (hs *serverHandshakeState) doFullHandshake() error {
c.sendAlert(alertUnexpectedMessage)
return unexpectedMessageError(certMsg, msg)
}
- hs.finishedHash.Write(certMsg.marshal())
if err := c.processCertsFromClient(Certificate{
Certificate: certMsg.certificates,
@@ -624,7 +630,7 @@ func (hs *serverHandshakeState) doFullHandshake() error {
pub = c.peerCertificates[0].PublicKey
}
- msg, err = c.readHandshake()
+ msg, err = c.readHandshake(&hs.finishedHash)
if err != nil {
return err
}
@@ -642,7 +648,6 @@ func (hs *serverHandshakeState) doFullHandshake() error {
c.sendAlert(alertUnexpectedMessage)
return unexpectedMessageError(ckx, msg)
}
- hs.finishedHash.Write(ckx.marshal())
preMasterSecret, err := keyAgreement.processClientKeyExchange(c.config, hs.cert, ckx, c.vers)
if err != nil {
@@ -662,7 +667,10 @@ func (hs *serverHandshakeState) doFullHandshake() error {
// to the client's certificate. This allows us to verify that the client is in
// possession of the private key of the certificate.
if len(c.peerCertificates) > 0 {
- msg, err = c.readHandshake()
+ // certificateVerifyMsg is included in the transcript, but not until
+ // after we verify the handshake signature, since the state before
+ // this message was sent is used.
+ msg, err = c.readHandshake(nil)
if err != nil {
return err
}
@@ -697,7 +705,9 @@ func (hs *serverHandshakeState) doFullHandshake() error {
return errors.New("tls: invalid signature by the client certificate: " + err.Error())
}
- hs.finishedHash.Write(certVerify.marshal())
+ if err := transcriptMsg(certVerify, &hs.finishedHash); err != nil {
+ return err
+ }
}
hs.finishedHash.discardHandshakeBuffer()
@@ -737,7 +747,10 @@ func (hs *serverHandshakeState) readFinished(out []byte) error {
return err
}
- msg, err := c.readHandshake()
+ // finishedMsg is included in the transcript, but not until after we
+ // check the client version, since the state before this message was
+ // sent is used during verification.
+ msg, err := c.readHandshake(nil)
if err != nil {
return err
}
@@ -754,7 +767,10 @@ func (hs *serverHandshakeState) readFinished(out []byte) error {
return errors.New("tls: client's Finished message is incorrect")
}
- hs.finishedHash.Write(clientFinished.marshal())
+ if err := transcriptMsg(clientFinished, &hs.finishedHash); err != nil {
+ return err
+ }
+
copy(out, verify)
return nil
}
@@ -788,14 +804,16 @@ func (hs *serverHandshakeState) sendSessionTicket() error {
masterSecret: hs.masterSecret,
certificates: certsFromClient,
}
- var err error
- m.ticket, err = c.encryptTicket(state.marshal())
+ stateBytes, err := state.marshal()
+ if err != nil {
+ return err
+ }
+ m.ticket, err = c.encryptTicket(stateBytes)
if err != nil {
return err
}
- hs.finishedHash.Write(m.marshal())
- if _, err := c.writeRecord(recordTypeHandshake, m.marshal()); err != nil {
+ if _, err := hs.c.writeHandshakeRecord(m, &hs.finishedHash); err != nil {
return err
}
@@ -805,14 +823,13 @@ func (hs *serverHandshakeState) sendSessionTicket() error {
func (hs *serverHandshakeState) sendFinished(out []byte) error {
c := hs.c
- if _, err := c.writeRecord(recordTypeChangeCipherSpec, []byte{1}); err != nil {
+ if err := c.writeChangeCipherRecord(); err != nil {
return err
}
finished := new(finishedMsg)
finished.verifyData = hs.finishedHash.serverSum(hs.masterSecret)
- hs.finishedHash.Write(finished.marshal())
- if _, err := c.writeRecord(recordTypeHandshake, finished.marshal()); err != nil {
+ if _, err := hs.c.writeHandshakeRecord(finished, &hs.finishedHash); err != nil {
return err
}
@@ -833,6 +850,10 @@ func (c *Conn) processCertsFromClient(certificate Certificate) error {
c.sendAlert(alertBadCertificate)
return errors.New("tls: failed to parse client certificate: " + err.Error())
}
+ if certs[i].PublicKeyAlgorithm == x509.RSA && certs[i].PublicKey.(*rsa.PublicKey).N.BitLen() > maxRSAKeySize {
+ c.sendAlert(alertBadCertificate)
+ return fmt.Errorf("tls: client sent certificate containing RSA key larger than %d bits", maxRSAKeySize)
+ }
}
if len(certs) == 0 && requiresClientCert(c.config.ClientAuth) {
diff --git a/vendor/github.com/marten-seemann/qtls-go1-19/handshake_server_tls13.go b/vendor/github.com/quic-go/qtls-go1-19/handshake_server_tls13.go
similarity index 91%
rename from vendor/github.com/marten-seemann/qtls-go1-19/handshake_server_tls13.go
rename to vendor/github.com/quic-go/qtls-go1-19/handshake_server_tls13.go
index e3db8063f..c4706c44d 100644
--- a/vendor/github.com/marten-seemann/qtls-go1-19/handshake_server_tls13.go
+++ b/vendor/github.com/quic-go/qtls-go1-19/handshake_server_tls13.go
@@ -57,6 +57,7 @@ func (hs *serverHandshakeStateTLS13) handshake() error {
if err := hs.checkForResumption(); err != nil {
return err
}
+ c.updateConnectionState()
if err := hs.pickCertificate(); err != nil {
return err
}
@@ -79,12 +80,13 @@ func (hs *serverHandshakeStateTLS13) handshake() error {
if err := hs.readClientCertificate(); err != nil {
return err
}
+ c.updateConnectionState()
if err := hs.readClientFinished(); err != nil {
return err
}
atomic.StoreUint32(&c.handshakeStatus, 1)
-
+ c.updateConnectionState()
return nil
}
@@ -145,27 +147,14 @@ func (hs *serverHandshakeStateTLS13) processClientHello() error {
hs.hello.sessionId = hs.clientHello.sessionId
hs.hello.compressionMethod = compressionNone
- if hs.suite == nil {
- var preferenceList []uint16
- for _, suiteID := range c.config.CipherSuites {
- for _, suite := range cipherSuitesTLS13 {
- if suite.id == suiteID {
- preferenceList = append(preferenceList, suiteID)
- break
- }
- }
- }
- if len(preferenceList) == 0 {
- preferenceList = defaultCipherSuitesTLS13
- if !hasAESGCMHardwareSupport || !aesgcmPreferred(hs.clientHello.cipherSuites) {
- preferenceList = defaultCipherSuitesTLS13NoAES
- }
- }
- for _, suiteID := range preferenceList {
- hs.suite = mutualCipherSuiteTLS13(hs.clientHello.cipherSuites, suiteID)
- if hs.suite != nil {
- break
- }
+ preferenceList := defaultCipherSuitesTLS13
+ if !hasAESGCMHardwareSupport || !aesgcmPreferred(hs.clientHello.cipherSuites) {
+ preferenceList = defaultCipherSuitesTLS13NoAES
+ }
+ for _, suiteID := range preferenceList {
+ hs.suite = mutualCipherSuiteTLS13(hs.clientHello.cipherSuites, suiteID)
+ if hs.suite != nil {
+ break
}
}
if hs.suite == nil {
@@ -332,7 +321,12 @@ func (hs *serverHandshakeStateTLS13) checkForResumption() error {
c.sendAlert(alertInternalError)
return errors.New("tls: internal error: failed to clone hash")
}
- transcript.Write(hs.clientHello.marshalWithoutBinders())
+ clientHelloBytes, err := hs.clientHello.marshalWithoutBinders()
+ if err != nil {
+ c.sendAlert(alertInternalError)
+ return err
+ }
+ transcript.Write(clientHelloBytes)
pskBinder := hs.suite.finishedHash(binderKey, transcript)
if !hmac.Equal(hs.clientHello.pskBinders[i], pskBinder) {
c.sendAlert(alertDecryptError)
@@ -345,7 +339,12 @@ func (hs *serverHandshakeStateTLS13) checkForResumption() error {
}
h := cloneHash(hs.transcript, hs.suite.hash)
- h.Write(hs.clientHello.marshal())
+ clientHelloWithBindersBytes, err := hs.clientHello.marshal()
+ if err != nil {
+ c.sendAlert(alertInternalError)
+ return err
+ }
+ h.Write(clientHelloWithBindersBytes)
if hs.encryptedExtensions.earlyData {
clientEarlySecret := hs.suite.deriveSecret(hs.earlySecret, "c e traffic", h)
c.in.exportKey(Encryption0RTT, hs.suite, clientEarlySecret)
@@ -434,8 +433,7 @@ func (hs *serverHandshakeStateTLS13) sendDummyChangeCipherSpec() error {
}
hs.sentDummyCCS = true
- _, err := hs.c.writeRecord(recordTypeChangeCipherSpec, []byte{1})
- return err
+ return hs.c.writeChangeCipherRecord()
}
func (hs *serverHandshakeStateTLS13) doHelloRetryRequest(selectedGroup CurveID) error {
@@ -443,7 +441,9 @@ func (hs *serverHandshakeStateTLS13) doHelloRetryRequest(selectedGroup CurveID)
// The first ClientHello gets double-hashed into the transcript upon a
// HelloRetryRequest. See RFC 8446, Section 4.4.1.
- hs.transcript.Write(hs.clientHello.marshal())
+ if err := transcriptMsg(hs.clientHello, hs.transcript); err != nil {
+ return err
+ }
chHash := hs.transcript.Sum(nil)
hs.transcript.Reset()
hs.transcript.Write([]byte{typeMessageHash, 0, 0, uint8(len(chHash))})
@@ -459,8 +459,7 @@ func (hs *serverHandshakeStateTLS13) doHelloRetryRequest(selectedGroup CurveID)
selectedGroup: selectedGroup,
}
- hs.transcript.Write(helloRetryRequest.marshal())
- if _, err := c.writeRecord(recordTypeHandshake, helloRetryRequest.marshal()); err != nil {
+ if _, err := hs.c.writeHandshakeRecord(helloRetryRequest, hs.transcript); err != nil {
return err
}
@@ -468,7 +467,8 @@ func (hs *serverHandshakeStateTLS13) doHelloRetryRequest(selectedGroup CurveID)
return err
}
- msg, err := c.readHandshake()
+ // clientHelloMsg is not included in the transcript.
+ msg, err := c.readHandshake(nil)
if err != nil {
return err
}
@@ -564,9 +564,10 @@ func illegalClientHelloChange(ch, ch1 *clientHelloMsg) bool {
func (hs *serverHandshakeStateTLS13) sendServerParameters() error {
c := hs.c
- hs.transcript.Write(hs.clientHello.marshal())
- hs.transcript.Write(hs.hello.marshal())
- if _, err := c.writeRecord(recordTypeHandshake, hs.hello.marshal()); err != nil {
+ if err := transcriptMsg(hs.clientHello, hs.transcript); err != nil {
+ return err
+ }
+ if _, err := hs.c.writeHandshakeRecord(hs.hello, hs.transcript); err != nil {
return err
}
@@ -609,8 +610,7 @@ func (hs *serverHandshakeStateTLS13) sendServerParameters() error {
hs.encryptedExtensions.additionalExtensions = hs.c.extraConfig.GetExtensions(typeEncryptedExtensions)
}
- hs.transcript.Write(hs.encryptedExtensions.marshal())
- if _, err := c.writeRecord(recordTypeHandshake, hs.encryptedExtensions.marshal()); err != nil {
+ if _, err := hs.c.writeHandshakeRecord(hs.encryptedExtensions, hs.transcript); err != nil {
return err
}
@@ -639,8 +639,7 @@ func (hs *serverHandshakeStateTLS13) sendServerCertificate() error {
certReq.certificateAuthorities = c.config.ClientCAs.Subjects()
}
- hs.transcript.Write(certReq.marshal())
- if _, err := c.writeRecord(recordTypeHandshake, certReq.marshal()); err != nil {
+ if _, err := hs.c.writeHandshakeRecord(certReq, hs.transcript); err != nil {
return err
}
}
@@ -651,8 +650,7 @@ func (hs *serverHandshakeStateTLS13) sendServerCertificate() error {
certMsg.scts = hs.clientHello.scts && len(hs.cert.SignedCertificateTimestamps) > 0
certMsg.ocspStapling = hs.clientHello.ocspStapling && len(hs.cert.OCSPStaple) > 0
- hs.transcript.Write(certMsg.marshal())
- if _, err := c.writeRecord(recordTypeHandshake, certMsg.marshal()); err != nil {
+ if _, err := hs.c.writeHandshakeRecord(certMsg, hs.transcript); err != nil {
return err
}
@@ -683,8 +681,7 @@ func (hs *serverHandshakeStateTLS13) sendServerCertificate() error {
}
certVerifyMsg.signature = sig
- hs.transcript.Write(certVerifyMsg.marshal())
- if _, err := c.writeRecord(recordTypeHandshake, certVerifyMsg.marshal()); err != nil {
+ if _, err := hs.c.writeHandshakeRecord(certVerifyMsg, hs.transcript); err != nil {
return err
}
@@ -698,8 +695,7 @@ func (hs *serverHandshakeStateTLS13) sendServerFinished() error {
verifyData: hs.suite.finishedHash(c.out.trafficSecret, hs.transcript),
}
- hs.transcript.Write(finished.marshal())
- if _, err := c.writeRecord(recordTypeHandshake, finished.marshal()); err != nil {
+ if _, err := hs.c.writeHandshakeRecord(finished, hs.transcript); err != nil {
return err
}
@@ -761,7 +757,9 @@ func (hs *serverHandshakeStateTLS13) sendSessionTickets() error {
finishedMsg := &finishedMsg{
verifyData: hs.clientFinished,
}
- hs.transcript.Write(finishedMsg.marshal())
+ if err := transcriptMsg(finishedMsg, hs.transcript); err != nil {
+ return err
+ }
if !hs.shouldSendSessionTickets() {
return nil
@@ -782,7 +780,7 @@ func (hs *serverHandshakeStateTLS13) sendSessionTickets() error {
return err
}
- if _, err := c.writeRecord(recordTypeHandshake, m.marshal()); err != nil {
+ if _, err := c.writeHandshakeRecord(m, nil); err != nil {
return err
}
@@ -807,7 +805,7 @@ func (hs *serverHandshakeStateTLS13) readClientCertificate() error {
// If we requested a client certificate, then the client must send a
// certificate message. If it's empty, no CertificateVerify is sent.
- msg, err := c.readHandshake()
+ msg, err := c.readHandshake(hs.transcript)
if err != nil {
return err
}
@@ -817,7 +815,6 @@ func (hs *serverHandshakeStateTLS13) readClientCertificate() error {
c.sendAlert(alertUnexpectedMessage)
return unexpectedMessageError(certMsg, msg)
}
- hs.transcript.Write(certMsg.marshal())
if err := c.processCertsFromClient(certMsg.certificate); err != nil {
return err
@@ -831,7 +828,10 @@ func (hs *serverHandshakeStateTLS13) readClientCertificate() error {
}
if len(certMsg.certificate.Certificate) != 0 {
- msg, err = c.readHandshake()
+ // certificateVerifyMsg is included in the transcript, but not until
+ // after we verify the handshake signature, since the state before
+ // this message was sent is used.
+ msg, err = c.readHandshake(nil)
if err != nil {
return err
}
@@ -862,7 +862,9 @@ func (hs *serverHandshakeStateTLS13) readClientCertificate() error {
return errors.New("tls: invalid signature by the client certificate: " + err.Error())
}
- hs.transcript.Write(certVerify.marshal())
+ if err := transcriptMsg(certVerify, hs.transcript); err != nil {
+ return err
+ }
}
// If we waited until the client certificates to send session tickets, we
@@ -877,7 +879,8 @@ func (hs *serverHandshakeStateTLS13) readClientCertificate() error {
func (hs *serverHandshakeStateTLS13) readClientFinished() error {
c := hs.c
- msg, err := c.readHandshake()
+ // finishedMsg is not included in the transcript.
+ msg, err := c.readHandshake(nil)
if err != nil {
return err
}
diff --git a/vendor/github.com/marten-seemann/qtls-go1-17/key_agreement.go b/vendor/github.com/quic-go/qtls-go1-19/key_agreement.go
similarity index 100%
rename from vendor/github.com/marten-seemann/qtls-go1-17/key_agreement.go
rename to vendor/github.com/quic-go/qtls-go1-19/key_agreement.go
diff --git a/vendor/github.com/marten-seemann/qtls-go1-16/key_schedule.go b/vendor/github.com/quic-go/qtls-go1-19/key_schedule.go
similarity index 86%
rename from vendor/github.com/marten-seemann/qtls-go1-16/key_schedule.go
rename to vendor/github.com/quic-go/qtls-go1-19/key_schedule.go
index da13904a6..708bdc7c3 100644
--- a/vendor/github.com/marten-seemann/qtls-go1-16/key_schedule.go
+++ b/vendor/github.com/quic-go/qtls-go1-19/key_schedule.go
@@ -8,6 +8,7 @@ import (
"crypto/elliptic"
"crypto/hmac"
"errors"
+ "fmt"
"hash"
"io"
"math/big"
@@ -42,8 +43,24 @@ func (c *cipherSuiteTLS13) expandLabel(secret []byte, label string, context []by
hkdfLabel.AddUint8LengthPrefixed(func(b *cryptobyte.Builder) {
b.AddBytes(context)
})
+ hkdfLabelBytes, err := hkdfLabel.Bytes()
+ if err != nil {
+ // Rather than calling BytesOrPanic, we explicitly handle this error, in
+ // order to provide a reasonable error message. It should be basically
+ // impossible for this to panic, and routing errors back through the
+ // tree rooted in this function is quite painful. The labels are fixed
+ // size, and the context is either a fixed-length computed hash, or
+ // parsed from a field which has the same length limitation. As such, an
+ // error here is likely to only be caused during development.
+ //
+ // NOTE: another reasonable approach here might be to return a
+ // randomized slice if we encounter an error, which would break the
+ // connection, but avoid panicking. This would perhaps be safer but
+ // significantly more confusing to users.
+ panic(fmt.Errorf("failed to construct HKDF label: %s", err))
+ }
out := make([]byte, length)
- n, err := hkdf.Expand(c.hash.New, secret, hkdfLabel.BytesOrPanic()).Read(out)
+ n, err := hkdf.Expand(c.hash.New, secret, hkdfLabelBytes).Read(out)
if err != nil || n != length {
panic("tls: HKDF-Expand-Label invocation failed unexpectedly")
}
diff --git a/vendor/github.com/marten-seemann/qtls-go1-19/notboring.go b/vendor/github.com/quic-go/qtls-go1-19/notboring.go
similarity index 100%
rename from vendor/github.com/marten-seemann/qtls-go1-19/notboring.go
rename to vendor/github.com/quic-go/qtls-go1-19/notboring.go
diff --git a/vendor/github.com/marten-seemann/qtls-go1-16/prf.go b/vendor/github.com/quic-go/qtls-go1-19/prf.go
similarity index 100%
rename from vendor/github.com/marten-seemann/qtls-go1-16/prf.go
rename to vendor/github.com/quic-go/qtls-go1-19/prf.go
diff --git a/vendor/github.com/marten-seemann/qtls-go1-18/ticket.go b/vendor/github.com/quic-go/qtls-go1-19/ticket.go
similarity index 96%
rename from vendor/github.com/marten-seemann/qtls-go1-18/ticket.go
rename to vendor/github.com/quic-go/qtls-go1-19/ticket.go
index 81e8a52ea..fe1c7a884 100644
--- a/vendor/github.com/marten-seemann/qtls-go1-18/ticket.go
+++ b/vendor/github.com/quic-go/qtls-go1-19/ticket.go
@@ -34,7 +34,7 @@ type sessionState struct {
usedOldKey bool
}
-func (m *sessionState) marshal() []byte {
+func (m *sessionState) marshal() ([]byte, error) {
var b cryptobyte.Builder
b.AddUint16(m.vers)
b.AddUint16(m.cipherSuite)
@@ -49,7 +49,7 @@ func (m *sessionState) marshal() []byte {
})
}
})
- return b.BytesOrPanic()
+ return b.Bytes()
}
func (m *sessionState) unmarshal(data []byte) bool {
@@ -94,7 +94,7 @@ type sessionStateTLS13 struct {
appData []byte
}
-func (m *sessionStateTLS13) marshal() []byte {
+func (m *sessionStateTLS13) marshal() ([]byte, error) {
var b cryptobyte.Builder
b.AddUint16(VersionTLS13)
b.AddUint8(2) // revision
@@ -111,7 +111,7 @@ func (m *sessionStateTLS13) marshal() []byte {
b.AddUint16LengthPrefixed(func(b *cryptobyte.Builder) {
b.AddBytes(m.appData)
})
- return b.BytesOrPanic()
+ return b.Bytes()
}
func (m *sessionStateTLS13) unmarshal(data []byte) bool {
@@ -227,8 +227,11 @@ func (c *Conn) getSessionTicketMsg(appData []byte) (*newSessionTicketMsgTLS13, e
if c.extraConfig != nil {
state.maxEarlyData = c.extraConfig.MaxEarlyData
}
- var err error
- m.label, err = c.encryptTicket(state.marshal())
+ stateBytes, err := state.marshal()
+ if err != nil {
+ return nil, err
+ }
+ m.label, err = c.encryptTicket(stateBytes)
if err != nil {
return nil, err
}
@@ -270,5 +273,5 @@ func (c *Conn) GetSessionTicket(appData []byte) ([]byte, error) {
if err != nil {
return nil, err
}
- return m.marshal(), nil
+ return m.marshal()
}
diff --git a/vendor/github.com/marten-seemann/qtls-go1-17/tls.go b/vendor/github.com/quic-go/qtls-go1-19/tls.go
similarity index 100%
rename from vendor/github.com/marten-seemann/qtls-go1-17/tls.go
rename to vendor/github.com/quic-go/qtls-go1-19/tls.go
diff --git a/vendor/github.com/marten-seemann/qtls-go1-16/unsafe.go b/vendor/github.com/quic-go/qtls-go1-19/unsafe.go
similarity index 100%
rename from vendor/github.com/marten-seemann/qtls-go1-16/unsafe.go
rename to vendor/github.com/quic-go/qtls-go1-19/unsafe.go
diff --git a/vendor/github.com/marten-seemann/qtls-go1-17/LICENSE b/vendor/github.com/quic-go/qtls-go1-20/LICENSE
similarity index 100%
rename from vendor/github.com/marten-seemann/qtls-go1-17/LICENSE
rename to vendor/github.com/quic-go/qtls-go1-20/LICENSE
diff --git a/vendor/github.com/quic-go/qtls-go1-20/README.md b/vendor/github.com/quic-go/qtls-go1-20/README.md
new file mode 100644
index 000000000..2beaa2f23
--- /dev/null
+++ b/vendor/github.com/quic-go/qtls-go1-20/README.md
@@ -0,0 +1,6 @@
+# qtls
+
+[![Go Reference](https://pkg.go.dev/badge/github.com/quic-go/qtls-go1-20.svg)](https://pkg.go.dev/github.com/quic-go/qtls-go1-20)
+[![.github/workflows/go-test.yml](https://github.com/quic-go/qtls-go1-20/actions/workflows/go-test.yml/badge.svg)](https://github.com/quic-go/qtls-go1-20/actions/workflows/go-test.yml)
+
+This repository contains a modified version of the standard library's TLS implementation, modified for the QUIC protocol. It is used by [quic-go](https://github.com/quic-go/quic-go).
diff --git a/vendor/github.com/marten-seemann/qtls-go1-17/alert.go b/vendor/github.com/quic-go/qtls-go1-20/alert.go
similarity index 100%
rename from vendor/github.com/marten-seemann/qtls-go1-17/alert.go
rename to vendor/github.com/quic-go/qtls-go1-20/alert.go
diff --git a/vendor/github.com/marten-seemann/qtls-go1-17/auth.go b/vendor/github.com/quic-go/qtls-go1-20/auth.go
similarity index 98%
rename from vendor/github.com/marten-seemann/qtls-go1-17/auth.go
rename to vendor/github.com/quic-go/qtls-go1-20/auth.go
index 1ef675fd3..effc9aced 100644
--- a/vendor/github.com/marten-seemann/qtls-go1-17/auth.go
+++ b/vendor/github.com/quic-go/qtls-go1-20/auth.go
@@ -169,6 +169,7 @@ var rsaSignatureSchemes = []struct {
// and optionally filtered by its explicit SupportedSignatureAlgorithms.
//
// This function must be kept in sync with supportedSignatureAlgorithms.
+// FIPS filtering is applied in the caller, selectSignatureScheme.
func signatureSchemesForCertificate(version uint16, cert *Certificate) []SignatureScheme {
priv, ok := cert.PrivateKey.(crypto.Signer)
if !ok {
@@ -241,6 +242,9 @@ func selectSignatureScheme(vers uint16, c *Certificate, peerAlgs []SignatureSche
// Pick signature scheme in the peer's preference order, as our
// preference order is not configurable.
for _, preferredAlg := range peerAlgs {
+ if needFIPS() && !isSupportedSignatureAlgorithm(preferredAlg, fipsSupportedSignatureAlgorithms) {
+ continue
+ }
if isSupportedSignatureAlgorithm(preferredAlg, supportedAlgs) {
return preferredAlg, nil
}
diff --git a/vendor/github.com/quic-go/qtls-go1-20/cache.go b/vendor/github.com/quic-go/qtls-go1-20/cache.go
new file mode 100644
index 000000000..99e0c5fb8
--- /dev/null
+++ b/vendor/github.com/quic-go/qtls-go1-20/cache.go
@@ -0,0 +1,95 @@
+// Copyright 2022 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+package qtls
+
+import (
+ "crypto/x509"
+ "runtime"
+ "sync"
+ "sync/atomic"
+)
+
+type cacheEntry struct {
+ refs atomic.Int64
+ cert *x509.Certificate
+}
+
+// certCache implements an intern table for reference counted x509.Certificates,
+// implemented in a similar fashion to BoringSSL's CRYPTO_BUFFER_POOL. This
+// allows for a single x509.Certificate to be kept in memory and referenced from
+// multiple Conns. Returned references should not be mutated by callers. Certificates
+// are still safe to use after they are removed from the cache.
+//
+// Certificates are returned wrapped in a activeCert struct that should be held by
+// the caller. When references to the activeCert are freed, the number of references
+// to the certificate in the cache is decremented. Once the number of references
+// reaches zero, the entry is evicted from the cache.
+//
+// The main difference between this implementation and CRYPTO_BUFFER_POOL is that
+// CRYPTO_BUFFER_POOL is a more generic structure which supports blobs of data,
+// rather than specific structures. Since we only care about x509.Certificates,
+// certCache is implemented as a specific cache, rather than a generic one.
+//
+// See https://boringssl.googlesource.com/boringssl/+/master/include/openssl/pool.h
+// and https://boringssl.googlesource.com/boringssl/+/master/crypto/pool/pool.c
+// for the BoringSSL reference.
+type certCache struct {
+ sync.Map
+}
+
+var clientCertCache = new(certCache)
+
+// activeCert is a handle to a certificate held in the cache. Once there are
+// no alive activeCerts for a given certificate, the certificate is removed
+// from the cache by a finalizer.
+type activeCert struct {
+ cert *x509.Certificate
+}
+
+// active increments the number of references to the entry, wraps the
+// certificate in the entry in a activeCert, and sets the finalizer.
+//
+// Note that there is a race between active and the finalizer set on the
+// returned activeCert, triggered if active is called after the ref count is
+// decremented such that refs may be > 0 when evict is called. We consider this
+// safe, since the caller holding an activeCert for an entry that is no longer
+// in the cache is fine, with the only side effect being the memory overhead of
+// there being more than one distinct reference to a certificate alive at once.
+func (cc *certCache) active(e *cacheEntry) *activeCert {
+ e.refs.Add(1)
+ a := &activeCert{e.cert}
+ runtime.SetFinalizer(a, func(_ *activeCert) {
+ if e.refs.Add(-1) == 0 {
+ cc.evict(e)
+ }
+ })
+ return a
+}
+
+// evict removes a cacheEntry from the cache.
+func (cc *certCache) evict(e *cacheEntry) {
+ cc.Delete(string(e.cert.Raw))
+}
+
+// newCert returns a x509.Certificate parsed from der. If there is already a copy
+// of the certificate in the cache, a reference to the existing certificate will
+// be returned. Otherwise, a fresh certificate will be added to the cache, and
+// the reference returned. The returned reference should not be mutated.
+func (cc *certCache) newCert(der []byte) (*activeCert, error) {
+ if entry, ok := cc.Load(string(der)); ok {
+ return cc.active(entry.(*cacheEntry)), nil
+ }
+
+ cert, err := x509.ParseCertificate(der)
+ if err != nil {
+ return nil, err
+ }
+
+ entry := &cacheEntry{cert: cert}
+ if entry, loaded := cc.LoadOrStore(string(der), entry); loaded {
+ return cc.active(entry.(*cacheEntry)), nil
+ }
+ return cc.active(entry), nil
+}
diff --git a/vendor/github.com/marten-seemann/qtls-go1-18/cipher_suites.go b/vendor/github.com/quic-go/qtls-go1-20/cipher_suites.go
similarity index 91%
rename from vendor/github.com/marten-seemann/qtls-go1-18/cipher_suites.go
rename to vendor/github.com/quic-go/qtls-go1-20/cipher_suites.go
index e0be51474..43d213157 100644
--- a/vendor/github.com/marten-seemann/qtls-go1-18/cipher_suites.go
+++ b/vendor/github.com/quic-go/qtls-go1-20/cipher_suites.go
@@ -226,57 +226,56 @@ var cipherSuitesTLS13 = []*cipherSuiteTLS13{ // TODO: replace with a map.
//
// - Anything else comes before RC4
//
-// RC4 has practically exploitable biases. See https://www.rc4nomore.com.
+// RC4 has practically exploitable biases. See https://www.rc4nomore.com.
//
// - Anything else comes before CBC_SHA256
//
-// SHA-256 variants of the CBC ciphersuites don't implement any Lucky13
-// countermeasures. See http://www.isg.rhul.ac.uk/tls/Lucky13.html and
-// https://www.imperialviolet.org/2013/02/04/luckythirteen.html.
+// SHA-256 variants of the CBC ciphersuites don't implement any Lucky13
+// countermeasures. See http://www.isg.rhul.ac.uk/tls/Lucky13.html and
+// https://www.imperialviolet.org/2013/02/04/luckythirteen.html.
//
// - Anything else comes before 3DES
//
-// 3DES has 64-bit blocks, which makes it fundamentally susceptible to
-// birthday attacks. See https://sweet32.info.
+// 3DES has 64-bit blocks, which makes it fundamentally susceptible to
+// birthday attacks. See https://sweet32.info.
//
// - ECDHE comes before anything else
//
-// Once we got the broken stuff out of the way, the most important
-// property a cipher suite can have is forward secrecy. We don't
-// implement FFDHE, so that means ECDHE.
+// Once we got the broken stuff out of the way, the most important
+// property a cipher suite can have is forward secrecy. We don't
+// implement FFDHE, so that means ECDHE.
//
// - AEADs come before CBC ciphers
//
-// Even with Lucky13 countermeasures, MAC-then-Encrypt CBC cipher suites
-// are fundamentally fragile, and suffered from an endless sequence of
-// padding oracle attacks. See https://eprint.iacr.org/2015/1129,
-// https://www.imperialviolet.org/2014/12/08/poodleagain.html, and
-// https://blog.cloudflare.com/yet-another-padding-oracle-in-openssl-cbc-ciphersuites/.
+// Even with Lucky13 countermeasures, MAC-then-Encrypt CBC cipher suites
+// are fundamentally fragile, and suffered from an endless sequence of
+// padding oracle attacks. See https://eprint.iacr.org/2015/1129,
+// https://www.imperialviolet.org/2014/12/08/poodleagain.html, and
+// https://blog.cloudflare.com/yet-another-padding-oracle-in-openssl-cbc-ciphersuites/.
//
// - AES comes before ChaCha20
//
-// When AES hardware is available, AES-128-GCM and AES-256-GCM are faster
-// than ChaCha20Poly1305.
+// When AES hardware is available, AES-128-GCM and AES-256-GCM are faster
+// than ChaCha20Poly1305.
//
-// When AES hardware is not available, AES-128-GCM is one or more of: much
-// slower, way more complex, and less safe (because not constant time)
-// than ChaCha20Poly1305.
+// When AES hardware is not available, AES-128-GCM is one or more of: much
+// slower, way more complex, and less safe (because not constant time)
+// than ChaCha20Poly1305.
//
-// We use this list if we think both peers have AES hardware, and
-// cipherSuitesPreferenceOrderNoAES otherwise.
+// We use this list if we think both peers have AES hardware, and
+// cipherSuitesPreferenceOrderNoAES otherwise.
//
// - AES-128 comes before AES-256
//
-// The only potential advantages of AES-256 are better multi-target
-// margins, and hypothetical post-quantum properties. Neither apply to
-// TLS, and AES-256 is slower due to its four extra rounds (which don't
-// contribute to the advantages above).
+// The only potential advantages of AES-256 are better multi-target
+// margins, and hypothetical post-quantum properties. Neither apply to
+// TLS, and AES-256 is slower due to its four extra rounds (which don't
+// contribute to the advantages above).
//
// - ECDSA comes before RSA
//
-// The relative order of ECDSA and RSA cipher suites doesn't matter,
-// as they depend on the certificate. Pick one to get a stable order.
-//
+// The relative order of ECDSA and RSA cipher suites doesn't matter,
+// as they depend on the certificate. Pick one to get a stable order.
var cipherSuitesPreferenceOrder = []uint16{
// AEADs w/ ECDHE
TLS_ECDHE_ECDSA_WITH_AES_128_GCM_SHA256, TLS_ECDHE_RSA_WITH_AES_128_GCM_SHA256,
@@ -419,7 +418,9 @@ func cipherAES(key, iv []byte, isRead bool) any {
// macSHA1 returns a SHA-1 based constant time MAC.
func macSHA1(key []byte) hash.Hash {
- return hmac.New(newConstantTimeHash(sha1.New), key)
+ h := sha1.New
+ h = newConstantTimeHash(h)
+ return hmac.New(h, key)
}
// macSHA256 returns a SHA-256 based MAC. This is only supported in TLS 1.2 and
@@ -464,7 +465,7 @@ func (f *prefixNonceAEAD) Open(out, nonce, ciphertext, additionalData []byte) ([
return f.aead.Open(out, f.nonce[:], ciphertext, additionalData)
}
-// xoredNonceAEAD wraps an AEAD by XORing in a fixed pattern to the nonce
+// xorNonceAEAD wraps an AEAD by XORing in a fixed pattern to the nonce
// before each call.
type xorNonceAEAD struct {
nonceMask [aeadNonceLength]byte
@@ -507,7 +508,8 @@ func aeadAESGCM(key, noncePrefix []byte) aead {
if err != nil {
panic(err)
}
- aead, err := cipher.NewGCM(aes)
+ var aead cipher.AEAD
+ aead, err = cipher.NewGCM(aes)
if err != nil {
panic(err)
}
diff --git a/vendor/github.com/marten-seemann/qtls-go1-18/common.go b/vendor/github.com/quic-go/qtls-go1-20/common.go
similarity index 97%
rename from vendor/github.com/marten-seemann/qtls-go1-18/common.go
rename to vendor/github.com/quic-go/qtls-go1-20/common.go
index 4c9aeeb4a..074dd9dce 100644
--- a/vendor/github.com/marten-seemann/qtls-go1-18/common.go
+++ b/vendor/github.com/quic-go/qtls-go1-20/common.go
@@ -181,11 +181,11 @@ const (
// hash function associated with the Ed25519 signature scheme.
var directSigning crypto.Hash = 0
-// supportedSignatureAlgorithms contains the signature and hash algorithms that
+// defaultSupportedSignatureAlgorithms contains the signature and hash algorithms that
// the code advertises as supported in a TLS 1.2+ ClientHello and in a TLS 1.2+
// CertificateRequest. The two fields are merged to match with TLS 1.3.
// Note that in TLS 1.2, the ECDSA algorithms are not constrained to P-256, etc.
-var supportedSignatureAlgorithms = []SignatureScheme{
+var defaultSupportedSignatureAlgorithms = []SignatureScheme{
PSSWithSHA256,
ECDSAWithP256AndSHA256,
Ed25519,
@@ -258,6 +258,8 @@ type connectionState struct {
// On the client side, it can't be empty. On the server side, it can be
// empty if Config.ClientAuth is not RequireAnyClientCert or
// RequireAndVerifyClientCert.
+ //
+ // PeerCertificates and its contents should not be modified.
PeerCertificates []*x509.Certificate
// VerifiedChains is a list of one or more chains where the first element is
@@ -267,6 +269,8 @@ type connectionState struct {
// On the client side, it's set if Config.InsecureSkipVerify is false. On
// the server side, it's set if Config.ClientAuth is VerifyClientCertIfGiven
// (and the peer provided a certificate) or RequireAndVerifyClientCert.
+ //
+ // VerifiedChains and its contents should not be modified.
VerifiedChains [][]*x509.Certificate
// SignedCertificateTimestamps is a list of SCTs provided by the peer
@@ -345,7 +349,8 @@ type clientSessionState struct {
// goroutines. Up to TLS 1.2, only ticket-based resumption is supported, not
// SessionID-based resumption. In TLS 1.3 they were merged into PSK modes, which
// are supported via this interface.
-//go:generate sh -c "mockgen -package qtls -destination mock_client_session_cache_test.go github.com/marten-seemann/qtls-go1-17 ClientSessionCache"
+//
+//go:generate sh -c "mockgen -package qtls -destination mock_client_session_cache_test.go github.com/quic-go/qtls-go1-20 ClientSessionCache"
type ClientSessionCache = tls.ClientSessionCache
// SignatureScheme is a tls.SignatureScheme
@@ -543,6 +548,8 @@ type config struct {
// If GetCertificate is nil or returns nil, then the certificate is
// retrieved from NameToCertificate. If NameToCertificate is nil, the
// best element of Certificates will be used.
+ //
+ // Once a Certificate is returned it should not be modified.
GetCertificate func(*ClientHelloInfo) (*Certificate, error)
// GetClientCertificate, if not nil, is called when a server requests a
@@ -558,6 +565,8 @@ type config struct {
//
// GetClientCertificate may be called multiple times for the same
// connection if renegotiation occurs or if TLS 1.3 is in use.
+ //
+ // Once a Certificate is returned it should not be modified.
GetClientCertificate func(*CertificateRequestInfo) (*Certificate, error)
// GetConfigForClient, if not nil, is called after a ClientHello is
@@ -586,6 +595,8 @@ type config struct {
// setting InsecureSkipVerify, or (for a server) when ClientAuth is
// RequestClientCert or RequireAnyClientCert, then this callback will
// be considered but the verifiedChains argument will always be nil.
+ //
+ // verifiedChains and its contents should not be modified.
VerifyPeerCertificate func(rawCerts [][]byte, verifiedChains [][]*x509.Certificate) error
// VerifyConnection, if not nil, is called after normal certificate
@@ -714,7 +725,7 @@ type config struct {
// mutex protects sessionTicketKeys and autoSessionTicketKeys.
mutex sync.RWMutex
- // sessionTicketKeys contains zero or more ticket keys. If set, it means the
+ // sessionTicketKeys contains zero or more ticket keys. If set, it means
// the keys were set with SessionTicketKey or SetSessionTicketKeys. The
// first key is used for new tickets and any subsequent keys can be used to
// decrypt old tickets. The slice contents are not protected by the mutex
@@ -1037,6 +1048,9 @@ func (c *config) time() time.Time {
}
func (c *config) cipherSuites() []uint16 {
+ if needFIPS() {
+ return fipsCipherSuites(c)
+ }
if c.CipherSuites != nil {
return c.CipherSuites
}
@@ -1050,10 +1064,6 @@ var supportedVersions = []uint16{
VersionTLS10,
}
-// debugEnableTLS10 enables TLS 1.0. See issue 45428.
-// We don't care about TLS1.0 in qtls. Always disable it.
-var debugEnableTLS10 = false
-
// roleClient and roleServer are meant to call supportedVersions and parents
// with more readability at the callsite.
const roleClient = true
@@ -1062,7 +1072,10 @@ const roleServer = false
func (c *config) supportedVersions(isClient bool) []uint16 {
versions := make([]uint16, 0, len(supportedVersions))
for _, v := range supportedVersions {
- if (c == nil || c.MinVersion == 0) && !debugEnableTLS10 &&
+ if needFIPS() && (v < fipsMinVersion(c) || v > fipsMaxVersion(c)) {
+ continue
+ }
+ if (c == nil || c.MinVersion == 0) &&
isClient && v < VersionTLS12 {
continue
}
@@ -1102,6 +1115,9 @@ func supportedVersionsFromMax(maxVersion uint16) []uint16 {
var defaultCurvePreferences = []CurveID{X25519, CurveP256, CurveP384, CurveP521}
func (c *config) curvePreferences() []CurveID {
+ if needFIPS() {
+ return fipsCurvePreferences(c)
+ }
if c == nil || len(c.CurvePreferences) == 0 {
return defaultCurvePreferences
}
@@ -1380,7 +1396,7 @@ func (c *config) writeKeyLog(label string, clientRandom, secret []byte) error {
return nil
}
- logLine := []byte(fmt.Sprintf("%s %x %x\n", label, clientRandom, secret))
+ logLine := fmt.Appendf(nil, "%s %x %x\n", label, clientRandom, secret)
writerMutex.Lock()
_, err := c.KeyLogWriter.Write(logLine)
@@ -1406,7 +1422,7 @@ func leafCertificate(c *Certificate) (*x509.Certificate, error) {
}
type handshakeMessage interface {
- marshal() []byte
+ marshal() ([]byte, error)
unmarshal([]byte) bool
}
@@ -1505,3 +1521,18 @@ func isSupportedSignatureAlgorithm(sigAlg SignatureScheme, supportedSignatureAlg
}
return false
}
+
+// CertificateVerificationError is returned when certificate verification fails during the handshake.
+type CertificateVerificationError struct {
+ // UnverifiedCertificates and its contents should not be modified.
+ UnverifiedCertificates []*x509.Certificate
+ Err error
+}
+
+func (e *CertificateVerificationError) Error() string {
+ return fmt.Sprintf("tls: failed to verify certificate: %s", e.Err)
+}
+
+func (e *CertificateVerificationError) Unwrap() error {
+ return e.Err
+}
diff --git a/vendor/github.com/marten-seemann/qtls-go1-18/conn.go b/vendor/github.com/quic-go/qtls-go1-20/conn.go
similarity index 94%
rename from vendor/github.com/marten-seemann/qtls-go1-18/conn.go
rename to vendor/github.com/quic-go/qtls-go1-20/conn.go
index 90a27b5dd..656c83c71 100644
--- a/vendor/github.com/marten-seemann/qtls-go1-18/conn.go
+++ b/vendor/github.com/quic-go/qtls-go1-20/conn.go
@@ -30,11 +30,10 @@ type Conn struct {
isClient bool
handshakeFn func(context.Context) error // (*Conn).clientHandshake or serverHandshake
- // handshakeStatus is 1 if the connection is currently transferring
+ // isHandshakeComplete is true if the connection is currently transferring
// application data (i.e. is not currently processing a handshake).
- // handshakeStatus == 1 implies handshakeErr == nil.
- // This field is only to be accessed with sync/atomic.
- handshakeStatus uint32
+ // isHandshakeComplete is true implies handshakeErr == nil.
+ isHandshakeComplete atomic.Bool
// constant after handshake; protected by handshakeMutex
handshakeMutex sync.Mutex
handshakeErr error // error resulting from handshake
@@ -52,6 +51,9 @@ type Conn struct {
ocspResponse []byte // stapled OCSP response
scts [][]byte // signed certificate timestamps from server
peerCertificates []*x509.Certificate
+ // activeCertHandles contains the cache handles to certificates in
+ // peerCertificates that are used to track active references.
+ activeCertHandles []*activeCert
// verifiedChains contains the certificate chains that we built, as
// opposed to the ones presented by the server.
verifiedChains [][]*x509.Certificate
@@ -117,14 +119,16 @@ type Conn struct {
// handshake, nor deliver application data. Protected by in.Mutex.
retryCount int
- // activeCall is an atomic int32; the low bit is whether Close has
- // been called. the rest of the bits are the number of goroutines
- // in Conn.Write.
- activeCall int32
+ // activeCall indicates whether Close has been call in the low bit.
+ // the rest of the bits are the number of goroutines in Conn.Write.
+ activeCall atomic.Int32
used0RTT bool
tmp [16]byte
+
+ connStateMutex sync.Mutex
+ connState ConnectionStateWith0RTT
}
// Access to net.Conn methods.
@@ -618,12 +622,14 @@ func (c *Conn) readChangeCipherSpec() error {
// readRecordOrCCS reads one or more TLS records from the connection and
// updates the record layer state. Some invariants:
-// * c.in must be locked
-// * c.input must be empty
+// - c.in must be locked
+// - c.input must be empty
+//
// During the handshake one and only one of the following will happen:
// - c.hand grows
// - c.in.changeCipherSpec is called
// - an error is returned
+//
// After the handshake one and only one of the following will happen:
// - c.hand grows
// - c.input is set
@@ -632,7 +638,7 @@ func (c *Conn) readRecordOrCCS(expectChangeCipherSpec bool) error {
if c.in.err != nil {
return c.in.err
}
- handshakeComplete := c.handshakeComplete()
+ handshakeComplete := c.isHandshakeComplete.Load()
// This function modifies c.rawInput, which owns the c.input memory.
if c.input.Len() != 0 {
@@ -789,7 +795,7 @@ func (c *Conn) readRecordOrCCS(expectChangeCipherSpec bool) error {
return nil
}
-// retryReadRecord recurses into readRecordOrCCS to drop a non-advancing record, like
+// retryReadRecord recurs into readRecordOrCCS to drop a non-advancing record, like
// a warning alert, empty application_data, or a change_cipher_spec in TLS 1.3.
func (c *Conn) retryReadRecord(expectChangeCipherSpec bool) error {
c.retryCount++
@@ -1036,25 +1042,46 @@ func (c *Conn) writeRecordLocked(typ recordType, data []byte) (int, error) {
return n, nil
}
-// writeRecord writes a TLS record with the given type and payload to the
-// connection and updates the record layer state.
-func (c *Conn) writeRecord(typ recordType, data []byte) (int, error) {
+// writeHandshakeRecord writes a handshake message to the connection and updates
+// the record layer state. If transcript is non-nil the marshalled message is
+// written to it.
+func (c *Conn) writeHandshakeRecord(msg handshakeMessage, transcript transcriptHash) (int, error) {
+ data, err := msg.marshal()
+ if err != nil {
+ return 0, err
+ }
+
+ c.out.Lock()
+ defer c.out.Unlock()
+
+ if transcript != nil {
+ transcript.Write(data)
+ }
+
if c.extraConfig != nil && c.extraConfig.AlternativeRecordLayer != nil {
- if typ == recordTypeChangeCipherSpec {
- return len(data), nil
- }
return c.extraConfig.AlternativeRecordLayer.WriteRecord(data)
}
+ return c.writeRecordLocked(recordTypeHandshake, data)
+}
+
+// writeChangeCipherRecord writes a ChangeCipherSpec message to the connection and
+// updates the record layer state.
+func (c *Conn) writeChangeCipherRecord() error {
+ if c.extraConfig != nil && c.extraConfig.AlternativeRecordLayer != nil {
+ return nil
+ }
+
c.out.Lock()
defer c.out.Unlock()
-
- return c.writeRecordLocked(typ, data)
+ _, err := c.writeRecordLocked(recordTypeChangeCipherSpec, []byte{1})
+ return err
}
// readHandshake reads the next handshake message from
-// the record layer.
-func (c *Conn) readHandshake() (any, error) {
+// the record layer. If transcript is non-nil, the message
+// is written to the passed transcriptHash.
+func (c *Conn) readHandshake(transcript transcriptHash) (any, error) {
var data []byte
if c.extraConfig != nil && c.extraConfig.AlternativeRecordLayer != nil {
var err error
@@ -1142,6 +1169,11 @@ func (c *Conn) readHandshake() (any, error) {
if !m.unmarshal(data) {
return nil, c.in.setErrorLocked(c.sendAlert(alertUnexpectedMessage))
}
+
+ if transcript != nil {
+ transcript.Write(data)
+ }
+
return m, nil
}
@@ -1158,15 +1190,15 @@ var (
func (c *Conn) Write(b []byte) (int, error) {
// interlock with Close below
for {
- x := atomic.LoadInt32(&c.activeCall)
+ x := c.activeCall.Load()
if x&1 != 0 {
return 0, net.ErrClosed
}
- if atomic.CompareAndSwapInt32(&c.activeCall, x, x+2) {
+ if c.activeCall.CompareAndSwap(x, x+2) {
break
}
}
- defer atomic.AddInt32(&c.activeCall, -2)
+ defer c.activeCall.Add(-2)
if err := c.Handshake(); err != nil {
return 0, err
@@ -1179,7 +1211,7 @@ func (c *Conn) Write(b []byte) (int, error) {
return 0, err
}
- if !c.handshakeComplete() {
+ if !c.isHandshakeComplete.Load() {
return 0, alertInternalError
}
@@ -1217,7 +1249,7 @@ func (c *Conn) handleRenegotiation() error {
return errors.New("tls: internal error: unexpected renegotiation")
}
- msg, err := c.readHandshake()
+ msg, err := c.readHandshake(nil)
if err != nil {
return err
}
@@ -1249,7 +1281,7 @@ func (c *Conn) handleRenegotiation() error {
c.handshakeMutex.Lock()
defer c.handshakeMutex.Unlock()
- atomic.StoreUint32(&c.handshakeStatus, 0)
+ c.isHandshakeComplete.Store(false)
if c.handshakeErr = c.clientHandshake(context.Background()); c.handshakeErr == nil {
c.handshakes++
}
@@ -1267,7 +1299,7 @@ func (c *Conn) handlePostHandshakeMessage() error {
return c.handleRenegotiation()
}
- msg, err := c.readHandshake()
+ msg, err := c.readHandshake(nil)
if err != nil {
return err
}
@@ -1303,7 +1335,11 @@ func (c *Conn) handleKeyUpdate(keyUpdate *keyUpdateMsg) error {
defer c.out.Unlock()
msg := &keyUpdateMsg{}
- _, err := c.writeRecordLocked(recordTypeHandshake, msg.marshal())
+ msgBytes, err := msg.marshal()
+ if err != nil {
+ return err
+ }
+ _, err = c.writeRecordLocked(recordTypeHandshake, msgBytes)
if err != nil {
// Surface the error at the next write.
c.out.setErrorLocked(err)
@@ -1371,11 +1407,11 @@ func (c *Conn) Close() error {
// Interlock with Conn.Write above.
var x int32
for {
- x = atomic.LoadInt32(&c.activeCall)
+ x = c.activeCall.Load()
if x&1 != 0 {
return net.ErrClosed
}
- if atomic.CompareAndSwapInt32(&c.activeCall, x, x|1) {
+ if c.activeCall.CompareAndSwap(x, x|1) {
break
}
}
@@ -1390,7 +1426,7 @@ func (c *Conn) Close() error {
}
var alertErr error
- if c.handshakeComplete() {
+ if c.isHandshakeComplete.Load() {
if err := c.closeNotify(); err != nil {
alertErr = fmt.Errorf("tls: failed to send closeNotify alert (but connection was closed anyway): %w", err)
}
@@ -1408,7 +1444,7 @@ var errEarlyCloseWrite = errors.New("tls: CloseWrite called before handshake com
// called once the handshake has completed and does not call CloseWrite on the
// underlying connection. Most callers should just use Close.
func (c *Conn) CloseWrite() error {
- if !c.handshakeComplete() {
+ if !c.isHandshakeComplete.Load() {
return errEarlyCloseWrite
}
@@ -1462,7 +1498,7 @@ func (c *Conn) handshakeContext(ctx context.Context) (ret error) {
// Fast sync/atomic-based exit if there is no handshake in flight and the
// last one succeeded without an error. Avoids the expensive context setup
// and mutex for most Read and Write calls.
- if c.handshakeComplete() {
+ if c.isHandshakeComplete.Load() {
return nil
}
@@ -1505,7 +1541,7 @@ func (c *Conn) handshakeContext(ctx context.Context) (ret error) {
if err := c.handshakeErr; err != nil {
return err
}
- if c.handshakeComplete() {
+ if c.isHandshakeComplete.Load() {
return nil
}
@@ -1521,10 +1557,10 @@ func (c *Conn) handshakeContext(ctx context.Context) (ret error) {
c.flush()
}
- if c.handshakeErr == nil && !c.handshakeComplete() {
+ if c.handshakeErr == nil && !c.isHandshakeComplete.Load() {
c.handshakeErr = errors.New("tls: internal error: handshake should have had a result")
}
- if c.handshakeErr != nil && c.handshakeComplete() {
+ if c.handshakeErr != nil && c.isHandshakeComplete.Load() {
panic("tls: internal error: handshake returned an error but is marked successful")
}
@@ -1533,24 +1569,21 @@ func (c *Conn) handshakeContext(ctx context.Context) (ret error) {
// ConnectionState returns basic TLS details about the connection.
func (c *Conn) ConnectionState() ConnectionState {
- c.handshakeMutex.Lock()
- defer c.handshakeMutex.Unlock()
- return c.connectionStateLocked()
+ c.connStateMutex.Lock()
+ defer c.connStateMutex.Unlock()
+ return c.connState.ConnectionState
}
// ConnectionStateWith0RTT returns basic TLS details (incl. 0-RTT status) about the connection.
func (c *Conn) ConnectionStateWith0RTT() ConnectionStateWith0RTT {
- c.handshakeMutex.Lock()
- defer c.handshakeMutex.Unlock()
- return ConnectionStateWith0RTT{
- ConnectionState: c.connectionStateLocked(),
- Used0RTT: c.used0RTT,
- }
+ c.connStateMutex.Lock()
+ defer c.connStateMutex.Unlock()
+ return c.connState
}
func (c *Conn) connectionStateLocked() ConnectionState {
var state connectionState
- state.HandshakeComplete = c.handshakeComplete()
+ state.HandshakeComplete = c.isHandshakeComplete.Load()
state.Version = c.vers
state.NegotiatedProtocol = c.clientProtocol
state.DidResume = c.didResume
@@ -1576,6 +1609,15 @@ func (c *Conn) connectionStateLocked() ConnectionState {
return toConnectionState(state)
}
+func (c *Conn) updateConnectionState() {
+ c.connStateMutex.Lock()
+ defer c.connStateMutex.Unlock()
+ c.connState = ConnectionStateWith0RTT{
+ Used0RTT: c.used0RTT,
+ ConnectionState: c.connectionStateLocked(),
+ }
+}
+
// OCSPResponse returns the stapled OCSP response from the TLS server, if
// any. (Only valid for client connections.)
func (c *Conn) OCSPResponse() []byte {
@@ -1594,7 +1636,7 @@ func (c *Conn) VerifyHostname(host string) error {
if !c.isClient {
return errors.New("tls: VerifyHostname called on TLS server connection")
}
- if !c.handshakeComplete() {
+ if !c.isHandshakeComplete.Load() {
return errors.New("tls: handshake has not yet been performed")
}
if len(c.verifiedChains) == 0 {
@@ -1602,7 +1644,3 @@ func (c *Conn) VerifyHostname(host string) error {
}
return c.peerCertificates[0].VerifyHostname(host)
}
-
-func (c *Conn) handshakeComplete() bool {
- return atomic.LoadUint32(&c.handshakeStatus) == 1
-}
diff --git a/vendor/github.com/marten-seemann/qtls-go1-18/cpu.go b/vendor/github.com/quic-go/qtls-go1-20/cpu.go
similarity index 100%
rename from vendor/github.com/marten-seemann/qtls-go1-18/cpu.go
rename to vendor/github.com/quic-go/qtls-go1-20/cpu.go
diff --git a/vendor/github.com/marten-seemann/qtls-go1-18/cpu_other.go b/vendor/github.com/quic-go/qtls-go1-20/cpu_other.go
similarity index 100%
rename from vendor/github.com/marten-seemann/qtls-go1-18/cpu_other.go
rename to vendor/github.com/quic-go/qtls-go1-20/cpu_other.go
diff --git a/vendor/github.com/marten-seemann/qtls-go1-18/handshake_client.go b/vendor/github.com/quic-go/qtls-go1-20/handshake_client.go
similarity index 88%
rename from vendor/github.com/marten-seemann/qtls-go1-18/handshake_client.go
rename to vendor/github.com/quic-go/qtls-go1-20/handshake_client.go
index ab691d566..ebb56ebe1 100644
--- a/vendor/github.com/marten-seemann/qtls-go1-18/handshake_client.go
+++ b/vendor/github.com/quic-go/qtls-go1-20/handshake_client.go
@@ -8,6 +8,7 @@ import (
"bytes"
"context"
"crypto"
+ "crypto/ecdh"
"crypto/ecdsa"
"crypto/ed25519"
"crypto/rsa"
@@ -19,7 +20,6 @@ import (
"io"
"net"
"strings"
- "sync/atomic"
"time"
"golang.org/x/crypto/cryptobyte"
@@ -38,7 +38,9 @@ type clientHandshakeState struct {
session *clientSessionState
}
-func (c *Conn) makeClientHello() (*clientHelloMsg, ecdheParameters, error) {
+var testingOnlyForceClientHelloSignatureAlgorithms []SignatureScheme
+
+func (c *Conn) makeClientHello() (*clientHelloMsg, *ecdh.PrivateKey, error) {
config := c.config
if len(config.ServerName) == 0 && !config.InsecureSkipVerify {
return nil, nil, errors.New("tls: either ServerName or InsecureSkipVerify must be specified in the tls.Config")
@@ -134,45 +136,39 @@ func (c *Conn) makeClientHello() (*clientHelloMsg, ecdheParameters, error) {
}
if hello.vers >= VersionTLS12 {
- hello.supportedSignatureAlgorithms = supportedSignatureAlgorithms
+ hello.supportedSignatureAlgorithms = supportedSignatureAlgorithms()
+ }
+ if testingOnlyForceClientHelloSignatureAlgorithms != nil {
+ hello.supportedSignatureAlgorithms = testingOnlyForceClientHelloSignatureAlgorithms
}
- var params ecdheParameters
+ var key *ecdh.PrivateKey
if hello.supportedVersions[0] == VersionTLS13 {
- var suites []uint16
- for _, suiteID := range configCipherSuites {
- for _, suite := range cipherSuitesTLS13 {
- if suite.id == suiteID {
- suites = append(suites, suiteID)
- }
- }
+ if len(hello.supportedVersions) == 1 {
+ hello.cipherSuites = hello.cipherSuites[:0]
}
- if len(suites) > 0 {
- hello.cipherSuites = suites
+ if hasAESGCMHardwareSupport {
+ hello.cipherSuites = append(hello.cipherSuites, defaultCipherSuitesTLS13...)
} else {
- if hasAESGCMHardwareSupport {
- hello.cipherSuites = append(hello.cipherSuites, defaultCipherSuitesTLS13...)
- } else {
- hello.cipherSuites = append(hello.cipherSuites, defaultCipherSuitesTLS13NoAES...)
- }
+ hello.cipherSuites = append(hello.cipherSuites, defaultCipherSuitesTLS13NoAES...)
}
curveID := config.curvePreferences()[0]
- if _, ok := curveForCurveID(curveID); curveID != X25519 && !ok {
+ if _, ok := curveForCurveID(curveID); !ok {
return nil, nil, errors.New("tls: CurvePreferences includes unsupported curve")
}
- params, err = generateECDHEParameters(config.rand(), curveID)
+ key, err = generateECDHEKey(config.rand(), curveID)
if err != nil {
return nil, nil, err
}
- hello.keyShares = []keyShare{{group: curveID, data: params.PublicKey()}}
+ hello.keyShares = []keyShare{{group: curveID, data: key.PublicKey().Bytes()}}
}
if hello.supportedVersions[0] == VersionTLS13 && c.extraConfig != nil && c.extraConfig.GetExtensions != nil {
hello.additionalExtensions = c.extraConfig.GetExtensions(typeClientHello)
}
- return hello, params, nil
+ return hello, key, nil
}
func (c *Conn) clientHandshake(ctx context.Context) (err error) {
@@ -185,13 +181,16 @@ func (c *Conn) clientHandshake(ctx context.Context) (err error) {
// need to be reset.
c.didResume = false
- hello, ecdheParams, err := c.makeClientHello()
+ hello, ecdheKey, err := c.makeClientHello()
if err != nil {
return err
}
c.serverName = hello.serverName
- cacheKey, session, earlySecret, binderKey := c.loadSession(hello)
+ cacheKey, session, earlySecret, binderKey, err := c.loadSession(hello)
+ if err != nil {
+ return err
+ }
if cacheKey != "" && session != nil {
var deletedTicket bool
if session.vers == VersionTLS13 && hello.earlyData && c.extraConfig != nil && c.extraConfig.Enable0RTT {
@@ -201,11 +200,14 @@ func (c *Conn) clientHandshake(ctx context.Context) (err error) {
if suite := cipherSuiteTLS13ByID(session.cipherSuite); suite != nil {
h := suite.hash.New()
- h.Write(hello.marshal())
+ helloBytes, err := hello.marshal()
+ if err != nil {
+ return err
+ }
+ h.Write(helloBytes)
clientEarlySecret := suite.deriveSecret(earlySecret, "c e traffic", h)
c.out.exportKey(Encryption0RTT, suite, clientEarlySecret)
if err := c.config.writeKeyLog(keyLogLabelEarlyTraffic, hello.random, clientEarlySecret); err != nil {
- c.sendAlert(alertInternalError)
return err
}
}
@@ -225,11 +227,12 @@ func (c *Conn) clientHandshake(ctx context.Context) (err error) {
}
}
- if _, err := c.writeRecord(recordTypeHandshake, hello.marshal()); err != nil {
+ if _, err := c.writeHandshakeRecord(hello, nil); err != nil {
return err
}
- msg, err := c.readHandshake()
+ // serverHelloMsg is not included in the transcript
+ msg, err := c.readHandshake(nil)
if err != nil {
return err
}
@@ -262,7 +265,7 @@ func (c *Conn) clientHandshake(ctx context.Context) (err error) {
ctx: ctx,
serverHello: serverHello,
hello: hello,
- ecdheParams: ecdheParams,
+ ecdheKey: ecdheKey,
session: session,
earlySecret: earlySecret,
binderKey: binderKey,
@@ -290,6 +293,7 @@ func (c *Conn) clientHandshake(ctx context.Context) (err error) {
c.config.ClientSessionCache.Put(cacheKey, toClientSessionState(hs.session))
}
+ c.updateConnectionState()
return nil
}
@@ -321,9 +325,9 @@ func (c *Conn) decodeSessionState(session *clientSessionState) (uint32 /* max ea
}
func (c *Conn) loadSession(hello *clientHelloMsg) (cacheKey string,
- session *clientSessionState, earlySecret, binderKey []byte) {
+ session *clientSessionState, earlySecret, binderKey []byte, err error) {
if c.config.SessionTicketsDisabled || c.config.ClientSessionCache == nil {
- return "", nil, nil, nil
+ return "", nil, nil, nil, nil
}
hello.ticketSupported = true
@@ -338,14 +342,14 @@ func (c *Conn) loadSession(hello *clientHelloMsg) (cacheKey string,
// renegotiation is primarily used to allow a client to send a client
// certificate, which would be skipped if session resumption occurred.
if c.handshakes != 0 {
- return "", nil, nil, nil
+ return "", nil, nil, nil, nil
}
// Try to resume a previously negotiated TLS session, if available.
cacheKey = clientSessionCacheKey(c.conn.RemoteAddr(), c.config)
sess, ok := c.config.ClientSessionCache.Get(cacheKey)
if !ok || sess == nil {
- return cacheKey, nil, nil, nil
+ return cacheKey, nil, nil, nil, nil
}
session = fromClientSessionState(sess)
@@ -356,7 +360,7 @@ func (c *Conn) loadSession(hello *clientHelloMsg) (cacheKey string,
maxEarlyData, appData, ok = c.decodeSessionState(session)
if !ok { // delete it, if parsing failed
c.config.ClientSessionCache.Put(cacheKey, nil)
- return cacheKey, nil, nil, nil
+ return cacheKey, nil, nil, nil, nil
}
}
@@ -369,7 +373,7 @@ func (c *Conn) loadSession(hello *clientHelloMsg) (cacheKey string,
}
}
if !versOk {
- return cacheKey, nil, nil, nil
+ return cacheKey, nil, nil, nil, nil
}
// Check that the cached server certificate is not expired, and that it's
@@ -378,16 +382,16 @@ func (c *Conn) loadSession(hello *clientHelloMsg) (cacheKey string,
if !c.config.InsecureSkipVerify {
if len(session.verifiedChains) == 0 {
// The original connection had InsecureSkipVerify, while this doesn't.
- return cacheKey, nil, nil, nil
+ return cacheKey, nil, nil, nil, nil
}
serverCert := session.serverCertificates[0]
if c.config.time().After(serverCert.NotAfter) {
// Expired certificate, delete the entry.
c.config.ClientSessionCache.Put(cacheKey, nil)
- return cacheKey, nil, nil, nil
+ return cacheKey, nil, nil, nil, nil
}
if err := serverCert.VerifyHostname(c.config.ServerName); err != nil {
- return cacheKey, nil, nil, nil
+ return cacheKey, nil, nil, nil, nil
}
}
@@ -395,7 +399,7 @@ func (c *Conn) loadSession(hello *clientHelloMsg) (cacheKey string,
// In TLS 1.2 the cipher suite must match the resumed session. Ensure we
// are still offering it.
if mutualCipherSuite(hello.cipherSuites, session.cipherSuite) == nil {
- return cacheKey, nil, nil, nil
+ return cacheKey, nil, nil, nil, nil
}
hello.sessionTicket = session.sessionTicket
@@ -405,14 +409,14 @@ func (c *Conn) loadSession(hello *clientHelloMsg) (cacheKey string,
// Check that the session ticket is not expired.
if c.config.time().After(session.useBy) {
c.config.ClientSessionCache.Put(cacheKey, nil)
- return cacheKey, nil, nil, nil
+ return cacheKey, nil, nil, nil, nil
}
// In TLS 1.3 the KDF hash must match the resumed session. Ensure we
// offer at least one cipher suite with that hash.
cipherSuite := cipherSuiteTLS13ByID(session.cipherSuite)
if cipherSuite == nil {
- return cacheKey, nil, nil, nil
+ return cacheKey, nil, nil, nil, nil
}
cipherSuiteOk := false
for _, offeredID := range hello.cipherSuites {
@@ -423,7 +427,7 @@ func (c *Conn) loadSession(hello *clientHelloMsg) (cacheKey string,
}
}
if !cipherSuiteOk {
- return cacheKey, nil, nil, nil
+ return cacheKey, nil, nil, nil, nil
}
// Set the pre_shared_key extension. See RFC 8446, Section 4.2.11.1.
@@ -444,9 +448,15 @@ func (c *Conn) loadSession(hello *clientHelloMsg) (cacheKey string,
hello.earlyData = c.extraConfig.Enable0RTT && maxEarlyData > 0
}
transcript := cipherSuite.hash.New()
- transcript.Write(hello.marshalWithoutBinders())
+ helloBytes, err := hello.marshalWithoutBinders()
+ if err != nil {
+ return "", nil, nil, nil, err
+ }
+ transcript.Write(helloBytes)
pskBinders := [][]byte{cipherSuite.finishedHash(binderKey, transcript)}
- hello.updateBinders(pskBinders)
+ if err := hello.updateBinders(pskBinders); err != nil {
+ return "", nil, nil, nil, err
+ }
if session.vers == VersionTLS13 && c.extraConfig != nil && c.extraConfig.SetAppDataFromSessionState != nil {
c.extraConfig.SetAppDataFromSessionState(appData)
@@ -494,8 +504,12 @@ func (hs *clientHandshakeState) handshake() error {
hs.finishedHash.discardHandshakeBuffer()
}
- hs.finishedHash.Write(hs.hello.marshal())
- hs.finishedHash.Write(hs.serverHello.marshal())
+ if err := transcriptMsg(hs.hello, &hs.finishedHash); err != nil {
+ return err
+ }
+ if err := transcriptMsg(hs.serverHello, &hs.finishedHash); err != nil {
+ return err
+ }
c.buffering = true
c.didResume = isResume
@@ -548,7 +562,7 @@ func (hs *clientHandshakeState) handshake() error {
}
c.ekm = ekmFromMasterSecret(c.vers, hs.suite, hs.masterSecret, hs.hello.random, hs.serverHello.random)
- atomic.StoreUint32(&c.handshakeStatus, 1)
+ c.isHandshakeComplete.Store(true)
return nil
}
@@ -566,7 +580,7 @@ func (hs *clientHandshakeState) pickCipherSuite() error {
func (hs *clientHandshakeState) doFullHandshake() error {
c := hs.c
- msg, err := c.readHandshake()
+ msg, err := c.readHandshake(&hs.finishedHash)
if err != nil {
return err
}
@@ -575,9 +589,8 @@ func (hs *clientHandshakeState) doFullHandshake() error {
c.sendAlert(alertUnexpectedMessage)
return unexpectedMessageError(certMsg, msg)
}
- hs.finishedHash.Write(certMsg.marshal())
- msg, err = c.readHandshake()
+ msg, err = c.readHandshake(&hs.finishedHash)
if err != nil {
return err
}
@@ -595,11 +608,10 @@ func (hs *clientHandshakeState) doFullHandshake() error {
c.sendAlert(alertUnexpectedMessage)
return errors.New("tls: received unexpected CertificateStatus message")
}
- hs.finishedHash.Write(cs.marshal())
c.ocspResponse = cs.response
- msg, err = c.readHandshake()
+ msg, err = c.readHandshake(&hs.finishedHash)
if err != nil {
return err
}
@@ -628,14 +640,13 @@ func (hs *clientHandshakeState) doFullHandshake() error {
skx, ok := msg.(*serverKeyExchangeMsg)
if ok {
- hs.finishedHash.Write(skx.marshal())
err = keyAgreement.processServerKeyExchange(c.config, hs.hello, hs.serverHello, c.peerCertificates[0], skx)
if err != nil {
c.sendAlert(alertUnexpectedMessage)
return err
}
- msg, err = c.readHandshake()
+ msg, err = c.readHandshake(&hs.finishedHash)
if err != nil {
return err
}
@@ -646,7 +657,6 @@ func (hs *clientHandshakeState) doFullHandshake() error {
certReq, ok := msg.(*certificateRequestMsg)
if ok {
certRequested = true
- hs.finishedHash.Write(certReq.marshal())
cri := certificateRequestInfoFromMsg(hs.ctx, c.vers, certReq)
if chainToSend, err = c.getClientCertificate(cri); err != nil {
@@ -654,7 +664,7 @@ func (hs *clientHandshakeState) doFullHandshake() error {
return err
}
- msg, err = c.readHandshake()
+ msg, err = c.readHandshake(&hs.finishedHash)
if err != nil {
return err
}
@@ -665,7 +675,6 @@ func (hs *clientHandshakeState) doFullHandshake() error {
c.sendAlert(alertUnexpectedMessage)
return unexpectedMessageError(shd, msg)
}
- hs.finishedHash.Write(shd.marshal())
// If the server requested a certificate then we have to send a
// Certificate message, even if it's empty because we don't have a
@@ -673,8 +682,7 @@ func (hs *clientHandshakeState) doFullHandshake() error {
if certRequested {
certMsg = new(certificateMsg)
certMsg.certificates = chainToSend.Certificate
- hs.finishedHash.Write(certMsg.marshal())
- if _, err := c.writeRecord(recordTypeHandshake, certMsg.marshal()); err != nil {
+ if _, err := hs.c.writeHandshakeRecord(certMsg, &hs.finishedHash); err != nil {
return err
}
}
@@ -685,8 +693,7 @@ func (hs *clientHandshakeState) doFullHandshake() error {
return err
}
if ckx != nil {
- hs.finishedHash.Write(ckx.marshal())
- if _, err := c.writeRecord(recordTypeHandshake, ckx.marshal()); err != nil {
+ if _, err := hs.c.writeHandshakeRecord(ckx, &hs.finishedHash); err != nil {
return err
}
}
@@ -722,7 +729,7 @@ func (hs *clientHandshakeState) doFullHandshake() error {
}
}
- signed := hs.finishedHash.hashForClientCertificate(sigType, sigHash, hs.masterSecret)
+ signed := hs.finishedHash.hashForClientCertificate(sigType, sigHash)
signOpts := crypto.SignerOpts(sigHash)
if sigType == signatureRSAPSS {
signOpts = &rsa.PSSOptions{SaltLength: rsa.PSSSaltLengthEqualsHash, Hash: sigHash}
@@ -733,8 +740,7 @@ func (hs *clientHandshakeState) doFullHandshake() error {
return err
}
- hs.finishedHash.Write(certVerify.marshal())
- if _, err := c.writeRecord(recordTypeHandshake, certVerify.marshal()); err != nil {
+ if _, err := hs.c.writeHandshakeRecord(certVerify, &hs.finishedHash); err != nil {
return err
}
}
@@ -869,7 +875,10 @@ func (hs *clientHandshakeState) readFinished(out []byte) error {
return err
}
- msg, err := c.readHandshake()
+ // finishedMsg is included in the transcript, but not until after we
+ // check the client version, since the state before this message was
+ // sent is used during verification.
+ msg, err := c.readHandshake(nil)
if err != nil {
return err
}
@@ -885,7 +894,11 @@ func (hs *clientHandshakeState) readFinished(out []byte) error {
c.sendAlert(alertHandshakeFailure)
return errors.New("tls: server's Finished message was incorrect")
}
- hs.finishedHash.Write(serverFinished.marshal())
+
+ if err := transcriptMsg(serverFinished, &hs.finishedHash); err != nil {
+ return err
+ }
+
copy(out, verify)
return nil
}
@@ -896,7 +909,7 @@ func (hs *clientHandshakeState) readSessionTicket() error {
}
c := hs.c
- msg, err := c.readHandshake()
+ msg, err := c.readHandshake(&hs.finishedHash)
if err != nil {
return err
}
@@ -905,7 +918,6 @@ func (hs *clientHandshakeState) readSessionTicket() error {
c.sendAlert(alertUnexpectedMessage)
return unexpectedMessageError(sessionTicketMsg, msg)
}
- hs.finishedHash.Write(sessionTicketMsg.marshal())
hs.session = &clientSessionState{
sessionTicket: sessionTicketMsg.ticket,
@@ -925,31 +937,40 @@ func (hs *clientHandshakeState) readSessionTicket() error {
func (hs *clientHandshakeState) sendFinished(out []byte) error {
c := hs.c
- if _, err := c.writeRecord(recordTypeChangeCipherSpec, []byte{1}); err != nil {
+ if err := c.writeChangeCipherRecord(); err != nil {
return err
}
finished := new(finishedMsg)
finished.verifyData = hs.finishedHash.clientSum(hs.masterSecret)
- hs.finishedHash.Write(finished.marshal())
- if _, err := c.writeRecord(recordTypeHandshake, finished.marshal()); err != nil {
+ if _, err := hs.c.writeHandshakeRecord(finished, &hs.finishedHash); err != nil {
return err
}
copy(out, finished.verifyData)
return nil
}
+// maxRSAKeySize is the maximum RSA key size in bits that we are willing
+// to verify the signatures of during a TLS handshake.
+const maxRSAKeySize = 8192
+
// verifyServerCertificate parses and verifies the provided chain, setting
// c.verifiedChains and c.peerCertificates or sending the appropriate alert.
func (c *Conn) verifyServerCertificate(certificates [][]byte) error {
+ activeHandles := make([]*activeCert, len(certificates))
certs := make([]*x509.Certificate, len(certificates))
for i, asn1Data := range certificates {
- cert, err := x509.ParseCertificate(asn1Data)
+ cert, err := clientCertCache.newCert(asn1Data)
if err != nil {
c.sendAlert(alertBadCertificate)
return errors.New("tls: failed to parse certificate from server: " + err.Error())
}
- certs[i] = cert
+ if cert.cert.PublicKeyAlgorithm == x509.RSA && cert.cert.PublicKey.(*rsa.PublicKey).N.BitLen() > maxRSAKeySize {
+ c.sendAlert(alertBadCertificate)
+ return fmt.Errorf("tls: server sent certificate containing RSA key larger than %d bits", maxRSAKeySize)
+ }
+ activeHandles[i] = cert
+ certs[i] = cert.cert
}
if !c.config.InsecureSkipVerify {
@@ -959,6 +980,7 @@ func (c *Conn) verifyServerCertificate(certificates [][]byte) error {
DNSName: c.config.ServerName,
Intermediates: x509.NewCertPool(),
}
+
for _, cert := range certs[1:] {
opts.Intermediates.AddCert(cert)
}
@@ -966,7 +988,7 @@ func (c *Conn) verifyServerCertificate(certificates [][]byte) error {
c.verifiedChains, err = certs[0].Verify(opts)
if err != nil {
c.sendAlert(alertBadCertificate)
- return err
+ return &CertificateVerificationError{UnverifiedCertificates: certs, Err: err}
}
}
@@ -978,6 +1000,7 @@ func (c *Conn) verifyServerCertificate(certificates [][]byte) error {
return fmt.Errorf("tls: server's certificate contains an unsupported type of public key: %T", certs[0].PublicKey)
}
+ c.activeCertHandles = activeHandles
c.peerCertificates = certs
if c.config.VerifyPeerCertificate != nil {
diff --git a/vendor/github.com/marten-seemann/qtls-go1-17/handshake_client_tls13.go b/vendor/github.com/quic-go/qtls-go1-20/handshake_client_tls13.go
similarity index 87%
rename from vendor/github.com/marten-seemann/qtls-go1-17/handshake_client_tls13.go
rename to vendor/github.com/quic-go/qtls-go1-20/handshake_client_tls13.go
index 0de59fc1e..60ae29954 100644
--- a/vendor/github.com/marten-seemann/qtls-go1-17/handshake_client_tls13.go
+++ b/vendor/github.com/quic-go/qtls-go1-20/handshake_client_tls13.go
@@ -8,12 +8,12 @@ import (
"bytes"
"context"
"crypto"
+ "crypto/ecdh"
"crypto/hmac"
"crypto/rsa"
"encoding/binary"
"errors"
"hash"
- "sync/atomic"
"time"
"golang.org/x/crypto/cryptobyte"
@@ -24,7 +24,7 @@ type clientHandshakeStateTLS13 struct {
ctx context.Context
serverHello *serverHelloMsg
hello *clientHelloMsg
- ecdheParams ecdheParameters
+ ecdheKey *ecdh.PrivateKey
session *clientSessionState
earlySecret []byte
@@ -39,11 +39,15 @@ type clientHandshakeStateTLS13 struct {
trafficSecret []byte // client_application_traffic_secret_0
}
-// handshake requires hs.c, hs.hello, hs.serverHello, hs.ecdheParams, and,
+// handshake requires hs.c, hs.hello, hs.serverHello, hs.ecdheKey, and,
// optionally, hs.session, hs.earlySecret and hs.binderKey to be set.
func (hs *clientHandshakeStateTLS13) handshake() error {
c := hs.c
+ if needFIPS() {
+ return errors.New("tls: internal error: TLS 1.3 reached in FIPS mode")
+ }
+
// The server must not select TLS 1.3 in a renegotiation. See RFC 8446,
// sections 4.1.2 and 4.1.3.
if c.handshakes > 0 {
@@ -52,7 +56,7 @@ func (hs *clientHandshakeStateTLS13) handshake() error {
}
// Consistency check on the presence of a keyShare and its parameters.
- if hs.ecdheParams == nil || len(hs.hello.keyShares) != 1 {
+ if hs.ecdheKey == nil || len(hs.hello.keyShares) != 1 {
return c.sendAlert(alertInternalError)
}
@@ -61,7 +65,10 @@ func (hs *clientHandshakeStateTLS13) handshake() error {
}
hs.transcript = hs.suite.hash.New()
- hs.transcript.Write(hs.hello.marshal())
+
+ if err := transcriptMsg(hs.hello, hs.transcript); err != nil {
+ return err
+ }
if bytes.Equal(hs.serverHello.random, helloRetryRequestRandom) {
if err := hs.sendDummyChangeCipherSpec(); err != nil {
@@ -72,12 +79,15 @@ func (hs *clientHandshakeStateTLS13) handshake() error {
}
}
- hs.transcript.Write(hs.serverHello.marshal())
+ if err := transcriptMsg(hs.serverHello, hs.transcript); err != nil {
+ return err
+ }
c.buffering = true
if err := hs.processServerHello(); err != nil {
return err
}
+ c.updateConnectionState()
if err := hs.sendDummyChangeCipherSpec(); err != nil {
return err
}
@@ -90,6 +100,7 @@ func (hs *clientHandshakeStateTLS13) handshake() error {
if err := hs.readServerCertificate(); err != nil {
return err
}
+ c.updateConnectionState()
if err := hs.readServerFinished(); err != nil {
return err
}
@@ -103,8 +114,8 @@ func (hs *clientHandshakeStateTLS13) handshake() error {
return err
}
- atomic.StoreUint32(&c.handshakeStatus, 1)
-
+ c.isHandshakeComplete.Store(true)
+ c.updateConnectionState()
return nil
}
@@ -171,8 +182,7 @@ func (hs *clientHandshakeStateTLS13) sendDummyChangeCipherSpec() error {
}
hs.sentDummyCCS = true
- _, err := hs.c.writeRecord(recordTypeChangeCipherSpec, []byte{1})
- return err
+ return hs.c.writeChangeCipherRecord()
}
// processHelloRetryRequest handles the HRR in hs.serverHello, modifies and
@@ -187,7 +197,9 @@ func (hs *clientHandshakeStateTLS13) processHelloRetryRequest() error {
hs.transcript.Reset()
hs.transcript.Write([]byte{typeMessageHash, 0, 0, uint8(len(chHash))})
hs.transcript.Write(chHash)
- hs.transcript.Write(hs.serverHello.marshal())
+ if err := transcriptMsg(hs.serverHello, hs.transcript); err != nil {
+ return err
+ }
// The only HelloRetryRequest extensions we support are key_share and
// cookie, and clients must abort the handshake if the HRR would not result
@@ -221,21 +233,21 @@ func (hs *clientHandshakeStateTLS13) processHelloRetryRequest() error {
c.sendAlert(alertIllegalParameter)
return errors.New("tls: server selected unsupported group")
}
- if hs.ecdheParams.CurveID() == curveID {
+ if sentID, _ := curveIDForCurve(hs.ecdheKey.Curve()); sentID == curveID {
c.sendAlert(alertIllegalParameter)
return errors.New("tls: server sent an unnecessary HelloRetryRequest key_share")
}
- if _, ok := curveForCurveID(curveID); curveID != X25519 && !ok {
+ if _, ok := curveForCurveID(curveID); !ok {
c.sendAlert(alertInternalError)
return errors.New("tls: CurvePreferences includes unsupported curve")
}
- params, err := generateECDHEParameters(c.config.rand(), curveID)
+ key, err := generateECDHEKey(c.config.rand(), curveID)
if err != nil {
c.sendAlert(alertInternalError)
return err
}
- hs.ecdheParams = params
- hs.hello.keyShares = []keyShare{{group: curveID, data: params.PublicKey()}}
+ hs.ecdheKey = key
+ hs.hello.keyShares = []keyShare{{group: curveID, data: key.PublicKey().Bytes()}}
}
hs.hello.raw = nil
@@ -252,10 +264,18 @@ func (hs *clientHandshakeStateTLS13) processHelloRetryRequest() error {
transcript := hs.suite.hash.New()
transcript.Write([]byte{typeMessageHash, 0, 0, uint8(len(chHash))})
transcript.Write(chHash)
- transcript.Write(hs.serverHello.marshal())
- transcript.Write(hs.hello.marshalWithoutBinders())
+ if err := transcriptMsg(hs.serverHello, hs.transcript); err != nil {
+ return err
+ }
+ helloBytes, err := hs.hello.marshalWithoutBinders()
+ if err != nil {
+ return err
+ }
+ transcript.Write(helloBytes)
pskBinders := [][]byte{hs.suite.finishedHash(hs.binderKey, transcript)}
- hs.hello.updateBinders(pskBinders)
+ if err := hs.hello.updateBinders(pskBinders); err != nil {
+ return err
+ }
} else {
// Server selected a cipher suite incompatible with the PSK.
hs.hello.pskIdentities = nil
@@ -267,13 +287,12 @@ func (hs *clientHandshakeStateTLS13) processHelloRetryRequest() error {
c.extraConfig.Rejected0RTT()
}
hs.hello.earlyData = false // disable 0-RTT
-
- hs.transcript.Write(hs.hello.marshal())
- if _, err := c.writeRecord(recordTypeHandshake, hs.hello.marshal()); err != nil {
+ if _, err := hs.c.writeHandshakeRecord(hs.hello, hs.transcript); err != nil {
return err
}
- msg, err := c.readHandshake()
+ // serverHelloMsg is not included in the transcript
+ msg, err := c.readHandshake(nil)
if err != nil {
return err
}
@@ -314,7 +333,7 @@ func (hs *clientHandshakeStateTLS13) processServerHello() error {
c.sendAlert(alertIllegalParameter)
return errors.New("tls: server did not send a key share")
}
- if hs.serverHello.serverShare.group != hs.ecdheParams.CurveID() {
+ if sentID, _ := curveIDForCurve(hs.ecdheKey.Curve()); hs.serverHello.serverShare.group != sentID {
c.sendAlert(alertIllegalParameter)
return errors.New("tls: server selected unsupported group")
}
@@ -352,8 +371,13 @@ func (hs *clientHandshakeStateTLS13) processServerHello() error {
func (hs *clientHandshakeStateTLS13) establishHandshakeKeys() error {
c := hs.c
- sharedKey := hs.ecdheParams.SharedKey(hs.serverHello.serverShare.data)
- if sharedKey == nil {
+ peerKey, err := hs.ecdheKey.Curve().NewPublicKey(hs.serverHello.serverShare.data)
+ if err != nil {
+ c.sendAlert(alertIllegalParameter)
+ return errors.New("tls: invalid server key share")
+ }
+ sharedKey, err := hs.ecdheKey.ECDH(peerKey)
+ if err != nil {
c.sendAlert(alertIllegalParameter)
return errors.New("tls: invalid server key share")
}
@@ -362,6 +386,7 @@ func (hs *clientHandshakeStateTLS13) establishHandshakeKeys() error {
if !hs.usingPSK {
earlySecret = hs.suite.extract(nil, nil)
}
+
handshakeSecret := hs.suite.extract(sharedKey,
hs.suite.deriveSecret(earlySecret, "derived", nil))
@@ -374,7 +399,7 @@ func (hs *clientHandshakeStateTLS13) establishHandshakeKeys() error {
c.in.exportKey(EncryptionHandshake, hs.suite, serverSecret)
c.in.setTrafficSecret(hs.suite, serverSecret)
- err := c.config.writeKeyLog(keyLogLabelClientHandshake, hs.hello.random, clientSecret)
+ err = c.config.writeKeyLog(keyLogLabelClientHandshake, hs.hello.random, clientSecret)
if err != nil {
c.sendAlert(alertInternalError)
return err
@@ -394,7 +419,7 @@ func (hs *clientHandshakeStateTLS13) establishHandshakeKeys() error {
func (hs *clientHandshakeStateTLS13) readServerParameters() error {
c := hs.c
- msg, err := c.readHandshake()
+ msg, err := c.readHandshake(hs.transcript)
if err != nil {
return err
}
@@ -412,7 +437,6 @@ func (hs *clientHandshakeStateTLS13) readServerParameters() error {
if hs.c.extraConfig != nil && hs.c.extraConfig.ReceivedExtensions != nil {
hs.c.extraConfig.ReceivedExtensions(typeEncryptedExtensions, encryptedExtensions.additionalExtensions)
}
- hs.transcript.Write(encryptedExtensions.marshal())
if err := checkALPN(hs.hello.alpnProtocols, encryptedExtensions.alpnProtocol); err != nil {
c.sendAlert(alertUnsupportedExtension)
@@ -448,18 +472,16 @@ func (hs *clientHandshakeStateTLS13) readServerCertificate() error {
return nil
}
- msg, err := c.readHandshake()
+ msg, err := c.readHandshake(hs.transcript)
if err != nil {
return err
}
certReq, ok := msg.(*certificateRequestMsgTLS13)
if ok {
- hs.transcript.Write(certReq.marshal())
-
hs.certReq = certReq
- msg, err = c.readHandshake()
+ msg, err = c.readHandshake(hs.transcript)
if err != nil {
return err
}
@@ -474,7 +496,6 @@ func (hs *clientHandshakeStateTLS13) readServerCertificate() error {
c.sendAlert(alertDecodeError)
return errors.New("tls: received empty certificates message")
}
- hs.transcript.Write(certMsg.marshal())
c.scts = certMsg.certificate.SignedCertificateTimestamps
c.ocspResponse = certMsg.certificate.OCSPStaple
@@ -483,7 +504,10 @@ func (hs *clientHandshakeStateTLS13) readServerCertificate() error {
return err
}
- msg, err = c.readHandshake()
+ // certificateVerifyMsg is included in the transcript, but not until
+ // after we verify the handshake signature, since the state before
+ // this message was sent is used.
+ msg, err = c.readHandshake(nil)
if err != nil {
return err
}
@@ -495,7 +519,7 @@ func (hs *clientHandshakeStateTLS13) readServerCertificate() error {
}
// See RFC 8446, Section 4.4.3.
- if !isSupportedSignatureAlgorithm(certVerify.signatureAlgorithm, supportedSignatureAlgorithms) {
+ if !isSupportedSignatureAlgorithm(certVerify.signatureAlgorithm, supportedSignatureAlgorithms()) {
c.sendAlert(alertIllegalParameter)
return errors.New("tls: certificate used with invalid signature algorithm")
}
@@ -514,7 +538,9 @@ func (hs *clientHandshakeStateTLS13) readServerCertificate() error {
return errors.New("tls: invalid signature by the server certificate: " + err.Error())
}
- hs.transcript.Write(certVerify.marshal())
+ if err := transcriptMsg(certVerify, hs.transcript); err != nil {
+ return err
+ }
return nil
}
@@ -522,7 +548,10 @@ func (hs *clientHandshakeStateTLS13) readServerCertificate() error {
func (hs *clientHandshakeStateTLS13) readServerFinished() error {
c := hs.c
- msg, err := c.readHandshake()
+ // finishedMsg is included in the transcript, but not until after we
+ // check the client version, since the state before this message was
+ // sent is used during verification.
+ msg, err := c.readHandshake(nil)
if err != nil {
return err
}
@@ -539,7 +568,9 @@ func (hs *clientHandshakeStateTLS13) readServerFinished() error {
return errors.New("tls: invalid server finished hash")
}
- hs.transcript.Write(finished.marshal())
+ if err := transcriptMsg(finished, hs.transcript); err != nil {
+ return err
+ }
// Derive secrets that take context through the server Finished.
@@ -589,8 +620,7 @@ func (hs *clientHandshakeStateTLS13) sendClientCertificate() error {
certMsg.scts = hs.certReq.scts && len(cert.SignedCertificateTimestamps) > 0
certMsg.ocspStapling = hs.certReq.ocspStapling && len(cert.OCSPStaple) > 0
- hs.transcript.Write(certMsg.marshal())
- if _, err := c.writeRecord(recordTypeHandshake, certMsg.marshal()); err != nil {
+ if _, err := hs.c.writeHandshakeRecord(certMsg, hs.transcript); err != nil {
return err
}
@@ -627,8 +657,7 @@ func (hs *clientHandshakeStateTLS13) sendClientCertificate() error {
}
certVerifyMsg.signature = sig
- hs.transcript.Write(certVerifyMsg.marshal())
- if _, err := c.writeRecord(recordTypeHandshake, certVerifyMsg.marshal()); err != nil {
+ if _, err := hs.c.writeHandshakeRecord(certVerifyMsg, hs.transcript); err != nil {
return err
}
@@ -642,8 +671,7 @@ func (hs *clientHandshakeStateTLS13) sendClientFinished() error {
verifyData: hs.suite.finishedHash(c.out.trafficSecret, hs.transcript),
}
- hs.transcript.Write(finished.marshal())
- if _, err := c.writeRecord(recordTypeHandshake, finished.marshal()); err != nil {
+ if _, err := hs.c.writeHandshakeRecord(finished, hs.transcript); err != nil {
return err
}
diff --git a/vendor/github.com/marten-seemann/qtls-go1-18/handshake_messages.go b/vendor/github.com/quic-go/qtls-go1-20/handshake_messages.go
similarity index 75%
rename from vendor/github.com/marten-seemann/qtls-go1-18/handshake_messages.go
rename to vendor/github.com/quic-go/qtls-go1-20/handshake_messages.go
index 5f87d4b81..c69fcefda 100644
--- a/vendor/github.com/marten-seemann/qtls-go1-18/handshake_messages.go
+++ b/vendor/github.com/quic-go/qtls-go1-20/handshake_messages.go
@@ -5,6 +5,7 @@
package qtls
import (
+ "errors"
"fmt"
"strings"
@@ -95,9 +96,187 @@ type clientHelloMsg struct {
additionalExtensions []Extension
}
-func (m *clientHelloMsg) marshal() []byte {
+func (m *clientHelloMsg) marshal() ([]byte, error) {
if m.raw != nil {
- return m.raw
+ return m.raw, nil
+ }
+
+ var exts cryptobyte.Builder
+ if len(m.serverName) > 0 {
+ // RFC 6066, Section 3
+ exts.AddUint16(extensionServerName)
+ exts.AddUint16LengthPrefixed(func(exts *cryptobyte.Builder) {
+ exts.AddUint16LengthPrefixed(func(exts *cryptobyte.Builder) {
+ exts.AddUint8(0) // name_type = host_name
+ exts.AddUint16LengthPrefixed(func(exts *cryptobyte.Builder) {
+ exts.AddBytes([]byte(m.serverName))
+ })
+ })
+ })
+ }
+ if m.ocspStapling {
+ // RFC 4366, Section 3.6
+ exts.AddUint16(extensionStatusRequest)
+ exts.AddUint16LengthPrefixed(func(exts *cryptobyte.Builder) {
+ exts.AddUint8(1) // status_type = ocsp
+ exts.AddUint16(0) // empty responder_id_list
+ exts.AddUint16(0) // empty request_extensions
+ })
+ }
+ if len(m.supportedCurves) > 0 {
+ // RFC 4492, sections 5.1.1 and RFC 8446, Section 4.2.7
+ exts.AddUint16(extensionSupportedCurves)
+ exts.AddUint16LengthPrefixed(func(exts *cryptobyte.Builder) {
+ exts.AddUint16LengthPrefixed(func(exts *cryptobyte.Builder) {
+ for _, curve := range m.supportedCurves {
+ exts.AddUint16(uint16(curve))
+ }
+ })
+ })
+ }
+ if len(m.supportedPoints) > 0 {
+ // RFC 4492, Section 5.1.2
+ exts.AddUint16(extensionSupportedPoints)
+ exts.AddUint16LengthPrefixed(func(exts *cryptobyte.Builder) {
+ exts.AddUint8LengthPrefixed(func(exts *cryptobyte.Builder) {
+ exts.AddBytes(m.supportedPoints)
+ })
+ })
+ }
+ if m.ticketSupported {
+ // RFC 5077, Section 3.2
+ exts.AddUint16(extensionSessionTicket)
+ exts.AddUint16LengthPrefixed(func(exts *cryptobyte.Builder) {
+ exts.AddBytes(m.sessionTicket)
+ })
+ }
+ if len(m.supportedSignatureAlgorithms) > 0 {
+ // RFC 5246, Section 7.4.1.4.1
+ exts.AddUint16(extensionSignatureAlgorithms)
+ exts.AddUint16LengthPrefixed(func(exts *cryptobyte.Builder) {
+ exts.AddUint16LengthPrefixed(func(exts *cryptobyte.Builder) {
+ for _, sigAlgo := range m.supportedSignatureAlgorithms {
+ exts.AddUint16(uint16(sigAlgo))
+ }
+ })
+ })
+ }
+ if len(m.supportedSignatureAlgorithmsCert) > 0 {
+ // RFC 8446, Section 4.2.3
+ exts.AddUint16(extensionSignatureAlgorithmsCert)
+ exts.AddUint16LengthPrefixed(func(exts *cryptobyte.Builder) {
+ exts.AddUint16LengthPrefixed(func(exts *cryptobyte.Builder) {
+ for _, sigAlgo := range m.supportedSignatureAlgorithmsCert {
+ exts.AddUint16(uint16(sigAlgo))
+ }
+ })
+ })
+ }
+ if m.secureRenegotiationSupported {
+ // RFC 5746, Section 3.2
+ exts.AddUint16(extensionRenegotiationInfo)
+ exts.AddUint16LengthPrefixed(func(exts *cryptobyte.Builder) {
+ exts.AddUint8LengthPrefixed(func(exts *cryptobyte.Builder) {
+ exts.AddBytes(m.secureRenegotiation)
+ })
+ })
+ }
+ if len(m.alpnProtocols) > 0 {
+ // RFC 7301, Section 3.1
+ exts.AddUint16(extensionALPN)
+ exts.AddUint16LengthPrefixed(func(exts *cryptobyte.Builder) {
+ exts.AddUint16LengthPrefixed(func(exts *cryptobyte.Builder) {
+ for _, proto := range m.alpnProtocols {
+ exts.AddUint8LengthPrefixed(func(exts *cryptobyte.Builder) {
+ exts.AddBytes([]byte(proto))
+ })
+ }
+ })
+ })
+ }
+ if m.scts {
+ // RFC 6962, Section 3.3.1
+ exts.AddUint16(extensionSCT)
+ exts.AddUint16(0) // empty extension_data
+ }
+ if len(m.supportedVersions) > 0 {
+ // RFC 8446, Section 4.2.1
+ exts.AddUint16(extensionSupportedVersions)
+ exts.AddUint16LengthPrefixed(func(exts *cryptobyte.Builder) {
+ exts.AddUint8LengthPrefixed(func(exts *cryptobyte.Builder) {
+ for _, vers := range m.supportedVersions {
+ exts.AddUint16(vers)
+ }
+ })
+ })
+ }
+ if len(m.cookie) > 0 {
+ // RFC 8446, Section 4.2.2
+ exts.AddUint16(extensionCookie)
+ exts.AddUint16LengthPrefixed(func(exts *cryptobyte.Builder) {
+ exts.AddUint16LengthPrefixed(func(exts *cryptobyte.Builder) {
+ exts.AddBytes(m.cookie)
+ })
+ })
+ }
+ if len(m.keyShares) > 0 {
+ // RFC 8446, Section 4.2.8
+ exts.AddUint16(extensionKeyShare)
+ exts.AddUint16LengthPrefixed(func(exts *cryptobyte.Builder) {
+ exts.AddUint16LengthPrefixed(func(exts *cryptobyte.Builder) {
+ for _, ks := range m.keyShares {
+ exts.AddUint16(uint16(ks.group))
+ exts.AddUint16LengthPrefixed(func(exts *cryptobyte.Builder) {
+ exts.AddBytes(ks.data)
+ })
+ }
+ })
+ })
+ }
+ if m.earlyData {
+ // RFC 8446, Section 4.2.10
+ exts.AddUint16(extensionEarlyData)
+ exts.AddUint16(0) // empty extension_data
+ }
+ if len(m.pskModes) > 0 {
+ // RFC 8446, Section 4.2.9
+ exts.AddUint16(extensionPSKModes)
+ exts.AddUint16LengthPrefixed(func(exts *cryptobyte.Builder) {
+ exts.AddUint8LengthPrefixed(func(exts *cryptobyte.Builder) {
+ exts.AddBytes(m.pskModes)
+ })
+ })
+ }
+ for _, ext := range m.additionalExtensions {
+ exts.AddUint16(ext.Type)
+ exts.AddUint16LengthPrefixed(func(exts *cryptobyte.Builder) {
+ exts.AddBytes(ext.Data)
+ })
+ }
+ if len(m.pskIdentities) > 0 { // pre_shared_key must be the last extension
+ // RFC 8446, Section 4.2.11
+ exts.AddUint16(extensionPreSharedKey)
+ exts.AddUint16LengthPrefixed(func(exts *cryptobyte.Builder) {
+ exts.AddUint16LengthPrefixed(func(exts *cryptobyte.Builder) {
+ for _, psk := range m.pskIdentities {
+ exts.AddUint16LengthPrefixed(func(exts *cryptobyte.Builder) {
+ exts.AddBytes(psk.label)
+ })
+ exts.AddUint32(psk.obfuscatedTicketAge)
+ }
+ })
+ exts.AddUint16LengthPrefixed(func(exts *cryptobyte.Builder) {
+ for _, binder := range m.pskBinders {
+ exts.AddUint8LengthPrefixed(func(exts *cryptobyte.Builder) {
+ exts.AddBytes(binder)
+ })
+ }
+ })
+ })
+ }
+ extBytes, err := exts.Bytes()
+ if err != nil {
+ return nil, err
}
var b cryptobyte.Builder
@@ -117,225 +296,53 @@ func (m *clientHelloMsg) marshal() []byte {
b.AddBytes(m.compressionMethods)
})
- // If extensions aren't present, omit them.
- var extensionsPresent bool
- bWithoutExtensions := *b
-
- b.AddUint16LengthPrefixed(func(b *cryptobyte.Builder) {
- if len(m.serverName) > 0 {
- // RFC 6066, Section 3
- b.AddUint16(extensionServerName)
- b.AddUint16LengthPrefixed(func(b *cryptobyte.Builder) {
- b.AddUint16LengthPrefixed(func(b *cryptobyte.Builder) {
- b.AddUint8(0) // name_type = host_name
- b.AddUint16LengthPrefixed(func(b *cryptobyte.Builder) {
- b.AddBytes([]byte(m.serverName))
- })
- })
- })
- }
- if m.ocspStapling {
- // RFC 4366, Section 3.6
- b.AddUint16(extensionStatusRequest)
- b.AddUint16LengthPrefixed(func(b *cryptobyte.Builder) {
- b.AddUint8(1) // status_type = ocsp
- b.AddUint16(0) // empty responder_id_list
- b.AddUint16(0) // empty request_extensions
- })
- }
- if len(m.supportedCurves) > 0 {
- // RFC 4492, sections 5.1.1 and RFC 8446, Section 4.2.7
- b.AddUint16(extensionSupportedCurves)
- b.AddUint16LengthPrefixed(func(b *cryptobyte.Builder) {
- b.AddUint16LengthPrefixed(func(b *cryptobyte.Builder) {
- for _, curve := range m.supportedCurves {
- b.AddUint16(uint16(curve))
- }
- })
- })
- }
- if len(m.supportedPoints) > 0 {
- // RFC 4492, Section 5.1.2
- b.AddUint16(extensionSupportedPoints)
- b.AddUint16LengthPrefixed(func(b *cryptobyte.Builder) {
- b.AddUint8LengthPrefixed(func(b *cryptobyte.Builder) {
- b.AddBytes(m.supportedPoints)
- })
- })
- }
- if m.ticketSupported {
- // RFC 5077, Section 3.2
- b.AddUint16(extensionSessionTicket)
- b.AddUint16LengthPrefixed(func(b *cryptobyte.Builder) {
- b.AddBytes(m.sessionTicket)
- })
- }
- if len(m.supportedSignatureAlgorithms) > 0 {
- // RFC 5246, Section 7.4.1.4.1
- b.AddUint16(extensionSignatureAlgorithms)
- b.AddUint16LengthPrefixed(func(b *cryptobyte.Builder) {
- b.AddUint16LengthPrefixed(func(b *cryptobyte.Builder) {
- for _, sigAlgo := range m.supportedSignatureAlgorithms {
- b.AddUint16(uint16(sigAlgo))
- }
- })
- })
- }
- if len(m.supportedSignatureAlgorithmsCert) > 0 {
- // RFC 8446, Section 4.2.3
- b.AddUint16(extensionSignatureAlgorithmsCert)
- b.AddUint16LengthPrefixed(func(b *cryptobyte.Builder) {
- b.AddUint16LengthPrefixed(func(b *cryptobyte.Builder) {
- for _, sigAlgo := range m.supportedSignatureAlgorithmsCert {
- b.AddUint16(uint16(sigAlgo))
- }
- })
- })
- }
- if m.secureRenegotiationSupported {
- // RFC 5746, Section 3.2
- b.AddUint16(extensionRenegotiationInfo)
- b.AddUint16LengthPrefixed(func(b *cryptobyte.Builder) {
- b.AddUint8LengthPrefixed(func(b *cryptobyte.Builder) {
- b.AddBytes(m.secureRenegotiation)
- })
- })
- }
- if len(m.alpnProtocols) > 0 {
- // RFC 7301, Section 3.1
- b.AddUint16(extensionALPN)
- b.AddUint16LengthPrefixed(func(b *cryptobyte.Builder) {
- b.AddUint16LengthPrefixed(func(b *cryptobyte.Builder) {
- for _, proto := range m.alpnProtocols {
- b.AddUint8LengthPrefixed(func(b *cryptobyte.Builder) {
- b.AddBytes([]byte(proto))
- })
- }
- })
- })
- }
- if m.scts {
- // RFC 6962, Section 3.3.1
- b.AddUint16(extensionSCT)
- b.AddUint16(0) // empty extension_data
- }
- if len(m.supportedVersions) > 0 {
- // RFC 8446, Section 4.2.1
- b.AddUint16(extensionSupportedVersions)
- b.AddUint16LengthPrefixed(func(b *cryptobyte.Builder) {
- b.AddUint8LengthPrefixed(func(b *cryptobyte.Builder) {
- for _, vers := range m.supportedVersions {
- b.AddUint16(vers)
- }
- })
- })
- }
- if len(m.cookie) > 0 {
- // RFC 8446, Section 4.2.2
- b.AddUint16(extensionCookie)
- b.AddUint16LengthPrefixed(func(b *cryptobyte.Builder) {
- b.AddUint16LengthPrefixed(func(b *cryptobyte.Builder) {
- b.AddBytes(m.cookie)
- })
- })
- }
- if len(m.keyShares) > 0 {
- // RFC 8446, Section 4.2.8
- b.AddUint16(extensionKeyShare)
- b.AddUint16LengthPrefixed(func(b *cryptobyte.Builder) {
- b.AddUint16LengthPrefixed(func(b *cryptobyte.Builder) {
- for _, ks := range m.keyShares {
- b.AddUint16(uint16(ks.group))
- b.AddUint16LengthPrefixed(func(b *cryptobyte.Builder) {
- b.AddBytes(ks.data)
- })
- }
- })
- })
- }
- if m.earlyData {
- // RFC 8446, Section 4.2.10
- b.AddUint16(extensionEarlyData)
- b.AddUint16(0) // empty extension_data
- }
- if len(m.pskModes) > 0 {
- // RFC 8446, Section 4.2.9
- b.AddUint16(extensionPSKModes)
- b.AddUint16LengthPrefixed(func(b *cryptobyte.Builder) {
- b.AddUint8LengthPrefixed(func(b *cryptobyte.Builder) {
- b.AddBytes(m.pskModes)
- })
- })
- }
- for _, ext := range m.additionalExtensions {
- b.AddUint16(ext.Type)
- b.AddUint16LengthPrefixed(func(b *cryptobyte.Builder) {
- b.AddBytes(ext.Data)
- })
- }
- if len(m.pskIdentities) > 0 { // pre_shared_key must be the last extension
- // RFC 8446, Section 4.2.11
- b.AddUint16(extensionPreSharedKey)
- b.AddUint16LengthPrefixed(func(b *cryptobyte.Builder) {
- b.AddUint16LengthPrefixed(func(b *cryptobyte.Builder) {
- for _, psk := range m.pskIdentities {
- b.AddUint16LengthPrefixed(func(b *cryptobyte.Builder) {
- b.AddBytes(psk.label)
- })
- b.AddUint32(psk.obfuscatedTicketAge)
- }
- })
- b.AddUint16LengthPrefixed(func(b *cryptobyte.Builder) {
- for _, binder := range m.pskBinders {
- b.AddUint8LengthPrefixed(func(b *cryptobyte.Builder) {
- b.AddBytes(binder)
- })
- }
- })
- })
- }
-
- extensionsPresent = len(b.BytesOrPanic()) > 2
- })
-
- if !extensionsPresent {
- *b = bWithoutExtensions
+ if len(extBytes) > 0 {
+ b.AddUint16LengthPrefixed(func(b *cryptobyte.Builder) {
+ b.AddBytes(extBytes)
+ })
}
})
- m.raw = b.BytesOrPanic()
- return m.raw
+ m.raw, err = b.Bytes()
+ return m.raw, err
}
// marshalWithoutBinders returns the ClientHello through the
// PreSharedKeyExtension.identities field, according to RFC 8446, Section
// 4.2.11.2. Note that m.pskBinders must be set to slices of the correct length.
-func (m *clientHelloMsg) marshalWithoutBinders() []byte {
+func (m *clientHelloMsg) marshalWithoutBinders() ([]byte, error) {
bindersLen := 2 // uint16 length prefix
for _, binder := range m.pskBinders {
bindersLen += 1 // uint8 length prefix
bindersLen += len(binder)
}
- fullMessage := m.marshal()
- return fullMessage[:len(fullMessage)-bindersLen]
+ fullMessage, err := m.marshal()
+ if err != nil {
+ return nil, err
+ }
+ return fullMessage[:len(fullMessage)-bindersLen], nil
}
// updateBinders updates the m.pskBinders field, if necessary updating the
// cached marshaled representation. The supplied binders must have the same
// length as the current m.pskBinders.
-func (m *clientHelloMsg) updateBinders(pskBinders [][]byte) {
+func (m *clientHelloMsg) updateBinders(pskBinders [][]byte) error {
if len(pskBinders) != len(m.pskBinders) {
- panic("tls: internal error: pskBinders length mismatch")
+ return errors.New("tls: internal error: pskBinders length mismatch")
}
for i := range m.pskBinders {
if len(pskBinders[i]) != len(m.pskBinders[i]) {
- panic("tls: internal error: pskBinders length mismatch")
+ return errors.New("tls: internal error: pskBinders length mismatch")
}
}
m.pskBinders = pskBinders
if m.raw != nil {
- lenWithoutBinders := len(m.marshalWithoutBinders())
+ helloBytes, err := m.marshalWithoutBinders()
+ if err != nil {
+ return err
+ }
+ lenWithoutBinders := len(helloBytes)
b := cryptobyte.NewFixedBuilder(m.raw[:lenWithoutBinders])
b.AddUint16LengthPrefixed(func(b *cryptobyte.Builder) {
for _, binder := range m.pskBinders {
@@ -345,9 +352,11 @@ func (m *clientHelloMsg) updateBinders(pskBinders [][]byte) {
}
})
if out, err := b.Bytes(); err != nil || len(out) != len(m.raw) {
- panic("tls: internal error: failed to update binders")
+ return errors.New("tls: internal error: failed to update binders")
}
}
+
+ return nil
}
func (m *clientHelloMsg) unmarshal(data []byte) bool {
@@ -391,15 +400,21 @@ func (m *clientHelloMsg) unmarshal(data []byte) bool {
return false
}
+ seenExts := make(map[uint16]bool)
for !extensions.Empty() {
- var ext uint16
+ var extension uint16
var extData cryptobyte.String
- if !extensions.ReadUint16(&ext) ||
+ if !extensions.ReadUint16(&extension) ||
!extensions.ReadUint16LengthPrefixed(&extData) {
return false
}
- switch ext {
+ if seenExts[extension] {
+ return false
+ }
+ seenExts[extension] = true
+
+ switch extension {
case extensionServerName:
// RFC 6066, Section 3
var nameList cryptobyte.String
@@ -583,7 +598,7 @@ func (m *clientHelloMsg) unmarshal(data []byte) bool {
m.pskBinders = append(m.pskBinders, binder)
}
default:
- m.additionalExtensions = append(m.additionalExtensions, Extension{Type: ext, Data: extData})
+ m.additionalExtensions = append(m.additionalExtensions, Extension{Type: extension, Data: extData})
continue
}
@@ -619,9 +634,98 @@ type serverHelloMsg struct {
selectedGroup CurveID
}
-func (m *serverHelloMsg) marshal() []byte {
+func (m *serverHelloMsg) marshal() ([]byte, error) {
if m.raw != nil {
- return m.raw
+ return m.raw, nil
+ }
+
+ var exts cryptobyte.Builder
+ if m.ocspStapling {
+ exts.AddUint16(extensionStatusRequest)
+ exts.AddUint16(0) // empty extension_data
+ }
+ if m.ticketSupported {
+ exts.AddUint16(extensionSessionTicket)
+ exts.AddUint16(0) // empty extension_data
+ }
+ if m.secureRenegotiationSupported {
+ exts.AddUint16(extensionRenegotiationInfo)
+ exts.AddUint16LengthPrefixed(func(exts *cryptobyte.Builder) {
+ exts.AddUint8LengthPrefixed(func(exts *cryptobyte.Builder) {
+ exts.AddBytes(m.secureRenegotiation)
+ })
+ })
+ }
+ if len(m.alpnProtocol) > 0 {
+ exts.AddUint16(extensionALPN)
+ exts.AddUint16LengthPrefixed(func(exts *cryptobyte.Builder) {
+ exts.AddUint16LengthPrefixed(func(exts *cryptobyte.Builder) {
+ exts.AddUint8LengthPrefixed(func(exts *cryptobyte.Builder) {
+ exts.AddBytes([]byte(m.alpnProtocol))
+ })
+ })
+ })
+ }
+ if len(m.scts) > 0 {
+ exts.AddUint16(extensionSCT)
+ exts.AddUint16LengthPrefixed(func(exts *cryptobyte.Builder) {
+ exts.AddUint16LengthPrefixed(func(exts *cryptobyte.Builder) {
+ for _, sct := range m.scts {
+ exts.AddUint16LengthPrefixed(func(exts *cryptobyte.Builder) {
+ exts.AddBytes(sct)
+ })
+ }
+ })
+ })
+ }
+ if m.supportedVersion != 0 {
+ exts.AddUint16(extensionSupportedVersions)
+ exts.AddUint16LengthPrefixed(func(exts *cryptobyte.Builder) {
+ exts.AddUint16(m.supportedVersion)
+ })
+ }
+ if m.serverShare.group != 0 {
+ exts.AddUint16(extensionKeyShare)
+ exts.AddUint16LengthPrefixed(func(exts *cryptobyte.Builder) {
+ exts.AddUint16(uint16(m.serverShare.group))
+ exts.AddUint16LengthPrefixed(func(exts *cryptobyte.Builder) {
+ exts.AddBytes(m.serverShare.data)
+ })
+ })
+ }
+ if m.selectedIdentityPresent {
+ exts.AddUint16(extensionPreSharedKey)
+ exts.AddUint16LengthPrefixed(func(exts *cryptobyte.Builder) {
+ exts.AddUint16(m.selectedIdentity)
+ })
+ }
+
+ if len(m.cookie) > 0 {
+ exts.AddUint16(extensionCookie)
+ exts.AddUint16LengthPrefixed(func(exts *cryptobyte.Builder) {
+ exts.AddUint16LengthPrefixed(func(exts *cryptobyte.Builder) {
+ exts.AddBytes(m.cookie)
+ })
+ })
+ }
+ if m.selectedGroup != 0 {
+ exts.AddUint16(extensionKeyShare)
+ exts.AddUint16LengthPrefixed(func(exts *cryptobyte.Builder) {
+ exts.AddUint16(uint16(m.selectedGroup))
+ })
+ }
+ if len(m.supportedPoints) > 0 {
+ exts.AddUint16(extensionSupportedPoints)
+ exts.AddUint16LengthPrefixed(func(exts *cryptobyte.Builder) {
+ exts.AddUint8LengthPrefixed(func(exts *cryptobyte.Builder) {
+ exts.AddBytes(m.supportedPoints)
+ })
+ })
+ }
+
+ extBytes, err := exts.Bytes()
+ if err != nil {
+ return nil, err
}
var b cryptobyte.Builder
@@ -635,104 +739,15 @@ func (m *serverHelloMsg) marshal() []byte {
b.AddUint16(m.cipherSuite)
b.AddUint8(m.compressionMethod)
- // If extensions aren't present, omit them.
- var extensionsPresent bool
- bWithoutExtensions := *b
-
- b.AddUint16LengthPrefixed(func(b *cryptobyte.Builder) {
- if m.ocspStapling {
- b.AddUint16(extensionStatusRequest)
- b.AddUint16(0) // empty extension_data
- }
- if m.ticketSupported {
- b.AddUint16(extensionSessionTicket)
- b.AddUint16(0) // empty extension_data
- }
- if m.secureRenegotiationSupported {
- b.AddUint16(extensionRenegotiationInfo)
- b.AddUint16LengthPrefixed(func(b *cryptobyte.Builder) {
- b.AddUint8LengthPrefixed(func(b *cryptobyte.Builder) {
- b.AddBytes(m.secureRenegotiation)
- })
- })
- }
- if len(m.alpnProtocol) > 0 {
- b.AddUint16(extensionALPN)
- b.AddUint16LengthPrefixed(func(b *cryptobyte.Builder) {
- b.AddUint16LengthPrefixed(func(b *cryptobyte.Builder) {
- b.AddUint8LengthPrefixed(func(b *cryptobyte.Builder) {
- b.AddBytes([]byte(m.alpnProtocol))
- })
- })
- })
- }
- if len(m.scts) > 0 {
- b.AddUint16(extensionSCT)
- b.AddUint16LengthPrefixed(func(b *cryptobyte.Builder) {
- b.AddUint16LengthPrefixed(func(b *cryptobyte.Builder) {
- for _, sct := range m.scts {
- b.AddUint16LengthPrefixed(func(b *cryptobyte.Builder) {
- b.AddBytes(sct)
- })
- }
- })
- })
- }
- if m.supportedVersion != 0 {
- b.AddUint16(extensionSupportedVersions)
- b.AddUint16LengthPrefixed(func(b *cryptobyte.Builder) {
- b.AddUint16(m.supportedVersion)
- })
- }
- if m.serverShare.group != 0 {
- b.AddUint16(extensionKeyShare)
- b.AddUint16LengthPrefixed(func(b *cryptobyte.Builder) {
- b.AddUint16(uint16(m.serverShare.group))
- b.AddUint16LengthPrefixed(func(b *cryptobyte.Builder) {
- b.AddBytes(m.serverShare.data)
- })
- })
- }
- if m.selectedIdentityPresent {
- b.AddUint16(extensionPreSharedKey)
- b.AddUint16LengthPrefixed(func(b *cryptobyte.Builder) {
- b.AddUint16(m.selectedIdentity)
- })
- }
-
- if len(m.cookie) > 0 {
- b.AddUint16(extensionCookie)
- b.AddUint16LengthPrefixed(func(b *cryptobyte.Builder) {
- b.AddUint16LengthPrefixed(func(b *cryptobyte.Builder) {
- b.AddBytes(m.cookie)
- })
- })
- }
- if m.selectedGroup != 0 {
- b.AddUint16(extensionKeyShare)
- b.AddUint16LengthPrefixed(func(b *cryptobyte.Builder) {
- b.AddUint16(uint16(m.selectedGroup))
- })
- }
- if len(m.supportedPoints) > 0 {
- b.AddUint16(extensionSupportedPoints)
- b.AddUint16LengthPrefixed(func(b *cryptobyte.Builder) {
- b.AddUint8LengthPrefixed(func(b *cryptobyte.Builder) {
- b.AddBytes(m.supportedPoints)
- })
- })
- }
-
- extensionsPresent = len(b.BytesOrPanic()) > 2
- })
-
- if !extensionsPresent {
- *b = bWithoutExtensions
+ if len(extBytes) > 0 {
+ b.AddUint16LengthPrefixed(func(b *cryptobyte.Builder) {
+ b.AddBytes(extBytes)
+ })
}
})
- m.raw = b.BytesOrPanic()
- return m.raw
+ m.raw, err = b.Bytes()
+ return m.raw, err
}
func (m *serverHelloMsg) unmarshal(data []byte) bool {
@@ -757,6 +772,7 @@ func (m *serverHelloMsg) unmarshal(data []byte) bool {
return false
}
+ seenExts := make(map[uint16]bool)
for !extensions.Empty() {
var extension uint16
var extData cryptobyte.String
@@ -765,6 +781,11 @@ func (m *serverHelloMsg) unmarshal(data []byte) bool {
return false
}
+ if seenExts[extension] {
+ return false
+ }
+ seenExts[extension] = true
+
switch extension {
case extensionStatusRequest:
m.ocspStapling = true
@@ -853,9 +874,9 @@ type encryptedExtensionsMsg struct {
additionalExtensions []Extension
}
-func (m *encryptedExtensionsMsg) marshal() []byte {
+func (m *encryptedExtensionsMsg) marshal() ([]byte, error) {
if m.raw != nil {
- return m.raw
+ return m.raw, nil
}
var b cryptobyte.Builder
@@ -886,8 +907,9 @@ func (m *encryptedExtensionsMsg) marshal() []byte {
})
})
- m.raw = b.BytesOrPanic()
- return m.raw
+ var err error
+ m.raw, err = b.Bytes()
+ return m.raw, err
}
func (m *encryptedExtensionsMsg) unmarshal(data []byte) bool {
@@ -937,10 +959,10 @@ func (m *encryptedExtensionsMsg) unmarshal(data []byte) bool {
type endOfEarlyDataMsg struct{}
-func (m *endOfEarlyDataMsg) marshal() []byte {
+func (m *endOfEarlyDataMsg) marshal() ([]byte, error) {
x := make([]byte, 4)
x[0] = typeEndOfEarlyData
- return x
+ return x, nil
}
func (m *endOfEarlyDataMsg) unmarshal(data []byte) bool {
@@ -952,9 +974,9 @@ type keyUpdateMsg struct {
updateRequested bool
}
-func (m *keyUpdateMsg) marshal() []byte {
+func (m *keyUpdateMsg) marshal() ([]byte, error) {
if m.raw != nil {
- return m.raw
+ return m.raw, nil
}
var b cryptobyte.Builder
@@ -967,8 +989,9 @@ func (m *keyUpdateMsg) marshal() []byte {
}
})
- m.raw = b.BytesOrPanic()
- return m.raw
+ var err error
+ m.raw, err = b.Bytes()
+ return m.raw, err
}
func (m *keyUpdateMsg) unmarshal(data []byte) bool {
@@ -1000,9 +1023,9 @@ type newSessionTicketMsgTLS13 struct {
maxEarlyData uint32
}
-func (m *newSessionTicketMsgTLS13) marshal() []byte {
+func (m *newSessionTicketMsgTLS13) marshal() ([]byte, error) {
if m.raw != nil {
- return m.raw
+ return m.raw, nil
}
var b cryptobyte.Builder
@@ -1027,8 +1050,9 @@ func (m *newSessionTicketMsgTLS13) marshal() []byte {
})
})
- m.raw = b.BytesOrPanic()
- return m.raw
+ var err error
+ m.raw, err = b.Bytes()
+ return m.raw, err
}
func (m *newSessionTicketMsgTLS13) unmarshal(data []byte) bool {
@@ -1081,9 +1105,9 @@ type certificateRequestMsgTLS13 struct {
certificateAuthorities [][]byte
}
-func (m *certificateRequestMsgTLS13) marshal() []byte {
+func (m *certificateRequestMsgTLS13) marshal() ([]byte, error) {
if m.raw != nil {
- return m.raw
+ return m.raw, nil
}
var b cryptobyte.Builder
@@ -1142,8 +1166,9 @@ func (m *certificateRequestMsgTLS13) marshal() []byte {
})
})
- m.raw = b.BytesOrPanic()
- return m.raw
+ var err error
+ m.raw, err = b.Bytes()
+ return m.raw, err
}
func (m *certificateRequestMsgTLS13) unmarshal(data []byte) bool {
@@ -1227,9 +1252,9 @@ type certificateMsg struct {
certificates [][]byte
}
-func (m *certificateMsg) marshal() (x []byte) {
+func (m *certificateMsg) marshal() ([]byte, error) {
if m.raw != nil {
- return m.raw
+ return m.raw, nil
}
var i int
@@ -1238,7 +1263,7 @@ func (m *certificateMsg) marshal() (x []byte) {
}
length := 3 + 3*len(m.certificates) + i
- x = make([]byte, 4+length)
+ x := make([]byte, 4+length)
x[0] = typeCertificate
x[1] = uint8(length >> 16)
x[2] = uint8(length >> 8)
@@ -1259,7 +1284,7 @@ func (m *certificateMsg) marshal() (x []byte) {
}
m.raw = x
- return
+ return m.raw, nil
}
func (m *certificateMsg) unmarshal(data []byte) bool {
@@ -1306,9 +1331,9 @@ type certificateMsgTLS13 struct {
scts bool
}
-func (m *certificateMsgTLS13) marshal() []byte {
+func (m *certificateMsgTLS13) marshal() ([]byte, error) {
if m.raw != nil {
- return m.raw
+ return m.raw, nil
}
var b cryptobyte.Builder
@@ -1326,8 +1351,9 @@ func (m *certificateMsgTLS13) marshal() []byte {
marshalCertificate(b, certificate)
})
- m.raw = b.BytesOrPanic()
- return m.raw
+ var err error
+ m.raw, err = b.Bytes()
+ return m.raw, err
}
func marshalCertificate(b *cryptobyte.Builder, certificate Certificate) {
@@ -1450,9 +1476,9 @@ type serverKeyExchangeMsg struct {
key []byte
}
-func (m *serverKeyExchangeMsg) marshal() []byte {
+func (m *serverKeyExchangeMsg) marshal() ([]byte, error) {
if m.raw != nil {
- return m.raw
+ return m.raw, nil
}
length := len(m.key)
x := make([]byte, length+4)
@@ -1463,7 +1489,7 @@ func (m *serverKeyExchangeMsg) marshal() []byte {
copy(x[4:], m.key)
m.raw = x
- return x
+ return x, nil
}
func (m *serverKeyExchangeMsg) unmarshal(data []byte) bool {
@@ -1480,9 +1506,9 @@ type certificateStatusMsg struct {
response []byte
}
-func (m *certificateStatusMsg) marshal() []byte {
+func (m *certificateStatusMsg) marshal() ([]byte, error) {
if m.raw != nil {
- return m.raw
+ return m.raw, nil
}
var b cryptobyte.Builder
@@ -1494,8 +1520,9 @@ func (m *certificateStatusMsg) marshal() []byte {
})
})
- m.raw = b.BytesOrPanic()
- return m.raw
+ var err error
+ m.raw, err = b.Bytes()
+ return m.raw, err
}
func (m *certificateStatusMsg) unmarshal(data []byte) bool {
@@ -1514,10 +1541,10 @@ func (m *certificateStatusMsg) unmarshal(data []byte) bool {
type serverHelloDoneMsg struct{}
-func (m *serverHelloDoneMsg) marshal() []byte {
+func (m *serverHelloDoneMsg) marshal() ([]byte, error) {
x := make([]byte, 4)
x[0] = typeServerHelloDone
- return x
+ return x, nil
}
func (m *serverHelloDoneMsg) unmarshal(data []byte) bool {
@@ -1529,9 +1556,9 @@ type clientKeyExchangeMsg struct {
ciphertext []byte
}
-func (m *clientKeyExchangeMsg) marshal() []byte {
+func (m *clientKeyExchangeMsg) marshal() ([]byte, error) {
if m.raw != nil {
- return m.raw
+ return m.raw, nil
}
length := len(m.ciphertext)
x := make([]byte, length+4)
@@ -1542,7 +1569,7 @@ func (m *clientKeyExchangeMsg) marshal() []byte {
copy(x[4:], m.ciphertext)
m.raw = x
- return x
+ return x, nil
}
func (m *clientKeyExchangeMsg) unmarshal(data []byte) bool {
@@ -1563,9 +1590,9 @@ type finishedMsg struct {
verifyData []byte
}
-func (m *finishedMsg) marshal() []byte {
+func (m *finishedMsg) marshal() ([]byte, error) {
if m.raw != nil {
- return m.raw
+ return m.raw, nil
}
var b cryptobyte.Builder
@@ -1574,8 +1601,9 @@ func (m *finishedMsg) marshal() []byte {
b.AddBytes(m.verifyData)
})
- m.raw = b.BytesOrPanic()
- return m.raw
+ var err error
+ m.raw, err = b.Bytes()
+ return m.raw, err
}
func (m *finishedMsg) unmarshal(data []byte) bool {
@@ -1597,9 +1625,9 @@ type certificateRequestMsg struct {
certificateAuthorities [][]byte
}
-func (m *certificateRequestMsg) marshal() (x []byte) {
+func (m *certificateRequestMsg) marshal() ([]byte, error) {
if m.raw != nil {
- return m.raw
+ return m.raw, nil
}
// See RFC 4346, Section 7.4.4.
@@ -1614,7 +1642,7 @@ func (m *certificateRequestMsg) marshal() (x []byte) {
length += 2 + 2*len(m.supportedSignatureAlgorithms)
}
- x = make([]byte, 4+length)
+ x := make([]byte, 4+length)
x[0] = typeCertificateRequest
x[1] = uint8(length >> 16)
x[2] = uint8(length >> 8)
@@ -1649,7 +1677,7 @@ func (m *certificateRequestMsg) marshal() (x []byte) {
}
m.raw = x
- return
+ return m.raw, nil
}
func (m *certificateRequestMsg) unmarshal(data []byte) bool {
@@ -1735,9 +1763,9 @@ type certificateVerifyMsg struct {
signature []byte
}
-func (m *certificateVerifyMsg) marshal() (x []byte) {
+func (m *certificateVerifyMsg) marshal() ([]byte, error) {
if m.raw != nil {
- return m.raw
+ return m.raw, nil
}
var b cryptobyte.Builder
@@ -1751,8 +1779,9 @@ func (m *certificateVerifyMsg) marshal() (x []byte) {
})
})
- m.raw = b.BytesOrPanic()
- return m.raw
+ var err error
+ m.raw, err = b.Bytes()
+ return m.raw, err
}
func (m *certificateVerifyMsg) unmarshal(data []byte) bool {
@@ -1775,15 +1804,15 @@ type newSessionTicketMsg struct {
ticket []byte
}
-func (m *newSessionTicketMsg) marshal() (x []byte) {
+func (m *newSessionTicketMsg) marshal() ([]byte, error) {
if m.raw != nil {
- return m.raw
+ return m.raw, nil
}
// See RFC 5077, Section 3.3.
ticketLen := len(m.ticket)
length := 2 + 4 + ticketLen
- x = make([]byte, 4+length)
+ x := make([]byte, 4+length)
x[0] = typeNewSessionTicket
x[1] = uint8(length >> 16)
x[2] = uint8(length >> 8)
@@ -1794,7 +1823,7 @@ func (m *newSessionTicketMsg) marshal() (x []byte) {
m.raw = x
- return
+ return m.raw, nil
}
func (m *newSessionTicketMsg) unmarshal(data []byte) bool {
@@ -1822,10 +1851,25 @@ func (m *newSessionTicketMsg) unmarshal(data []byte) bool {
type helloRequestMsg struct {
}
-func (*helloRequestMsg) marshal() []byte {
- return []byte{typeHelloRequest, 0, 0, 0}
+func (*helloRequestMsg) marshal() ([]byte, error) {
+ return []byte{typeHelloRequest, 0, 0, 0}, nil
}
func (*helloRequestMsg) unmarshal(data []byte) bool {
return len(data) == 4
}
+
+type transcriptHash interface {
+ Write([]byte) (int, error)
+}
+
+// transcriptMsg is a helper used to marshal and hash messages which typically
+// are not written to the wire, and as such aren't hashed during Conn.writeRecord.
+func transcriptMsg(msg handshakeMessage, h transcriptHash) error {
+ data, err := msg.marshal()
+ if err != nil {
+ return err
+ }
+ h.Write(data)
+ return nil
+}
diff --git a/vendor/github.com/marten-seemann/qtls-go1-18/handshake_server.go b/vendor/github.com/quic-go/qtls-go1-20/handshake_server.go
similarity index 89%
rename from vendor/github.com/marten-seemann/qtls-go1-18/handshake_server.go
rename to vendor/github.com/quic-go/qtls-go1-20/handshake_server.go
index 2fe82848c..05321cfb6 100644
--- a/vendor/github.com/marten-seemann/qtls-go1-18/handshake_server.go
+++ b/vendor/github.com/quic-go/qtls-go1-20/handshake_server.go
@@ -16,7 +16,6 @@ import (
"fmt"
"hash"
"io"
- "sync/atomic"
"time"
)
@@ -130,14 +129,17 @@ func (hs *serverHandshakeState) handshake() error {
}
c.ekm = ekmFromMasterSecret(c.vers, hs.suite, hs.masterSecret, hs.clientHello.random, hs.hello.random)
- atomic.StoreUint32(&c.handshakeStatus, 1)
+ c.isHandshakeComplete.Store(true)
+ c.updateConnectionState()
return nil
}
// readClientHello reads a ClientHello message and selects the protocol version.
func (c *Conn) readClientHello(ctx context.Context) (*clientHelloMsg, error) {
- msg, err := c.readHandshake()
+ // clientHelloMsg is included in the transcript, but we haven't initialized
+ // it yet. The respective handshake functions will record it themselves.
+ msg, err := c.readHandshake(nil)
if err != nil {
return nil, err
}
@@ -270,7 +272,7 @@ func (hs *serverHandshakeState) processClientHello() error {
hs.ecdheOk = supportsECDHE(c.config, hs.clientHello.supportedCurves, hs.clientHello.supportedPoints)
- if hs.ecdheOk {
+ if hs.ecdheOk && len(hs.clientHello.supportedPoints) > 0 {
// Although omitting the ec_point_formats extension is permitted, some
// old OpenSSL version will refuse to handshake if not present.
//
@@ -351,6 +353,13 @@ func supportsECDHE(c *config, supportedCurves []CurveID, supportedPoints []uint8
break
}
}
+ // Per RFC 8422, Section 5.1.2, if the Supported Point Formats extension is
+ // missing, uncompressed points are supported. If supportedPoints is empty,
+ // the extension must be missing, as an empty extension body is rejected by
+ // the parser. See https://go.dev/issue/49126.
+ if len(supportedPoints) == 0 {
+ supportsPointFormat = true
+ }
return supportsCurve && supportsPointFormat
}
@@ -486,9 +495,10 @@ func (hs *serverHandshakeState) doResumeHandshake() error {
hs.hello.ticketSupported = hs.sessionState.usedOldKey
hs.finishedHash = newFinishedHash(c.vers, hs.suite)
hs.finishedHash.discardHandshakeBuffer()
- hs.finishedHash.Write(hs.clientHello.marshal())
- hs.finishedHash.Write(hs.hello.marshal())
- if _, err := c.writeRecord(recordTypeHandshake, hs.hello.marshal()); err != nil {
+ if err := transcriptMsg(hs.clientHello, &hs.finishedHash); err != nil {
+ return err
+ }
+ if _, err := hs.c.writeHandshakeRecord(hs.hello, &hs.finishedHash); err != nil {
return err
}
@@ -526,24 +536,23 @@ func (hs *serverHandshakeState) doFullHandshake() error {
// certificates won't be used.
hs.finishedHash.discardHandshakeBuffer()
}
- hs.finishedHash.Write(hs.clientHello.marshal())
- hs.finishedHash.Write(hs.hello.marshal())
- if _, err := c.writeRecord(recordTypeHandshake, hs.hello.marshal()); err != nil {
+ if err := transcriptMsg(hs.clientHello, &hs.finishedHash); err != nil {
+ return err
+ }
+ if _, err := hs.c.writeHandshakeRecord(hs.hello, &hs.finishedHash); err != nil {
return err
}
certMsg := new(certificateMsg)
certMsg.certificates = hs.cert.Certificate
- hs.finishedHash.Write(certMsg.marshal())
- if _, err := c.writeRecord(recordTypeHandshake, certMsg.marshal()); err != nil {
+ if _, err := hs.c.writeHandshakeRecord(certMsg, &hs.finishedHash); err != nil {
return err
}
if hs.hello.ocspStapling {
certStatus := new(certificateStatusMsg)
certStatus.response = hs.cert.OCSPStaple
- hs.finishedHash.Write(certStatus.marshal())
- if _, err := c.writeRecord(recordTypeHandshake, certStatus.marshal()); err != nil {
+ if _, err := hs.c.writeHandshakeRecord(certStatus, &hs.finishedHash); err != nil {
return err
}
}
@@ -555,8 +564,7 @@ func (hs *serverHandshakeState) doFullHandshake() error {
return err
}
if skx != nil {
- hs.finishedHash.Write(skx.marshal())
- if _, err := c.writeRecord(recordTypeHandshake, skx.marshal()); err != nil {
+ if _, err := hs.c.writeHandshakeRecord(skx, &hs.finishedHash); err != nil {
return err
}
}
@@ -571,7 +579,7 @@ func (hs *serverHandshakeState) doFullHandshake() error {
}
if c.vers >= VersionTLS12 {
certReq.hasSignatureAlgorithm = true
- certReq.supportedSignatureAlgorithms = supportedSignatureAlgorithms
+ certReq.supportedSignatureAlgorithms = supportedSignatureAlgorithms()
}
// An empty list of certificateAuthorities signals to
@@ -582,15 +590,13 @@ func (hs *serverHandshakeState) doFullHandshake() error {
if c.config.ClientCAs != nil {
certReq.certificateAuthorities = c.config.ClientCAs.Subjects()
}
- hs.finishedHash.Write(certReq.marshal())
- if _, err := c.writeRecord(recordTypeHandshake, certReq.marshal()); err != nil {
+ if _, err := hs.c.writeHandshakeRecord(certReq, &hs.finishedHash); err != nil {
return err
}
}
helloDone := new(serverHelloDoneMsg)
- hs.finishedHash.Write(helloDone.marshal())
- if _, err := c.writeRecord(recordTypeHandshake, helloDone.marshal()); err != nil {
+ if _, err := hs.c.writeHandshakeRecord(helloDone, &hs.finishedHash); err != nil {
return err
}
@@ -600,7 +606,7 @@ func (hs *serverHandshakeState) doFullHandshake() error {
var pub crypto.PublicKey // public key for client auth, if any
- msg, err := c.readHandshake()
+ msg, err := c.readHandshake(&hs.finishedHash)
if err != nil {
return err
}
@@ -613,7 +619,6 @@ func (hs *serverHandshakeState) doFullHandshake() error {
c.sendAlert(alertUnexpectedMessage)
return unexpectedMessageError(certMsg, msg)
}
- hs.finishedHash.Write(certMsg.marshal())
if err := c.processCertsFromClient(Certificate{
Certificate: certMsg.certificates,
@@ -624,7 +629,7 @@ func (hs *serverHandshakeState) doFullHandshake() error {
pub = c.peerCertificates[0].PublicKey
}
- msg, err = c.readHandshake()
+ msg, err = c.readHandshake(&hs.finishedHash)
if err != nil {
return err
}
@@ -642,7 +647,6 @@ func (hs *serverHandshakeState) doFullHandshake() error {
c.sendAlert(alertUnexpectedMessage)
return unexpectedMessageError(ckx, msg)
}
- hs.finishedHash.Write(ckx.marshal())
preMasterSecret, err := keyAgreement.processClientKeyExchange(c.config, hs.cert, ckx, c.vers)
if err != nil {
@@ -662,7 +666,10 @@ func (hs *serverHandshakeState) doFullHandshake() error {
// to the client's certificate. This allows us to verify that the client is in
// possession of the private key of the certificate.
if len(c.peerCertificates) > 0 {
- msg, err = c.readHandshake()
+ // certificateVerifyMsg is included in the transcript, but not until
+ // after we verify the handshake signature, since the state before
+ // this message was sent is used.
+ msg, err = c.readHandshake(nil)
if err != nil {
return err
}
@@ -691,13 +698,15 @@ func (hs *serverHandshakeState) doFullHandshake() error {
}
}
- signed := hs.finishedHash.hashForClientCertificate(sigType, sigHash, hs.masterSecret)
+ signed := hs.finishedHash.hashForClientCertificate(sigType, sigHash)
if err := verifyHandshakeSignature(sigType, pub, sigHash, signed, certVerify.signature); err != nil {
c.sendAlert(alertDecryptError)
return errors.New("tls: invalid signature by the client certificate: " + err.Error())
}
- hs.finishedHash.Write(certVerify.marshal())
+ if err := transcriptMsg(certVerify, &hs.finishedHash); err != nil {
+ return err
+ }
}
hs.finishedHash.discardHandshakeBuffer()
@@ -737,7 +746,10 @@ func (hs *serverHandshakeState) readFinished(out []byte) error {
return err
}
- msg, err := c.readHandshake()
+ // finishedMsg is included in the transcript, but not until after we
+ // check the client version, since the state before this message was
+ // sent is used during verification.
+ msg, err := c.readHandshake(nil)
if err != nil {
return err
}
@@ -754,7 +766,10 @@ func (hs *serverHandshakeState) readFinished(out []byte) error {
return errors.New("tls: client's Finished message is incorrect")
}
- hs.finishedHash.Write(clientFinished.marshal())
+ if err := transcriptMsg(clientFinished, &hs.finishedHash); err != nil {
+ return err
+ }
+
copy(out, verify)
return nil
}
@@ -788,14 +803,16 @@ func (hs *serverHandshakeState) sendSessionTicket() error {
masterSecret: hs.masterSecret,
certificates: certsFromClient,
}
- var err error
- m.ticket, err = c.encryptTicket(state.marshal())
+ stateBytes, err := state.marshal()
+ if err != nil {
+ return err
+ }
+ m.ticket, err = c.encryptTicket(stateBytes)
if err != nil {
return err
}
- hs.finishedHash.Write(m.marshal())
- if _, err := c.writeRecord(recordTypeHandshake, m.marshal()); err != nil {
+ if _, err := hs.c.writeHandshakeRecord(m, &hs.finishedHash); err != nil {
return err
}
@@ -805,14 +822,13 @@ func (hs *serverHandshakeState) sendSessionTicket() error {
func (hs *serverHandshakeState) sendFinished(out []byte) error {
c := hs.c
- if _, err := c.writeRecord(recordTypeChangeCipherSpec, []byte{1}); err != nil {
+ if err := c.writeChangeCipherRecord(); err != nil {
return err
}
finished := new(finishedMsg)
finished.verifyData = hs.finishedHash.serverSum(hs.masterSecret)
- hs.finishedHash.Write(finished.marshal())
- if _, err := c.writeRecord(recordTypeHandshake, finished.marshal()); err != nil {
+ if _, err := hs.c.writeHandshakeRecord(finished, &hs.finishedHash); err != nil {
return err
}
@@ -833,6 +849,10 @@ func (c *Conn) processCertsFromClient(certificate Certificate) error {
c.sendAlert(alertBadCertificate)
return errors.New("tls: failed to parse client certificate: " + err.Error())
}
+ if certs[i].PublicKeyAlgorithm == x509.RSA && certs[i].PublicKey.(*rsa.PublicKey).N.BitLen() > maxRSAKeySize {
+ c.sendAlert(alertBadCertificate)
+ return fmt.Errorf("tls: client sent certificate containing RSA key larger than %d bits", maxRSAKeySize)
+ }
}
if len(certs) == 0 && requiresClientCert(c.config.ClientAuth) {
@@ -855,7 +875,7 @@ func (c *Conn) processCertsFromClient(certificate Certificate) error {
chains, err := certs[0].Verify(opts)
if err != nil {
c.sendAlert(alertBadCertificate)
- return errors.New("tls: failed to verify client certificate: " + err.Error())
+ return &CertificateVerificationError{UnverifiedCertificates: certs, Err: err}
}
c.verifiedChains = chains
diff --git a/vendor/github.com/marten-seemann/qtls-go1-18/handshake_server_tls13.go b/vendor/github.com/quic-go/qtls-go1-20/handshake_server_tls13.go
similarity index 89%
rename from vendor/github.com/marten-seemann/qtls-go1-18/handshake_server_tls13.go
rename to vendor/github.com/quic-go/qtls-go1-20/handshake_server_tls13.go
index dd8d801e6..6189c7806 100644
--- a/vendor/github.com/marten-seemann/qtls-go1-18/handshake_server_tls13.go
+++ b/vendor/github.com/quic-go/qtls-go1-20/handshake_server_tls13.go
@@ -13,7 +13,6 @@ import (
"errors"
"hash"
"io"
- "sync/atomic"
"time"
)
@@ -46,6 +45,10 @@ type serverHandshakeStateTLS13 struct {
func (hs *serverHandshakeStateTLS13) handshake() error {
c := hs.c
+ if needFIPS() {
+ return errors.New("tls: internal error: TLS 1.3 reached in FIPS mode")
+ }
+
// For an overview of the TLS 1.3 handshake, see RFC 8446, Section 2.
if err := hs.processClientHello(); err != nil {
return err
@@ -53,6 +56,7 @@ func (hs *serverHandshakeStateTLS13) handshake() error {
if err := hs.checkForResumption(); err != nil {
return err
}
+ c.updateConnectionState()
if err := hs.pickCertificate(); err != nil {
return err
}
@@ -75,12 +79,13 @@ func (hs *serverHandshakeStateTLS13) handshake() error {
if err := hs.readClientCertificate(); err != nil {
return err
}
+ c.updateConnectionState()
if err := hs.readClientFinished(); err != nil {
return err
}
- atomic.StoreUint32(&c.handshakeStatus, 1)
-
+ c.isHandshakeComplete.Store(true)
+ c.updateConnectionState()
return nil
}
@@ -141,27 +146,14 @@ func (hs *serverHandshakeStateTLS13) processClientHello() error {
hs.hello.sessionId = hs.clientHello.sessionId
hs.hello.compressionMethod = compressionNone
- if hs.suite == nil {
- var preferenceList []uint16
- for _, suiteID := range c.config.CipherSuites {
- for _, suite := range cipherSuitesTLS13 {
- if suite.id == suiteID {
- preferenceList = append(preferenceList, suiteID)
- break
- }
- }
- }
- if len(preferenceList) == 0 {
- preferenceList = defaultCipherSuitesTLS13
- if !hasAESGCMHardwareSupport || !aesgcmPreferred(hs.clientHello.cipherSuites) {
- preferenceList = defaultCipherSuitesTLS13NoAES
- }
- }
- for _, suiteID := range preferenceList {
- hs.suite = mutualCipherSuiteTLS13(hs.clientHello.cipherSuites, suiteID)
- if hs.suite != nil {
- break
- }
+ preferenceList := defaultCipherSuitesTLS13
+ if !hasAESGCMHardwareSupport || !aesgcmPreferred(hs.clientHello.cipherSuites) {
+ preferenceList = defaultCipherSuitesTLS13NoAES
+ }
+ for _, suiteID := range preferenceList {
+ hs.suite = mutualCipherSuiteTLS13(hs.clientHello.cipherSuites, suiteID)
+ if hs.suite != nil {
+ break
}
}
if hs.suite == nil {
@@ -206,18 +198,23 @@ GroupSelection:
clientKeyShare = &hs.clientHello.keyShares[0]
}
- if _, ok := curveForCurveID(selectedGroup); selectedGroup != X25519 && !ok {
+ if _, ok := curveForCurveID(selectedGroup); !ok {
c.sendAlert(alertInternalError)
return errors.New("tls: CurvePreferences includes unsupported curve")
}
- params, err := generateECDHEParameters(c.config.rand(), selectedGroup)
+ key, err := generateECDHEKey(c.config.rand(), selectedGroup)
if err != nil {
c.sendAlert(alertInternalError)
return err
}
- hs.hello.serverShare = keyShare{group: selectedGroup, data: params.PublicKey()}
- hs.sharedKey = params.SharedKey(clientKeyShare.data)
- if hs.sharedKey == nil {
+ hs.hello.serverShare = keyShare{group: selectedGroup, data: key.PublicKey().Bytes()}
+ peerKey, err := key.Curve().NewPublicKey(clientKeyShare.data)
+ if err != nil {
+ c.sendAlert(alertIllegalParameter)
+ return errors.New("tls: invalid client key share")
+ }
+ hs.sharedKey, err = key.ECDH(peerKey)
+ if err != nil {
c.sendAlert(alertIllegalParameter)
return errors.New("tls: invalid client key share")
}
@@ -328,7 +325,12 @@ func (hs *serverHandshakeStateTLS13) checkForResumption() error {
c.sendAlert(alertInternalError)
return errors.New("tls: internal error: failed to clone hash")
}
- transcript.Write(hs.clientHello.marshalWithoutBinders())
+ clientHelloBytes, err := hs.clientHello.marshalWithoutBinders()
+ if err != nil {
+ c.sendAlert(alertInternalError)
+ return err
+ }
+ transcript.Write(clientHelloBytes)
pskBinder := hs.suite.finishedHash(binderKey, transcript)
if !hmac.Equal(hs.clientHello.pskBinders[i], pskBinder) {
c.sendAlert(alertDecryptError)
@@ -341,7 +343,12 @@ func (hs *serverHandshakeStateTLS13) checkForResumption() error {
}
h := cloneHash(hs.transcript, hs.suite.hash)
- h.Write(hs.clientHello.marshal())
+ clientHelloWithBindersBytes, err := hs.clientHello.marshal()
+ if err != nil {
+ c.sendAlert(alertInternalError)
+ return err
+ }
+ h.Write(clientHelloWithBindersBytes)
if hs.encryptedExtensions.earlyData {
clientEarlySecret := hs.suite.deriveSecret(hs.earlySecret, "c e traffic", h)
c.in.exportKey(Encryption0RTT, hs.suite, clientEarlySecret)
@@ -430,8 +437,7 @@ func (hs *serverHandshakeStateTLS13) sendDummyChangeCipherSpec() error {
}
hs.sentDummyCCS = true
- _, err := hs.c.writeRecord(recordTypeChangeCipherSpec, []byte{1})
- return err
+ return hs.c.writeChangeCipherRecord()
}
func (hs *serverHandshakeStateTLS13) doHelloRetryRequest(selectedGroup CurveID) error {
@@ -439,7 +445,9 @@ func (hs *serverHandshakeStateTLS13) doHelloRetryRequest(selectedGroup CurveID)
// The first ClientHello gets double-hashed into the transcript upon a
// HelloRetryRequest. See RFC 8446, Section 4.4.1.
- hs.transcript.Write(hs.clientHello.marshal())
+ if err := transcriptMsg(hs.clientHello, hs.transcript); err != nil {
+ return err
+ }
chHash := hs.transcript.Sum(nil)
hs.transcript.Reset()
hs.transcript.Write([]byte{typeMessageHash, 0, 0, uint8(len(chHash))})
@@ -455,8 +463,7 @@ func (hs *serverHandshakeStateTLS13) doHelloRetryRequest(selectedGroup CurveID)
selectedGroup: selectedGroup,
}
- hs.transcript.Write(helloRetryRequest.marshal())
- if _, err := c.writeRecord(recordTypeHandshake, helloRetryRequest.marshal()); err != nil {
+ if _, err := hs.c.writeHandshakeRecord(helloRetryRequest, hs.transcript); err != nil {
return err
}
@@ -464,7 +471,8 @@ func (hs *serverHandshakeStateTLS13) doHelloRetryRequest(selectedGroup CurveID)
return err
}
- msg, err := c.readHandshake()
+ // clientHelloMsg is not included in the transcript.
+ msg, err := c.readHandshake(nil)
if err != nil {
return err
}
@@ -560,9 +568,10 @@ func illegalClientHelloChange(ch, ch1 *clientHelloMsg) bool {
func (hs *serverHandshakeStateTLS13) sendServerParameters() error {
c := hs.c
- hs.transcript.Write(hs.clientHello.marshal())
- hs.transcript.Write(hs.hello.marshal())
- if _, err := c.writeRecord(recordTypeHandshake, hs.hello.marshal()); err != nil {
+ if err := transcriptMsg(hs.clientHello, hs.transcript); err != nil {
+ return err
+ }
+ if _, err := hs.c.writeHandshakeRecord(hs.hello, hs.transcript); err != nil {
return err
}
@@ -605,8 +614,7 @@ func (hs *serverHandshakeStateTLS13) sendServerParameters() error {
hs.encryptedExtensions.additionalExtensions = hs.c.extraConfig.GetExtensions(typeEncryptedExtensions)
}
- hs.transcript.Write(hs.encryptedExtensions.marshal())
- if _, err := c.writeRecord(recordTypeHandshake, hs.encryptedExtensions.marshal()); err != nil {
+ if _, err := hs.c.writeHandshakeRecord(hs.encryptedExtensions, hs.transcript); err != nil {
return err
}
@@ -630,13 +638,12 @@ func (hs *serverHandshakeStateTLS13) sendServerCertificate() error {
certReq := new(certificateRequestMsgTLS13)
certReq.ocspStapling = true
certReq.scts = true
- certReq.supportedSignatureAlgorithms = supportedSignatureAlgorithms
+ certReq.supportedSignatureAlgorithms = supportedSignatureAlgorithms()
if c.config.ClientCAs != nil {
certReq.certificateAuthorities = c.config.ClientCAs.Subjects()
}
- hs.transcript.Write(certReq.marshal())
- if _, err := c.writeRecord(recordTypeHandshake, certReq.marshal()); err != nil {
+ if _, err := hs.c.writeHandshakeRecord(certReq, hs.transcript); err != nil {
return err
}
}
@@ -647,8 +654,7 @@ func (hs *serverHandshakeStateTLS13) sendServerCertificate() error {
certMsg.scts = hs.clientHello.scts && len(hs.cert.SignedCertificateTimestamps) > 0
certMsg.ocspStapling = hs.clientHello.ocspStapling && len(hs.cert.OCSPStaple) > 0
- hs.transcript.Write(certMsg.marshal())
- if _, err := c.writeRecord(recordTypeHandshake, certMsg.marshal()); err != nil {
+ if _, err := hs.c.writeHandshakeRecord(certMsg, hs.transcript); err != nil {
return err
}
@@ -679,8 +685,7 @@ func (hs *serverHandshakeStateTLS13) sendServerCertificate() error {
}
certVerifyMsg.signature = sig
- hs.transcript.Write(certVerifyMsg.marshal())
- if _, err := c.writeRecord(recordTypeHandshake, certVerifyMsg.marshal()); err != nil {
+ if _, err := hs.c.writeHandshakeRecord(certVerifyMsg, hs.transcript); err != nil {
return err
}
@@ -694,8 +699,7 @@ func (hs *serverHandshakeStateTLS13) sendServerFinished() error {
verifyData: hs.suite.finishedHash(c.out.trafficSecret, hs.transcript),
}
- hs.transcript.Write(finished.marshal())
- if _, err := c.writeRecord(recordTypeHandshake, finished.marshal()); err != nil {
+ if _, err := hs.c.writeHandshakeRecord(finished, hs.transcript); err != nil {
return err
}
@@ -757,7 +761,9 @@ func (hs *serverHandshakeStateTLS13) sendSessionTickets() error {
finishedMsg := &finishedMsg{
verifyData: hs.clientFinished,
}
- hs.transcript.Write(finishedMsg.marshal())
+ if err := transcriptMsg(finishedMsg, hs.transcript); err != nil {
+ return err
+ }
if !hs.shouldSendSessionTickets() {
return nil
@@ -778,7 +784,7 @@ func (hs *serverHandshakeStateTLS13) sendSessionTickets() error {
return err
}
- if _, err := c.writeRecord(recordTypeHandshake, m.marshal()); err != nil {
+ if _, err := c.writeHandshakeRecord(m, nil); err != nil {
return err
}
@@ -803,7 +809,7 @@ func (hs *serverHandshakeStateTLS13) readClientCertificate() error {
// If we requested a client certificate, then the client must send a
// certificate message. If it's empty, no CertificateVerify is sent.
- msg, err := c.readHandshake()
+ msg, err := c.readHandshake(hs.transcript)
if err != nil {
return err
}
@@ -813,7 +819,6 @@ func (hs *serverHandshakeStateTLS13) readClientCertificate() error {
c.sendAlert(alertUnexpectedMessage)
return unexpectedMessageError(certMsg, msg)
}
- hs.transcript.Write(certMsg.marshal())
if err := c.processCertsFromClient(certMsg.certificate); err != nil {
return err
@@ -827,7 +832,10 @@ func (hs *serverHandshakeStateTLS13) readClientCertificate() error {
}
if len(certMsg.certificate.Certificate) != 0 {
- msg, err = c.readHandshake()
+ // certificateVerifyMsg is included in the transcript, but not until
+ // after we verify the handshake signature, since the state before
+ // this message was sent is used.
+ msg, err = c.readHandshake(nil)
if err != nil {
return err
}
@@ -839,7 +847,7 @@ func (hs *serverHandshakeStateTLS13) readClientCertificate() error {
}
// See RFC 8446, Section 4.4.3.
- if !isSupportedSignatureAlgorithm(certVerify.signatureAlgorithm, supportedSignatureAlgorithms) {
+ if !isSupportedSignatureAlgorithm(certVerify.signatureAlgorithm, supportedSignatureAlgorithms()) {
c.sendAlert(alertIllegalParameter)
return errors.New("tls: client certificate used with invalid signature algorithm")
}
@@ -858,7 +866,9 @@ func (hs *serverHandshakeStateTLS13) readClientCertificate() error {
return errors.New("tls: invalid signature by the client certificate: " + err.Error())
}
- hs.transcript.Write(certVerify.marshal())
+ if err := transcriptMsg(certVerify, hs.transcript); err != nil {
+ return err
+ }
}
// If we waited until the client certificates to send session tickets, we
@@ -873,7 +883,8 @@ func (hs *serverHandshakeStateTLS13) readClientCertificate() error {
func (hs *serverHandshakeStateTLS13) readClientFinished() error {
c := hs.c
- msg, err := c.readHandshake()
+ // finishedMsg is not included in the transcript.
+ msg, err := c.readHandshake(nil)
if err != nil {
return err
}
diff --git a/vendor/github.com/marten-seemann/qtls-go1-18/key_agreement.go b/vendor/github.com/quic-go/qtls-go1-20/key_agreement.go
similarity index 94%
rename from vendor/github.com/marten-seemann/qtls-go1-18/key_agreement.go
rename to vendor/github.com/quic-go/qtls-go1-20/key_agreement.go
index 453a8dcf0..f926869a1 100644
--- a/vendor/github.com/marten-seemann/qtls-go1-18/key_agreement.go
+++ b/vendor/github.com/quic-go/qtls-go1-20/key_agreement.go
@@ -6,6 +6,7 @@ package qtls
import (
"crypto"
+ "crypto/ecdh"
"crypto/md5"
"crypto/rsa"
"crypto/sha1"
@@ -157,7 +158,7 @@ func hashForServerKeyExchange(sigType uint8, hashFunc crypto.Hash, version uint1
type ecdheKeyAgreement struct {
version uint16
isRSA bool
- params ecdheParameters
+ key *ecdh.PrivateKey
// ckx and preMasterSecret are generated in processServerKeyExchange
// and returned in generateClientKeyExchange.
@@ -177,18 +178,18 @@ func (ka *ecdheKeyAgreement) generateServerKeyExchange(config *config, cert *Cer
if curveID == 0 {
return nil, errors.New("tls: no supported elliptic curves offered")
}
- if _, ok := curveForCurveID(curveID); curveID != X25519 && !ok {
+ if _, ok := curveForCurveID(curveID); !ok {
return nil, errors.New("tls: CurvePreferences includes unsupported curve")
}
- params, err := generateECDHEParameters(config.rand(), curveID)
+ key, err := generateECDHEKey(config.rand(), curveID)
if err != nil {
return nil, err
}
- ka.params = params
+ ka.key = key
// See RFC 4492, Section 5.4.
- ecdhePublic := params.PublicKey()
+ ecdhePublic := key.PublicKey().Bytes()
serverECDHEParams := make([]byte, 1+2+1+len(ecdhePublic))
serverECDHEParams[0] = 3 // named curve
serverECDHEParams[1] = byte(curveID >> 8)
@@ -259,8 +260,12 @@ func (ka *ecdheKeyAgreement) processClientKeyExchange(config *config, cert *Cert
return nil, errClientKeyExchange
}
- preMasterSecret := ka.params.SharedKey(ckx.ciphertext[1:])
- if preMasterSecret == nil {
+ peerKey, err := ka.key.Curve().NewPublicKey(ckx.ciphertext[1:])
+ if err != nil {
+ return nil, errClientKeyExchange
+ }
+ preMasterSecret, err := ka.key.ECDH(peerKey)
+ if err != nil {
return nil, errClientKeyExchange
}
@@ -288,22 +293,26 @@ func (ka *ecdheKeyAgreement) processServerKeyExchange(config *config, clientHell
return errServerKeyExchange
}
- if _, ok := curveForCurveID(curveID); curveID != X25519 && !ok {
+ if _, ok := curveForCurveID(curveID); !ok {
return errors.New("tls: server selected unsupported curve")
}
- params, err := generateECDHEParameters(config.rand(), curveID)
+ key, err := generateECDHEKey(config.rand(), curveID)
if err != nil {
return err
}
- ka.params = params
+ ka.key = key
- ka.preMasterSecret = params.SharedKey(publicKey)
- if ka.preMasterSecret == nil {
+ peerKey, err := key.Curve().NewPublicKey(publicKey)
+ if err != nil {
+ return errServerKeyExchange
+ }
+ ka.preMasterSecret, err = key.ECDH(peerKey)
+ if err != nil {
return errServerKeyExchange
}
- ourPublicKey := params.PublicKey()
+ ourPublicKey := key.PublicKey().Bytes()
ka.ckx = new(clientKeyExchangeMsg)
ka.ckx.ciphertext = make([]byte, 1+len(ourPublicKey))
ka.ckx.ciphertext[0] = byte(len(ourPublicKey))
diff --git a/vendor/github.com/marten-seemann/qtls-go1-17/key_schedule.go b/vendor/github.com/quic-go/qtls-go1-20/key_schedule.go
similarity index 63%
rename from vendor/github.com/marten-seemann/qtls-go1-17/key_schedule.go
rename to vendor/github.com/quic-go/qtls-go1-20/key_schedule.go
index da13904a6..c410a3e82 100644
--- a/vendor/github.com/marten-seemann/qtls-go1-17/key_schedule.go
+++ b/vendor/github.com/quic-go/qtls-go1-20/key_schedule.go
@@ -5,15 +5,14 @@
package qtls
import (
- "crypto/elliptic"
+ "crypto/ecdh"
"crypto/hmac"
"errors"
+ "fmt"
"hash"
"io"
- "math/big"
"golang.org/x/crypto/cryptobyte"
- "golang.org/x/crypto/curve25519"
"golang.org/x/crypto/hkdf"
)
@@ -42,8 +41,24 @@ func (c *cipherSuiteTLS13) expandLabel(secret []byte, label string, context []by
hkdfLabel.AddUint8LengthPrefixed(func(b *cryptobyte.Builder) {
b.AddBytes(context)
})
+ hkdfLabelBytes, err := hkdfLabel.Bytes()
+ if err != nil {
+ // Rather than calling BytesOrPanic, we explicitly handle this error, in
+ // order to provide a reasonable error message. It should be basically
+ // impossible for this to panic, and routing errors back through the
+ // tree rooted in this function is quite painful. The labels are fixed
+ // size, and the context is either a fixed-length computed hash, or
+ // parsed from a field which has the same length limitation. As such, an
+ // error here is likely to only be caused during development.
+ //
+ // NOTE: another reasonable approach here might be to return a
+ // randomized slice if we encounter an error, which would break the
+ // connection, but avoid panicking. This would perhaps be safer but
+ // significantly more confusing to users.
+ panic(fmt.Errorf("failed to construct HKDF label: %s", err))
+ }
out := make([]byte, length)
- n, err := hkdf.Expand(c.hash.New, secret, hkdfLabel.BytesOrPanic()).Read(out)
+ n, err := hkdf.Expand(c.hash.New, secret, hkdfLabelBytes).Read(out)
if err != nil || n != length {
panic("tls: HKDF-Expand-Label invocation failed unexpectedly")
}
@@ -101,99 +116,43 @@ func (c *cipherSuiteTLS13) exportKeyingMaterial(masterSecret []byte, transcript
}
}
-// ecdheParameters implements Diffie-Hellman with either NIST curves or X25519,
+// generateECDHEKey returns a PrivateKey that implements Diffie-Hellman
// according to RFC 8446, Section 4.2.8.2.
-type ecdheParameters interface {
- CurveID() CurveID
- PublicKey() []byte
- SharedKey(peerPublicKey []byte) []byte
-}
-
-func generateECDHEParameters(rand io.Reader, curveID CurveID) (ecdheParameters, error) {
- if curveID == X25519 {
- privateKey := make([]byte, curve25519.ScalarSize)
- if _, err := io.ReadFull(rand, privateKey); err != nil {
- return nil, err
- }
- publicKey, err := curve25519.X25519(privateKey, curve25519.Basepoint)
- if err != nil {
- return nil, err
- }
- return &x25519Parameters{privateKey: privateKey, publicKey: publicKey}, nil
- }
-
+func generateECDHEKey(rand io.Reader, curveID CurveID) (*ecdh.PrivateKey, error) {
curve, ok := curveForCurveID(curveID)
if !ok {
return nil, errors.New("tls: internal error: unsupported curve")
}
- p := &nistParameters{curveID: curveID}
- var err error
- p.privateKey, p.x, p.y, err = elliptic.GenerateKey(curve, rand)
- if err != nil {
- return nil, err
- }
- return p, nil
+ return curve.GenerateKey(rand)
}
-func curveForCurveID(id CurveID) (elliptic.Curve, bool) {
+func curveForCurveID(id CurveID) (ecdh.Curve, bool) {
switch id {
+ case X25519:
+ return ecdh.X25519(), true
case CurveP256:
- return elliptic.P256(), true
+ return ecdh.P256(), true
case CurveP384:
- return elliptic.P384(), true
+ return ecdh.P384(), true
case CurveP521:
- return elliptic.P521(), true
+ return ecdh.P521(), true
default:
return nil, false
}
}
-type nistParameters struct {
- privateKey []byte
- x, y *big.Int // public key
- curveID CurveID
-}
-
-func (p *nistParameters) CurveID() CurveID {
- return p.curveID
-}
-
-func (p *nistParameters) PublicKey() []byte {
- curve, _ := curveForCurveID(p.curveID)
- return elliptic.Marshal(curve, p.x, p.y)
-}
-
-func (p *nistParameters) SharedKey(peerPublicKey []byte) []byte {
- curve, _ := curveForCurveID(p.curveID)
- // Unmarshal also checks whether the given point is on the curve.
- x, y := elliptic.Unmarshal(curve, peerPublicKey)
- if x == nil {
- return nil
- }
-
- xShared, _ := curve.ScalarMult(x, y, p.privateKey)
- sharedKey := make([]byte, (curve.Params().BitSize+7)/8)
- return xShared.FillBytes(sharedKey)
-}
-
-type x25519Parameters struct {
- privateKey []byte
- publicKey []byte
-}
-
-func (p *x25519Parameters) CurveID() CurveID {
- return X25519
-}
-
-func (p *x25519Parameters) PublicKey() []byte {
- return p.publicKey[:]
-}
-
-func (p *x25519Parameters) SharedKey(peerPublicKey []byte) []byte {
- sharedKey, err := curve25519.X25519(p.privateKey, peerPublicKey)
- if err != nil {
- return nil
+func curveIDForCurve(curve ecdh.Curve) (CurveID, bool) {
+ switch curve {
+ case ecdh.X25519():
+ return X25519, true
+ case ecdh.P256():
+ return CurveP256, true
+ case ecdh.P384():
+ return CurveP384, true
+ case ecdh.P521():
+ return CurveP521, true
+ default:
+ return 0, false
}
- return sharedKey
}
diff --git a/vendor/github.com/quic-go/qtls-go1-20/notboring.go b/vendor/github.com/quic-go/qtls-go1-20/notboring.go
new file mode 100644
index 000000000..f292e4f02
--- /dev/null
+++ b/vendor/github.com/quic-go/qtls-go1-20/notboring.go
@@ -0,0 +1,18 @@
+// Copyright 2022 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+package qtls
+
+func needFIPS() bool { return false }
+
+func supportedSignatureAlgorithms() []SignatureScheme {
+ return defaultSupportedSignatureAlgorithms
+}
+
+func fipsMinVersion(c *config) uint16 { panic("fipsMinVersion") }
+func fipsMaxVersion(c *config) uint16 { panic("fipsMaxVersion") }
+func fipsCurvePreferences(c *config) []CurveID { panic("fipsCurvePreferences") }
+func fipsCipherSuites(c *config) []uint16 { panic("fipsCipherSuites") }
+
+var fipsSupportedSignatureAlgorithms []SignatureScheme
diff --git a/vendor/github.com/marten-seemann/qtls-go1-18/prf.go b/vendor/github.com/quic-go/qtls-go1-20/prf.go
similarity index 99%
rename from vendor/github.com/marten-seemann/qtls-go1-18/prf.go
rename to vendor/github.com/quic-go/qtls-go1-20/prf.go
index 9eb0221a0..147128918 100644
--- a/vendor/github.com/marten-seemann/qtls-go1-18/prf.go
+++ b/vendor/github.com/quic-go/qtls-go1-20/prf.go
@@ -215,7 +215,7 @@ func (h finishedHash) serverSum(masterSecret []byte) []byte {
// hashForClientCertificate returns the handshake messages so far, pre-hashed if
// necessary, suitable for signing by a TLS client certificate.
-func (h finishedHash) hashForClientCertificate(sigType uint8, hashAlg crypto.Hash, masterSecret []byte) []byte {
+func (h finishedHash) hashForClientCertificate(sigType uint8, hashAlg crypto.Hash) []byte {
if (h.version >= VersionTLS12 || sigType == signatureEd25519) && h.buffer == nil {
panic("tls: handshake hash for a client certificate requested after discarding the handshake buffer")
}
diff --git a/vendor/github.com/marten-seemann/qtls-go1-17/ticket.go b/vendor/github.com/quic-go/qtls-go1-20/ticket.go
similarity index 95%
rename from vendor/github.com/marten-seemann/qtls-go1-17/ticket.go
rename to vendor/github.com/quic-go/qtls-go1-20/ticket.go
index 81e8a52ea..1b9289c2f 100644
--- a/vendor/github.com/marten-seemann/qtls-go1-17/ticket.go
+++ b/vendor/github.com/quic-go/qtls-go1-20/ticket.go
@@ -34,7 +34,7 @@ type sessionState struct {
usedOldKey bool
}
-func (m *sessionState) marshal() []byte {
+func (m *sessionState) marshal() ([]byte, error) {
var b cryptobyte.Builder
b.AddUint16(m.vers)
b.AddUint16(m.cipherSuite)
@@ -49,7 +49,7 @@ func (m *sessionState) marshal() []byte {
})
}
})
- return b.BytesOrPanic()
+ return b.Bytes()
}
func (m *sessionState) unmarshal(data []byte) bool {
@@ -94,7 +94,7 @@ type sessionStateTLS13 struct {
appData []byte
}
-func (m *sessionStateTLS13) marshal() []byte {
+func (m *sessionStateTLS13) marshal() ([]byte, error) {
var b cryptobyte.Builder
b.AddUint16(VersionTLS13)
b.AddUint8(2) // revision
@@ -111,7 +111,7 @@ func (m *sessionStateTLS13) marshal() []byte {
b.AddUint16LengthPrefixed(func(b *cryptobyte.Builder) {
b.AddBytes(m.appData)
})
- return b.BytesOrPanic()
+ return b.Bytes()
}
func (m *sessionStateTLS13) unmarshal(data []byte) bool {
@@ -227,8 +227,11 @@ func (c *Conn) getSessionTicketMsg(appData []byte) (*newSessionTicketMsgTLS13, e
if c.extraConfig != nil {
state.maxEarlyData = c.extraConfig.MaxEarlyData
}
- var err error
- m.label, err = c.encryptTicket(state.marshal())
+ stateBytes, err := state.marshal()
+ if err != nil {
+ return nil, err
+ }
+ m.label, err = c.encryptTicket(stateBytes)
if err != nil {
return nil, err
}
@@ -259,7 +262,7 @@ func (c *Conn) getSessionTicketMsg(appData []byte) (*newSessionTicketMsgTLS13, e
// The ticket may be nil if config.SessionTicketsDisabled is set,
// or if the client isn't able to receive session tickets.
func (c *Conn) GetSessionTicket(appData []byte) ([]byte, error) {
- if c.isClient || !c.handshakeComplete() || c.extraConfig == nil || c.extraConfig.AlternativeRecordLayer == nil {
+ if c.isClient || !c.isHandshakeComplete.Load() || c.extraConfig == nil || c.extraConfig.AlternativeRecordLayer == nil {
return nil, errors.New("GetSessionTicket is only valid for servers after completion of the handshake, and if an alternative record layer is set.")
}
if c.config.SessionTicketsDisabled {
@@ -270,5 +273,5 @@ func (c *Conn) GetSessionTicket(appData []byte) ([]byte, error) {
if err != nil {
return nil, err
}
- return m.marshal(), nil
+ return m.marshal()
}
diff --git a/vendor/github.com/marten-seemann/qtls-go1-18/tls.go b/vendor/github.com/quic-go/qtls-go1-20/tls.go
similarity index 100%
rename from vendor/github.com/marten-seemann/qtls-go1-18/tls.go
rename to vendor/github.com/quic-go/qtls-go1-20/tls.go
diff --git a/vendor/github.com/marten-seemann/qtls-go1-17/unsafe.go b/vendor/github.com/quic-go/qtls-go1-20/unsafe.go
similarity index 100%
rename from vendor/github.com/marten-seemann/qtls-go1-17/unsafe.go
rename to vendor/github.com/quic-go/qtls-go1-20/unsafe.go
diff --git a/vendor/github.com/lucas-clemente/quic-go/.gitignore b/vendor/github.com/quic-go/quic-go/.gitignore
similarity index 100%
rename from vendor/github.com/lucas-clemente/quic-go/.gitignore
rename to vendor/github.com/quic-go/quic-go/.gitignore
diff --git a/vendor/github.com/lucas-clemente/quic-go/.golangci.yml b/vendor/github.com/quic-go/quic-go/.golangci.yml
similarity index 100%
rename from vendor/github.com/lucas-clemente/quic-go/.golangci.yml
rename to vendor/github.com/quic-go/quic-go/.golangci.yml
diff --git a/vendor/github.com/lucas-clemente/quic-go/Changelog.md b/vendor/github.com/quic-go/quic-go/Changelog.md
similarity index 97%
rename from vendor/github.com/lucas-clemente/quic-go/Changelog.md
rename to vendor/github.com/quic-go/quic-go/Changelog.md
index c1c332327..82df5fb24 100644
--- a/vendor/github.com/lucas-clemente/quic-go/Changelog.md
+++ b/vendor/github.com/quic-go/quic-go/Changelog.md
@@ -101,8 +101,8 @@
- Add a `quic.Config` option to configure keep-alive
- Rename the STK to Cookie
- Implement `net.Conn`-style deadlines for streams
-- Remove the `tls.Config` from the `quic.Config`. The `tls.Config` must now be passed to the `Dial` and `Listen` functions as a separate parameter. See the [Godoc](https://godoc.org/github.com/lucas-clemente/quic-go) for details.
-- Changed the log level environment variable to only accept strings ("DEBUG", "INFO", "ERROR"), see [the wiki](https://github.com/lucas-clemente/quic-go/wiki/Logging) for more details.
+- Remove the `tls.Config` from the `quic.Config`. The `tls.Config` must now be passed to the `Dial` and `Listen` functions as a separate parameter. See the [Godoc](https://godoc.org/github.com/quic-go/quic-go) for details.
+- Changed the log level environment variable to only accept strings ("DEBUG", "INFO", "ERROR"), see [the wiki](https://github.com/quic-go/quic-go/wiki/Logging) for more details.
- Rename the `h2quic.QuicRoundTripper` to `h2quic.RoundTripper`
- Changed `h2quic.Server.Serve()` to accept a `net.PacketConn`
- Drop support for Go 1.7 and 1.8.
diff --git a/vendor/github.com/lucas-clemente/quic-go/LICENSE b/vendor/github.com/quic-go/quic-go/LICENSE
similarity index 100%
rename from vendor/github.com/lucas-clemente/quic-go/LICENSE
rename to vendor/github.com/quic-go/quic-go/LICENSE
diff --git a/vendor/github.com/quic-go/quic-go/README.md b/vendor/github.com/quic-go/quic-go/README.md
new file mode 100644
index 000000000..977bb9283
--- /dev/null
+++ b/vendor/github.com/quic-go/quic-go/README.md
@@ -0,0 +1,63 @@
+# A QUIC implementation in pure Go
+
+
+
+[![PkgGoDev](https://pkg.go.dev/badge/github.com/quic-go/quic-go)](https://pkg.go.dev/github.com/quic-go/quic-go)
+[![Code Coverage](https://img.shields.io/codecov/c/github/quic-go/quic-go/master.svg?style=flat-square)](https://codecov.io/gh/quic-go/quic-go/)
+
+quic-go is an implementation of the QUIC protocol ([RFC 9000](https://datatracker.ietf.org/doc/html/rfc9000), [RFC 9001](https://datatracker.ietf.org/doc/html/rfc9001), [RFC 9002](https://datatracker.ietf.org/doc/html/rfc9002)) in Go, including the Unreliable Datagram Extension ([RFC 9221](https://datatracker.ietf.org/doc/html/rfc9221)) and Datagram Packetization Layer Path MTU
+ Discovery (DPLPMTUD, [RFC 8899](https://datatracker.ietf.org/doc/html/rfc8899)). It has support for HTTP/3 ([RFC 9114](https://datatracker.ietf.org/doc/html/rfc9114)), including QPACK ([RFC 9204](https://datatracker.ietf.org/doc/html/rfc9204)).
+
+In addition to the RFCs listed above, it currently implements the [IETF QUIC draft-29](https://tools.ietf.org/html/draft-ietf-quic-transport-29). Support for draft-29 will eventually be dropped, as it is phased out of the ecosystem.
+
+## Guides
+
+*We currently support Go 1.19.x and Go 1.20.x*
+
+Running tests:
+
+ go test ./...
+
+### QUIC without HTTP/3
+
+Take a look at [this echo example](example/echo/echo.go).
+
+## Usage
+
+### As a server
+
+See the [example server](example/main.go). Starting a QUIC server is very similar to the standard lib http in go:
+
+```go
+http.Handle("/", http.FileServer(http.Dir(wwwDir)))
+http3.ListenAndServeQUIC("localhost:4242", "/path/to/cert/chain.pem", "/path/to/privkey.pem", nil)
+```
+
+### As a client
+
+See the [example client](example/client/main.go). Use a `http3.RoundTripper` as a `Transport` in a `http.Client`.
+
+```go
+http.Client{
+ Transport: &http3.RoundTripper{},
+}
+```
+
+## Projects using quic-go
+
+| Project | Description | Stars |
+|-----------------------------------------------------------|---------------------------------------------------------------------------------------------------------|-------|
+| [AdGuardHome](https://github.com/AdguardTeam/AdGuardHome) | Free and open source, powerful network-wide ads & trackers blocking DNS server. | ![GitHub Repo stars](https://img.shields.io/github/stars/AdguardTeam/AdGuardHome?style=flat-square) |
+| [algernon](https://github.com/xyproto/algernon) | Small self-contained pure-Go web server with Lua, Markdown, HTTP/2, QUIC, Redis and PostgreSQL support | ![GitHub Repo stars](https://img.shields.io/github/stars/xyproto/algernon?style=flat-square) |
+| [caddy](https://github.com/caddyserver/caddy/) | Fast, multi-platform web server with automatic HTTPS | ![GitHub Repo stars](https://img.shields.io/github/stars/caddyserver/caddy?style=flat-square) |
+| [cloudflared](https://github.com/cloudflare/cloudflared) | A tunneling daemon that proxies traffic from the Cloudflare network to your origins | ![GitHub Repo stars](https://img.shields.io/github/stars/cloudflare/cloudflared?style=flat-square) |
+| [go-libp2p](https://github.com/libp2p/go-libp2p) | libp2p implementation in Go, powering [Kubo](https://github.com/ipfs/kubo) (IPFS) and [Lotus](https://github.com/filecoin-project/lotus) (Filecoin), among others | ![GitHub Repo stars](https://img.shields.io/github/stars/libp2p/go-libp2p?style=flat-square) |
+| [OONI Probe](https://github.com/ooni/probe-cli) | Next generation OONI Probe. Library and CLI tool. | ![GitHub Repo stars](https://img.shields.io/github/stars/ooni/probe-cli?style=flat-square) |
+| [syncthing](https://github.com/syncthing/syncthing/) | Open Source Continuous File Synchronization | ![GitHub Repo stars](https://img.shields.io/github/stars/syncthing/syncthing?style=flat-square) |
+| [traefik](https://github.com/traefik/traefik) | The Cloud Native Application Proxy | ![GitHub Repo stars](https://img.shields.io/github/stars/traefik/traefik?style=flat-square) |
+| [v2ray-core](https://github.com/v2fly/v2ray-core) | A platform for building proxies to bypass network restrictions | ![GitHub Repo stars](https://img.shields.io/github/stars/v2fly/v2ray-core?style=flat-square) |
+| [YoMo](https://github.com/yomorun/yomo) | Streaming Serverless Framework for Geo-distributed System | ![GitHub Repo stars](https://img.shields.io/github/stars/yomorun/yomo?style=flat-square) |
+
+## Contributing
+
+We are always happy to welcome new contributors! We have a number of self-contained issues that are suitable for first-time contributors, they are tagged with [help wanted](https://github.com/quic-go/quic-go/issues?q=is%3Aissue+is%3Aopen+label%3A%22help+wanted%22). If you have any questions, please feel free to reach out by opening an issue or leaving a comment.
diff --git a/vendor/github.com/lucas-clemente/quic-go/buffer_pool.go b/vendor/github.com/quic-go/quic-go/buffer_pool.go
similarity index 96%
rename from vendor/github.com/lucas-clemente/quic-go/buffer_pool.go
rename to vendor/github.com/quic-go/quic-go/buffer_pool.go
index c0b7067da..f6745b080 100644
--- a/vendor/github.com/lucas-clemente/quic-go/buffer_pool.go
+++ b/vendor/github.com/quic-go/quic-go/buffer_pool.go
@@ -3,7 +3,7 @@ package quic
import (
"sync"
- "github.com/lucas-clemente/quic-go/internal/protocol"
+ "github.com/quic-go/quic-go/internal/protocol"
)
type packetBuffer struct {
diff --git a/vendor/github.com/lucas-clemente/quic-go/client.go b/vendor/github.com/quic-go/quic-go/client.go
similarity index 93%
rename from vendor/github.com/lucas-clemente/quic-go/client.go
rename to vendor/github.com/quic-go/quic-go/client.go
index be8390e65..b05f0ab2e 100644
--- a/vendor/github.com/lucas-clemente/quic-go/client.go
+++ b/vendor/github.com/quic-go/quic-go/client.go
@@ -6,11 +6,10 @@ import (
"errors"
"fmt"
"net"
- "strings"
- "github.com/lucas-clemente/quic-go/internal/protocol"
- "github.com/lucas-clemente/quic-go/internal/utils"
- "github.com/lucas-clemente/quic-go/logging"
+ "github.com/quic-go/quic-go/internal/protocol"
+ "github.com/quic-go/quic-go/internal/utils"
+ "github.com/quic-go/quic-go/logging"
)
type client struct {
@@ -42,11 +41,8 @@ type client struct {
logger utils.Logger
}
-var (
- // make it possible to mock connection ID generation in the tests
- generateConnectionID = protocol.GenerateConnectionID
- generateConnectionIDForInitial = protocol.GenerateConnectionIDForInitial
-)
+// make it possible to mock connection ID for initial generation in the tests
+var generateConnectionIDForInitial = protocol.GenerateConnectionIDForInitial
// DialAddr establishes a new QUIC connection to a server.
// It uses a new UDP connection and closes this connection when the QUIC connection is closed.
@@ -193,7 +189,7 @@ func dialContext(
return nil, err
}
config = populateClientConfig(config, createdPacketConn)
- packetHandlers, err := getMultiplexer().AddConn(pconn, config.ConnectionIDLength, config.StatelessResetKey, config.Tracer)
+ packetHandlers, err := getMultiplexer().AddConn(pconn, config.ConnectionIDGenerator.ConnectionIDLen(), config.StatelessResetKey, config.Tracer)
if err != nil {
return nil, err
}
@@ -235,13 +231,10 @@ func newClient(
tlsConf = tlsConf.Clone()
}
if tlsConf.ServerName == "" {
- sni := host
- if strings.IndexByte(sni, ':') != -1 {
- var err error
- sni, _, err = net.SplitHostPort(sni)
- if err != nil {
- return nil, err
- }
+ sni, _, err := net.SplitHostPort(host)
+ if err != nil {
+ // It's ok if net.SplitHostPort returns an error - it could be a hostname/IP address without a port.
+ sni = host
}
tlsConf.ServerName = sni
@@ -256,7 +249,7 @@ func newClient(
}
}
- srcConnID, err := generateConnectionID(config.ConnectionIDLength)
+ srcConnID, err := config.ConnectionIDGenerator.GenerateConnectionID()
if err != nil {
return nil, err
}
diff --git a/vendor/github.com/quic-go/quic-go/closed_conn.go b/vendor/github.com/quic-go/quic-go/closed_conn.go
new file mode 100644
index 000000000..73904b846
--- /dev/null
+++ b/vendor/github.com/quic-go/quic-go/closed_conn.go
@@ -0,0 +1,64 @@
+package quic
+
+import (
+ "math/bits"
+ "net"
+
+ "github.com/quic-go/quic-go/internal/protocol"
+ "github.com/quic-go/quic-go/internal/utils"
+)
+
+// A closedLocalConn is a connection that we closed locally.
+// When receiving packets for such a connection, we need to retransmit the packet containing the CONNECTION_CLOSE frame,
+// with an exponential backoff.
+type closedLocalConn struct {
+ counter uint32
+ perspective protocol.Perspective
+ logger utils.Logger
+
+ sendPacket func(net.Addr, *packetInfo)
+}
+
+var _ packetHandler = &closedLocalConn{}
+
+// newClosedLocalConn creates a new closedLocalConn and runs it.
+func newClosedLocalConn(sendPacket func(net.Addr, *packetInfo), pers protocol.Perspective, logger utils.Logger) packetHandler {
+ return &closedLocalConn{
+ sendPacket: sendPacket,
+ perspective: pers,
+ logger: logger,
+ }
+}
+
+func (c *closedLocalConn) handlePacket(p *receivedPacket) {
+ c.counter++
+ // exponential backoff
+ // only send a CONNECTION_CLOSE for the 1st, 2nd, 4th, 8th, 16th, ... packet arriving
+ if bits.OnesCount32(c.counter) != 1 {
+ return
+ }
+ c.logger.Debugf("Received %d packets after sending CONNECTION_CLOSE. Retransmitting.", c.counter)
+ c.sendPacket(p.remoteAddr, p.info)
+}
+
+func (c *closedLocalConn) shutdown() {}
+func (c *closedLocalConn) destroy(error) {}
+func (c *closedLocalConn) getPerspective() protocol.Perspective { return c.perspective }
+
+// A closedRemoteConn is a connection that was closed remotely.
+// For such a connection, we might receive reordered packets that were sent before the CONNECTION_CLOSE.
+// We can just ignore those packets.
+type closedRemoteConn struct {
+ perspective protocol.Perspective
+}
+
+var _ packetHandler = &closedRemoteConn{}
+
+func newClosedRemoteConn(pers protocol.Perspective) packetHandler {
+ return &closedRemoteConn{perspective: pers}
+}
+
+func (s *closedRemoteConn) handlePacket(*receivedPacket) {}
+func (s *closedRemoteConn) shutdown() {}
+func (s *closedRemoteConn) destroy(error) {}
+func (s *closedRemoteConn) getPerspective() protocol.Perspective { return s.perspective }
diff --git a/vendor/github.com/lucas-clemente/quic-go/codecov.yml b/vendor/github.com/quic-go/quic-go/codecov.yml
similarity index 95%
rename from vendor/github.com/lucas-clemente/quic-go/codecov.yml
rename to vendor/github.com/quic-go/quic-go/codecov.yml
index ee9cfd3b3..074d98325 100644
--- a/vendor/github.com/lucas-clemente/quic-go/codecov.yml
+++ b/vendor/github.com/quic-go/quic-go/codecov.yml
@@ -12,6 +12,7 @@ coverage:
- internal/utils/newconnectionid_linkedlist.go
- internal/utils/packetinterval_linkedlist.go
- internal/utils/linkedlist/linkedlist.go
+ - logging/null_tracer.go
- fuzzing/
- metrics/
status:
diff --git a/vendor/github.com/lucas-clemente/quic-go/config.go b/vendor/github.com/quic-go/quic-go/config.go
similarity index 73%
rename from vendor/github.com/lucas-clemente/quic-go/config.go
rename to vendor/github.com/quic-go/quic-go/config.go
index 5d969a12a..3ead9b7a1 100644
--- a/vendor/github.com/lucas-clemente/quic-go/config.go
+++ b/vendor/github.com/quic-go/quic-go/config.go
@@ -2,11 +2,11 @@ package quic
import (
"errors"
+ "net"
"time"
- "github.com/lucas-clemente/quic-go/internal/utils"
-
- "github.com/lucas-clemente/quic-go/internal/protocol"
+ "github.com/quic-go/quic-go/internal/protocol"
+ "github.com/quic-go/quic-go/internal/utils"
)
// Clone clones a Config
@@ -16,7 +16,7 @@ func (c *Config) Clone() *Config {
}
func (c *Config) handshakeTimeout() time.Duration {
- return utils.MaxDuration(protocol.DefaultHandshakeTimeout, 2*c.HandshakeIdleTimeout)
+ return utils.Max(protocol.DefaultHandshakeTimeout, 2*c.HandshakeIdleTimeout)
}
func validateConfig(config *Config) error {
@@ -35,12 +35,15 @@ func validateConfig(config *Config) error {
// populateServerConfig populates fields in the quic.Config with their default values, if none are set
// it may be called with nil
func populateServerConfig(config *Config) *Config {
- config = populateConfig(config)
- if config.ConnectionIDLength == 0 {
- config.ConnectionIDLength = protocol.DefaultConnectionIDLength
+ config = populateConfig(config, protocol.DefaultConnectionIDLength)
+ if config.MaxTokenAge == 0 {
+ config.MaxTokenAge = protocol.TokenValidity
}
- if config.AcceptToken == nil {
- config.AcceptToken = defaultAcceptToken
+ if config.MaxRetryTokenAge == 0 {
+ config.MaxRetryTokenAge = protocol.RetryTokenValidity
+ }
+ if config.RequireAddressValidation == nil {
+ config.RequireAddressValidation = func(net.Addr) bool { return false }
}
return config
}
@@ -48,14 +51,16 @@ func populateServerConfig(config *Config) *Config {
// populateClientConfig populates fields in the quic.Config with their default values, if none are set
// it may be called with nil
func populateClientConfig(config *Config, createdPacketConn bool) *Config {
- config = populateConfig(config)
- if config.ConnectionIDLength == 0 && !createdPacketConn {
- config.ConnectionIDLength = protocol.DefaultConnectionIDLength
+ defaultConnIDLen := protocol.DefaultConnectionIDLength
+ if createdPacketConn {
+ defaultConnIDLen = 0
}
+
+ config = populateConfig(config, defaultConnIDLen)
return config
}
-func populateConfig(config *Config) *Config {
+func populateConfig(config *Config, defaultConnIDLen int) *Config {
if config == nil {
config = &Config{}
}
@@ -63,6 +68,10 @@ func populateConfig(config *Config) *Config {
if len(versions) == 0 {
versions = protocol.SupportedVersions
}
+ conIDLen := config.ConnectionIDLength
+ if config.ConnectionIDLength == 0 {
+ conIDLen = defaultConnIDLen
+ }
handshakeIdleTimeout := protocol.DefaultHandshakeIdleTimeout
if config.HandshakeIdleTimeout != 0 {
handshakeIdleTimeout = config.HandshakeIdleTimeout
@@ -99,12 +108,18 @@ func populateConfig(config *Config) *Config {
} else if maxIncomingUniStreams < 0 {
maxIncomingUniStreams = 0
}
+ connIDGenerator := config.ConnectionIDGenerator
+ if connIDGenerator == nil {
+ connIDGenerator = &protocol.DefaultConnectionIDGenerator{ConnLen: conIDLen}
+ }
return &Config{
Versions: versions,
HandshakeIdleTimeout: handshakeIdleTimeout,
MaxIdleTimeout: idleTimeout,
- AcceptToken: config.AcceptToken,
+ MaxTokenAge: config.MaxTokenAge,
+ MaxRetryTokenAge: config.MaxRetryTokenAge,
+ RequireAddressValidation: config.RequireAddressValidation,
KeepAlivePeriod: config.KeepAlivePeriod,
InitialStreamReceiveWindow: initialStreamReceiveWindow,
MaxStreamReceiveWindow: maxStreamReceiveWindow,
@@ -113,12 +128,14 @@ func populateConfig(config *Config) *Config {
AllowConnectionWindowIncrease: config.AllowConnectionWindowIncrease,
MaxIncomingStreams: maxIncomingStreams,
MaxIncomingUniStreams: maxIncomingUniStreams,
- ConnectionIDLength: config.ConnectionIDLength,
+ ConnectionIDLength: conIDLen,
+ ConnectionIDGenerator: connIDGenerator,
StatelessResetKey: config.StatelessResetKey,
TokenStore: config.TokenStore,
EnableDatagrams: config.EnableDatagrams,
DisablePathMTUDiscovery: config.DisablePathMTUDiscovery,
DisableVersionNegotiationPackets: config.DisableVersionNegotiationPackets,
+ Allow0RTT: config.Allow0RTT,
Tracer: config.Tracer,
}
}
diff --git a/vendor/github.com/lucas-clemente/quic-go/conn_id_generator.go b/vendor/github.com/quic-go/quic-go/conn_id_generator.go
similarity index 73%
rename from vendor/github.com/lucas-clemente/quic-go/conn_id_generator.go
rename to vendor/github.com/quic-go/quic-go/conn_id_generator.go
index 90c2b7a6f..2d28dc619 100644
--- a/vendor/github.com/lucas-clemente/quic-go/conn_id_generator.go
+++ b/vendor/github.com/quic-go/quic-go/conn_id_generator.go
@@ -3,42 +3,40 @@ package quic
import (
"fmt"
- "github.com/lucas-clemente/quic-go/internal/protocol"
- "github.com/lucas-clemente/quic-go/internal/qerr"
- "github.com/lucas-clemente/quic-go/internal/utils"
- "github.com/lucas-clemente/quic-go/internal/wire"
+ "github.com/quic-go/quic-go/internal/protocol"
+ "github.com/quic-go/quic-go/internal/qerr"
+ "github.com/quic-go/quic-go/internal/utils"
+ "github.com/quic-go/quic-go/internal/wire"
)
type connIDGenerator struct {
- connIDLen int
+ generator ConnectionIDGenerator
highestSeq uint64
activeSrcConnIDs map[uint64]protocol.ConnectionID
- initialClientDestConnID protocol.ConnectionID
+ initialClientDestConnID *protocol.ConnectionID // nil for the client
addConnectionID func(protocol.ConnectionID)
getStatelessResetToken func(protocol.ConnectionID) protocol.StatelessResetToken
removeConnectionID func(protocol.ConnectionID)
retireConnectionID func(protocol.ConnectionID)
- replaceWithClosed func(protocol.ConnectionID, packetHandler)
+ replaceWithClosed func([]protocol.ConnectionID, protocol.Perspective, []byte)
queueControlFrame func(wire.Frame)
-
- version protocol.VersionNumber
}
func newConnIDGenerator(
initialConnectionID protocol.ConnectionID,
- initialClientDestConnID protocol.ConnectionID, // nil for the client
+ initialClientDestConnID *protocol.ConnectionID, // nil for the client
addConnectionID func(protocol.ConnectionID),
getStatelessResetToken func(protocol.ConnectionID) protocol.StatelessResetToken,
removeConnectionID func(protocol.ConnectionID),
retireConnectionID func(protocol.ConnectionID),
- replaceWithClosed func(protocol.ConnectionID, packetHandler),
+ replaceWithClosed func([]protocol.ConnectionID, protocol.Perspective, []byte),
queueControlFrame func(wire.Frame),
- version protocol.VersionNumber,
+ generator ConnectionIDGenerator,
) *connIDGenerator {
m := &connIDGenerator{
- connIDLen: initialConnectionID.Len(),
+ generator: generator,
activeSrcConnIDs: make(map[uint64]protocol.ConnectionID),
addConnectionID: addConnectionID,
getStatelessResetToken: getStatelessResetToken,
@@ -46,7 +44,6 @@ func newConnIDGenerator(
retireConnectionID: retireConnectionID,
replaceWithClosed: replaceWithClosed,
queueControlFrame: queueControlFrame,
- version: version,
}
m.activeSrcConnIDs[0] = initialConnectionID
m.initialClientDestConnID = initialClientDestConnID
@@ -54,7 +51,7 @@ func newConnIDGenerator(
}
func (m *connIDGenerator) SetMaxActiveConnIDs(limit uint64) error {
- if m.connIDLen == 0 {
+ if m.generator.ConnectionIDLen() == 0 {
return nil
}
// The active_connection_id_limit transport parameter is the number of
@@ -63,7 +60,7 @@ func (m *connIDGenerator) SetMaxActiveConnIDs(limit uint64) error {
// transport parameter.
// We currently don't send the preferred_address transport parameter,
// so we can issue (limit - 1) connection IDs.
- for i := uint64(len(m.activeSrcConnIDs)); i < utils.MinUint64(limit, protocol.MaxIssuedConnectionIDs); i++ {
+ for i := uint64(len(m.activeSrcConnIDs)); i < utils.Min(limit, protocol.MaxIssuedConnectionIDs); i++ {
if err := m.issueNewConnID(); err != nil {
return err
}
@@ -83,7 +80,7 @@ func (m *connIDGenerator) Retire(seq uint64, sentWithDestConnID protocol.Connect
if !ok {
return nil
}
- if connID.Equal(sentWithDestConnID) {
+ if connID == sentWithDestConnID {
return &qerr.TransportError{
ErrorCode: qerr.ProtocolViolation,
ErrorMessage: fmt.Sprintf("retired connection ID %d (%s), which was used as the Destination Connection ID on this packet", seq, connID),
@@ -99,7 +96,7 @@ func (m *connIDGenerator) Retire(seq uint64, sentWithDestConnID protocol.Connect
}
func (m *connIDGenerator) issueNewConnID() error {
- connID, err := protocol.GenerateConnectionID(m.connIDLen)
+ connID, err := m.generator.GenerateConnectionID()
if err != nil {
return err
}
@@ -116,25 +113,27 @@ func (m *connIDGenerator) issueNewConnID() error {
func (m *connIDGenerator) SetHandshakeComplete() {
if m.initialClientDestConnID != nil {
- m.retireConnectionID(m.initialClientDestConnID)
+ m.retireConnectionID(*m.initialClientDestConnID)
m.initialClientDestConnID = nil
}
}
func (m *connIDGenerator) RemoveAll() {
if m.initialClientDestConnID != nil {
- m.removeConnectionID(m.initialClientDestConnID)
+ m.removeConnectionID(*m.initialClientDestConnID)
}
for _, connID := range m.activeSrcConnIDs {
m.removeConnectionID(connID)
}
}
-func (m *connIDGenerator) ReplaceWithClosed(handler packetHandler) {
+func (m *connIDGenerator) ReplaceWithClosed(pers protocol.Perspective, connClose []byte) {
+ connIDs := make([]protocol.ConnectionID, 0, len(m.activeSrcConnIDs)+1)
if m.initialClientDestConnID != nil {
- m.replaceWithClosed(m.initialClientDestConnID, handler)
+ connIDs = append(connIDs, *m.initialClientDestConnID)
}
for _, connID := range m.activeSrcConnIDs {
- m.replaceWithClosed(connID, handler)
+ connIDs = append(connIDs, connID)
}
+ m.replaceWithClosed(connIDs, pers, connClose)
}
diff --git a/vendor/github.com/lucas-clemente/quic-go/conn_id_manager.go b/vendor/github.com/quic-go/quic-go/conn_id_manager.go
similarity index 90%
rename from vendor/github.com/lucas-clemente/quic-go/conn_id_manager.go
rename to vendor/github.com/quic-go/quic-go/conn_id_manager.go
index e1b025a98..ba65aec04 100644
--- a/vendor/github.com/lucas-clemente/quic-go/conn_id_manager.go
+++ b/vendor/github.com/quic-go/quic-go/conn_id_manager.go
@@ -3,14 +3,21 @@ package quic
import (
"fmt"
- "github.com/lucas-clemente/quic-go/internal/protocol"
- "github.com/lucas-clemente/quic-go/internal/qerr"
- "github.com/lucas-clemente/quic-go/internal/utils"
- "github.com/lucas-clemente/quic-go/internal/wire"
+ "github.com/quic-go/quic-go/internal/protocol"
+ "github.com/quic-go/quic-go/internal/qerr"
+ "github.com/quic-go/quic-go/internal/utils"
+ list "github.com/quic-go/quic-go/internal/utils/linkedlist"
+ "github.com/quic-go/quic-go/internal/wire"
)
+type newConnID struct {
+ SequenceNumber uint64
+ ConnectionID protocol.ConnectionID
+ StatelessResetToken protocol.StatelessResetToken
+}
+
type connIDManager struct {
- queue utils.NewConnectionIDList
+ queue list.List[newConnID]
handshakeComplete bool
activeSequenceNumber uint64
@@ -71,7 +78,7 @@ func (h *connIDManager) add(f *wire.NewConnectionIDFrame) error {
// Retire elements in the queue.
// Doesn't retire the active connection ID.
if f.RetirePriorTo > h.highestRetired {
- var next *utils.NewConnectionIDElement
+ var next *list.Element[newConnID]
for el := h.queue.Front(); el != nil; el = next {
if el.Value.SequenceNumber >= f.RetirePriorTo {
break
@@ -104,7 +111,7 @@ func (h *connIDManager) add(f *wire.NewConnectionIDFrame) error {
func (h *connIDManager) addConnectionID(seq uint64, connID protocol.ConnectionID, resetToken protocol.StatelessResetToken) error {
// insert a new element at the end
if h.queue.Len() == 0 || h.queue.Back().Value.SequenceNumber < seq {
- h.queue.PushBack(utils.NewConnectionID{
+ h.queue.PushBack(newConnID{
SequenceNumber: seq,
ConnectionID: connID,
StatelessResetToken: resetToken,
@@ -114,7 +121,7 @@ func (h *connIDManager) addConnectionID(seq uint64, connID protocol.ConnectionID
// insert a new element somewhere in the middle
for el := h.queue.Front(); el != nil; el = el.Next() {
if el.Value.SequenceNumber == seq {
- if !el.Value.ConnectionID.Equal(connID) {
+ if el.Value.ConnectionID != connID {
return fmt.Errorf("received conflicting connection IDs for sequence number %d", seq)
}
if el.Value.StatelessResetToken != resetToken {
@@ -123,7 +130,7 @@ func (h *connIDManager) addConnectionID(seq uint64, connID protocol.ConnectionID
break
}
if el.Value.SequenceNumber > seq {
- h.queue.InsertBefore(utils.NewConnectionID{
+ h.queue.InsertBefore(newConnID{
SequenceNumber: seq,
ConnectionID: connID,
StatelessResetToken: resetToken,
@@ -138,7 +145,7 @@ func (h *connIDManager) updateConnectionID() {
h.queueControlFrame(&wire.RetireConnectionIDFrame{
SequenceNumber: h.activeSequenceNumber,
})
- h.highestRetired = utils.MaxUint64(h.highestRetired, h.activeSequenceNumber)
+ h.highestRetired = utils.Max(h.highestRetired, h.activeSequenceNumber)
if h.activeStatelessResetToken != nil {
h.removeStatelessResetToken(*h.activeStatelessResetToken)
}
diff --git a/vendor/github.com/lucas-clemente/quic-go/connection.go b/vendor/github.com/quic-go/quic-go/connection.go
similarity index 76%
rename from vendor/github.com/lucas-clemente/quic-go/connection.go
rename to vendor/github.com/quic-go/quic-go/connection.go
index ce45af86a..50db29641 100644
--- a/vendor/github.com/lucas-clemente/quic-go/connection.go
+++ b/vendor/github.com/quic-go/quic-go/connection.go
@@ -13,19 +13,20 @@ import (
"sync/atomic"
"time"
- "github.com/lucas-clemente/quic-go/internal/ackhandler"
- "github.com/lucas-clemente/quic-go/internal/flowcontrol"
- "github.com/lucas-clemente/quic-go/internal/handshake"
- "github.com/lucas-clemente/quic-go/internal/logutils"
- "github.com/lucas-clemente/quic-go/internal/protocol"
- "github.com/lucas-clemente/quic-go/internal/qerr"
- "github.com/lucas-clemente/quic-go/internal/utils"
- "github.com/lucas-clemente/quic-go/internal/wire"
- "github.com/lucas-clemente/quic-go/logging"
+ "github.com/quic-go/quic-go/internal/ackhandler"
+ "github.com/quic-go/quic-go/internal/flowcontrol"
+ "github.com/quic-go/quic-go/internal/handshake"
+ "github.com/quic-go/quic-go/internal/logutils"
+ "github.com/quic-go/quic-go/internal/protocol"
+ "github.com/quic-go/quic-go/internal/qerr"
+ "github.com/quic-go/quic-go/internal/utils"
+ "github.com/quic-go/quic-go/internal/wire"
+ "github.com/quic-go/quic-go/logging"
)
type unpacker interface {
- Unpack(hdr *wire.Header, rcvTime time.Time, data []byte) (*unpackedPacket, error)
+ UnpackLongHeader(hdr *wire.Header, rcvTime time.Time, data []byte, v protocol.VersionNumber) (*unpackedPacket, error)
+ UnpackShortHeader(rcvTime time.Time, data []byte) (protocol.PacketNumber, protocol.PacketNumberLen, protocol.KeyPhaseBit, []byte, error)
}
type streamGetter interface {
@@ -95,7 +96,7 @@ type connRunner interface {
GetStatelessResetToken(protocol.ConnectionID) protocol.StatelessResetToken
Retire(protocol.ConnectionID)
Remove(protocol.ConnectionID)
- ReplaceWithClosed(protocol.ConnectionID, packetHandler)
+ ReplaceWithClosed([]protocol.ConnectionID, protocol.Perspective, []byte)
AddResetToken(protocol.StatelessResetToken, packetHandler)
RemoveResetToken(protocol.StatelessResetToken)
}
@@ -209,7 +210,7 @@ type connection struct {
peerParams *wire.TransportParameters
- timer *utils.Timer
+ timer connectionTimer
// keepAlivePingSent stores whether a keep alive PING is in flight.
// It is reset as soon as we receive a packet from the peer.
keepAlivePingSent bool
@@ -217,16 +218,18 @@ type connection struct {
datagramQueue *datagramQueue
+ connStateMutex sync.Mutex
+ connState ConnectionState
+
logID string
tracer logging.ConnectionTracer
logger utils.Logger
}
var (
- _ Connection = &connection{}
- _ EarlyConnection = &connection{}
- _ streamSender = &connection{}
- deadlineSendImmediately = time.Time{}.Add(42 * time.Millisecond) // any value > time.Time{} and before time.Now() is fine
+ _ Connection = &connection{}
+ _ EarlyConnection = &connection{}
+ _ streamSender = &connection{}
)
var newConnection = func(
@@ -241,7 +244,7 @@ var newConnection = func(
conf *Config,
tlsConf *tls.Config,
tokenGenerator *handshake.TokenGenerator,
- enable0RTT bool,
+ clientAddressValidated bool,
tracer logging.ConnectionTracer,
tracingID uint64,
logger utils.Logger,
@@ -260,7 +263,7 @@ var newConnection = func(
logger: logger,
version: v,
}
- if origDestConnID != nil {
+ if origDestConnID.Len() > 0 {
s.logID = origDestConnID.String()
} else {
s.logID = destConnID.String()
@@ -273,14 +276,14 @@ var newConnection = func(
)
s.connIDGenerator = newConnIDGenerator(
srcConnID,
- clientDestConnID,
+ &clientDestConnID,
func(connID protocol.ConnectionID) { runner.Add(connID, s) },
runner.GetStatelessResetToken,
runner.Remove,
runner.Retire,
runner.ReplaceWithClosed,
s.queueControlFrame,
- s.version,
+ s.config.ConnectionIDGenerator,
)
s.preSetup()
s.ctx, s.ctxCancel = context.WithCancel(context.WithValue(context.Background(), ConnectionTracingKey, tracingID))
@@ -288,10 +291,10 @@ var newConnection = func(
0,
getMaxPacketSize(s.conn.RemoteAddr()),
s.rttStats,
+ clientAddressValidated,
s.perspective,
s.tracer,
s.logger,
- s.version,
)
initialStream := newCryptoStream()
handshakeStream := newCryptoStream()
@@ -314,10 +317,16 @@ var newConnection = func(
}
if s.config.EnableDatagrams {
params.MaxDatagramFrameSize = protocol.MaxDatagramFrameSize
+ } else {
+ params.MaxDatagramFrameSize = protocol.InvalidByteCount
}
if s.tracer != nil {
s.tracer.SentTransportParameters(params)
}
+ var allow0RTT func() bool
+ if conf.Allow0RTT != nil {
+ allow0RTT = func() bool { return conf.Allow0RTT(conn.RemoteAddr()) }
+ }
cs := handshake.NewCryptoSetupServer(
initialStream,
handshakeStream,
@@ -335,29 +344,15 @@ var newConnection = func(
},
},
tlsConf,
- enable0RTT,
+ allow0RTT,
s.rttStats,
tracer,
logger,
s.version,
)
s.cryptoStreamHandler = cs
- s.packer = newPacketPacker(
- srcConnID,
- s.connIDManager.Get,
- initialStream,
- handshakeStream,
- s.sentPacketHandler,
- s.retransmissionQueue,
- s.RemoteAddr(),
- cs,
- s.framer,
- s.receivedPacketHandler,
- s.datagramQueue,
- s.perspective,
- s.version,
- )
- s.unpacker = newPacketUnpacker(cs, s.version)
+ s.packer = newPacketPacker(srcConnID, s.connIDManager.Get, initialStream, handshakeStream, s.sentPacketHandler, s.retransmissionQueue, s.RemoteAddr(), cs, s.framer, s.receivedPacketHandler, s.datagramQueue, s.perspective)
+ s.unpacker = newPacketUnpacker(cs, s.srcConnIDLen)
s.cryptoStreamManager = newCryptoStreamManager(cs, initialStream, handshakeStream, s.oneRTTStream)
return s
}
@@ -407,7 +402,7 @@ var newClientConnection = func(
runner.Retire,
runner.ReplaceWithClosed,
s.queueControlFrame,
- s.version,
+ s.config.ConnectionIDGenerator,
)
s.preSetup()
s.ctx, s.ctxCancel = context.WithCancel(context.WithValue(context.Background(), ConnectionTracingKey, tracingID))
@@ -415,10 +410,10 @@ var newClientConnection = func(
initialPacketNumber,
getMaxPacketSize(s.conn.RemoteAddr()),
s.rttStats,
+ false, /* has no effect */
s.perspective,
s.tracer,
s.logger,
- s.version,
)
initialStream := newCryptoStream()
handshakeStream := newCryptoStream()
@@ -438,6 +433,8 @@ var newClientConnection = func(
}
if s.config.EnableDatagrams {
params.MaxDatagramFrameSize = protocol.MaxDatagramFrameSize
+ } else {
+ params.MaxDatagramFrameSize = protocol.InvalidByteCount
}
if s.tracer != nil {
s.tracer.SentTransportParameters(params)
@@ -465,22 +462,8 @@ var newClientConnection = func(
s.clientHelloWritten = clientHelloWritten
s.cryptoStreamHandler = cs
s.cryptoStreamManager = newCryptoStreamManager(cs, initialStream, handshakeStream, newCryptoStream())
- s.unpacker = newPacketUnpacker(cs, s.version)
- s.packer = newPacketPacker(
- srcConnID,
- s.connIDManager.Get,
- initialStream,
- handshakeStream,
- s.sentPacketHandler,
- s.retransmissionQueue,
- s.RemoteAddr(),
- cs,
- s.framer,
- s.receivedPacketHandler,
- s.datagramQueue,
- s.perspective,
- s.version,
- )
+ s.unpacker = newPacketUnpacker(cs, s.srcConnIDLen)
+ s.packer = newPacketPacker(srcConnID, s.connIDManager.Get, initialStream, handshakeStream, s.sentPacketHandler, s.retransmissionQueue, s.RemoteAddr(), cs, s.framer, s.receivedPacketHandler, s.datagramQueue, s.perspective)
if len(tlsConf.ServerName) > 0 {
s.tokenStoreKey = tlsConf.ServerName
} else {
@@ -496,8 +479,8 @@ var newClientConnection = func(
func (s *connection) preSetup() {
s.sendQueue = newSendQueue(s.conn)
- s.retransmissionQueue = newRetransmissionQueue(s.version)
- s.frameParser = wire.NewFrameParser(s.config.EnableDatagrams, s.version)
+ s.retransmissionQueue = newRetransmissionQueue()
+ s.frameParser = wire.NewFrameParser(s.config.EnableDatagrams)
s.rttStats = &utils.RTTStats{}
s.connFlowController = flowcontrol.NewConnectionFlowController(
protocol.ByteCount(s.config.InitialConnectionReceiveWindow),
@@ -519,9 +502,8 @@ func (s *connection) preSetup() {
uint64(s.config.MaxIncomingStreams),
uint64(s.config.MaxIncomingUniStreams),
s.perspective,
- s.version,
)
- s.framer = newFramer(s.streamsMap, s.version)
+ s.framer = newFramer(s.streamsMap)
s.receivedPackets = make(chan *receivedPacket, protocol.MaxConnUnprocessedPackets)
s.closeChan = make(chan closeError, 1)
s.sendingScheduled = make(chan struct{}, 1)
@@ -532,18 +514,21 @@ func (s *connection) preSetup() {
s.creationTime = now
s.windowUpdateQueue = newWindowUpdateQueue(s.streamsMap, s.connFlowController, s.framer.QueueControlFrame)
- if s.config.EnableDatagrams {
- s.datagramQueue = newDatagramQueue(s.scheduleSending, s.logger)
- }
+ s.datagramQueue = newDatagramQueue(s.scheduleSending, s.logger)
+ s.connState.Version = s.version
}
// run the connection main loop
func (s *connection) run() error {
defer s.ctxCancel()
- s.timer = utils.NewTimer()
+ s.timer = *newTimer()
- go s.cryptoStreamHandler.RunHandshake()
+ handshaking := make(chan struct{})
+ go func() {
+ defer close(handshaking)
+ s.cryptoStreamHandler.RunHandshake()
+ }()
go func() {
if err := s.sendQueue.Run(); err != nil {
s.destroyImpl(err)
@@ -694,12 +679,13 @@ runLoop:
}
}
+ s.cryptoStreamHandler.Close()
+ <-handshaking
s.handleCloseError(&closeErr)
if e := (&errCloseForRecreating{}); !errors.As(closeErr.err, &e) && s.tracer != nil {
s.tracer.Close()
}
s.logger.Infof("Connection %s closed.", s.logID)
- s.cryptoStreamHandler.Close()
s.sendQueue.Close()
s.timer.Stop()
return closeErr.err
@@ -719,14 +705,14 @@ func (s *connection) Context() context.Context {
}
func (s *connection) supportsDatagrams() bool {
- return s.peerParams.MaxDatagramFrameSize != protocol.InvalidByteCount
+ return s.peerParams.MaxDatagramFrameSize > 0
}
func (s *connection) ConnectionState() ConnectionState {
- return ConnectionState{
- TLS: s.cryptoStreamHandler.ConnectionState(),
- SupportsDatagrams: s.supportsDatagrams(),
- }
+ s.connStateMutex.Lock()
+ defer s.connStateMutex.Unlock()
+ s.connState.TLS = s.cryptoStreamHandler.ConnectionState()
+ return s.connState
}
// Time when the next keep-alive packet should be sent.
@@ -753,17 +739,12 @@ func (s *connection) maybeResetTimer() {
}
}
- if ackAlarm := s.receivedPacketHandler.GetAlarmTimeout(); !ackAlarm.IsZero() {
- deadline = utils.MinTime(deadline, ackAlarm)
- }
- if lossTime := s.sentPacketHandler.GetLossDetectionTimeout(); !lossTime.IsZero() {
- deadline = utils.MinTime(deadline, lossTime)
- }
- if !s.pacingDeadline.IsZero() {
- deadline = utils.MinTime(deadline, s.pacingDeadline)
- }
-
- s.timer.Reset(deadline)
+ s.timer.SetTimer(
+ deadline,
+ s.receivedPacketHandler.GetAlarmTimeout(),
+ s.sentPacketHandler.GetLossDetectionTimeout(),
+ s.pacingDeadline,
+ )
}
func (s *connection) idleTimeoutStartTime() time.Time {
@@ -816,7 +797,7 @@ func (s *connection) handleHandshakeConfirmed() {
if maxPacketSize == 0 {
maxPacketSize = protocol.MaxByteCount
}
- maxPacketSize = utils.MinByteCount(maxPacketSize, protocol.MaxPacketBufferSize)
+ maxPacketSize = utils.Min(maxPacketSize, protocol.MaxPacketBufferSize)
s.mtuDiscoverer = newMTUDiscoverer(
s.rttStats,
getMaxPacketSize(s.conn.RemoteAddr()),
@@ -843,61 +824,133 @@ func (s *connection) handlePacketImpl(rp *receivedPacket) bool {
data := rp.data
p := rp
for len(data) > 0 {
+ var destConnID protocol.ConnectionID
if counter > 0 {
p = p.Clone()
p.data = data
- }
- hdr, packetData, rest, err := wire.ParsePacket(p.data, s.srcConnIDLen)
- if err != nil {
- if s.tracer != nil {
- dropReason := logging.PacketDropHeaderParseError
- if err == wire.ErrUnsupportedVersion {
- dropReason = logging.PacketDropUnsupportedVersion
+ var err error
+ destConnID, err = wire.ParseConnectionID(p.data, s.srcConnIDLen)
+ if err != nil {
+ if s.tracer != nil {
+ s.tracer.DroppedPacket(logging.PacketTypeNotDetermined, protocol.ByteCount(len(data)), logging.PacketDropHeaderParseError)
}
- s.tracer.DroppedPacket(logging.PacketTypeNotDetermined, protocol.ByteCount(len(data)), dropReason)
+ s.logger.Debugf("error parsing packet, couldn't parse connection ID: %s", err)
+ break
+ }
+ if destConnID != lastConnID {
+ if s.tracer != nil {
+ s.tracer.DroppedPacket(logging.PacketTypeNotDetermined, protocol.ByteCount(len(data)), logging.PacketDropUnknownConnectionID)
+ }
+ s.logger.Debugf("coalesced packet has different destination connection ID: %s, expected %s", destConnID, lastConnID)
+ break
}
- s.logger.Debugf("error parsing packet: %s", err)
- break
}
- if hdr.IsLongHeader && hdr.Version != s.version {
- if s.tracer != nil {
- s.tracer.DroppedPacket(logging.PacketTypeFromHeader(hdr), protocol.ByteCount(len(data)), logging.PacketDropUnexpectedVersion)
+ if wire.IsLongHeaderPacket(p.data[0]) {
+ hdr, packetData, rest, err := wire.ParsePacket(p.data)
+ if err != nil {
+ if s.tracer != nil {
+ dropReason := logging.PacketDropHeaderParseError
+ if err == wire.ErrUnsupportedVersion {
+ dropReason = logging.PacketDropUnsupportedVersion
+ }
+ s.tracer.DroppedPacket(logging.PacketTypeNotDetermined, protocol.ByteCount(len(data)), dropReason)
+ }
+ s.logger.Debugf("error parsing packet: %s", err)
+ break
}
- s.logger.Debugf("Dropping packet with version %x. Expected %x.", hdr.Version, s.version)
- break
- }
+ lastConnID = hdr.DestConnectionID
- if counter > 0 && !hdr.DestConnectionID.Equal(lastConnID) {
- if s.tracer != nil {
- s.tracer.DroppedPacket(logging.PacketTypeFromHeader(hdr), protocol.ByteCount(len(data)), logging.PacketDropUnknownConnectionID)
+ if hdr.Version != s.version {
+ if s.tracer != nil {
+ s.tracer.DroppedPacket(logging.PacketTypeFromHeader(hdr), protocol.ByteCount(len(data)), logging.PacketDropUnexpectedVersion)
+ }
+ s.logger.Debugf("Dropping packet with version %x. Expected %x.", hdr.Version, s.version)
+ break
+ }
+
+ if counter > 0 {
+ p.buffer.Split()
}
- s.logger.Debugf("coalesced packet has different destination connection ID: %s, expected %s", hdr.DestConnectionID, lastConnID)
+ counter++
+
+ // only log if this actually a coalesced packet
+ if s.logger.Debug() && (counter > 1 || len(rest) > 0) {
+ s.logger.Debugf("Parsed a coalesced packet. Part %d: %d bytes. Remaining: %d bytes.", counter, len(packetData), len(rest))
+ }
+
+ p.data = packetData
+
+ if wasProcessed := s.handleLongHeaderPacket(p, hdr); wasProcessed {
+ processed = true
+ }
+ data = rest
+ } else {
+ if counter > 0 {
+ p.buffer.Split()
+ }
+ processed = s.handleShortHeaderPacket(p, destConnID)
break
}
- lastConnID = hdr.DestConnectionID
+ }
- if counter > 0 {
- p.buffer.Split()
- }
- counter++
+ p.buffer.MaybeRelease()
+ return processed
+}
+
+func (s *connection) handleShortHeaderPacket(p *receivedPacket, destConnID protocol.ConnectionID) bool {
+ var wasQueued bool
- // only log if this actually a coalesced packet
- if s.logger.Debug() && (counter > 1 || len(rest) > 0) {
- s.logger.Debugf("Parsed a coalesced packet. Part %d: %d bytes. Remaining: %d bytes.", counter, len(packetData), len(rest))
+ defer func() {
+ // Put back the packet buffer if the packet wasn't queued for later decryption.
+ if !wasQueued {
+ p.buffer.Decrement()
}
- p.data = packetData
- if wasProcessed := s.handleSinglePacket(p, hdr); wasProcessed {
- processed = true
+ }()
+
+ pn, pnLen, keyPhase, data, err := s.unpacker.UnpackShortHeader(p.rcvTime, p.data)
+ if err != nil {
+ wasQueued = s.handleUnpackError(err, p, logging.PacketType1RTT)
+ return false
+ }
+
+ if s.logger.Debug() {
+ s.logger.Debugf("<- Reading packet %d (%d bytes) for connection %s, 1-RTT", pn, p.Size(), destConnID)
+ wire.LogShortHeader(s.logger, destConnID, pn, pnLen, keyPhase)
+ }
+
+ if s.receivedPacketHandler.IsPotentiallyDuplicate(pn, protocol.Encryption1RTT) {
+ s.logger.Debugf("Dropping (potentially) duplicate packet.")
+ if s.tracer != nil {
+ s.tracer.DroppedPacket(logging.PacketType1RTT, p.Size(), logging.PacketDropDuplicate)
}
- data = rest
+ return false
}
- p.buffer.MaybeRelease()
- return processed
+
+ var log func([]logging.Frame)
+ if s.tracer != nil {
+ log = func(frames []logging.Frame) {
+ s.tracer.ReceivedShortHeaderPacket(
+ &logging.ShortHeader{
+ DestConnectionID: destConnID,
+ PacketNumber: pn,
+ PacketNumberLen: pnLen,
+ KeyPhase: keyPhase,
+ },
+ p.Size(),
+ frames,
+ )
+ }
+ }
+ if err := s.handleUnpackedShortHeaderPacket(destConnID, pn, data, p.ecn, p.rcvTime, log); err != nil {
+ s.closeLocal(err)
+ return false
+ }
+ return true
}
-func (s *connection) handleSinglePacket(p *receivedPacket, hdr *wire.Header) bool /* was the packet successfully processed */ {
+func (s *connection) handleLongHeaderPacket(p *receivedPacket, hdr *wire.Header) bool /* was the packet successfully processed */ {
var wasQueued bool
defer func() {
@@ -913,7 +966,7 @@ func (s *connection) handleSinglePacket(p *receivedPacket, hdr *wire.Header) boo
// The server can change the source connection ID with the first Handshake packet.
// After this, all packets with a different source connection have to be ignored.
- if s.receivedFirstPacket && hdr.IsLongHeader && hdr.Type == protocol.PacketTypeInitial && !hdr.SrcConnectionID.Equal(s.handshakeDestConnID) {
+ if s.receivedFirstPacket && hdr.Type == protocol.PacketTypeInitial && hdr.SrcConnectionID != s.handshakeDestConnID {
if s.tracer != nil {
s.tracer.DroppedPacket(logging.PacketTypeInitial, p.Size(), logging.PacketDropUnknownConnectionID)
}
@@ -928,53 +981,18 @@ func (s *connection) handleSinglePacket(p *receivedPacket, hdr *wire.Header) boo
return false
}
- packet, err := s.unpacker.Unpack(hdr, p.rcvTime, p.data)
+ packet, err := s.unpacker.UnpackLongHeader(hdr, p.rcvTime, p.data, s.version)
if err != nil {
- switch err {
- case handshake.ErrKeysDropped:
- if s.tracer != nil {
- s.tracer.DroppedPacket(logging.PacketTypeFromHeader(hdr), p.Size(), logging.PacketDropKeyUnavailable)
- }
- s.logger.Debugf("Dropping %s packet (%d bytes) because we already dropped the keys.", hdr.PacketType(), p.Size())
- case handshake.ErrKeysNotYetAvailable:
- // Sealer for this encryption level not yet available.
- // Try again later.
- wasQueued = true
- s.tryQueueingUndecryptablePacket(p, hdr)
- case wire.ErrInvalidReservedBits:
- s.closeLocal(&qerr.TransportError{
- ErrorCode: qerr.ProtocolViolation,
- ErrorMessage: err.Error(),
- })
- case handshake.ErrDecryptionFailed:
- // This might be a packet injected by an attacker. Drop it.
- if s.tracer != nil {
- s.tracer.DroppedPacket(logging.PacketTypeFromHeader(hdr), p.Size(), logging.PacketDropPayloadDecryptError)
- }
- s.logger.Debugf("Dropping %s packet (%d bytes) that could not be unpacked. Error: %s", hdr.PacketType(), p.Size(), err)
- default:
- var headerErr *headerParseError
- if errors.As(err, &headerErr) {
- // This might be a packet injected by an attacker. Drop it.
- if s.tracer != nil {
- s.tracer.DroppedPacket(logging.PacketTypeFromHeader(hdr), p.Size(), logging.PacketDropHeaderParseError)
- }
- s.logger.Debugf("Dropping %s packet (%d bytes) for which we couldn't unpack the header. Error: %s", hdr.PacketType(), p.Size(), err)
- } else {
- // This is an error returned by the AEAD (other than ErrDecryptionFailed).
- // For example, a PROTOCOL_VIOLATION due to key updates.
- s.closeLocal(err)
- }
- }
+ wasQueued = s.handleUnpackError(err, p, logging.PacketTypeFromHeader(hdr))
return false
}
if s.logger.Debug() {
- s.logger.Debugf("<- Reading packet %d (%d bytes) for connection %s, %s", packet.packetNumber, p.Size(), hdr.DestConnectionID, packet.encryptionLevel)
+ s.logger.Debugf("<- Reading packet %d (%d bytes) for connection %s, %s", packet.hdr.PacketNumber, p.Size(), hdr.DestConnectionID, packet.encryptionLevel)
packet.hdr.Log(s.logger)
}
- if s.receivedPacketHandler.IsPotentiallyDuplicate(packet.packetNumber, packet.encryptionLevel) {
+ if s.receivedPacketHandler.IsPotentiallyDuplicate(packet.hdr.PacketNumber, packet.encryptionLevel) {
s.logger.Debugf("Dropping (potentially) duplicate packet.")
if s.tracer != nil {
s.tracer.DroppedPacket(logging.PacketTypeFromHeader(hdr), p.Size(), logging.PacketDropDuplicate)
@@ -982,13 +1000,53 @@ func (s *connection) handleSinglePacket(p *receivedPacket, hdr *wire.Header) boo
return false
}
- if err := s.handleUnpackedPacket(packet, p.ecn, p.rcvTime, p.Size()); err != nil {
+ if err := s.handleUnpackedLongHeaderPacket(packet, p.ecn, p.rcvTime, p.Size()); err != nil {
s.closeLocal(err)
return false
}
return true
}
+func (s *connection) handleUnpackError(err error, p *receivedPacket, pt logging.PacketType) (wasQueued bool) {
+ switch err {
+ case handshake.ErrKeysDropped:
+ if s.tracer != nil {
+ s.tracer.DroppedPacket(pt, p.Size(), logging.PacketDropKeyUnavailable)
+ }
+ s.logger.Debugf("Dropping %s packet (%d bytes) because we already dropped the keys.", pt, p.Size())
+ case handshake.ErrKeysNotYetAvailable:
+ // Sealer for this encryption level not yet available.
+ // Try again later.
+ s.tryQueueingUndecryptablePacket(p, pt)
+ return true
+ case wire.ErrInvalidReservedBits:
+ s.closeLocal(&qerr.TransportError{
+ ErrorCode: qerr.ProtocolViolation,
+ ErrorMessage: err.Error(),
+ })
+ case handshake.ErrDecryptionFailed:
+ // This might be a packet injected by an attacker. Drop it.
+ if s.tracer != nil {
+ s.tracer.DroppedPacket(pt, p.Size(), logging.PacketDropPayloadDecryptError)
+ }
+ s.logger.Debugf("Dropping %s packet (%d bytes) that could not be unpacked. Error: %s", pt, p.Size(), err)
+ default:
+ var headerErr *headerParseError
+ if errors.As(err, &headerErr) {
+ // This might be a packet injected by an attacker. Drop it.
+ if s.tracer != nil {
+ s.tracer.DroppedPacket(pt, p.Size(), logging.PacketDropHeaderParseError)
+ }
+ s.logger.Debugf("Dropping %s packet (%d bytes) for which we couldn't unpack the header. Error: %s", pt, p.Size(), err)
+ } else {
+ // This is an error returned by the AEAD (other than ErrDecryptionFailed).
+ // For example, a PROTOCOL_VIOLATION due to key updates.
+ s.closeLocal(err)
+ }
+ }
+ return false
+}
+
func (s *connection) handleRetryPacket(hdr *wire.Header, data []byte) bool /* was this a valid Retry */ {
if s.perspective == protocol.PerspectiveServer {
if s.tracer != nil {
@@ -1005,7 +1063,7 @@ func (s *connection) handleRetryPacket(hdr *wire.Header, data []byte) bool /* wa
return false
}
destConnID := s.connIDManager.Get()
- if hdr.SrcConnectionID.Equal(destConnID) {
+ if hdr.SrcConnectionID == destConnID {
if s.tracer != nil {
s.tracer.DroppedPacket(logging.PacketTypeRetry, protocol.ByteCount(len(data)), logging.PacketDropUnexpectedPacket)
}
@@ -1060,7 +1118,7 @@ func (s *connection) handleVersionNegotiationPacket(p *receivedPacket) {
return
}
- hdr, supportedVersions, err := wire.ParseVersionNegotiationPacket(bytes.NewReader(p.data))
+ src, dest, supportedVersions, err := wire.ParseVersionNegotiationPacket(p.data)
if err != nil {
if s.tracer != nil {
s.tracer.DroppedPacket(logging.PacketTypeVersionNegotiation, p.Size(), logging.PacketDropHeaderParseError)
@@ -1082,7 +1140,7 @@ func (s *connection) handleVersionNegotiationPacket(p *receivedPacket) {
s.logger.Infof("Received a Version Negotiation packet. Supported Versions: %s", supportedVersions)
if s.tracer != nil {
- s.tracer.ReceivedVersionNegotiationPacket(hdr, supportedVersions)
+ s.tracer.ReceivedVersionNegotiationPacket(dest, src, supportedVersions)
}
newVersion, ok := protocol.ChooseSupportedVersion(s.config.Versions, supportedVersions)
if !ok {
@@ -1105,19 +1163,12 @@ func (s *connection) handleVersionNegotiationPacket(p *receivedPacket) {
})
}
-func (s *connection) handleUnpackedPacket(
+func (s *connection) handleUnpackedLongHeaderPacket(
packet *unpackedPacket,
ecn protocol.ECN,
rcvTime time.Time,
packetSize protocol.ByteCount, // only for logging
) error {
- if len(packet.data) == 0 {
- return &qerr.TransportError{
- ErrorCode: qerr.ProtocolViolation,
- ErrorMessage: "empty packet",
- }
- }
-
if !s.receivedFirstPacket {
s.receivedFirstPacket = true
if !s.versionNegotiated && s.tracer != nil {
@@ -1131,7 +1182,7 @@ func (s *connection) handleUnpackedPacket(
s.tracer.NegotiatedVersion(s.version, clientVersions, serverVersions)
}
// The server can change the source connection ID with the first Handshake packet.
- if s.perspective == protocol.PerspectiveClient && packet.hdr.IsLongHeader && !packet.hdr.SrcConnectionID.Equal(s.handshakeDestConnID) {
+ if s.perspective == protocol.PerspectiveClient && packet.hdr.SrcConnectionID != s.handshakeDestConnID {
cid := packet.hdr.SrcConnectionID
s.logger.Debugf("Received first packet. Switching destination connection ID to: %s", cid)
s.handshakeDestConnID = cid
@@ -1140,10 +1191,10 @@ func (s *connection) handleUnpackedPacket(
// We create the connection as soon as we receive the first packet from the client.
// We do that before authenticating the packet.
// That means that if the source connection ID was corrupted,
- // we might have create a connection with an incorrect source connection ID.
+ // we might have created a connection with an incorrect source connection ID.
// Once we authenticate the first packet, we need to update it.
if s.perspective == protocol.PerspectiveServer {
- if !packet.hdr.SrcConnectionID.Equal(s.handshakeDestConnID) {
+ if packet.hdr.SrcConnectionID != s.handshakeDestConnID {
s.handshakeDestConnID = packet.hdr.SrcConnectionID
s.connIDManager.ChangeInitialConnID(packet.hdr.SrcConnectionID)
}
@@ -1162,16 +1213,53 @@ func (s *connection) handleUnpackedPacket(
s.firstAckElicitingPacketAfterIdleSentTime = time.Time{}
s.keepAlivePingSent = false
+ var log func([]logging.Frame)
+ if s.tracer != nil {
+ log = func(frames []logging.Frame) {
+ s.tracer.ReceivedLongHeaderPacket(packet.hdr, packetSize, frames)
+ }
+ }
+ isAckEliciting, err := s.handleFrames(packet.data, packet.hdr.DestConnectionID, packet.encryptionLevel, log)
+ if err != nil {
+ return err
+ }
+ return s.receivedPacketHandler.ReceivedPacket(packet.hdr.PacketNumber, ecn, packet.encryptionLevel, rcvTime, isAckEliciting)
+}
+
+func (s *connection) handleUnpackedShortHeaderPacket(
+ destConnID protocol.ConnectionID,
+ pn protocol.PacketNumber,
+ data []byte,
+ ecn protocol.ECN,
+ rcvTime time.Time,
+ log func([]logging.Frame),
+) error {
+ s.lastPacketReceivedTime = rcvTime
+ s.firstAckElicitingPacketAfterIdleSentTime = time.Time{}
+ s.keepAlivePingSent = false
+
+ isAckEliciting, err := s.handleFrames(data, destConnID, protocol.Encryption1RTT, log)
+ if err != nil {
+ return err
+ }
+ return s.receivedPacketHandler.ReceivedPacket(pn, ecn, protocol.Encryption1RTT, rcvTime, isAckEliciting)
+}
+
+func (s *connection) handleFrames(
+ data []byte,
+ destConnID protocol.ConnectionID,
+ encLevel protocol.EncryptionLevel,
+ log func([]logging.Frame),
+) (isAckEliciting bool, _ error) {
// Only used for tracing.
// If we're not tracing, this slice will always remain empty.
var frames []wire.Frame
- r := bytes.NewReader(packet.data)
- var isAckEliciting bool
- for {
- frame, err := s.frameParser.ParseNext(r, packet.encryptionLevel)
+ for len(data) > 0 {
+ l, frame, err := s.frameParser.ParseNext(data, encLevel, s.version)
if err != nil {
- return err
+ return false, err
}
+ data = data[l:]
if frame == nil {
break
}
@@ -1180,29 +1268,28 @@ func (s *connection) handleUnpackedPacket(
}
// Only process frames now if we're not logging.
// If we're logging, we need to make sure that the packet_received event is logged first.
- if s.tracer == nil {
- if err := s.handleFrame(frame, packet.encryptionLevel, packet.hdr.DestConnectionID); err != nil {
- return err
+ if log == nil {
+ if err := s.handleFrame(frame, encLevel, destConnID); err != nil {
+ return false, err
}
} else {
frames = append(frames, frame)
}
}
- if s.tracer != nil {
+ if log != nil {
fs := make([]logging.Frame, len(frames))
for i, frame := range frames {
fs[i] = logutils.ConvertFrame(frame)
}
- s.tracer.ReceivedPacket(packet.hdr, packetSize, fs)
+ log(fs)
for _, frame := range frames {
- if err := s.handleFrame(frame, packet.encryptionLevel, packet.hdr.DestConnectionID); err != nil {
- return err
+ if err := s.handleFrame(frame, encLevel, destConnID); err != nil {
+ return false, err
}
}
}
-
- return s.receivedPacketHandler.ReceivedPacket(packet.packetNumber, ecn, packet.encryptionLevel, rcvTime, isAckEliciting)
+ return
}
func (s *connection) handleFrame(f wire.Frame, encLevel protocol.EncryptionLevel, destConnID protocol.ConnectionID) error {
@@ -1215,6 +1302,7 @@ func (s *connection) handleFrame(f wire.Frame, encLevel protocol.EncryptionLevel
err = s.handleStreamFrame(frame)
case *wire.AckFrame:
err = s.handleAckFrame(frame, encLevel)
+ wire.PutAckFrame(frame)
case *wire.ConnectionCloseFrame:
s.handleConnectionCloseFrame(frame)
case *wire.ResetStreamFrame:
@@ -1513,7 +1601,7 @@ func (s *connection) handleCloseError(closeErr *closeError) {
// If this is a remote close we're done here
if closeErr.remote {
- s.connIDGenerator.ReplaceWithClosed(newClosedRemoteConn(s.perspective))
+ s.connIDGenerator.ReplaceWithClosed(s.perspective, nil)
return
}
if closeErr.immediate {
@@ -1530,8 +1618,7 @@ func (s *connection) handleCloseError(closeErr *closeError) {
if err != nil {
s.logger.Debugf("Error sending CONNECTION_CLOSE: %s", err)
}
- cs := newClosedLocalConn(s.conn, connClosePacket, s.perspective, s.logger)
- s.connIDGenerator.ReplaceWithClosed(cs)
+ s.connIDGenerator.ReplaceWithClosed(s.perspective, connClosePacket)
}
func (s *connection) dropEncryptionLevel(encLevel protocol.EncryptionLevel) {
@@ -1561,6 +1648,9 @@ func (s *connection) restoreTransportParameters(params *wire.TransportParameters
s.connIDGenerator.SetMaxActiveConnIDs(params.ActiveConnectionIDLimit)
s.connFlowController.UpdateSendWindow(params.InitialMaxData)
s.streamsMap.UpdateLimits(params)
+ s.connStateMutex.Lock()
+ s.connState.SupportsDatagrams = s.supportsDatagrams()
+ s.connStateMutex.Unlock()
}
func (s *connection) handleTransportParameters(params *wire.TransportParameters) {
@@ -1569,6 +1659,7 @@ func (s *connection) handleTransportParameters(params *wire.TransportParameters)
ErrorCode: qerr.TransportParameterError,
ErrorMessage: err.Error(),
})
+ return
}
s.peerParams = params
// On the client side we have to wait for handshake completion.
@@ -1579,6 +1670,10 @@ func (s *connection) handleTransportParameters(params *wire.TransportParameters)
// the client's transport parameters.
close(s.earlyConnReadyChan)
}
+
+ s.connStateMutex.Lock()
+ s.connState.SupportsDatagrams = s.supportsDatagrams()
+ s.connStateMutex.Unlock()
}
func (s *connection) checkTransportParameters(params *wire.TransportParameters) error {
@@ -1590,7 +1685,7 @@ func (s *connection) checkTransportParameters(params *wire.TransportParameters)
}
// check the initial_source_connection_id
- if !params.InitialSourceConnectionID.Equal(s.handshakeDestConnID) {
+ if params.InitialSourceConnectionID != s.handshakeDestConnID {
return fmt.Errorf("expected initial_source_connection_id to equal %s, is %s", s.handshakeDestConnID, params.InitialSourceConnectionID)
}
@@ -1598,14 +1693,14 @@ func (s *connection) checkTransportParameters(params *wire.TransportParameters)
return nil
}
// check the original_destination_connection_id
- if !params.OriginalDestinationConnectionID.Equal(s.origDestConnID) {
+ if params.OriginalDestinationConnectionID != s.origDestConnID {
return fmt.Errorf("expected original_destination_connection_id to equal %s, is %s", s.origDestConnID, params.OriginalDestinationConnectionID)
}
if s.retrySrcConnID != nil { // a Retry was performed
if params.RetrySourceConnectionID == nil {
return errors.New("missing retry_source_connection_id")
}
- if !(*params.RetrySourceConnectionID).Equal(*s.retrySrcConnID) {
+ if *params.RetrySourceConnectionID != *s.retrySrcConnID {
return fmt.Errorf("expected retry_source_connection_id to equal %s, is %s", s.retrySrcConnID, *params.RetrySourceConnectionID)
}
} else if params.RetrySourceConnectionID != nil {
@@ -1618,7 +1713,7 @@ func (s *connection) applyTransportParameters() {
params := s.peerParams
// Our local idle timeout will always be > 0.
s.idleTimeout = utils.MinNonZeroDuration(s.config.MaxIdleTimeout, params.MaxIdleTimeout)
- s.keepAliveInterval = utils.MinDuration(s.config.KeepAlivePeriod, utils.MinDuration(s.idleTimeout/2, protocol.MaxKeepAliveInterval))
+ s.keepAliveInterval = utils.Min(s.config.KeepAlivePeriod, utils.Min(s.idleTimeout/2, protocol.MaxKeepAliveInterval))
s.streamsMap.UpdateLimits(params)
s.packer.HandleTransportParameters(params)
s.frameParser.SetAckDelayExponent(params.AckDelayExponent)
@@ -1667,7 +1762,7 @@ func (s *connection) sendPackets() error {
}
// We can at most send a single ACK only packet.
// There will only be a new ACK after receiving new packets.
- // SendAck is only returned when we're congestion limited, so we don't need to set the pacingt timer.
 + // SendAck is only returned when we're congestion limited, so we don't need to set the pacing timer.
return s.maybeSendAckOnlyPacket()
case ackhandler.SendPTOInitial:
if err := s.sendProbePacket(protocol.EncryptionInitial); err != nil {
@@ -1702,27 +1797,41 @@ func (s *connection) sendPackets() error {
}
func (s *connection) maybeSendAckOnlyPacket() error {
- packet, err := s.packer.MaybePackAckPacket(s.handshakeConfirmed)
+ if !s.handshakeConfirmed {
+ packet, err := s.packer.PackCoalescedPacket(true, s.version)
+ if err != nil {
+ return err
+ }
+ if packet == nil {
+ return nil
+ }
+ s.sendPackedCoalescedPacket(packet, time.Now())
+ return nil
+ }
+
+ now := time.Now()
+ p, buffer, err := s.packer.PackPacket(true, now, s.version)
if err != nil {
+ if err == errNothingToPack {
+ return nil
+ }
return err
}
- if packet == nil {
- return nil
- }
- s.sendPackedPacket(packet, time.Now())
+ s.logShortHeaderPacket(p.DestConnID, p.Ack, p.Frames, p.PacketNumber, p.PacketNumberLen, p.KeyPhase, buffer.Len(), false)
+ s.sendPackedShortHeaderPacket(buffer, p.Packet, now)
return nil
}
func (s *connection) sendProbePacket(encLevel protocol.EncryptionLevel) error {
// Queue probe packets until we actually send out a packet,
// or until there are no more packets to queue.
- var packet *packedPacket
+ var packet *coalescedPacket
for {
if wasQueued := s.sentPacketHandler.QueueProbePacket(encLevel); !wasQueued {
break
}
var err error
- packet, err = s.packer.MaybePackProbePacket(encLevel)
+ packet, err = s.packer.MaybePackProbePacket(encLevel, s.version)
if err != nil {
return err
}
@@ -1743,15 +1852,15 @@ func (s *connection) sendProbePacket(encLevel protocol.EncryptionLevel) error {
panic("unexpected encryption level")
}
var err error
- packet, err = s.packer.MaybePackProbePacket(encLevel)
+ packet, err = s.packer.MaybePackProbePacket(encLevel, s.version)
if err != nil {
return err
}
}
- if packet == nil || packet.packetContents == nil {
+ if packet == nil || (len(packet.longHdrPackets) == 0 && packet.shortHdrPacket == nil) {
return fmt.Errorf("connection BUG: couldn't pack %s probe packet", encLevel)
}
- s.sendPackedPacket(packet, time.Now())
+ s.sendPackedCoalescedPacket(packet, time.Now())
return nil
}
@@ -1763,44 +1872,59 @@ func (s *connection) sendPacket() (bool, error) {
now := time.Now()
if !s.handshakeConfirmed {
- packet, err := s.packer.PackCoalescedPacket()
+ packet, err := s.packer.PackCoalescedPacket(false, s.version)
if err != nil || packet == nil {
return false, err
}
s.sentFirstPacket = true
- s.logCoalescedPacket(packet)
- for _, p := range packet.packets {
- if s.firstAckElicitingPacketAfterIdleSentTime.IsZero() && p.IsAckEliciting() {
- s.firstAckElicitingPacketAfterIdleSentTime = now
- }
- s.sentPacketHandler.SentPacket(p.ToAckHandlerPacket(now, s.retransmissionQueue))
- }
- s.connIDManager.SentPacket()
- s.sendQueue.Send(packet.buffer)
+ s.sendPackedCoalescedPacket(packet, now)
return true, nil
- }
- if !s.config.DisablePathMTUDiscovery && s.mtuDiscoverer.ShouldSendProbe(now) {
- packet, err := s.packer.PackMTUProbePacket(s.mtuDiscoverer.GetPing())
+ } else if !s.config.DisablePathMTUDiscovery && s.mtuDiscoverer.ShouldSendProbe(now) {
+ ping, size := s.mtuDiscoverer.GetPing()
+ p, buffer, err := s.packer.PackMTUProbePacket(ping, size, now, s.version)
if err != nil {
return false, err
}
- s.sendPackedPacket(packet, now)
+ s.logShortHeaderPacket(p.DestConnID, p.Ack, p.Frames, p.PacketNumber, p.PacketNumberLen, p.KeyPhase, buffer.Len(), false)
+ s.sendPackedShortHeaderPacket(buffer, p.Packet, now)
return true, nil
}
- packet, err := s.packer.PackPacket()
- if err != nil || packet == nil {
+ p, buffer, err := s.packer.PackPacket(false, now, s.version)
+ if err != nil {
+ if err == errNothingToPack {
+ return false, nil
+ }
return false, err
}
- s.sendPackedPacket(packet, now)
+ s.logShortHeaderPacket(p.DestConnID, p.Ack, p.Frames, p.PacketNumber, p.PacketNumberLen, p.KeyPhase, buffer.Len(), false)
+ s.sendPackedShortHeaderPacket(buffer, p.Packet, now)
return true, nil
}
-func (s *connection) sendPackedPacket(packet *packedPacket, now time.Time) {
- if s.firstAckElicitingPacketAfterIdleSentTime.IsZero() && packet.IsAckEliciting() {
+func (s *connection) sendPackedShortHeaderPacket(buffer *packetBuffer, p *ackhandler.Packet, now time.Time) {
+ if s.firstAckElicitingPacketAfterIdleSentTime.IsZero() && ackhandler.HasAckElicitingFrames(p.Frames) {
s.firstAckElicitingPacketAfterIdleSentTime = now
}
- s.logPacket(packet)
- s.sentPacketHandler.SentPacket(packet.ToAckHandlerPacket(now, s.retransmissionQueue))
+
+ s.sentPacketHandler.SentPacket(p)
+ s.connIDManager.SentPacket()
+ s.sendQueue.Send(buffer)
+}
+
+func (s *connection) sendPackedCoalescedPacket(packet *coalescedPacket, now time.Time) {
+ s.logCoalescedPacket(packet)
+ for _, p := range packet.longHdrPackets {
+ if s.firstAckElicitingPacketAfterIdleSentTime.IsZero() && p.IsAckEliciting() {
+ s.firstAckElicitingPacketAfterIdleSentTime = now
+ }
+ s.sentPacketHandler.SentPacket(p.ToAckHandlerPacket(now, s.retransmissionQueue))
+ }
+ if p := packet.shortHdrPacket; p != nil {
+ if s.firstAckElicitingPacketAfterIdleSentTime.IsZero() && p.IsAckEliciting() {
+ s.firstAckElicitingPacketAfterIdleSentTime = now
+ }
+ s.sentPacketHandler.SentPacket(p.Packet)
+ }
s.connIDManager.SentPacket()
s.sendQueue.Send(packet.buffer)
}
@@ -1811,14 +1935,14 @@ func (s *connection) sendConnectionClose(e error) ([]byte, error) {
var transportErr *qerr.TransportError
var applicationErr *qerr.ApplicationError
if errors.As(e, &transportErr) {
- packet, err = s.packer.PackConnectionClose(transportErr)
+ packet, err = s.packer.PackConnectionClose(transportErr, s.version)
} else if errors.As(e, &applicationErr) {
- packet, err = s.packer.PackApplicationClose(applicationErr)
+ packet, err = s.packer.PackApplicationClose(applicationErr, s.version)
} else {
packet, err = s.packer.PackConnectionClose(&qerr.TransportError{
ErrorCode: qerr.InternalError,
ErrorMessage: fmt.Sprintf("connection BUG: unspecified error type (msg: %s)", e.Error()),
- })
+ }, s.version)
}
if err != nil {
return nil, err
@@ -1827,47 +1951,109 @@ func (s *connection) sendConnectionClose(e error) ([]byte, error) {
return packet.buffer.Data, s.conn.Write(packet.buffer.Data)
}
-func (s *connection) logPacketContents(p *packetContents) {
+func (s *connection) logLongHeaderPacket(p *longHeaderPacket) {
+ // quic-go logging
+ if s.logger.Debug() {
+ p.header.Log(s.logger)
+ if p.ack != nil {
+ wire.LogFrame(s.logger, p.ack, true)
+ }
+ for _, frame := range p.frames {
+ wire.LogFrame(s.logger, frame.Frame, true)
+ }
+ }
+
// tracing
if s.tracer != nil {
frames := make([]logging.Frame, 0, len(p.frames))
for _, f := range p.frames {
frames = append(frames, logutils.ConvertFrame(f.Frame))
}
- s.tracer.SentPacket(p.header, p.length, p.ack, frames)
+ var ack *logging.AckFrame
+ if p.ack != nil {
+ ack = logutils.ConvertAckFrame(p.ack)
+ }
+ s.tracer.SentLongHeaderPacket(p.header, p.length, ack, frames)
}
+}
- // quic-go logging
- if !s.logger.Debug() {
- return
+func (s *connection) logShortHeaderPacket(
+ destConnID protocol.ConnectionID,
+ ackFrame *wire.AckFrame,
+ frames []*ackhandler.Frame,
+ pn protocol.PacketNumber,
+ pnLen protocol.PacketNumberLen,
+ kp protocol.KeyPhaseBit,
+ size protocol.ByteCount,
+ isCoalesced bool,
+) {
+ if s.logger.Debug() && !isCoalesced {
+ s.logger.Debugf("-> Sending packet %d (%d bytes) for connection %s, 1-RTT", pn, size, s.logID)
}
- p.header.Log(s.logger)
- if p.ack != nil {
- wire.LogFrame(s.logger, p.ack, true)
+ // quic-go logging
+ if s.logger.Debug() {
+ wire.LogShortHeader(s.logger, destConnID, pn, pnLen, kp)
+ if ackFrame != nil {
+ wire.LogFrame(s.logger, ackFrame, true)
+ }
+ for _, frame := range frames {
+ wire.LogFrame(s.logger, frame.Frame, true)
+ }
}
- for _, frame := range p.frames {
- wire.LogFrame(s.logger, frame.Frame, true)
+
+ // tracing
+ if s.tracer != nil {
+ fs := make([]logging.Frame, 0, len(frames))
+ for _, f := range frames {
+ fs = append(fs, logutils.ConvertFrame(f.Frame))
+ }
+ var ack *logging.AckFrame
+ if ackFrame != nil {
+ ack = logutils.ConvertAckFrame(ackFrame)
+ }
+ s.tracer.SentShortHeaderPacket(
+ &logging.ShortHeader{
+ DestConnectionID: destConnID,
+ PacketNumber: pn,
+ PacketNumberLen: pnLen,
+ KeyPhase: kp,
+ },
+ size,
+ ack,
+ fs,
+ )
}
}
func (s *connection) logCoalescedPacket(packet *coalescedPacket) {
if s.logger.Debug() {
- if len(packet.packets) > 1 {
- s.logger.Debugf("-> Sending coalesced packet (%d parts, %d bytes) for connection %s", len(packet.packets), packet.buffer.Len(), s.logID)
+ // There's a short period between dropping both Initial and Handshake keys and completion of the handshake,
+ // during which we might call PackCoalescedPacket but just pack a short header packet.
+ if len(packet.longHdrPackets) == 0 && packet.shortHdrPacket != nil {
+ s.logShortHeaderPacket(
+ packet.shortHdrPacket.DestConnID,
+ packet.shortHdrPacket.Ack,
+ packet.shortHdrPacket.Frames,
+ packet.shortHdrPacket.PacketNumber,
+ packet.shortHdrPacket.PacketNumberLen,
+ packet.shortHdrPacket.KeyPhase,
+ packet.shortHdrPacket.Length,
+ false,
+ )
+ return
+ }
+ if len(packet.longHdrPackets) > 1 {
+ s.logger.Debugf("-> Sending coalesced packet (%d parts, %d bytes) for connection %s", len(packet.longHdrPackets), packet.buffer.Len(), s.logID)
} else {
- s.logger.Debugf("-> Sending packet %d (%d bytes) for connection %s, %s", packet.packets[0].header.PacketNumber, packet.buffer.Len(), s.logID, packet.packets[0].EncryptionLevel())
+ s.logger.Debugf("-> Sending packet %d (%d bytes) for connection %s, %s", packet.longHdrPackets[0].header.PacketNumber, packet.buffer.Len(), s.logID, packet.longHdrPackets[0].EncryptionLevel())
}
}
- for _, p := range packet.packets {
- s.logPacketContents(p)
+ for _, p := range packet.longHdrPackets {
+ s.logLongHeaderPacket(p)
}
-}
-
-func (s *connection) logPacket(packet *packedPacket) {
- if s.logger.Debug() {
- s.logger.Debugf("-> Sending packet %d (%d bytes) for connection %s, %s", packet.header.PacketNumber, packet.buffer.Len(), s.logID, packet.EncryptionLevel())
+ if p := packet.shortHdrPacket; p != nil {
+ s.logShortHeaderPacket(p.DestConnID, p.Ack, p.Frames, p.PacketNumber, p.PacketNumberLen, p.KeyPhase, p.Length, true)
}
- s.logPacketContents(packet.packetContents)
}
// AcceptStream returns the next stream openend by the peer
@@ -1925,20 +2111,22 @@ func (s *connection) scheduleSending() {
}
}
-func (s *connection) tryQueueingUndecryptablePacket(p *receivedPacket, hdr *wire.Header) {
+// tryQueueingUndecryptablePacket queues a packet for which we're missing the decryption keys.
+// The logging.PacketType is only used for logging purposes.
+func (s *connection) tryQueueingUndecryptablePacket(p *receivedPacket, pt logging.PacketType) {
if s.handshakeComplete {
panic("shouldn't queue undecryptable packets after handshake completion")
}
if len(s.undecryptablePackets)+1 > protocol.MaxUndecryptablePackets {
if s.tracer != nil {
- s.tracer.DroppedPacket(logging.PacketTypeFromHeader(hdr), p.Size(), logging.PacketDropDOSPrevention)
+ s.tracer.DroppedPacket(pt, p.Size(), logging.PacketDropDOSPrevention)
}
s.logger.Infof("Dropping undecryptable packet (%d bytes). Undecryptable packet queue full.", p.Size())
return
}
s.logger.Infof("Queueing packet (%d bytes) for later decryption", p.Size())
if s.tracer != nil {
- s.tracer.BufferedPacket(logging.PacketTypeFromHeader(hdr))
+ s.tracer.BufferedPacket(pt, p.Size())
}
s.undecryptablePackets = append(s.undecryptablePackets, p)
}
@@ -1970,6 +2158,10 @@ func (s *connection) onStreamCompleted(id protocol.StreamID) {
}
func (s *connection) SendMessage(p []byte) error {
+ if !s.supportsDatagrams() {
+ return errors.New("datagram support disabled")
+ }
+
f := &wire.DatagramFrame{DataLenPresent: true}
if protocol.ByteCount(len(p)) > f.MaxDataLen(s.peerParams.MaxDatagramFrameSize, s.version) {
return errors.New("message too large")
@@ -1980,6 +2172,9 @@ func (s *connection) SendMessage(p []byte) error {
}
func (s *connection) ReceiveMessage() ([]byte, error) {
+ if !s.config.EnableDatagrams {
+ return nil, errors.New("datagram support disabled")
+ }
return s.datagramQueue.Receive()
}
diff --git a/vendor/github.com/quic-go/quic-go/connection_timer.go b/vendor/github.com/quic-go/quic-go/connection_timer.go
new file mode 100644
index 000000000..171fdd013
--- /dev/null
+++ b/vendor/github.com/quic-go/quic-go/connection_timer.go
@@ -0,0 +1,51 @@
+package quic
+
+import (
+ "time"
+
+ "github.com/quic-go/quic-go/internal/utils"
+)
+
+var deadlineSendImmediately = time.Time{}.Add(42 * time.Millisecond) // any value > time.Time{} and before time.Now() is fine
+
+type connectionTimer struct {
+ timer *utils.Timer
+ last time.Time
+}
+
+func newTimer() *connectionTimer {
+ return &connectionTimer{timer: utils.NewTimer()}
+}
+
+func (t *connectionTimer) SetRead() {
+ if deadline := t.timer.Deadline(); deadline != deadlineSendImmediately {
+ t.last = deadline
+ }
+ t.timer.SetRead()
+}
+
+func (t *connectionTimer) Chan() <-chan time.Time {
+ return t.timer.Chan()
+}
+
+// SetTimer resets the timer.
+// It makes sure that the deadline is strictly increasing.
+// This prevents busy-looping in cases where the timer fires, but we can't actually send out a packet.
+// This doesn't apply to the pacing deadline, which can be set multiple times to deadlineSendImmediately.
+func (t *connectionTimer) SetTimer(idleTimeoutOrKeepAlive, ackAlarm, lossTime, pacing time.Time) {
+ deadline := idleTimeoutOrKeepAlive
+ if !ackAlarm.IsZero() && ackAlarm.Before(deadline) && ackAlarm.After(t.last) {
+ deadline = ackAlarm
+ }
+ if !lossTime.IsZero() && lossTime.Before(deadline) && lossTime.After(t.last) {
+ deadline = lossTime
+ }
+ if !pacing.IsZero() && pacing.Before(deadline) {
+ deadline = pacing
+ }
+ t.timer.Reset(deadline)
+}
+
+func (t *connectionTimer) Stop() {
+ t.timer.Stop()
+}
diff --git a/vendor/github.com/lucas-clemente/quic-go/crypto_stream.go b/vendor/github.com/quic-go/quic-go/crypto_stream.go
similarity index 88%
rename from vendor/github.com/lucas-clemente/quic-go/crypto_stream.go
rename to vendor/github.com/quic-go/quic-go/crypto_stream.go
index 36e21d330..f10e91202 100644
--- a/vendor/github.com/lucas-clemente/quic-go/crypto_stream.go
+++ b/vendor/github.com/quic-go/quic-go/crypto_stream.go
@@ -4,10 +4,10 @@ import (
"fmt"
"io"
- "github.com/lucas-clemente/quic-go/internal/protocol"
- "github.com/lucas-clemente/quic-go/internal/qerr"
- "github.com/lucas-clemente/quic-go/internal/utils"
- "github.com/lucas-clemente/quic-go/internal/wire"
+ "github.com/quic-go/quic-go/internal/protocol"
+ "github.com/quic-go/quic-go/internal/qerr"
+ "github.com/quic-go/quic-go/internal/utils"
+ "github.com/quic-go/quic-go/internal/wire"
)
type cryptoStream interface {
@@ -56,7 +56,7 @@ func (s *cryptoStreamImpl) HandleCryptoFrame(f *wire.CryptoFrame) error {
// could e.g. be a retransmission
return nil
}
- s.highestOffset = utils.MaxByteCount(s.highestOffset, highestOffset)
+ s.highestOffset = utils.Max(s.highestOffset, highestOffset)
if err := s.queue.Push(f.Data, f.Offset, nil); err != nil {
return err
}
@@ -107,7 +107,7 @@ func (s *cryptoStreamImpl) HasData() bool {
func (s *cryptoStreamImpl) PopCryptoFrame(maxLen protocol.ByteCount) *wire.CryptoFrame {
f := &wire.CryptoFrame{Offset: s.writeOffset}
- n := utils.MinByteCount(f.MaxDataLen(maxLen), protocol.ByteCount(len(s.writeBuf)))
+ n := utils.Min(f.MaxDataLen(maxLen), protocol.ByteCount(len(s.writeBuf)))
f.Data = s.writeBuf[:n]
s.writeBuf = s.writeBuf[n:]
s.writeOffset += n
diff --git a/vendor/github.com/lucas-clemente/quic-go/crypto_stream_manager.go b/vendor/github.com/quic-go/quic-go/crypto_stream_manager.go
similarity index 93%
rename from vendor/github.com/lucas-clemente/quic-go/crypto_stream_manager.go
rename to vendor/github.com/quic-go/quic-go/crypto_stream_manager.go
index 66f900490..91946acfa 100644
--- a/vendor/github.com/lucas-clemente/quic-go/crypto_stream_manager.go
+++ b/vendor/github.com/quic-go/quic-go/crypto_stream_manager.go
@@ -3,8 +3,8 @@ package quic
import (
"fmt"
- "github.com/lucas-clemente/quic-go/internal/protocol"
- "github.com/lucas-clemente/quic-go/internal/wire"
+ "github.com/quic-go/quic-go/internal/protocol"
+ "github.com/quic-go/quic-go/internal/wire"
)
type cryptoDataHandler interface {
diff --git a/vendor/github.com/lucas-clemente/quic-go/datagram_queue.go b/vendor/github.com/quic-go/quic-go/datagram_queue.go
similarity index 51%
rename from vendor/github.com/lucas-clemente/quic-go/datagram_queue.go
rename to vendor/github.com/quic-go/quic-go/datagram_queue.go
index b1cbbf6dc..59c7d069b 100644
--- a/vendor/github.com/lucas-clemente/quic-go/datagram_queue.go
+++ b/vendor/github.com/quic-go/quic-go/datagram_queue.go
@@ -1,14 +1,20 @@
package quic
import (
- "github.com/lucas-clemente/quic-go/internal/protocol"
- "github.com/lucas-clemente/quic-go/internal/utils"
- "github.com/lucas-clemente/quic-go/internal/wire"
+ "sync"
+
+ "github.com/quic-go/quic-go/internal/protocol"
+ "github.com/quic-go/quic-go/internal/utils"
+ "github.com/quic-go/quic-go/internal/wire"
)
type datagramQueue struct {
sendQueue chan *wire.DatagramFrame
- rcvQueue chan []byte
+ nextFrame *wire.DatagramFrame
+
+ rcvMx sync.Mutex
+ rcvQueue [][]byte
+ rcvd chan struct{} // used to notify Receive that a new datagram was received
closeErr error
closed chan struct{}
@@ -24,7 +30,7 @@ func newDatagramQueue(hasData func(), logger utils.Logger) *datagramQueue {
return &datagramQueue{
hasData: hasData,
sendQueue: make(chan *wire.DatagramFrame, 1),
- rcvQueue: make(chan []byte, protocol.DatagramRcvQueueLen),
+ rcvd: make(chan struct{}, 1),
dequeued: make(chan struct{}),
closed: make(chan struct{}),
logger: logger,
@@ -49,35 +55,65 @@ func (h *datagramQueue) AddAndWait(f *wire.DatagramFrame) error {
}
}
-// Get dequeues a DATAGRAM frame for sending.
-func (h *datagramQueue) Get() *wire.DatagramFrame {
+// Peek gets the next DATAGRAM frame for sending.
+// If actually sent out, Pop needs to be called before the next call to Peek.
+func (h *datagramQueue) Peek() *wire.DatagramFrame {
+ if h.nextFrame != nil {
+ return h.nextFrame
+ }
select {
- case f := <-h.sendQueue:
+ case h.nextFrame = <-h.sendQueue:
h.dequeued <- struct{}{}
- return f
default:
return nil
}
+ return h.nextFrame
+}
+
+func (h *datagramQueue) Pop() {
+ if h.nextFrame == nil {
+ panic("datagramQueue BUG: Pop called for nil frame")
+ }
+ h.nextFrame = nil
}
// HandleDatagramFrame handles a received DATAGRAM frame.
func (h *datagramQueue) HandleDatagramFrame(f *wire.DatagramFrame) {
data := make([]byte, len(f.Data))
copy(data, f.Data)
- select {
- case h.rcvQueue <- data:
- default:
+ var queued bool
+ h.rcvMx.Lock()
+ if len(h.rcvQueue) < protocol.DatagramRcvQueueLen {
+ h.rcvQueue = append(h.rcvQueue, data)
+ queued = true
+ select {
+ case h.rcvd <- struct{}{}:
+ default:
+ }
+ }
+ h.rcvMx.Unlock()
+ if !queued && h.logger.Debug() {
h.logger.Debugf("Discarding DATAGRAM frame (%d bytes payload)", len(f.Data))
}
}
// Receive gets a received DATAGRAM frame.
func (h *datagramQueue) Receive() ([]byte, error) {
- select {
- case data := <-h.rcvQueue:
- return data, nil
- case <-h.closed:
- return nil, h.closeErr
+ for {
+ h.rcvMx.Lock()
+ if len(h.rcvQueue) > 0 {
+ data := h.rcvQueue[0]
+ h.rcvQueue = h.rcvQueue[1:]
+ h.rcvMx.Unlock()
+ return data, nil
+ }
+ h.rcvMx.Unlock()
+ select {
+ case <-h.rcvd:
+ continue
+ case <-h.closed:
+ return nil, h.closeErr
+ }
}
}
diff --git a/vendor/github.com/lucas-clemente/quic-go/errors.go b/vendor/github.com/quic-go/quic-go/errors.go
similarity index 89%
rename from vendor/github.com/lucas-clemente/quic-go/errors.go
rename to vendor/github.com/quic-go/quic-go/errors.go
index 0c9f0004e..c9fb0a07b 100644
--- a/vendor/github.com/lucas-clemente/quic-go/errors.go
+++ b/vendor/github.com/quic-go/quic-go/errors.go
@@ -3,7 +3,7 @@ package quic
import (
"fmt"
- "github.com/lucas-clemente/quic-go/internal/qerr"
+ "github.com/quic-go/quic-go/internal/qerr"
)
type (
@@ -46,6 +46,7 @@ const (
type StreamError struct {
StreamID StreamID
ErrorCode StreamErrorCode
+ Remote bool
}
func (e *StreamError) Is(target error) bool {
@@ -54,5 +55,9 @@ func (e *StreamError) Is(target error) bool {
}
func (e *StreamError) Error() string {
- return fmt.Sprintf("stream %d canceled with error code %d", e.StreamID, e.ErrorCode)
+ pers := "local"
+ if e.Remote {
+ pers = "remote"
+ }
+ return fmt.Sprintf("stream %d canceled by %s with error code %d", e.StreamID, pers, e.ErrorCode)
}
diff --git a/vendor/github.com/lucas-clemente/quic-go/frame_sorter.go b/vendor/github.com/quic-go/quic-go/frame_sorter.go
similarity index 86%
rename from vendor/github.com/lucas-clemente/quic-go/frame_sorter.go
rename to vendor/github.com/quic-go/quic-go/frame_sorter.go
index aeafa7d42..bee0abadb 100644
--- a/vendor/github.com/lucas-clemente/quic-go/frame_sorter.go
+++ b/vendor/github.com/quic-go/quic-go/frame_sorter.go
@@ -2,11 +2,24 @@ package quic
import (
"errors"
+ "sync"
- "github.com/lucas-clemente/quic-go/internal/protocol"
- "github.com/lucas-clemente/quic-go/internal/utils"
+ "github.com/quic-go/quic-go/internal/protocol"
+ list "github.com/quic-go/quic-go/internal/utils/linkedlist"
)
+// byteInterval is an interval from one ByteCount to the other
+type byteInterval struct {
+ Start protocol.ByteCount
+ End protocol.ByteCount
+}
+
+var byteIntervalElementPool sync.Pool
+
+func init() {
+ byteIntervalElementPool = *list.NewPool[byteInterval]()
+}
+
type frameSorterEntry struct {
Data []byte
DoneCb func()
@@ -15,17 +28,17 @@ type frameSorterEntry struct {
type frameSorter struct {
queue map[protocol.ByteCount]frameSorterEntry
readPos protocol.ByteCount
- gaps *utils.ByteIntervalList
+ gaps *list.List[byteInterval]
}
var errDuplicateStreamData = errors.New("duplicate stream data")
func newFrameSorter() *frameSorter {
s := frameSorter{
- gaps: utils.NewByteIntervalList(),
+ gaps: list.NewWithPool[byteInterval](&byteIntervalElementPool),
queue: make(map[protocol.ByteCount]frameSorterEntry),
}
- s.gaps.PushFront(utils.ByteInterval{Start: 0, End: protocol.MaxByteCount})
+ s.gaps.PushFront(byteInterval{Start: 0, End: protocol.MaxByteCount})
return &s
}
@@ -118,7 +131,7 @@ func (s *frameSorter) push(data []byte, offset protocol.ByteCount, doneCb func()
if !startGapEqualsEndGap {
s.deleteConsecutive(startGapEnd)
- var nextGap *utils.ByteIntervalElement
+ var nextGap *list.Element[byteInterval]
for gap := startGapNext; gap.Value.End < endGapStart; gap = nextGap {
nextGap = gap.Next()
s.deleteConsecutive(gap.Value.End)
@@ -140,7 +153,7 @@ func (s *frameSorter) push(data []byte, offset protocol.ByteCount, doneCb func()
} else {
if startGapEqualsEndGap && adjustedStartGapEnd {
// The frame split the existing gap into two.
- s.gaps.InsertAfter(utils.ByteInterval{Start: end, End: startGapEnd}, startGap)
+ s.gaps.InsertAfter(byteInterval{Start: end, End: startGapEnd}, startGap)
} else if !startGapEqualsEndGap {
endGap.Value.Start = end
}
@@ -164,7 +177,7 @@ func (s *frameSorter) push(data []byte, offset protocol.ByteCount, doneCb func()
return nil
}
-func (s *frameSorter) findStartGap(offset protocol.ByteCount) (*utils.ByteIntervalElement, bool) {
+func (s *frameSorter) findStartGap(offset protocol.ByteCount) (*list.Element[byteInterval], bool) {
for gap := s.gaps.Front(); gap != nil; gap = gap.Next() {
if offset >= gap.Value.Start && offset <= gap.Value.End {
return gap, true
@@ -176,7 +189,7 @@ func (s *frameSorter) findStartGap(offset protocol.ByteCount) (*utils.ByteInterv
panic("no gap found")
}
-func (s *frameSorter) findEndGap(startGap *utils.ByteIntervalElement, offset protocol.ByteCount) (*utils.ByteIntervalElement, bool) {
+func (s *frameSorter) findEndGap(startGap *list.Element[byteInterval], offset protocol.ByteCount) (*list.Element[byteInterval], bool) {
for gap := startGap; gap != nil; gap = gap.Next() {
if offset >= gap.Value.Start && offset < gap.Value.End {
return gap, true
diff --git a/vendor/github.com/lucas-clemente/quic-go/framer.go b/vendor/github.com/quic-go/quic-go/framer.go
similarity index 77%
rename from vendor/github.com/lucas-clemente/quic-go/framer.go
rename to vendor/github.com/quic-go/quic-go/framer.go
index 29d36b85c..0b2059164 100644
--- a/vendor/github.com/lucas-clemente/quic-go/framer.go
+++ b/vendor/github.com/quic-go/quic-go/framer.go
@@ -4,20 +4,20 @@ import (
"errors"
"sync"
- "github.com/lucas-clemente/quic-go/internal/ackhandler"
- "github.com/lucas-clemente/quic-go/internal/protocol"
- "github.com/lucas-clemente/quic-go/internal/wire"
- "github.com/lucas-clemente/quic-go/quicvarint"
+ "github.com/quic-go/quic-go/internal/ackhandler"
+ "github.com/quic-go/quic-go/internal/protocol"
+ "github.com/quic-go/quic-go/internal/wire"
+ "github.com/quic-go/quic-go/quicvarint"
)
type framer interface {
HasData() bool
QueueControlFrame(wire.Frame)
- AppendControlFrames([]ackhandler.Frame, protocol.ByteCount) ([]ackhandler.Frame, protocol.ByteCount)
+ AppendControlFrames([]*ackhandler.Frame, protocol.ByteCount, protocol.VersionNumber) ([]*ackhandler.Frame, protocol.ByteCount)
AddActiveStream(protocol.StreamID)
- AppendStreamFrames([]ackhandler.Frame, protocol.ByteCount) ([]ackhandler.Frame, protocol.ByteCount)
+ AppendStreamFrames([]*ackhandler.Frame, protocol.ByteCount, protocol.VersionNumber) ([]*ackhandler.Frame, protocol.ByteCount)
Handle0RTTRejection() error
}
@@ -26,7 +26,6 @@ type framerI struct {
mutex sync.Mutex
streamGetter streamGetter
- version protocol.VersionNumber
activeStreams map[protocol.StreamID]struct{}
streamQueue []protocol.StreamID
@@ -37,14 +36,10 @@ type framerI struct {
var _ framer = &framerI{}
-func newFramer(
- streamGetter streamGetter,
- v protocol.VersionNumber,
-) framer {
+func newFramer(streamGetter streamGetter) framer {
return &framerI{
streamGetter: streamGetter,
activeStreams: make(map[protocol.StreamID]struct{}),
- version: v,
}
}
@@ -67,16 +62,18 @@ func (f *framerI) QueueControlFrame(frame wire.Frame) {
f.controlFrameMutex.Unlock()
}
-func (f *framerI) AppendControlFrames(frames []ackhandler.Frame, maxLen protocol.ByteCount) ([]ackhandler.Frame, protocol.ByteCount) {
+func (f *framerI) AppendControlFrames(frames []*ackhandler.Frame, maxLen protocol.ByteCount, v protocol.VersionNumber) ([]*ackhandler.Frame, protocol.ByteCount) {
var length protocol.ByteCount
f.controlFrameMutex.Lock()
for len(f.controlFrames) > 0 {
frame := f.controlFrames[len(f.controlFrames)-1]
- frameLen := frame.Length(f.version)
+ frameLen := frame.Length(v)
if length+frameLen > maxLen {
break
}
- frames = append(frames, ackhandler.Frame{Frame: frame})
+ af := ackhandler.GetFrame()
+ af.Frame = frame
+ frames = append(frames, af)
length += frameLen
f.controlFrames = f.controlFrames[:len(f.controlFrames)-1]
}
@@ -93,7 +90,7 @@ func (f *framerI) AddActiveStream(id protocol.StreamID) {
f.mutex.Unlock()
}
-func (f *framerI) AppendStreamFrames(frames []ackhandler.Frame, maxLen protocol.ByteCount) ([]ackhandler.Frame, protocol.ByteCount) {
+func (f *framerI) AppendStreamFrames(frames []*ackhandler.Frame, maxLen protocol.ByteCount, v protocol.VersionNumber) ([]*ackhandler.Frame, protocol.ByteCount) {
var length protocol.ByteCount
var lastFrame *ackhandler.Frame
f.mutex.Lock()
@@ -118,7 +115,7 @@ func (f *framerI) AppendStreamFrames(frames []ackhandler.Frame, maxLen protocol.
// Therefore, we can pretend to have more bytes available when popping
// the STREAM frame (which will always have the DataLen set).
remainingLen += quicvarint.Len(uint64(remainingLen))
- frame, hasMoreData := str.popStreamFrame(remainingLen)
+ frame, hasMoreData := str.popStreamFrame(remainingLen, v)
if hasMoreData { // put the stream back in the queue (at the end)
f.streamQueue = append(f.streamQueue, id)
} else { // no more data to send. Stream is not active any more
@@ -130,16 +127,16 @@ func (f *framerI) AppendStreamFrames(frames []ackhandler.Frame, maxLen protocol.
if frame == nil {
continue
}
- frames = append(frames, *frame)
- length += frame.Length(f.version)
+ frames = append(frames, frame)
+ length += frame.Length(v)
lastFrame = frame
}
f.mutex.Unlock()
if lastFrame != nil {
- lastFrameLen := lastFrame.Length(f.version)
+ lastFrameLen := lastFrame.Length(v)
// account for the smaller size of the last STREAM frame
lastFrame.Frame.(*wire.StreamFrame).DataLenPresent = false
- length += lastFrame.Length(f.version) - lastFrameLen
+ length += lastFrame.Length(v) - lastFrameLen
}
return frames, length
}
diff --git a/vendor/github.com/quic-go/quic-go/http3/body.go b/vendor/github.com/quic-go/quic-go/http3/body.go
new file mode 100644
index 000000000..15985a1c2
--- /dev/null
+++ b/vendor/github.com/quic-go/quic-go/http3/body.go
@@ -0,0 +1,135 @@
+package http3
+
+import (
+ "context"
+ "io"
+ "net"
+
+ "github.com/quic-go/quic-go"
+)
+
// The HTTPStreamer allows taking over a HTTP/3 stream. The interface is implemented by:
// * for the server: the http.Request.Body
// * for the client: the http.Response.Body
// On the client side, the stream will be closed for writing, unless the DontCloseRequestStream RoundTripOpt was set.
// When a stream is taken over, it's the caller's responsibility to close the stream.
type HTTPStreamer interface {
	HTTPStream() Stream
}

// A StreamCreator allows opening new QUIC streams on an established connection.
type StreamCreator interface {
	// Context returns a context that is cancelled when the underlying connection is closed.
	Context() context.Context
	OpenStream() (quic.Stream, error)
	OpenStreamSync(context.Context) (quic.Stream, error)
	OpenUniStream() (quic.SendStream, error)
	OpenUniStreamSync(context.Context) (quic.SendStream, error)
	LocalAddr() net.Addr
	RemoteAddr() net.Addr
	ConnectionState() quic.ConnectionState
}

// compile-time check that quic.Connection satisfies StreamCreator
var _ StreamCreator = quic.Connection(nil)

// A Hijacker allows hijacking of the stream creating part of a quic.Session from a http.Response.Body.
// It is used by WebTransport to create WebTransport streams after a session has been established.
type Hijacker interface {
	StreamCreator() StreamCreator
}

// The body of a http.Request or http.Response.
type body struct {
	str quic.Stream

	wasHijacked bool // set when HTTPStream is called
}

var (
	_ io.ReadCloser = &body{}
	_ HTTPStreamer  = &body{}
)

// newRequestBody wraps the given stream as a http.Request body.
func newRequestBody(str Stream) *body {
	return &body{str: str}
}

// HTTPStream takes over the underlying stream and marks the body as hijacked.
// The caller becomes responsible for closing the stream.
func (r *body) HTTPStream() Stream {
	r.wasHijacked = true
	return r.str
}

// wasStreamHijacked reports whether HTTPStream was called.
func (r *body) wasStreamHijacked() bool {
	return r.wasHijacked
}

// Read reads from the underlying HTTP/3 stream.
func (r *body) Read(b []byte) (int, error) {
	return r.str.Read(b)
}

// Close cancels reading on the underlying stream. It never fails.
// Note that this does not close the stream for writing.
func (r *body) Close() error {
	r.str.CancelRead(quic.StreamErrorCode(errorRequestCanceled))
	return nil
}

// hijackableBody is the body of a http.Response. In addition to the plain
// body it carries the connection (to implement Hijacker) and a channel used
// to signal that the application is done with the response.
type hijackableBody struct {
	body
	conn quic.Connection // only needed to implement Hijacker

	// only set for the http.Response
	// The channel is closed when the user is done with this response:
	// either when Read() errors, or when Close() is called.
	reqDone       chan<- struct{}
	reqDoneClosed bool
}

var (
	_ Hijacker     = &hijackableBody{}
	_ HTTPStreamer = &hijackableBody{}
)

// newResponseBody wraps the given stream as a http.Response body.
// done (may be nil) is closed once the application is done with the response.
func newResponseBody(str Stream, conn quic.Connection, done chan<- struct{}) *hijackableBody {
	return &hijackableBody{
		body: body{
			str: str,
		},
		reqDone: done,
		conn:    conn,
	}
}

// StreamCreator returns the underlying connection, which can be used to open new streams.
func (r *hijackableBody) StreamCreator() StreamCreator {
	return r.conn
}
+
+func (r *hijackableBody) Read(b []byte) (int, error) {
+ n, err := r.str.Read(b)
+ if err != nil {
+ r.requestDone()
+ }
+ return n, err
+}
+
+func (r *hijackableBody) requestDone() {
+ if r.reqDoneClosed || r.reqDone == nil {
+ return
+ }
+ if r.reqDone != nil {
+ close(r.reqDone)
+ }
+ r.reqDoneClosed = true
+}
+
// StreamID returns the stream ID of the underlying QUIC stream.
func (r *body) StreamID() quic.StreamID {
	return r.str.StreamID()
}

// Close signals that the application is done with the response and cancels
// reading on the underlying stream. It never fails.
func (r *hijackableBody) Close() error {
	r.requestDone()
	// If the EOF was read, CancelRead() is a no-op.
	r.str.CancelRead(quic.StreamErrorCode(errorRequestCanceled))
	return nil
}

// HTTPStream takes over the underlying stream.
// NOTE(review): unlike body.HTTPStream, this does not set the wasHijacked
// flag — presumably intentional for the client side, but confirm upstream.
func (r *hijackableBody) HTTPStream() Stream {
	return r.str
}
diff --git a/vendor/github.com/quic-go/quic-go/http3/capsule.go b/vendor/github.com/quic-go/quic-go/http3/capsule.go
new file mode 100644
index 000000000..7bdcd4e57
--- /dev/null
+++ b/vendor/github.com/quic-go/quic-go/http3/capsule.go
@@ -0,0 +1,55 @@
+package http3
+
+import (
+ "io"
+
+ "github.com/quic-go/quic-go/quicvarint"
+)
+
+// CapsuleType is the type of the capsule.
+type CapsuleType uint64
+
+type exactReader struct {
+ R *io.LimitedReader
+}
+
+func (r *exactReader) Read(b []byte) (int, error) {
+ n, err := r.R.Read(b)
+ if r.R.N > 0 {
+ return n, io.ErrUnexpectedEOF
+ }
+ return n, err
+}
+
// ParseCapsule parses the header of a Capsule.
// It returns an io.LimitedReader that can be used to read the Capsule value.
// The Capsule value must be read entirely (i.e. until the io.EOF) before using r again.
func ParseCapsule(r quicvarint.Reader) (CapsuleType, io.Reader, error) {
	// capsule type (varint)
	ct, err := quicvarint.Read(r)
	if err != nil {
		if err == io.EOF {
			// a capsule cut off mid-header is a truncation error
			return 0, nil, io.ErrUnexpectedEOF
		}
		return 0, nil, err
	}
	// capsule value length (varint)
	l, err := quicvarint.Read(r)
	if err != nil {
		if err == io.EOF {
			return 0, nil, io.ErrUnexpectedEOF
		}
		return 0, nil, err
	}
	return CapsuleType(ct), &exactReader{R: io.LimitReader(r, int64(l)).(*io.LimitedReader)}, nil
}
+
+// WriteCapsule writes a capsule
+func WriteCapsule(w quicvarint.Writer, ct CapsuleType, value []byte) error {
+ b := make([]byte, 0, 16)
+ b = quicvarint.Append(b, uint64(ct))
+ b = quicvarint.Append(b, uint64(len(value)))
+ if _, err := w.Write(b); err != nil {
+ return err
+ }
+ _, err := w.Write(value)
+ return err
+}
diff --git a/vendor/github.com/quic-go/quic-go/http3/client.go b/vendor/github.com/quic-go/quic-go/http3/client.go
new file mode 100644
index 000000000..c63505e1f
--- /dev/null
+++ b/vendor/github.com/quic-go/quic-go/http3/client.go
@@ -0,0 +1,457 @@
+package http3
+
+import (
+ "context"
+ "crypto/tls"
+ "errors"
+ "fmt"
+ "io"
+ "net/http"
+ "strconv"
+ "sync"
+ "sync/atomic"
+ "time"
+
+ "github.com/quic-go/quic-go"
+ "github.com/quic-go/quic-go/internal/protocol"
+ "github.com/quic-go/quic-go/internal/qtls"
+ "github.com/quic-go/quic-go/internal/utils"
+ "github.com/quic-go/quic-go/quicvarint"
+
+ "github.com/quic-go/qpack"
+)
+
// MethodGet0RTT allows a GET request to be sent using 0-RTT.
// Note that 0-RTT data doesn't provide replay protection.
const MethodGet0RTT = "GET_0RTT"

const (
	// defaultUserAgent is used when the request doesn't set a User-Agent header.
	defaultUserAgent = "quic-go HTTP/3"
	// defaultMaxResponseHeaderBytes caps the size of the response HEADERS frame.
	defaultMaxResponseHeaderBytes = 10 * 1 << 20 // 10 MB
)

// defaultQuicConfig is used when the caller doesn't provide a quic.Config.
var defaultQuicConfig = &quic.Config{
	MaxIncomingStreams: -1, // don't allow the server to create bidirectional streams
	KeepAlivePeriod:    10 * time.Second,
	Versions:           []protocol.VersionNumber{protocol.VersionTLS},
}

// dialFunc is the signature of the function used to establish the QUIC connection.
type dialFunc func(ctx context.Context, addr string, tlsCfg *tls.Config, cfg *quic.Config) (quic.EarlyConnection, error)

// dialAddr is a package-level indirection for the default dialer (replaceable in tests).
var dialAddr = quic.DialAddrEarlyContext

// roundTripperOpts carries the RoundTripper settings relevant for a single client.
type roundTripperOpts struct {
	DisableCompression bool
	EnableDatagram     bool
	MaxHeaderBytes     int64
	AdditionalSettings map[uint64]uint64
	StreamHijacker     func(FrameType, quic.Connection, quic.Stream, error) (hijacked bool, err error)
	UniStreamHijacker  func(StreamType, quic.Connection, quic.ReceiveStream, error) (hijacked bool)
}

// client is a HTTP3 client doing requests
type client struct {
	tlsConf *tls.Config
	config  *quic.Config
	opts    *roundTripperOpts

	dialOnce     sync.Once // the connection is dialed at most once
	dialer       dialFunc
	handshakeErr error // set by dial(); sticky for all subsequent requests

	requestWriter *requestWriter

	decoder *qpack.Decoder

	hostname string
	conn     atomic.Pointer[quic.EarlyConnection]

	logger utils.Logger
}

var _ roundTripCloser = &client{}

// newClient creates a client for the given hostname.
// It normalizes the QUIC config (exactly one version, no incoming
// bidirectional streams by default) and forces the TLS ALPN to the
// HTTP/3 value matching that version.
func newClient(hostname string, tlsConf *tls.Config, opts *roundTripperOpts, conf *quic.Config, dialer dialFunc) (roundTripCloser, error) {
	if conf == nil {
		conf = defaultQuicConfig.Clone()
	} else if len(conf.Versions) == 0 {
		conf = conf.Clone()
		conf.Versions = []quic.VersionNumber{defaultQuicConfig.Versions[0]}
	}
	if len(conf.Versions) != 1 {
		return nil, errors.New("can only use a single QUIC version for dialing a HTTP/3 connection")
	}
	if conf.MaxIncomingStreams == 0 {
		conf.MaxIncomingStreams = -1 // don't allow any bidirectional streams
	}
	conf.EnableDatagrams = opts.EnableDatagram
	logger := utils.DefaultLogger.WithPrefix("h3 client")

	if tlsConf == nil {
		tlsConf = &tls.Config{}
	} else {
		tlsConf = tlsConf.Clone()
	}
	// Replace existing ALPNs by H3
	tlsConf.NextProtos = []string{versionToALPN(conf.Versions[0])}

	return &client{
		hostname:      authorityAddr("https", hostname),
		tlsConf:       tlsConf,
		requestWriter: newRequestWriter(logger),
		decoder:       qpack.NewDecoder(func(hf qpack.HeaderField) {}),
		config:        conf,
		opts:          opts,
		dialer:        dialer,
		logger:        logger,
	}, nil
}

// dial establishes the QUIC connection, sends the SETTINGS frame (possibly
// as 0-RTT data) and starts the goroutines accepting peer-initiated streams.
func (c *client) dial(ctx context.Context) error {
	var err error
	var conn quic.EarlyConnection
	if c.dialer != nil {
		conn, err = c.dialer(ctx, c.hostname, c.tlsConf, c.config)
	} else {
		conn, err = dialAddr(ctx, c.hostname, c.tlsConf, c.config)
	}
	if err != nil {
		return err
	}
	c.conn.Store(&conn)

	// send the SETTINGs frame, using 0-RTT data, if possible
	go func() {
		if err := c.setupConn(conn); err != nil {
			c.logger.Debugf("Setting up connection failed: %s", err)
			conn.CloseWithError(quic.ApplicationErrorCode(errorInternalError), "")
		}
	}()

	if c.opts.StreamHijacker != nil {
		go c.handleBidirectionalStreams(conn)
	}
	go c.handleUnidirectionalStreams(conn)
	return nil
}
+
+func (c *client) setupConn(conn quic.EarlyConnection) error {
+ // open the control stream
+ str, err := conn.OpenUniStream()
+ if err != nil {
+ return err
+ }
+ b := make([]byte, 0, 64)
+ b = quicvarint.Append(b, streamTypeControlStream)
+ // send the SETTINGS frame
+ b = (&settingsFrame{Datagram: c.opts.EnableDatagram, Other: c.opts.AdditionalSettings}).Append(b)
+ _, err = str.Write(b)
+ return err
+}
+
// handleBidirectionalStreams accepts incoming bidirectional streams and hands
// them to the StreamHijacker. HTTP/3 itself never uses server-initiated
// bidirectional streams, so any non-hijacked stream is a connection error.
func (c *client) handleBidirectionalStreams(conn quic.EarlyConnection) {
	for {
		str, err := conn.AcceptStream(context.Background())
		if err != nil {
			c.logger.Debugf("accepting bidirectional stream failed: %s", err)
			return
		}
		go func(str quic.Stream) {
			_, err := parseNextFrame(str, func(ft FrameType, e error) (processed bool, err error) {
				return c.opts.StreamHijacker(ft, conn, str, e)
			})
			if err == errHijacked {
				// the application took over this stream
				return
			}
			if err != nil {
				c.logger.Debugf("error handling stream: %s", err)
			}
			conn.CloseWithError(quic.ApplicationErrorCode(errorFrameUnexpected), "received HTTP/3 frame on bidirectional stream")
		}(str)
	}
}

// handleUnidirectionalStreams accepts incoming unidirectional streams.
// It processes the control stream (expecting a SETTINGS frame), ignores or
// rejects the stream types defined by HTTP/3, and offers all other stream
// types to the UniStreamHijacker.
func (c *client) handleUnidirectionalStreams(conn quic.EarlyConnection) {
	for {
		str, err := conn.AcceptUniStream(context.Background())
		if err != nil {
			c.logger.Debugf("accepting unidirectional stream failed: %s", err)
			return
		}

		go func(str quic.ReceiveStream) {
			streamType, err := quicvarint.Read(quicvarint.NewReader(str))
			if err != nil {
				if c.opts.UniStreamHijacker != nil && c.opts.UniStreamHijacker(StreamType(streamType), conn, str, err) {
					return
				}
				c.logger.Debugf("reading stream type on stream %d failed: %s", str.StreamID(), err)
				return
			}
			// We're only interested in the control stream here.
			switch streamType {
			case streamTypeControlStream:
			case streamTypeQPACKEncoderStream, streamTypeQPACKDecoderStream:
				// Our QPACK implementation doesn't use the dynamic table yet.
				// TODO: check that only one stream of each type is opened.
				return
			case streamTypePushStream:
				// We never increased the Push ID, so we don't expect any push streams.
				conn.CloseWithError(quic.ApplicationErrorCode(errorIDError), "")
				return
			default:
				if c.opts.UniStreamHijacker != nil && c.opts.UniStreamHijacker(StreamType(streamType), conn, str, nil) {
					return
				}
				str.CancelRead(quic.StreamErrorCode(errorStreamCreationError))
				return
			}
			// control stream: the first frame must be SETTINGS
			f, err := parseNextFrame(str, nil)
			if err != nil {
				conn.CloseWithError(quic.ApplicationErrorCode(errorFrameError), "")
				return
			}
			sf, ok := f.(*settingsFrame)
			if !ok {
				conn.CloseWithError(quic.ApplicationErrorCode(errorMissingSettings), "")
				return
			}
			if !sf.Datagram {
				return
			}
			// If datagram support was enabled on our side as well as on the server side,
			// we can expect it to have been negotiated both on the transport and on the HTTP/3 layer.
			// Note: ConnectionState() will block until the handshake is complete (relevant when using 0-RTT).
			if c.opts.EnableDatagram && !conn.ConnectionState().SupportsDatagrams {
				conn.CloseWithError(quic.ApplicationErrorCode(errorSettingsError), "missing QUIC Datagram support")
			}
		}(str)
	}
}
+
+func (c *client) Close() error {
+ conn := c.conn.Load()
+ if conn == nil {
+ return nil
+ }
+ return (*conn).CloseWithError(quic.ApplicationErrorCode(errorNoError), "")
+}
+
+func (c *client) maxHeaderBytes() uint64 {
+ if c.opts.MaxHeaderBytes <= 0 {
+ return defaultMaxResponseHeaderBytes
+ }
+ return uint64(c.opts.MaxHeaderBytes)
+}
+
// RoundTripOpt executes a request and returns a response
func (c *client) RoundTripOpt(req *http.Request, opt RoundTripOpt) (*http.Response, error) {
	if authorityAddr("https", hostnameFromRequest(req)) != c.hostname {
		return nil, fmt.Errorf("http3 client BUG: RoundTripOpt called for the wrong client (expected %s, got %s)", c.hostname, req.Host)
	}

	// dial at most once; the result (success or error) is sticky for this client
	c.dialOnce.Do(func() {
		c.handshakeErr = c.dial(req.Context())
	})
	if c.handshakeErr != nil {
		return nil, c.handshakeErr
	}

	// At this point, c.conn is guaranteed to be set.
	conn := *c.conn.Load()

	// Immediately send out this request, if this is a 0-RTT request.
	if req.Method == MethodGet0RTT {
		req.Method = http.MethodGet
	} else {
		// wait for the handshake to complete
		select {
		case <-conn.HandshakeComplete().Done():
		case <-req.Context().Done():
			return nil, req.Context().Err()
		}
	}

	str, err := conn.OpenStreamSync(req.Context())
	if err != nil {
		return nil, err
	}

	// Request Cancellation:
	// This go routine keeps running even after RoundTripOpt() returns.
	// It is shut down when the application is done processing the body.
	reqDone := make(chan struct{})
	done := make(chan struct{})
	go func() {
		defer close(done)
		select {
		case <-req.Context().Done():
			str.CancelWrite(quic.StreamErrorCode(errorRequestCanceled))
			str.CancelRead(quic.StreamErrorCode(errorRequestCanceled))
		case <-reqDone:
		}
	}()

	// If the caller keeps the request stream open, the response body must not
	// close reqDone; it is closed explicitly below instead.
	doneChan := reqDone
	if opt.DontCloseRequestStream {
		doneChan = nil
	}
	rsp, rerr := c.doRequest(req, conn, str, opt, doneChan)
	if rerr.err != nil { // if any error occurred
		close(reqDone)
		<-done
		if rerr.streamErr != 0 { // if it was a stream error
			str.CancelWrite(quic.StreamErrorCode(rerr.streamErr))
		}
		if rerr.connErr != 0 { // if it was a connection error
			var reason string
			if rerr.err != nil {
				reason = rerr.err.Error()
			}
			conn.CloseWithError(quic.ApplicationErrorCode(rerr.connErr), reason)
		}
		return nil, rerr.err
	}
	if opt.DontCloseRequestStream {
		close(reqDone)
		<-done
	}
	return rsp, rerr.err
}

// sendRequestBody copies the request body to the stream in fixed-size chunks,
// canceling the write side of the stream on any read error other than io.EOF.
// It always closes the body.
func (c *client) sendRequestBody(str Stream, body io.ReadCloser) error {
	defer body.Close()
	b := make([]byte, bodyCopyBufferSize)
	for {
		n, rerr := body.Read(b)
		if n == 0 {
			if rerr == nil {
				continue
			}
			if rerr == io.EOF {
				break
			}
		}
		// write whatever was read, even if the read also returned an error
		if _, err := str.Write(b[:n]); err != nil {
			return err
		}
		if rerr != nil {
			if rerr == io.EOF {
				break
			}
			str.CancelWrite(quic.StreamErrorCode(errorRequestCanceled))
			return rerr
		}
	}
	return nil
}

// doRequest writes the request headers (and, asynchronously, the body) to str,
// then reads and parses the response HEADERS frame and constructs the
// http.Response. The response body is returned unread; reqDone is handed to
// the body so the caller learns when the application is done with it.
func (c *client) doRequest(req *http.Request, conn quic.EarlyConnection, str quic.Stream, opt RoundTripOpt, reqDone chan<- struct{}) (*http.Response, requestError) {
	var requestGzip bool
	if !c.opts.DisableCompression && req.Method != "HEAD" && req.Header.Get("Accept-Encoding") == "" && req.Header.Get("Range") == "" {
		requestGzip = true
	}
	if err := c.requestWriter.WriteRequestHeader(str, req, requestGzip); err != nil {
		return nil, newStreamError(errorInternalError, err)
	}

	if req.Body == nil && !opt.DontCloseRequestStream {
		str.Close()
	}

	hstr := newStream(str, func() { conn.CloseWithError(quic.ApplicationErrorCode(errorFrameUnexpected), "") })
	if req.Body != nil {
		// send the request body asynchronously
		go func() {
			if err := c.sendRequestBody(hstr, req.Body); err != nil {
				c.logger.Errorf("Error writing request: %s", err)
			}
			if !opt.DontCloseRequestStream {
				hstr.Close()
			}
		}()
	}

	// the response must start with a HEADERS frame
	frame, err := parseNextFrame(str, nil)
	if err != nil {
		return nil, newStreamError(errorFrameError, err)
	}
	hf, ok := frame.(*headersFrame)
	if !ok {
		return nil, newConnError(errorFrameUnexpected, errors.New("expected first frame to be a HEADERS frame"))
	}
	if hf.Length > c.maxHeaderBytes() {
		return nil, newStreamError(errorFrameError, fmt.Errorf("HEADERS frame too large: %d bytes (max: %d)", hf.Length, c.maxHeaderBytes()))
	}
	headerBlock := make([]byte, hf.Length)
	if _, err := io.ReadFull(str, headerBlock); err != nil {
		return nil, newStreamError(errorRequestIncomplete, err)
	}
	hfs, err := c.decoder.DecodeFull(headerBlock)
	if err != nil {
		// TODO: use the right error code
		return nil, newConnError(errorGeneralProtocolError, err)
	}

	connState := qtls.ToTLSConnectionState(conn.ConnectionState().TLS)
	res := &http.Response{
		Proto:      "HTTP/3.0",
		ProtoMajor: 3,
		Header:     http.Header{},
		TLS:        &connState,
		Request:    req,
	}
	for _, hf := range hfs {
		switch hf.Name {
		case ":status":
			status, err := strconv.Atoi(hf.Value)
			if err != nil {
				return nil, newStreamError(errorGeneralProtocolError, errors.New("malformed non-numeric status pseudo header"))
			}
			res.StatusCode = status
			res.Status = hf.Value + " " + http.StatusText(status)
		default:
			res.Header.Add(hf.Name, hf.Value)
		}
	}
	respBody := newResponseBody(hstr, conn, reqDone)

	// Rules for when to set Content-Length are defined in https://tools.ietf.org/html/rfc7230#section-3.3.2.
	_, hasTransferEncoding := res.Header["Transfer-Encoding"]
	isInformational := res.StatusCode >= 100 && res.StatusCode < 200
	isNoContent := res.StatusCode == http.StatusNoContent
	isSuccessfulConnect := req.Method == http.MethodConnect && res.StatusCode >= 200 && res.StatusCode < 300
	if !hasTransferEncoding && !isInformational && !isNoContent && !isSuccessfulConnect {
		res.ContentLength = -1
		if clens, ok := res.Header["Content-Length"]; ok && len(clens) == 1 {
			if clen64, err := strconv.ParseInt(clens[0], 10, 64); err == nil {
				res.ContentLength = clen64
			}
		}
	}

	// transparently decompress, mirroring net/http's gzip handling
	if requestGzip && res.Header.Get("Content-Encoding") == "gzip" {
		res.Header.Del("Content-Encoding")
		res.Header.Del("Content-Length")
		res.ContentLength = -1
		res.Body = newGzipReader(respBody)
		res.Uncompressed = true
	} else {
		res.Body = respBody
	}

	return res, requestError{}
}
+
+func (c *client) HandshakeComplete() bool {
+ conn := c.conn.Load()
+ if conn == nil {
+ return false
+ }
+ select {
+ case <-(*conn).HandshakeComplete().Done():
+ return true
+ default:
+ return false
+ }
+}
diff --git a/vendor/github.com/quic-go/quic-go/http3/error_codes.go b/vendor/github.com/quic-go/quic-go/http3/error_codes.go
new file mode 100644
index 000000000..5df9b5df6
--- /dev/null
+++ b/vendor/github.com/quic-go/quic-go/http3/error_codes.go
@@ -0,0 +1,73 @@
+package http3
+
+import (
+ "fmt"
+
+ "github.com/quic-go/quic-go"
+)
+
// errorCode is a HTTP/3 application error code (the H3_* codes),
// carried on the wire as a quic.ApplicationErrorCode.
type errorCode quic.ApplicationErrorCode

const (
	errorNoError              errorCode = 0x100
	errorGeneralProtocolError errorCode = 0x101
	errorInternalError        errorCode = 0x102
	errorStreamCreationError  errorCode = 0x103
	errorClosedCriticalStream errorCode = 0x104
	errorFrameUnexpected      errorCode = 0x105
	errorFrameError           errorCode = 0x106
	errorExcessiveLoad        errorCode = 0x107
	errorIDError              errorCode = 0x108
	errorSettingsError        errorCode = 0x109
	errorMissingSettings      errorCode = 0x10a
	errorRequestRejected      errorCode = 0x10b
	errorRequestCanceled      errorCode = 0x10c
	errorRequestIncomplete    errorCode = 0x10d
	errorMessageError         errorCode = 0x10e
	errorConnectError         errorCode = 0x10f
	errorVersionFallback      errorCode = 0x110
	// H3_DATAGRAM_ERROR; note that this value doesn't fit in 16 bits.
	errorDatagramError errorCode = 0x4a1268
)
+
+func (e errorCode) String() string {
+ switch e {
+ case errorNoError:
+ return "H3_NO_ERROR"
+ case errorGeneralProtocolError:
+ return "H3_GENERAL_PROTOCOL_ERROR"
+ case errorInternalError:
+ return "H3_INTERNAL_ERROR"
+ case errorStreamCreationError:
+ return "H3_STREAM_CREATION_ERROR"
+ case errorClosedCriticalStream:
+ return "H3_CLOSED_CRITICAL_STREAM"
+ case errorFrameUnexpected:
+ return "H3_FRAME_UNEXPECTED"
+ case errorFrameError:
+ return "H3_FRAME_ERROR"
+ case errorExcessiveLoad:
+ return "H3_EXCESSIVE_LOAD"
+ case errorIDError:
+ return "H3_ID_ERROR"
+ case errorSettingsError:
+ return "H3_SETTINGS_ERROR"
+ case errorMissingSettings:
+ return "H3_MISSING_SETTINGS"
+ case errorRequestRejected:
+ return "H3_REQUEST_REJECTED"
+ case errorRequestCanceled:
+ return "H3_REQUEST_CANCELLED"
+ case errorRequestIncomplete:
+ return "H3_INCOMPLETE_REQUEST"
+ case errorMessageError:
+ return "H3_MESSAGE_ERROR"
+ case errorConnectError:
+ return "H3_CONNECT_ERROR"
+ case errorVersionFallback:
+ return "H3_VERSION_FALLBACK"
+ case errorDatagramError:
+ return "H3_DATAGRAM_ERROR"
+ default:
+ return fmt.Sprintf("unknown error code: %#x", uint16(e))
+ }
+}
diff --git a/vendor/github.com/quic-go/quic-go/http3/frames.go b/vendor/github.com/quic-go/quic-go/http3/frames.go
new file mode 100644
index 000000000..cdd97bc5e
--- /dev/null
+++ b/vendor/github.com/quic-go/quic-go/http3/frames.go
@@ -0,0 +1,164 @@
+package http3
+
+import (
+ "bytes"
+ "errors"
+ "fmt"
+ "io"
+
+ "github.com/quic-go/quic-go/internal/protocol"
+ "github.com/quic-go/quic-go/quicvarint"
+)
+
// FrameType is the frame type of a HTTP/3 frame
type FrameType uint64

// unknownFrameHandlerFunc is called for frame types not defined by HTTP/3
// (and for varint read errors). It reports whether the frame (and stream)
// was taken over by the application.
type unknownFrameHandlerFunc func(FrameType, error) (processed bool, err error)

// frame is a parsed HTTP/3 frame: *dataFrame, *headersFrame or *settingsFrame.
type frame interface{}

// errHijacked is returned by parseNextFrame when the application took over the stream.
var errHijacked = errors.New("hijacked")

// parseNextFrame reads frames from r until it finds one the caller must
// handle (DATA, HEADERS or SETTINGS). Other known frame types are skipped;
// unknown types are first offered to unknownFrameHandler, if set.
func parseNextFrame(r io.Reader, unknownFrameHandler unknownFrameHandlerFunc) (frame, error) {
	qr := quicvarint.NewReader(r)
	for {
		t, err := quicvarint.Read(qr)
		if err != nil {
			if unknownFrameHandler != nil {
				hijacked, err := unknownFrameHandler(0, err)
				if err != nil {
					return nil, err
				}
				if hijacked {
					return nil, errHijacked
				}
			}
			return nil, err
		}
		// Call the unknownFrameHandler for frames not defined in the HTTP/3 spec
		if t > 0xd && unknownFrameHandler != nil {
			hijacked, err := unknownFrameHandler(FrameType(t), nil)
			if err != nil {
				return nil, err
			}
			if hijacked {
				return nil, errHijacked
			}
			// If the unknownFrameHandler didn't process the frame, it is our responsibility to skip it.
		}
		l, err := quicvarint.Read(qr)
		if err != nil {
			return nil, err
		}

		switch t {
		case 0x0:
			return &dataFrame{Length: l}, nil
		case 0x1:
			return &headersFrame{Length: l}, nil
		case 0x4:
			return parseSettingsFrame(r, l)
		case 0x3: // CANCEL_PUSH
		case 0x5: // PUSH_PROMISE
		case 0x7: // GOAWAY
		case 0xd: // MAX_PUSH_ID
		}
		// skip over unknown frames
		if _, err := io.CopyN(io.Discard, qr, int64(l)); err != nil {
			return nil, err
		}
	}
}

// dataFrame is the header of a HTTP/3 DATA frame (the payload is read separately).
type dataFrame struct {
	Length uint64
}

// Append serializes the DATA frame header (type 0x0, then the length) to b.
func (f *dataFrame) Append(b []byte) []byte {
	b = quicvarint.Append(b, 0x0)
	return quicvarint.Append(b, f.Length)
}

// headersFrame is the header of a HTTP/3 HEADERS frame (the payload is read separately).
type headersFrame struct {
	Length uint64
}

// Append serializes the HEADERS frame header (type 0x1, then the length) to b.
func (f *headersFrame) Append(b []byte) []byte {
	b = quicvarint.Append(b, 0x1)
	return quicvarint.Append(b, f.Length)
}

// settingDatagram is the H3_DATAGRAM settings identifier used by this implementation.
const settingDatagram = 0xffd277

// settingsFrame is a parsed HTTP/3 SETTINGS frame.
type settingsFrame struct {
	Datagram bool
	Other    map[uint64]uint64 // all settings that we don't explicitly recognize
}

// parseSettingsFrame reads and parses a SETTINGS frame body of length l from r.
// It rejects oversized frames, duplicate settings and invalid H3_DATAGRAM values.
func parseSettingsFrame(r io.Reader, l uint64) (*settingsFrame, error) {
	if l > 8*(1<<10) {
		return nil, fmt.Errorf("unexpected size for SETTINGS frame: %d", l)
	}
	buf := make([]byte, l)
	if _, err := io.ReadFull(r, buf); err != nil {
		// a truncated frame body is reported as a plain EOF to the caller
		if err == io.ErrUnexpectedEOF {
			return nil, io.EOF
		}
		return nil, err
	}
	frame := &settingsFrame{}
	b := bytes.NewReader(buf)
	var readDatagram bool
	for b.Len() > 0 {
		id, err := quicvarint.Read(b)
		if err != nil { // should not happen. We allocated the whole frame already.
			return nil, err
		}
		val, err := quicvarint.Read(b)
		if err != nil { // should not happen. We allocated the whole frame already.
			return nil, err
		}

		switch id {
		case settingDatagram:
			if readDatagram {
				return nil, fmt.Errorf("duplicate setting: %d", id)
			}
			readDatagram = true
			if val != 0 && val != 1 {
				return nil, fmt.Errorf("invalid value for H3_DATAGRAM: %d", val)
			}
			frame.Datagram = val == 1
		default:
			if _, ok := frame.Other[id]; ok {
				return nil, fmt.Errorf("duplicate setting: %d", id)
			}
			if frame.Other == nil {
				frame.Other = make(map[uint64]uint64)
			}
			frame.Other[id] = val
		}
	}
	return frame, nil
}

// Append serializes the SETTINGS frame (type 0x4, length, then all settings) to b.
func (f *settingsFrame) Append(b []byte) []byte {
	b = quicvarint.Append(b, 0x4)
	// compute the payload length before writing it
	var l protocol.ByteCount
	for id, val := range f.Other {
		l += quicvarint.Len(id) + quicvarint.Len(val)
	}
	if f.Datagram {
		l += quicvarint.Len(settingDatagram) + quicvarint.Len(1)
	}
	b = quicvarint.Append(b, uint64(l))
	if f.Datagram {
		b = quicvarint.Append(b, settingDatagram)
		b = quicvarint.Append(b, 1)
	}
	for id, val := range f.Other {
		b = quicvarint.Append(b, id)
		b = quicvarint.Append(b, val)
	}
	return b
}
diff --git a/vendor/github.com/quic-go/quic-go/http3/gzip_reader.go b/vendor/github.com/quic-go/quic-go/http3/gzip_reader.go
new file mode 100644
index 000000000..01983ac77
--- /dev/null
+++ b/vendor/github.com/quic-go/quic-go/http3/gzip_reader.go
@@ -0,0 +1,39 @@
+package http3
+
+// copied from net/transport.go
+
+// gzipReader wraps a response body so it can lazily
+// call gzip.NewReader on the first call to Read
+import (
+ "compress/gzip"
+ "io"
+)
+
+// call gzip.NewReader on the first call to Read
+type gzipReader struct {
+ body io.ReadCloser // underlying Response.Body
+ zr *gzip.Reader // lazily-initialized gzip reader
+ zerr error // sticky error
+}
+
+func newGzipReader(body io.ReadCloser) io.ReadCloser {
+ return &gzipReader{body: body}
+}
+
+func (gz *gzipReader) Read(p []byte) (n int, err error) {
+ if gz.zerr != nil {
+ return 0, gz.zerr
+ }
+ if gz.zr == nil {
+ gz.zr, err = gzip.NewReader(gz.body)
+ if err != nil {
+ gz.zerr = err
+ return 0, err
+ }
+ }
+ return gz.zr.Read(p)
+}
+
+func (gz *gzipReader) Close() error {
+ return gz.body.Close()
+}
diff --git a/vendor/github.com/quic-go/quic-go/http3/http_stream.go b/vendor/github.com/quic-go/quic-go/http3/http_stream.go
new file mode 100644
index 000000000..2799e2b3c
--- /dev/null
+++ b/vendor/github.com/quic-go/quic-go/http3/http_stream.go
@@ -0,0 +1,76 @@
+package http3
+
+import (
+ "fmt"
+
+ "github.com/quic-go/quic-go"
+)
+
// A Stream is a HTTP/3 stream.
// When writing to and reading from the stream, data is framed in HTTP/3 DATA frames.
type Stream quic.Stream

// The stream conforms to the quic.Stream interface, but instead of writing to and reading directly
// from the QUIC stream, it writes to and reads from the HTTP stream.
type stream struct {
	quic.Stream

	buf []byte // scratch buffer for serializing DATA frame headers

	onFrameError          func() // called when an unexpected frame type is encountered
	bytesRemainingInFrame uint64 // payload bytes left in the DATA frame currently being read
}

var _ Stream = &stream{}

// newStream wraps a QUIC stream with HTTP/3 DATA framing.
func newStream(str quic.Stream, onFrameError func()) *stream {
	return &stream{
		Stream:       str,
		onFrameError: onFrameError,
		buf:          make([]byte, 0, 16),
	}
}

// Read reads payload bytes, transparently handling DATA frame boundaries.
// HEADERS frames (e.g. trailers) are skipped; any other frame type triggers
// onFrameError and returns an error.
func (s *stream) Read(b []byte) (int, error) {
	if s.bytesRemainingInFrame == 0 {
	parseLoop:
		for {
			frame, err := parseNextFrame(s.Stream, nil)
			if err != nil {
				return 0, err
			}
			switch f := frame.(type) {
			case *headersFrame:
				// skip HEADERS frames
				continue
			case *dataFrame:
				s.bytesRemainingInFrame = f.Length
				break parseLoop
			default:
				s.onFrameError()
				// parseNextFrame skips over unknown frame types
				// Therefore, this condition is only entered when we parsed another known frame type.
				return 0, fmt.Errorf("peer sent an unexpected frame: %T", f)
			}
		}
	}

	var n int
	var err error
	// never read past the end of the current DATA frame
	if s.bytesRemainingInFrame < uint64(len(b)) {
		n, err = s.Stream.Read(b[:s.bytesRemainingInFrame])
	} else {
		n, err = s.Stream.Read(b)
	}
	s.bytesRemainingInFrame -= uint64(n)
	return n, err
}

// Write writes b as the payload of a single DATA frame.
// The returned count covers only the payload bytes, per the io.Writer contract.
func (s *stream) Write(b []byte) (int, error) {
	s.buf = s.buf[:0]
	s.buf = (&dataFrame{Length: uint64(len(b))}).Append(s.buf)
	if _, err := s.Stream.Write(s.buf); err != nil {
		return 0, err
	}
	return s.Stream.Write(b)
}
diff --git a/vendor/github.com/quic-go/quic-go/http3/request.go b/vendor/github.com/quic-go/quic-go/http3/request.go
new file mode 100644
index 000000000..9af25a570
--- /dev/null
+++ b/vendor/github.com/quic-go/quic-go/http3/request.go
@@ -0,0 +1,111 @@
+package http3
+
+import (
+ "errors"
+ "net/http"
+ "net/url"
+ "strconv"
+ "strings"
+
+ "github.com/quic-go/qpack"
+)
+
// requestFromHeaders constructs a http.Request from the decoded QPACK header
// fields, enforcing the pseudo-header rules for regular requests, CONNECT,
// and Extended CONNECT (which additionally carries a :protocol pseudo header).
func requestFromHeaders(headers []qpack.HeaderField) (*http.Request, error) {
	var path, authority, method, protocol, scheme, contentLengthStr string

	httpHeaders := http.Header{}
	for _, h := range headers {
		switch h.Name {
		case ":path":
			path = h.Value
		case ":method":
			method = h.Value
		case ":authority":
			authority = h.Value
		case ":protocol":
			protocol = h.Value
		case ":scheme":
			scheme = h.Value
		case "content-length":
			contentLengthStr = h.Value
		default:
			// unrecognized pseudo headers are dropped; regular headers are kept
			if !h.IsPseudo() {
				httpHeaders.Add(h.Name, h.Value)
			}
		}
	}

	// concatenate cookie headers, see https://tools.ietf.org/html/rfc6265#section-5.4
	if len(httpHeaders["Cookie"]) > 0 {
		httpHeaders.Set("Cookie", strings.Join(httpHeaders["Cookie"], "; "))
	}

	isConnect := method == http.MethodConnect
	// Extended CONNECT, see https://datatracker.ietf.org/doc/html/rfc8441#section-4
	isExtendedConnected := isConnect && protocol != ""
	if isExtendedConnected {
		if scheme == "" || path == "" || authority == "" {
			return nil, errors.New("extended CONNECT: :scheme, :path and :authority must not be empty")
		}
	} else if isConnect {
		if path != "" || authority == "" { // normal CONNECT
			return nil, errors.New(":path must be empty and :authority must not be empty")
		}
	} else if len(path) == 0 || len(authority) == 0 || len(method) == 0 {
		return nil, errors.New(":path, :authority and :method must not be empty")
	}

	var u *url.URL
	var requestURI string
	var err error

	if isConnect {
		u = &url.URL{}
		if isExtendedConnected {
			// for Extended CONNECT, :path carries a full request-target
			u, err = url.ParseRequestURI(path)
			if err != nil {
				return nil, err
			}
		} else {
			u.Path = path
		}
		u.Scheme = scheme
		u.Host = authority
		requestURI = authority
	} else {
		// for non-CONNECT requests, Proto is always reported as HTTP/3.0
		protocol = "HTTP/3.0"
		u, err = url.ParseRequestURI(path)
		if err != nil {
			return nil, err
		}
		requestURI = path
	}

	var contentLength int64
	if len(contentLengthStr) > 0 {
		contentLength, err = strconv.ParseInt(contentLengthStr, 10, 64)
		if err != nil {
			return nil, err
		}
	}

	return &http.Request{
		Method:        method,
		URL:           u,
		Proto:         protocol,
		ProtoMajor:    3,
		ProtoMinor:    0,
		Header:        httpHeaders,
		Body:          nil,
		ContentLength: contentLength,
		Host:          authority,
		RequestURI:    requestURI,
	}, nil
}
+
+func hostnameFromRequest(req *http.Request) string {
+ if req.URL != nil {
+ return req.URL.Host
+ }
+ return ""
+}
diff --git a/vendor/github.com/quic-go/quic-go/http3/request_writer.go b/vendor/github.com/quic-go/quic-go/http3/request_writer.go
new file mode 100644
index 000000000..fcff6a1f4
--- /dev/null
+++ b/vendor/github.com/quic-go/quic-go/http3/request_writer.go
@@ -0,0 +1,283 @@
+package http3
+
+import (
+ "bytes"
+ "fmt"
+ "io"
+ "net"
+ "net/http"
+ "strconv"
+ "strings"
+ "sync"
+
+ "golang.org/x/net/http/httpguts"
+ "golang.org/x/net/http2/hpack"
+ "golang.org/x/net/idna"
+
+ "github.com/quic-go/qpack"
+ "github.com/quic-go/quic-go"
+ "github.com/quic-go/quic-go/internal/utils"
+)
+
+const bodyCopyBufferSize = 8 * 1024
+
+type requestWriter struct {
+ mutex sync.Mutex
+ encoder *qpack.Encoder
+ headerBuf *bytes.Buffer
+
+ logger utils.Logger
+}
+
+func newRequestWriter(logger utils.Logger) *requestWriter {
+ headerBuf := &bytes.Buffer{}
+ encoder := qpack.NewEncoder(headerBuf)
+ return &requestWriter{
+ encoder: encoder,
+ headerBuf: headerBuf,
+ logger: logger,
+ }
+}
+
+func (w *requestWriter) WriteRequestHeader(str quic.Stream, req *http.Request, gzip bool) error {
+ // TODO: figure out how to add support for trailers
+ buf := &bytes.Buffer{}
+ if err := w.writeHeaders(buf, req, gzip); err != nil {
+ return err
+ }
+ _, err := str.Write(buf.Bytes())
+ return err
+}
+
+func (w *requestWriter) writeHeaders(wr io.Writer, req *http.Request, gzip bool) error {
+ w.mutex.Lock()
+ defer w.mutex.Unlock()
+ defer w.encoder.Close()
+ defer w.headerBuf.Reset()
+
+ if err := w.encodeHeaders(req, gzip, "", actualContentLength(req)); err != nil {
+ return err
+ }
+
+ b := make([]byte, 0, 128)
+ b = (&headersFrame{Length: uint64(w.headerBuf.Len())}).Append(b)
+ if _, err := wr.Write(b); err != nil {
+ return err
+ }
+ _, err := wr.Write(w.headerBuf.Bytes())
+ return err
+}
+
+// copied from net/transport.go
+// Modified to support Extended CONNECT:
+// Contrary to what the godoc for the http.Request says,
+// we do respect the Proto field if the method is CONNECT.
+func (w *requestWriter) encodeHeaders(req *http.Request, addGzipHeader bool, trailers string, contentLength int64) error {
+ host := req.Host
+ if host == "" {
+ host = req.URL.Host
+ }
+ host, err := httpguts.PunycodeHostPort(host)
+ if err != nil {
+ return err
+ }
+
+ // http.NewRequest sets this field to HTTP/1.1
+ isExtendedConnect := req.Method == http.MethodConnect && req.Proto != "" && req.Proto != "HTTP/1.1"
+
+ var path string
+ if req.Method != http.MethodConnect || isExtendedConnect {
+ path = req.URL.RequestURI()
+ if !validPseudoPath(path) {
+ orig := path
+ path = strings.TrimPrefix(path, req.URL.Scheme+"://"+host)
+ if !validPseudoPath(path) {
+ if req.URL.Opaque != "" {
+ return fmt.Errorf("invalid request :path %q from URL.Opaque = %q", orig, req.URL.Opaque)
+ } else {
+ return fmt.Errorf("invalid request :path %q", orig)
+ }
+ }
+ }
+ }
+
+ // Check for any invalid headers and return an error before we
+ // potentially pollute our hpack state. (We want to be able to
+ // continue to reuse the hpack encoder for future requests)
+ for k, vv := range req.Header {
+ if !httpguts.ValidHeaderFieldName(k) {
+ return fmt.Errorf("invalid HTTP header name %q", k)
+ }
+ for _, v := range vv {
+ if !httpguts.ValidHeaderFieldValue(v) {
+ return fmt.Errorf("invalid HTTP header value %q for header %q", v, k)
+ }
+ }
+ }
+
+ enumerateHeaders := func(f func(name, value string)) {
+ // 8.1.2.3 Request Pseudo-Header Fields
+ // The :path pseudo-header field includes the path and query parts of the
+ // target URI (the path-absolute production and optionally a '?' character
+ // followed by the query production (see Sections 3.3 and 3.4 of
+ // [RFC3986]).
+ f(":authority", host)
+ f(":method", req.Method)
+ if req.Method != http.MethodConnect || isExtendedConnect {
+ f(":path", path)
+ f(":scheme", req.URL.Scheme)
+ }
+ if isExtendedConnect {
+ f(":protocol", req.Proto)
+ }
+ if trailers != "" {
+ f("trailer", trailers)
+ }
+
+ var didUA bool
+ for k, vv := range req.Header {
+ if strings.EqualFold(k, "host") || strings.EqualFold(k, "content-length") {
+ // Host is :authority, already sent.
+ // Content-Length is automatic, set below.
+ continue
+ } else if strings.EqualFold(k, "connection") || strings.EqualFold(k, "proxy-connection") ||
+ strings.EqualFold(k, "transfer-encoding") || strings.EqualFold(k, "upgrade") ||
+ strings.EqualFold(k, "keep-alive") {
+ // Per 8.1.2.2 Connection-Specific Header
+ // Fields, don't send connection-specific
+ // fields. We have already checked if any
+ // are error-worthy so just ignore the rest.
+ continue
+ } else if strings.EqualFold(k, "user-agent") {
+ // Match Go's http1 behavior: at most one
+ // User-Agent. If set to nil or empty string,
+ // then omit it. Otherwise if not mentioned,
+ // include the default (below).
+ didUA = true
+ if len(vv) < 1 {
+ continue
+ }
+ vv = vv[:1]
+ if vv[0] == "" {
+ continue
+ }
+
+ }
+
+ for _, v := range vv {
+ f(k, v)
+ }
+ }
+ if shouldSendReqContentLength(req.Method, contentLength) {
+ f("content-length", strconv.FormatInt(contentLength, 10))
+ }
+ if addGzipHeader {
+ f("accept-encoding", "gzip")
+ }
+ if !didUA {
+ f("user-agent", defaultUserAgent)
+ }
+ }
+
+ // Do a first pass over the headers counting bytes to ensure
+ // we don't exceed cc.peerMaxHeaderListSize. This is done as a
+ // separate pass before encoding the headers to prevent
+ // modifying the hpack state.
+ hlSize := uint64(0)
+ enumerateHeaders(func(name, value string) {
+ hf := hpack.HeaderField{Name: name, Value: value}
+ hlSize += uint64(hf.Size())
+ })
+
+ // TODO: check maximum header list size
+ // if hlSize > cc.peerMaxHeaderListSize {
+ // return errRequestHeaderListSize
+ // }
+
+ // trace := httptrace.ContextClientTrace(req.Context())
+ // traceHeaders := traceHasWroteHeaderField(trace)
+
+ // Header list size is ok. Write the headers.
+ enumerateHeaders(func(name, value string) {
+ name = strings.ToLower(name)
+ w.encoder.WriteField(qpack.HeaderField{Name: name, Value: value})
+ // if traceHeaders {
+ // traceWroteHeaderField(trace, name, value)
+ // }
+ })
+
+ return nil
+}
+
+// authorityAddr returns a given authority (a host/IP, or host:port / ip:port)
+// and returns a host:port. The port 443 is added if needed.
+func authorityAddr(scheme string, authority string) (addr string) {
+ host, port, err := net.SplitHostPort(authority)
+ if err != nil { // authority didn't have a port
+ port = "443"
+ if scheme == "http" {
+ port = "80"
+ }
+ host = authority
+ }
+ if a, err := idna.ToASCII(host); err == nil {
+ host = a
+ }
+ // IPv6 address literal, without a port:
+ if strings.HasPrefix(host, "[") && strings.HasSuffix(host, "]") {
+ return host + ":" + port
+ }
+ return net.JoinHostPort(host, port)
+}
+
+// validPseudoPath reports whether v is a valid :path pseudo-header
+// value. It must be either:
+//
+// *) a non-empty string starting with '/'
+// *) the string '*', for OPTIONS requests.
+//
+// For now this is only used as a quick check for deciding when to clean
+// up Opaque URLs before sending requests from the Transport.
+// See golang.org/issue/16847
+//
+// We used to enforce that the path also didn't start with "//", but
+// Google's GFE accepts such paths and Chrome sends them, so ignore
+// that part of the spec. See golang.org/issue/19103.
+func validPseudoPath(v string) bool {
+ return (len(v) > 0 && v[0] == '/') || v == "*"
+}
+
+// actualContentLength returns a sanitized version of
+// req.ContentLength, where 0 actually means zero (not unknown) and -1
+// means unknown.
+func actualContentLength(req *http.Request) int64 {
+ if req.Body == nil {
+ return 0
+ }
+ if req.ContentLength != 0 {
+ return req.ContentLength
+ }
+ return -1
+}
+
+// shouldSendReqContentLength reports whether the http2.Transport should send
+// a "content-length" request header. This logic is basically a copy of the net/http
+// transferWriter.shouldSendContentLength.
+// The contentLength is the corrected contentLength (so 0 means actually 0, not unknown).
+// -1 means unknown.
+func shouldSendReqContentLength(method string, contentLength int64) bool {
+ if contentLength > 0 {
+ return true
+ }
+ if contentLength < 0 {
+ return false
+ }
+ // For zero bodies, whether we send a content-length depends on the method.
+ // It also kinda doesn't matter for http2 either way, with END_STREAM.
+ switch method {
+ case "POST", "PUT", "PATCH":
+ return true
+ default:
+ return false
+ }
+}
diff --git a/vendor/github.com/quic-go/quic-go/http3/response_writer.go b/vendor/github.com/quic-go/quic-go/http3/response_writer.go
new file mode 100644
index 000000000..5cc329239
--- /dev/null
+++ b/vendor/github.com/quic-go/quic-go/http3/response_writer.go
@@ -0,0 +1,137 @@
+package http3
+
+import (
+ "bufio"
+ "bytes"
+ "net/http"
+ "strconv"
+ "strings"
+
+ "github.com/quic-go/quic-go"
+ "github.com/quic-go/quic-go/internal/utils"
+
+ "github.com/quic-go/qpack"
+)
+
+type responseWriter struct {
+ conn quic.Connection
+ bufferedStr *bufio.Writer
+ buf []byte
+
+ header http.Header
+ status int // status code passed to WriteHeader
+ headerWritten bool
+
+ logger utils.Logger
+}
+
+var (
+ _ http.ResponseWriter = &responseWriter{}
+ _ http.Flusher = &responseWriter{}
+ _ Hijacker = &responseWriter{}
+)
+
+func newResponseWriter(str quic.Stream, conn quic.Connection, logger utils.Logger) *responseWriter {
+ return &responseWriter{
+ header: http.Header{},
+ buf: make([]byte, 16),
+ conn: conn,
+ bufferedStr: bufio.NewWriter(str),
+ logger: logger,
+ }
+}
+
+func (w *responseWriter) Header() http.Header {
+ return w.header
+}
+
+func (w *responseWriter) WriteHeader(status int) {
+ if w.headerWritten {
+ return
+ }
+
+ if status < 100 || status >= 200 {
+ w.headerWritten = true
+ }
+ w.status = status
+
+ var headers bytes.Buffer
+ enc := qpack.NewEncoder(&headers)
+ enc.WriteField(qpack.HeaderField{Name: ":status", Value: strconv.Itoa(status)})
+
+ for k, v := range w.header {
+ for index := range v {
+ enc.WriteField(qpack.HeaderField{Name: strings.ToLower(k), Value: v[index]})
+ }
+ }
+
+ w.buf = w.buf[:0]
+ w.buf = (&headersFrame{Length: uint64(headers.Len())}).Append(w.buf)
+ w.logger.Infof("Responding with %d", status)
+ if _, err := w.bufferedStr.Write(w.buf); err != nil {
+ w.logger.Errorf("could not write headers frame: %s", err.Error())
+ }
+ if _, err := w.bufferedStr.Write(headers.Bytes()); err != nil {
+ w.logger.Errorf("could not write header frame payload: %s", err.Error())
+ }
+ if !w.headerWritten {
+ w.Flush()
+ }
+}
+
+func (w *responseWriter) Write(p []byte) (int, error) {
+ bodyAllowed := bodyAllowedForStatus(w.status)
+ if !w.headerWritten {
+ // If body is not allowed, we don't need to (and we can't) sniff the content type.
+ if bodyAllowed {
+ // If no content type, apply sniffing algorithm to body.
+			// We can't use `w.header.Get` here since if the Content-Type was set to nil, we shouldn't do sniffing.
+ _, haveType := w.header["Content-Type"]
+
+ // If the Transfer-Encoding or Content-Encoding was set and is non-blank,
+ // we shouldn't sniff the body.
+ hasTE := w.header.Get("Transfer-Encoding") != ""
+ hasCE := w.header.Get("Content-Encoding") != ""
+ if !hasCE && !haveType && !hasTE && len(p) > 0 {
+ w.header.Set("Content-Type", http.DetectContentType(p))
+ }
+ }
+ w.WriteHeader(http.StatusOK)
+ bodyAllowed = true
+ }
+ if !bodyAllowed {
+ return 0, http.ErrBodyNotAllowed
+ }
+ df := &dataFrame{Length: uint64(len(p))}
+ w.buf = w.buf[:0]
+ w.buf = df.Append(w.buf)
+ if _, err := w.bufferedStr.Write(w.buf); err != nil {
+ return 0, err
+ }
+ return w.bufferedStr.Write(p)
+}
+
+func (w *responseWriter) Flush() {
+ if err := w.bufferedStr.Flush(); err != nil {
+ w.logger.Errorf("could not flush to stream: %s", err.Error())
+ }
+}
+
+func (w *responseWriter) StreamCreator() StreamCreator {
+ return w.conn
+}
+
+// copied from http2/http2.go
+// bodyAllowedForStatus reports whether a given response status code
+// permits a body. See RFC 2616, section 4.4.
+func bodyAllowedForStatus(status int) bool {
+ switch {
+ case status >= 100 && status <= 199:
+ return false
+ case status == http.StatusNoContent:
+ return false
+ case status == http.StatusNotModified:
+ return false
+ }
+ return true
+}
diff --git a/vendor/github.com/quic-go/quic-go/http3/roundtrip.go b/vendor/github.com/quic-go/quic-go/http3/roundtrip.go
new file mode 100644
index 000000000..d9812abb5
--- /dev/null
+++ b/vendor/github.com/quic-go/quic-go/http3/roundtrip.go
@@ -0,0 +1,247 @@
+package http3
+
+import (
+ "context"
+ "crypto/tls"
+ "errors"
+ "fmt"
+ "io"
+ "net"
+ "net/http"
+ "strings"
+ "sync"
+
+ "golang.org/x/net/http/httpguts"
+
+ "github.com/quic-go/quic-go"
+)
+
+type roundTripCloser interface {
+ RoundTripOpt(*http.Request, RoundTripOpt) (*http.Response, error)
+ HandshakeComplete() bool
+ io.Closer
+}
+
+// RoundTripper implements the http.RoundTripper interface
+type RoundTripper struct {
+ mutex sync.Mutex
+
+ // DisableCompression, if true, prevents the Transport from
+ // requesting compression with an "Accept-Encoding: gzip"
+ // request header when the Request contains no existing
+ // Accept-Encoding value. If the Transport requests gzip on
+ // its own and gets a gzipped response, it's transparently
+ // decoded in the Response.Body. However, if the user
+ // explicitly requested gzip it is not automatically
+ // uncompressed.
+ DisableCompression bool
+
+ // TLSClientConfig specifies the TLS configuration to use with
+ // tls.Client. If nil, the default configuration is used.
+ TLSClientConfig *tls.Config
+
+ // QuicConfig is the quic.Config used for dialing new connections.
+ // If nil, reasonable default values will be used.
+ QuicConfig *quic.Config
+
+ // Enable support for HTTP/3 datagrams.
+ // If set to true, QuicConfig.EnableDatagram will be set.
+ // See https://www.ietf.org/archive/id/draft-schinazi-masque-h3-datagram-02.html.
+ EnableDatagrams bool
+
+ // Additional HTTP/3 settings.
+ // It is invalid to specify any settings defined by the HTTP/3 draft and the datagram draft.
+ AdditionalSettings map[uint64]uint64
+
+ // When set, this callback is called for the first unknown frame parsed on a bidirectional stream.
+ // It is called right after parsing the frame type.
+ // If parsing the frame type fails, the error is passed to the callback.
+ // In that case, the frame type will not be set.
+ // Callers can either ignore the frame and return control of the stream back to HTTP/3
+ // (by returning hijacked false).
+ // Alternatively, callers can take over the QUIC stream (by returning hijacked true).
+ StreamHijacker func(FrameType, quic.Connection, quic.Stream, error) (hijacked bool, err error)
+
+ // When set, this callback is called for unknown unidirectional stream of unknown stream type.
+ // If parsing the stream type fails, the error is passed to the callback.
+ // In that case, the stream type will not be set.
+ UniStreamHijacker func(StreamType, quic.Connection, quic.ReceiveStream, error) (hijacked bool)
+
+ // Dial specifies an optional dial function for creating QUIC
+ // connections for requests.
+ // If Dial is nil, quic.DialAddrEarlyContext will be used.
+ Dial func(ctx context.Context, addr string, tlsCfg *tls.Config, cfg *quic.Config) (quic.EarlyConnection, error)
+
+ // MaxResponseHeaderBytes specifies a limit on how many response bytes are
+ // allowed in the server's response header.
+ // Zero means to use a default limit.
+ MaxResponseHeaderBytes int64
+
+ newClient func(hostname string, tlsConf *tls.Config, opts *roundTripperOpts, conf *quic.Config, dialer dialFunc) (roundTripCloser, error) // so we can mock it in tests
+ clients map[string]roundTripCloser
+}
+
+// RoundTripOpt are options for the Transport.RoundTripOpt method.
+type RoundTripOpt struct {
+ // OnlyCachedConn controls whether the RoundTripper may create a new QUIC connection.
+ // If set true and no cached connection is available, RoundTripOpt will return ErrNoCachedConn.
+ OnlyCachedConn bool
+ // DontCloseRequestStream controls whether the request stream is closed after sending the request.
+ // If set, context cancellations have no effect after the response headers are received.
+ DontCloseRequestStream bool
+}
+
+var (
+ _ http.RoundTripper = &RoundTripper{}
+ _ io.Closer = &RoundTripper{}
+)
+
+// ErrNoCachedConn is returned when RoundTripper.OnlyCachedConn is set
+var ErrNoCachedConn = errors.New("http3: no cached connection was available")
+
+// RoundTripOpt is like RoundTrip, but takes options.
+func (r *RoundTripper) RoundTripOpt(req *http.Request, opt RoundTripOpt) (*http.Response, error) {
+ if req.URL == nil {
+ closeRequestBody(req)
+ return nil, errors.New("http3: nil Request.URL")
+ }
+ if req.URL.Host == "" {
+ closeRequestBody(req)
+ return nil, errors.New("http3: no Host in request URL")
+ }
+ if req.Header == nil {
+ closeRequestBody(req)
+ return nil, errors.New("http3: nil Request.Header")
+ }
+ if req.URL.Scheme != "https" {
+ closeRequestBody(req)
+ return nil, fmt.Errorf("http3: unsupported protocol scheme: %s", req.URL.Scheme)
+ }
+ for k, vv := range req.Header {
+ if !httpguts.ValidHeaderFieldName(k) {
+ return nil, fmt.Errorf("http3: invalid http header field name %q", k)
+ }
+ for _, v := range vv {
+ if !httpguts.ValidHeaderFieldValue(v) {
+ return nil, fmt.Errorf("http3: invalid http header field value %q for key %v", v, k)
+ }
+ }
+ }
+
+ if req.Method != "" && !validMethod(req.Method) {
+ closeRequestBody(req)
+ return nil, fmt.Errorf("http3: invalid method %q", req.Method)
+ }
+
+ hostname := authorityAddr("https", hostnameFromRequest(req))
+ cl, isReused, err := r.getClient(hostname, opt.OnlyCachedConn)
+ if err != nil {
+ return nil, err
+ }
+ rsp, err := cl.RoundTripOpt(req, opt)
+ if err != nil {
+ r.removeClient(hostname)
+ if isReused {
+ if nerr, ok := err.(net.Error); ok && nerr.Timeout() {
+ return r.RoundTripOpt(req, opt)
+ }
+ }
+ }
+ return rsp, err
+}
+
+// RoundTrip does a round trip.
+func (r *RoundTripper) RoundTrip(req *http.Request) (*http.Response, error) {
+ return r.RoundTripOpt(req, RoundTripOpt{})
+}
+
+func (r *RoundTripper) getClient(hostname string, onlyCached bool) (rtc roundTripCloser, isReused bool, err error) {
+ r.mutex.Lock()
+ defer r.mutex.Unlock()
+
+ if r.clients == nil {
+ r.clients = make(map[string]roundTripCloser)
+ }
+
+ client, ok := r.clients[hostname]
+ if !ok {
+ if onlyCached {
+ return nil, false, ErrNoCachedConn
+ }
+ var err error
+ newCl := newClient
+ if r.newClient != nil {
+ newCl = r.newClient
+ }
+ client, err = newCl(
+ hostname,
+ r.TLSClientConfig,
+ &roundTripperOpts{
+ EnableDatagram: r.EnableDatagrams,
+ DisableCompression: r.DisableCompression,
+ MaxHeaderBytes: r.MaxResponseHeaderBytes,
+ StreamHijacker: r.StreamHijacker,
+ UniStreamHijacker: r.UniStreamHijacker,
+ },
+ r.QuicConfig,
+ r.Dial,
+ )
+ if err != nil {
+ return nil, false, err
+ }
+ r.clients[hostname] = client
+ } else if client.HandshakeComplete() {
+ isReused = true
+ }
+ return client, isReused, nil
+}
+
+func (r *RoundTripper) removeClient(hostname string) {
+ r.mutex.Lock()
+ defer r.mutex.Unlock()
+ if r.clients == nil {
+ return
+ }
+ delete(r.clients, hostname)
+}
+
+// Close closes the QUIC connections that this RoundTripper has used
+func (r *RoundTripper) Close() error {
+ r.mutex.Lock()
+ defer r.mutex.Unlock()
+ for _, client := range r.clients {
+ if err := client.Close(); err != nil {
+ return err
+ }
+ }
+ r.clients = nil
+ return nil
+}
+
+func closeRequestBody(req *http.Request) {
+ if req.Body != nil {
+ req.Body.Close()
+ }
+}
+
+func validMethod(method string) bool {
+ /*
+ Method = "OPTIONS" ; Section 9.2
+ | "GET" ; Section 9.3
+ | "HEAD" ; Section 9.4
+ | "POST" ; Section 9.5
+ | "PUT" ; Section 9.6
+ | "DELETE" ; Section 9.7
+ | "TRACE" ; Section 9.8
+ | "CONNECT" ; Section 9.9
+ | extension-method
+ extension-method = token
+ token = 1*
+ */
+ return len(method) > 0 && strings.IndexFunc(method, isNotToken) == -1
+}
+
+// copied from net/http/http.go
+func isNotToken(r rune) bool {
+ return !httpguts.IsTokenRune(r)
+}
diff --git a/vendor/github.com/quic-go/quic-go/http3/server.go b/vendor/github.com/quic-go/quic-go/http3/server.go
new file mode 100644
index 000000000..e546a9306
--- /dev/null
+++ b/vendor/github.com/quic-go/quic-go/http3/server.go
@@ -0,0 +1,752 @@
+package http3
+
+import (
+ "context"
+ "crypto/tls"
+ "errors"
+ "fmt"
+ "io"
+ "net"
+ "net/http"
+ "runtime"
+ "strings"
+ "sync"
+ "time"
+
+ "github.com/quic-go/quic-go"
+ "github.com/quic-go/quic-go/internal/handshake"
+ "github.com/quic-go/quic-go/internal/protocol"
+ "github.com/quic-go/quic-go/internal/utils"
+ "github.com/quic-go/quic-go/quicvarint"
+
+ "github.com/quic-go/qpack"
+)
+
+// allows mocking of quic.Listen and quic.ListenAddr
+var (
+ quicListen = quic.ListenEarly
+ quicListenAddr = quic.ListenAddrEarly
+)
+
+const (
+ // NextProtoH3Draft29 is the ALPN protocol negotiated during the TLS handshake, for QUIC draft 29.
+ NextProtoH3Draft29 = "h3-29"
+ // NextProtoH3 is the ALPN protocol negotiated during the TLS handshake, for QUIC v1 and v2.
+ NextProtoH3 = "h3"
+)
+
+// StreamType is the stream type of a unidirectional stream.
+type StreamType uint64
+
+const (
+ streamTypeControlStream = 0
+ streamTypePushStream = 1
+ streamTypeQPACKEncoderStream = 2
+ streamTypeQPACKDecoderStream = 3
+)
+
+func versionToALPN(v protocol.VersionNumber) string {
+ if v == protocol.Version1 || v == protocol.Version2 {
+ return NextProtoH3
+ }
+ if v == protocol.VersionTLS || v == protocol.VersionDraft29 {
+ return NextProtoH3Draft29
+ }
+ return ""
+}
+
+// ConfigureTLSConfig creates a new tls.Config which can be used
+// to create a quic.Listener meant for serving http3. The created
+// tls.Config adds the functionality of detecting the used QUIC version
+// in order to set the correct ALPN value for the http3 connection.
+func ConfigureTLSConfig(tlsConf *tls.Config) *tls.Config {
+ // The tls.Config used to setup the quic.Listener needs to have the GetConfigForClient callback set.
+ // That way, we can get the QUIC version and set the correct ALPN value.
+ return &tls.Config{
+ GetConfigForClient: func(ch *tls.ClientHelloInfo) (*tls.Config, error) {
+ // determine the ALPN from the QUIC version used
+ proto := NextProtoH3
+ if qconn, ok := ch.Conn.(handshake.ConnWithVersion); ok {
+ proto = versionToALPN(qconn.GetQUICVersion())
+ }
+ config := tlsConf
+ if tlsConf.GetConfigForClient != nil {
+ getConfigForClient := tlsConf.GetConfigForClient
+ var err error
+ conf, err := getConfigForClient(ch)
+ if err != nil {
+ return nil, err
+ }
+ if conf != nil {
+ config = conf
+ }
+ }
+ if config == nil {
+ return nil, nil
+ }
+ config = config.Clone()
+ config.NextProtos = []string{proto}
+ return config, nil
+ },
+ }
+}
+
+// contextKey is a value for use with context.WithValue. It's used as
+// a pointer so it fits in an interface{} without allocation.
+type contextKey struct {
+ name string
+}
+
+func (k *contextKey) String() string { return "quic-go/http3 context value " + k.name }
+
+// ServerContextKey is a context key. It can be used in HTTP
+// handlers with Context.Value to access the server that
+// started the handler. The associated value will be of
+// type *http3.Server.
+var ServerContextKey = &contextKey{"http3-server"}
+
+type requestError struct {
+ err error
+ streamErr errorCode
+ connErr errorCode
+}
+
+func newStreamError(code errorCode, err error) requestError {
+ return requestError{err: err, streamErr: code}
+}
+
+func newConnError(code errorCode, err error) requestError {
+ return requestError{err: err, connErr: code}
+}
+
+// listenerInfo contains info about specific listener added with addListener
+type listenerInfo struct {
+ port int // 0 means that no info about port is available
+}
+
+// Server is an HTTP/3 server.
+type Server struct {
+ // Addr optionally specifies the UDP address for the server to listen on,
+ // in the form "host:port".
+ //
+ // When used by ListenAndServe and ListenAndServeTLS methods, if empty,
+ // ":https" (port 443) is used. See net.Dial for details of the address
+ // format.
+ //
+ // Otherwise, if Port is not set and underlying QUIC listeners do not
+ // have valid port numbers, the port part is used in Alt-Svc headers set
+ // with SetQuicHeaders.
+ Addr string
+
+ // Port is used in Alt-Svc response headers set with SetQuicHeaders. If
+ // needed Port can be manually set when the Server is created.
+ //
+ // This is useful when a Layer 4 firewall is redirecting UDP traffic and
+ // clients must use a port different from the port the Server is
+ // listening on.
+ Port int
+
+ // TLSConfig provides a TLS configuration for use by server. It must be
+ // set for ListenAndServe and Serve methods.
+ TLSConfig *tls.Config
+
+ // QuicConfig provides the parameters for QUIC connection created with
+ // Serve. If nil, it uses reasonable default values.
+ //
+ // Configured versions are also used in Alt-Svc response header set with
+ // SetQuicHeaders.
+ QuicConfig *quic.Config
+
+ // Handler is the HTTP request handler to use. If not set, defaults to
+ // http.NotFound.
+ Handler http.Handler
+
+ // EnableDatagrams enables support for HTTP/3 datagrams.
+ // If set to true, QuicConfig.EnableDatagram will be set.
+ // See https://datatracker.ietf.org/doc/html/draft-ietf-masque-h3-datagram-07.
+ EnableDatagrams bool
+
+ // MaxHeaderBytes controls the maximum number of bytes the server will
+ // read parsing the request HEADERS frame. It does not limit the size of
+ // the request body. If zero or negative, http.DefaultMaxHeaderBytes is
+ // used.
+ MaxHeaderBytes int
+
+ // AdditionalSettings specifies additional HTTP/3 settings.
+ // It is invalid to specify any settings defined by the HTTP/3 draft and the datagram draft.
+ AdditionalSettings map[uint64]uint64
+
+ // StreamHijacker, when set, is called for the first unknown frame parsed on a bidirectional stream.
+ // It is called right after parsing the frame type.
+ // If parsing the frame type fails, the error is passed to the callback.
+ // In that case, the frame type will not be set.
+ // Callers can either ignore the frame and return control of the stream back to HTTP/3
+ // (by returning hijacked false).
+ // Alternatively, callers can take over the QUIC stream (by returning hijacked true).
+ StreamHijacker func(FrameType, quic.Connection, quic.Stream, error) (hijacked bool, err error)
+
+ // UniStreamHijacker, when set, is called for unknown unidirectional stream of unknown stream type.
+ // If parsing the stream type fails, the error is passed to the callback.
+ // In that case, the stream type will not be set.
+ UniStreamHijacker func(StreamType, quic.Connection, quic.ReceiveStream, error) (hijacked bool)
+
+ mutex sync.RWMutex
+ listeners map[*quic.EarlyListener]listenerInfo
+
+ closed bool
+
+ altSvcHeader string
+
+ logger utils.Logger
+}
+
+// ListenAndServe listens on the UDP address s.Addr and calls s.Handler to handle HTTP/3 requests on incoming connections.
+//
+// If s.Addr is blank, ":https" is used.
+func (s *Server) ListenAndServe() error {
+ return s.serveConn(s.TLSConfig, nil)
+}
+
+// ListenAndServeTLS listens on the UDP address s.Addr and calls s.Handler to handle HTTP/3 requests on incoming connections.
+//
+// If s.Addr is blank, ":https" is used.
+func (s *Server) ListenAndServeTLS(certFile, keyFile string) error {
+ var err error
+ certs := make([]tls.Certificate, 1)
+ certs[0], err = tls.LoadX509KeyPair(certFile, keyFile)
+ if err != nil {
+ return err
+ }
+ // We currently only use the cert-related stuff from tls.Config,
+ // so we don't need to make a full copy.
+ config := &tls.Config{
+ Certificates: certs,
+ }
+ return s.serveConn(config, nil)
+}
+
+// Serve an existing UDP connection.
+// It is possible to reuse the same connection for outgoing connections.
+// Closing the server does not close the connection.
+func (s *Server) Serve(conn net.PacketConn) error {
+ return s.serveConn(s.TLSConfig, conn)
+}
+
+// ServeQUICConn serves a single QUIC connection.
+func (s *Server) ServeQUICConn(conn quic.Connection) error {
+ s.mutex.Lock()
+ if s.logger == nil {
+ s.logger = utils.DefaultLogger.WithPrefix("server")
+ }
+ s.mutex.Unlock()
+
+ return s.handleConn(conn)
+}
+
+// ServeListener serves an existing QUIC listener.
+// Make sure you use http3.ConfigureTLSConfig to configure a tls.Config
+// and use it to construct a http3-friendly QUIC listener.
+// Closing the server does close the listener.
+func (s *Server) ServeListener(ln quic.EarlyListener) error {
+ if err := s.addListener(&ln); err != nil {
+ return err
+ }
+ err := s.serveListener(ln)
+ s.removeListener(&ln)
+ return err
+}
+
+var errServerWithoutTLSConfig = errors.New("use of http3.Server without TLSConfig")
+
+func (s *Server) serveConn(tlsConf *tls.Config, conn net.PacketConn) error {
+ if tlsConf == nil {
+ return errServerWithoutTLSConfig
+ }
+
+ s.mutex.Lock()
+ closed := s.closed
+ s.mutex.Unlock()
+ if closed {
+ return http.ErrServerClosed
+ }
+
+ baseConf := ConfigureTLSConfig(tlsConf)
+ quicConf := s.QuicConfig
+ if quicConf == nil {
+ quicConf = &quic.Config{Allow0RTT: func(net.Addr) bool { return true }}
+ } else {
+ quicConf = s.QuicConfig.Clone()
+ }
+ if s.EnableDatagrams {
+ quicConf.EnableDatagrams = true
+ }
+
+ var ln quic.EarlyListener
+ var err error
+ if conn == nil {
+ addr := s.Addr
+ if addr == "" {
+ addr = ":https"
+ }
+ ln, err = quicListenAddr(addr, baseConf, quicConf)
+ } else {
+ ln, err = quicListen(conn, baseConf, quicConf)
+ }
+ if err != nil {
+ return err
+ }
+ if err := s.addListener(&ln); err != nil {
+ return err
+ }
+ err = s.serveListener(ln)
+ s.removeListener(&ln)
+ return err
+}
+
+func (s *Server) serveListener(ln quic.EarlyListener) error {
+ for {
+ conn, err := ln.Accept(context.Background())
+ if err != nil {
+ return err
+ }
+ go func() {
+ if err := s.handleConn(conn); err != nil {
+ s.logger.Debugf(err.Error())
+ }
+ }()
+ }
+}
+
+func extractPort(addr string) (int, error) {
+ _, portStr, err := net.SplitHostPort(addr)
+ if err != nil {
+ return 0, err
+ }
+
+ portInt, err := net.LookupPort("tcp", portStr)
+ if err != nil {
+ return 0, err
+ }
+ return portInt, nil
+}
+
+func (s *Server) generateAltSvcHeader() {
+ if len(s.listeners) == 0 {
+ // Don't announce any ports since no one is listening for connections
+ s.altSvcHeader = ""
+ return
+ }
+
+ // This code assumes that we will use protocol.SupportedVersions if no quic.Config is passed.
+ supportedVersions := protocol.SupportedVersions
+ if s.QuicConfig != nil && len(s.QuicConfig.Versions) > 0 {
+ supportedVersions = s.QuicConfig.Versions
+ }
+
+ // keep track of which have been seen so we don't yield duplicate values
+ seen := make(map[string]struct{}, len(supportedVersions))
+ var versionStrings []string
+ for _, version := range supportedVersions {
+ if v := versionToALPN(version); len(v) > 0 {
+ if _, ok := seen[v]; !ok {
+ versionStrings = append(versionStrings, v)
+ seen[v] = struct{}{}
+ }
+ }
+ }
+
+ var altSvc []string
+ addPort := func(port int) {
+ for _, v := range versionStrings {
+ altSvc = append(altSvc, fmt.Sprintf(`%s=":%d"; ma=2592000`, v, port))
+ }
+ }
+
+ if s.Port != 0 {
+ // if Port is specified, we must use it instead of the
+ // listener addresses since there's a reason it's specified.
+ addPort(s.Port)
+ } else {
+ // if we have some listeners assigned, try to find ports
+ // which we can announce, otherwise nothing should be announced
+ validPortsFound := false
+ for _, info := range s.listeners {
+ if info.port != 0 {
+ addPort(info.port)
+ validPortsFound = true
+ }
+ }
+ if !validPortsFound {
+ if port, err := extractPort(s.Addr); err == nil {
+ addPort(port)
+ }
+ }
+ }
+
+ s.altSvcHeader = strings.Join(altSvc, ",")
+}
+
+// We store a pointer to interface in the map set. This is safe because we only
+// call trackListener via Serve and can track+defer untrack the same pointer to
+// local variable there. We never need to compare a Listener from another caller.
+func (s *Server) addListener(l *quic.EarlyListener) error {
+ s.mutex.Lock()
+ defer s.mutex.Unlock()
+
+ if s.closed {
+ return http.ErrServerClosed
+ }
+ if s.logger == nil {
+ s.logger = utils.DefaultLogger.WithPrefix("server")
+ }
+ if s.listeners == nil {
+ s.listeners = make(map[*quic.EarlyListener]listenerInfo)
+ }
+
+ if port, err := extractPort((*l).Addr().String()); err == nil {
+ s.listeners[l] = listenerInfo{port}
+ } else {
+ s.logger.Errorf(
+ "Unable to extract port from listener %+v, will not be announced using SetQuicHeaders: %s", err)
+ s.listeners[l] = listenerInfo{}
+ }
+ s.generateAltSvcHeader()
+ return nil
+}
+
+func (s *Server) removeListener(l *quic.EarlyListener) {
+ s.mutex.Lock()
+ delete(s.listeners, l)
+ s.generateAltSvcHeader()
+ s.mutex.Unlock()
+}
+
+func (s *Server) handleConn(conn quic.Connection) error {
+ decoder := qpack.NewDecoder(nil)
+
+ // send a SETTINGS frame
+ str, err := conn.OpenUniStream()
+ if err != nil {
+ return fmt.Errorf("opening the control stream failed: %w", err)
+ }
+ b := make([]byte, 0, 64)
+ b = quicvarint.Append(b, streamTypeControlStream) // stream type
+ b = (&settingsFrame{Datagram: s.EnableDatagrams, Other: s.AdditionalSettings}).Append(b)
+ str.Write(b)
+
+ go s.handleUnidirectionalStreams(conn)
+
+ // Process all requests immediately.
+ // It's the client's responsibility to decide which requests are eligible for 0-RTT.
+ for {
+ str, err := conn.AcceptStream(context.Background())
+ if err != nil {
+ var appErr *quic.ApplicationError
+ if errors.As(err, &appErr) && appErr.ErrorCode == quic.ApplicationErrorCode(errorNoError) {
+ return nil
+ }
+ return fmt.Errorf("accepting stream failed: %w", err)
+ }
+ go func() {
+ rerr := s.handleRequest(conn, str, decoder, func() {
+ conn.CloseWithError(quic.ApplicationErrorCode(errorFrameUnexpected), "")
+ })
+ if rerr.err == errHijacked {
+ return
+ }
+ if rerr.err != nil || rerr.streamErr != 0 || rerr.connErr != 0 {
+ s.logger.Debugf("Handling request failed: %s", err)
+ if rerr.streamErr != 0 {
+ str.CancelWrite(quic.StreamErrorCode(rerr.streamErr))
+ }
+ if rerr.connErr != 0 {
+ var reason string
+ if rerr.err != nil {
+ reason = rerr.err.Error()
+ }
+ conn.CloseWithError(quic.ApplicationErrorCode(rerr.connErr), reason)
+ }
+ return
+ }
+ str.Close()
+ }()
+ }
+}
+
+func (s *Server) handleUnidirectionalStreams(conn quic.Connection) {
+ for {
+ str, err := conn.AcceptUniStream(context.Background())
+ if err != nil {
+ s.logger.Debugf("accepting unidirectional stream failed: %s", err)
+ return
+ }
+
+ go func(str quic.ReceiveStream) {
+ streamType, err := quicvarint.Read(quicvarint.NewReader(str))
+ if err != nil {
+ if s.UniStreamHijacker != nil && s.UniStreamHijacker(StreamType(streamType), conn, str, err) {
+ return
+ }
+ s.logger.Debugf("reading stream type on stream %d failed: %s", str.StreamID(), err)
+ return
+ }
+ // We're only interested in the control stream here.
+ switch streamType {
+ case streamTypeControlStream:
+ case streamTypeQPACKEncoderStream, streamTypeQPACKDecoderStream:
+ // Our QPACK implementation doesn't use the dynamic table yet.
+ // TODO: check that only one stream of each type is opened.
+ return
+ case streamTypePushStream: // only the server can push
+ conn.CloseWithError(quic.ApplicationErrorCode(errorStreamCreationError), "")
+ return
+ default:
+ if s.UniStreamHijacker != nil && s.UniStreamHijacker(StreamType(streamType), conn, str, nil) {
+ return
+ }
+ str.CancelRead(quic.StreamErrorCode(errorStreamCreationError))
+ return
+ }
+ f, err := parseNextFrame(str, nil)
+ if err != nil {
+ conn.CloseWithError(quic.ApplicationErrorCode(errorFrameError), "")
+ return
+ }
+ sf, ok := f.(*settingsFrame)
+ if !ok {
+ conn.CloseWithError(quic.ApplicationErrorCode(errorMissingSettings), "")
+ return
+ }
+ if !sf.Datagram {
+ return
+ }
+ // If datagram support was enabled on our side as well as on the client side,
+ // we can expect it to have been negotiated both on the transport and on the HTTP/3 layer.
+ // Note: ConnectionState() will block until the handshake is complete (relevant when using 0-RTT).
+ if s.EnableDatagrams && !conn.ConnectionState().SupportsDatagrams {
+ conn.CloseWithError(quic.ApplicationErrorCode(errorSettingsError), "missing QUIC Datagram support")
+ }
+ }(str)
+ }
+}
+
+func (s *Server) maxHeaderBytes() uint64 {
+ if s.MaxHeaderBytes <= 0 {
+ return http.DefaultMaxHeaderBytes
+ }
+ return uint64(s.MaxHeaderBytes)
+}
+
+func (s *Server) handleRequest(conn quic.Connection, str quic.Stream, decoder *qpack.Decoder, onFrameError func()) requestError {
+ var ufh unknownFrameHandlerFunc
+ if s.StreamHijacker != nil {
+ ufh = func(ft FrameType, e error) (processed bool, err error) { return s.StreamHijacker(ft, conn, str, e) }
+ }
+ frame, err := parseNextFrame(str, ufh)
+ if err != nil {
+ if err == errHijacked {
+ return requestError{err: errHijacked}
+ }
+ return newStreamError(errorRequestIncomplete, err)
+ }
+ hf, ok := frame.(*headersFrame)
+ if !ok {
+ return newConnError(errorFrameUnexpected, errors.New("expected first frame to be a HEADERS frame"))
+ }
+ if hf.Length > s.maxHeaderBytes() {
+ return newStreamError(errorFrameError, fmt.Errorf("HEADERS frame too large: %d bytes (max: %d)", hf.Length, s.maxHeaderBytes()))
+ }
+ headerBlock := make([]byte, hf.Length)
+ if _, err := io.ReadFull(str, headerBlock); err != nil {
+ return newStreamError(errorRequestIncomplete, err)
+ }
+ hfs, err := decoder.DecodeFull(headerBlock)
+ if err != nil {
+ // TODO: use the right error code
+ return newConnError(errorGeneralProtocolError, err)
+ }
+ req, err := requestFromHeaders(hfs)
+ if err != nil {
+ // TODO: use the right error code
+ return newStreamError(errorGeneralProtocolError, err)
+ }
+
+ connState := conn.ConnectionState().TLS.ConnectionState
+ req.TLS = &connState
+ req.RemoteAddr = conn.RemoteAddr().String()
+ body := newRequestBody(newStream(str, onFrameError))
+ req.Body = body
+
+ if s.logger.Debug() {
+ s.logger.Infof("%s %s%s, on stream %d", req.Method, req.Host, req.RequestURI, str.StreamID())
+ } else {
+ s.logger.Infof("%s %s%s", req.Method, req.Host, req.RequestURI)
+ }
+
+ ctx := str.Context()
+ ctx = context.WithValue(ctx, ServerContextKey, s)
+ ctx = context.WithValue(ctx, http.LocalAddrContextKey, conn.LocalAddr())
+ req = req.WithContext(ctx)
+ r := newResponseWriter(str, conn, s.logger)
+ defer r.Flush()
+ handler := s.Handler
+ if handler == nil {
+ handler = http.DefaultServeMux
+ }
+
+ var panicked bool
+ func() {
+ defer func() {
+ if p := recover(); p != nil {
+ panicked = true
+ if p == http.ErrAbortHandler {
+ return
+ }
+ // Copied from net/http/server.go
+ const size = 64 << 10
+ buf := make([]byte, size)
+ buf = buf[:runtime.Stack(buf, false)]
+ s.logger.Errorf("http: panic serving: %v\n%s", p, buf)
+ }
+ }()
+ handler.ServeHTTP(r, req)
+ }()
+
+ if body.wasStreamHijacked() {
+ return requestError{err: errHijacked}
+ }
+
+ if panicked {
+ r.WriteHeader(http.StatusInternalServerError)
+ } else {
+ r.WriteHeader(http.StatusOK)
+ }
+ // If the EOF was read by the handler, CancelRead() is a no-op.
+ str.CancelRead(quic.StreamErrorCode(errorNoError))
+ return requestError{}
+}
+
+// Close the server immediately, aborting requests and sending CONNECTION_CLOSE frames to connected clients.
+// Close in combination with ListenAndServe() (instead of Serve()) may race if it is called before a UDP socket is established.
+func (s *Server) Close() error {
+ s.mutex.Lock()
+ defer s.mutex.Unlock()
+
+ s.closed = true
+
+ var err error
+ for ln := range s.listeners {
+ if cerr := (*ln).Close(); cerr != nil && err == nil {
+ err = cerr
+ }
+ }
+ return err
+}
+
+// CloseGracefully shuts down the server gracefully. The server sends a GOAWAY frame first, then waits for either timeout to trigger, or for all running requests to complete.
+// CloseGracefully in combination with ListenAndServe() (instead of Serve()) may race if it is called before a UDP socket is established.
+func (s *Server) CloseGracefully(timeout time.Duration) error {
+ // TODO: implement
+ return nil
+}
+
+// ErrNoAltSvcPort is the error returned by SetQuicHeaders when no port was found
+// for Alt-Svc to announce. This can happen if listening on a PacketConn without a port
+// (UNIX socket, for example) and no port is specified in Server.Port or Server.Addr.
+var ErrNoAltSvcPort = errors.New("no port can be announced, specify it explicitly using Server.Port or Server.Addr")
+
+// SetQuicHeaders can be used to set the proper headers that announce that this server supports HTTP/3.
+// The values set by default advertise all of the ports the server is listening on, but can be
+// changed to a specific port by setting Server.Port before launching the server.
+// If no listener's Addr().String() returns an address with a valid port, Server.Addr will be used
+// to extract the port, if specified.
+// For example, a server launched using ListenAndServe on an address with port 443 would set:
+//
+// Alt-Svc: h3=":443"; ma=2592000,h3-29=":443"; ma=2592000
+func (s *Server) SetQuicHeaders(hdr http.Header) error {
+ s.mutex.RLock()
+ defer s.mutex.RUnlock()
+
+ if s.altSvcHeader == "" {
+ return ErrNoAltSvcPort
+ }
+ // use the map directly to avoid constant canonicalization
+ // since the key is already canonicalized
+ hdr["Alt-Svc"] = append(hdr["Alt-Svc"], s.altSvcHeader)
+ return nil
+}
+
+// ListenAndServeQUIC listens on the UDP network address addr and calls the
+// handler for HTTP/3 requests on incoming connections. http.DefaultServeMux is
+// used when handler is nil.
+func ListenAndServeQUIC(addr, certFile, keyFile string, handler http.Handler) error {
+ server := &Server{
+ Addr: addr,
+ Handler: handler,
+ }
+ return server.ListenAndServeTLS(certFile, keyFile)
+}
+
+// ListenAndServe listens on the given network address for both, TLS and QUIC
+// connections in parallel. It returns if one of the two returns an error.
+// http.DefaultServeMux is used when handler is nil.
+// The correct Alt-Svc headers for QUIC are set.
+func ListenAndServe(addr, certFile, keyFile string, handler http.Handler) error {
+ // Load certs
+ var err error
+ certs := make([]tls.Certificate, 1)
+ certs[0], err = tls.LoadX509KeyPair(certFile, keyFile)
+ if err != nil {
+ return err
+ }
+ // We currently only use the cert-related stuff from tls.Config,
+ // so we don't need to make a full copy.
+ config := &tls.Config{
+ Certificates: certs,
+ }
+
+ if addr == "" {
+ addr = ":https"
+ }
+
+ // Open the listeners
+ udpAddr, err := net.ResolveUDPAddr("udp", addr)
+ if err != nil {
+ return err
+ }
+ udpConn, err := net.ListenUDP("udp", udpAddr)
+ if err != nil {
+ return err
+ }
+ defer udpConn.Close()
+
+ if handler == nil {
+ handler = http.DefaultServeMux
+ }
+ // Start the servers
+ quicServer := &Server{
+ TLSConfig: config,
+ Handler: handler,
+ }
+
+ hErr := make(chan error)
+ qErr := make(chan error)
+ go func() {
+ hErr <- http.ListenAndServeTLS(addr, certFile, keyFile, http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) {
+ quicServer.SetQuicHeaders(w.Header())
+ handler.ServeHTTP(w, r)
+ }))
+ }()
+ go func() {
+ qErr <- quicServer.Serve(udpConn)
+ }()
+
+ select {
+ case err := <-hErr:
+ quicServer.Close()
+ return err
+ case err := <-qErr:
+ // Cannot close the HTTP server or wait for requests to complete properly :/
+ return err
+ }
+}
diff --git a/vendor/github.com/lucas-clemente/quic-go/interface.go b/vendor/github.com/quic-go/quic-go/interface.go
similarity index 80%
rename from vendor/github.com/lucas-clemente/quic-go/interface.go
rename to vendor/github.com/quic-go/quic-go/interface.go
index 6130b5497..e55f258e5 100644
--- a/vendor/github.com/lucas-clemente/quic-go/interface.go
+++ b/vendor/github.com/quic-go/quic-go/interface.go
@@ -7,9 +7,9 @@ import (
"net"
"time"
- "github.com/lucas-clemente/quic-go/internal/handshake"
- "github.com/lucas-clemente/quic-go/internal/protocol"
- "github.com/lucas-clemente/quic-go/logging"
+ "github.com/quic-go/quic-go/internal/handshake"
+ "github.com/quic-go/quic-go/internal/protocol"
+ "github.com/quic-go/quic-go/logging"
)
// The StreamID is the ID of a QUIC stream.
@@ -26,16 +26,6 @@ const (
Version2 = protocol.Version2
)
-// A Token can be used to verify the ownership of the client address.
-type Token struct {
- // IsRetryToken encodes how the client received the token. There are two ways:
- // * In a Retry packet sent when trying to establish a new connection.
- // * In a NEW_TOKEN frame on a previous connection.
- IsRetryToken bool
- RemoteAddr string
- SentTime time.Time
-}
-
// A ClientToken is a token received by the client.
// It can be used to skip address validation on future connection attempts.
type ClientToken struct {
@@ -183,10 +173,9 @@ type Connection interface {
// CloseWithError closes the connection with an error.
// The error string will be sent to the peer.
CloseWithError(ApplicationErrorCode, string) error
- // The context is cancelled when the connection is closed.
+ // Context returns a context that is cancelled when the connection is closed.
Context() context.Context
// ConnectionState returns basic details about the QUIC connection.
- // It blocks until the handshake completes.
// Warning: This API should not be considered stable and might change soon.
ConnectionState() ConnectionState
@@ -211,6 +200,38 @@ type EarlyConnection interface {
NextConnection() Connection
}
+// StatelessResetKey is a key used to derive stateless reset tokens.
+type StatelessResetKey [32]byte
+
+// A ConnectionID is a QUIC Connection ID, as defined in RFC 9000.
+// It is not able to handle QUIC Connection IDs longer than 20 bytes,
+// as they are allowed by RFC 8999.
+type ConnectionID = protocol.ConnectionID
+
+// ConnectionIDFromBytes interprets b as a Connection ID. It panics if b is
+// longer than 20 bytes.
+func ConnectionIDFromBytes(b []byte) ConnectionID {
+ return protocol.ParseConnectionID(b)
+}
+
+// A ConnectionIDGenerator is an interface that allows clients to implement their own format
+// for the Connection IDs that servers/clients use as SrcConnectionID in QUIC packets.
+//
+// Connection IDs generated by an implementation should always produce IDs of constant size.
+type ConnectionIDGenerator interface {
+ // GenerateConnectionID generates a new ConnectionID.
+ // Generated ConnectionIDs should be unique and observers should not be able to correlate two ConnectionIDs.
+ GenerateConnectionID() (ConnectionID, error)
+
+ // ConnectionIDLen tells what is the length of the ConnectionIDs generated by the implementation of
+ // this interface.
+ // Effectively, this means that implementations of ConnectionIDGenerator must always return constant-size
+ // connection IDs. Valid lengths are between 0 and 20, and every call to GenerateConnectionID must return an ID of that same length.
+ // 0-length ConnectionIDs can be used when an endpoint (server or client) does not require multiplexing connections
+ // in the presence of a connection migration environment.
+ ConnectionIDLen() int
+}
+
// Config contains all configuration data needed for a QUIC server or client.
type Config struct {
// The QUIC versions that can be negotiated.
@@ -223,6 +244,11 @@ type Config struct {
// If used for a server, or dialing on a packet conn, a 4 byte connection ID will be used.
// When dialing on a packet conn, the ConnectionIDLength value must be the same for every Dial call.
ConnectionIDLength int
+ // An optional ConnectionIDGenerator to be used for ConnectionIDs generated during the lifecycle of a QUIC connection.
+ // The goal is to give some control over how connection IDs are generated, which can be useful in some scenarios, in particular for servers.
+ // By default, if not provided, random connection IDs with the length given by ConnectionIDLength are used.
+ // Otherwise, if one is provided, then ConnectionIDLength is ignored.
+ ConnectionIDGenerator ConnectionIDGenerator
// HandshakeIdleTimeout is the idle timeout before completion of the handshake.
// Specifically, if we don't receive any packet from the peer within this time, the connection attempt is aborted.
// If this value is zero, the timeout is set to 5 seconds.
@@ -233,14 +259,18 @@ type Config struct {
// If the timeout is exceeded, the connection is closed.
// If this value is zero, the timeout is set to 30 seconds.
MaxIdleTimeout time.Duration
- // AcceptToken determines if a Token is accepted.
- // It is called with token = nil if the client didn't send a token.
- // If not set, a default verification function is used:
- // * it verifies that the address matches, and
- // * if the token is a retry token, that it was issued within the last 5 seconds
- // * else, that it was issued within the last 24 hours.
- // This option is only valid for the server.
- AcceptToken func(clientAddr net.Addr, token *Token) bool
+ // RequireAddressValidation determines if a QUIC Retry packet is sent.
+ // This allows the server to verify the client's address, at the cost of increasing the handshake latency by 1 RTT.
+ // See https://datatracker.ietf.org/doc/html/rfc9000#section-8 for details.
+ // If not set, every client is forced to prove its remote address.
+ RequireAddressValidation func(net.Addr) bool
+ // MaxRetryTokenAge is the maximum age of a Retry token.
+ // If not set, it defaults to 5 seconds. Only valid for a server.
+ MaxRetryTokenAge time.Duration
+ // MaxTokenAge is the maximum age of the token presented during the handshake,
+ // for tokens that were issued on a previous connection.
+ // If not set, it defaults to 24 hours. Only valid for a server.
+ MaxTokenAge time.Duration
// The TokenStore stores tokens received from the server.
// Tokens are used to skip address validation on future connection attempts.
// The key used to store tokens is the ServerName from the tls.Config, if set
@@ -268,7 +298,7 @@ type Config struct {
// limit the memory usage.
// To avoid deadlocks, it is not valid to call other functions on the connection or on streams
// in this callback.
- AllowConnectionWindowIncrease func(sess Connection, delta uint64) bool
+ AllowConnectionWindowIncrease func(conn Connection, delta uint64) bool
// MaxIncomingStreams is the maximum number of concurrent bidirectional streams that a peer is allowed to open.
// Values above 2^60 are invalid.
// If not set, it will default to 100.
@@ -281,7 +311,7 @@ type Config struct {
MaxIncomingUniStreams int64
// The StatelessResetKey is used to generate stateless reset tokens.
// If no key is configured, sending of stateless resets is disabled.
- StatelessResetKey []byte
+ StatelessResetKey *StatelessResetKey
// KeepAlivePeriod defines whether this peer will periodically send a packet to keep the connection alive.
// If set to 0, then no keep alive is sent. Otherwise, the keep alive is sent on that period (or at most
// every half of MaxIdleTimeout, whichever is smaller).
@@ -294,8 +324,12 @@ type Config struct {
// This can be useful if version information is exchanged out-of-band.
// It has no effect for a client.
DisableVersionNegotiationPackets bool
- // See https://datatracker.ietf.org/doc/draft-ietf-quic-datagram/.
- // Datagrams will only be available when both peers enable datagram support.
+ // Allow0RTT allows the application to decide if a 0-RTT connection attempt should be accepted.
+ // When set, 0-RTT is enabled. When not set, 0-RTT is disabled.
+ // Only valid for the server.
+ // Warning: This API should not be considered stable and might change soon.
+ Allow0RTT func(net.Addr) bool
+ // Enable QUIC datagram support (RFC 9221).
EnableDatagrams bool
Tracer logging.Tracer
}
@@ -304,6 +338,7 @@ type Config struct {
type ConnectionState struct {
TLS handshake.ConnectionState
SupportsDatagrams bool
+ Version VersionNumber
}
// A Listener for incoming QUIC connections
diff --git a/vendor/github.com/lucas-clemente/quic-go/internal/ackhandler/ack_eliciting.go b/vendor/github.com/quic-go/quic-go/internal/ackhandler/ack_eliciting.go
similarity index 80%
rename from vendor/github.com/lucas-clemente/quic-go/internal/ackhandler/ack_eliciting.go
rename to vendor/github.com/quic-go/quic-go/internal/ackhandler/ack_eliciting.go
index b8cd558ae..4bab41901 100644
--- a/vendor/github.com/lucas-clemente/quic-go/internal/ackhandler/ack_eliciting.go
+++ b/vendor/github.com/quic-go/quic-go/internal/ackhandler/ack_eliciting.go
@@ -1,6 +1,6 @@
package ackhandler
-import "github.com/lucas-clemente/quic-go/internal/wire"
+import "github.com/quic-go/quic-go/internal/wire"
// IsFrameAckEliciting returns true if the frame is ack-eliciting.
func IsFrameAckEliciting(f wire.Frame) bool {
@@ -10,7 +10,7 @@ func IsFrameAckEliciting(f wire.Frame) bool {
}
// HasAckElicitingFrames returns true if at least one frame is ack-eliciting.
-func HasAckElicitingFrames(fs []Frame) bool {
+func HasAckElicitingFrames(fs []*Frame) bool {
for _, f := range fs {
if IsFrameAckEliciting(f.Frame) {
return true
diff --git a/vendor/github.com/quic-go/quic-go/internal/ackhandler/ackhandler.go b/vendor/github.com/quic-go/quic-go/internal/ackhandler/ackhandler.go
new file mode 100644
index 000000000..2c7cc4fcf
--- /dev/null
+++ b/vendor/github.com/quic-go/quic-go/internal/ackhandler/ackhandler.go
@@ -0,0 +1,23 @@
+package ackhandler
+
+import (
+ "github.com/quic-go/quic-go/internal/protocol"
+ "github.com/quic-go/quic-go/internal/utils"
+ "github.com/quic-go/quic-go/logging"
+)
+
+// NewAckHandler creates a new SentPacketHandler and a new ReceivedPacketHandler.
+// clientAddressValidated indicates whether the address was validated beforehand by an address validation token.
+// clientAddressValidated has no effect for a client.
+func NewAckHandler(
+ initialPacketNumber protocol.PacketNumber,
+ initialMaxDatagramSize protocol.ByteCount,
+ rttStats *utils.RTTStats,
+ clientAddressValidated bool,
+ pers protocol.Perspective,
+ tracer logging.ConnectionTracer,
+ logger utils.Logger,
+) (SentPacketHandler, ReceivedPacketHandler) {
+ sph := newSentPacketHandler(initialPacketNumber, initialMaxDatagramSize, rttStats, clientAddressValidated, pers, tracer, logger)
+ return sph, newReceivedPacketHandler(sph, rttStats, logger)
+}
diff --git a/vendor/github.com/quic-go/quic-go/internal/ackhandler/frame.go b/vendor/github.com/quic-go/quic-go/internal/ackhandler/frame.go
new file mode 100644
index 000000000..deb23cfcb
--- /dev/null
+++ b/vendor/github.com/quic-go/quic-go/internal/ackhandler/frame.go
@@ -0,0 +1,29 @@
+package ackhandler
+
+import (
+ "sync"
+
+ "github.com/quic-go/quic-go/internal/wire"
+)
+
+type Frame struct {
+ wire.Frame // nil if the frame has already been acknowledged in another packet
+ OnLost func(wire.Frame)
+ OnAcked func(wire.Frame)
+}
+
+var framePool = sync.Pool{New: func() any { return &Frame{} }}
+
+func GetFrame() *Frame {
+ f := framePool.Get().(*Frame)
+ f.OnLost = nil
+ f.OnAcked = nil
+ return f
+}
+
+func putFrame(f *Frame) {
+ f.Frame = nil
+ f.OnLost = nil
+ f.OnAcked = nil
+ framePool.Put(f)
+}
diff --git a/vendor/github.com/lucas-clemente/quic-go/internal/ackhandler/interfaces.go b/vendor/github.com/quic-go/quic-go/internal/ackhandler/interfaces.go
similarity index 74%
rename from vendor/github.com/lucas-clemente/quic-go/internal/ackhandler/interfaces.go
rename to vendor/github.com/quic-go/quic-go/internal/ackhandler/interfaces.go
index 5777d97a7..5924f84bd 100644
--- a/vendor/github.com/lucas-clemente/quic-go/internal/ackhandler/interfaces.go
+++ b/vendor/github.com/quic-go/quic-go/internal/ackhandler/interfaces.go
@@ -3,26 +3,10 @@ package ackhandler
import (
"time"
- "github.com/lucas-clemente/quic-go/internal/protocol"
- "github.com/lucas-clemente/quic-go/internal/wire"
+ "github.com/quic-go/quic-go/internal/protocol"
+ "github.com/quic-go/quic-go/internal/wire"
)
-// A Packet is a packet
-type Packet struct {
- PacketNumber protocol.PacketNumber
- Frames []Frame
- LargestAcked protocol.PacketNumber // InvalidPacketNumber if the packet doesn't contain an ACK
- Length protocol.ByteCount
- EncryptionLevel protocol.EncryptionLevel
- SendTime time.Time
-
- IsPathMTUProbePacket bool // We don't report the loss of Path MTU probe packets to the congestion controller.
-
- includedInBytesInFlight bool
- declaredLost bool
- skippedPacket bool
-}
-
// SentPacketHandler handles ACKs received for outgoing packets
type SentPacketHandler interface {
// SentPacket may modify the packet
diff --git a/vendor/github.com/quic-go/quic-go/internal/ackhandler/mockgen.go b/vendor/github.com/quic-go/quic-go/internal/ackhandler/mockgen.go
new file mode 100644
index 000000000..366e5520d
--- /dev/null
+++ b/vendor/github.com/quic-go/quic-go/internal/ackhandler/mockgen.go
@@ -0,0 +1,3 @@
+package ackhandler
+
+//go:generate sh -c "../../mockgen_private.sh ackhandler mock_sent_packet_tracker_test.go github.com/quic-go/quic-go/internal/ackhandler sentPacketTracker"
diff --git a/vendor/github.com/quic-go/quic-go/internal/ackhandler/packet.go b/vendor/github.com/quic-go/quic-go/internal/ackhandler/packet.go
new file mode 100644
index 000000000..394ee40a9
--- /dev/null
+++ b/vendor/github.com/quic-go/quic-go/internal/ackhandler/packet.go
@@ -0,0 +1,55 @@
+package ackhandler
+
+import (
+ "sync"
+ "time"
+
+ "github.com/quic-go/quic-go/internal/protocol"
+)
+
+// A Packet is a packet
+type Packet struct {
+ PacketNumber protocol.PacketNumber
+ Frames []*Frame
+ LargestAcked protocol.PacketNumber // InvalidPacketNumber if the packet doesn't contain an ACK
+ Length protocol.ByteCount
+ EncryptionLevel protocol.EncryptionLevel
+ SendTime time.Time
+
+ IsPathMTUProbePacket bool // We don't report the loss of Path MTU probe packets to the congestion controller.
+
+ includedInBytesInFlight bool
+ declaredLost bool
+ skippedPacket bool
+}
+
+func (p *Packet) outstanding() bool {
+ return !p.declaredLost && !p.skippedPacket && !p.IsPathMTUProbePacket
+}
+
+var packetPool = sync.Pool{New: func() any { return &Packet{} }}
+
+func GetPacket() *Packet {
+ p := packetPool.Get().(*Packet)
+ p.PacketNumber = 0
+ p.Frames = nil
+ p.LargestAcked = 0
+ p.Length = 0
+ p.EncryptionLevel = protocol.EncryptionLevel(0)
+ p.SendTime = time.Time{}
+ p.IsPathMTUProbePacket = false
+ p.includedInBytesInFlight = false
+ p.declaredLost = false
+ p.skippedPacket = false
+ return p
+}
+
+// We currently only return Packets back into the pool when they're acknowledged (not when they're lost).
+// This simplifies the code, and gives the vast majority of the performance benefit we can gain from using the pool.
+func putPacket(p *Packet) {
+ for _, f := range p.Frames {
+ putFrame(f)
+ }
+ p.Frames = nil
+ packetPool.Put(p)
+}
diff --git a/vendor/github.com/lucas-clemente/quic-go/internal/ackhandler/packet_number_generator.go b/vendor/github.com/quic-go/quic-go/internal/ackhandler/packet_number_generator.go
similarity index 92%
rename from vendor/github.com/lucas-clemente/quic-go/internal/ackhandler/packet_number_generator.go
rename to vendor/github.com/quic-go/quic-go/internal/ackhandler/packet_number_generator.go
index 7d58650cc..9cf20a0b0 100644
--- a/vendor/github.com/lucas-clemente/quic-go/internal/ackhandler/packet_number_generator.go
+++ b/vendor/github.com/quic-go/quic-go/internal/ackhandler/packet_number_generator.go
@@ -1,8 +1,8 @@
package ackhandler
import (
- "github.com/lucas-clemente/quic-go/internal/protocol"
- "github.com/lucas-clemente/quic-go/internal/utils"
+ "github.com/quic-go/quic-go/internal/protocol"
+ "github.com/quic-go/quic-go/internal/utils"
)
type packetNumberGenerator interface {
@@ -72,5 +72,5 @@ func (p *skippingPacketNumberGenerator) Pop() protocol.PacketNumber {
func (p *skippingPacketNumberGenerator) generateNewSkip() {
// make sure that there are never two consecutive packet numbers that are skipped
p.nextToSkip = p.next + 2 + protocol.PacketNumber(p.rng.Int31n(int32(2*p.period)))
- p.period = utils.MinPacketNumber(2*p.period, p.maxPeriod)
+ p.period = utils.Min(2*p.period, p.maxPeriod)
}
diff --git a/vendor/github.com/lucas-clemente/quic-go/internal/ackhandler/received_packet_handler.go b/vendor/github.com/quic-go/quic-go/internal/ackhandler/received_packet_handler.go
similarity index 84%
rename from vendor/github.com/lucas-clemente/quic-go/internal/ackhandler/received_packet_handler.go
rename to vendor/github.com/quic-go/quic-go/internal/ackhandler/received_packet_handler.go
index 89fb30d31..3675694f4 100644
--- a/vendor/github.com/lucas-clemente/quic-go/internal/ackhandler/received_packet_handler.go
+++ b/vendor/github.com/quic-go/quic-go/internal/ackhandler/received_packet_handler.go
@@ -4,9 +4,9 @@ import (
"fmt"
"time"
- "github.com/lucas-clemente/quic-go/internal/protocol"
- "github.com/lucas-clemente/quic-go/internal/utils"
- "github.com/lucas-clemente/quic-go/internal/wire"
+ "github.com/quic-go/quic-go/internal/protocol"
+ "github.com/quic-go/quic-go/internal/utils"
+ "github.com/quic-go/quic-go/internal/wire"
)
type receivedPacketHandler struct {
@@ -25,13 +25,12 @@ func newReceivedPacketHandler(
sentPackets sentPacketTracker,
rttStats *utils.RTTStats,
logger utils.Logger,
- version protocol.VersionNumber,
) ReceivedPacketHandler {
return &receivedPacketHandler{
sentPackets: sentPackets,
- initialPackets: newReceivedPacketTracker(rttStats, logger, version),
- handshakePackets: newReceivedPacketTracker(rttStats, logger, version),
- appDataPackets: newReceivedPacketTracker(rttStats, logger, version),
+ initialPackets: newReceivedPacketTracker(rttStats, logger),
+ handshakePackets: newReceivedPacketTracker(rttStats, logger),
+ appDataPackets: newReceivedPacketTracker(rttStats, logger),
lowest1RTTPacket: protocol.InvalidPacketNumber,
}
}
@@ -46,24 +45,26 @@ func (h *receivedPacketHandler) ReceivedPacket(
h.sentPackets.ReceivedPacket(encLevel)
switch encLevel {
case protocol.EncryptionInitial:
- h.initialPackets.ReceivedPacket(pn, ecn, rcvTime, shouldInstigateAck)
+ return h.initialPackets.ReceivedPacket(pn, ecn, rcvTime, shouldInstigateAck)
case protocol.EncryptionHandshake:
- h.handshakePackets.ReceivedPacket(pn, ecn, rcvTime, shouldInstigateAck)
+ return h.handshakePackets.ReceivedPacket(pn, ecn, rcvTime, shouldInstigateAck)
case protocol.Encryption0RTT:
if h.lowest1RTTPacket != protocol.InvalidPacketNumber && pn > h.lowest1RTTPacket {
return fmt.Errorf("received packet number %d on a 0-RTT packet after receiving %d on a 1-RTT packet", pn, h.lowest1RTTPacket)
}
- h.appDataPackets.ReceivedPacket(pn, ecn, rcvTime, shouldInstigateAck)
+ return h.appDataPackets.ReceivedPacket(pn, ecn, rcvTime, shouldInstigateAck)
case protocol.Encryption1RTT:
if h.lowest1RTTPacket == protocol.InvalidPacketNumber || pn < h.lowest1RTTPacket {
h.lowest1RTTPacket = pn
}
+ if err := h.appDataPackets.ReceivedPacket(pn, ecn, rcvTime, shouldInstigateAck); err != nil {
+ return err
+ }
h.appDataPackets.IgnoreBelow(h.sentPackets.GetLowestPacketNotConfirmedAcked())
- h.appDataPackets.ReceivedPacket(pn, ecn, rcvTime, shouldInstigateAck)
+ return nil
default:
panic(fmt.Sprintf("received packet with unknown encryption level: %s", encLevel))
}
- return nil
}
func (h *receivedPacketHandler) DropPackets(encLevel protocol.EncryptionLevel) {
diff --git a/vendor/github.com/lucas-clemente/quic-go/internal/ackhandler/received_packet_history.go b/vendor/github.com/quic-go/quic-go/internal/ackhandler/received_packet_history.go
similarity index 74%
rename from vendor/github.com/lucas-clemente/quic-go/internal/ackhandler/received_packet_history.go
rename to vendor/github.com/quic-go/quic-go/internal/ackhandler/received_packet_history.go
index 5a0391ef2..3143bfe12 100644
--- a/vendor/github.com/lucas-clemente/quic-go/internal/ackhandler/received_packet_history.go
+++ b/vendor/github.com/quic-go/quic-go/internal/ackhandler/received_packet_history.go
@@ -1,23 +1,37 @@
package ackhandler
import (
- "github.com/lucas-clemente/quic-go/internal/protocol"
- "github.com/lucas-clemente/quic-go/internal/utils"
- "github.com/lucas-clemente/quic-go/internal/wire"
+ "sync"
+
+ "github.com/quic-go/quic-go/internal/protocol"
+ list "github.com/quic-go/quic-go/internal/utils/linkedlist"
+ "github.com/quic-go/quic-go/internal/wire"
)
+// interval is an interval from one PacketNumber to the other
+type interval struct {
+ Start protocol.PacketNumber
+ End protocol.PacketNumber
+}
+
+var intervalElementPool sync.Pool
+
+func init() {
+ intervalElementPool = *list.NewPool[interval]()
+}
+
// The receivedPacketHistory stores if a packet number has already been received.
// It generates ACK ranges which can be used to assemble an ACK frame.
// It does not store packet contents.
type receivedPacketHistory struct {
- ranges *utils.PacketIntervalList
+ ranges *list.List[interval]
deletedBelow protocol.PacketNumber
}
func newReceivedPacketHistory() *receivedPacketHistory {
return &receivedPacketHistory{
- ranges: utils.NewPacketIntervalList(),
+ ranges: list.NewWithPool[interval](&intervalElementPool),
}
}
@@ -34,7 +48,7 @@ func (h *receivedPacketHistory) ReceivedPacket(p protocol.PacketNumber) bool /*
func (h *receivedPacketHistory) addToRanges(p protocol.PacketNumber) bool /* is a new packet (and not a duplicate / delayed packet) */ {
if h.ranges.Len() == 0 {
- h.ranges.PushBack(utils.PacketInterval{Start: p, End: p})
+ h.ranges.PushBack(interval{Start: p, End: p})
return true
}
@@ -61,13 +75,13 @@ func (h *receivedPacketHistory) addToRanges(p protocol.PacketNumber) bool /* is
// create a new range at the end
if p > el.Value.End {
- h.ranges.InsertAfter(utils.PacketInterval{Start: p, End: p}, el)
+ h.ranges.InsertAfter(interval{Start: p, End: p}, el)
return true
}
}
// create a new range at the beginning
- h.ranges.InsertBefore(utils.PacketInterval{Start: p, End: p}, h.ranges.Front())
+ h.ranges.InsertBefore(interval{Start: p, End: p}, h.ranges.Front())
return true
}
@@ -101,17 +115,12 @@ func (h *receivedPacketHistory) DeleteBelow(p protocol.PacketNumber) {
}
}
-// GetAckRanges gets a slice of all AckRanges that can be used in an AckFrame
-func (h *receivedPacketHistory) GetAckRanges() []wire.AckRange {
- if h.ranges.Len() == 0 {
- return nil
- }
-
- ackRanges := make([]wire.AckRange, h.ranges.Len())
- i := 0
- for el := h.ranges.Back(); el != nil; el = el.Prev() {
- ackRanges[i] = wire.AckRange{Smallest: el.Value.Start, Largest: el.Value.End}
- i++
+// AppendAckRanges appends to a slice of all AckRanges that can be used in an AckFrame
+func (h *receivedPacketHistory) AppendAckRanges(ackRanges []wire.AckRange) []wire.AckRange {
+ if h.ranges.Len() > 0 {
+ for el := h.ranges.Back(); el != nil; el = el.Prev() {
+ ackRanges = append(ackRanges, wire.AckRange{Smallest: el.Value.Start, Largest: el.Value.End})
+ }
}
return ackRanges
}
diff --git a/vendor/github.com/lucas-clemente/quic-go/internal/ackhandler/received_packet_tracker.go b/vendor/github.com/quic-go/quic-go/internal/ackhandler/received_packet_tracker.go
similarity index 86%
rename from vendor/github.com/lucas-clemente/quic-go/internal/ackhandler/received_packet_tracker.go
rename to vendor/github.com/quic-go/quic-go/internal/ackhandler/received_packet_tracker.go
index 56e792695..7132ccaad 100644
--- a/vendor/github.com/lucas-clemente/quic-go/internal/ackhandler/received_packet_tracker.go
+++ b/vendor/github.com/quic-go/quic-go/internal/ackhandler/received_packet_tracker.go
@@ -1,11 +1,12 @@
package ackhandler
import (
+ "fmt"
"time"
- "github.com/lucas-clemente/quic-go/internal/protocol"
- "github.com/lucas-clemente/quic-go/internal/utils"
- "github.com/lucas-clemente/quic-go/internal/wire"
+ "github.com/quic-go/quic-go/internal/protocol"
+ "github.com/quic-go/quic-go/internal/utils"
+ "github.com/quic-go/quic-go/internal/wire"
)
// number of ack-eliciting packets received before sending an ack.
@@ -30,27 +31,23 @@ type receivedPacketTracker struct {
lastAck *wire.AckFrame
logger utils.Logger
-
- version protocol.VersionNumber
}
func newReceivedPacketTracker(
rttStats *utils.RTTStats,
logger utils.Logger,
- version protocol.VersionNumber,
) *receivedPacketTracker {
return &receivedPacketTracker{
packetHistory: newReceivedPacketHistory(),
maxAckDelay: protocol.MaxAckDelay,
rttStats: rttStats,
logger: logger,
- version: version,
}
}
-func (h *receivedPacketTracker) ReceivedPacket(packetNumber protocol.PacketNumber, ecn protocol.ECN, rcvTime time.Time, shouldInstigateAck bool) {
- if packetNumber < h.ignoreBelow {
- return
+func (h *receivedPacketTracker) ReceivedPacket(packetNumber protocol.PacketNumber, ecn protocol.ECN, rcvTime time.Time, shouldInstigateAck bool) error {
+ if isNew := h.packetHistory.ReceivedPacket(packetNumber); !isNew {
+ return fmt.Errorf("recevedPacketTracker BUG: ReceivedPacket called for old / duplicate packet %d", packetNumber)
}
isMissing := h.isMissing(packetNumber)
@@ -59,7 +56,7 @@ func (h *receivedPacketTracker) ReceivedPacket(packetNumber protocol.PacketNumbe
h.largestObservedReceivedTime = rcvTime
}
- if isNew := h.packetHistory.ReceivedPacket(packetNumber); isNew && shouldInstigateAck {
+ if shouldInstigateAck {
h.hasNewAck = true
}
if shouldInstigateAck {
@@ -74,6 +71,7 @@ func (h *receivedPacketTracker) ReceivedPacket(packetNumber protocol.PacketNumbe
case protocol.ECNCE:
h.ecnce++
}
+ return nil
}
// IgnoreBelow sets a lower limit for acknowledging packets.
@@ -171,16 +169,16 @@ func (h *receivedPacketTracker) GetAckFrame(onlyIfQueued bool) *wire.AckFrame {
}
}
- ack := &wire.AckFrame{
- AckRanges: h.packetHistory.GetAckRanges(),
- // Make sure that the DelayTime is always positive.
- // This is not guaranteed on systems that don't have a monotonic clock.
- DelayTime: utils.MaxDuration(0, now.Sub(h.largestObservedReceivedTime)),
- ECT0: h.ect0,
- ECT1: h.ect1,
- ECNCE: h.ecnce,
- }
+ ack := wire.GetAckFrame()
+ ack.DelayTime = utils.Max(0, now.Sub(h.largestObservedReceivedTime))
+ ack.ECT0 = h.ect0
+ ack.ECT1 = h.ect1
+ ack.ECNCE = h.ecnce
+ ack.AckRanges = h.packetHistory.AppendAckRanges(ack.AckRanges)
+ if h.lastAck != nil {
+ wire.PutAckFrame(h.lastAck)
+ }
h.lastAck = ack
h.ackAlarm = time.Time{}
h.ackQueued = false
diff --git a/vendor/github.com/lucas-clemente/quic-go/internal/ackhandler/send_mode.go b/vendor/github.com/quic-go/quic-go/internal/ackhandler/send_mode.go
similarity index 100%
rename from vendor/github.com/lucas-clemente/quic-go/internal/ackhandler/send_mode.go
rename to vendor/github.com/quic-go/quic-go/internal/ackhandler/send_mode.go
diff --git a/vendor/github.com/lucas-clemente/quic-go/internal/ackhandler/sent_packet_handler.go b/vendor/github.com/quic-go/quic-go/internal/ackhandler/sent_packet_handler.go
similarity index 91%
rename from vendor/github.com/lucas-clemente/quic-go/internal/ackhandler/sent_packet_handler.go
rename to vendor/github.com/quic-go/quic-go/internal/ackhandler/sent_packet_handler.go
index 7df91f23f..732bbc3a1 100644
--- a/vendor/github.com/lucas-clemente/quic-go/internal/ackhandler/sent_packet_handler.go
+++ b/vendor/github.com/quic-go/quic-go/internal/ackhandler/sent_packet_handler.go
@@ -5,12 +5,12 @@ import (
"fmt"
"time"
- "github.com/lucas-clemente/quic-go/internal/congestion"
- "github.com/lucas-clemente/quic-go/internal/protocol"
- "github.com/lucas-clemente/quic-go/internal/qerr"
- "github.com/lucas-clemente/quic-go/internal/utils"
- "github.com/lucas-clemente/quic-go/internal/wire"
- "github.com/lucas-clemente/quic-go/logging"
+ "github.com/quic-go/quic-go/internal/congestion"
+ "github.com/quic-go/quic-go/internal/protocol"
+ "github.com/quic-go/quic-go/internal/qerr"
+ "github.com/quic-go/quic-go/internal/utils"
+ "github.com/quic-go/quic-go/internal/wire"
+ "github.com/quic-go/quic-go/logging"
)
const (
@@ -23,6 +23,8 @@ const (
amplificationFactor = 3
// We use Retry packets to derive an RTT estimate. Make sure we don't set the RTT to a super low value yet.
minRTTAfterRetry = 5 * time.Millisecond
+ // The PTO duration uses exponential backoff, but is truncated to a maximum value, as allowed by RFC 8961, section 4.4.
+ maxPTODuration = 60 * time.Second
)
type packetNumberSpace struct {
@@ -101,10 +103,13 @@ var (
_ sentPacketTracker = &sentPacketHandler{}
)
+// clientAddressValidated indicates whether the address was validated beforehand by an address validation token.
+// If the address was validated, the amplification limit doesn't apply. It has no effect for a client.
func newSentPacketHandler(
initialPN protocol.PacketNumber,
initialMaxDatagramSize protocol.ByteCount,
rttStats *utils.RTTStats,
+ clientAddressValidated bool,
pers protocol.Perspective,
tracer logging.ConnectionTracer,
logger utils.Logger,
@@ -119,7 +124,7 @@ func newSentPacketHandler(
return &sentPacketHandler{
peerCompletedAddressValidation: pers == protocol.PerspectiveServer,
- peerAddressValidated: pers == protocol.PerspectiveClient,
+ peerAddressValidated: pers == protocol.PerspectiveClient || clientAddressValidated,
initialPackets: newPacketNumberSpace(initialPN, false, rttStats),
handshakePackets: newPacketNumberSpace(0, false, rttStats),
appDataPackets: newPacketNumberSpace(0, true, rttStats),
@@ -223,14 +228,20 @@ func (h *sentPacketHandler) packetsInFlight() int {
return packetsInFlight
}
-func (h *sentPacketHandler) SentPacket(packet *Packet) {
- h.bytesSent += packet.Length
+func (h *sentPacketHandler) SentPacket(p *Packet) {
+ h.bytesSent += p.Length
// For the client, drop the Initial packet number space when the first Handshake packet is sent.
- if h.perspective == protocol.PerspectiveClient && packet.EncryptionLevel == protocol.EncryptionHandshake && h.initialPackets != nil {
+ if h.perspective == protocol.PerspectiveClient && p.EncryptionLevel == protocol.EncryptionHandshake && h.initialPackets != nil {
h.dropPackets(protocol.EncryptionInitial)
}
- isAckEliciting := h.sentPacketImpl(packet)
- h.getPacketNumberSpace(packet.EncryptionLevel).history.SentPacket(packet, isAckEliciting)
+ isAckEliciting := h.sentPacketImpl(p)
+ if isAckEliciting {
+ h.getPacketNumberSpace(p.EncryptionLevel).history.SentAckElicitingPacket(p)
+ } else {
+ h.getPacketNumberSpace(p.EncryptionLevel).history.SentNonAckElicitingPacket(p.PacketNumber, p.EncryptionLevel, p.SendTime)
+ putPacket(p)
+ p = nil //nolint:ineffassign // This is just to be on the safe side.
+ }
if h.tracer != nil && isAckEliciting {
h.tracer.UpdatedMetrics(h.rttStats, h.congestion.GetCongestionWindow(), h.bytesInFlight, h.packetsInFlight())
}
@@ -256,7 +267,7 @@ func (h *sentPacketHandler) sentPacketImpl(packet *Packet) bool /* is ack-elicit
pnSpace := h.getPacketNumberSpace(packet.EncryptionLevel)
if h.logger.Debug() && pnSpace.history.HasOutstandingPackets() {
- for p := utils.MaxPacketNumber(0, pnSpace.largestSent+1); p < packet.PacketNumber; p++ {
+ for p := utils.Max(0, pnSpace.largestSent+1); p < packet.PacketNumber; p++ {
h.logger.Debugf("Skipping packet number %d", p)
}
}
@@ -288,7 +299,7 @@ func (h *sentPacketHandler) ReceivedAck(ack *wire.AckFrame, encLevel protocol.En
}
}
- pnSpace.largestAcked = utils.MaxPacketNumber(pnSpace.largestAcked, largestAcked)
+ pnSpace.largestAcked = utils.Max(pnSpace.largestAcked, largestAcked)
// Servers complete address validation when a protected packet is received.
if h.perspective == protocol.PerspectiveClient && !h.peerCompletedAddressValidation &&
@@ -310,7 +321,7 @@ func (h *sentPacketHandler) ReceivedAck(ack *wire.AckFrame, encLevel protocol.En
// don't use the ack delay for Initial and Handshake packets
var ackDelay time.Duration
if encLevel == protocol.Encryption1RTT {
- ackDelay = utils.MinDuration(ack.DelayTime, h.rttStats.MaxAckDelay())
+ ackDelay = utils.Min(ack.DelayTime, h.rttStats.MaxAckDelay())
}
h.rttStats.UpdateRTT(rcvTime.Sub(p.SendTime), ackDelay, rcvTime)
if h.logger.Debug() {
@@ -331,7 +342,11 @@ func (h *sentPacketHandler) ReceivedAck(ack *wire.AckFrame, encLevel protocol.En
acked1RTTPacket = true
}
h.removeFromBytesInFlight(p)
+ putPacket(p)
}
+ // After this point, we must not use ackedPackets any longer!
+ // We've already returned the buffers.
+ ackedPackets = nil //nolint:ineffassign // This is just to be on the safe side.
// Reset the pto_count unless the client is unsure if the server has validated the client's address.
if h.peerCompletedAddressValidation {
@@ -406,7 +421,7 @@ func (h *sentPacketHandler) detectAndRemoveAckedPackets(ack *wire.AckFrame, encL
for _, p := range h.ackedPackets {
if p.LargestAcked != protocol.InvalidPacketNumber && encLevel == protocol.Encryption1RTT {
- h.lowestNotConfirmedAcked = utils.MaxPacketNumber(h.lowestNotConfirmedAcked, p.LargestAcked+1)
+ h.lowestNotConfirmedAcked = utils.Max(h.lowestNotConfirmedAcked, p.LargestAcked+1)
}
for _, f := range p.Frames {
@@ -444,6 +459,14 @@ func (h *sentPacketHandler) getLossTimeAndSpace() (time.Time, protocol.Encryptio
return lossTime, encLevel
}
+func (h *sentPacketHandler) getScaledPTO(includeMaxAckDelay bool) time.Duration {
+ pto := h.rttStats.PTO(includeMaxAckDelay) << h.ptoCount
+ if pto > maxPTODuration || pto <= 0 {
+ return maxPTODuration
+ }
+ return pto
+}
+
// same logic as getLossTimeAndSpace, but for lastAckElicitingPacketTime instead of lossTime
func (h *sentPacketHandler) getPTOTimeAndSpace() (pto time.Time, encLevel protocol.EncryptionLevel, ok bool) {
// We only send application data probe packets once the handshake is confirmed,
@@ -452,7 +475,7 @@ func (h *sentPacketHandler) getPTOTimeAndSpace() (pto time.Time, encLevel protoc
if h.peerCompletedAddressValidation {
return
}
- t := time.Now().Add(h.rttStats.PTO(false) << h.ptoCount)
+ t := time.Now().Add(h.getScaledPTO(false))
if h.initialPackets != nil {
return t, protocol.EncryptionInitial, true
}
@@ -462,18 +485,18 @@ func (h *sentPacketHandler) getPTOTimeAndSpace() (pto time.Time, encLevel protoc
if h.initialPackets != nil {
encLevel = protocol.EncryptionInitial
if t := h.initialPackets.lastAckElicitingPacketTime; !t.IsZero() {
- pto = t.Add(h.rttStats.PTO(false) << h.ptoCount)
+ pto = t.Add(h.getScaledPTO(false))
}
}
if h.handshakePackets != nil && !h.handshakePackets.lastAckElicitingPacketTime.IsZero() {
- t := h.handshakePackets.lastAckElicitingPacketTime.Add(h.rttStats.PTO(false) << h.ptoCount)
+ t := h.handshakePackets.lastAckElicitingPacketTime.Add(h.getScaledPTO(false))
if pto.IsZero() || (!t.IsZero() && t.Before(pto)) {
pto = t
encLevel = protocol.EncryptionHandshake
}
}
if h.handshakeConfirmed && !h.appDataPackets.lastAckElicitingPacketTime.IsZero() {
- t := h.appDataPackets.lastAckElicitingPacketTime.Add(h.rttStats.PTO(true) << h.ptoCount)
+ t := h.appDataPackets.lastAckElicitingPacketTime.Add(h.getScaledPTO(true))
if pto.IsZero() || (!t.IsZero() && t.Before(pto)) {
pto = t
encLevel = protocol.Encryption1RTT
@@ -554,11 +577,11 @@ func (h *sentPacketHandler) detectLostPackets(now time.Time, encLevel protocol.E
pnSpace := h.getPacketNumberSpace(encLevel)
pnSpace.lossTime = time.Time{}
- maxRTT := float64(utils.MaxDuration(h.rttStats.LatestRTT(), h.rttStats.SmoothedRTT()))
+ maxRTT := float64(utils.Max(h.rttStats.LatestRTT(), h.rttStats.SmoothedRTT()))
lossDelay := time.Duration(timeThreshold * maxRTT)
// Minimum time of granularity before packets are deemed lost.
- lossDelay = utils.MaxDuration(lossDelay, protocol.TimerGranularity)
+ lossDelay = utils.Max(lossDelay, protocol.TimerGranularity)
// Packets sent before this time are deemed lost.
lostSendTime := now.Add(-lossDelay)
@@ -598,7 +621,7 @@ func (h *sentPacketHandler) detectLostPackets(now time.Time, encLevel protocol.E
pnSpace.lossTime = lossTime
}
if packetLost {
- p.declaredLost = true
+ p = pnSpace.history.DeclareLost(p)
// the bytes in flight need to be reduced no matter if the frames in this packet will be retransmitted
h.removeFromBytesInFlight(p)
h.queueFramesForRetransmission(p)
@@ -767,7 +790,7 @@ func (h *sentPacketHandler) QueueProbePacket(encLevel protocol.EncryptionLevel)
// TODO: don't declare the packet lost here.
// Keep track of acknowledged frames instead.
h.removeFromBytesInFlight(p)
- p.declaredLost = true
+ pnSpace.history.DeclareLost(p)
return true
}
@@ -808,7 +831,7 @@ func (h *sentPacketHandler) ResetForRetry() error {
if h.ptoCount == 0 {
// Don't set the RTT to a value lower than 5ms here.
now := time.Now()
- h.rttStats.UpdateRTT(utils.MaxDuration(minRTTAfterRetry, now.Sub(firstPacketSendTime)), 0, now)
+ h.rttStats.UpdateRTT(utils.Max(minRTTAfterRetry, now.Sub(firstPacketSendTime)), 0, now)
if h.logger.Debug() {
h.logger.Debugf("\tupdated RTT: %s (σ: %s)", h.rttStats.SmoothedRTT(), h.rttStats.MeanDeviation())
}
diff --git a/vendor/github.com/quic-go/quic-go/internal/ackhandler/sent_packet_history.go b/vendor/github.com/quic-go/quic-go/internal/ackhandler/sent_packet_history.go
new file mode 100644
index 000000000..064783991
--- /dev/null
+++ b/vendor/github.com/quic-go/quic-go/internal/ackhandler/sent_packet_history.go
@@ -0,0 +1,163 @@
+package ackhandler
+
+import (
+ "fmt"
+ "sync"
+ "time"
+
+ "github.com/quic-go/quic-go/internal/protocol"
+ "github.com/quic-go/quic-go/internal/utils"
+ list "github.com/quic-go/quic-go/internal/utils/linkedlist"
+)
+
+type sentPacketHistory struct {
+ rttStats *utils.RTTStats
+ outstandingPacketList *list.List[*Packet]
+ etcPacketList *list.List[*Packet]
+ packetMap map[protocol.PacketNumber]*list.Element[*Packet]
+ highestSent protocol.PacketNumber
+}
+
+var packetElementPool sync.Pool
+
+func init() {
+ packetElementPool = *list.NewPool[*Packet]()
+}
+
+func newSentPacketHistory(rttStats *utils.RTTStats) *sentPacketHistory {
+ return &sentPacketHistory{
+ rttStats: rttStats,
+ outstandingPacketList: list.NewWithPool[*Packet](&packetElementPool),
+ etcPacketList: list.NewWithPool[*Packet](&packetElementPool),
+ packetMap: make(map[protocol.PacketNumber]*list.Element[*Packet]),
+ highestSent: protocol.InvalidPacketNumber,
+ }
+}
+
+func (h *sentPacketHistory) SentNonAckElicitingPacket(pn protocol.PacketNumber, encLevel protocol.EncryptionLevel, t time.Time) {
+ h.registerSentPacket(pn, encLevel, t)
+}
+
+func (h *sentPacketHistory) SentAckElicitingPacket(p *Packet) {
+ h.registerSentPacket(p.PacketNumber, p.EncryptionLevel, p.SendTime)
+
+ var el *list.Element[*Packet]
+ if p.outstanding() {
+ el = h.outstandingPacketList.PushBack(p)
+ } else {
+ el = h.etcPacketList.PushBack(p)
+ }
+ h.packetMap[p.PacketNumber] = el
+}
+
+func (h *sentPacketHistory) registerSentPacket(pn protocol.PacketNumber, encLevel protocol.EncryptionLevel, t time.Time) {
+ if pn <= h.highestSent {
+ panic("non-sequential packet number use")
+ }
+ // Skipped packet numbers.
+ for p := h.highestSent + 1; p < pn; p++ {
+ el := h.etcPacketList.PushBack(&Packet{
+ PacketNumber: p,
+ EncryptionLevel: encLevel,
+ SendTime: t,
+ skippedPacket: true,
+ })
+ h.packetMap[p] = el
+ }
+ h.highestSent = pn
+}
+
+// Iterate iterates through all packets.
+func (h *sentPacketHistory) Iterate(cb func(*Packet) (cont bool, err error)) error {
+ cont := true
+ outstandingEl := h.outstandingPacketList.Front()
+ etcEl := h.etcPacketList.Front()
+ var el *list.Element[*Packet]
+ // whichever has the next packet number is returned first
+ for cont {
+ if outstandingEl == nil || (etcEl != nil && etcEl.Value.PacketNumber < outstandingEl.Value.PacketNumber) {
+ el = etcEl
+ } else {
+ el = outstandingEl
+ }
+ if el == nil {
+ return nil
+ }
+ if el == outstandingEl {
+ outstandingEl = outstandingEl.Next()
+ } else {
+ etcEl = etcEl.Next()
+ }
+ var err error
+ cont, err = cb(el.Value)
+ if err != nil {
+ return err
+ }
+ }
+ return nil
+}
+
+// FirstOutstanding returns the first outstanding packet.
+func (h *sentPacketHistory) FirstOutstanding() *Packet {
+ el := h.outstandingPacketList.Front()
+ if el == nil {
+ return nil
+ }
+ return el.Value
+}
+
+func (h *sentPacketHistory) Len() int {
+ return len(h.packetMap)
+}
+
+func (h *sentPacketHistory) Remove(p protocol.PacketNumber) error {
+ el, ok := h.packetMap[p]
+ if !ok {
+ return fmt.Errorf("packet %d not found in sent packet history", p)
+ }
+ el.List().Remove(el)
+ delete(h.packetMap, p)
+ return nil
+}
+
+func (h *sentPacketHistory) HasOutstandingPackets() bool {
+ return h.outstandingPacketList.Len() > 0
+}
+
+func (h *sentPacketHistory) DeleteOldPackets(now time.Time) {
+ maxAge := 3 * h.rttStats.PTO(false)
+ var nextEl *list.Element[*Packet]
+ // we don't iterate outstandingPacketList, as we should not delete outstanding packets.
+ // being outstanding for more than 3*PTO should only happen in the case of drastic RTT changes.
+ for el := h.etcPacketList.Front(); el != nil; el = nextEl {
+ nextEl = el.Next()
+ p := el.Value
+ if p.SendTime.After(now.Add(-maxAge)) {
+ break
+ }
+ delete(h.packetMap, p.PacketNumber)
+ h.etcPacketList.Remove(el)
+ }
+}
+
+func (h *sentPacketHistory) DeclareLost(p *Packet) *Packet {
+ el, ok := h.packetMap[p.PacketNumber]
+ if !ok {
+ return nil
+ }
+ el.List().Remove(el)
+ p.declaredLost = true
+ // move it to the correct position in the etc list (based on the packet number)
+ for el = h.etcPacketList.Back(); el != nil; el = el.Prev() {
+ if el.Value.PacketNumber < p.PacketNumber {
+ break
+ }
+ }
+ if el == nil {
+ el = h.etcPacketList.PushFront(p)
+ } else {
+ el = h.etcPacketList.InsertAfter(p, el)
+ }
+ h.packetMap[p.PacketNumber] = el
+ return el.Value
+}
diff --git a/vendor/github.com/lucas-clemente/quic-go/internal/congestion/bandwidth.go b/vendor/github.com/quic-go/quic-go/internal/congestion/bandwidth.go
similarity index 91%
rename from vendor/github.com/lucas-clemente/quic-go/internal/congestion/bandwidth.go
rename to vendor/github.com/quic-go/quic-go/internal/congestion/bandwidth.go
index 96b1c5aa8..1d03abbb8 100644
--- a/vendor/github.com/lucas-clemente/quic-go/internal/congestion/bandwidth.go
+++ b/vendor/github.com/quic-go/quic-go/internal/congestion/bandwidth.go
@@ -4,7 +4,7 @@ import (
"math"
"time"
- "github.com/lucas-clemente/quic-go/internal/protocol"
+ "github.com/quic-go/quic-go/internal/protocol"
)
// Bandwidth of a connection
diff --git a/vendor/github.com/lucas-clemente/quic-go/internal/congestion/clock.go b/vendor/github.com/quic-go/quic-go/internal/congestion/clock.go
similarity index 100%
rename from vendor/github.com/lucas-clemente/quic-go/internal/congestion/clock.go
rename to vendor/github.com/quic-go/quic-go/internal/congestion/clock.go
diff --git a/vendor/github.com/lucas-clemente/quic-go/internal/congestion/cubic.go b/vendor/github.com/quic-go/quic-go/internal/congestion/cubic.go
similarity index 97%
rename from vendor/github.com/lucas-clemente/quic-go/internal/congestion/cubic.go
rename to vendor/github.com/quic-go/quic-go/internal/congestion/cubic.go
index beadd627c..a73cf82aa 100644
--- a/vendor/github.com/lucas-clemente/quic-go/internal/congestion/cubic.go
+++ b/vendor/github.com/quic-go/quic-go/internal/congestion/cubic.go
@@ -4,8 +4,8 @@ import (
"math"
"time"
- "github.com/lucas-clemente/quic-go/internal/protocol"
- "github.com/lucas-clemente/quic-go/internal/utils"
+ "github.com/quic-go/quic-go/internal/protocol"
+ "github.com/quic-go/quic-go/internal/utils"
)
// This cubic implementation is based on the one found in Chromiums's QUIC
@@ -187,7 +187,7 @@ func (c *Cubic) CongestionWindowAfterAck(
targetCongestionWindow = c.originPointCongestionWindow - deltaCongestionWindow
}
// Limit the CWND increase to half the acked bytes.
- targetCongestionWindow = utils.MinByteCount(targetCongestionWindow, currentCongestionWindow+c.ackedBytesCount/2)
+ targetCongestionWindow = utils.Min(targetCongestionWindow, currentCongestionWindow+c.ackedBytesCount/2)
// Increase the window by approximately Alpha * 1 MSS of bytes every
// time we ack an estimated tcp window of bytes. For small
diff --git a/vendor/github.com/lucas-clemente/quic-go/internal/congestion/cubic_sender.go b/vendor/github.com/quic-go/quic-go/internal/congestion/cubic_sender.go
similarity index 95%
rename from vendor/github.com/lucas-clemente/quic-go/internal/congestion/cubic_sender.go
rename to vendor/github.com/quic-go/quic-go/internal/congestion/cubic_sender.go
index 059b8f6a5..dac3118e3 100644
--- a/vendor/github.com/lucas-clemente/quic-go/internal/congestion/cubic_sender.go
+++ b/vendor/github.com/quic-go/quic-go/internal/congestion/cubic_sender.go
@@ -4,9 +4,9 @@ import (
"fmt"
"time"
- "github.com/lucas-clemente/quic-go/internal/protocol"
- "github.com/lucas-clemente/quic-go/internal/utils"
- "github.com/lucas-clemente/quic-go/logging"
+ "github.com/quic-go/quic-go/internal/protocol"
+ "github.com/quic-go/quic-go/internal/utils"
+ "github.com/quic-go/quic-go/logging"
)
const (
@@ -178,7 +178,7 @@ func (c *cubicSender) OnPacketAcked(
priorInFlight protocol.ByteCount,
eventTime time.Time,
) {
- c.largestAckedPacketNumber = utils.MaxPacketNumber(ackedPacketNumber, c.largestAckedPacketNumber)
+ c.largestAckedPacketNumber = utils.Max(ackedPacketNumber, c.largestAckedPacketNumber)
if c.InRecovery() {
return
}
@@ -246,7 +246,7 @@ func (c *cubicSender) maybeIncreaseCwnd(
c.numAckedPackets = 0
}
} else {
- c.congestionWindow = utils.MinByteCount(c.maxCongestionWindow(), c.cubic.CongestionWindowAfterAck(ackedBytes, c.congestionWindow, c.rttStats.MinRTT(), eventTime))
+ c.congestionWindow = utils.Min(c.maxCongestionWindow(), c.cubic.CongestionWindowAfterAck(ackedBytes, c.congestionWindow, c.rttStats.MinRTT(), eventTime))
}
}
diff --git a/vendor/github.com/lucas-clemente/quic-go/internal/congestion/hybrid_slow_start.go b/vendor/github.com/quic-go/quic-go/internal/congestion/hybrid_slow_start.go
similarity index 91%
rename from vendor/github.com/lucas-clemente/quic-go/internal/congestion/hybrid_slow_start.go
rename to vendor/github.com/quic-go/quic-go/internal/congestion/hybrid_slow_start.go
index b5ae3d5eb..b2f7c908e 100644
--- a/vendor/github.com/lucas-clemente/quic-go/internal/congestion/hybrid_slow_start.go
+++ b/vendor/github.com/quic-go/quic-go/internal/congestion/hybrid_slow_start.go
@@ -3,8 +3,8 @@ package congestion
import (
"time"
- "github.com/lucas-clemente/quic-go/internal/protocol"
- "github.com/lucas-clemente/quic-go/internal/utils"
+ "github.com/quic-go/quic-go/internal/protocol"
+ "github.com/quic-go/quic-go/internal/utils"
)
// Note(pwestin): the magic clamping numbers come from the original code in
@@ -75,8 +75,8 @@ func (s *HybridSlowStart) ShouldExitSlowStart(latestRTT time.Duration, minRTT ti
// Divide minRTT by 8 to get a rtt increase threshold for exiting.
minRTTincreaseThresholdUs := int64(minRTT / time.Microsecond >> hybridStartDelayFactorExp)
// Ensure the rtt threshold is never less than 2ms or more than 16ms.
- minRTTincreaseThresholdUs = utils.MinInt64(minRTTincreaseThresholdUs, hybridStartDelayMaxThresholdUs)
- minRTTincreaseThreshold := time.Duration(utils.MaxInt64(minRTTincreaseThresholdUs, hybridStartDelayMinThresholdUs)) * time.Microsecond
+ minRTTincreaseThresholdUs = utils.Min(minRTTincreaseThresholdUs, hybridStartDelayMaxThresholdUs)
+ minRTTincreaseThreshold := time.Duration(utils.Max(minRTTincreaseThresholdUs, hybridStartDelayMinThresholdUs)) * time.Microsecond
if s.currentMinRTT > (minRTT + minRTTincreaseThreshold) {
s.hystartFound = true
diff --git a/vendor/github.com/lucas-clemente/quic-go/internal/congestion/interface.go b/vendor/github.com/quic-go/quic-go/internal/congestion/interface.go
similarity index 94%
rename from vendor/github.com/lucas-clemente/quic-go/internal/congestion/interface.go
rename to vendor/github.com/quic-go/quic-go/internal/congestion/interface.go
index 5157383f3..5db3ebae0 100644
--- a/vendor/github.com/lucas-clemente/quic-go/internal/congestion/interface.go
+++ b/vendor/github.com/quic-go/quic-go/internal/congestion/interface.go
@@ -3,7 +3,7 @@ package congestion
import (
"time"
- "github.com/lucas-clemente/quic-go/internal/protocol"
+ "github.com/quic-go/quic-go/internal/protocol"
)
// A SendAlgorithm performs congestion control
diff --git a/vendor/github.com/lucas-clemente/quic-go/internal/congestion/pacer.go b/vendor/github.com/quic-go/quic-go/internal/congestion/pacer.go
similarity index 90%
rename from vendor/github.com/lucas-clemente/quic-go/internal/congestion/pacer.go
rename to vendor/github.com/quic-go/quic-go/internal/congestion/pacer.go
index 7ec4d8f57..a5861062e 100644
--- a/vendor/github.com/lucas-clemente/quic-go/internal/congestion/pacer.go
+++ b/vendor/github.com/quic-go/quic-go/internal/congestion/pacer.go
@@ -4,8 +4,8 @@ import (
"math"
"time"
- "github.com/lucas-clemente/quic-go/internal/protocol"
- "github.com/lucas-clemente/quic-go/internal/utils"
+ "github.com/quic-go/quic-go/internal/protocol"
+ "github.com/quic-go/quic-go/internal/utils"
)
const maxBurstSizePackets = 10
@@ -50,11 +50,11 @@ func (p *pacer) Budget(now time.Time) protocol.ByteCount {
return p.maxBurstSize()
}
budget := p.budgetAtLastSent + (protocol.ByteCount(p.getAdjustedBandwidth())*protocol.ByteCount(now.Sub(p.lastSentTime).Nanoseconds()))/1e9
- return utils.MinByteCount(p.maxBurstSize(), budget)
+ return utils.Min(p.maxBurstSize(), budget)
}
func (p *pacer) maxBurstSize() protocol.ByteCount {
- return utils.MaxByteCount(
+ return utils.Max(
protocol.ByteCount(uint64((protocol.MinPacingDelay+protocol.TimerGranularity).Nanoseconds())*p.getAdjustedBandwidth())/1e9,
maxBurstSizePackets*p.maxDatagramSize,
)
@@ -66,7 +66,7 @@ func (p *pacer) TimeUntilSend() time.Time {
if p.budgetAtLastSent >= p.maxDatagramSize {
return time.Time{}
}
- return p.lastSentTime.Add(utils.MaxDuration(
+ return p.lastSentTime.Add(utils.Max(
protocol.MinPacingDelay,
time.Duration(math.Ceil(float64(p.maxDatagramSize-p.budgetAtLastSent)*1e9/float64(p.getAdjustedBandwidth())))*time.Nanosecond,
))
diff --git a/vendor/github.com/lucas-clemente/quic-go/internal/flowcontrol/base_flow_controller.go b/vendor/github.com/quic-go/quic-go/internal/flowcontrol/base_flow_controller.go
similarity index 93%
rename from vendor/github.com/lucas-clemente/quic-go/internal/flowcontrol/base_flow_controller.go
rename to vendor/github.com/quic-go/quic-go/internal/flowcontrol/base_flow_controller.go
index 2bf14fdc0..f3f24a60e 100644
--- a/vendor/github.com/lucas-clemente/quic-go/internal/flowcontrol/base_flow_controller.go
+++ b/vendor/github.com/quic-go/quic-go/internal/flowcontrol/base_flow_controller.go
@@ -4,8 +4,8 @@ import (
"sync"
"time"
- "github.com/lucas-clemente/quic-go/internal/protocol"
- "github.com/lucas-clemente/quic-go/internal/utils"
+ "github.com/quic-go/quic-go/internal/protocol"
+ "github.com/quic-go/quic-go/internal/utils"
)
type baseFlowController struct {
@@ -47,7 +47,7 @@ func (c *baseFlowController) AddBytesSent(n protocol.ByteCount) {
c.bytesSent += n
}
-// UpdateSendWindow is be called after receiving a MAX_{STREAM_}DATA frame.
+// UpdateSendWindow is called after receiving a MAX_{STREAM_}DATA frame.
func (c *baseFlowController) UpdateSendWindow(offset protocol.ByteCount) {
if offset > c.sendWindow {
c.sendWindow = offset
@@ -107,7 +107,7 @@ func (c *baseFlowController) maybeAdjustWindowSize() {
now := time.Now()
if now.Sub(c.epochStartTime) < time.Duration(4*fraction*float64(rtt)) {
// window is consumed too fast, try to increase the window size
- newSize := utils.MinByteCount(2*c.receiveWindowSize, c.maxReceiveWindowSize)
+ newSize := utils.Min(2*c.receiveWindowSize, c.maxReceiveWindowSize)
if newSize > c.receiveWindowSize && (c.allowWindowIncrease == nil || c.allowWindowIncrease(newSize-c.receiveWindowSize)) {
c.receiveWindowSize = newSize
}
diff --git a/vendor/github.com/lucas-clemente/quic-go/internal/flowcontrol/connection_flow_controller.go b/vendor/github.com/quic-go/quic-go/internal/flowcontrol/connection_flow_controller.go
similarity index 94%
rename from vendor/github.com/lucas-clemente/quic-go/internal/flowcontrol/connection_flow_controller.go
rename to vendor/github.com/quic-go/quic-go/internal/flowcontrol/connection_flow_controller.go
index 6bf2241b9..13e69d6c4 100644
--- a/vendor/github.com/lucas-clemente/quic-go/internal/flowcontrol/connection_flow_controller.go
+++ b/vendor/github.com/quic-go/quic-go/internal/flowcontrol/connection_flow_controller.go
@@ -5,9 +5,9 @@ import (
"fmt"
"time"
- "github.com/lucas-clemente/quic-go/internal/protocol"
- "github.com/lucas-clemente/quic-go/internal/qerr"
- "github.com/lucas-clemente/quic-go/internal/utils"
+ "github.com/quic-go/quic-go/internal/protocol"
+ "github.com/quic-go/quic-go/internal/qerr"
+ "github.com/quic-go/quic-go/internal/utils"
)
type connectionFlowController struct {
@@ -87,7 +87,7 @@ func (c *connectionFlowController) EnsureMinimumWindowSize(inc protocol.ByteCoun
c.mutex.Lock()
if inc > c.receiveWindowSize {
c.logger.Debugf("Increasing receive flow control window for the connection to %d kB, in response to stream flow control window increase", c.receiveWindowSize/(1<<10))
- newSize := utils.MinByteCount(inc, c.maxReceiveWindowSize)
+ newSize := utils.Min(inc, c.maxReceiveWindowSize)
if delta := newSize - c.receiveWindowSize; delta > 0 && c.allowWindowIncrease(delta) {
c.receiveWindowSize = newSize
}
diff --git a/vendor/github.com/lucas-clemente/quic-go/internal/flowcontrol/interface.go b/vendor/github.com/quic-go/quic-go/internal/flowcontrol/interface.go
similarity index 95%
rename from vendor/github.com/lucas-clemente/quic-go/internal/flowcontrol/interface.go
rename to vendor/github.com/quic-go/quic-go/internal/flowcontrol/interface.go
index 1eeaee9fe..946519d52 100644
--- a/vendor/github.com/lucas-clemente/quic-go/internal/flowcontrol/interface.go
+++ b/vendor/github.com/quic-go/quic-go/internal/flowcontrol/interface.go
@@ -1,6 +1,6 @@
package flowcontrol
-import "github.com/lucas-clemente/quic-go/internal/protocol"
+import "github.com/quic-go/quic-go/internal/protocol"
type flowController interface {
// for sending
diff --git a/vendor/github.com/lucas-clemente/quic-go/internal/flowcontrol/stream_flow_controller.go b/vendor/github.com/quic-go/quic-go/internal/flowcontrol/stream_flow_controller.go
similarity index 94%
rename from vendor/github.com/lucas-clemente/quic-go/internal/flowcontrol/stream_flow_controller.go
rename to vendor/github.com/quic-go/quic-go/internal/flowcontrol/stream_flow_controller.go
index aa66aef1a..1770a9c84 100644
--- a/vendor/github.com/lucas-clemente/quic-go/internal/flowcontrol/stream_flow_controller.go
+++ b/vendor/github.com/quic-go/quic-go/internal/flowcontrol/stream_flow_controller.go
@@ -3,9 +3,9 @@ package flowcontrol
import (
"fmt"
- "github.com/lucas-clemente/quic-go/internal/protocol"
- "github.com/lucas-clemente/quic-go/internal/qerr"
- "github.com/lucas-clemente/quic-go/internal/utils"
+ "github.com/quic-go/quic-go/internal/protocol"
+ "github.com/quic-go/quic-go/internal/qerr"
+ "github.com/quic-go/quic-go/internal/utils"
)
type streamFlowController struct {
@@ -123,7 +123,7 @@ func (c *streamFlowController) AddBytesSent(n protocol.ByteCount) {
}
func (c *streamFlowController) SendWindowSize() protocol.ByteCount {
- return utils.MinByteCount(c.baseFlowController.sendWindowSize(), c.connection.SendWindowSize())
+ return utils.Min(c.baseFlowController.sendWindowSize(), c.connection.SendWindowSize())
}
func (c *streamFlowController) shouldQueueWindowUpdate() bool {
diff --git a/vendor/github.com/lucas-clemente/quic-go/internal/handshake/aead.go b/vendor/github.com/quic-go/quic-go/internal/handshake/aead.go
similarity index 95%
rename from vendor/github.com/lucas-clemente/quic-go/internal/handshake/aead.go
rename to vendor/github.com/quic-go/quic-go/internal/handshake/aead.go
index 03b039289..410745f1a 100644
--- a/vendor/github.com/lucas-clemente/quic-go/internal/handshake/aead.go
+++ b/vendor/github.com/quic-go/quic-go/internal/handshake/aead.go
@@ -4,9 +4,9 @@ import (
"crypto/cipher"
"encoding/binary"
- "github.com/lucas-clemente/quic-go/internal/protocol"
- "github.com/lucas-clemente/quic-go/internal/qtls"
- "github.com/lucas-clemente/quic-go/internal/utils"
+ "github.com/quic-go/quic-go/internal/protocol"
+ "github.com/quic-go/quic-go/internal/qtls"
+ "github.com/quic-go/quic-go/internal/utils"
)
func createAEAD(suite *qtls.CipherSuiteTLS13, trafficSecret []byte, v protocol.VersionNumber) cipher.AEAD {
@@ -83,7 +83,7 @@ func (o *longHeaderOpener) Open(dst, src []byte, pn protocol.PacketNumber, ad []
// It uses the nonce provided here and XOR it with the IV.
dec, err := o.aead.Open(dst, o.nonceBuf, src, ad)
if err == nil {
- o.highestRcvdPN = utils.MaxPacketNumber(o.highestRcvdPN, pn)
+ o.highestRcvdPN = utils.Max(o.highestRcvdPN, pn)
} else {
err = ErrDecryptionFailed
}
diff --git a/vendor/github.com/lucas-clemente/quic-go/internal/handshake/crypto_setup.go b/vendor/github.com/quic-go/quic-go/internal/handshake/crypto_setup.go
similarity index 92%
rename from vendor/github.com/lucas-clemente/quic-go/internal/handshake/crypto_setup.go
rename to vendor/github.com/quic-go/quic-go/internal/handshake/crypto_setup.go
index 31d9bf0aa..ec14868cf 100644
--- a/vendor/github.com/lucas-clemente/quic-go/internal/handshake/crypto_setup.go
+++ b/vendor/github.com/quic-go/quic-go/internal/handshake/crypto_setup.go
@@ -6,17 +6,18 @@ import (
"errors"
"fmt"
"io"
+ "math"
"net"
"sync"
"time"
- "github.com/lucas-clemente/quic-go/internal/protocol"
- "github.com/lucas-clemente/quic-go/internal/qerr"
- "github.com/lucas-clemente/quic-go/internal/qtls"
- "github.com/lucas-clemente/quic-go/internal/utils"
- "github.com/lucas-clemente/quic-go/internal/wire"
- "github.com/lucas-clemente/quic-go/logging"
- "github.com/lucas-clemente/quic-go/quicvarint"
+ "github.com/quic-go/quic-go/internal/protocol"
+ "github.com/quic-go/quic-go/internal/qerr"
+ "github.com/quic-go/quic-go/internal/qtls"
+ "github.com/quic-go/quic-go/internal/utils"
+ "github.com/quic-go/quic-go/internal/wire"
+ "github.com/quic-go/quic-go/logging"
+ "github.com/quic-go/quic-go/quicvarint"
)
// TLS unexpected_message alert
@@ -115,6 +116,7 @@ type cryptoSetup struct {
clientHelloWritten bool
clientHelloWrittenChan chan struct{} // is closed as soon as the ClientHello is written
zeroRTTParametersChan chan<- *wire.TransportParameters
+ allow0RTT func() bool
rttStats *utils.RTTStats
@@ -195,7 +197,7 @@ func NewCryptoSetupServer(
tp *wire.TransportParameters,
runner handshakeRunner,
tlsConf *tls.Config,
- enable0RTT bool,
+ allow0RTT func() bool,
rttStats *utils.RTTStats,
tracer logging.ConnectionTracer,
logger utils.Logger,
@@ -208,13 +210,14 @@ func NewCryptoSetupServer(
tp,
runner,
tlsConf,
- enable0RTT,
+ allow0RTT != nil,
rttStats,
tracer,
logger,
protocol.PerspectiveServer,
version,
)
+ cs.allow0RTT = allow0RTT
cs.conn = qtls.Server(newConn(localAddr, remoteAddr, version), cs.tlsConf, cs.extraConf)
return cs
}
@@ -260,14 +263,14 @@ func newCryptoSetup(
alertChan: make(chan uint8),
clientHelloWrittenChan: make(chan struct{}),
zeroRTTParametersChan: zeroRTTParametersChan,
- messageChan: make(chan []byte, 100),
+ messageChan: make(chan []byte, 1),
isReadingHandshakeMessage: make(chan struct{}),
closeChan: make(chan struct{}),
version: version,
}
var maxEarlyData uint32
if enable0RTT {
- maxEarlyData = 0xffffffff
+ maxEarlyData = math.MaxUint32
}
cs.extraConf = &qtls.ExtraConfig{
GetExtensions: extHandler.GetExtensions,
@@ -340,7 +343,7 @@ func (h *cryptoSetup) onError(alert uint8, message string) {
if alert == 0 {
err = &qerr.TransportError{ErrorCode: qerr.InternalError, ErrorMessage: message}
} else {
- err = qerr.NewCryptoError(alert, message)
+ err = qerr.NewLocalCryptoError(alert, message)
}
h.runner.OnError(err)
}
@@ -365,8 +368,15 @@ func (h *cryptoSetup) HandleMessage(data []byte, encLevel protocol.EncryptionLev
h.onError(alertUnexpectedMessage, err.Error())
return false
}
- h.messageChan <- data
+ if encLevel != protocol.Encryption1RTT {
+ select {
+ case h.messageChan <- data:
+ case <-h.handshakeDone: // handshake errored, nobody is going to consume this message
+ return false
+ }
+ }
if encLevel == protocol.Encryption1RTT {
+ h.messageChan <- data
h.handlePostHandshakeMessage()
return false
}
@@ -432,11 +442,10 @@ func (h *cryptoSetup) handleTransportParameters(data []byte) {
// must be called after receiving the transport parameters
func (h *cryptoSetup) marshalDataForSessionState() []byte {
- buf := &bytes.Buffer{}
- quicvarint.Write(buf, clientSessionStateRevision)
- quicvarint.Write(buf, uint64(h.rttStats.SmoothedRTT().Microseconds()))
- h.peerParams.MarshalForSessionTicket(buf)
- return buf.Bytes()
+ b := make([]byte, 0, 256)
+ b = quicvarint.Append(b, clientSessionStateRevision)
+ b = quicvarint.Append(b, uint64(h.rttStats.SmoothedRTT().Microseconds()))
+ return h.peerParams.MarshalForSessionTicket(b)
}
func (h *cryptoSetup) handleDataFromSessionState(data []byte) {
@@ -491,13 +500,17 @@ func (h *cryptoSetup) accept0RTT(sessionTicketData []byte) bool {
return false
}
valid := h.ourParams.ValidFor0RTT(t.Parameters)
- if valid {
- h.logger.Debugf("Accepting 0-RTT. Restoring RTT from session ticket: %s", t.RTT)
- h.rttStats.SetInitialRTT(t.RTT)
- } else {
+ if !valid {
h.logger.Debugf("Transport parameters changed. Rejecting 0-RTT.")
+ return false
}
- return valid
+ if !h.allow0RTT() {
+ h.logger.Debugf("0-RTT not allowed. Rejecting 0-RTT.")
+ return false
+ }
+ h.logger.Debugf("Accepting 0-RTT. Restoring RTT from session ticket: %s", t.RTT)
+ h.rttStats.SetInitialRTT(t.RTT)
+ return true
}
// rejected0RTT is called for the client when the server rejects 0-RTT.
@@ -576,7 +589,9 @@ func (h *cryptoSetup) SetReadKey(encLevel qtls.EncryptionLevel, suite *qtls.Ciph
newHeaderProtector(suite, trafficSecret, true, h.version),
)
h.mutex.Unlock()
- h.logger.Debugf("Installed 0-RTT Read keys (using %s)", tls.CipherSuiteName(suite.ID))
+ if h.logger.Debug() {
+ h.logger.Debugf("Installed 0-RTT Read keys (using %s)", tls.CipherSuiteName(suite.ID))
+ }
if h.tracer != nil {
h.tracer.UpdatedKeyFromTLS(protocol.Encryption0RTT, h.perspective.Opposite())
}
@@ -589,12 +604,16 @@ func (h *cryptoSetup) SetReadKey(encLevel qtls.EncryptionLevel, suite *qtls.Ciph
h.dropInitialKeys,
h.perspective,
)
- h.logger.Debugf("Installed Handshake Read keys (using %s)", tls.CipherSuiteName(suite.ID))
+ if h.logger.Debug() {
+ h.logger.Debugf("Installed Handshake Read keys (using %s)", tls.CipherSuiteName(suite.ID))
+ }
case qtls.EncryptionApplication:
h.readEncLevel = protocol.Encryption1RTT
h.aead.SetReadKey(suite, trafficSecret)
h.has1RTTOpener = true
- h.logger.Debugf("Installed 1-RTT Read keys (using %s)", tls.CipherSuiteName(suite.ID))
+ if h.logger.Debug() {
+ h.logger.Debugf("Installed 1-RTT Read keys (using %s)", tls.CipherSuiteName(suite.ID))
+ }
default:
panic("unexpected read encryption level")
}
@@ -616,7 +635,9 @@ func (h *cryptoSetup) SetWriteKey(encLevel qtls.EncryptionLevel, suite *qtls.Cip
newHeaderProtector(suite, trafficSecret, true, h.version),
)
h.mutex.Unlock()
- h.logger.Debugf("Installed 0-RTT Write keys (using %s)", tls.CipherSuiteName(suite.ID))
+ if h.logger.Debug() {
+ h.logger.Debugf("Installed 0-RTT Write keys (using %s)", tls.CipherSuiteName(suite.ID))
+ }
if h.tracer != nil {
h.tracer.UpdatedKeyFromTLS(protocol.Encryption0RTT, h.perspective)
}
@@ -629,12 +650,16 @@ func (h *cryptoSetup) SetWriteKey(encLevel qtls.EncryptionLevel, suite *qtls.Cip
h.dropInitialKeys,
h.perspective,
)
- h.logger.Debugf("Installed Handshake Write keys (using %s)", tls.CipherSuiteName(suite.ID))
+ if h.logger.Debug() {
+ h.logger.Debugf("Installed Handshake Write keys (using %s)", tls.CipherSuiteName(suite.ID))
+ }
case qtls.EncryptionApplication:
h.writeEncLevel = protocol.Encryption1RTT
h.aead.SetWriteKey(suite, trafficSecret)
h.has1RTTSealer = true
- h.logger.Debugf("Installed 1-RTT Write keys (using %s)", tls.CipherSuiteName(suite.ID))
+ if h.logger.Debug() {
+ h.logger.Debugf("Installed 1-RTT Write keys (using %s)", tls.CipherSuiteName(suite.ID))
+ }
if h.zeroRTTSealer != nil {
h.zeroRTTSealer = nil
h.logger.Debugf("Dropping 0-RTT keys.")
diff --git a/vendor/github.com/lucas-clemente/quic-go/internal/handshake/header_protector.go b/vendor/github.com/quic-go/quic-go/internal/handshake/header_protector.go
similarity index 97%
rename from vendor/github.com/lucas-clemente/quic-go/internal/handshake/header_protector.go
rename to vendor/github.com/quic-go/quic-go/internal/handshake/header_protector.go
index 1f800c50f..274fb30cb 100644
--- a/vendor/github.com/lucas-clemente/quic-go/internal/handshake/header_protector.go
+++ b/vendor/github.com/quic-go/quic-go/internal/handshake/header_protector.go
@@ -9,8 +9,8 @@ import (
"golang.org/x/crypto/chacha20"
- "github.com/lucas-clemente/quic-go/internal/protocol"
- "github.com/lucas-clemente/quic-go/internal/qtls"
+ "github.com/quic-go/quic-go/internal/protocol"
+ "github.com/quic-go/quic-go/internal/qtls"
)
type headerProtector interface {
diff --git a/vendor/github.com/lucas-clemente/quic-go/internal/handshake/hkdf.go b/vendor/github.com/quic-go/quic-go/internal/handshake/hkdf.go
similarity index 100%
rename from vendor/github.com/lucas-clemente/quic-go/internal/handshake/hkdf.go
rename to vendor/github.com/quic-go/quic-go/internal/handshake/hkdf.go
diff --git a/vendor/github.com/lucas-clemente/quic-go/internal/handshake/initial_aead.go b/vendor/github.com/quic-go/quic-go/internal/handshake/initial_aead.go
similarity index 88%
rename from vendor/github.com/lucas-clemente/quic-go/internal/handshake/initial_aead.go
rename to vendor/github.com/quic-go/quic-go/internal/handshake/initial_aead.go
index 00ed243c7..3967fdb83 100644
--- a/vendor/github.com/lucas-clemente/quic-go/internal/handshake/initial_aead.go
+++ b/vendor/github.com/quic-go/quic-go/internal/handshake/initial_aead.go
@@ -6,14 +6,14 @@ import (
"golang.org/x/crypto/hkdf"
- "github.com/lucas-clemente/quic-go/internal/protocol"
- "github.com/lucas-clemente/quic-go/internal/qtls"
+ "github.com/quic-go/quic-go/internal/protocol"
+ "github.com/quic-go/quic-go/internal/qtls"
)
var (
quicSaltOld = []byte{0xaf, 0xbf, 0xec, 0x28, 0x99, 0x93, 0xd2, 0x4c, 0x9e, 0x97, 0x86, 0xf1, 0x9c, 0x61, 0x11, 0xe0, 0x43, 0x90, 0xa8, 0x99}
quicSaltV1 = []byte{0x38, 0x76, 0x2c, 0xf7, 0xf5, 0x59, 0x34, 0xb3, 0x4d, 0x17, 0x9a, 0xe6, 0xa4, 0xc8, 0x0c, 0xad, 0xcc, 0xbb, 0x7f, 0x0a}
- quicSaltV2 = []byte{0xa7, 0x07, 0xc2, 0x03, 0xa5, 0x9b, 0x47, 0x18, 0x4a, 0x1d, 0x62, 0xca, 0x57, 0x04, 0x06, 0xea, 0x7a, 0xe3, 0xe5, 0xd3}
+ quicSaltV2 = []byte{0x0d, 0xed, 0xe3, 0xde, 0xf7, 0x00, 0xa6, 0xdb, 0x81, 0x93, 0x81, 0xbe, 0x6e, 0x26, 0x9d, 0xcb, 0xf9, 0xbd, 0x2e, 0xd9}
)
const (
@@ -62,7 +62,7 @@ func NewInitialAEAD(connID protocol.ConnectionID, pers protocol.Perspective, v p
}
func computeSecrets(connID protocol.ConnectionID, v protocol.VersionNumber) (clientSecret, serverSecret []byte) {
- initialSecret := hkdf.Extract(crypto.SHA256.New, connID, getSalt(v))
+ initialSecret := hkdf.Extract(crypto.SHA256.New, connID.Bytes(), getSalt(v))
clientSecret = hkdfExpandLabel(crypto.SHA256, initialSecret, []byte{}, "client in", crypto.SHA256.Size())
serverSecret = hkdfExpandLabel(crypto.SHA256, initialSecret, []byte{}, "server in", crypto.SHA256.Size())
return
diff --git a/vendor/github.com/lucas-clemente/quic-go/internal/handshake/interface.go b/vendor/github.com/quic-go/quic-go/internal/handshake/interface.go
similarity index 95%
rename from vendor/github.com/lucas-clemente/quic-go/internal/handshake/interface.go
rename to vendor/github.com/quic-go/quic-go/internal/handshake/interface.go
index 112f6c258..e7baea906 100644
--- a/vendor/github.com/lucas-clemente/quic-go/internal/handshake/interface.go
+++ b/vendor/github.com/quic-go/quic-go/internal/handshake/interface.go
@@ -6,9 +6,9 @@ import (
"net"
"time"
- "github.com/lucas-clemente/quic-go/internal/protocol"
- "github.com/lucas-clemente/quic-go/internal/qtls"
- "github.com/lucas-clemente/quic-go/internal/wire"
+ "github.com/quic-go/quic-go/internal/protocol"
+ "github.com/quic-go/quic-go/internal/qtls"
+ "github.com/quic-go/quic-go/internal/wire"
)
var (
diff --git a/vendor/github.com/quic-go/quic-go/internal/handshake/mockgen.go b/vendor/github.com/quic-go/quic-go/internal/handshake/mockgen.go
new file mode 100644
index 000000000..f91e7e8a0
--- /dev/null
+++ b/vendor/github.com/quic-go/quic-go/internal/handshake/mockgen.go
@@ -0,0 +1,3 @@
+package handshake
+
+//go:generate sh -c "../../mockgen_private.sh handshake mock_handshake_runner_test.go github.com/quic-go/quic-go/internal/handshake handshakeRunner"
diff --git a/vendor/github.com/quic-go/quic-go/internal/handshake/retry.go b/vendor/github.com/quic-go/quic-go/internal/handshake/retry.go
new file mode 100644
index 000000000..ff14f7e0d
--- /dev/null
+++ b/vendor/github.com/quic-go/quic-go/internal/handshake/retry.go
@@ -0,0 +1,70 @@
+package handshake
+
+import (
+ "bytes"
+ "crypto/aes"
+ "crypto/cipher"
+ "fmt"
+ "sync"
+
+ "github.com/quic-go/quic-go/internal/protocol"
+)
+
+var (
+ retryAEADdraft29 cipher.AEAD // used for QUIC draft versions up to 34
+ retryAEADv1 cipher.AEAD // used for QUIC v1 (RFC 9000)
+ retryAEADv2 cipher.AEAD // used for QUIC v2
+)
+
+func init() {
+ retryAEADdraft29 = initAEAD([16]byte{0xcc, 0xce, 0x18, 0x7e, 0xd0, 0x9a, 0x09, 0xd0, 0x57, 0x28, 0x15, 0x5a, 0x6c, 0xb9, 0x6b, 0xe1})
+ retryAEADv1 = initAEAD([16]byte{0xbe, 0x0c, 0x69, 0x0b, 0x9f, 0x66, 0x57, 0x5a, 0x1d, 0x76, 0x6b, 0x54, 0xe3, 0x68, 0xc8, 0x4e})
+ retryAEADv2 = initAEAD([16]byte{0x8f, 0xb4, 0xb0, 0x1b, 0x56, 0xac, 0x48, 0xe2, 0x60, 0xfb, 0xcb, 0xce, 0xad, 0x7c, 0xcc, 0x92})
+}
+
+func initAEAD(key [16]byte) cipher.AEAD {
+ aes, err := aes.NewCipher(key[:])
+ if err != nil {
+ panic(err)
+ }
+ aead, err := cipher.NewGCM(aes)
+ if err != nil {
+ panic(err)
+ }
+ return aead
+}
+
+var (
+ retryBuf bytes.Buffer
+ retryMutex sync.Mutex
+ retryNonceDraft29 = [12]byte{0xe5, 0x49, 0x30, 0xf9, 0x7f, 0x21, 0x36, 0xf0, 0x53, 0x0a, 0x8c, 0x1c}
+ retryNonceV1 = [12]byte{0x46, 0x15, 0x99, 0xd3, 0x5d, 0x63, 0x2b, 0xf2, 0x23, 0x98, 0x25, 0xbb}
+ retryNonceV2 = [12]byte{0xd8, 0x69, 0x69, 0xbc, 0x2d, 0x7c, 0x6d, 0x99, 0x90, 0xef, 0xb0, 0x4a}
+)
+
+// GetRetryIntegrityTag calculates the integrity tag on a Retry packet
+func GetRetryIntegrityTag(retry []byte, origDestConnID protocol.ConnectionID, version protocol.VersionNumber) *[16]byte {
+ retryMutex.Lock()
+ defer retryMutex.Unlock()
+
+ retryBuf.WriteByte(uint8(origDestConnID.Len()))
+ retryBuf.Write(origDestConnID.Bytes())
+ retryBuf.Write(retry)
+ defer retryBuf.Reset()
+
+ var tag [16]byte
+ var sealed []byte
+ //nolint:exhaustive // These are all the versions we support
+ switch version {
+ case protocol.Version1:
+ sealed = retryAEADv1.Seal(tag[:0], retryNonceV1[:], nil, retryBuf.Bytes())
+ case protocol.Version2:
+ sealed = retryAEADv2.Seal(tag[:0], retryNonceV2[:], nil, retryBuf.Bytes())
+ default:
+ sealed = retryAEADdraft29.Seal(tag[:0], retryNonceDraft29[:], nil, retryBuf.Bytes())
+ }
+ if len(sealed) != 16 {
+ panic(fmt.Sprintf("unexpected Retry integrity tag length: %d", len(sealed)))
+ }
+ return &tag
+}
diff --git a/vendor/github.com/lucas-clemente/quic-go/internal/handshake/session_ticket.go b/vendor/github.com/quic-go/quic-go/internal/handshake/session_ticket.go
similarity index 77%
rename from vendor/github.com/lucas-clemente/quic-go/internal/handshake/session_ticket.go
rename to vendor/github.com/quic-go/quic-go/internal/handshake/session_ticket.go
index 75cc04f98..56bcbcd5d 100644
--- a/vendor/github.com/lucas-clemente/quic-go/internal/handshake/session_ticket.go
+++ b/vendor/github.com/quic-go/quic-go/internal/handshake/session_ticket.go
@@ -6,8 +6,8 @@ import (
"fmt"
"time"
- "github.com/lucas-clemente/quic-go/internal/wire"
- "github.com/lucas-clemente/quic-go/quicvarint"
+ "github.com/quic-go/quic-go/internal/wire"
+ "github.com/quic-go/quic-go/quicvarint"
)
const sessionTicketRevision = 2
@@ -18,11 +18,10 @@ type sessionTicket struct {
}
func (t *sessionTicket) Marshal() []byte {
- b := &bytes.Buffer{}
- quicvarint.Write(b, sessionTicketRevision)
- quicvarint.Write(b, uint64(t.RTT.Microseconds()))
- t.Parameters.MarshalForSessionTicket(b)
- return b.Bytes()
+ b := make([]byte, 0, 256)
+ b = quicvarint.Append(b, sessionTicketRevision)
+ b = quicvarint.Append(b, uint64(t.RTT.Microseconds()))
+ return t.Parameters.MarshalForSessionTicket(b)
}
func (t *sessionTicket) Unmarshal(b []byte) error {
diff --git a/vendor/github.com/lucas-clemente/quic-go/internal/handshake/tls_extension_handler.go b/vendor/github.com/quic-go/quic-go/internal/handshake/tls_extension_handler.go
similarity index 92%
rename from vendor/github.com/lucas-clemente/quic-go/internal/handshake/tls_extension_handler.go
rename to vendor/github.com/quic-go/quic-go/internal/handshake/tls_extension_handler.go
index 3a6790341..6105fe401 100644
--- a/vendor/github.com/lucas-clemente/quic-go/internal/handshake/tls_extension_handler.go
+++ b/vendor/github.com/quic-go/quic-go/internal/handshake/tls_extension_handler.go
@@ -1,8 +1,8 @@
package handshake
import (
- "github.com/lucas-clemente/quic-go/internal/protocol"
- "github.com/lucas-clemente/quic-go/internal/qtls"
+ "github.com/quic-go/quic-go/internal/protocol"
+ "github.com/quic-go/quic-go/internal/qtls"
)
const (
@@ -24,7 +24,7 @@ var _ tlsExtensionHandler = &extensionHandler{}
// newExtensionHandler creates a new extension handler
func newExtensionHandler(params []byte, pers protocol.Perspective, v protocol.VersionNumber) tlsExtensionHandler {
et := uint16(quicTLSExtensionType)
- if v != protocol.Version1 {
+ if v == protocol.VersionDraft29 {
et = quicTLSExtensionTypeOldDrafts
}
return &extensionHandler{
diff --git a/vendor/github.com/lucas-clemente/quic-go/internal/handshake/token_generator.go b/vendor/github.com/quic-go/quic-go/internal/handshake/token_generator.go
similarity index 76%
rename from vendor/github.com/lucas-clemente/quic-go/internal/handshake/token_generator.go
rename to vendor/github.com/quic-go/quic-go/internal/handshake/token_generator.go
index 2df5fcd8c..e5e90bb3b 100644
--- a/vendor/github.com/lucas-clemente/quic-go/internal/handshake/token_generator.go
+++ b/vendor/github.com/quic-go/quic-go/internal/handshake/token_generator.go
@@ -1,13 +1,14 @@
package handshake
import (
+ "bytes"
"encoding/asn1"
"fmt"
"io"
"net"
"time"
- "github.com/lucas-clemente/quic-go/internal/protocol"
+ "github.com/quic-go/quic-go/internal/protocol"
)
const (
@@ -17,14 +18,19 @@ const (
// A Token is derived from the client address and can be used to verify the ownership of this address.
type Token struct {
- IsRetryToken bool
- RemoteAddr string
- SentTime time.Time
+ IsRetryToken bool
+ SentTime time.Time
+ encodedRemoteAddr []byte
// only set for retry tokens
OriginalDestConnectionID protocol.ConnectionID
RetrySrcConnectionID protocol.ConnectionID
}
+// ValidateRemoteAddr validates the address, but does not check expiration
+func (t *Token) ValidateRemoteAddr(addr net.Addr) bool {
+ return bytes.Equal(encodeRemoteAddr(addr), t.encodedRemoteAddr)
+}
+
// token is the struct that is used for ASN1 serialization and deserialization
type token struct {
IsRetryToken bool
@@ -59,8 +65,8 @@ func (g *TokenGenerator) NewRetryToken(
data, err := asn1.Marshal(token{
IsRetryToken: true,
RemoteAddr: encodeRemoteAddr(raddr),
- OriginalDestConnectionID: origDestConnID,
- RetrySrcConnectionID: retrySrcConnID,
+ OriginalDestConnectionID: origDestConnID.Bytes(),
+ RetrySrcConnectionID: retrySrcConnID.Bytes(),
Timestamp: time.Now().UnixNano(),
})
if err != nil {
@@ -101,13 +107,13 @@ func (g *TokenGenerator) DecodeToken(encrypted []byte) (*Token, error) {
return nil, fmt.Errorf("rest when unpacking token: %d", len(rest))
}
token := &Token{
- IsRetryToken: t.IsRetryToken,
- RemoteAddr: decodeRemoteAddr(t.RemoteAddr),
- SentTime: time.Unix(0, t.Timestamp),
+ IsRetryToken: t.IsRetryToken,
+ SentTime: time.Unix(0, t.Timestamp),
+ encodedRemoteAddr: t.RemoteAddr,
}
if t.IsRetryToken {
- token.OriginalDestConnectionID = protocol.ConnectionID(t.OriginalDestConnectionID)
- token.RetrySrcConnectionID = protocol.ConnectionID(t.RetrySrcConnectionID)
+ token.OriginalDestConnectionID = protocol.ParseConnectionID(t.OriginalDestConnectionID)
+ token.RetrySrcConnectionID = protocol.ParseConnectionID(t.RetrySrcConnectionID)
}
return token, nil
}
@@ -119,16 +125,3 @@ func encodeRemoteAddr(remoteAddr net.Addr) []byte {
}
return append([]byte{tokenPrefixString}, []byte(remoteAddr.String())...)
}
-
-// decodeRemoteAddr decodes the remote address saved in the token
-func decodeRemoteAddr(data []byte) string {
- // data will never be empty for a token that we generated.
- // Check it to be on the safe side
- if len(data) == 0 {
- return ""
- }
- if data[0] == tokenPrefixIP {
- return net.IP(data[1:]).String()
- }
- return string(data[1:])
-}
diff --git a/vendor/github.com/lucas-clemente/quic-go/internal/handshake/token_protector.go b/vendor/github.com/quic-go/quic-go/internal/handshake/token_protector.go
similarity index 100%
rename from vendor/github.com/lucas-clemente/quic-go/internal/handshake/token_protector.go
rename to vendor/github.com/quic-go/quic-go/internal/handshake/token_protector.go
diff --git a/vendor/github.com/lucas-clemente/quic-go/internal/handshake/updatable_aead.go b/vendor/github.com/quic-go/quic-go/internal/handshake/updatable_aead.go
similarity index 97%
rename from vendor/github.com/lucas-clemente/quic-go/internal/handshake/updatable_aead.go
rename to vendor/github.com/quic-go/quic-go/internal/handshake/updatable_aead.go
index 1532e7b5a..89a9dcd62 100644
--- a/vendor/github.com/lucas-clemente/quic-go/internal/handshake/updatable_aead.go
+++ b/vendor/github.com/quic-go/quic-go/internal/handshake/updatable_aead.go
@@ -8,11 +8,11 @@ import (
"fmt"
"time"
- "github.com/lucas-clemente/quic-go/internal/protocol"
- "github.com/lucas-clemente/quic-go/internal/qerr"
- "github.com/lucas-clemente/quic-go/internal/qtls"
- "github.com/lucas-clemente/quic-go/internal/utils"
- "github.com/lucas-clemente/quic-go/logging"
+ "github.com/quic-go/quic-go/internal/protocol"
+ "github.com/quic-go/quic-go/internal/qerr"
+ "github.com/quic-go/quic-go/internal/qtls"
+ "github.com/quic-go/quic-go/internal/utils"
+ "github.com/quic-go/quic-go/logging"
)
// KeyUpdateInterval is the maximum number of packets we send or receive before initiating a key update.
@@ -169,7 +169,7 @@ func (a *updatableAEAD) Open(dst, src []byte, rcvTime time.Time, pn protocol.Pac
}
}
if err == nil {
- a.highestRcvdPN = utils.MaxPacketNumber(a.highestRcvdPN, pn)
+ a.highestRcvdPN = utils.Max(a.highestRcvdPN, pn)
}
return dec, err
}
diff --git a/vendor/github.com/lucas-clemente/quic-go/internal/logutils/frame.go b/vendor/github.com/quic-go/quic-go/internal/logutils/frame.go
similarity index 54%
rename from vendor/github.com/lucas-clemente/quic-go/internal/logutils/frame.go
rename to vendor/github.com/quic-go/quic-go/internal/logutils/frame.go
index 6e0fd311b..a6032fc20 100644
--- a/vendor/github.com/lucas-clemente/quic-go/internal/logutils/frame.go
+++ b/vendor/github.com/quic-go/quic-go/internal/logutils/frame.go
@@ -1,9 +1,9 @@
package logutils
import (
- "github.com/lucas-clemente/quic-go/internal/protocol"
- "github.com/lucas-clemente/quic-go/internal/wire"
- "github.com/lucas-clemente/quic-go/logging"
+ "github.com/quic-go/quic-go/internal/protocol"
+ "github.com/quic-go/quic-go/internal/wire"
+ "github.com/quic-go/quic-go/logging"
)
// ConvertFrame converts a wire.Frame into a logging.Frame.
@@ -11,6 +11,10 @@ import (
// Furthermore, it removes the data slices from CRYPTO and STREAM frames.
func ConvertFrame(frame wire.Frame) logging.Frame {
switch f := frame.(type) {
+ case *wire.AckFrame:
+ // We use a pool for ACK frames.
+ // Implementations of the tracer interface may hold on to frames, so we need to make a copy here.
+ return ConvertAckFrame(f)
case *wire.CryptoFrame:
return &logging.CryptoFrame{
Offset: f.Offset,
@@ -31,3 +35,16 @@ func ConvertFrame(frame wire.Frame) logging.Frame {
return logging.Frame(frame)
}
}
+
+func ConvertAckFrame(f *wire.AckFrame) *logging.AckFrame {
+ ranges := make([]wire.AckRange, 0, len(f.AckRanges))
+ ranges = append(ranges, f.AckRanges...)
+ ack := &logging.AckFrame{
+ AckRanges: ranges,
+ DelayTime: f.DelayTime,
+ ECNCE: f.ECNCE,
+ ECT0: f.ECT0,
+ ECT1: f.ECT1,
+ }
+ return ack
+}
diff --git a/vendor/github.com/quic-go/quic-go/internal/protocol/connection_id.go b/vendor/github.com/quic-go/quic-go/internal/protocol/connection_id.go
new file mode 100644
index 000000000..77259b5fa
--- /dev/null
+++ b/vendor/github.com/quic-go/quic-go/internal/protocol/connection_id.go
@@ -0,0 +1,116 @@
+package protocol
+
+import (
+ "crypto/rand"
+ "errors"
+ "fmt"
+ "io"
+)
+
+var ErrInvalidConnectionIDLen = errors.New("invalid Connection ID length")
+
+// An ArbitraryLenConnectionID is a QUIC Connection ID able to represent Connection IDs according to RFC 8999.
+// Future QUIC versions might allow connection ID lengths up to 255 bytes, while QUIC v1
+// restricts the length to 20 bytes.
+type ArbitraryLenConnectionID []byte
+
+func (c ArbitraryLenConnectionID) Len() int {
+ return len(c)
+}
+
+func (c ArbitraryLenConnectionID) Bytes() []byte {
+ return c
+}
+
+func (c ArbitraryLenConnectionID) String() string {
+ if c.Len() == 0 {
+ return "(empty)"
+ }
+ return fmt.Sprintf("%x", c.Bytes())
+}
+
+const maxConnectionIDLen = 20
+
+// A ConnectionID in QUIC
+type ConnectionID struct {
+ b [20]byte
+ l uint8
+}
+
+// GenerateConnectionID generates a connection ID using cryptographic random
+func GenerateConnectionID(l int) (ConnectionID, error) {
+ var c ConnectionID
+ c.l = uint8(l)
+ _, err := rand.Read(c.b[:l])
+ return c, err
+}
+
+// ParseConnectionID interprets b as a Connection ID.
+// It panics if b is longer than 20 bytes.
+func ParseConnectionID(b []byte) ConnectionID {
+ if len(b) > maxConnectionIDLen {
+ panic("invalid conn id length")
+ }
+ var c ConnectionID
+ c.l = uint8(len(b))
+ copy(c.b[:c.l], b)
+ return c
+}
+
+// GenerateConnectionIDForInitial generates a connection ID for the Initial packet.
+// It uses a length randomly chosen between 8 and 20 bytes.
+func GenerateConnectionIDForInitial() (ConnectionID, error) {
+ r := make([]byte, 1)
+ if _, err := rand.Read(r); err != nil {
+ return ConnectionID{}, err
+ }
+ l := MinConnectionIDLenInitial + int(r[0])%(maxConnectionIDLen-MinConnectionIDLenInitial+1)
+ return GenerateConnectionID(l)
+}
+
+// ReadConnectionID reads a connection ID of length len from the given io.Reader.
+// It returns io.EOF if there are not enough bytes to read.
+func ReadConnectionID(r io.Reader, l int) (ConnectionID, error) {
+ var c ConnectionID
+ if l == 0 {
+ return c, nil
+ }
+ if l > maxConnectionIDLen {
+ return c, ErrInvalidConnectionIDLen
+ }
+ c.l = uint8(l)
+ _, err := io.ReadFull(r, c.b[:l])
+ if err == io.ErrUnexpectedEOF {
+ return c, io.EOF
+ }
+ return c, err
+}
+
+// Len returns the length of the connection ID in bytes
+func (c ConnectionID) Len() int {
+ return int(c.l)
+}
+
+// Bytes returns the byte representation
+func (c ConnectionID) Bytes() []byte {
+ return c.b[:c.l]
+}
+
+func (c ConnectionID) String() string {
+ if c.Len() == 0 {
+ return "(empty)"
+ }
+ return fmt.Sprintf("%x", c.Bytes())
+}
+
+type DefaultConnectionIDGenerator struct {
+ ConnLen int
+}
+
+func (d *DefaultConnectionIDGenerator) GenerateConnectionID() (ConnectionID, error) {
+ return GenerateConnectionID(d.ConnLen)
+}
+
+func (d *DefaultConnectionIDGenerator) ConnectionIDLen() int {
+ return d.ConnLen
+}
diff --git a/vendor/github.com/lucas-clemente/quic-go/internal/protocol/encryption_level.go b/vendor/github.com/quic-go/quic-go/internal/protocol/encryption_level.go
similarity index 100%
rename from vendor/github.com/lucas-clemente/quic-go/internal/protocol/encryption_level.go
rename to vendor/github.com/quic-go/quic-go/internal/protocol/encryption_level.go
diff --git a/vendor/github.com/lucas-clemente/quic-go/internal/protocol/key_phase.go b/vendor/github.com/quic-go/quic-go/internal/protocol/key_phase.go
similarity index 100%
rename from vendor/github.com/lucas-clemente/quic-go/internal/protocol/key_phase.go
rename to vendor/github.com/quic-go/quic-go/internal/protocol/key_phase.go
diff --git a/vendor/github.com/lucas-clemente/quic-go/internal/protocol/packet_number.go b/vendor/github.com/quic-go/quic-go/internal/protocol/packet_number.go
similarity index 100%
rename from vendor/github.com/lucas-clemente/quic-go/internal/protocol/packet_number.go
rename to vendor/github.com/quic-go/quic-go/internal/protocol/packet_number.go
diff --git a/vendor/github.com/lucas-clemente/quic-go/internal/protocol/params.go b/vendor/github.com/quic-go/quic-go/internal/protocol/params.go
similarity index 99%
rename from vendor/github.com/lucas-clemente/quic-go/internal/protocol/params.go
rename to vendor/github.com/quic-go/quic-go/internal/protocol/params.go
index 831371139..60c867794 100644
--- a/vendor/github.com/lucas-clemente/quic-go/internal/protocol/params.go
+++ b/vendor/github.com/quic-go/quic-go/internal/protocol/params.go
@@ -134,7 +134,7 @@ const MaxAckFrameSize ByteCount = 1000
// MaxDatagramFrameSize is the maximum size of a DATAGRAM frame (RFC 9221).
// The size is chosen such that a DATAGRAM frame fits into a QUIC packet.
-const MaxDatagramFrameSize ByteCount = 1220
+const MaxDatagramFrameSize ByteCount = 1200
// DatagramRcvQueueLen is the length of the receive queue for DATAGRAM frames (RFC 9221)
const DatagramRcvQueueLen = 128
diff --git a/vendor/github.com/lucas-clemente/quic-go/internal/protocol/perspective.go b/vendor/github.com/quic-go/quic-go/internal/protocol/perspective.go
similarity index 100%
rename from vendor/github.com/lucas-clemente/quic-go/internal/protocol/perspective.go
rename to vendor/github.com/quic-go/quic-go/internal/protocol/perspective.go
diff --git a/vendor/github.com/lucas-clemente/quic-go/internal/protocol/protocol.go b/vendor/github.com/quic-go/quic-go/internal/protocol/protocol.go
similarity index 100%
rename from vendor/github.com/lucas-clemente/quic-go/internal/protocol/protocol.go
rename to vendor/github.com/quic-go/quic-go/internal/protocol/protocol.go
diff --git a/vendor/github.com/lucas-clemente/quic-go/internal/protocol/stream.go b/vendor/github.com/quic-go/quic-go/internal/protocol/stream.go
similarity index 100%
rename from vendor/github.com/lucas-clemente/quic-go/internal/protocol/stream.go
rename to vendor/github.com/quic-go/quic-go/internal/protocol/stream.go
diff --git a/vendor/github.com/lucas-clemente/quic-go/internal/protocol/version.go b/vendor/github.com/quic-go/quic-go/internal/protocol/version.go
similarity index 98%
rename from vendor/github.com/lucas-clemente/quic-go/internal/protocol/version.go
rename to vendor/github.com/quic-go/quic-go/internal/protocol/version.go
index dd54dbd3c..2ae7a1154 100644
--- a/vendor/github.com/lucas-clemente/quic-go/internal/protocol/version.go
+++ b/vendor/github.com/quic-go/quic-go/internal/protocol/version.go
@@ -23,7 +23,7 @@ const (
VersionUnknown VersionNumber = math.MaxUint32
VersionDraft29 VersionNumber = 0xff00001d
Version1 VersionNumber = 0x1
- Version2 VersionNumber = 0x709a50c4
+ Version2 VersionNumber = 0x6b3343cf
)
// SupportedVersions lists the versions that the server supports
diff --git a/vendor/github.com/lucas-clemente/quic-go/internal/qerr/error_codes.go b/vendor/github.com/quic-go/quic-go/internal/qerr/error_codes.go
similarity index 95%
rename from vendor/github.com/lucas-clemente/quic-go/internal/qerr/error_codes.go
rename to vendor/github.com/quic-go/quic-go/internal/qerr/error_codes.go
index bee42d51f..cc846df6a 100644
--- a/vendor/github.com/lucas-clemente/quic-go/internal/qerr/error_codes.go
+++ b/vendor/github.com/quic-go/quic-go/internal/qerr/error_codes.go
@@ -3,7 +3,7 @@ package qerr
import (
"fmt"
- "github.com/lucas-clemente/quic-go/internal/qtls"
+ "github.com/quic-go/quic-go/internal/qtls"
)
// TransportErrorCode is a QUIC transport error.
@@ -81,7 +81,7 @@ func (e TransportErrorCode) String() string {
return "NO_VIABLE_PATH"
default:
if e.IsCryptoError() {
- return fmt.Sprintf("CRYPTO_ERROR (%#x)", uint16(e))
+ return fmt.Sprintf("CRYPTO_ERROR %#x", uint16(e))
}
return fmt.Sprintf("unknown error code: %#x", uint16(e))
}
diff --git a/vendor/github.com/lucas-clemente/quic-go/internal/qerr/errors.go b/vendor/github.com/quic-go/quic-go/internal/qerr/errors.go
similarity index 84%
rename from vendor/github.com/lucas-clemente/quic-go/internal/qerr/errors.go
rename to vendor/github.com/quic-go/quic-go/internal/qerr/errors.go
index 8b1cff980..26ea34452 100644
--- a/vendor/github.com/lucas-clemente/quic-go/internal/qerr/errors.go
+++ b/vendor/github.com/quic-go/quic-go/internal/qerr/errors.go
@@ -4,7 +4,7 @@ import (
"fmt"
"net"
- "github.com/lucas-clemente/quic-go/internal/protocol"
+ "github.com/quic-go/quic-go/internal/protocol"
)
var (
@@ -21,8 +21,8 @@ type TransportError struct {
var _ error = &TransportError{}
-// NewCryptoError create a new TransportError instance for a crypto error
-func NewCryptoError(tlsAlert uint8, errorMessage string) *TransportError {
+// NewLocalCryptoError create a new TransportError instance for a crypto error
+func NewLocalCryptoError(tlsAlert uint8, errorMessage string) *TransportError {
return &TransportError{
ErrorCode: 0x100 + TransportErrorCode(tlsAlert),
ErrorMessage: errorMessage,
@@ -30,7 +30,7 @@ func NewCryptoError(tlsAlert uint8, errorMessage string) *TransportError {
}
func (e *TransportError) Error() string {
- str := e.ErrorCode.String()
+ str := fmt.Sprintf("%s (%s)", e.ErrorCode.String(), getRole(e.Remote))
if e.FrameType != 0 {
str += fmt.Sprintf(" (frame type: %#x)", e.FrameType)
}
@@ -68,9 +68,9 @@ var _ error = &ApplicationError{}
func (e *ApplicationError) Error() string {
if len(e.ErrorMessage) == 0 {
- return fmt.Sprintf("Application error %#x", e.ErrorCode)
+ return fmt.Sprintf("Application error %#x (%s)", e.ErrorCode, getRole(e.Remote))
}
- return fmt.Sprintf("Application error %#x: %s", e.ErrorCode, e.ErrorMessage)
+ return fmt.Sprintf("Application error %#x (%s): %s", e.ErrorCode, getRole(e.Remote), e.ErrorMessage)
}
type IdleTimeoutError struct{}
@@ -122,3 +122,10 @@ func (e *StatelessResetError) Is(target error) bool {
func (e *StatelessResetError) Timeout() bool { return false }
func (e *StatelessResetError) Temporary() bool { return true }
+
+func getRole(remote bool) string {
+ if remote {
+ return "remote"
+ }
+ return "local"
+}
diff --git a/vendor/github.com/lucas-clemente/quic-go/internal/qtls/go119.go b/vendor/github.com/quic-go/quic-go/internal/qtls/go119.go
similarity index 94%
rename from vendor/github.com/lucas-clemente/quic-go/internal/qtls/go119.go
rename to vendor/github.com/quic-go/quic-go/internal/qtls/go119.go
index 86dcaea3d..6c804ccef 100644
--- a/vendor/github.com/lucas-clemente/quic-go/internal/qtls/go119.go
+++ b/vendor/github.com/quic-go/quic-go/internal/qtls/go119.go
@@ -1,5 +1,4 @@
-//go:build go1.19
-// +build go1.19
+//go:build go1.19 && !go1.20
package qtls
@@ -10,7 +9,7 @@ import (
"net"
"unsafe"
- "github.com/marten-seemann/qtls-go1-19"
+ "github.com/quic-go/qtls-go1-19"
)
type (
@@ -84,7 +83,7 @@ type cipherSuiteTLS13 struct {
Hash crypto.Hash
}
-//go:linkname cipherSuiteTLS13ByID github.com/marten-seemann/qtls-go1-19.cipherSuiteTLS13ByID
+//go:linkname cipherSuiteTLS13ByID github.com/quic-go/qtls-go1-19.cipherSuiteTLS13ByID
func cipherSuiteTLS13ByID(id uint16) *cipherSuiteTLS13
// CipherSuiteTLS13ByID gets a TLS 1.3 cipher suite.
diff --git a/vendor/github.com/lucas-clemente/quic-go/internal/qtls/go118.go b/vendor/github.com/quic-go/quic-go/internal/qtls/go120.go
similarity index 91%
rename from vendor/github.com/lucas-clemente/quic-go/internal/qtls/go118.go
rename to vendor/github.com/quic-go/quic-go/internal/qtls/go120.go
index 5de030c78..b9baa52fe 100644
--- a/vendor/github.com/lucas-clemente/quic-go/internal/qtls/go118.go
+++ b/vendor/github.com/quic-go/quic-go/internal/qtls/go120.go
@@ -1,5 +1,4 @@
-//go:build go1.18 && !go1.19
-// +build go1.18,!go1.19
+//go:build go1.20
package qtls
@@ -10,7 +9,7 @@ import (
"net"
"unsafe"
- "github.com/marten-seemann/qtls-go1-18"
+ "github.com/quic-go/qtls-go1-20"
)
type (
@@ -18,7 +17,7 @@ type (
Alert = qtls.Alert
// A Certificate is qtls.Certificate.
Certificate = qtls.Certificate
- // CertificateRequestInfo contains inforamtion about a certificate request.
+ // CertificateRequestInfo contains information about a certificate request.
CertificateRequestInfo = qtls.CertificateRequestInfo
// A CipherSuiteTLS13 is a cipher suite for TLS 1.3
CipherSuiteTLS13 = qtls.CipherSuiteTLS13
@@ -84,7 +83,7 @@ type cipherSuiteTLS13 struct {
Hash crypto.Hash
}
-//go:linkname cipherSuiteTLS13ByID github.com/marten-seemann/qtls-go1-18.cipherSuiteTLS13ByID
+//go:linkname cipherSuiteTLS13ByID github.com/quic-go/qtls-go1-20.cipherSuiteTLS13ByID
func cipherSuiteTLS13ByID(id uint16) *cipherSuiteTLS13
// CipherSuiteTLS13ByID gets a TLS 1.3 cipher suite.
diff --git a/vendor/github.com/quic-go/quic-go/internal/qtls/go121.go b/vendor/github.com/quic-go/quic-go/internal/qtls/go121.go
new file mode 100644
index 000000000..b33406397
--- /dev/null
+++ b/vendor/github.com/quic-go/quic-go/internal/qtls/go121.go
@@ -0,0 +1,5 @@
+//go:build go1.21
+
+package qtls
+
+var _ int = "The version of quic-go you're using can't be built on Go 1.21 yet. For more details, please see https://github.com/quic-go/quic-go/wiki/quic-go-and-Go-versions."
diff --git a/vendor/github.com/quic-go/quic-go/internal/qtls/go_oldversion.go b/vendor/github.com/quic-go/quic-go/internal/qtls/go_oldversion.go
new file mode 100644
index 000000000..e15f03629
--- /dev/null
+++ b/vendor/github.com/quic-go/quic-go/internal/qtls/go_oldversion.go
@@ -0,0 +1,5 @@
+//go:build !go1.19
+
+package qtls
+
+var _ int = "The version of quic-go you're using can't be built using outdated Go versions. For more details, please see https://github.com/quic-go/quic-go/wiki/quic-go-and-Go-versions."
diff --git a/vendor/github.com/lucas-clemente/quic-go/internal/utils/buffered_write_closer.go b/vendor/github.com/quic-go/quic-go/internal/utils/buffered_write_closer.go
similarity index 100%
rename from vendor/github.com/lucas-clemente/quic-go/internal/utils/buffered_write_closer.go
rename to vendor/github.com/quic-go/quic-go/internal/utils/buffered_write_closer.go
diff --git a/vendor/github.com/lucas-clemente/quic-go/internal/utils/byteorder.go b/vendor/github.com/quic-go/quic-go/internal/utils/byteorder.go
similarity index 85%
rename from vendor/github.com/lucas-clemente/quic-go/internal/utils/byteorder.go
rename to vendor/github.com/quic-go/quic-go/internal/utils/byteorder.go
index d1f528429..a9b715e2f 100644
--- a/vendor/github.com/lucas-clemente/quic-go/internal/utils/byteorder.go
+++ b/vendor/github.com/quic-go/quic-go/internal/utils/byteorder.go
@@ -7,6 +7,10 @@ import (
// A ByteOrder specifies how to convert byte sequences into 16-, 32-, or 64-bit unsigned integers.
type ByteOrder interface {
+ Uint32([]byte) uint32
+ Uint24([]byte) uint32
+ Uint16([]byte) uint16
+
ReadUint32(io.ByteReader) (uint32, error)
ReadUint24(io.ByteReader) (uint32, error)
ReadUint16(io.ByteReader) (uint16, error)
diff --git a/vendor/github.com/lucas-clemente/quic-go/internal/utils/byteorder_big_endian.go b/vendor/github.com/quic-go/quic-go/internal/utils/byteorder_big_endian.go
similarity index 85%
rename from vendor/github.com/lucas-clemente/quic-go/internal/utils/byteorder_big_endian.go
rename to vendor/github.com/quic-go/quic-go/internal/utils/byteorder_big_endian.go
index d05542e1d..834a711b9 100644
--- a/vendor/github.com/lucas-clemente/quic-go/internal/utils/byteorder_big_endian.go
+++ b/vendor/github.com/quic-go/quic-go/internal/utils/byteorder_big_endian.go
@@ -2,6 +2,7 @@ package utils
import (
"bytes"
+ "encoding/binary"
"io"
)
@@ -73,6 +74,19 @@ func (bigEndian) ReadUint16(b io.ByteReader) (uint16, error) {
return uint16(b1) + uint16(b2)<<8, nil
}
+func (bigEndian) Uint32(b []byte) uint32 {
+ return binary.BigEndian.Uint32(b)
+}
+
+func (bigEndian) Uint24(b []byte) uint32 {
+ _ = b[2] // bounds check hint to compiler; see golang.org/issue/14808
+ return uint32(b[2]) | uint32(b[1])<<8 | uint32(b[0])<<16
+}
+
+func (bigEndian) Uint16(b []byte) uint16 {
+ return binary.BigEndian.Uint16(b)
+}
+
// WriteUint32 writes a uint32
func (bigEndian) WriteUint32(b *bytes.Buffer, i uint32) {
b.Write([]byte{uint8(i >> 24), uint8(i >> 16), uint8(i >> 8), uint8(i)})
diff --git a/vendor/github.com/lucas-clemente/quic-go/internal/utils/ip.go b/vendor/github.com/quic-go/quic-go/internal/utils/ip.go
similarity index 100%
rename from vendor/github.com/lucas-clemente/quic-go/internal/utils/ip.go
rename to vendor/github.com/quic-go/quic-go/internal/utils/ip.go
diff --git a/vendor/github.com/quic-go/quic-go/internal/utils/linkedlist/README.md b/vendor/github.com/quic-go/quic-go/internal/utils/linkedlist/README.md
new file mode 100644
index 000000000..66482f4fb
--- /dev/null
+++ b/vendor/github.com/quic-go/quic-go/internal/utils/linkedlist/README.md
@@ -0,0 +1,6 @@
+# Usage
+
+This is the Go standard library implementation of a linked list
+(https://golang.org/src/container/list/list.go), with the following modifications:
+* it uses Go generics
+* it allows passing in a `sync.Pool` (via the `NewWithPool` constructor) to reduce allocations of `Element` structs
diff --git a/vendor/github.com/lucas-clemente/quic-go/internal/utils/byteinterval_linkedlist.go b/vendor/github.com/quic-go/quic-go/internal/utils/linkedlist/linkedlist.go
similarity index 58%
rename from vendor/github.com/lucas-clemente/quic-go/internal/utils/byteinterval_linkedlist.go
rename to vendor/github.com/quic-go/quic-go/internal/utils/linkedlist/linkedlist.go
index 096023ef2..804a34444 100644
--- a/vendor/github.com/lucas-clemente/quic-go/internal/utils/byteinterval_linkedlist.go
+++ b/vendor/github.com/quic-go/quic-go/internal/utils/linkedlist/linkedlist.go
@@ -1,29 +1,40 @@
-// This file was automatically generated by genny.
-// Any changes will be lost if this file is regenerated.
-// see https://github.com/cheekybits/genny
+// Copyright 2009 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
-package utils
+// Package list implements a doubly linked list.
+//
+// To iterate over a list (where l is a *List[T]):
+//
+// for e := l.Front(); e != nil; e = e.Next() {
+// // do something with e.Value
+// }
+package list
-// Linked list implementation from the Go standard library.
+import "sync"
-// ByteIntervalElement is an element of a linked list.
-type ByteIntervalElement struct {
+func NewPool[T any]() *sync.Pool {
+ return &sync.Pool{New: func() any { return &Element[T]{} }}
+}
+
+// Element is an element of a linked list.
+type Element[T any] struct {
// Next and previous pointers in the doubly-linked list of elements.
// To simplify the implementation, internally a list l is implemented
// as a ring, such that &l.root is both the next element of the last
// list element (l.Back()) and the previous element of the first list
// element (l.Front()).
- next, prev *ByteIntervalElement
+ next, prev *Element[T]
// The list to which this element belongs.
- list *ByteIntervalList
+ list *List[T]
// The value stored with this element.
- Value ByteInterval
+ Value T
}
// Next returns the next list element or nil.
-func (e *ByteIntervalElement) Next() *ByteIntervalElement {
+func (e *Element[T]) Next() *Element[T] {
if p := e.next; e.list != nil && p != &e.list.root {
return p
}
@@ -31,36 +42,49 @@ func (e *ByteIntervalElement) Next() *ByteIntervalElement {
}
// Prev returns the previous list element or nil.
-func (e *ByteIntervalElement) Prev() *ByteIntervalElement {
+func (e *Element[T]) Prev() *Element[T] {
if p := e.prev; e.list != nil && p != &e.list.root {
return p
}
return nil
}
-// ByteIntervalList is a linked list of ByteIntervals.
-type ByteIntervalList struct {
- root ByteIntervalElement // sentinel list element, only &root, root.prev, and root.next are used
- len int // current list length excluding (this) sentinel element
+func (e *Element[T]) List() *List[T] {
+ return e.list
+}
+
+// List represents a doubly linked list.
+// The zero value for List is an empty list ready to use.
+type List[T any] struct {
+ root Element[T] // sentinel list element, only &root, root.prev, and root.next are used
+ len int // current list length excluding (this) sentinel element
+
+ pool *sync.Pool
}
// Init initializes or clears list l.
-func (l *ByteIntervalList) Init() *ByteIntervalList {
+func (l *List[T]) Init() *List[T] {
l.root.next = &l.root
l.root.prev = &l.root
l.len = 0
return l
}
-// NewByteIntervalList returns an initialized list.
-func NewByteIntervalList() *ByteIntervalList { return new(ByteIntervalList).Init() }
+// New returns an initialized list.
+func New[T any]() *List[T] { return new(List[T]).Init() }
+
+// NewWithPool returns an initialized list, using a sync.Pool for list elements.
+func NewWithPool[T any](pool *sync.Pool) *List[T] {
+ l := &List[T]{pool: pool}
+ return l.Init()
+}
// Len returns the number of elements of list l.
// The complexity is O(1).
-func (l *ByteIntervalList) Len() int { return l.len }
+func (l *List[T]) Len() int { return l.len }
// Front returns the first element of list l or nil if the list is empty.
-func (l *ByteIntervalList) Front() *ByteIntervalElement {
+func (l *List[T]) Front() *Element[T] {
if l.len == 0 {
return nil
}
@@ -68,7 +92,7 @@ func (l *ByteIntervalList) Front() *ByteIntervalElement {
}
// Back returns the last element of list l or nil if the list is empty.
-func (l *ByteIntervalList) Back() *ByteIntervalElement {
+func (l *List[T]) Back() *Element[T] {
if l.len == 0 {
return nil
}
@@ -76,60 +100,83 @@ func (l *ByteIntervalList) Back() *ByteIntervalElement {
}
// lazyInit lazily initializes a zero List value.
-func (l *ByteIntervalList) lazyInit() {
+func (l *List[T]) lazyInit() {
if l.root.next == nil {
l.Init()
}
}
// insert inserts e after at, increments l.len, and returns e.
-func (l *ByteIntervalList) insert(e, at *ByteIntervalElement) *ByteIntervalElement {
- n := at.next
- at.next = e
+func (l *List[T]) insert(e, at *Element[T]) *Element[T] {
e.prev = at
- e.next = n
- n.prev = e
+ e.next = at.next
+ e.prev.next = e
+ e.next.prev = e
e.list = l
l.len++
return e
}
// insertValue is a convenience wrapper for insert(&Element{Value: v}, at).
-func (l *ByteIntervalList) insertValue(v ByteInterval, at *ByteIntervalElement) *ByteIntervalElement {
- return l.insert(&ByteIntervalElement{Value: v}, at)
+func (l *List[T]) insertValue(v T, at *Element[T]) *Element[T] {
+ var e *Element[T]
+ if l.pool != nil {
+ e = l.pool.Get().(*Element[T])
+ } else {
+ e = &Element[T]{}
+ }
+ e.Value = v
+ return l.insert(e, at)
}
-// remove removes e from its list, decrements l.len, and returns e.
-func (l *ByteIntervalList) remove(e *ByteIntervalElement) *ByteIntervalElement {
+// remove removes e from its list, decrements l.len
+func (l *List[T]) remove(e *Element[T]) {
e.prev.next = e.next
e.next.prev = e.prev
e.next = nil // avoid memory leaks
e.prev = nil // avoid memory leaks
e.list = nil
+ if l.pool != nil {
+ l.pool.Put(e)
+ }
l.len--
- return e
+}
+
+// move moves e to next to at.
+func (l *List[T]) move(e, at *Element[T]) {
+ if e == at {
+ return
+ }
+ e.prev.next = e.next
+ e.next.prev = e.prev
+
+ e.prev = at
+ e.next = at.next
+ e.prev.next = e
+ e.next.prev = e
}
// Remove removes e from l if e is an element of list l.
// It returns the element value e.Value.
// The element must not be nil.
-func (l *ByteIntervalList) Remove(e *ByteIntervalElement) ByteInterval {
+func (l *List[T]) Remove(e *Element[T]) T {
+ v := e.Value
if e.list == l {
// if e.list == l, l must have been initialized when e was inserted
// in l or l == nil (e is a zero Element) and l.remove will crash
l.remove(e)
}
- return e.Value
+ return v
}
// PushFront inserts a new element e with value v at the front of list l and returns e.
-func (l *ByteIntervalList) PushFront(v ByteInterval) *ByteIntervalElement {
+func (l *List[T]) PushFront(v T) *Element[T] {
l.lazyInit()
return l.insertValue(v, &l.root)
}
// PushBack inserts a new element e with value v at the back of list l and returns e.
-func (l *ByteIntervalList) PushBack(v ByteInterval) *ByteIntervalElement {
+func (l *List[T]) PushBack(v T) *Element[T] {
l.lazyInit()
return l.insertValue(v, l.root.prev)
}
@@ -137,7 +184,7 @@ func (l *ByteIntervalList) PushBack(v ByteInterval) *ByteIntervalElement {
// InsertBefore inserts a new element e with value v immediately before mark and returns e.
// If mark is not an element of l, the list is not modified.
// The mark must not be nil.
-func (l *ByteIntervalList) InsertBefore(v ByteInterval, mark *ByteIntervalElement) *ByteIntervalElement {
+func (l *List[T]) InsertBefore(v T, mark *Element[T]) *Element[T] {
if mark.list != l {
return nil
}
@@ -148,7 +195,7 @@ func (l *ByteIntervalList) InsertBefore(v ByteInterval, mark *ByteIntervalElemen
// InsertAfter inserts a new element e with value v immediately after mark and returns e.
// If mark is not an element of l, the list is not modified.
// The mark must not be nil.
-func (l *ByteIntervalList) InsertAfter(v ByteInterval, mark *ByteIntervalElement) *ByteIntervalElement {
+func (l *List[T]) InsertAfter(v T, mark *Element[T]) *Element[T] {
if mark.list != l {
return nil
}
@@ -159,57 +206,57 @@ func (l *ByteIntervalList) InsertAfter(v ByteInterval, mark *ByteIntervalElement
// MoveToFront moves element e to the front of list l.
// If e is not an element of l, the list is not modified.
// The element must not be nil.
-func (l *ByteIntervalList) MoveToFront(e *ByteIntervalElement) {
+func (l *List[T]) MoveToFront(e *Element[T]) {
if e.list != l || l.root.next == e {
return
}
// see comment in List.Remove about initialization of l
- l.insert(l.remove(e), &l.root)
+ l.move(e, &l.root)
}
// MoveToBack moves element e to the back of list l.
// If e is not an element of l, the list is not modified.
// The element must not be nil.
-func (l *ByteIntervalList) MoveToBack(e *ByteIntervalElement) {
+func (l *List[T]) MoveToBack(e *Element[T]) {
if e.list != l || l.root.prev == e {
return
}
// see comment in List.Remove about initialization of l
- l.insert(l.remove(e), l.root.prev)
+ l.move(e, l.root.prev)
}
// MoveBefore moves element e to its new position before mark.
// If e or mark is not an element of l, or e == mark, the list is not modified.
// The element and mark must not be nil.
-func (l *ByteIntervalList) MoveBefore(e, mark *ByteIntervalElement) {
+func (l *List[T]) MoveBefore(e, mark *Element[T]) {
if e.list != l || e == mark || mark.list != l {
return
}
- l.insert(l.remove(e), mark.prev)
+ l.move(e, mark.prev)
}
// MoveAfter moves element e to its new position after mark.
// If e or mark is not an element of l, or e == mark, the list is not modified.
// The element and mark must not be nil.
-func (l *ByteIntervalList) MoveAfter(e, mark *ByteIntervalElement) {
+func (l *List[T]) MoveAfter(e, mark *Element[T]) {
if e.list != l || e == mark || mark.list != l {
return
}
- l.insert(l.remove(e), mark)
+ l.move(e, mark)
}
-// PushBackList inserts a copy of an other list at the back of list l.
+// PushBackList inserts a copy of another list at the back of list l.
// The lists l and other may be the same. They must not be nil.
-func (l *ByteIntervalList) PushBackList(other *ByteIntervalList) {
+func (l *List[T]) PushBackList(other *List[T]) {
l.lazyInit()
for i, e := other.Len(), other.Front(); i > 0; i, e = i-1, e.Next() {
l.insertValue(e.Value, l.root.prev)
}
}
-// PushFrontList inserts a copy of an other list at the front of list l.
+// PushFrontList inserts a copy of another list at the front of list l.
// The lists l and other may be the same. They must not be nil.
-func (l *ByteIntervalList) PushFrontList(other *ByteIntervalList) {
+func (l *List[T]) PushFrontList(other *List[T]) {
l.lazyInit()
for i, e := other.Len(), other.Back(); i > 0; i, e = i-1, e.Prev() {
l.insertValue(e.Value, &l.root)
diff --git a/vendor/github.com/lucas-clemente/quic-go/internal/utils/log.go b/vendor/github.com/quic-go/quic-go/internal/utils/log.go
similarity index 98%
rename from vendor/github.com/lucas-clemente/quic-go/internal/utils/log.go
rename to vendor/github.com/quic-go/quic-go/internal/utils/log.go
index e27f01b4a..89b52c0d9 100644
--- a/vendor/github.com/lucas-clemente/quic-go/internal/utils/log.go
+++ b/vendor/github.com/quic-go/quic-go/internal/utils/log.go
@@ -125,7 +125,7 @@ func readLoggingEnv() LogLevel {
case "error":
return LogLevelError
default:
- fmt.Fprintln(os.Stderr, "invalid quic-go log level, see https://github.com/lucas-clemente/quic-go/wiki/Logging")
+ fmt.Fprintln(os.Stderr, "invalid quic-go log level, see https://github.com/quic-go/quic-go/wiki/Logging")
return LogLevelNothing
}
}
diff --git a/vendor/github.com/quic-go/quic-go/internal/utils/minmax.go b/vendor/github.com/quic-go/quic-go/internal/utils/minmax.go
new file mode 100644
index 000000000..d191f7515
--- /dev/null
+++ b/vendor/github.com/quic-go/quic-go/internal/utils/minmax.go
@@ -0,0 +1,72 @@
+package utils
+
+import (
+ "math"
+ "time"
+
+ "golang.org/x/exp/constraints"
+)
+
+// InfDuration is a duration of infinite length
+const InfDuration = time.Duration(math.MaxInt64)
+
+func Max[T constraints.Ordered](a, b T) T {
+ if a < b {
+ return b
+ }
+ return a
+}
+
+func Min[T constraints.Ordered](a, b T) T {
+ if a < b {
+ return a
+ }
+ return b
+}
+
+// MinNonZeroDuration returns the minimum duration that's not zero.
+func MinNonZeroDuration(a, b time.Duration) time.Duration {
+ if a == 0 {
+ return b
+ }
+ if b == 0 {
+ return a
+ }
+ return Min(a, b)
+}
+
+// AbsDuration returns the absolute value of a time duration
+func AbsDuration(d time.Duration) time.Duration {
+ if d >= 0 {
+ return d
+ }
+ return -d
+}
+
+// MinTime returns the earlier time
+func MinTime(a, b time.Time) time.Time {
+ if a.After(b) {
+ return b
+ }
+ return a
+}
+
+// MinNonZeroTime returns the earliest time that is not time.Time{}
+// If both a and b are time.Time{}, it returns time.Time{}
+func MinNonZeroTime(a, b time.Time) time.Time {
+ if a.IsZero() {
+ return b
+ }
+ if b.IsZero() {
+ return a
+ }
+ return MinTime(a, b)
+}
+
+// MaxTime returns the later time
+func MaxTime(a, b time.Time) time.Time {
+ if a.After(b) {
+ return a
+ }
+ return b
+}
diff --git a/vendor/github.com/lucas-clemente/quic-go/internal/utils/rand.go b/vendor/github.com/quic-go/quic-go/internal/utils/rand.go
similarity index 100%
rename from vendor/github.com/lucas-clemente/quic-go/internal/utils/rand.go
rename to vendor/github.com/quic-go/quic-go/internal/utils/rand.go
diff --git a/vendor/github.com/lucas-clemente/quic-go/internal/utils/rtt_stats.go b/vendor/github.com/quic-go/quic-go/internal/utils/rtt_stats.go
similarity index 92%
rename from vendor/github.com/lucas-clemente/quic-go/internal/utils/rtt_stats.go
rename to vendor/github.com/quic-go/quic-go/internal/utils/rtt_stats.go
index 66642ba8f..527539e1e 100644
--- a/vendor/github.com/lucas-clemente/quic-go/internal/utils/rtt_stats.go
+++ b/vendor/github.com/quic-go/quic-go/internal/utils/rtt_stats.go
@@ -3,7 +3,7 @@ package utils
import (
"time"
- "github.com/lucas-clemente/quic-go/internal/protocol"
+ "github.com/quic-go/quic-go/internal/protocol"
)
const (
@@ -55,7 +55,7 @@ func (r *RTTStats) PTO(includeMaxAckDelay bool) time.Duration {
if r.SmoothedRTT() == 0 {
return 2 * defaultInitialRTT
}
- pto := r.SmoothedRTT() + MaxDuration(4*r.MeanDeviation(), protocol.TimerGranularity)
+ pto := r.SmoothedRTT() + Max(4*r.MeanDeviation(), protocol.TimerGranularity)
if includeMaxAckDelay {
pto += r.MaxAckDelay()
}
@@ -122,6 +122,6 @@ func (r *RTTStats) OnConnectionMigration() {
// is larger. The mean deviation is increased to the most recent deviation if
// it's larger.
func (r *RTTStats) ExpireSmoothedMetrics() {
- r.meanDeviation = MaxDuration(r.meanDeviation, AbsDuration(r.smoothedRTT-r.latestRTT))
- r.smoothedRTT = MaxDuration(r.smoothedRTT, r.latestRTT)
+ r.meanDeviation = Max(r.meanDeviation, AbsDuration(r.smoothedRTT-r.latestRTT))
+ r.smoothedRTT = Max(r.smoothedRTT, r.latestRTT)
}
diff --git a/vendor/github.com/lucas-clemente/quic-go/internal/utils/timer.go b/vendor/github.com/quic-go/quic-go/internal/utils/timer.go
similarity index 94%
rename from vendor/github.com/lucas-clemente/quic-go/internal/utils/timer.go
rename to vendor/github.com/quic-go/quic-go/internal/utils/timer.go
index a4f5e67aa..361106c8a 100644
--- a/vendor/github.com/lucas-clemente/quic-go/internal/utils/timer.go
+++ b/vendor/github.com/quic-go/quic-go/internal/utils/timer.go
@@ -47,6 +47,10 @@ func (t *Timer) SetRead() {
t.read = true
}
+func (t *Timer) Deadline() time.Time {
+ return t.deadline
+}
+
// Stop stops the timer
func (t *Timer) Stop() {
t.t.Stop()
diff --git a/vendor/github.com/lucas-clemente/quic-go/internal/wire/ack_frame.go b/vendor/github.com/quic-go/quic-go/internal/wire/ack_frame.go
similarity index 88%
rename from vendor/github.com/lucas-clemente/quic-go/internal/wire/ack_frame.go
rename to vendor/github.com/quic-go/quic-go/internal/wire/ack_frame.go
index e5280a737..5b01649a3 100644
--- a/vendor/github.com/lucas-clemente/quic-go/internal/wire/ack_frame.go
+++ b/vendor/github.com/quic-go/quic-go/internal/wire/ack_frame.go
@@ -6,9 +6,9 @@ import (
"sort"
"time"
- "github.com/lucas-clemente/quic-go/internal/protocol"
- "github.com/lucas-clemente/quic-go/internal/utils"
- "github.com/lucas-clemente/quic-go/quicvarint"
+ "github.com/quic-go/quic-go/internal/protocol"
+ "github.com/quic-go/quic-go/internal/utils"
+ "github.com/quic-go/quic-go/quicvarint"
)
var errInvalidAckRanges = errors.New("AckFrame: ACK frame contains invalid ACK ranges")
@@ -29,7 +29,7 @@ func parseAckFrame(r *bytes.Reader, ackDelayExponent uint8, _ protocol.VersionNu
}
ecn := typeByte&0x1 > 0
- frame := &AckFrame{}
+ frame := GetAckFrame()
la, err := quicvarint.Read(r)
if err != nil {
@@ -106,41 +106,41 @@ func parseAckFrame(r *bytes.Reader, ackDelayExponent uint8, _ protocol.VersionNu
return frame, nil
}
-// Write writes an ACK frame.
-func (f *AckFrame) Write(b *bytes.Buffer, _ protocol.VersionNumber) error {
+// Append appends an ACK frame.
+func (f *AckFrame) Append(b []byte, _ protocol.VersionNumber) ([]byte, error) {
hasECN := f.ECT0 > 0 || f.ECT1 > 0 || f.ECNCE > 0
if hasECN {
- b.WriteByte(0x3)
+ b = append(b, 0b11)
} else {
- b.WriteByte(0x2)
+ b = append(b, 0b10)
}
- quicvarint.Write(b, uint64(f.LargestAcked()))
- quicvarint.Write(b, encodeAckDelay(f.DelayTime))
+ b = quicvarint.Append(b, uint64(f.LargestAcked()))
+ b = quicvarint.Append(b, encodeAckDelay(f.DelayTime))
numRanges := f.numEncodableAckRanges()
- quicvarint.Write(b, uint64(numRanges-1))
+ b = quicvarint.Append(b, uint64(numRanges-1))
// write the first range
_, firstRange := f.encodeAckRange(0)
- quicvarint.Write(b, firstRange)
+ b = quicvarint.Append(b, firstRange)
// write all the other range
for i := 1; i < numRanges; i++ {
gap, len := f.encodeAckRange(i)
- quicvarint.Write(b, gap)
- quicvarint.Write(b, len)
+ b = quicvarint.Append(b, gap)
+ b = quicvarint.Append(b, len)
}
if hasECN {
- quicvarint.Write(b, f.ECT0)
- quicvarint.Write(b, f.ECT1)
- quicvarint.Write(b, f.ECNCE)
+ b = quicvarint.Append(b, f.ECT0)
+ b = quicvarint.Append(b, f.ECT1)
+ b = quicvarint.Append(b, f.ECNCE)
}
- return nil
+ return b, nil
}
// Length of a written frame
-func (f *AckFrame) Length(version protocol.VersionNumber) protocol.ByteCount {
+func (f *AckFrame) Length(_ protocol.VersionNumber) protocol.ByteCount {
largestAcked := f.AckRanges[0].Largest
numRanges := f.numEncodableAckRanges()
diff --git a/vendor/github.com/quic-go/quic-go/internal/wire/ack_frame_pool.go b/vendor/github.com/quic-go/quic-go/internal/wire/ack_frame_pool.go
new file mode 100644
index 000000000..a0c6a21d7
--- /dev/null
+++ b/vendor/github.com/quic-go/quic-go/internal/wire/ack_frame_pool.go
@@ -0,0 +1,24 @@
+package wire
+
+import "sync"
+
+var ackFramePool = sync.Pool{New: func() any {
+ return &AckFrame{}
+}}
+
+func GetAckFrame() *AckFrame {
+ f := ackFramePool.Get().(*AckFrame)
+ f.AckRanges = f.AckRanges[:0]
+ f.ECNCE = 0
+ f.ECT0 = 0
+ f.ECT1 = 0
+ f.DelayTime = 0
+ return f
+}
+
+func PutAckFrame(f *AckFrame) {
+ if cap(f.AckRanges) > 4 {
+ return
+ }
+ ackFramePool.Put(f)
+}
diff --git a/vendor/github.com/lucas-clemente/quic-go/internal/wire/ack_range.go b/vendor/github.com/quic-go/quic-go/internal/wire/ack_range.go
similarity index 82%
rename from vendor/github.com/lucas-clemente/quic-go/internal/wire/ack_range.go
rename to vendor/github.com/quic-go/quic-go/internal/wire/ack_range.go
index 0f4185801..03a1235ee 100644
--- a/vendor/github.com/lucas-clemente/quic-go/internal/wire/ack_range.go
+++ b/vendor/github.com/quic-go/quic-go/internal/wire/ack_range.go
@@ -1,6 +1,6 @@
package wire
-import "github.com/lucas-clemente/quic-go/internal/protocol"
+import "github.com/quic-go/quic-go/internal/protocol"
// AckRange is an ACK range
type AckRange struct {
diff --git a/vendor/github.com/lucas-clemente/quic-go/internal/wire/connection_close_frame.go b/vendor/github.com/quic-go/quic-go/internal/wire/connection_close_frame.go
similarity index 81%
rename from vendor/github.com/lucas-clemente/quic-go/internal/wire/connection_close_frame.go
rename to vendor/github.com/quic-go/quic-go/internal/wire/connection_close_frame.go
index 4ce49af6e..de2283b3b 100644
--- a/vendor/github.com/lucas-clemente/quic-go/internal/wire/connection_close_frame.go
+++ b/vendor/github.com/quic-go/quic-go/internal/wire/connection_close_frame.go
@@ -4,8 +4,8 @@ import (
"bytes"
"io"
- "github.com/lucas-clemente/quic-go/internal/protocol"
- "github.com/lucas-clemente/quic-go/quicvarint"
+ "github.com/quic-go/quic-go/internal/protocol"
+ "github.com/quic-go/quic-go/quicvarint"
)
// A ConnectionCloseFrame is a CONNECTION_CLOSE frame
@@ -66,18 +66,18 @@ func (f *ConnectionCloseFrame) Length(protocol.VersionNumber) protocol.ByteCount
return length
}
-func (f *ConnectionCloseFrame) Write(b *bytes.Buffer, version protocol.VersionNumber) error {
+func (f *ConnectionCloseFrame) Append(b []byte, _ protocol.VersionNumber) ([]byte, error) {
if f.IsApplicationError {
- b.WriteByte(0x1d)
+ b = append(b, 0x1d)
} else {
- b.WriteByte(0x1c)
+ b = append(b, 0x1c)
}
- quicvarint.Write(b, f.ErrorCode)
+ b = quicvarint.Append(b, f.ErrorCode)
if !f.IsApplicationError {
- quicvarint.Write(b, f.FrameType)
+ b = quicvarint.Append(b, f.FrameType)
}
- quicvarint.Write(b, uint64(len(f.ReasonPhrase)))
- b.WriteString(f.ReasonPhrase)
- return nil
+ b = quicvarint.Append(b, uint64(len(f.ReasonPhrase)))
+ b = append(b, []byte(f.ReasonPhrase)...)
+ return b, nil
}
diff --git a/vendor/github.com/lucas-clemente/quic-go/internal/wire/crypto_frame.go b/vendor/github.com/quic-go/quic-go/internal/wire/crypto_frame.go
similarity index 88%
rename from vendor/github.com/lucas-clemente/quic-go/internal/wire/crypto_frame.go
rename to vendor/github.com/quic-go/quic-go/internal/wire/crypto_frame.go
index 6301c8783..99ffb21d0 100644
--- a/vendor/github.com/lucas-clemente/quic-go/internal/wire/crypto_frame.go
+++ b/vendor/github.com/quic-go/quic-go/internal/wire/crypto_frame.go
@@ -4,8 +4,8 @@ import (
"bytes"
"io"
- "github.com/lucas-clemente/quic-go/internal/protocol"
- "github.com/lucas-clemente/quic-go/quicvarint"
+ "github.com/quic-go/quic-go/internal/protocol"
+ "github.com/quic-go/quic-go/quicvarint"
)
// A CryptoFrame is a CRYPTO frame
@@ -42,12 +42,12 @@ func parseCryptoFrame(r *bytes.Reader, _ protocol.VersionNumber) (*CryptoFrame,
return frame, nil
}
-func (f *CryptoFrame) Write(b *bytes.Buffer, _ protocol.VersionNumber) error {
- b.WriteByte(0x6)
- quicvarint.Write(b, uint64(f.Offset))
- quicvarint.Write(b, uint64(len(f.Data)))
- b.Write(f.Data)
- return nil
+func (f *CryptoFrame) Append(b []byte, _ protocol.VersionNumber) ([]byte, error) {
+ b = append(b, 0x6)
+ b = quicvarint.Append(b, uint64(f.Offset))
+ b = quicvarint.Append(b, uint64(len(f.Data)))
+ b = append(b, f.Data...)
+ return b, nil
}
// Length of a written frame
diff --git a/vendor/github.com/lucas-clemente/quic-go/internal/wire/data_blocked_frame.go b/vendor/github.com/quic-go/quic-go/internal/wire/data_blocked_frame.go
similarity index 68%
rename from vendor/github.com/lucas-clemente/quic-go/internal/wire/data_blocked_frame.go
rename to vendor/github.com/quic-go/quic-go/internal/wire/data_blocked_frame.go
index 459f04d12..b567af8a4 100644
--- a/vendor/github.com/lucas-clemente/quic-go/internal/wire/data_blocked_frame.go
+++ b/vendor/github.com/quic-go/quic-go/internal/wire/data_blocked_frame.go
@@ -3,8 +3,8 @@ package wire
import (
"bytes"
- "github.com/lucas-clemente/quic-go/internal/protocol"
- "github.com/lucas-clemente/quic-go/quicvarint"
+ "github.com/quic-go/quic-go/internal/protocol"
+ "github.com/quic-go/quic-go/quicvarint"
)
// A DataBlockedFrame is a DATA_BLOCKED frame
@@ -25,11 +25,10 @@ func parseDataBlockedFrame(r *bytes.Reader, _ protocol.VersionNumber) (*DataBloc
}, nil
}
-func (f *DataBlockedFrame) Write(b *bytes.Buffer, version protocol.VersionNumber) error {
- typeByte := uint8(0x14)
- b.WriteByte(typeByte)
- quicvarint.Write(b, uint64(f.MaximumData))
- return nil
+func (f *DataBlockedFrame) Append(b []byte, version protocol.VersionNumber) ([]byte, error) {
+ b = append(b, 0x14)
+ b = quicvarint.Append(b, uint64(f.MaximumData))
+ return b, nil
}
// Length of a written frame
diff --git a/vendor/github.com/lucas-clemente/quic-go/internal/wire/datagram_frame.go b/vendor/github.com/quic-go/quic-go/internal/wire/datagram_frame.go
similarity index 84%
rename from vendor/github.com/lucas-clemente/quic-go/internal/wire/datagram_frame.go
rename to vendor/github.com/quic-go/quic-go/internal/wire/datagram_frame.go
index 9d6e55cb0..756a23ffd 100644
--- a/vendor/github.com/lucas-clemente/quic-go/internal/wire/datagram_frame.go
+++ b/vendor/github.com/quic-go/quic-go/internal/wire/datagram_frame.go
@@ -4,8 +4,8 @@ import (
"bytes"
"io"
- "github.com/lucas-clemente/quic-go/internal/protocol"
- "github.com/lucas-clemente/quic-go/quicvarint"
+ "github.com/quic-go/quic-go/internal/protocol"
+ "github.com/quic-go/quic-go/quicvarint"
)
// A DatagramFrame is a DATAGRAM frame
@@ -44,17 +44,17 @@ func parseDatagramFrame(r *bytes.Reader, _ protocol.VersionNumber) (*DatagramFra
return f, nil
}
-func (f *DatagramFrame) Write(b *bytes.Buffer, _ protocol.VersionNumber) error {
+func (f *DatagramFrame) Append(b []byte, _ protocol.VersionNumber) ([]byte, error) {
typeByte := uint8(0x30)
if f.DataLenPresent {
- typeByte ^= 0x1
+ typeByte ^= 0b1
}
- b.WriteByte(typeByte)
+ b = append(b, typeByte)
if f.DataLenPresent {
- quicvarint.Write(b, uint64(len(f.Data)))
+ b = quicvarint.Append(b, uint64(len(f.Data)))
}
- b.Write(f.Data)
- return nil
+ b = append(b, f.Data...)
+ return b, nil
}
// MaxDataLen returns the maximum data length
diff --git a/vendor/github.com/quic-go/quic-go/internal/wire/extended_header.go b/vendor/github.com/quic-go/quic-go/internal/wire/extended_header.go
new file mode 100644
index 000000000..d10820d6d
--- /dev/null
+++ b/vendor/github.com/quic-go/quic-go/internal/wire/extended_header.go
@@ -0,0 +1,210 @@
+package wire
+
+import (
+ "bytes"
+ "encoding/binary"
+ "errors"
+ "fmt"
+ "io"
+
+ "github.com/quic-go/quic-go/internal/protocol"
+ "github.com/quic-go/quic-go/internal/utils"
+ "github.com/quic-go/quic-go/quicvarint"
+)
+
+// ErrInvalidReservedBits is returned when the reserved bits are incorrect.
+// When this error is returned, parsing continues, and an ExtendedHeader is returned.
+// This is necessary because we need to decrypt the packet in that case,
+// in order to avoid a timing side-channel.
+var ErrInvalidReservedBits = errors.New("invalid reserved bits")
+
+// ExtendedHeader is the header of a QUIC packet.
+type ExtendedHeader struct {
+ Header
+
+ typeByte byte
+
+ KeyPhase protocol.KeyPhaseBit
+
+ PacketNumberLen protocol.PacketNumberLen
+ PacketNumber protocol.PacketNumber
+
+ parsedLen protocol.ByteCount
+}
+
+func (h *ExtendedHeader) parse(b *bytes.Reader, v protocol.VersionNumber) (bool /* reserved bits valid */, error) {
+ startLen := b.Len()
+ // read the (now unencrypted) first byte
+ var err error
+ h.typeByte, err = b.ReadByte()
+ if err != nil {
+ return false, err
+ }
+ if _, err := b.Seek(int64(h.Header.ParsedLen())-1, io.SeekCurrent); err != nil {
+ return false, err
+ }
+ reservedBitsValid, err := h.parseLongHeader(b, v)
+ if err != nil {
+ return false, err
+ }
+ h.parsedLen = protocol.ByteCount(startLen - b.Len())
+ return reservedBitsValid, err
+}
+
+func (h *ExtendedHeader) parseLongHeader(b *bytes.Reader, _ protocol.VersionNumber) (bool /* reserved bits valid */, error) {
+ if err := h.readPacketNumber(b); err != nil {
+ return false, err
+ }
+ if h.typeByte&0xc != 0 {
+ return false, nil
+ }
+ return true, nil
+}
+
+func (h *ExtendedHeader) readPacketNumber(b *bytes.Reader) error {
+ h.PacketNumberLen = protocol.PacketNumberLen(h.typeByte&0x3) + 1
+ switch h.PacketNumberLen {
+ case protocol.PacketNumberLen1:
+ n, err := b.ReadByte()
+ if err != nil {
+ return err
+ }
+ h.PacketNumber = protocol.PacketNumber(n)
+ case protocol.PacketNumberLen2:
+ n, err := utils.BigEndian.ReadUint16(b)
+ if err != nil {
+ return err
+ }
+ h.PacketNumber = protocol.PacketNumber(n)
+ case protocol.PacketNumberLen3:
+ n, err := utils.BigEndian.ReadUint24(b)
+ if err != nil {
+ return err
+ }
+ h.PacketNumber = protocol.PacketNumber(n)
+ case protocol.PacketNumberLen4:
+ n, err := utils.BigEndian.ReadUint32(b)
+ if err != nil {
+ return err
+ }
+ h.PacketNumber = protocol.PacketNumber(n)
+ default:
+ return fmt.Errorf("invalid packet number length: %d", h.PacketNumberLen)
+ }
+ return nil
+}
+
+// Append appends the Header.
+func (h *ExtendedHeader) Append(b []byte, v protocol.VersionNumber) ([]byte, error) {
+ if h.DestConnectionID.Len() > protocol.MaxConnIDLen {
+ return nil, fmt.Errorf("invalid connection ID length: %d bytes", h.DestConnectionID.Len())
+ }
+ if h.SrcConnectionID.Len() > protocol.MaxConnIDLen {
+ return nil, fmt.Errorf("invalid connection ID length: %d bytes", h.SrcConnectionID.Len())
+ }
+
+ var packetType uint8
+ if v == protocol.Version2 {
+ //nolint:exhaustive
+ switch h.Type {
+ case protocol.PacketTypeInitial:
+ packetType = 0b01
+ case protocol.PacketType0RTT:
+ packetType = 0b10
+ case protocol.PacketTypeHandshake:
+ packetType = 0b11
+ case protocol.PacketTypeRetry:
+ packetType = 0b00
+ }
+ } else {
+ //nolint:exhaustive
+ switch h.Type {
+ case protocol.PacketTypeInitial:
+ packetType = 0b00
+ case protocol.PacketType0RTT:
+ packetType = 0b01
+ case protocol.PacketTypeHandshake:
+ packetType = 0b10
+ case protocol.PacketTypeRetry:
+ packetType = 0b11
+ }
+ }
+ firstByte := 0xc0 | packetType<<4
+ if h.Type != protocol.PacketTypeRetry {
+ // Retry packets don't have a packet number
+ firstByte |= uint8(h.PacketNumberLen - 1)
+ }
+
+ b = append(b, firstByte)
+ b = append(b, make([]byte, 4)...)
+ binary.BigEndian.PutUint32(b[len(b)-4:], uint32(h.Version))
+ b = append(b, uint8(h.DestConnectionID.Len()))
+ b = append(b, h.DestConnectionID.Bytes()...)
+ b = append(b, uint8(h.SrcConnectionID.Len()))
+ b = append(b, h.SrcConnectionID.Bytes()...)
+
+ //nolint:exhaustive
+ switch h.Type {
+ case protocol.PacketTypeRetry:
+ b = append(b, h.Token...)
+ return b, nil
+ case protocol.PacketTypeInitial:
+ b = quicvarint.Append(b, uint64(len(h.Token)))
+ b = append(b, h.Token...)
+ }
+ b = quicvarint.AppendWithLen(b, uint64(h.Length), 2)
+ return appendPacketNumber(b, h.PacketNumber, h.PacketNumberLen)
+}
+
+// ParsedLen returns the number of bytes that were consumed when parsing the header
+func (h *ExtendedHeader) ParsedLen() protocol.ByteCount {
+ return h.parsedLen
+}
+
+// GetLength determines the length of the Header.
+func (h *ExtendedHeader) GetLength(_ protocol.VersionNumber) protocol.ByteCount {
+ length := 1 /* type byte */ + 4 /* version */ + 1 /* dest conn ID len */ + protocol.ByteCount(h.DestConnectionID.Len()) + 1 /* src conn ID len */ + protocol.ByteCount(h.SrcConnectionID.Len()) + protocol.ByteCount(h.PacketNumberLen) + 2 /* length */
+ if h.Type == protocol.PacketTypeInitial {
+ length += quicvarint.Len(uint64(len(h.Token))) + protocol.ByteCount(len(h.Token))
+ }
+ return length
+}
+
+// Log logs the Header
+func (h *ExtendedHeader) Log(logger utils.Logger) {
+ var token string
+ if h.Type == protocol.PacketTypeInitial || h.Type == protocol.PacketTypeRetry {
+ if len(h.Token) == 0 {
+ token = "Token: (empty), "
+ } else {
+ token = fmt.Sprintf("Token: %#x, ", h.Token)
+ }
+ if h.Type == protocol.PacketTypeRetry {
+ logger.Debugf("\tLong Header{Type: %s, DestConnectionID: %s, SrcConnectionID: %s, %sVersion: %s}", h.Type, h.DestConnectionID, h.SrcConnectionID, token, h.Version)
+ return
+ }
+ }
+ logger.Debugf("\tLong Header{Type: %s, DestConnectionID: %s, SrcConnectionID: %s, %sPacketNumber: %d, PacketNumberLen: %d, Length: %d, Version: %s}", h.Type, h.DestConnectionID, h.SrcConnectionID, token, h.PacketNumber, h.PacketNumberLen, h.Length, h.Version)
+}
+
+func appendPacketNumber(b []byte, pn protocol.PacketNumber, pnLen protocol.PacketNumberLen) ([]byte, error) {
+ switch pnLen {
+ case protocol.PacketNumberLen1:
+ b = append(b, uint8(pn))
+ case protocol.PacketNumberLen2:
+ buf := make([]byte, 2)
+ binary.BigEndian.PutUint16(buf, uint16(pn))
+ b = append(b, buf...)
+ case protocol.PacketNumberLen3:
+ buf := make([]byte, 4)
+ binary.BigEndian.PutUint32(buf, uint32(pn))
+ b = append(b, buf[1:]...)
+ case protocol.PacketNumberLen4:
+ buf := make([]byte, 4)
+ binary.BigEndian.PutUint32(buf, uint32(pn))
+ b = append(b, buf...)
+ default:
+ return nil, fmt.Errorf("invalid packet number length: %d", pnLen)
+ }
+ return b, nil
+}
diff --git a/vendor/github.com/lucas-clemente/quic-go/internal/wire/frame_parser.go b/vendor/github.com/quic-go/quic-go/internal/wire/frame_parser.go
similarity index 55%
rename from vendor/github.com/lucas-clemente/quic-go/internal/wire/frame_parser.go
rename to vendor/github.com/quic-go/quic-go/internal/wire/frame_parser.go
index f3a51ecb5..ec744d903 100644
--- a/vendor/github.com/lucas-clemente/quic-go/internal/wire/frame_parser.go
+++ b/vendor/github.com/quic-go/quic-go/internal/wire/frame_parser.go
@@ -6,37 +6,48 @@ import (
"fmt"
"reflect"
- "github.com/lucas-clemente/quic-go/internal/protocol"
- "github.com/lucas-clemente/quic-go/internal/qerr"
+ "github.com/quic-go/quic-go/internal/protocol"
+ "github.com/quic-go/quic-go/internal/qerr"
)
type frameParser struct {
+ r bytes.Reader // cached bytes.Reader, so we don't have to repeatedly allocate them
+
ackDelayExponent uint8
supportsDatagrams bool
-
- version protocol.VersionNumber
}
+var _ FrameParser = &frameParser{}
+
// NewFrameParser creates a new frame parser.
-func NewFrameParser(supportsDatagrams bool, v protocol.VersionNumber) FrameParser {
+func NewFrameParser(supportsDatagrams bool) *frameParser {
return &frameParser{
+ r: *bytes.NewReader(nil),
supportsDatagrams: supportsDatagrams,
- version: v,
}
}
// ParseNext parses the next frame.
// It skips PADDING frames.
-func (p *frameParser) ParseNext(r *bytes.Reader, encLevel protocol.EncryptionLevel) (Frame, error) {
+func (p *frameParser) ParseNext(data []byte, encLevel protocol.EncryptionLevel, v protocol.VersionNumber) (int, Frame, error) {
+ startLen := len(data)
+ p.r.Reset(data)
+ frame, err := p.parseNext(&p.r, encLevel, v)
+ n := startLen - p.r.Len()
+ p.r.Reset(nil)
+ return n, frame, err
+}
+
+func (p *frameParser) parseNext(r *bytes.Reader, encLevel protocol.EncryptionLevel, v protocol.VersionNumber) (Frame, error) {
for r.Len() != 0 {
- typeByte, _ := r.ReadByte()
+ typeByte, _ := p.r.ReadByte()
if typeByte == 0x0 { // PADDING frame
continue
}
r.UnreadByte()
- f, err := p.parseFrame(r, typeByte, encLevel)
+ f, err := p.parseFrame(r, typeByte, encLevel, v)
if err != nil {
return nil, &qerr.TransportError{
FrameType: uint64(typeByte),
@@ -49,56 +60,56 @@ func (p *frameParser) ParseNext(r *bytes.Reader, encLevel protocol.EncryptionLev
return nil, nil
}
-func (p *frameParser) parseFrame(r *bytes.Reader, typeByte byte, encLevel protocol.EncryptionLevel) (Frame, error) {
+func (p *frameParser) parseFrame(r *bytes.Reader, typeByte byte, encLevel protocol.EncryptionLevel, v protocol.VersionNumber) (Frame, error) {
var frame Frame
var err error
if typeByte&0xf8 == 0x8 {
- frame, err = parseStreamFrame(r, p.version)
+ frame, err = parseStreamFrame(r, v)
} else {
switch typeByte {
case 0x1:
- frame, err = parsePingFrame(r, p.version)
+ frame, err = parsePingFrame(r, v)
case 0x2, 0x3:
ackDelayExponent := p.ackDelayExponent
if encLevel != protocol.Encryption1RTT {
ackDelayExponent = protocol.DefaultAckDelayExponent
}
- frame, err = parseAckFrame(r, ackDelayExponent, p.version)
+ frame, err = parseAckFrame(r, ackDelayExponent, v)
case 0x4:
- frame, err = parseResetStreamFrame(r, p.version)
+ frame, err = parseResetStreamFrame(r, v)
case 0x5:
- frame, err = parseStopSendingFrame(r, p.version)
+ frame, err = parseStopSendingFrame(r, v)
case 0x6:
- frame, err = parseCryptoFrame(r, p.version)
+ frame, err = parseCryptoFrame(r, v)
case 0x7:
- frame, err = parseNewTokenFrame(r, p.version)
+ frame, err = parseNewTokenFrame(r, v)
case 0x10:
- frame, err = parseMaxDataFrame(r, p.version)
+ frame, err = parseMaxDataFrame(r, v)
case 0x11:
- frame, err = parseMaxStreamDataFrame(r, p.version)
+ frame, err = parseMaxStreamDataFrame(r, v)
case 0x12, 0x13:
- frame, err = parseMaxStreamsFrame(r, p.version)
+ frame, err = parseMaxStreamsFrame(r, v)
case 0x14:
- frame, err = parseDataBlockedFrame(r, p.version)
+ frame, err = parseDataBlockedFrame(r, v)
case 0x15:
- frame, err = parseStreamDataBlockedFrame(r, p.version)
+ frame, err = parseStreamDataBlockedFrame(r, v)
case 0x16, 0x17:
- frame, err = parseStreamsBlockedFrame(r, p.version)
+ frame, err = parseStreamsBlockedFrame(r, v)
case 0x18:
- frame, err = parseNewConnectionIDFrame(r, p.version)
+ frame, err = parseNewConnectionIDFrame(r, v)
case 0x19:
- frame, err = parseRetireConnectionIDFrame(r, p.version)
+ frame, err = parseRetireConnectionIDFrame(r, v)
case 0x1a:
- frame, err = parsePathChallengeFrame(r, p.version)
+ frame, err = parsePathChallengeFrame(r, v)
case 0x1b:
- frame, err = parsePathResponseFrame(r, p.version)
+ frame, err = parsePathResponseFrame(r, v)
case 0x1c, 0x1d:
- frame, err = parseConnectionCloseFrame(r, p.version)
+ frame, err = parseConnectionCloseFrame(r, v)
case 0x1e:
- frame, err = parseHandshakeDoneFrame(r, p.version)
+ frame, err = parseHandshakeDoneFrame(r, v)
case 0x30, 0x31:
if p.supportsDatagrams {
- frame, err = parseDatagramFrame(r, p.version)
+ frame, err = parseDatagramFrame(r, v)
break
}
fallthrough
diff --git a/vendor/github.com/lucas-clemente/quic-go/internal/wire/handshake_done_frame.go b/vendor/github.com/quic-go/quic-go/internal/wire/handshake_done_frame.go
similarity index 74%
rename from vendor/github.com/lucas-clemente/quic-go/internal/wire/handshake_done_frame.go
rename to vendor/github.com/quic-go/quic-go/internal/wire/handshake_done_frame.go
index 158d659f0..7bbc0e888 100644
--- a/vendor/github.com/lucas-clemente/quic-go/internal/wire/handshake_done_frame.go
+++ b/vendor/github.com/quic-go/quic-go/internal/wire/handshake_done_frame.go
@@ -3,7 +3,7 @@ package wire
import (
"bytes"
- "github.com/lucas-clemente/quic-go/internal/protocol"
+ "github.com/quic-go/quic-go/internal/protocol"
)
// A HandshakeDoneFrame is a HANDSHAKE_DONE frame
@@ -17,9 +17,8 @@ func parseHandshakeDoneFrame(r *bytes.Reader, _ protocol.VersionNumber) (*Handsh
return &HandshakeDoneFrame{}, nil
}
-func (f *HandshakeDoneFrame) Write(b *bytes.Buffer, _ protocol.VersionNumber) error {
- b.WriteByte(0x1e)
- return nil
+func (f *HandshakeDoneFrame) Append(b []byte, _ protocol.VersionNumber) ([]byte, error) {
+ return append(b, 0x1e), nil
}
// Length of a written frame
diff --git a/vendor/github.com/lucas-clemente/quic-go/internal/wire/header.go b/vendor/github.com/quic-go/quic-go/internal/wire/header.go
similarity index 62%
rename from vendor/github.com/lucas-clemente/quic-go/internal/wire/header.go
rename to vendor/github.com/quic-go/quic-go/internal/wire/header.go
index f6a31ee0e..4d3c5049a 100644
--- a/vendor/github.com/lucas-clemente/quic-go/internal/wire/header.go
+++ b/vendor/github.com/quic-go/quic-go/internal/wire/header.go
@@ -7,9 +7,9 @@ import (
"fmt"
"io"
- "github.com/lucas-clemente/quic-go/internal/protocol"
- "github.com/lucas-clemente/quic-go/internal/utils"
- "github.com/lucas-clemente/quic-go/quicvarint"
+ "github.com/quic-go/quic-go/internal/protocol"
+ "github.com/quic-go/quic-go/internal/utils"
+ "github.com/quic-go/quic-go/quicvarint"
)
// ParseConnectionID parses the destination connection ID of a packet.
@@ -17,23 +17,77 @@ import (
// That means that the connection ID must not be used after the packet buffer is released.
func ParseConnectionID(data []byte, shortHeaderConnIDLen int) (protocol.ConnectionID, error) {
if len(data) == 0 {
- return nil, io.EOF
+ return protocol.ConnectionID{}, io.EOF
}
- isLongHeader := data[0]&0x80 > 0
- if !isLongHeader {
+ if !IsLongHeaderPacket(data[0]) {
if len(data) < shortHeaderConnIDLen+1 {
- return nil, io.EOF
+ return protocol.ConnectionID{}, io.EOF
}
- return protocol.ConnectionID(data[1 : 1+shortHeaderConnIDLen]), nil
+ return protocol.ParseConnectionID(data[1 : 1+shortHeaderConnIDLen]), nil
}
if len(data) < 6 {
- return nil, io.EOF
+ return protocol.ConnectionID{}, io.EOF
}
destConnIDLen := int(data[5])
+ if destConnIDLen > protocol.MaxConnIDLen {
+ return protocol.ConnectionID{}, protocol.ErrInvalidConnectionIDLen
+ }
if len(data) < 6+destConnIDLen {
- return nil, io.EOF
+ return protocol.ConnectionID{}, io.EOF
+ }
+ return protocol.ParseConnectionID(data[6 : 6+destConnIDLen]), nil
+}
+
+// ParseArbitraryLenConnectionIDs parses the most general form of a Long Header packet,
+// using only the version-independent packet format as described in Section 5.1 of RFC 8999:
+// https://datatracker.ietf.org/doc/html/rfc8999#section-5.1.
+// This function should only be called on Long Header packets for which we don't support the version.
+func ParseArbitraryLenConnectionIDs(data []byte) (bytesParsed int, dest, src protocol.ArbitraryLenConnectionID, _ error) {
+ r := bytes.NewReader(data)
+ remaining := r.Len()
+ src, dest, err := parseArbitraryLenConnectionIDs(r)
+ return remaining - r.Len(), src, dest, err
+}
+
+func parseArbitraryLenConnectionIDs(r *bytes.Reader) (dest, src protocol.ArbitraryLenConnectionID, _ error) {
+ r.Seek(5, io.SeekStart) // skip first byte and version field
+ destConnIDLen, err := r.ReadByte()
+ if err != nil {
+ return nil, nil, err
+ }
+ destConnID := make(protocol.ArbitraryLenConnectionID, destConnIDLen)
+ if _, err := io.ReadFull(r, destConnID); err != nil {
+ if err == io.ErrUnexpectedEOF {
+ err = io.EOF
+ }
+ return nil, nil, err
+ }
+ srcConnIDLen, err := r.ReadByte()
+ if err != nil {
+ return nil, nil, err
+ }
+ srcConnID := make(protocol.ArbitraryLenConnectionID, srcConnIDLen)
+ if _, err := io.ReadFull(r, srcConnID); err != nil {
+ if err == io.ErrUnexpectedEOF {
+ err = io.EOF
+ }
+ return nil, nil, err
+ }
+ return destConnID, srcConnID, nil
+}
+
+// IsLongHeaderPacket says if this is a Long Header packet
+func IsLongHeaderPacket(firstByte byte) bool {
+ return firstByte&0x80 > 0
+}
+
+// ParseVersion parses the QUIC version.
+// It should only be called for Long Header packets (Short Header packets don't contain a version number).
+func ParseVersion(data []byte) (protocol.VersionNumber, error) {
+ if len(data) < 5 {
+ return 0, io.EOF
}
- return protocol.ConnectionID(data[6 : 6+destConnIDLen]), nil
+ return protocol.VersionNumber(binary.BigEndian.Uint32(data[1:5])), nil
}
// IsVersionNegotiationPacket says if this is a version negotiation packet
@@ -41,7 +95,7 @@ func IsVersionNegotiationPacket(b []byte) bool {
if len(b) < 5 {
return false
}
- return b[0]&0x80 > 0 && b[1] == 0 && b[2] == 0 && b[3] == 0 && b[4] == 0
+ return IsLongHeaderPacket(b[0]) && b[1] == 0 && b[2] == 0 && b[3] == 0 && b[4] == 0
}
// Is0RTTPacket says if this is a 0-RTT packet.
@@ -50,7 +104,7 @@ func Is0RTTPacket(b []byte) bool {
if len(b) < 5 {
return false
}
- if b[0]&0x80 == 0 {
+ if !IsLongHeaderPacket(b[0]) {
return false
}
version := protocol.VersionNumber(binary.BigEndian.Uint32(b[1:5]))
@@ -67,9 +121,8 @@ var ErrUnsupportedVersion = errors.New("unsupported version")
// The Header is the version independent part of the header
type Header struct {
- IsLongHeader bool
- typeByte byte
- Type protocol.PacketType
+ typeByte byte
+ Type protocol.PacketType
Version protocol.VersionNumber
SrcConnectionID protocol.ConnectionID
@@ -86,24 +139,22 @@ type Header struct {
// If the packet has a long header, the packet is cut according to the length field.
// If we understand the version, the packet is header up unto the packet number.
// Otherwise, only the invariant part of the header is parsed.
-func ParsePacket(data []byte, shortHeaderConnIDLen int) (*Header, []byte /* packet data */, []byte /* rest */, error) {
- hdr, err := parseHeader(bytes.NewReader(data), shortHeaderConnIDLen)
+func ParsePacket(data []byte) (*Header, []byte, []byte, error) {
+ if len(data) == 0 || !IsLongHeaderPacket(data[0]) {
+ return nil, nil, nil, errors.New("not a long header packet")
+ }
+ hdr, err := parseHeader(bytes.NewReader(data))
if err != nil {
if err == ErrUnsupportedVersion {
return hdr, nil, nil, ErrUnsupportedVersion
}
return nil, nil, nil, err
}
- var rest []byte
- if hdr.IsLongHeader {
- if protocol.ByteCount(len(data)) < hdr.ParsedLen()+hdr.Length {
- return nil, nil, nil, fmt.Errorf("packet length (%d bytes) is smaller than the expected length (%d bytes)", len(data)-int(hdr.ParsedLen()), hdr.Length)
- }
- packetLen := int(hdr.ParsedLen() + hdr.Length)
- rest = data[packetLen:]
- data = data[:packetLen]
+ if protocol.ByteCount(len(data)) < hdr.ParsedLen()+hdr.Length {
+ return nil, nil, nil, fmt.Errorf("packet length (%d bytes) is smaller than the expected length (%d bytes)", len(data)-int(hdr.ParsedLen()), hdr.Length)
}
- return hdr, data, rest, nil
+ packetLen := int(hdr.ParsedLen() + hdr.Length)
+ return hdr, data[:packetLen], data[packetLen:], nil
}
// ParseHeader parses the header.
@@ -111,43 +162,17 @@ func ParsePacket(data []byte, shortHeaderConnIDLen int) (*Header, []byte /* pack
// For long header packets:
// * if we understand the version: up to the packet number
// * if not, only the invariant part of the header
-func parseHeader(b *bytes.Reader, shortHeaderConnIDLen int) (*Header, error) {
+func parseHeader(b *bytes.Reader) (*Header, error) {
startLen := b.Len()
- h, err := parseHeaderImpl(b, shortHeaderConnIDLen)
- if err != nil {
- return h, err
- }
- h.parsedLen = protocol.ByteCount(startLen - b.Len())
- return h, err
-}
-
-func parseHeaderImpl(b *bytes.Reader, shortHeaderConnIDLen int) (*Header, error) {
typeByte, err := b.ReadByte()
if err != nil {
return nil, err
}
- h := &Header{
- typeByte: typeByte,
- IsLongHeader: typeByte&0x80 > 0,
- }
-
- if !h.IsLongHeader {
- if h.typeByte&0x40 == 0 {
- return nil, errors.New("not a QUIC packet")
- }
- if err := h.parseShortHeader(b, shortHeaderConnIDLen); err != nil {
- return nil, err
- }
- return h, nil
- }
- return h, h.parseLongHeader(b)
-}
-
-func (h *Header) parseShortHeader(b *bytes.Reader, shortHeaderConnIDLen int) error {
- var err error
- h.DestConnectionID, err = protocol.ReadConnectionID(b, shortHeaderConnIDLen)
- return err
+ h := &Header{typeByte: typeByte}
+ err = h.parseLongHeader(b)
+ h.parsedLen = protocol.ByteCount(startLen - b.Len())
+ return h, err
}
func (h *Header) parseLongHeader(b *bytes.Reader) error {
@@ -267,8 +292,5 @@ func (h *Header) toExtendedHeader() *ExtendedHeader {
// PacketType is the type of the packet, for logging purposes
func (h *Header) PacketType() string {
- if h.IsLongHeader {
- return h.Type.String()
- }
- return "1-RTT"
+ return h.Type.String()
}
diff --git a/vendor/github.com/lucas-clemente/quic-go/internal/wire/interface.go b/vendor/github.com/quic-go/quic-go/internal/wire/interface.go
similarity index 53%
rename from vendor/github.com/lucas-clemente/quic-go/internal/wire/interface.go
rename to vendor/github.com/quic-go/quic-go/internal/wire/interface.go
index 99fdc80fb..7e0f9a03e 100644
--- a/vendor/github.com/lucas-clemente/quic-go/internal/wire/interface.go
+++ b/vendor/github.com/quic-go/quic-go/internal/wire/interface.go
@@ -1,19 +1,17 @@
package wire
import (
- "bytes"
-
- "github.com/lucas-clemente/quic-go/internal/protocol"
+ "github.com/quic-go/quic-go/internal/protocol"
)
// A Frame in QUIC
type Frame interface {
- Write(b *bytes.Buffer, version protocol.VersionNumber) error
+ Append(b []byte, version protocol.VersionNumber) ([]byte, error)
Length(version protocol.VersionNumber) protocol.ByteCount
}
// A FrameParser parses QUIC frames, one by one.
type FrameParser interface {
- ParseNext(*bytes.Reader, protocol.EncryptionLevel) (Frame, error)
+ ParseNext([]byte, protocol.EncryptionLevel, protocol.VersionNumber) (int, Frame, error)
SetAckDelayExponent(uint8)
}
diff --git a/vendor/github.com/lucas-clemente/quic-go/internal/wire/log.go b/vendor/github.com/quic-go/quic-go/internal/wire/log.go
similarity index 96%
rename from vendor/github.com/lucas-clemente/quic-go/internal/wire/log.go
rename to vendor/github.com/quic-go/quic-go/internal/wire/log.go
index 30cf94243..ec7d45d86 100644
--- a/vendor/github.com/lucas-clemente/quic-go/internal/wire/log.go
+++ b/vendor/github.com/quic-go/quic-go/internal/wire/log.go
@@ -4,8 +4,8 @@ import (
"fmt"
"strings"
- "github.com/lucas-clemente/quic-go/internal/protocol"
- "github.com/lucas-clemente/quic-go/internal/utils"
+ "github.com/quic-go/quic-go/internal/protocol"
+ "github.com/quic-go/quic-go/internal/utils"
)
// LogFrame logs a frame, either sent or received
diff --git a/vendor/github.com/lucas-clemente/quic-go/internal/wire/max_data_frame.go b/vendor/github.com/quic-go/quic-go/internal/wire/max_data_frame.go
similarity index 65%
rename from vendor/github.com/lucas-clemente/quic-go/internal/wire/max_data_frame.go
rename to vendor/github.com/quic-go/quic-go/internal/wire/max_data_frame.go
index a9a092482..427c81101 100644
--- a/vendor/github.com/lucas-clemente/quic-go/internal/wire/max_data_frame.go
+++ b/vendor/github.com/quic-go/quic-go/internal/wire/max_data_frame.go
@@ -3,8 +3,8 @@ package wire
import (
"bytes"
- "github.com/lucas-clemente/quic-go/internal/protocol"
- "github.com/lucas-clemente/quic-go/quicvarint"
+ "github.com/quic-go/quic-go/internal/protocol"
+ "github.com/quic-go/quic-go/quicvarint"
)
// A MaxDataFrame carries flow control information for the connection
@@ -28,13 +28,13 @@ func parseMaxDataFrame(r *bytes.Reader, _ protocol.VersionNumber) (*MaxDataFrame
}
// Write writes a MAX_STREAM_DATA frame
-func (f *MaxDataFrame) Write(b *bytes.Buffer, version protocol.VersionNumber) error {
- b.WriteByte(0x10)
- quicvarint.Write(b, uint64(f.MaximumData))
- return nil
+func (f *MaxDataFrame) Append(b []byte, _ protocol.VersionNumber) ([]byte, error) {
+ b = append(b, 0x10)
+ b = quicvarint.Append(b, uint64(f.MaximumData))
+ return b, nil
}
// Length of a written frame
-func (f *MaxDataFrame) Length(version protocol.VersionNumber) protocol.ByteCount {
+func (f *MaxDataFrame) Length(_ protocol.VersionNumber) protocol.ByteCount {
return 1 + quicvarint.Len(uint64(f.MaximumData))
}
diff --git a/vendor/github.com/lucas-clemente/quic-go/internal/wire/max_stream_data_frame.go b/vendor/github.com/quic-go/quic-go/internal/wire/max_stream_data_frame.go
similarity index 72%
rename from vendor/github.com/lucas-clemente/quic-go/internal/wire/max_stream_data_frame.go
rename to vendor/github.com/quic-go/quic-go/internal/wire/max_stream_data_frame.go
index 728ecbe8b..4218c09bd 100644
--- a/vendor/github.com/lucas-clemente/quic-go/internal/wire/max_stream_data_frame.go
+++ b/vendor/github.com/quic-go/quic-go/internal/wire/max_stream_data_frame.go
@@ -3,8 +3,8 @@ package wire
import (
"bytes"
- "github.com/lucas-clemente/quic-go/internal/protocol"
- "github.com/lucas-clemente/quic-go/quicvarint"
+ "github.com/quic-go/quic-go/internal/protocol"
+ "github.com/quic-go/quic-go/quicvarint"
)
// A MaxStreamDataFrame is a MAX_STREAM_DATA frame
@@ -33,11 +33,11 @@ func parseMaxStreamDataFrame(r *bytes.Reader, _ protocol.VersionNumber) (*MaxStr
}, nil
}
-func (f *MaxStreamDataFrame) Write(b *bytes.Buffer, version protocol.VersionNumber) error {
- b.WriteByte(0x11)
- quicvarint.Write(b, uint64(f.StreamID))
- quicvarint.Write(b, uint64(f.MaximumStreamData))
- return nil
+func (f *MaxStreamDataFrame) Append(b []byte, version protocol.VersionNumber) ([]byte, error) {
+ b = append(b, 0x11)
+ b = quicvarint.Append(b, uint64(f.StreamID))
+ b = quicvarint.Append(b, uint64(f.MaximumStreamData))
+ return b, nil
}
// Length of a written frame
diff --git a/vendor/github.com/lucas-clemente/quic-go/internal/wire/max_streams_frame.go b/vendor/github.com/quic-go/quic-go/internal/wire/max_streams_frame.go
similarity index 77%
rename from vendor/github.com/lucas-clemente/quic-go/internal/wire/max_streams_frame.go
rename to vendor/github.com/quic-go/quic-go/internal/wire/max_streams_frame.go
index 73d7e13ea..f417127c4 100644
--- a/vendor/github.com/lucas-clemente/quic-go/internal/wire/max_streams_frame.go
+++ b/vendor/github.com/quic-go/quic-go/internal/wire/max_streams_frame.go
@@ -4,8 +4,8 @@ import (
"bytes"
"fmt"
- "github.com/lucas-clemente/quic-go/internal/protocol"
- "github.com/lucas-clemente/quic-go/quicvarint"
+ "github.com/quic-go/quic-go/internal/protocol"
+ "github.com/quic-go/quic-go/quicvarint"
)
// A MaxStreamsFrame is a MAX_STREAMS frame
@@ -38,15 +38,15 @@ func parseMaxStreamsFrame(r *bytes.Reader, _ protocol.VersionNumber) (*MaxStream
return f, nil
}
-func (f *MaxStreamsFrame) Write(b *bytes.Buffer, _ protocol.VersionNumber) error {
+func (f *MaxStreamsFrame) Append(b []byte, _ protocol.VersionNumber) ([]byte, error) {
switch f.Type {
case protocol.StreamTypeBidi:
- b.WriteByte(0x12)
+ b = append(b, 0x12)
case protocol.StreamTypeUni:
- b.WriteByte(0x13)
+ b = append(b, 0x13)
}
- quicvarint.Write(b, uint64(f.MaxStreamNum))
- return nil
+ b = quicvarint.Append(b, uint64(f.MaxStreamNum))
+ return b, nil
}
// Length of a written frame
diff --git a/vendor/github.com/lucas-clemente/quic-go/internal/wire/new_connection_id_frame.go b/vendor/github.com/quic-go/quic-go/internal/wire/new_connection_id_frame.go
similarity index 76%
rename from vendor/github.com/lucas-clemente/quic-go/internal/wire/new_connection_id_frame.go
rename to vendor/github.com/quic-go/quic-go/internal/wire/new_connection_id_frame.go
index 1a017ba99..5f6ab9980 100644
--- a/vendor/github.com/lucas-clemente/quic-go/internal/wire/new_connection_id_frame.go
+++ b/vendor/github.com/quic-go/quic-go/internal/wire/new_connection_id_frame.go
@@ -5,8 +5,8 @@ import (
"fmt"
"io"
- "github.com/lucas-clemente/quic-go/internal/protocol"
- "github.com/lucas-clemente/quic-go/quicvarint"
+ "github.com/quic-go/quic-go/internal/protocol"
+ "github.com/quic-go/quic-go/quicvarint"
)
// A NewConnectionIDFrame is a NEW_CONNECTION_ID frame
@@ -38,9 +38,6 @@ func parseNewConnectionIDFrame(r *bytes.Reader, _ protocol.VersionNumber) (*NewC
if err != nil {
return nil, err
}
- if connIDLen > protocol.MaxConnIDLen {
- return nil, fmt.Errorf("invalid connection ID length: %d", connIDLen)
- }
connID, err := protocol.ReadConnectionID(r, int(connIDLen))
if err != nil {
return nil, err
@@ -60,18 +57,18 @@ func parseNewConnectionIDFrame(r *bytes.Reader, _ protocol.VersionNumber) (*NewC
return frame, nil
}
-func (f *NewConnectionIDFrame) Write(b *bytes.Buffer, _ protocol.VersionNumber) error {
- b.WriteByte(0x18)
- quicvarint.Write(b, f.SequenceNumber)
- quicvarint.Write(b, f.RetirePriorTo)
+func (f *NewConnectionIDFrame) Append(b []byte, _ protocol.VersionNumber) ([]byte, error) {
+ b = append(b, 0x18)
+ b = quicvarint.Append(b, f.SequenceNumber)
+ b = quicvarint.Append(b, f.RetirePriorTo)
connIDLen := f.ConnectionID.Len()
if connIDLen > protocol.MaxConnIDLen {
- return fmt.Errorf("invalid connection ID length: %d", connIDLen)
+ return nil, fmt.Errorf("invalid connection ID length: %d", connIDLen)
}
- b.WriteByte(uint8(connIDLen))
- b.Write(f.ConnectionID.Bytes())
- b.Write(f.StatelessResetToken[:])
- return nil
+ b = append(b, uint8(connIDLen))
+ b = append(b, f.ConnectionID.Bytes()...)
+ b = append(b, f.StatelessResetToken[:]...)
+ return b, nil
}
// Length of a written frame
diff --git a/vendor/github.com/lucas-clemente/quic-go/internal/wire/new_token_frame.go b/vendor/github.com/quic-go/quic-go/internal/wire/new_token_frame.go
similarity index 74%
rename from vendor/github.com/lucas-clemente/quic-go/internal/wire/new_token_frame.go
rename to vendor/github.com/quic-go/quic-go/internal/wire/new_token_frame.go
index 3d5d5c3a1..cc1d58196 100644
--- a/vendor/github.com/lucas-clemente/quic-go/internal/wire/new_token_frame.go
+++ b/vendor/github.com/quic-go/quic-go/internal/wire/new_token_frame.go
@@ -5,8 +5,8 @@ import (
"errors"
"io"
- "github.com/lucas-clemente/quic-go/internal/protocol"
- "github.com/lucas-clemente/quic-go/quicvarint"
+ "github.com/quic-go/quic-go/internal/protocol"
+ "github.com/quic-go/quic-go/quicvarint"
)
// A NewTokenFrame is a NEW_TOKEN frame
@@ -35,11 +35,11 @@ func parseNewTokenFrame(r *bytes.Reader, _ protocol.VersionNumber) (*NewTokenFra
return &NewTokenFrame{Token: token}, nil
}
-func (f *NewTokenFrame) Write(b *bytes.Buffer, _ protocol.VersionNumber) error {
- b.WriteByte(0x7)
- quicvarint.Write(b, uint64(len(f.Token)))
- b.Write(f.Token)
- return nil
+func (f *NewTokenFrame) Append(b []byte, _ protocol.VersionNumber) ([]byte, error) {
+ b = append(b, 0x7)
+ b = quicvarint.Append(b, uint64(len(f.Token)))
+ b = append(b, f.Token...)
+ return b, nil
}
// Length of a written frame
diff --git a/vendor/github.com/lucas-clemente/quic-go/internal/wire/path_challenge_frame.go b/vendor/github.com/quic-go/quic-go/internal/wire/path_challenge_frame.go
similarity index 75%
rename from vendor/github.com/lucas-clemente/quic-go/internal/wire/path_challenge_frame.go
rename to vendor/github.com/quic-go/quic-go/internal/wire/path_challenge_frame.go
index 5ec821772..5d32865e2 100644
--- a/vendor/github.com/lucas-clemente/quic-go/internal/wire/path_challenge_frame.go
+++ b/vendor/github.com/quic-go/quic-go/internal/wire/path_challenge_frame.go
@@ -4,7 +4,7 @@ import (
"bytes"
"io"
- "github.com/lucas-clemente/quic-go/internal/protocol"
+ "github.com/quic-go/quic-go/internal/protocol"
)
// A PathChallengeFrame is a PATH_CHALLENGE frame
@@ -26,10 +26,10 @@ func parsePathChallengeFrame(r *bytes.Reader, _ protocol.VersionNumber) (*PathCh
return frame, nil
}
-func (f *PathChallengeFrame) Write(b *bytes.Buffer, _ protocol.VersionNumber) error {
- b.WriteByte(0x1a)
- b.Write(f.Data[:])
- return nil
+func (f *PathChallengeFrame) Append(b []byte, _ protocol.VersionNumber) ([]byte, error) {
+ b = append(b, 0x1a)
+ b = append(b, f.Data[:]...)
+ return b, nil
}
// Length of a written frame
diff --git a/vendor/github.com/lucas-clemente/quic-go/internal/wire/path_response_frame.go b/vendor/github.com/quic-go/quic-go/internal/wire/path_response_frame.go
similarity index 75%
rename from vendor/github.com/lucas-clemente/quic-go/internal/wire/path_response_frame.go
rename to vendor/github.com/quic-go/quic-go/internal/wire/path_response_frame.go
index 262819f89..5c49e1227 100644
--- a/vendor/github.com/lucas-clemente/quic-go/internal/wire/path_response_frame.go
+++ b/vendor/github.com/quic-go/quic-go/internal/wire/path_response_frame.go
@@ -4,7 +4,7 @@ import (
"bytes"
"io"
- "github.com/lucas-clemente/quic-go/internal/protocol"
+ "github.com/quic-go/quic-go/internal/protocol"
)
// A PathResponseFrame is a PATH_RESPONSE frame
@@ -26,10 +26,10 @@ func parsePathResponseFrame(r *bytes.Reader, _ protocol.VersionNumber) (*PathRes
return frame, nil
}
-func (f *PathResponseFrame) Write(b *bytes.Buffer, _ protocol.VersionNumber) error {
- b.WriteByte(0x1b)
- b.Write(f.Data[:])
- return nil
+func (f *PathResponseFrame) Append(b []byte, _ protocol.VersionNumber) ([]byte, error) {
+ b = append(b, 0x1b)
+ b = append(b, f.Data[:]...)
+ return b, nil
}
// Length of a written frame
diff --git a/vendor/github.com/lucas-clemente/quic-go/internal/wire/ping_frame.go b/vendor/github.com/quic-go/quic-go/internal/wire/ping_frame.go
similarity index 55%
rename from vendor/github.com/lucas-clemente/quic-go/internal/wire/ping_frame.go
rename to vendor/github.com/quic-go/quic-go/internal/wire/ping_frame.go
index dc029e45f..ba32d1670 100644
--- a/vendor/github.com/lucas-clemente/quic-go/internal/wire/ping_frame.go
+++ b/vendor/github.com/quic-go/quic-go/internal/wire/ping_frame.go
@@ -3,7 +3,7 @@ package wire
import (
"bytes"
- "github.com/lucas-clemente/quic-go/internal/protocol"
+ "github.com/quic-go/quic-go/internal/protocol"
)
// A PingFrame is a PING frame
@@ -16,12 +16,11 @@ func parsePingFrame(r *bytes.Reader, _ protocol.VersionNumber) (*PingFrame, erro
return &PingFrame{}, nil
}
-func (f *PingFrame) Write(b *bytes.Buffer, version protocol.VersionNumber) error {
- b.WriteByte(0x1)
- return nil
+func (f *PingFrame) Append(b []byte, _ protocol.VersionNumber) ([]byte, error) {
+ return append(b, 0x1), nil
}
// Length of a written frame
-func (f *PingFrame) Length(version protocol.VersionNumber) protocol.ByteCount {
+func (f *PingFrame) Length(_ protocol.VersionNumber) protocol.ByteCount {
return 1
}
diff --git a/vendor/github.com/lucas-clemente/quic-go/internal/wire/pool.go b/vendor/github.com/quic-go/quic-go/internal/wire/pool.go
similarity index 90%
rename from vendor/github.com/lucas-clemente/quic-go/internal/wire/pool.go
rename to vendor/github.com/quic-go/quic-go/internal/wire/pool.go
index c057395e7..18ab43793 100644
--- a/vendor/github.com/lucas-clemente/quic-go/internal/wire/pool.go
+++ b/vendor/github.com/quic-go/quic-go/internal/wire/pool.go
@@ -3,7 +3,7 @@ package wire
import (
"sync"
- "github.com/lucas-clemente/quic-go/internal/protocol"
+ "github.com/quic-go/quic-go/internal/protocol"
)
var pool sync.Pool
diff --git a/vendor/github.com/lucas-clemente/quic-go/internal/wire/reset_stream_frame.go b/vendor/github.com/quic-go/quic-go/internal/wire/reset_stream_frame.go
similarity index 74%
rename from vendor/github.com/lucas-clemente/quic-go/internal/wire/reset_stream_frame.go
rename to vendor/github.com/quic-go/quic-go/internal/wire/reset_stream_frame.go
index 69bbc2b9d..462138130 100644
--- a/vendor/github.com/lucas-clemente/quic-go/internal/wire/reset_stream_frame.go
+++ b/vendor/github.com/quic-go/quic-go/internal/wire/reset_stream_frame.go
@@ -3,9 +3,9 @@ package wire
import (
"bytes"
- "github.com/lucas-clemente/quic-go/internal/protocol"
- "github.com/lucas-clemente/quic-go/internal/qerr"
- "github.com/lucas-clemente/quic-go/quicvarint"
+ "github.com/quic-go/quic-go/internal/protocol"
+ "github.com/quic-go/quic-go/internal/qerr"
+ "github.com/quic-go/quic-go/quicvarint"
)
// A ResetStreamFrame is a RESET_STREAM frame in QUIC
@@ -44,12 +44,12 @@ func parseResetStreamFrame(r *bytes.Reader, _ protocol.VersionNumber) (*ResetStr
}, nil
}
-func (f *ResetStreamFrame) Write(b *bytes.Buffer, _ protocol.VersionNumber) error {
- b.WriteByte(0x4)
- quicvarint.Write(b, uint64(f.StreamID))
- quicvarint.Write(b, uint64(f.ErrorCode))
- quicvarint.Write(b, uint64(f.FinalSize))
- return nil
+func (f *ResetStreamFrame) Append(b []byte, _ protocol.VersionNumber) ([]byte, error) {
+ b = append(b, 0x4)
+ b = quicvarint.Append(b, uint64(f.StreamID))
+ b = quicvarint.Append(b, uint64(f.ErrorCode))
+ b = quicvarint.Append(b, uint64(f.FinalSize))
+ return b, nil
}
// Length of a written frame
diff --git a/vendor/github.com/lucas-clemente/quic-go/internal/wire/retire_connection_id_frame.go b/vendor/github.com/quic-go/quic-go/internal/wire/retire_connection_id_frame.go
similarity index 70%
rename from vendor/github.com/lucas-clemente/quic-go/internal/wire/retire_connection_id_frame.go
rename to vendor/github.com/quic-go/quic-go/internal/wire/retire_connection_id_frame.go
index 0f7e58c87..3e4f58ac3 100644
--- a/vendor/github.com/lucas-clemente/quic-go/internal/wire/retire_connection_id_frame.go
+++ b/vendor/github.com/quic-go/quic-go/internal/wire/retire_connection_id_frame.go
@@ -3,8 +3,8 @@ package wire
import (
"bytes"
- "github.com/lucas-clemente/quic-go/internal/protocol"
- "github.com/lucas-clemente/quic-go/quicvarint"
+ "github.com/quic-go/quic-go/internal/protocol"
+ "github.com/quic-go/quic-go/quicvarint"
)
// A RetireConnectionIDFrame is a RETIRE_CONNECTION_ID frame
@@ -24,10 +24,10 @@ func parseRetireConnectionIDFrame(r *bytes.Reader, _ protocol.VersionNumber) (*R
return &RetireConnectionIDFrame{SequenceNumber: seq}, nil
}
-func (f *RetireConnectionIDFrame) Write(b *bytes.Buffer, _ protocol.VersionNumber) error {
- b.WriteByte(0x19)
- quicvarint.Write(b, f.SequenceNumber)
- return nil
+func (f *RetireConnectionIDFrame) Append(b []byte, _ protocol.VersionNumber) ([]byte, error) {
+ b = append(b, 0x19)
+ b = quicvarint.Append(b, f.SequenceNumber)
+ return b, nil
}
// Length of a written frame
diff --git a/vendor/github.com/quic-go/quic-go/internal/wire/short_header.go b/vendor/github.com/quic-go/quic-go/internal/wire/short_header.go
new file mode 100644
index 000000000..69aa83411
--- /dev/null
+++ b/vendor/github.com/quic-go/quic-go/internal/wire/short_header.go
@@ -0,0 +1,73 @@
+package wire
+
+import (
+ "errors"
+ "fmt"
+ "io"
+
+ "github.com/quic-go/quic-go/internal/protocol"
+ "github.com/quic-go/quic-go/internal/utils"
+)
+
+// ParseShortHeader parses a short header packet.
+// It must be called after header protection was removed.
+// Otherwise, the check for the reserved bits will (most likely) fail.
+func ParseShortHeader(data []byte, connIDLen int) (length int, _ protocol.PacketNumber, _ protocol.PacketNumberLen, _ protocol.KeyPhaseBit, _ error) {
+ if len(data) == 0 {
+ return 0, 0, 0, 0, io.EOF
+ }
+ if data[0]&0x80 > 0 {
+ return 0, 0, 0, 0, errors.New("not a short header packet")
+ }
+ if data[0]&0x40 == 0 {
+ return 0, 0, 0, 0, errors.New("not a QUIC packet")
+ }
+ pnLen := protocol.PacketNumberLen(data[0]&0b11) + 1
+ if len(data) < 1+int(pnLen)+connIDLen {
+ return 0, 0, 0, 0, io.EOF
+ }
+
+ pos := 1 + connIDLen
+ var pn protocol.PacketNumber
+ switch pnLen {
+ case protocol.PacketNumberLen1:
+ pn = protocol.PacketNumber(data[pos])
+ case protocol.PacketNumberLen2:
+ pn = protocol.PacketNumber(utils.BigEndian.Uint16(data[pos : pos+2]))
+ case protocol.PacketNumberLen3:
+ pn = protocol.PacketNumber(utils.BigEndian.Uint24(data[pos : pos+3]))
+ case protocol.PacketNumberLen4:
+ pn = protocol.PacketNumber(utils.BigEndian.Uint32(data[pos : pos+4]))
+ default:
+ return 0, 0, 0, 0, fmt.Errorf("invalid packet number length: %d", pnLen)
+ }
+ kp := protocol.KeyPhaseZero
+ if data[0]&0b100 > 0 {
+ kp = protocol.KeyPhaseOne
+ }
+
+ var err error
+ if data[0]&0x18 != 0 {
+ err = ErrInvalidReservedBits
+ }
+ return 1 + connIDLen + int(pnLen), pn, pnLen, kp, err
+}
+
+// AppendShortHeader writes a short header.
+func AppendShortHeader(b []byte, connID protocol.ConnectionID, pn protocol.PacketNumber, pnLen protocol.PacketNumberLen, kp protocol.KeyPhaseBit) ([]byte, error) {
+ typeByte := 0x40 | uint8(pnLen-1)
+ if kp == protocol.KeyPhaseOne {
+ typeByte |= byte(1 << 2)
+ }
+ b = append(b, typeByte)
+ b = append(b, connID.Bytes()...)
+ return appendPacketNumber(b, pn, pnLen)
+}
+
+func ShortHeaderLen(dest protocol.ConnectionID, pnLen protocol.PacketNumberLen) protocol.ByteCount {
+ return 1 + protocol.ByteCount(dest.Len()) + protocol.ByteCount(pnLen)
+}
+
+func LogShortHeader(logger utils.Logger, dest protocol.ConnectionID, pn protocol.PacketNumber, pnLen protocol.PacketNumberLen, kp protocol.KeyPhaseBit) {
+ logger.Debugf("\tShort Header{DestConnectionID: %s, PacketNumber: %d, PacketNumberLen: %d, KeyPhase: %s}", dest, pn, pnLen, kp)
+}
diff --git a/vendor/github.com/lucas-clemente/quic-go/internal/wire/stop_sending_frame.go b/vendor/github.com/quic-go/quic-go/internal/wire/stop_sending_frame.go
similarity index 71%
rename from vendor/github.com/lucas-clemente/quic-go/internal/wire/stop_sending_frame.go
rename to vendor/github.com/quic-go/quic-go/internal/wire/stop_sending_frame.go
index fb1160c1b..e47a0f4a8 100644
--- a/vendor/github.com/lucas-clemente/quic-go/internal/wire/stop_sending_frame.go
+++ b/vendor/github.com/quic-go/quic-go/internal/wire/stop_sending_frame.go
@@ -3,9 +3,9 @@ package wire
import (
"bytes"
- "github.com/lucas-clemente/quic-go/internal/protocol"
- "github.com/lucas-clemente/quic-go/internal/qerr"
- "github.com/lucas-clemente/quic-go/quicvarint"
+ "github.com/quic-go/quic-go/internal/protocol"
+ "github.com/quic-go/quic-go/internal/qerr"
+ "github.com/quic-go/quic-go/quicvarint"
)
// A StopSendingFrame is a STOP_SENDING frame
@@ -40,9 +40,9 @@ func (f *StopSendingFrame) Length(_ protocol.VersionNumber) protocol.ByteCount {
return 1 + quicvarint.Len(uint64(f.StreamID)) + quicvarint.Len(uint64(f.ErrorCode))
}
-func (f *StopSendingFrame) Write(b *bytes.Buffer, _ protocol.VersionNumber) error {
- b.WriteByte(0x5)
- quicvarint.Write(b, uint64(f.StreamID))
- quicvarint.Write(b, uint64(f.ErrorCode))
- return nil
+func (f *StopSendingFrame) Append(b []byte, _ protocol.VersionNumber) ([]byte, error) {
+ b = append(b, 0x5)
+ b = quicvarint.Append(b, uint64(f.StreamID))
+ b = quicvarint.Append(b, uint64(f.ErrorCode))
+ return b, nil
}
diff --git a/vendor/github.com/lucas-clemente/quic-go/internal/wire/stream_data_blocked_frame.go b/vendor/github.com/quic-go/quic-go/internal/wire/stream_data_blocked_frame.go
similarity index 73%
rename from vendor/github.com/lucas-clemente/quic-go/internal/wire/stream_data_blocked_frame.go
rename to vendor/github.com/quic-go/quic-go/internal/wire/stream_data_blocked_frame.go
index dc6d631a5..2d3fb07e0 100644
--- a/vendor/github.com/lucas-clemente/quic-go/internal/wire/stream_data_blocked_frame.go
+++ b/vendor/github.com/quic-go/quic-go/internal/wire/stream_data_blocked_frame.go
@@ -3,8 +3,8 @@ package wire
import (
"bytes"
- "github.com/lucas-clemente/quic-go/internal/protocol"
- "github.com/lucas-clemente/quic-go/quicvarint"
+ "github.com/quic-go/quic-go/internal/protocol"
+ "github.com/quic-go/quic-go/quicvarint"
)
// A StreamDataBlockedFrame is a STREAM_DATA_BLOCKED frame
@@ -33,11 +33,11 @@ func parseStreamDataBlockedFrame(r *bytes.Reader, _ protocol.VersionNumber) (*St
}, nil
}
-func (f *StreamDataBlockedFrame) Write(b *bytes.Buffer, version protocol.VersionNumber) error {
- b.WriteByte(0x15)
- quicvarint.Write(b, uint64(f.StreamID))
- quicvarint.Write(b, uint64(f.MaximumStreamData))
- return nil
+func (f *StreamDataBlockedFrame) Append(b []byte, _ protocol.VersionNumber) ([]byte, error) {
+ b = append(b, 0x15)
+ b = quicvarint.Append(b, uint64(f.StreamID))
+ b = quicvarint.Append(b, uint64(f.MaximumStreamData))
+ return b, nil
}
// Length of a written frame
diff --git a/vendor/github.com/lucas-clemente/quic-go/internal/wire/stream_frame.go b/vendor/github.com/quic-go/quic-go/internal/wire/stream_frame.go
similarity index 87%
rename from vendor/github.com/lucas-clemente/quic-go/internal/wire/stream_frame.go
rename to vendor/github.com/quic-go/quic-go/internal/wire/stream_frame.go
index 66340d169..ebf3101c4 100644
--- a/vendor/github.com/lucas-clemente/quic-go/internal/wire/stream_frame.go
+++ b/vendor/github.com/quic-go/quic-go/internal/wire/stream_frame.go
@@ -5,8 +5,8 @@ import (
"errors"
"io"
- "github.com/lucas-clemente/quic-go/internal/protocol"
- "github.com/lucas-clemente/quic-go/quicvarint"
+ "github.com/quic-go/quic-go/internal/protocol"
+ "github.com/quic-go/quic-go/quicvarint"
)
// A StreamFrame of QUIC
@@ -26,9 +26,9 @@ func parseStreamFrame(r *bytes.Reader, _ protocol.VersionNumber) (*StreamFrame,
return nil, err
}
- hasOffset := typeByte&0x4 > 0
- fin := typeByte&0x1 > 0
- hasDataLen := typeByte&0x2 > 0
+ hasOffset := typeByte&0b100 > 0
+ fin := typeByte&0b1 > 0
+ hasDataLen := typeByte&0b10 > 0
streamID, err := quicvarint.Read(r)
if err != nil {
@@ -84,32 +84,32 @@ func parseStreamFrame(r *bytes.Reader, _ protocol.VersionNumber) (*StreamFrame,
}
// Write writes a STREAM frame
-func (f *StreamFrame) Write(b *bytes.Buffer, version protocol.VersionNumber) error {
+func (f *StreamFrame) Append(b []byte, _ protocol.VersionNumber) ([]byte, error) {
if len(f.Data) == 0 && !f.Fin {
- return errors.New("StreamFrame: attempting to write empty frame without FIN")
+ return nil, errors.New("StreamFrame: attempting to write empty frame without FIN")
}
typeByte := byte(0x8)
if f.Fin {
- typeByte ^= 0x1
+ typeByte ^= 0b1
}
hasOffset := f.Offset != 0
if f.DataLenPresent {
- typeByte ^= 0x2
+ typeByte ^= 0b10
}
if hasOffset {
- typeByte ^= 0x4
+ typeByte ^= 0b100
}
- b.WriteByte(typeByte)
- quicvarint.Write(b, uint64(f.StreamID))
+ b = append(b, typeByte)
+ b = quicvarint.Append(b, uint64(f.StreamID))
if hasOffset {
- quicvarint.Write(b, uint64(f.Offset))
+ b = quicvarint.Append(b, uint64(f.Offset))
}
if f.DataLenPresent {
- quicvarint.Write(b, uint64(f.DataLen()))
+ b = quicvarint.Append(b, uint64(f.DataLen()))
}
- b.Write(f.Data)
- return nil
+ b = append(b, f.Data...)
+ return b, nil
}
// Length returns the total length of the STREAM frame
diff --git a/vendor/github.com/lucas-clemente/quic-go/internal/wire/streams_blocked_frame.go b/vendor/github.com/quic-go/quic-go/internal/wire/streams_blocked_frame.go
similarity index 78%
rename from vendor/github.com/lucas-clemente/quic-go/internal/wire/streams_blocked_frame.go
rename to vendor/github.com/quic-go/quic-go/internal/wire/streams_blocked_frame.go
index f4066071f..5e556cb89 100644
--- a/vendor/github.com/lucas-clemente/quic-go/internal/wire/streams_blocked_frame.go
+++ b/vendor/github.com/quic-go/quic-go/internal/wire/streams_blocked_frame.go
@@ -4,8 +4,8 @@ import (
"bytes"
"fmt"
- "github.com/lucas-clemente/quic-go/internal/protocol"
- "github.com/lucas-clemente/quic-go/quicvarint"
+ "github.com/quic-go/quic-go/internal/protocol"
+ "github.com/quic-go/quic-go/quicvarint"
)
// A StreamsBlockedFrame is a STREAMS_BLOCKED frame
@@ -38,15 +38,15 @@ func parseStreamsBlockedFrame(r *bytes.Reader, _ protocol.VersionNumber) (*Strea
return f, nil
}
-func (f *StreamsBlockedFrame) Write(b *bytes.Buffer, _ protocol.VersionNumber) error {
+func (f *StreamsBlockedFrame) Append(b []byte, _ protocol.VersionNumber) ([]byte, error) {
switch f.Type {
case protocol.StreamTypeBidi:
- b.WriteByte(0x16)
+ b = append(b, 0x16)
case protocol.StreamTypeUni:
- b.WriteByte(0x17)
+ b = append(b, 0x17)
}
- quicvarint.Write(b, uint64(f.StreamLimit))
- return nil
+ b = quicvarint.Append(b, uint64(f.StreamLimit))
+ return b, nil
}
// Length of a written frame
diff --git a/vendor/github.com/lucas-clemente/quic-go/internal/wire/transport_parameters.go b/vendor/github.com/quic-go/quic-go/internal/wire/transport_parameters.go
similarity index 77%
rename from vendor/github.com/lucas-clemente/quic-go/internal/wire/transport_parameters.go
rename to vendor/github.com/quic-go/quic-go/internal/wire/transport_parameters.go
index e1f83cd65..a64638cbd 100644
--- a/vendor/github.com/lucas-clemente/quic-go/internal/wire/transport_parameters.go
+++ b/vendor/github.com/quic-go/quic-go/internal/wire/transport_parameters.go
@@ -2,6 +2,7 @@ package wire
import (
"bytes"
+ "encoding/binary"
"errors"
"fmt"
"io"
@@ -10,10 +11,10 @@ import (
"sort"
"time"
- "github.com/lucas-clemente/quic-go/internal/protocol"
- "github.com/lucas-clemente/quic-go/internal/qerr"
- "github.com/lucas-clemente/quic-go/internal/utils"
- "github.com/lucas-clemente/quic-go/quicvarint"
+ "github.com/quic-go/quic-go/internal/protocol"
+ "github.com/quic-go/quic-go/internal/qerr"
+ "github.com/quic-go/quic-go/internal/utils"
+ "github.com/quic-go/quic-go/quicvarint"
)
const transportParameterMarshalingVersion = 1
@@ -285,7 +286,7 @@ func (p *TransportParameters) readNumericTransportParameter(
return fmt.Errorf("initial_max_streams_uni too large: %d (maximum %d)", p.MaxUniStreamNum, protocol.MaxStreamCount)
}
case maxIdleTimeoutParameterID:
- p.MaxIdleTimeout = utils.MaxDuration(protocol.MinRemoteIdleTimeout, time.Duration(val)*time.Millisecond)
+ p.MaxIdleTimeout = utils.Max(protocol.MinRemoteIdleTimeout, time.Duration(val)*time.Millisecond)
case maxUDPPayloadSizeParameterID:
if val < 1200 {
return fmt.Errorf("invalid value for max_packet_size: %d (minimum 1200)", val)
@@ -302,6 +303,9 @@ func (p *TransportParameters) readNumericTransportParameter(
}
p.MaxAckDelay = time.Duration(val) * time.Millisecond
case activeConnectionIDLimitParameterID:
+ if val < 2 {
+ return fmt.Errorf("invalid value for active_connection_id_limit: %d (minimum 2)", val)
+ }
p.ActiveConnectionIDLimit = val
case maxDatagramFrameSizeParameterID:
p.MaxDatagramFrameSize = protocol.ByteCount(val)
@@ -313,94 +317,98 @@ func (p *TransportParameters) readNumericTransportParameter(
// Marshal the transport parameters
func (p *TransportParameters) Marshal(pers protocol.Perspective) []byte {
- b := &bytes.Buffer{}
+ // Typical Transport Parameters consume around 110 bytes, depending on the exact values,
+ // especially the lengths of the Connection IDs.
+ // Allocate 256 bytes, so we won't have to grow the slice in any case.
+ b := make([]byte, 0, 256)
// add a greased value
- quicvarint.Write(b, uint64(27+31*rand.Intn(100)))
+ b = quicvarint.Append(b, uint64(27+31*rand.Intn(100)))
length := rand.Intn(16)
- randomData := make([]byte, length)
- rand.Read(randomData)
- quicvarint.Write(b, uint64(length))
- b.Write(randomData)
+ b = quicvarint.Append(b, uint64(length))
+ b = b[:len(b)+length]
+ rand.Read(b[len(b)-length:])
// initial_max_stream_data_bidi_local
- p.marshalVarintParam(b, initialMaxStreamDataBidiLocalParameterID, uint64(p.InitialMaxStreamDataBidiLocal))
+ b = p.marshalVarintParam(b, initialMaxStreamDataBidiLocalParameterID, uint64(p.InitialMaxStreamDataBidiLocal))
// initial_max_stream_data_bidi_remote
- p.marshalVarintParam(b, initialMaxStreamDataBidiRemoteParameterID, uint64(p.InitialMaxStreamDataBidiRemote))
+ b = p.marshalVarintParam(b, initialMaxStreamDataBidiRemoteParameterID, uint64(p.InitialMaxStreamDataBidiRemote))
// initial_max_stream_data_uni
- p.marshalVarintParam(b, initialMaxStreamDataUniParameterID, uint64(p.InitialMaxStreamDataUni))
+ b = p.marshalVarintParam(b, initialMaxStreamDataUniParameterID, uint64(p.InitialMaxStreamDataUni))
// initial_max_data
- p.marshalVarintParam(b, initialMaxDataParameterID, uint64(p.InitialMaxData))
+ b = p.marshalVarintParam(b, initialMaxDataParameterID, uint64(p.InitialMaxData))
// initial_max_bidi_streams
- p.marshalVarintParam(b, initialMaxStreamsBidiParameterID, uint64(p.MaxBidiStreamNum))
+ b = p.marshalVarintParam(b, initialMaxStreamsBidiParameterID, uint64(p.MaxBidiStreamNum))
// initial_max_uni_streams
- p.marshalVarintParam(b, initialMaxStreamsUniParameterID, uint64(p.MaxUniStreamNum))
+ b = p.marshalVarintParam(b, initialMaxStreamsUniParameterID, uint64(p.MaxUniStreamNum))
// idle_timeout
- p.marshalVarintParam(b, maxIdleTimeoutParameterID, uint64(p.MaxIdleTimeout/time.Millisecond))
+ b = p.marshalVarintParam(b, maxIdleTimeoutParameterID, uint64(p.MaxIdleTimeout/time.Millisecond))
// max_packet_size
- p.marshalVarintParam(b, maxUDPPayloadSizeParameterID, uint64(protocol.MaxPacketBufferSize))
+ b = p.marshalVarintParam(b, maxUDPPayloadSizeParameterID, uint64(protocol.MaxPacketBufferSize))
// max_ack_delay
// Only send it if is different from the default value.
if p.MaxAckDelay != protocol.DefaultMaxAckDelay {
- p.marshalVarintParam(b, maxAckDelayParameterID, uint64(p.MaxAckDelay/time.Millisecond))
+ b = p.marshalVarintParam(b, maxAckDelayParameterID, uint64(p.MaxAckDelay/time.Millisecond))
}
// ack_delay_exponent
// Only send it if is different from the default value.
if p.AckDelayExponent != protocol.DefaultAckDelayExponent {
- p.marshalVarintParam(b, ackDelayExponentParameterID, uint64(p.AckDelayExponent))
+ b = p.marshalVarintParam(b, ackDelayExponentParameterID, uint64(p.AckDelayExponent))
}
// disable_active_migration
if p.DisableActiveMigration {
- quicvarint.Write(b, uint64(disableActiveMigrationParameterID))
- quicvarint.Write(b, 0)
+ b = quicvarint.Append(b, uint64(disableActiveMigrationParameterID))
+ b = quicvarint.Append(b, 0)
}
if pers == protocol.PerspectiveServer {
// stateless_reset_token
if p.StatelessResetToken != nil {
- quicvarint.Write(b, uint64(statelessResetTokenParameterID))
- quicvarint.Write(b, 16)
- b.Write(p.StatelessResetToken[:])
+ b = quicvarint.Append(b, uint64(statelessResetTokenParameterID))
+ b = quicvarint.Append(b, 16)
+ b = append(b, p.StatelessResetToken[:]...)
}
// original_destination_connection_id
- quicvarint.Write(b, uint64(originalDestinationConnectionIDParameterID))
- quicvarint.Write(b, uint64(p.OriginalDestinationConnectionID.Len()))
- b.Write(p.OriginalDestinationConnectionID.Bytes())
+ b = quicvarint.Append(b, uint64(originalDestinationConnectionIDParameterID))
+ b = quicvarint.Append(b, uint64(p.OriginalDestinationConnectionID.Len()))
+ b = append(b, p.OriginalDestinationConnectionID.Bytes()...)
// preferred_address
if p.PreferredAddress != nil {
- quicvarint.Write(b, uint64(preferredAddressParameterID))
- quicvarint.Write(b, 4+2+16+2+1+uint64(p.PreferredAddress.ConnectionID.Len())+16)
+ b = quicvarint.Append(b, uint64(preferredAddressParameterID))
+ b = quicvarint.Append(b, 4+2+16+2+1+uint64(p.PreferredAddress.ConnectionID.Len())+16)
ipv4 := p.PreferredAddress.IPv4
- b.Write(ipv4[len(ipv4)-4:])
- utils.BigEndian.WriteUint16(b, p.PreferredAddress.IPv4Port)
- b.Write(p.PreferredAddress.IPv6)
- utils.BigEndian.WriteUint16(b, p.PreferredAddress.IPv6Port)
- b.WriteByte(uint8(p.PreferredAddress.ConnectionID.Len()))
- b.Write(p.PreferredAddress.ConnectionID.Bytes())
- b.Write(p.PreferredAddress.StatelessResetToken[:])
+ b = append(b, ipv4[len(ipv4)-4:]...)
+ b = append(b, []byte{0, 0}...)
+ binary.BigEndian.PutUint16(b[len(b)-2:], p.PreferredAddress.IPv4Port)
+ b = append(b, p.PreferredAddress.IPv6...)
+ b = append(b, []byte{0, 0}...)
+ binary.BigEndian.PutUint16(b[len(b)-2:], p.PreferredAddress.IPv6Port)
+ b = append(b, uint8(p.PreferredAddress.ConnectionID.Len()))
+ b = append(b, p.PreferredAddress.ConnectionID.Bytes()...)
+ b = append(b, p.PreferredAddress.StatelessResetToken[:]...)
}
}
// active_connection_id_limit
- p.marshalVarintParam(b, activeConnectionIDLimitParameterID, p.ActiveConnectionIDLimit)
+ b = p.marshalVarintParam(b, activeConnectionIDLimitParameterID, p.ActiveConnectionIDLimit)
// initial_source_connection_id
- quicvarint.Write(b, uint64(initialSourceConnectionIDParameterID))
- quicvarint.Write(b, uint64(p.InitialSourceConnectionID.Len()))
- b.Write(p.InitialSourceConnectionID.Bytes())
+ b = quicvarint.Append(b, uint64(initialSourceConnectionIDParameterID))
+ b = quicvarint.Append(b, uint64(p.InitialSourceConnectionID.Len()))
+ b = append(b, p.InitialSourceConnectionID.Bytes()...)
// retry_source_connection_id
if pers == protocol.PerspectiveServer && p.RetrySourceConnectionID != nil {
- quicvarint.Write(b, uint64(retrySourceConnectionIDParameterID))
- quicvarint.Write(b, uint64(p.RetrySourceConnectionID.Len()))
- b.Write(p.RetrySourceConnectionID.Bytes())
+ b = quicvarint.Append(b, uint64(retrySourceConnectionIDParameterID))
+ b = quicvarint.Append(b, uint64(p.RetrySourceConnectionID.Len()))
+ b = append(b, p.RetrySourceConnectionID.Bytes()...)
}
if p.MaxDatagramFrameSize != protocol.InvalidByteCount {
- p.marshalVarintParam(b, maxDatagramFrameSizeParameterID, uint64(p.MaxDatagramFrameSize))
+ b = p.marshalVarintParam(b, maxDatagramFrameSizeParameterID, uint64(p.MaxDatagramFrameSize))
}
- return b.Bytes()
+ return b
}
-func (p *TransportParameters) marshalVarintParam(b *bytes.Buffer, id transportParameterID, val uint64) {
- quicvarint.Write(b, uint64(id))
- quicvarint.Write(b, uint64(quicvarint.Len(val)))
- quicvarint.Write(b, val)
+func (p *TransportParameters) marshalVarintParam(b []byte, id transportParameterID, val uint64) []byte {
+ b = quicvarint.Append(b, uint64(id))
+ b = quicvarint.Append(b, uint64(quicvarint.Len(val)))
+ return quicvarint.Append(b, val)
}
// MarshalForSessionTicket marshals the transport parameters we save in the session ticket.
@@ -411,23 +419,23 @@ func (p *TransportParameters) marshalVarintParam(b *bytes.Buffer, id transportPa
// if the transport parameters changed.
// Since the session ticket is encrypted, the serialization format is defined by the server.
// For convenience, we use the same format that we also use for sending the transport parameters.
-func (p *TransportParameters) MarshalForSessionTicket(b *bytes.Buffer) {
- quicvarint.Write(b, transportParameterMarshalingVersion)
+func (p *TransportParameters) MarshalForSessionTicket(b []byte) []byte {
+ b = quicvarint.Append(b, transportParameterMarshalingVersion)
// initial_max_stream_data_bidi_local
- p.marshalVarintParam(b, initialMaxStreamDataBidiLocalParameterID, uint64(p.InitialMaxStreamDataBidiLocal))
+ b = p.marshalVarintParam(b, initialMaxStreamDataBidiLocalParameterID, uint64(p.InitialMaxStreamDataBidiLocal))
// initial_max_stream_data_bidi_remote
- p.marshalVarintParam(b, initialMaxStreamDataBidiRemoteParameterID, uint64(p.InitialMaxStreamDataBidiRemote))
+ b = p.marshalVarintParam(b, initialMaxStreamDataBidiRemoteParameterID, uint64(p.InitialMaxStreamDataBidiRemote))
// initial_max_stream_data_uni
- p.marshalVarintParam(b, initialMaxStreamDataUniParameterID, uint64(p.InitialMaxStreamDataUni))
+ b = p.marshalVarintParam(b, initialMaxStreamDataUniParameterID, uint64(p.InitialMaxStreamDataUni))
// initial_max_data
- p.marshalVarintParam(b, initialMaxDataParameterID, uint64(p.InitialMaxData))
+ b = p.marshalVarintParam(b, initialMaxDataParameterID, uint64(p.InitialMaxData))
// initial_max_bidi_streams
- p.marshalVarintParam(b, initialMaxStreamsBidiParameterID, uint64(p.MaxBidiStreamNum))
+ b = p.marshalVarintParam(b, initialMaxStreamsBidiParameterID, uint64(p.MaxBidiStreamNum))
// initial_max_uni_streams
- p.marshalVarintParam(b, initialMaxStreamsUniParameterID, uint64(p.MaxUniStreamNum))
+ b = p.marshalVarintParam(b, initialMaxStreamsUniParameterID, uint64(p.MaxUniStreamNum))
// active_connection_id_limit
- p.marshalVarintParam(b, activeConnectionIDLimitParameterID, p.ActiveConnectionIDLimit)
+ return p.marshalVarintParam(b, activeConnectionIDLimitParameterID, p.ActiveConnectionIDLimit)
}
// UnmarshalFromSessionTicket unmarshals transport parameters from a session ticket.
diff --git a/vendor/github.com/lucas-clemente/quic-go/internal/wire/version_negotiation.go b/vendor/github.com/quic-go/quic-go/internal/wire/version_negotiation.go
similarity index 50%
rename from vendor/github.com/lucas-clemente/quic-go/internal/wire/version_negotiation.go
rename to vendor/github.com/quic-go/quic-go/internal/wire/version_negotiation.go
index 196853e0f..3dc621135 100644
--- a/vendor/github.com/lucas-clemente/quic-go/internal/wire/version_negotiation.go
+++ b/vendor/github.com/quic-go/quic-go/internal/wire/version_negotiation.go
@@ -3,39 +3,38 @@ package wire
import (
"bytes"
"crypto/rand"
+ "encoding/binary"
"errors"
- "github.com/lucas-clemente/quic-go/internal/protocol"
- "github.com/lucas-clemente/quic-go/internal/utils"
+ "github.com/quic-go/quic-go/internal/protocol"
+ "github.com/quic-go/quic-go/internal/utils"
)
// ParseVersionNegotiationPacket parses a Version Negotiation packet.
-func ParseVersionNegotiationPacket(b *bytes.Reader) (*Header, []protocol.VersionNumber, error) {
- hdr, err := parseHeader(b, 0)
+func ParseVersionNegotiationPacket(b []byte) (dest, src protocol.ArbitraryLenConnectionID, _ []protocol.VersionNumber, _ error) {
+ n, dest, src, err := ParseArbitraryLenConnectionIDs(b)
if err != nil {
- return nil, nil, err
+ return nil, nil, nil, err
}
- if b.Len() == 0 {
+ b = b[n:]
+ if len(b) == 0 {
//nolint:stylecheck
- return nil, nil, errors.New("Version Negotiation packet has empty version list")
+ return nil, nil, nil, errors.New("Version Negotiation packet has empty version list")
}
- if b.Len()%4 != 0 {
+ if len(b)%4 != 0 {
//nolint:stylecheck
- return nil, nil, errors.New("Version Negotiation packet has a version list with an invalid length")
+ return nil, nil, nil, errors.New("Version Negotiation packet has a version list with an invalid length")
}
- versions := make([]protocol.VersionNumber, b.Len()/4)
- for i := 0; b.Len() > 0; i++ {
- v, err := utils.BigEndian.ReadUint32(b)
- if err != nil {
- return nil, nil, err
- }
- versions[i] = protocol.VersionNumber(v)
+ versions := make([]protocol.VersionNumber, len(b)/4)
+ for i := 0; len(b) > 0; i++ {
+ versions[i] = protocol.VersionNumber(binary.BigEndian.Uint32(b[:4]))
+ b = b[4:]
}
- return hdr, versions, nil
+ return dest, src, versions, nil
}
// ComposeVersionNegotiation composes a Version Negotiation
-func ComposeVersionNegotiation(destConnID, srcConnID protocol.ConnectionID, versions []protocol.VersionNumber) []byte {
+func ComposeVersionNegotiation(destConnID, srcConnID protocol.ArbitraryLenConnectionID, versions []protocol.VersionNumber) []byte {
greasedVersions := protocol.GetGreasedVersions(versions)
expectedLen := 1 /* type byte */ + 4 /* version field */ + 1 /* dest connection ID length field */ + destConnID.Len() + 1 /* src connection ID length field */ + srcConnID.Len() + len(greasedVersions)*4
buf := bytes.NewBuffer(make([]byte, 0, expectedLen))
@@ -44,9 +43,9 @@ func ComposeVersionNegotiation(destConnID, srcConnID protocol.ConnectionID, vers
buf.WriteByte(r[0] | 0x80)
utils.BigEndian.WriteUint32(buf, 0) // version 0
buf.WriteByte(uint8(destConnID.Len()))
- buf.Write(destConnID)
+ buf.Write(destConnID.Bytes())
buf.WriteByte(uint8(srcConnID.Len()))
- buf.Write(srcConnID)
+ buf.Write(srcConnID.Bytes())
for _, v := range greasedVersions {
utils.BigEndian.WriteUint32(buf, uint32(v))
}
diff --git a/vendor/github.com/lucas-clemente/quic-go/logging/frame.go b/vendor/github.com/quic-go/quic-go/logging/frame.go
similarity index 97%
rename from vendor/github.com/lucas-clemente/quic-go/logging/frame.go
rename to vendor/github.com/quic-go/quic-go/logging/frame.go
index 75705092e..9a055db35 100644
--- a/vendor/github.com/lucas-clemente/quic-go/logging/frame.go
+++ b/vendor/github.com/quic-go/quic-go/logging/frame.go
@@ -1,6 +1,6 @@
package logging
-import "github.com/lucas-clemente/quic-go/internal/wire"
+import "github.com/quic-go/quic-go/internal/wire"
// A Frame is a QUIC frame
type Frame interface{}
diff --git a/vendor/github.com/lucas-clemente/quic-go/logging/interface.go b/vendor/github.com/quic-go/quic-go/logging/interface.go
similarity index 79%
rename from vendor/github.com/lucas-clemente/quic-go/logging/interface.go
rename to vendor/github.com/quic-go/quic-go/logging/interface.go
index f71d68f7d..efcef151e 100644
--- a/vendor/github.com/lucas-clemente/quic-go/logging/interface.go
+++ b/vendor/github.com/quic-go/quic-go/logging/interface.go
@@ -7,11 +7,10 @@ import (
"net"
"time"
- "github.com/lucas-clemente/quic-go/internal/utils"
-
- "github.com/lucas-clemente/quic-go/internal/protocol"
- "github.com/lucas-clemente/quic-go/internal/qerr"
- "github.com/lucas-clemente/quic-go/internal/wire"
+ "github.com/quic-go/quic-go/internal/protocol"
+ "github.com/quic-go/quic-go/internal/qerr"
+ "github.com/quic-go/quic-go/internal/utils"
+ "github.com/quic-go/quic-go/internal/wire"
)
type (
@@ -19,6 +18,8 @@ type (
ByteCount = protocol.ByteCount
// A ConnectionID is a QUIC Connection ID.
ConnectionID = protocol.ConnectionID
+ // An ArbitraryLenConnectionID is a QUIC Connection ID that can be up to 255 bytes long.
+ ArbitraryLenConnectionID = protocol.ArbitraryLenConnectionID
// The EncryptionLevel is the encryption level of a packet.
EncryptionLevel = protocol.EncryptionLevel
// The KeyPhase is the key phase of the 1-RTT keys.
@@ -42,7 +43,7 @@ type (
// The Header is the QUIC packet header, before removing header protection.
Header = wire.Header
- // The ExtendedHeader is the QUIC packet header, after removing header protection.
+ // The ExtendedHeader is the QUIC Long Header packet header, after removing header protection.
ExtendedHeader = wire.ExtendedHeader
// The TransportParameters are QUIC transport parameters.
TransportParameters = wire.TransportParameters
@@ -90,6 +91,14 @@ const (
StreamTypeBidi = protocol.StreamTypeBidi
)
+// The ShortHeader is the QUIC Short Header packet header, after removing header protection.
+type ShortHeader struct {
+ DestConnectionID ConnectionID
+ PacketNumber PacketNumber
+ PacketNumberLen protocol.PacketNumberLen
+ KeyPhase KeyPhaseBit
+}
+
// A Tracer traces events.
type Tracer interface {
// TracerForConnection requests a new tracer for a connection.
@@ -99,6 +108,7 @@ type Tracer interface {
TracerForConnection(ctx context.Context, p Perspective, odcid ConnectionID) ConnectionTracer
SentPacket(net.Addr, *Header, ByteCount, []Frame)
+ SentVersionNegotiationPacket(_ net.Addr, dest, src ArbitraryLenConnectionID, _ []VersionNumber)
DroppedPacket(net.Addr, PacketType, ByteCount, PacketDropReason)
}
@@ -110,11 +120,13 @@ type ConnectionTracer interface {
SentTransportParameters(*TransportParameters)
ReceivedTransportParameters(*TransportParameters)
RestoredTransportParameters(parameters *TransportParameters) // for 0-RTT
- SentPacket(hdr *ExtendedHeader, size ByteCount, ack *AckFrame, frames []Frame)
- ReceivedVersionNegotiationPacket(*Header, []VersionNumber)
+ SentLongHeaderPacket(hdr *ExtendedHeader, size ByteCount, ack *AckFrame, frames []Frame)
+ SentShortHeaderPacket(hdr *ShortHeader, size ByteCount, ack *AckFrame, frames []Frame)
+ ReceivedVersionNegotiationPacket(dest, src ArbitraryLenConnectionID, _ []VersionNumber)
ReceivedRetry(*Header)
- ReceivedPacket(hdr *ExtendedHeader, size ByteCount, frames []Frame)
- BufferedPacket(PacketType)
+ ReceivedLongHeaderPacket(hdr *ExtendedHeader, size ByteCount, frames []Frame)
+ ReceivedShortHeaderPacket(hdr *ShortHeader, size ByteCount, frames []Frame)
+ BufferedPacket(PacketType, ByteCount)
DroppedPacket(PacketType, ByteCount, PacketDropReason)
UpdatedMetrics(rttStats *RTTStats, cwnd, bytesInFlight ByteCount, packetsInFlight int)
AcknowledgedPacket(EncryptionLevel, PacketNumber)
diff --git a/vendor/github.com/quic-go/quic-go/logging/mockgen.go b/vendor/github.com/quic-go/quic-go/logging/mockgen.go
new file mode 100644
index 000000000..d50916799
--- /dev/null
+++ b/vendor/github.com/quic-go/quic-go/logging/mockgen.go
@@ -0,0 +1,4 @@
+package logging
+
+//go:generate sh -c "go run github.com/golang/mock/mockgen -package logging -self_package github.com/quic-go/quic-go/logging -destination mock_connection_tracer_test.go github.com/quic-go/quic-go/logging ConnectionTracer"
+//go:generate sh -c "go run github.com/golang/mock/mockgen -package logging -self_package github.com/quic-go/quic-go/logging -destination mock_tracer_test.go github.com/quic-go/quic-go/logging Tracer"
diff --git a/vendor/github.com/lucas-clemente/quic-go/logging/multiplex.go b/vendor/github.com/quic-go/quic-go/logging/multiplex.go
similarity index 81%
rename from vendor/github.com/lucas-clemente/quic-go/logging/multiplex.go
rename to vendor/github.com/quic-go/quic-go/logging/multiplex.go
index 8280e8cdf..8e85db494 100644
--- a/vendor/github.com/lucas-clemente/quic-go/logging/multiplex.go
+++ b/vendor/github.com/quic-go/quic-go/logging/multiplex.go
@@ -39,6 +39,12 @@ func (m *tracerMultiplexer) SentPacket(remote net.Addr, hdr *Header, size ByteCo
}
}
+func (m *tracerMultiplexer) SentVersionNegotiationPacket(remote net.Addr, dest, src ArbitraryLenConnectionID, versions []VersionNumber) {
+ for _, t := range m.tracers {
+ t.SentVersionNegotiationPacket(remote, dest, src, versions)
+ }
+}
+
func (m *tracerMultiplexer) DroppedPacket(remote net.Addr, typ PacketType, size ByteCount, reason PacketDropReason) {
for _, t := range m.tracers {
t.DroppedPacket(remote, typ, size, reason)
@@ -98,15 +104,21 @@ func (m *connTracerMultiplexer) RestoredTransportParameters(tp *TransportParamet
}
}
-func (m *connTracerMultiplexer) SentPacket(hdr *ExtendedHeader, size ByteCount, ack *AckFrame, frames []Frame) {
+func (m *connTracerMultiplexer) SentLongHeaderPacket(hdr *ExtendedHeader, size ByteCount, ack *AckFrame, frames []Frame) {
+ for _, t := range m.tracers {
+ t.SentLongHeaderPacket(hdr, size, ack, frames)
+ }
+}
+
+func (m *connTracerMultiplexer) SentShortHeaderPacket(hdr *ShortHeader, size ByteCount, ack *AckFrame, frames []Frame) {
for _, t := range m.tracers {
- t.SentPacket(hdr, size, ack, frames)
+ t.SentShortHeaderPacket(hdr, size, ack, frames)
}
}
-func (m *connTracerMultiplexer) ReceivedVersionNegotiationPacket(hdr *Header, versions []VersionNumber) {
+func (m *connTracerMultiplexer) ReceivedVersionNegotiationPacket(dest, src ArbitraryLenConnectionID, versions []VersionNumber) {
for _, t := range m.tracers {
- t.ReceivedVersionNegotiationPacket(hdr, versions)
+ t.ReceivedVersionNegotiationPacket(dest, src, versions)
}
}
@@ -116,15 +128,21 @@ func (m *connTracerMultiplexer) ReceivedRetry(hdr *Header) {
}
}
-func (m *connTracerMultiplexer) ReceivedPacket(hdr *ExtendedHeader, size ByteCount, frames []Frame) {
+func (m *connTracerMultiplexer) ReceivedLongHeaderPacket(hdr *ExtendedHeader, size ByteCount, frames []Frame) {
+ for _, t := range m.tracers {
+ t.ReceivedLongHeaderPacket(hdr, size, frames)
+ }
+}
+
+func (m *connTracerMultiplexer) ReceivedShortHeaderPacket(hdr *ShortHeader, size ByteCount, frames []Frame) {
for _, t := range m.tracers {
- t.ReceivedPacket(hdr, size, frames)
+ t.ReceivedShortHeaderPacket(hdr, size, frames)
}
}
-func (m *connTracerMultiplexer) BufferedPacket(typ PacketType) {
+func (m *connTracerMultiplexer) BufferedPacket(typ PacketType, size ByteCount) {
for _, t := range m.tracers {
- t.BufferedPacket(typ)
+ t.BufferedPacket(typ, size)
}
}
diff --git a/vendor/github.com/quic-go/quic-go/logging/null_tracer.go b/vendor/github.com/quic-go/quic-go/logging/null_tracer.go
new file mode 100644
index 000000000..38052ae3b
--- /dev/null
+++ b/vendor/github.com/quic-go/quic-go/logging/null_tracer.go
@@ -0,0 +1,62 @@
+package logging
+
+import (
+ "context"
+ "net"
+ "time"
+)
+
+// The NullTracer is a Tracer that does nothing.
+// It is useful for embedding.
+type NullTracer struct{}
+
+var _ Tracer = &NullTracer{}
+
+func (n NullTracer) TracerForConnection(context.Context, Perspective, ConnectionID) ConnectionTracer {
+ return NullConnectionTracer{}
+}
+func (n NullTracer) SentPacket(net.Addr, *Header, ByteCount, []Frame) {}
+func (n NullTracer) SentVersionNegotiationPacket(_ net.Addr, dest, src ArbitraryLenConnectionID, _ []VersionNumber) {
+}
+func (n NullTracer) DroppedPacket(net.Addr, PacketType, ByteCount, PacketDropReason) {}
+
+// The NullConnectionTracer is a ConnectionTracer that does nothing.
+// It is useful for embedding.
+type NullConnectionTracer struct{}
+
+var _ ConnectionTracer = &NullConnectionTracer{}
+
+func (n NullConnectionTracer) StartedConnection(local, remote net.Addr, srcConnID, destConnID ConnectionID) {
+}
+
+func (n NullConnectionTracer) NegotiatedVersion(chosen VersionNumber, clientVersions, serverVersions []VersionNumber) {
+}
+func (n NullConnectionTracer) ClosedConnection(err error) {}
+func (n NullConnectionTracer) SentTransportParameters(*TransportParameters) {}
+func (n NullConnectionTracer) ReceivedTransportParameters(*TransportParameters) {}
+func (n NullConnectionTracer) RestoredTransportParameters(*TransportParameters) {}
+func (n NullConnectionTracer) SentLongHeaderPacket(*ExtendedHeader, ByteCount, *AckFrame, []Frame) {}
+func (n NullConnectionTracer) SentShortHeaderPacket(*ShortHeader, ByteCount, *AckFrame, []Frame) {}
+func (n NullConnectionTracer) ReceivedVersionNegotiationPacket(dest, src ArbitraryLenConnectionID, _ []VersionNumber) {
+}
+func (n NullConnectionTracer) ReceivedRetry(*Header) {}
+func (n NullConnectionTracer) ReceivedLongHeaderPacket(*ExtendedHeader, ByteCount, []Frame) {}
+func (n NullConnectionTracer) ReceivedShortHeaderPacket(*ShortHeader, ByteCount, []Frame) {}
+func (n NullConnectionTracer) BufferedPacket(PacketType, ByteCount) {}
+func (n NullConnectionTracer) DroppedPacket(PacketType, ByteCount, PacketDropReason) {}
+
+func (n NullConnectionTracer) UpdatedMetrics(rttStats *RTTStats, cwnd, bytesInFlight ByteCount, packetsInFlight int) {
+}
+func (n NullConnectionTracer) AcknowledgedPacket(EncryptionLevel, PacketNumber) {}
+func (n NullConnectionTracer) LostPacket(EncryptionLevel, PacketNumber, PacketLossReason) {}
+func (n NullConnectionTracer) UpdatedCongestionState(CongestionState) {}
+func (n NullConnectionTracer) UpdatedPTOCount(uint32) {}
+func (n NullConnectionTracer) UpdatedKeyFromTLS(EncryptionLevel, Perspective) {}
+func (n NullConnectionTracer) UpdatedKey(keyPhase KeyPhase, remote bool) {}
+func (n NullConnectionTracer) DroppedEncryptionLevel(EncryptionLevel) {}
+func (n NullConnectionTracer) DroppedKey(KeyPhase) {}
+func (n NullConnectionTracer) SetLossTimer(TimerType, EncryptionLevel, time.Time) {}
+func (n NullConnectionTracer) LossTimerExpired(timerType TimerType, level EncryptionLevel) {}
+func (n NullConnectionTracer) LossTimerCanceled() {}
+func (n NullConnectionTracer) Close() {}
+func (n NullConnectionTracer) Debug(name, msg string) {}
diff --git a/vendor/github.com/lucas-clemente/quic-go/logging/packet_header.go b/vendor/github.com/quic-go/quic-go/logging/packet_header.go
similarity index 83%
rename from vendor/github.com/lucas-clemente/quic-go/logging/packet_header.go
rename to vendor/github.com/quic-go/quic-go/logging/packet_header.go
index ea4282fe3..6b8df58d8 100644
--- a/vendor/github.com/lucas-clemente/quic-go/logging/packet_header.go
+++ b/vendor/github.com/quic-go/quic-go/logging/packet_header.go
@@ -1,14 +1,11 @@
package logging
import (
- "github.com/lucas-clemente/quic-go/internal/protocol"
+ "github.com/quic-go/quic-go/internal/protocol"
)
// PacketTypeFromHeader determines the packet type from a *wire.Header.
func PacketTypeFromHeader(hdr *Header) PacketType {
- if !hdr.IsLongHeader {
- return PacketType1RTT
- }
if hdr.Version == 0 {
return PacketTypeVersionNegotiation
}
diff --git a/vendor/github.com/lucas-clemente/quic-go/logging/types.go b/vendor/github.com/quic-go/quic-go/logging/types.go
similarity index 100%
rename from vendor/github.com/lucas-clemente/quic-go/logging/types.go
rename to vendor/github.com/quic-go/quic-go/logging/types.go
diff --git a/vendor/github.com/quic-go/quic-go/mockgen.go b/vendor/github.com/quic-go/quic-go/mockgen.go
new file mode 100644
index 000000000..abe1faabc
--- /dev/null
+++ b/vendor/github.com/quic-go/quic-go/mockgen.go
@@ -0,0 +1,27 @@
+package quic
+
+//go:generate sh -c "./mockgen_private.sh quic mock_send_conn_test.go github.com/quic-go/quic-go sendConn"
+//go:generate sh -c "./mockgen_private.sh quic mock_sender_test.go github.com/quic-go/quic-go sender"
+//go:generate sh -c "./mockgen_private.sh quic mock_stream_internal_test.go github.com/quic-go/quic-go streamI"
+//go:generate sh -c "./mockgen_private.sh quic mock_crypto_stream_test.go github.com/quic-go/quic-go cryptoStream"
+//go:generate sh -c "./mockgen_private.sh quic mock_receive_stream_internal_test.go github.com/quic-go/quic-go receiveStreamI"
+//go:generate sh -c "./mockgen_private.sh quic mock_send_stream_internal_test.go github.com/quic-go/quic-go sendStreamI"
+//go:generate sh -c "./mockgen_private.sh quic mock_stream_sender_test.go github.com/quic-go/quic-go streamSender"
+//go:generate sh -c "./mockgen_private.sh quic mock_stream_getter_test.go github.com/quic-go/quic-go streamGetter"
+//go:generate sh -c "./mockgen_private.sh quic mock_crypto_data_handler_test.go github.com/quic-go/quic-go cryptoDataHandler"
+//go:generate sh -c "./mockgen_private.sh quic mock_frame_source_test.go github.com/quic-go/quic-go frameSource"
+//go:generate sh -c "./mockgen_private.sh quic mock_ack_frame_source_test.go github.com/quic-go/quic-go ackFrameSource"
+//go:generate sh -c "./mockgen_private.sh quic mock_stream_manager_test.go github.com/quic-go/quic-go streamManager"
+//go:generate sh -c "./mockgen_private.sh quic mock_sealing_manager_test.go github.com/quic-go/quic-go sealingManager"
+//go:generate sh -c "./mockgen_private.sh quic mock_unpacker_test.go github.com/quic-go/quic-go unpacker"
+//go:generate sh -c "./mockgen_private.sh quic mock_packer_test.go github.com/quic-go/quic-go packer"
+//go:generate sh -c "./mockgen_private.sh quic mock_mtu_discoverer_test.go github.com/quic-go/quic-go mtuDiscoverer"
+//go:generate sh -c "./mockgen_private.sh quic mock_conn_runner_test.go github.com/quic-go/quic-go connRunner"
+//go:generate sh -c "./mockgen_private.sh quic mock_quic_conn_test.go github.com/quic-go/quic-go quicConn"
+//go:generate sh -c "./mockgen_private.sh quic mock_packet_handler_test.go github.com/quic-go/quic-go packetHandler"
+//go:generate sh -c "./mockgen_private.sh quic mock_unknown_packet_handler_test.go github.com/quic-go/quic-go unknownPacketHandler"
+//go:generate sh -c "./mockgen_private.sh quic mock_packet_handler_manager_test.go github.com/quic-go/quic-go packetHandlerManager"
+//go:generate sh -c "./mockgen_private.sh quic mock_multiplexer_test.go github.com/quic-go/quic-go multiplexer"
+//go:generate sh -c "./mockgen_private.sh quic mock_batch_conn_test.go github.com/quic-go/quic-go batchConn"
+//go:generate sh -c "go run github.com/golang/mock/mockgen -package quic -self_package github.com/quic-go/quic-go -destination mock_token_store_test.go github.com/quic-go/quic-go TokenStore"
+//go:generate sh -c "go run github.com/golang/mock/mockgen -package quic -self_package github.com/quic-go/quic-go -destination mock_packetconn_test.go net PacketConn"
diff --git a/vendor/github.com/lucas-clemente/quic-go/mockgen_private.sh b/vendor/github.com/quic-go/quic-go/mockgen_private.sh
similarity index 88%
rename from vendor/github.com/lucas-clemente/quic-go/mockgen_private.sh
rename to vendor/github.com/quic-go/quic-go/mockgen_private.sh
index 92829d770..79f63eee3 100644
--- a/vendor/github.com/lucas-clemente/quic-go/mockgen_private.sh
+++ b/vendor/github.com/quic-go/quic-go/mockgen_private.sh
@@ -17,7 +17,7 @@ for f in *.go; do
continue;
fi
if $(egrep -qe "type (.*) interface" $f); then
- AUX+=("github.com/lucas-clemente/quic-go=$f")
+ AUX+=("github.com/quic-go/quic-go=$f")
fi
done
@@ -44,6 +44,6 @@ AUX_FILES=$(IFS=, ; echo "${AUX[*]}")
## create a public alias for the interface, so that mockgen can process it
echo -e "package $1\n" > $TMPFILE
echo "$INTERFACE" | sed "s/$ORIG_INTERFACE_NAME/$INTERFACE_NAME/" >> $TMPFILE
-mockgen -package $1 -self_package $3 -destination $DEST -source=$TMPFILE -aux_files $AUX_FILES
+go run github.com/golang/mock/mockgen -package $1 -self_package $3 -destination $DEST -source=$TMPFILE -aux_files $AUX_FILES
sed "s/$TMPFILE/$SRC/" "$DEST" > "$DEST.new" && mv "$DEST.new" "$DEST"
rm "$TMPFILE"
diff --git a/vendor/github.com/lucas-clemente/quic-go/mtu_discoverer.go b/vendor/github.com/quic-go/quic-go/mtu_discoverer.go
similarity index 89%
rename from vendor/github.com/lucas-clemente/quic-go/mtu_discoverer.go
rename to vendor/github.com/quic-go/quic-go/mtu_discoverer.go
index bf38eaac7..5a8484c76 100644
--- a/vendor/github.com/lucas-clemente/quic-go/mtu_discoverer.go
+++ b/vendor/github.com/quic-go/quic-go/mtu_discoverer.go
@@ -3,10 +3,10 @@ package quic
import (
"time"
- "github.com/lucas-clemente/quic-go/internal/ackhandler"
- "github.com/lucas-clemente/quic-go/internal/protocol"
- "github.com/lucas-clemente/quic-go/internal/utils"
- "github.com/lucas-clemente/quic-go/internal/wire"
+ "github.com/quic-go/quic-go/internal/ackhandler"
+ "github.com/quic-go/quic-go/internal/protocol"
+ "github.com/quic-go/quic-go/internal/utils"
+ "github.com/quic-go/quic-go/internal/wire"
)
type mtuDiscoverer interface {
diff --git a/vendor/github.com/lucas-clemente/quic-go/multiplexer.go b/vendor/github.com/quic-go/quic-go/multiplexer.go
similarity index 81%
rename from vendor/github.com/lucas-clemente/quic-go/multiplexer.go
rename to vendor/github.com/quic-go/quic-go/multiplexer.go
index 2271b5517..37d4e75cf 100644
--- a/vendor/github.com/lucas-clemente/quic-go/multiplexer.go
+++ b/vendor/github.com/quic-go/quic-go/multiplexer.go
@@ -1,13 +1,12 @@
package quic
import (
- "bytes"
"fmt"
"net"
"sync"
- "github.com/lucas-clemente/quic-go/internal/utils"
- "github.com/lucas-clemente/quic-go/logging"
+ "github.com/quic-go/quic-go/internal/utils"
+ "github.com/quic-go/quic-go/logging"
)
var (
@@ -20,13 +19,13 @@ type indexableConn interface {
}
type multiplexer interface {
- AddConn(c net.PacketConn, connIDLen int, statelessResetKey []byte, tracer logging.Tracer) (packetHandlerManager, error)
+ AddConn(c net.PacketConn, connIDLen int, statelessResetKey *StatelessResetKey, tracer logging.Tracer) (packetHandlerManager, error)
RemoveConn(indexableConn) error
}
type connManager struct {
connIDLen int
- statelessResetKey []byte
+ statelessResetKey *StatelessResetKey
tracer logging.Tracer
manager packetHandlerManager
}
@@ -37,7 +36,7 @@ type connMultiplexer struct {
mutex sync.Mutex
conns map[string] /* LocalAddr().String() */ connManager
- newPacketHandlerManager func(net.PacketConn, int, []byte, logging.Tracer, utils.Logger) (packetHandlerManager, error) // so it can be replaced in the tests
+ newPacketHandlerManager func(net.PacketConn, int, *StatelessResetKey, logging.Tracer, utils.Logger) (packetHandlerManager, error) // so it can be replaced in the tests
logger utils.Logger
}
@@ -58,7 +57,7 @@ func getMultiplexer() multiplexer {
func (m *connMultiplexer) AddConn(
c net.PacketConn,
connIDLen int,
- statelessResetKey []byte,
+ statelessResetKey *StatelessResetKey,
tracer logging.Tracer,
) (packetHandlerManager, error) {
m.mutex.Lock()
@@ -83,7 +82,7 @@ func (m *connMultiplexer) AddConn(
if p.connIDLen != connIDLen {
return nil, fmt.Errorf("cannot use %d byte connection IDs on a connection that is already using %d byte connction IDs", connIDLen, p.connIDLen)
}
- if statelessResetKey != nil && !bytes.Equal(p.statelessResetKey, statelessResetKey) {
+ if statelessResetKey != nil && p.statelessResetKey != statelessResetKey {
return nil, fmt.Errorf("cannot use different stateless reset keys on the same packet conn")
}
if tracer != p.tracer {
diff --git a/vendor/github.com/lucas-clemente/quic-go/packet_handler_map.go b/vendor/github.com/quic-go/quic-go/packet_handler_map.go
similarity index 77%
rename from vendor/github.com/lucas-clemente/quic-go/packet_handler_map.go
rename to vendor/github.com/quic-go/quic-go/packet_handler_map.go
index 2d55a95ef..e2bc913ca 100644
--- a/vendor/github.com/lucas-clemente/quic-go/packet_handler_map.go
+++ b/vendor/github.com/quic-go/quic-go/packet_handler_map.go
@@ -16,39 +16,12 @@ import (
"sync"
"time"
- "github.com/lucas-clemente/quic-go/internal/protocol"
- "github.com/lucas-clemente/quic-go/internal/utils"
- "github.com/lucas-clemente/quic-go/internal/wire"
- "github.com/lucas-clemente/quic-go/logging"
+ "github.com/quic-go/quic-go/internal/protocol"
+ "github.com/quic-go/quic-go/internal/utils"
+ "github.com/quic-go/quic-go/internal/wire"
+ "github.com/quic-go/quic-go/logging"
)
-type zeroRTTQueue struct {
- queue []*receivedPacket
- retireTimer *time.Timer
-}
-
-var _ packetHandler = &zeroRTTQueue{}
-
-func (h *zeroRTTQueue) handlePacket(p *receivedPacket) {
- if len(h.queue) < protocol.Max0RTTQueueLen {
- h.queue = append(h.queue, p)
- }
-}
-func (h *zeroRTTQueue) shutdown() {}
-func (h *zeroRTTQueue) destroy(error) {}
-func (h *zeroRTTQueue) getPerspective() protocol.Perspective { return protocol.PerspectiveClient }
-func (h *zeroRTTQueue) EnqueueAll(sess packetHandler) {
- for _, p := range h.queue {
- sess.handlePacket(p)
- }
-}
-
-func (h *zeroRTTQueue) Clear() {
- for _, p := range h.queue {
- p.buffer.Release()
- }
-}
-
// rawConn is a connection that allow reading of a receivedPacket.
type rawConn interface {
ReadPacket() (*receivedPacket, error)
@@ -57,9 +30,10 @@ type rawConn interface {
io.Closer
}
-type packetHandlerMapEntry struct {
- packetHandler packetHandler
- is0RTTQueue bool
+type closePacket struct {
+ payload []byte
+ addr net.Addr
+ info *packetInfo
}
// The packetHandlerMap stores packetHandlers, identified by connection ID.
@@ -72,7 +46,9 @@ type packetHandlerMap struct {
conn rawConn
connIDLen int
- handlers map[string] /* string(ConnectionID)*/ packetHandlerMapEntry
+ closeQueue chan closePacket
+
+ handlers map[protocol.ConnectionID]packetHandler
resetTokens map[protocol.StatelessResetToken] /* stateless reset token */ packetHandler
server unknownPacketHandler
numZeroRTTEntries int
@@ -129,7 +105,7 @@ var receiveBufferWarningOnce sync.Once
func newPacketHandlerMap(
c net.PacketConn,
connIDLen int,
- statelessResetKey []byte,
+ statelessResetKey *StatelessResetKey,
tracer logging.Tracer,
logger utils.Logger,
) (packetHandlerManager, error) {
@@ -139,7 +115,7 @@ func newPacketHandlerMap(
if disable, _ := strconv.ParseBool(os.Getenv("QUIC_GO_DISABLE_RECEIVE_BUFFER_WARNING")); disable {
return
}
- log.Printf("%s. See https://github.com/lucas-clemente/quic-go/wiki/UDP-Receive-Buffer-Size for details.", err)
+ log.Printf("%s. See https://github.com/quic-go/quic-go/wiki/UDP-Receive-Buffer-Size for details.", err)
})
}
}
@@ -151,16 +127,20 @@ func newPacketHandlerMap(
conn: conn,
connIDLen: connIDLen,
listening: make(chan struct{}),
- handlers: make(map[string]packetHandlerMapEntry),
+ handlers: make(map[protocol.ConnectionID]packetHandler),
resetTokens: make(map[protocol.StatelessResetToken]packetHandler),
deleteRetiredConnsAfter: protocol.RetiredConnectionIDDeleteTimeout,
zeroRTTQueueDuration: protocol.Max0RTTQueueingDuration,
- statelessResetEnabled: len(statelessResetKey) > 0,
- statelessResetHasher: hmac.New(sha256.New, statelessResetKey),
+ closeQueue: make(chan closePacket, 4),
+ statelessResetEnabled: statelessResetKey != nil,
tracer: tracer,
logger: logger,
}
+ if m.statelessResetEnabled {
+ m.statelessResetHasher = hmac.New(sha256.New, statelessResetKey[:])
+ }
go m.listen()
+ go m.runCloseQueue()
if logger.Debug() {
go m.logUsage()
@@ -198,11 +178,11 @@ func (h *packetHandlerMap) Add(id protocol.ConnectionID, handler packetHandler)
h.mutex.Lock()
defer h.mutex.Unlock()
- if _, ok := h.handlers[string(id)]; ok {
+ if _, ok := h.handlers[id]; ok {
h.logger.Debugf("Not adding connection ID %s, as it already exists.", id)
return false
}
- h.handlers[string(id)] = packetHandlerMapEntry{packetHandler: handler}
+ h.handlers[id] = handler
h.logger.Debugf("Adding connection ID %s.", id)
return true
}
@@ -212,31 +192,31 @@ func (h *packetHandlerMap) AddWithConnID(clientDestConnID, newConnID protocol.Co
defer h.mutex.Unlock()
var q *zeroRTTQueue
- if entry, ok := h.handlers[string(clientDestConnID)]; ok {
- if !entry.is0RTTQueue {
+ if handler, ok := h.handlers[clientDestConnID]; ok {
+ q, ok = handler.(*zeroRTTQueue)
+ if !ok {
h.logger.Debugf("Not adding connection ID %s for a new connection, as it already exists.", clientDestConnID)
return false
}
- q = entry.packetHandler.(*zeroRTTQueue)
q.retireTimer.Stop()
h.numZeroRTTEntries--
if h.numZeroRTTEntries < 0 {
panic("number of 0-RTT queues < 0")
}
}
- sess := fn()
+ conn := fn()
if q != nil {
- q.EnqueueAll(sess)
+ q.EnqueueAll(conn)
}
- h.handlers[string(clientDestConnID)] = packetHandlerMapEntry{packetHandler: sess}
- h.handlers[string(newConnID)] = packetHandlerMapEntry{packetHandler: sess}
+ h.handlers[clientDestConnID] = conn
+ h.handlers[newConnID] = conn
h.logger.Debugf("Adding connection IDs %s and %s for a new connection.", clientDestConnID, newConnID)
return true
}
func (h *packetHandlerMap) Remove(id protocol.ConnectionID) {
h.mutex.Lock()
- delete(h.handlers, string(id))
+ delete(h.handlers, id)
h.mutex.Unlock()
h.logger.Debugf("Removing connection ID %s.", id)
}
@@ -245,27 +225,64 @@ func (h *packetHandlerMap) Retire(id protocol.ConnectionID) {
h.logger.Debugf("Retiring connection ID %s in %s.", id, h.deleteRetiredConnsAfter)
time.AfterFunc(h.deleteRetiredConnsAfter, func() {
h.mutex.Lock()
- delete(h.handlers, string(id))
+ delete(h.handlers, id)
h.mutex.Unlock()
h.logger.Debugf("Removing connection ID %s after it has been retired.", id)
})
}
-func (h *packetHandlerMap) ReplaceWithClosed(id protocol.ConnectionID, handler packetHandler) {
+// ReplaceWithClosed is called when a connection is closed.
+// Depending on which side closed the connection, we need to:
+// * remote close: absorb delayed packets
+// * local close: retransmit the CONNECTION_CLOSE packet, in case it was lost
+func (h *packetHandlerMap) ReplaceWithClosed(ids []protocol.ConnectionID, pers protocol.Perspective, connClosePacket []byte) {
+ var handler packetHandler
+ if connClosePacket != nil {
+ handler = newClosedLocalConn(
+ func(addr net.Addr, info *packetInfo) {
+ select {
+ case h.closeQueue <- closePacket{payload: connClosePacket, addr: addr, info: info}:
+ default:
+ // Oops, we're backlogged.
+ // Just drop the packet, sending CONNECTION_CLOSE copies is best effort anyway.
+ }
+ },
+ pers,
+ h.logger,
+ )
+ } else {
+ handler = newClosedRemoteConn(pers)
+ }
+
h.mutex.Lock()
- h.handlers[string(id)] = packetHandlerMapEntry{packetHandler: handler}
+ for _, id := range ids {
+ h.handlers[id] = handler
+ }
h.mutex.Unlock()
- h.logger.Debugf("Replacing connection for connection ID %s with a closed connection.", id)
+ h.logger.Debugf("Replacing connection for connection IDs %s with a closed connection.", ids)
time.AfterFunc(h.deleteRetiredConnsAfter, func() {
h.mutex.Lock()
handler.shutdown()
- delete(h.handlers, string(id))
+ for _, id := range ids {
+ delete(h.handlers, id)
+ }
h.mutex.Unlock()
- h.logger.Debugf("Removing connection ID %s for a closed connection after it has been retired.", id)
+ h.logger.Debugf("Removing connection IDs %s for a closed connection after it has been retired.", ids)
})
}
+func (h *packetHandlerMap) runCloseQueue() {
+ for {
+ select {
+ case <-h.listening:
+ return
+ case p := <-h.closeQueue:
+ h.conn.WritePacket(p.payload, p.addr, p.info.OOB())
+ }
+ }
+}
+
func (h *packetHandlerMap) AddResetToken(token protocol.StatelessResetToken, handler packetHandler) {
h.mutex.Lock()
h.resetTokens[token] = handler
@@ -292,14 +309,14 @@ func (h *packetHandlerMap) CloseServer() {
}
h.server = nil
var wg sync.WaitGroup
- for _, entry := range h.handlers {
- if entry.packetHandler.getPerspective() == protocol.PerspectiveServer {
+ for _, handler := range h.handlers {
+ if handler.getPerspective() == protocol.PerspectiveServer {
wg.Add(1)
go func(handler packetHandler) {
// blocks until the CONNECTION_CLOSE has been sent and the run-loop has stopped
handler.shutdown()
wg.Done()
- }(entry.packetHandler)
+ }(handler)
}
}
h.mutex.Unlock()
@@ -324,12 +341,12 @@ func (h *packetHandlerMap) close(e error) error {
}
var wg sync.WaitGroup
- for _, entry := range h.handlers {
+ for _, handler := range h.handlers {
wg.Add(1)
go func(handler packetHandler) {
handler.destroy(e)
wg.Done()
- }(entry.packetHandler)
+ }(handler)
}
if h.server != nil {
@@ -348,7 +365,7 @@ func (h *packetHandlerMap) listen() {
//nolint:staticcheck // SA1019 ignore this!
// TODO: This code is used to ignore wsa errors on Windows.
// Since net.Error.Temporary is deprecated as of Go 1.18, we should find a better solution.
- // See https://github.com/lucas-clemente/quic-go/issues/1737 for details.
+ // See https://github.com/quic-go/quic-go/issues/1737 for details.
if nerr, ok := err.(net.Error); ok && nerr.Temporary() {
h.logger.Debugf("Temporary error reading from conn: %w", err)
continue
@@ -379,18 +396,18 @@ func (h *packetHandlerMap) handlePacket(p *receivedPacket) {
return
}
- if entry, ok := h.handlers[string(connID)]; ok {
- if entry.is0RTTQueue { // only enqueue 0-RTT packets in the 0-RTT queue
+ if handler, ok := h.handlers[connID]; ok {
+ if ha, ok := handler.(*zeroRTTQueue); ok { // only enqueue 0-RTT packets in the 0-RTT queue
if wire.Is0RTTPacket(p.data) {
- entry.packetHandler.handlePacket(p)
+ ha.handlePacket(p)
return
}
} else { // existing connection
- entry.packetHandler.handlePacket(p)
+ handler.handlePacket(p)
return
}
}
- if p.data[0]&0x80 == 0 {
+ if !wire.IsLongHeaderPacket(p.data[0]) {
go h.maybeSendStatelessReset(p, connID)
return
}
@@ -404,24 +421,23 @@ func (h *packetHandlerMap) handlePacket(p *receivedPacket) {
}
h.numZeroRTTEntries++
queue := &zeroRTTQueue{queue: make([]*receivedPacket, 0, 8)}
- h.handlers[string(connID)] = packetHandlerMapEntry{
- packetHandler: queue,
- is0RTTQueue: true,
- }
+ h.handlers[connID] = queue
queue.retireTimer = time.AfterFunc(h.zeroRTTQueueDuration, func() {
h.mutex.Lock()
defer h.mutex.Unlock()
// The entry might have been replaced by an actual connection.
// Only delete it if it's still a 0-RTT queue.
- if entry, ok := h.handlers[string(connID)]; ok && entry.is0RTTQueue {
- delete(h.handlers, string(connID))
- h.numZeroRTTEntries--
- if h.numZeroRTTEntries < 0 {
- panic("number of 0-RTT queues < 0")
- }
- entry.packetHandler.(*zeroRTTQueue).Clear()
- if h.logger.Debug() {
- h.logger.Debugf("Removing 0-RTT queue for %s.", connID)
+ if handler, ok := h.handlers[connID]; ok {
+ if q, ok := handler.(*zeroRTTQueue); ok {
+ delete(h.handlers, connID)
+ h.numZeroRTTEntries--
+ if h.numZeroRTTEntries < 0 {
+ panic("number of 0-RTT queues < 0")
+ }
+ q.Clear()
+ if h.logger.Debug() {
+ h.logger.Debugf("Removing 0-RTT queue for %s.", connID)
+ }
}
}
})
@@ -433,7 +449,7 @@ func (h *packetHandlerMap) handlePacket(p *receivedPacket) {
func (h *packetHandlerMap) maybeHandleStatelessReset(data []byte) bool {
// stateless resets are always short header packets
- if data[0]&0x80 != 0 {
+ if wire.IsLongHeaderPacket(data[0]) {
return false
}
if len(data) < 17 /* type byte + 16 bytes for the reset token */ {
diff --git a/vendor/github.com/quic-go/quic-go/packet_packer.go b/vendor/github.com/quic-go/quic-go/packet_packer.go
new file mode 100644
index 000000000..14befd460
--- /dev/null
+++ b/vendor/github.com/quic-go/quic-go/packet_packer.go
@@ -0,0 +1,968 @@
+package quic
+
+import (
+ "errors"
+ "fmt"
+ "net"
+ "time"
+
+ "github.com/quic-go/quic-go/internal/ackhandler"
+ "github.com/quic-go/quic-go/internal/handshake"
+ "github.com/quic-go/quic-go/internal/protocol"
+ "github.com/quic-go/quic-go/internal/qerr"
+ "github.com/quic-go/quic-go/internal/utils"
+ "github.com/quic-go/quic-go/internal/wire"
+)
+
+var errNothingToPack = errors.New("nothing to pack")
+
+type packer interface {
+ PackCoalescedPacket(onlyAck bool, v protocol.VersionNumber) (*coalescedPacket, error)
+ PackPacket(onlyAck bool, now time.Time, v protocol.VersionNumber) (shortHeaderPacket, *packetBuffer, error)
+ MaybePackProbePacket(protocol.EncryptionLevel, protocol.VersionNumber) (*coalescedPacket, error)
+ PackConnectionClose(*qerr.TransportError, protocol.VersionNumber) (*coalescedPacket, error)
+ PackApplicationClose(*qerr.ApplicationError, protocol.VersionNumber) (*coalescedPacket, error)
+
+ SetMaxPacketSize(protocol.ByteCount)
+ PackMTUProbePacket(ping ackhandler.Frame, size protocol.ByteCount, now time.Time, v protocol.VersionNumber) (shortHeaderPacket, *packetBuffer, error)
+
+ HandleTransportParameters(*wire.TransportParameters)
+ SetToken([]byte)
+}
+
+type sealer interface {
+ handshake.LongHeaderSealer
+}
+
+type payload struct {
+ frames []*ackhandler.Frame
+ ack *wire.AckFrame
+ length protocol.ByteCount
+}
+
+type longHeaderPacket struct {
+ header *wire.ExtendedHeader
+ ack *wire.AckFrame
+ frames []*ackhandler.Frame
+
+ length protocol.ByteCount
+
+ isMTUProbePacket bool
+}
+
+type shortHeaderPacket struct {
+ *ackhandler.Packet
+ // used for logging
+ DestConnID protocol.ConnectionID
+ Ack *wire.AckFrame
+ PacketNumberLen protocol.PacketNumberLen
+ KeyPhase protocol.KeyPhaseBit
+}
+
+func (p *shortHeaderPacket) IsAckEliciting() bool { return ackhandler.HasAckElicitingFrames(p.Frames) }
+
+type coalescedPacket struct {
+ buffer *packetBuffer
+ longHdrPackets []*longHeaderPacket
+ shortHdrPacket *shortHeaderPacket
+}
+
+func (p *longHeaderPacket) EncryptionLevel() protocol.EncryptionLevel {
+ //nolint:exhaustive // Will never be called for Retry packets (and they don't have encrypted data).
+ switch p.header.Type {
+ case protocol.PacketTypeInitial:
+ return protocol.EncryptionInitial
+ case protocol.PacketTypeHandshake:
+ return protocol.EncryptionHandshake
+ case protocol.PacketType0RTT:
+ return protocol.Encryption0RTT
+ default:
+ panic("can't determine encryption level")
+ }
+}
+
+func (p *longHeaderPacket) IsAckEliciting() bool { return ackhandler.HasAckElicitingFrames(p.frames) }
+
+func (p *longHeaderPacket) ToAckHandlerPacket(now time.Time, q *retransmissionQueue) *ackhandler.Packet {
+ largestAcked := protocol.InvalidPacketNumber
+ if p.ack != nil {
+ largestAcked = p.ack.LargestAcked()
+ }
+ encLevel := p.EncryptionLevel()
+ for i := range p.frames {
+ if p.frames[i].OnLost != nil {
+ continue
+ }
+ //nolint:exhaustive // Short header packets are handled separately.
+ switch encLevel {
+ case protocol.EncryptionInitial:
+ p.frames[i].OnLost = q.AddInitial
+ case protocol.EncryptionHandshake:
+ p.frames[i].OnLost = q.AddHandshake
+ case protocol.Encryption0RTT:
+ p.frames[i].OnLost = q.AddAppData
+ }
+ }
+
+ ap := ackhandler.GetPacket()
+ ap.PacketNumber = p.header.PacketNumber
+ ap.LargestAcked = largestAcked
+ ap.Frames = p.frames
+ ap.Length = p.length
+ ap.EncryptionLevel = encLevel
+ ap.SendTime = now
+ ap.IsPathMTUProbePacket = p.isMTUProbePacket
+ return ap
+}
+
+func getMaxPacketSize(addr net.Addr) protocol.ByteCount {
+ maxSize := protocol.ByteCount(protocol.MinInitialPacketSize)
+ // If this is not a UDP address, we don't know anything about the MTU.
+ // Use the minimum size of an Initial packet as the max packet size.
+ if udpAddr, ok := addr.(*net.UDPAddr); ok {
+ if utils.IsIPv4(udpAddr.IP) {
+ maxSize = protocol.InitialPacketSizeIPv4
+ } else {
+ maxSize = protocol.InitialPacketSizeIPv6
+ }
+ }
+ return maxSize
+}
+
+type packetNumberManager interface {
+ PeekPacketNumber(protocol.EncryptionLevel) (protocol.PacketNumber, protocol.PacketNumberLen)
+ PopPacketNumber(protocol.EncryptionLevel) protocol.PacketNumber
+}
+
+type sealingManager interface {
+ GetInitialSealer() (handshake.LongHeaderSealer, error)
+ GetHandshakeSealer() (handshake.LongHeaderSealer, error)
+ Get0RTTSealer() (handshake.LongHeaderSealer, error)
+ Get1RTTSealer() (handshake.ShortHeaderSealer, error)
+}
+
+type frameSource interface {
+ HasData() bool
+ AppendStreamFrames([]*ackhandler.Frame, protocol.ByteCount, protocol.VersionNumber) ([]*ackhandler.Frame, protocol.ByteCount)
+ AppendControlFrames([]*ackhandler.Frame, protocol.ByteCount, protocol.VersionNumber) ([]*ackhandler.Frame, protocol.ByteCount)
+}
+
+type ackFrameSource interface {
+ GetAckFrame(encLevel protocol.EncryptionLevel, onlyIfQueued bool) *wire.AckFrame
+}
+
+type packetPacker struct {
+ srcConnID protocol.ConnectionID
+ getDestConnID func() protocol.ConnectionID
+
+ perspective protocol.Perspective
+ cryptoSetup sealingManager
+
+ initialStream cryptoStream
+ handshakeStream cryptoStream
+
+ token []byte
+
+ pnManager packetNumberManager
+ framer frameSource
+ acks ackFrameSource
+ datagramQueue *datagramQueue
+ retransmissionQueue *retransmissionQueue
+
+ maxPacketSize protocol.ByteCount
+ numNonAckElicitingAcks int
+}
+
+var _ packer = &packetPacker{}
+
+func newPacketPacker(srcConnID protocol.ConnectionID, getDestConnID func() protocol.ConnectionID, initialStream cryptoStream, handshakeStream cryptoStream, packetNumberManager packetNumberManager, retransmissionQueue *retransmissionQueue, remoteAddr net.Addr, cryptoSetup sealingManager, framer frameSource, acks ackFrameSource, datagramQueue *datagramQueue, perspective protocol.Perspective) *packetPacker {
+ return &packetPacker{
+ cryptoSetup: cryptoSetup,
+ getDestConnID: getDestConnID,
+ srcConnID: srcConnID,
+ initialStream: initialStream,
+ handshakeStream: handshakeStream,
+ retransmissionQueue: retransmissionQueue,
+ datagramQueue: datagramQueue,
+ perspective: perspective,
+ framer: framer,
+ acks: acks,
+ pnManager: packetNumberManager,
+ maxPacketSize: getMaxPacketSize(remoteAddr),
+ }
+}
+
+// PackConnectionClose packs a packet that closes the connection with a transport error.
+func (p *packetPacker) PackConnectionClose(e *qerr.TransportError, v protocol.VersionNumber) (*coalescedPacket, error) {
+ var reason string
+ // don't send details of crypto errors
+ if !e.ErrorCode.IsCryptoError() {
+ reason = e.ErrorMessage
+ }
+ return p.packConnectionClose(false, uint64(e.ErrorCode), e.FrameType, reason, v)
+}
+
+// PackApplicationClose packs a packet that closes the connection with an application error.
+func (p *packetPacker) PackApplicationClose(e *qerr.ApplicationError, v protocol.VersionNumber) (*coalescedPacket, error) {
+ return p.packConnectionClose(true, uint64(e.ErrorCode), 0, e.ErrorMessage, v)
+}
+
+func (p *packetPacker) packConnectionClose(
+ isApplicationError bool,
+ errorCode uint64,
+ frameType uint64,
+ reason string,
+ v protocol.VersionNumber,
+) (*coalescedPacket, error) {
+ var sealers [4]sealer
+ var hdrs [3]*wire.ExtendedHeader
+ var payloads [4]payload
+ var size protocol.ByteCount
+ var connID protocol.ConnectionID
+ var oneRTTPacketNumber protocol.PacketNumber
+ var oneRTTPacketNumberLen protocol.PacketNumberLen
+ var keyPhase protocol.KeyPhaseBit // only set for 1-RTT
+ var numLongHdrPackets uint8
+ encLevels := [4]protocol.EncryptionLevel{protocol.EncryptionInitial, protocol.EncryptionHandshake, protocol.Encryption0RTT, protocol.Encryption1RTT}
+ for i, encLevel := range encLevels {
+ if p.perspective == protocol.PerspectiveServer && encLevel == protocol.Encryption0RTT {
+ continue
+ }
+ ccf := &wire.ConnectionCloseFrame{
+ IsApplicationError: isApplicationError,
+ ErrorCode: errorCode,
+ FrameType: frameType,
+ ReasonPhrase: reason,
+ }
+ // don't send application errors in Initial or Handshake packets
+ if isApplicationError && (encLevel == protocol.EncryptionInitial || encLevel == protocol.EncryptionHandshake) {
+ ccf.IsApplicationError = false
+ ccf.ErrorCode = uint64(qerr.ApplicationErrorErrorCode)
+ ccf.ReasonPhrase = ""
+ }
+ pl := payload{
+ frames: []*ackhandler.Frame{{Frame: ccf}},
+ length: ccf.Length(v),
+ }
+
+ var sealer sealer
+ var err error
+ switch encLevel {
+ case protocol.EncryptionInitial:
+ sealer, err = p.cryptoSetup.GetInitialSealer()
+ case protocol.EncryptionHandshake:
+ sealer, err = p.cryptoSetup.GetHandshakeSealer()
+ case protocol.Encryption0RTT:
+ sealer, err = p.cryptoSetup.Get0RTTSealer()
+ case protocol.Encryption1RTT:
+ var s handshake.ShortHeaderSealer
+ s, err = p.cryptoSetup.Get1RTTSealer()
+ if err == nil {
+ keyPhase = s.KeyPhase()
+ }
+ sealer = s
+ }
+ if err == handshake.ErrKeysNotYetAvailable || err == handshake.ErrKeysDropped {
+ continue
+ }
+ if err != nil {
+ return nil, err
+ }
+ sealers[i] = sealer
+ var hdr *wire.ExtendedHeader
+ if encLevel == protocol.Encryption1RTT {
+ connID = p.getDestConnID()
+ oneRTTPacketNumber, oneRTTPacketNumberLen = p.pnManager.PeekPacketNumber(protocol.Encryption1RTT)
+ size += p.shortHeaderPacketLength(connID, oneRTTPacketNumberLen, pl)
+ } else {
+ hdr = p.getLongHeader(encLevel, v)
+ hdrs[i] = hdr
+ size += p.longHeaderPacketLength(hdr, pl, v) + protocol.ByteCount(sealer.Overhead())
+ numLongHdrPackets++
+ }
+ payloads[i] = pl
+ }
+ buffer := getPacketBuffer()
+ packet := &coalescedPacket{
+ buffer: buffer,
+ longHdrPackets: make([]*longHeaderPacket, 0, numLongHdrPackets),
+ }
+ for i, encLevel := range encLevels {
+ if sealers[i] == nil {
+ continue
+ }
+ var paddingLen protocol.ByteCount
+ if encLevel == protocol.EncryptionInitial {
+ paddingLen = p.initialPaddingLen(payloads[i].frames, size)
+ }
+ if encLevel == protocol.Encryption1RTT {
+ ap, ack, err := p.appendShortHeaderPacket(buffer, connID, oneRTTPacketNumber, oneRTTPacketNumberLen, keyPhase, payloads[i], paddingLen, sealers[i], false, v)
+ if err != nil {
+ return nil, err
+ }
+ packet.shortHdrPacket = &shortHeaderPacket{
+ Packet: ap,
+ DestConnID: connID,
+ Ack: ack,
+ PacketNumberLen: oneRTTPacketNumberLen,
+ KeyPhase: keyPhase,
+ }
+ } else {
+ longHdrPacket, err := p.appendLongHeaderPacket(buffer, hdrs[i], payloads[i], paddingLen, encLevel, sealers[i], v)
+ if err != nil {
+ return nil, err
+ }
+ packet.longHdrPackets = append(packet.longHdrPackets, longHdrPacket)
+ }
+ }
+ return packet, nil
+}
+
+// longHeaderPacketLength calculates the length of a serialized long header packet.
+// It takes into account that packets that have a tiny payload need to be padded,
+// such that len(payload) + packet number len >= 4 + AEAD overhead
+func (p *packetPacker) longHeaderPacketLength(hdr *wire.ExtendedHeader, pl payload, v protocol.VersionNumber) protocol.ByteCount {
+ var paddingLen protocol.ByteCount
+ pnLen := protocol.ByteCount(hdr.PacketNumberLen)
+ if pl.length < 4-pnLen {
+ paddingLen = 4 - pnLen - pl.length
+ }
+ return hdr.GetLength(v) + pl.length + paddingLen
+}
+
+// shortHeaderPacketLength calculates the length of a serialized short header packet.
+// It takes into account that packets that have a tiny payload need to be padded,
+// such that len(payload) + packet number len >= 4 + AEAD overhead
+func (p *packetPacker) shortHeaderPacketLength(connID protocol.ConnectionID, pnLen protocol.PacketNumberLen, pl payload) protocol.ByteCount {
+ var paddingLen protocol.ByteCount
+ if pl.length < 4-protocol.ByteCount(pnLen) {
+ paddingLen = 4 - protocol.ByteCount(pnLen) - pl.length
+ }
+ return wire.ShortHeaderLen(connID, pnLen) + pl.length + paddingLen
+}
+
+// size is the expected size of the packet, if no padding was applied.
+func (p *packetPacker) initialPaddingLen(frames []*ackhandler.Frame, size protocol.ByteCount) protocol.ByteCount {
+ // For the server, only ack-eliciting Initial packets need to be padded.
+ if p.perspective == protocol.PerspectiveServer && !ackhandler.HasAckElicitingFrames(frames) {
+ return 0
+ }
+ if size >= p.maxPacketSize {
+ return 0
+ }
+ return p.maxPacketSize - size
+}
+
+// PackCoalescedPacket packs a new packet.
+// It packs an Initial / Handshake if there is data to send in these packet number spaces.
+// It should only be called before the handshake is confirmed.
+func (p *packetPacker) PackCoalescedPacket(onlyAck bool, v protocol.VersionNumber) (*coalescedPacket, error) {
+ maxPacketSize := p.maxPacketSize
+ if p.perspective == protocol.PerspectiveClient {
+ maxPacketSize = protocol.MinInitialPacketSize
+ }
+ var (
+ initialHdr, handshakeHdr, zeroRTTHdr *wire.ExtendedHeader
+ initialPayload, handshakePayload, zeroRTTPayload, oneRTTPayload payload
+ oneRTTPacketNumber protocol.PacketNumber
+ oneRTTPacketNumberLen protocol.PacketNumberLen
+ )
+ // Try packing an Initial packet.
+ initialSealer, err := p.cryptoSetup.GetInitialSealer()
+ if err != nil && err != handshake.ErrKeysDropped {
+ return nil, err
+ }
+ var size protocol.ByteCount
+ if initialSealer != nil {
+ initialHdr, initialPayload = p.maybeGetCryptoPacket(maxPacketSize-protocol.ByteCount(initialSealer.Overhead()), protocol.EncryptionInitial, onlyAck, true, v)
+ if initialPayload.length > 0 {
+ size += p.longHeaderPacketLength(initialHdr, initialPayload, v) + protocol.ByteCount(initialSealer.Overhead())
+ }
+ }
+
+ // Add a Handshake packet.
+ var handshakeSealer sealer
+ if (onlyAck && size == 0) || (!onlyAck && size < maxPacketSize-protocol.MinCoalescedPacketSize) {
+ var err error
+ handshakeSealer, err = p.cryptoSetup.GetHandshakeSealer()
+ if err != nil && err != handshake.ErrKeysDropped && err != handshake.ErrKeysNotYetAvailable {
+ return nil, err
+ }
+ if handshakeSealer != nil {
+ handshakeHdr, handshakePayload = p.maybeGetCryptoPacket(maxPacketSize-size-protocol.ByteCount(handshakeSealer.Overhead()), protocol.EncryptionHandshake, onlyAck, size == 0, v)
+ if handshakePayload.length > 0 {
+ s := p.longHeaderPacketLength(handshakeHdr, handshakePayload, v) + protocol.ByteCount(handshakeSealer.Overhead())
+ size += s
+ }
+ }
+ }
+
+ // Add a 0-RTT / 1-RTT packet.
+ var zeroRTTSealer sealer
+ var oneRTTSealer handshake.ShortHeaderSealer
+ var connID protocol.ConnectionID
+ var kp protocol.KeyPhaseBit
+ if (onlyAck && size == 0) || (!onlyAck && size < maxPacketSize-protocol.MinCoalescedPacketSize) {
+ var err error
+ oneRTTSealer, err = p.cryptoSetup.Get1RTTSealer()
+ if err != nil && err != handshake.ErrKeysDropped && err != handshake.ErrKeysNotYetAvailable {
+ return nil, err
+ }
+ if err == nil { // 1-RTT
+ kp = oneRTTSealer.KeyPhase()
+ connID = p.getDestConnID()
+ oneRTTPacketNumber, oneRTTPacketNumberLen = p.pnManager.PeekPacketNumber(protocol.Encryption1RTT)
+ hdrLen := wire.ShortHeaderLen(connID, oneRTTPacketNumberLen)
+ oneRTTPayload = p.maybeGetShortHeaderPacket(oneRTTSealer, hdrLen, maxPacketSize-size, onlyAck, size == 0, v)
+ if oneRTTPayload.length > 0 {
+ size += p.shortHeaderPacketLength(connID, oneRTTPacketNumberLen, oneRTTPayload) + protocol.ByteCount(oneRTTSealer.Overhead())
+ }
+ } else if p.perspective == protocol.PerspectiveClient { // 0-RTT
+ var err error
+ zeroRTTSealer, err = p.cryptoSetup.Get0RTTSealer()
+ if err != nil && err != handshake.ErrKeysDropped && err != handshake.ErrKeysNotYetAvailable {
+ return nil, err
+ }
+ if zeroRTTSealer != nil {
+ zeroRTTHdr, zeroRTTPayload = p.maybeGetAppDataPacketFor0RTT(zeroRTTSealer, maxPacketSize-size, v)
+ if zeroRTTPayload.length > 0 {
+ size += p.longHeaderPacketLength(zeroRTTHdr, zeroRTTPayload, v) + protocol.ByteCount(zeroRTTSealer.Overhead())
+ }
+ }
+ }
+ }
+
+ if initialPayload.length == 0 && handshakePayload.length == 0 && zeroRTTPayload.length == 0 && oneRTTPayload.length == 0 {
+ return nil, nil
+ }
+
+ buffer := getPacketBuffer()
+ packet := &coalescedPacket{
+ buffer: buffer,
+ longHdrPackets: make([]*longHeaderPacket, 0, 3),
+ }
+ if initialPayload.length > 0 {
+ padding := p.initialPaddingLen(initialPayload.frames, size)
+ cont, err := p.appendLongHeaderPacket(buffer, initialHdr, initialPayload, padding, protocol.EncryptionInitial, initialSealer, v)
+ if err != nil {
+ return nil, err
+ }
+ packet.longHdrPackets = append(packet.longHdrPackets, cont)
+ }
+ if handshakePayload.length > 0 {
+ cont, err := p.appendLongHeaderPacket(buffer, handshakeHdr, handshakePayload, 0, protocol.EncryptionHandshake, handshakeSealer, v)
+ if err != nil {
+ return nil, err
+ }
+ packet.longHdrPackets = append(packet.longHdrPackets, cont)
+ }
+ if zeroRTTPayload.length > 0 {
+ longHdrPacket, err := p.appendLongHeaderPacket(buffer, zeroRTTHdr, zeroRTTPayload, 0, protocol.Encryption0RTT, zeroRTTSealer, v)
+ if err != nil {
+ return nil, err
+ }
+ packet.longHdrPackets = append(packet.longHdrPackets, longHdrPacket)
+ } else if oneRTTPayload.length > 0 {
+ ap, ack, err := p.appendShortHeaderPacket(buffer, connID, oneRTTPacketNumber, oneRTTPacketNumberLen, kp, oneRTTPayload, 0, oneRTTSealer, false, v)
+ if err != nil {
+ return nil, err
+ }
+ packet.shortHdrPacket = &shortHeaderPacket{
+ Packet: ap,
+ DestConnID: connID,
+ Ack: ack,
+ PacketNumberLen: oneRTTPacketNumberLen,
+ KeyPhase: kp,
+ }
+ }
+ return packet, nil
+}
+
+// PackPacket packs a packet in the application data packet number space.
+// It should be called after the handshake is confirmed.
+func (p *packetPacker) PackPacket(onlyAck bool, now time.Time, v protocol.VersionNumber) (shortHeaderPacket, *packetBuffer, error) {
+ sealer, err := p.cryptoSetup.Get1RTTSealer()
+ if err != nil {
+ return shortHeaderPacket{}, nil, err
+ }
+ pn, pnLen := p.pnManager.PeekPacketNumber(protocol.Encryption1RTT)
+ connID := p.getDestConnID()
+ hdrLen := wire.ShortHeaderLen(connID, pnLen)
+ pl := p.maybeGetShortHeaderPacket(sealer, hdrLen, p.maxPacketSize, onlyAck, true, v)
+ if pl.length == 0 {
+ return shortHeaderPacket{}, nil, errNothingToPack
+ }
+ kp := sealer.KeyPhase()
+ buffer := getPacketBuffer()
+ ap, ack, err := p.appendShortHeaderPacket(buffer, connID, pn, pnLen, kp, pl, 0, sealer, false, v)
+ if err != nil {
+ return shortHeaderPacket{}, nil, err
+ }
+ return shortHeaderPacket{
+ Packet: ap,
+ DestConnID: connID,
+ Ack: ack,
+ PacketNumberLen: pnLen,
+ KeyPhase: kp,
+ }, buffer, nil
+}
+
+func (p *packetPacker) maybeGetCryptoPacket(maxPacketSize protocol.ByteCount, encLevel protocol.EncryptionLevel, onlyAck, ackAllowed bool, v protocol.VersionNumber) (*wire.ExtendedHeader, payload) {
+ if onlyAck {
+ if ack := p.acks.GetAckFrame(encLevel, true); ack != nil {
+ return p.getLongHeader(encLevel, v), payload{
+ ack: ack,
+ length: ack.Length(v),
+ }
+ }
+ return nil, payload{}
+ }
+
+ var s cryptoStream
+ var hasRetransmission bool
+ //nolint:exhaustive // Initial and Handshake are the only two encryption levels here.
+ switch encLevel {
+ case protocol.EncryptionInitial:
+ s = p.initialStream
+ hasRetransmission = p.retransmissionQueue.HasInitialData()
+ case protocol.EncryptionHandshake:
+ s = p.handshakeStream
+ hasRetransmission = p.retransmissionQueue.HasHandshakeData()
+ }
+
+ hasData := s.HasData()
+ var ack *wire.AckFrame
+ if ackAllowed {
+ ack = p.acks.GetAckFrame(encLevel, !hasRetransmission && !hasData)
+ }
+ if !hasData && !hasRetransmission && ack == nil {
+ // nothing to send
+ return nil, payload{}
+ }
+
+ var pl payload
+ if ack != nil {
+ pl.ack = ack
+ pl.length = ack.Length(v)
+ maxPacketSize -= pl.length
+ }
+ hdr := p.getLongHeader(encLevel, v)
+ maxPacketSize -= hdr.GetLength(v)
+ if hasRetransmission {
+ for {
+ var f wire.Frame
+ //nolint:exhaustive // 0-RTT packets can't contain any retransmission.s
+ switch encLevel {
+ case protocol.EncryptionInitial:
+ f = p.retransmissionQueue.GetInitialFrame(maxPacketSize, v)
+ case protocol.EncryptionHandshake:
+ f = p.retransmissionQueue.GetHandshakeFrame(maxPacketSize, v)
+ }
+ if f == nil {
+ break
+ }
+ af := ackhandler.GetFrame()
+ af.Frame = f
+ pl.frames = append(pl.frames, af)
+ frameLen := f.Length(v)
+ pl.length += frameLen
+ maxPacketSize -= frameLen
+ }
+ } else if s.HasData() {
+ cf := s.PopCryptoFrame(maxPacketSize)
+ pl.frames = []*ackhandler.Frame{{Frame: cf}}
+ pl.length += cf.Length(v)
+ }
+ return hdr, pl
+}
+
+func (p *packetPacker) maybeGetAppDataPacketFor0RTT(sealer sealer, maxPacketSize protocol.ByteCount, v protocol.VersionNumber) (*wire.ExtendedHeader, payload) {
+ if p.perspective != protocol.PerspectiveClient {
+ return nil, payload{}
+ }
+
+ hdr := p.getLongHeader(protocol.Encryption0RTT, v)
+ maxPayloadSize := maxPacketSize - hdr.GetLength(v) - protocol.ByteCount(sealer.Overhead())
+ return hdr, p.maybeGetAppDataPacket(maxPayloadSize, false, false, v)
+}
+
+func (p *packetPacker) maybeGetShortHeaderPacket(sealer handshake.ShortHeaderSealer, hdrLen protocol.ByteCount, maxPacketSize protocol.ByteCount, onlyAck, ackAllowed bool, v protocol.VersionNumber) payload {
+ maxPayloadSize := maxPacketSize - hdrLen - protocol.ByteCount(sealer.Overhead())
+ return p.maybeGetAppDataPacket(maxPayloadSize, onlyAck, ackAllowed, v)
+}
+
+func (p *packetPacker) maybeGetAppDataPacket(maxPayloadSize protocol.ByteCount, onlyAck, ackAllowed bool, v protocol.VersionNumber) payload {
+ pl := p.composeNextPacket(maxPayloadSize, onlyAck, ackAllowed, v)
+
+ // check if we have anything to send
+ if len(pl.frames) == 0 {
+ if pl.ack == nil {
+ return payload{}
+ }
+ // the packet only contains an ACK
+ if p.numNonAckElicitingAcks >= protocol.MaxNonAckElicitingAcks {
+ ping := &wire.PingFrame{}
+ // don't retransmit the PING frame when it is lost
+ af := ackhandler.GetFrame()
+ af.Frame = ping
+ af.OnLost = func(wire.Frame) {}
+ pl.frames = append(pl.frames, af)
+ pl.length += ping.Length(v)
+ p.numNonAckElicitingAcks = 0
+ } else {
+ p.numNonAckElicitingAcks++
+ }
+ } else {
+ p.numNonAckElicitingAcks = 0
+ }
+ return pl
+}
+
+func (p *packetPacker) composeNextPacket(maxFrameSize protocol.ByteCount, onlyAck, ackAllowed bool, v protocol.VersionNumber) payload {
+ if onlyAck {
+ if ack := p.acks.GetAckFrame(protocol.Encryption1RTT, true); ack != nil {
+ return payload{
+ ack: ack,
+ length: ack.Length(v),
+ }
+ }
+ return payload{}
+ }
+
+ pl := payload{frames: make([]*ackhandler.Frame, 0, 1)}
+
+ hasData := p.framer.HasData()
+ hasRetransmission := p.retransmissionQueue.HasAppData()
+
+ var hasAck bool
+ if ackAllowed {
+ if ack := p.acks.GetAckFrame(protocol.Encryption1RTT, !hasRetransmission && !hasData); ack != nil {
+ pl.ack = ack
+ pl.length += ack.Length(v)
+ hasAck = true
+ }
+ }
+
+ if p.datagramQueue != nil {
+ if f := p.datagramQueue.Peek(); f != nil {
+ size := f.Length(v)
+ if size <= maxFrameSize-pl.length {
+ af := ackhandler.GetFrame()
+ af.Frame = f
+ // set it to a no-op. Then we won't set the default callback, which would retransmit the frame.
+ af.OnLost = func(wire.Frame) {}
+ pl.frames = append(pl.frames, af)
+ pl.length += size
+ p.datagramQueue.Pop()
+ }
+ }
+ }
+
+ if hasAck && !hasData && !hasRetransmission {
+ return pl
+ }
+
+ if hasRetransmission {
+ for {
+ remainingLen := maxFrameSize - pl.length
+ if remainingLen < protocol.MinStreamFrameSize {
+ break
+ }
+ f := p.retransmissionQueue.GetAppDataFrame(remainingLen, v)
+ if f == nil {
+ break
+ }
+ af := ackhandler.GetFrame()
+ af.Frame = f
+ pl.frames = append(pl.frames, af)
+ pl.length += f.Length(v)
+ }
+ }
+
+ if hasData {
+ var lengthAdded protocol.ByteCount
+ pl.frames, lengthAdded = p.framer.AppendControlFrames(pl.frames, maxFrameSize-pl.length, v)
+ pl.length += lengthAdded
+
+ pl.frames, lengthAdded = p.framer.AppendStreamFrames(pl.frames, maxFrameSize-pl.length, v)
+ pl.length += lengthAdded
+ }
+ return pl
+}
+
+func (p *packetPacker) MaybePackProbePacket(encLevel protocol.EncryptionLevel, v protocol.VersionNumber) (*coalescedPacket, error) {
+ if encLevel == protocol.Encryption1RTT {
+ s, err := p.cryptoSetup.Get1RTTSealer()
+ if err != nil {
+ return nil, err
+ }
+ kp := s.KeyPhase()
+ connID := p.getDestConnID()
+ pn, pnLen := p.pnManager.PeekPacketNumber(protocol.Encryption1RTT)
+ hdrLen := wire.ShortHeaderLen(connID, pnLen)
+ pl := p.maybeGetAppDataPacket(p.maxPacketSize-protocol.ByteCount(s.Overhead())-hdrLen, false, true, v)
+ if pl.length == 0 {
+ return nil, nil
+ }
+ buffer := getPacketBuffer()
+ packet := &coalescedPacket{buffer: buffer}
+ ap, ack, err := p.appendShortHeaderPacket(buffer, connID, pn, pnLen, kp, pl, 0, s, false, v)
+ if err != nil {
+ return nil, err
+ }
+ packet.shortHdrPacket = &shortHeaderPacket{
+ Packet: ap,
+ DestConnID: connID,
+ Ack: ack,
+ PacketNumberLen: pnLen,
+ KeyPhase: kp,
+ }
+ return packet, nil
+ }
+
+ var hdr *wire.ExtendedHeader
+ var pl payload
+ var sealer handshake.LongHeaderSealer
+ //nolint:exhaustive // Probe packets are never sent for 0-RTT.
+ switch encLevel {
+ case protocol.EncryptionInitial:
+ var err error
+ sealer, err = p.cryptoSetup.GetInitialSealer()
+ if err != nil {
+ return nil, err
+ }
+ hdr, pl = p.maybeGetCryptoPacket(p.maxPacketSize-protocol.ByteCount(sealer.Overhead()), protocol.EncryptionInitial, false, true, v)
+ case protocol.EncryptionHandshake:
+ var err error
+ sealer, err = p.cryptoSetup.GetHandshakeSealer()
+ if err != nil {
+ return nil, err
+ }
+ hdr, pl = p.maybeGetCryptoPacket(p.maxPacketSize-protocol.ByteCount(sealer.Overhead()), protocol.EncryptionHandshake, false, true, v)
+ default:
+ panic("unknown encryption level")
+ }
+
+ if pl.length == 0 {
+ return nil, nil
+ }
+ buffer := getPacketBuffer()
+ packet := &coalescedPacket{buffer: buffer}
+ size := p.longHeaderPacketLength(hdr, pl, v) + protocol.ByteCount(sealer.Overhead())
+ var padding protocol.ByteCount
+ if encLevel == protocol.EncryptionInitial {
+ padding = p.initialPaddingLen(pl.frames, size)
+ }
+
+ longHdrPacket, err := p.appendLongHeaderPacket(buffer, hdr, pl, padding, encLevel, sealer, v)
+ if err != nil {
+ return nil, err
+ }
+ packet.longHdrPackets = []*longHeaderPacket{longHdrPacket}
+ return packet, nil
+}
+
+func (p *packetPacker) PackMTUProbePacket(ping ackhandler.Frame, size protocol.ByteCount, now time.Time, v protocol.VersionNumber) (shortHeaderPacket, *packetBuffer, error) {
+ pl := payload{
+ frames: []*ackhandler.Frame{&ping},
+ length: ping.Length(v),
+ }
+ buffer := getPacketBuffer()
+ s, err := p.cryptoSetup.Get1RTTSealer()
+ if err != nil {
+ return shortHeaderPacket{}, nil, err
+ }
+ connID := p.getDestConnID()
+ pn, pnLen := p.pnManager.PeekPacketNumber(protocol.Encryption1RTT)
+ padding := size - p.shortHeaderPacketLength(connID, pnLen, pl) - protocol.ByteCount(s.Overhead())
+ kp := s.KeyPhase()
+ ap, ack, err := p.appendShortHeaderPacket(buffer, connID, pn, pnLen, kp, pl, padding, s, true, v)
+ if err != nil {
+ return shortHeaderPacket{}, nil, err
+ }
+ return shortHeaderPacket{
+ Packet: ap,
+ DestConnID: connID,
+ Ack: ack,
+ PacketNumberLen: pnLen,
+ KeyPhase: kp,
+ }, buffer, nil
+}
+
+func (p *packetPacker) getLongHeader(encLevel protocol.EncryptionLevel, v protocol.VersionNumber) *wire.ExtendedHeader {
+ pn, pnLen := p.pnManager.PeekPacketNumber(encLevel)
+ hdr := &wire.ExtendedHeader{
+ PacketNumber: pn,
+ PacketNumberLen: pnLen,
+ }
+ hdr.Version = v
+ hdr.SrcConnectionID = p.srcConnID
+ hdr.DestConnectionID = p.getDestConnID()
+
+ //nolint:exhaustive // 1-RTT packets are not long header packets.
+ switch encLevel {
+ case protocol.EncryptionInitial:
+ hdr.Type = protocol.PacketTypeInitial
+ hdr.Token = p.token
+ case protocol.EncryptionHandshake:
+ hdr.Type = protocol.PacketTypeHandshake
+ case protocol.Encryption0RTT:
+ hdr.Type = protocol.PacketType0RTT
+ }
+ return hdr
+}
+
+func (p *packetPacker) appendLongHeaderPacket(buffer *packetBuffer, header *wire.ExtendedHeader, pl payload, padding protocol.ByteCount, encLevel protocol.EncryptionLevel, sealer sealer, v protocol.VersionNumber) (*longHeaderPacket, error) {
+ var paddingLen protocol.ByteCount
+ pnLen := protocol.ByteCount(header.PacketNumberLen)
+ if pl.length < 4-pnLen {
+ paddingLen = 4 - pnLen - pl.length
+ }
+ paddingLen += padding
+ header.Length = pnLen + protocol.ByteCount(sealer.Overhead()) + pl.length + paddingLen
+
+ startLen := len(buffer.Data)
+ raw := buffer.Data[startLen:]
+ raw, err := header.Append(raw, v)
+ if err != nil {
+ return nil, err
+ }
+ payloadOffset := protocol.ByteCount(len(raw))
+
+ pn := p.pnManager.PopPacketNumber(encLevel)
+ if pn != header.PacketNumber {
+ return nil, errors.New("packetPacker BUG: Peeked and Popped packet numbers do not match")
+ }
+
+ raw, err = p.appendPacketPayload(raw, pl, paddingLen, v)
+ if err != nil {
+ return nil, err
+ }
+ raw = p.encryptPacket(raw, sealer, pn, payloadOffset, pnLen)
+ buffer.Data = buffer.Data[:len(buffer.Data)+len(raw)]
+
+ return &longHeaderPacket{
+ header: header,
+ ack: pl.ack,
+ frames: pl.frames,
+ length: protocol.ByteCount(len(raw)),
+ }, nil
+}
+
+func (p *packetPacker) appendShortHeaderPacket(
+ buffer *packetBuffer,
+ connID protocol.ConnectionID,
+ pn protocol.PacketNumber,
+ pnLen protocol.PacketNumberLen,
+ kp protocol.KeyPhaseBit,
+ pl payload,
+ padding protocol.ByteCount,
+ sealer sealer,
+ isMTUProbePacket bool,
+ v protocol.VersionNumber,
+) (*ackhandler.Packet, *wire.AckFrame, error) {
+ var paddingLen protocol.ByteCount
+ if pl.length < 4-protocol.ByteCount(pnLen) {
+ paddingLen = 4 - protocol.ByteCount(pnLen) - pl.length
+ }
+ paddingLen += padding
+
+ startLen := len(buffer.Data)
+ raw := buffer.Data[startLen:]
+ raw, err := wire.AppendShortHeader(raw, connID, pn, pnLen, kp)
+ if err != nil {
+ return nil, nil, err
+ }
+ payloadOffset := protocol.ByteCount(len(raw))
+
+ if pn != p.pnManager.PopPacketNumber(protocol.Encryption1RTT) {
+ return nil, nil, errors.New("packetPacker BUG: Peeked and Popped packet numbers do not match")
+ }
+
+ raw, err = p.appendPacketPayload(raw, pl, paddingLen, v)
+ if err != nil {
+ return nil, nil, err
+ }
+ if !isMTUProbePacket {
+ if size := protocol.ByteCount(len(raw) + sealer.Overhead()); size > p.maxPacketSize {
+ return nil, nil, fmt.Errorf("PacketPacker BUG: packet too large (%d bytes, allowed %d bytes)", size, p.maxPacketSize)
+ }
+ }
+ raw = p.encryptPacket(raw, sealer, pn, payloadOffset, protocol.ByteCount(pnLen))
+ buffer.Data = buffer.Data[:len(buffer.Data)+len(raw)]
+
+ // create the ackhandler.Packet
+ largestAcked := protocol.InvalidPacketNumber
+ if pl.ack != nil {
+ largestAcked = pl.ack.LargestAcked()
+ }
+ for i := range pl.frames {
+ if pl.frames[i].OnLost != nil {
+ continue
+ }
+ pl.frames[i].OnLost = p.retransmissionQueue.AddAppData
+ }
+
+ ap := ackhandler.GetPacket()
+ ap.PacketNumber = pn
+ ap.LargestAcked = largestAcked
+ ap.Frames = pl.frames
+ ap.Length = protocol.ByteCount(len(raw))
+ ap.EncryptionLevel = protocol.Encryption1RTT
+ ap.SendTime = time.Now()
+ ap.IsPathMTUProbePacket = isMTUProbePacket
+
+ return ap, pl.ack, nil
+}
+
+func (p *packetPacker) appendPacketPayload(raw []byte, pl payload, paddingLen protocol.ByteCount, v protocol.VersionNumber) ([]byte, error) {
+ payloadOffset := len(raw)
+ if pl.ack != nil {
+ var err error
+ raw, err = pl.ack.Append(raw, v)
+ if err != nil {
+ return nil, err
+ }
+ }
+ if paddingLen > 0 {
+ raw = append(raw, make([]byte, paddingLen)...)
+ }
+ for _, frame := range pl.frames {
+ var err error
+ raw, err = frame.Append(raw, v)
+ if err != nil {
+ return nil, err
+ }
+ }
+
+ if payloadSize := protocol.ByteCount(len(raw)-payloadOffset) - paddingLen; payloadSize != pl.length {
+ return nil, fmt.Errorf("PacketPacker BUG: payload size inconsistent (expected %d, got %d bytes)", pl.length, payloadSize)
+ }
+ return raw, nil
+}
+
+func (p *packetPacker) encryptPacket(raw []byte, sealer sealer, pn protocol.PacketNumber, payloadOffset, pnLen protocol.ByteCount) []byte {
+ _ = sealer.Seal(raw[payloadOffset:payloadOffset], raw[payloadOffset:], pn, raw[:payloadOffset])
+ raw = raw[:len(raw)+sealer.Overhead()]
+ // apply header protection
+ pnOffset := payloadOffset - pnLen
+ sealer.EncryptHeader(raw[pnOffset+4:pnOffset+4+16], &raw[0], raw[pnOffset:payloadOffset])
+ return raw
+}
+
+func (p *packetPacker) SetToken(token []byte) {
+ p.token = token
+}
+
+// When a higher MTU is discovered, use it.
+func (p *packetPacker) SetMaxPacketSize(s protocol.ByteCount) {
+ p.maxPacketSize = s
+}
+
+// If the peer sets a max_packet_size that's smaller than the size we're currently using,
+// we need to reduce the size of packets we send.
+func (p *packetPacker) HandleTransportParameters(params *wire.TransportParameters) {
+ if params.MaxUDPPayloadSize != 0 {
+ p.maxPacketSize = utils.Min(p.maxPacketSize, params.MaxUDPPayloadSize)
+ }
+}
diff --git a/vendor/github.com/lucas-clemente/quic-go/packet_unpacker.go b/vendor/github.com/quic-go/quic-go/packet_unpacker.go
similarity index 55%
rename from vendor/github.com/lucas-clemente/quic-go/packet_unpacker.go
rename to vendor/github.com/quic-go/quic-go/packet_unpacker.go
index f70d8d075..103524c7d 100644
--- a/vendor/github.com/lucas-clemente/quic-go/packet_unpacker.go
+++ b/vendor/github.com/quic-go/quic-go/packet_unpacker.go
@@ -5,9 +5,10 @@ import (
"fmt"
"time"
- "github.com/lucas-clemente/quic-go/internal/handshake"
- "github.com/lucas-clemente/quic-go/internal/protocol"
- "github.com/lucas-clemente/quic-go/internal/wire"
+ "github.com/quic-go/quic-go/internal/handshake"
+ "github.com/quic-go/quic-go/internal/protocol"
+ "github.com/quic-go/quic-go/internal/qerr"
+ "github.com/quic-go/quic-go/internal/wire"
)
type headerDecryptor interface {
@@ -27,7 +28,6 @@ func (e *headerParseError) Error() string {
}
type unpackedPacket struct {
- packetNumber protocol.PacketNumber // the decoded packet number
hdr *wire.ExtendedHeader
encryptionLevel protocol.EncryptionLevel
data []byte
@@ -37,22 +37,23 @@ type unpackedPacket struct {
type packetUnpacker struct {
cs handshake.CryptoSetup
- version protocol.VersionNumber
+ shortHdrConnIDLen int
}
var _ unpacker = &packetUnpacker{}
-func newPacketUnpacker(cs handshake.CryptoSetup, version protocol.VersionNumber) unpacker {
+func newPacketUnpacker(cs handshake.CryptoSetup, shortHdrConnIDLen int) *packetUnpacker {
return &packetUnpacker{
- cs: cs,
- version: version,
+ cs: cs,
+ shortHdrConnIDLen: shortHdrConnIDLen,
}
}
+// UnpackLongHeader unpacks a Long Header packet.
// If the reserved bits are invalid, the error is wire.ErrInvalidReservedBits.
// If any other error occurred when parsing the header, the error is of type headerParseError.
// If decrypting the payload fails for any reason, the error is the error returned by the AEAD.
-func (u *packetUnpacker) Unpack(hdr *wire.Header, rcvTime time.Time, data []byte) (*unpackedPacket, error) {
+func (u *packetUnpacker) UnpackLongHeader(hdr *wire.Header, rcvTime time.Time, data []byte, v protocol.VersionNumber) (*unpackedPacket, error) {
var encLevel protocol.EncryptionLevel
var extHdr *wire.ExtendedHeader
var decrypted []byte
@@ -64,7 +65,7 @@ func (u *packetUnpacker) Unpack(hdr *wire.Header, rcvTime time.Time, data []byte
if err != nil {
return nil, err
}
- extHdr, decrypted, err = u.unpackLongHeaderPacket(opener, hdr, data)
+ extHdr, decrypted, err = u.unpackLongHeaderPacket(opener, hdr, data, v)
if err != nil {
return nil, err
}
@@ -74,7 +75,7 @@ func (u *packetUnpacker) Unpack(hdr *wire.Header, rcvTime time.Time, data []byte
if err != nil {
return nil, err
}
- extHdr, decrypted, err = u.unpackLongHeaderPacket(opener, hdr, data)
+ extHdr, decrypted, err = u.unpackLongHeaderPacket(opener, hdr, data, v)
if err != nil {
return nil, err
}
@@ -84,35 +85,48 @@ func (u *packetUnpacker) Unpack(hdr *wire.Header, rcvTime time.Time, data []byte
if err != nil {
return nil, err
}
- extHdr, decrypted, err = u.unpackLongHeaderPacket(opener, hdr, data)
+ extHdr, decrypted, err = u.unpackLongHeaderPacket(opener, hdr, data, v)
if err != nil {
return nil, err
}
default:
- if hdr.IsLongHeader {
- return nil, fmt.Errorf("unknown packet type: %s", hdr.Type)
- }
- encLevel = protocol.Encryption1RTT
- opener, err := u.cs.Get1RTTOpener()
- if err != nil {
- return nil, err
- }
- extHdr, decrypted, err = u.unpackShortHeaderPacket(opener, hdr, rcvTime, data)
- if err != nil {
- return nil, err
+ return nil, fmt.Errorf("unknown packet type: %s", hdr.Type)
+ }
+
+ if len(decrypted) == 0 {
+ return nil, &qerr.TransportError{
+ ErrorCode: qerr.ProtocolViolation,
+ ErrorMessage: "empty packet",
}
}
return &unpackedPacket{
hdr: extHdr,
- packetNumber: extHdr.PacketNumber,
encryptionLevel: encLevel,
data: decrypted,
}, nil
}
-func (u *packetUnpacker) unpackLongHeaderPacket(opener handshake.LongHeaderOpener, hdr *wire.Header, data []byte) (*wire.ExtendedHeader, []byte, error) {
- extHdr, parseErr := u.unpackHeader(opener, hdr, data)
+func (u *packetUnpacker) UnpackShortHeader(rcvTime time.Time, data []byte) (protocol.PacketNumber, protocol.PacketNumberLen, protocol.KeyPhaseBit, []byte, error) {
+ opener, err := u.cs.Get1RTTOpener()
+ if err != nil {
+ return 0, 0, 0, nil, err
+ }
+ pn, pnLen, kp, decrypted, err := u.unpackShortHeaderPacket(opener, rcvTime, data)
+ if err != nil {
+ return 0, 0, 0, nil, err
+ }
+ if len(decrypted) == 0 {
+ return 0, 0, 0, nil, &qerr.TransportError{
+ ErrorCode: qerr.ProtocolViolation,
+ ErrorMessage: "empty packet",
+ }
+ }
+ return pn, pnLen, kp, decrypted, nil
+}
+
+func (u *packetUnpacker) unpackLongHeaderPacket(opener handshake.LongHeaderOpener, hdr *wire.Header, data []byte, v protocol.VersionNumber) (*wire.ExtendedHeader, []byte, error) {
+ extHdr, parseErr := u.unpackLongHeader(opener, hdr, data, v)
// If the reserved bits are set incorrectly, we still need to continue unpacking.
// This avoids a timing side-channel, which otherwise might allow an attacker
// to gain information about the header encryption.
@@ -131,41 +145,57 @@ func (u *packetUnpacker) unpackLongHeaderPacket(opener handshake.LongHeaderOpene
return extHdr, decrypted, nil
}
-func (u *packetUnpacker) unpackShortHeaderPacket(
- opener handshake.ShortHeaderOpener,
- hdr *wire.Header,
- rcvTime time.Time,
- data []byte,
-) (*wire.ExtendedHeader, []byte, error) {
- extHdr, parseErr := u.unpackHeader(opener, hdr, data)
+func (u *packetUnpacker) unpackShortHeaderPacket(opener handshake.ShortHeaderOpener, rcvTime time.Time, data []byte) (protocol.PacketNumber, protocol.PacketNumberLen, protocol.KeyPhaseBit, []byte, error) {
+ l, pn, pnLen, kp, parseErr := u.unpackShortHeader(opener, data)
// If the reserved bits are set incorrectly, we still need to continue unpacking.
// This avoids a timing side-channel, which otherwise might allow an attacker
// to gain information about the header encryption.
if parseErr != nil && parseErr != wire.ErrInvalidReservedBits {
- return nil, nil, parseErr
+ return 0, 0, 0, nil, &headerParseError{parseErr}
}
- extHdr.PacketNumber = opener.DecodePacketNumber(extHdr.PacketNumber, extHdr.PacketNumberLen)
- extHdrLen := extHdr.ParsedLen()
- decrypted, err := opener.Open(data[extHdrLen:extHdrLen], data[extHdrLen:], rcvTime, extHdr.PacketNumber, extHdr.KeyPhase, data[:extHdrLen])
+ pn = opener.DecodePacketNumber(pn, pnLen)
+ decrypted, err := opener.Open(data[l:l], data[l:], rcvTime, pn, kp, data[:l])
if err != nil {
- return nil, nil, err
+ return 0, 0, 0, nil, err
}
- if parseErr != nil {
- return nil, nil, parseErr
+ return pn, pnLen, kp, decrypted, parseErr
+}
+
+func (u *packetUnpacker) unpackShortHeader(hd headerDecryptor, data []byte) (int, protocol.PacketNumber, protocol.PacketNumberLen, protocol.KeyPhaseBit, error) {
+ hdrLen := 1 /* first header byte */ + u.shortHdrConnIDLen
+ if len(data) < hdrLen+4+16 {
+ return 0, 0, 0, 0, fmt.Errorf("packet too small, expected at least 20 bytes after the header, got %d", len(data)-hdrLen)
}
- return extHdr, decrypted, nil
+ origPNBytes := make([]byte, 4)
+ copy(origPNBytes, data[hdrLen:hdrLen+4])
+ // 2. decrypt the header, assuming a 4 byte packet number
+ hd.DecryptHeader(
+ data[hdrLen+4:hdrLen+4+16],
+ &data[0],
+ data[hdrLen:hdrLen+4],
+ )
+ // 3. parse the header (and learn the actual length of the packet number)
+ l, pn, pnLen, kp, parseErr := wire.ParseShortHeader(data, u.shortHdrConnIDLen)
+ if parseErr != nil && parseErr != wire.ErrInvalidReservedBits {
+ return l, pn, pnLen, kp, parseErr
+ }
+ // 4. if the packet number is shorter than 4 bytes, replace the remaining bytes with the copy we saved earlier
+ if pnLen != protocol.PacketNumberLen4 {
+ copy(data[hdrLen+int(pnLen):hdrLen+4], origPNBytes[int(pnLen):])
+ }
+ return l, pn, pnLen, kp, parseErr
}
// The error is either nil, a wire.ErrInvalidReservedBits or of type headerParseError.
-func (u *packetUnpacker) unpackHeader(hd headerDecryptor, hdr *wire.Header, data []byte) (*wire.ExtendedHeader, error) {
- extHdr, err := unpackHeader(hd, hdr, data, u.version)
+func (u *packetUnpacker) unpackLongHeader(hd headerDecryptor, hdr *wire.Header, data []byte, v protocol.VersionNumber) (*wire.ExtendedHeader, error) {
+ extHdr, err := unpackLongHeader(hd, hdr, data, v)
if err != nil && err != wire.ErrInvalidReservedBits {
return nil, &headerParseError{err: err}
}
return extHdr, err
}
-func unpackHeader(hd headerDecryptor, hdr *wire.Header, data []byte, version protocol.VersionNumber) (*wire.ExtendedHeader, error) {
+func unpackLongHeader(hd headerDecryptor, hdr *wire.Header, data []byte, v protocol.VersionNumber) (*wire.ExtendedHeader, error) {
r := bytes.NewReader(data)
hdrLen := hdr.ParsedLen()
@@ -184,7 +214,7 @@ func unpackHeader(hd headerDecryptor, hdr *wire.Header, data []byte, version pro
data[hdrLen:hdrLen+4],
)
// 3. parse the header (and learn the actual length of the packet number)
- extHdr, parseErr := hdr.ParseExtended(r, version)
+ extHdr, parseErr := hdr.ParseExtended(r, v)
if parseErr != nil && parseErr != wire.ErrInvalidReservedBits {
return nil, parseErr
}
diff --git a/vendor/github.com/lucas-clemente/quic-go/qlog/event.go b/vendor/github.com/quic-go/quic-go/qlog/event.go
similarity index 93%
rename from vendor/github.com/lucas-clemente/quic-go/qlog/event.go
rename to vendor/github.com/quic-go/quic-go/qlog/event.go
index 8427b6e22..fbbdf1acd 100644
--- a/vendor/github.com/lucas-clemente/quic-go/qlog/event.go
+++ b/vendor/github.com/quic-go/quic-go/qlog/event.go
@@ -6,10 +6,10 @@ import (
"net"
"time"
- "github.com/lucas-clemente/quic-go"
- "github.com/lucas-clemente/quic-go/internal/protocol"
- "github.com/lucas-clemente/quic-go/internal/utils"
- "github.com/lucas-clemente/quic-go/logging"
+ "github.com/quic-go/quic-go"
+ "github.com/quic-go/quic-go/internal/protocol"
+ "github.com/quic-go/quic-go/internal/utils"
+ "github.com/quic-go/quic-go/logging"
"github.com/francoispqt/gojay"
)
@@ -80,8 +80,8 @@ func (e eventConnectionStarted) MarshalJSONObject(enc *gojay.Encoder) {
enc.IntKey("src_port", e.SrcAddr.Port)
enc.StringKey("dst_ip", e.DestAddr.IP.String())
enc.IntKey("dst_port", e.DestAddr.Port)
- enc.StringKey("src_cid", connectionID(e.SrcConnectionID).String())
- enc.StringKey("dst_cid", connectionID(e.DestConnectionID).String())
+ enc.StringKey("src_cid", e.SrcConnectionID.String())
+ enc.StringKey("dst_cid", e.DestConnectionID.String())
}
type eventVersionNegotiated struct {
@@ -154,7 +154,7 @@ func (e eventConnectionClosed) MarshalJSONObject(enc *gojay.Encoder) {
}
type eventPacketSent struct {
- Header packetHeader
+ Header gojay.MarshalerJSONObject // either a shortHeader or a packetHeader
Length logging.ByteCount
PayloadLength logging.ByteCount
Frames frames
@@ -177,7 +177,7 @@ func (e eventPacketSent) MarshalJSONObject(enc *gojay.Encoder) {
}
type eventPacketReceived struct {
- Header packetHeader
+ Header gojay.MarshalerJSONObject // either a shortHeader or a packetHeader
Length logging.ByteCount
PayloadLength logging.ByteCount
Frames frames
@@ -212,7 +212,7 @@ func (e eventRetryReceived) MarshalJSONObject(enc *gojay.Encoder) {
}
type eventVersionNegotiationReceived struct {
- Header packetHeader
+ Header packetHeaderVersionNegotiation
SupportedVersions []versionNumber
}
@@ -227,6 +227,7 @@ func (e eventVersionNegotiationReceived) MarshalJSONObject(enc *gojay.Encoder) {
type eventPacketBuffered struct {
PacketType logging.PacketType
+ PacketSize protocol.ByteCount
}
func (e eventPacketBuffered) Category() category { return categoryTransport }
@@ -236,6 +237,7 @@ func (e eventPacketBuffered) IsNil() bool { return false }
func (e eventPacketBuffered) MarshalJSONObject(enc *gojay.Encoder) {
//nolint:gosimple
enc.ObjectKey("header", packetHeaderWithType{PacketType: e.PacketType})
+ enc.ObjectKey("raw", rawInfo{Length: e.PacketSize})
enc.StringKey("trigger", "keys_unavailable")
}
@@ -349,16 +351,16 @@ func (e eventKeyUpdated) MarshalJSONObject(enc *gojay.Encoder) {
}
}
-type eventKeyRetired struct {
+type eventKeyDiscarded struct {
KeyType keyType
Generation protocol.KeyPhase
}
-func (e eventKeyRetired) Category() category { return categorySecurity }
-func (e eventKeyRetired) Name() string { return "key_retired" }
-func (e eventKeyRetired) IsNil() bool { return false }
+func (e eventKeyDiscarded) Category() category { return categorySecurity }
+func (e eventKeyDiscarded) Name() string { return "key_discarded" }
+func (e eventKeyDiscarded) IsNil() bool { return false }
-func (e eventKeyRetired) MarshalJSONObject(enc *gojay.Encoder) {
+func (e eventKeyDiscarded) MarshalJSONObject(enc *gojay.Encoder) {
if e.KeyType != keyTypeClient1RTT && e.KeyType != keyTypeServer1RTT {
enc.StringKey("trigger", "tls")
}
@@ -410,15 +412,15 @@ func (e eventTransportParameters) MarshalJSONObject(enc *gojay.Encoder) {
if !e.Restore {
enc.StringKey("owner", e.Owner.String())
if e.SentBy == protocol.PerspectiveServer {
- enc.StringKey("original_destination_connection_id", connectionID(e.OriginalDestinationConnectionID).String())
+ enc.StringKey("original_destination_connection_id", e.OriginalDestinationConnectionID.String())
if e.StatelessResetToken != nil {
enc.StringKey("stateless_reset_token", fmt.Sprintf("%x", e.StatelessResetToken[:]))
}
if e.RetrySourceConnectionID != nil {
- enc.StringKey("retry_source_connection_id", connectionID(*e.RetrySourceConnectionID).String())
+ enc.StringKey("retry_source_connection_id", (*e.RetrySourceConnectionID).String())
}
}
- enc.StringKey("initial_source_connection_id", connectionID(e.InitialSourceConnectionID).String())
+ enc.StringKey("initial_source_connection_id", e.InitialSourceConnectionID.String())
}
enc.BoolKey("disable_active_migration", e.DisableActiveMigration)
enc.FloatKeyOmitEmpty("max_idle_timeout", milliseconds(e.MaxIdleTimeout))
@@ -457,7 +459,7 @@ func (a preferredAddress) MarshalJSONObject(enc *gojay.Encoder) {
enc.Uint16Key("port_v4", a.PortV4)
enc.StringKey("ip_v6", a.IPv6.String())
enc.Uint16Key("port_v6", a.PortV6)
- enc.StringKey("connection_id", connectionID(a.ConnectionID).String())
+ enc.StringKey("connection_id", a.ConnectionID.String())
enc.StringKey("stateless_reset_token", fmt.Sprintf("%x", a.StatelessResetToken))
}
diff --git a/vendor/github.com/lucas-clemente/quic-go/qlog/frame.go b/vendor/github.com/quic-go/quic-go/qlog/frame.go
similarity index 97%
rename from vendor/github.com/lucas-clemente/quic-go/qlog/frame.go
rename to vendor/github.com/quic-go/quic-go/qlog/frame.go
index 4530f0fba..0d44f073b 100644
--- a/vendor/github.com/lucas-clemente/quic-go/qlog/frame.go
+++ b/vendor/github.com/quic-go/quic-go/qlog/frame.go
@@ -3,8 +3,8 @@ package qlog
import (
"fmt"
- "github.com/lucas-clemente/quic-go/internal/wire"
- "github.com/lucas-clemente/quic-go/logging"
+ "github.com/quic-go/quic-go/internal/wire"
+ "github.com/quic-go/quic-go/logging"
"github.com/francoispqt/gojay"
)
@@ -182,7 +182,7 @@ func marshalNewConnectionIDFrame(enc *gojay.Encoder, f *logging.NewConnectionIDF
enc.Int64Key("sequence_number", int64(f.SequenceNumber))
enc.Int64Key("retire_prior_to", int64(f.RetirePriorTo))
enc.IntKey("length", f.ConnectionID.Len())
- enc.StringKey("connection_id", connectionID(f.ConnectionID).String())
+ enc.StringKey("connection_id", f.ConnectionID.String())
enc.StringKey("stateless_reset_token", fmt.Sprintf("%x", f.StatelessResetToken))
}
diff --git a/vendor/github.com/lucas-clemente/quic-go/qlog/packet_header.go b/vendor/github.com/quic-go/quic-go/qlog/packet_header.go
similarity index 63%
rename from vendor/github.com/lucas-clemente/quic-go/qlog/packet_header.go
rename to vendor/github.com/quic-go/quic-go/qlog/packet_header.go
index cc270f2f5..106499b05 100644
--- a/vendor/github.com/lucas-clemente/quic-go/qlog/packet_header.go
+++ b/vendor/github.com/quic-go/quic-go/qlog/packet_header.go
@@ -3,9 +3,8 @@ package qlog
import (
"fmt"
- "github.com/lucas-clemente/quic-go/internal/protocol"
- "github.com/lucas-clemente/quic-go/internal/wire"
- "github.com/lucas-clemente/quic-go/logging"
+ "github.com/quic-go/quic-go/internal/protocol"
+ "github.com/quic-go/quic-go/logging"
"github.com/francoispqt/gojay"
)
@@ -37,6 +36,7 @@ func (t token) MarshalJSONObject(enc *gojay.Encoder) {
}
// PacketHeader is a QUIC packet header.
+// TODO: make this a long header
type packetHeader struct {
PacketType logging.PacketType
@@ -50,7 +50,7 @@ type packetHeader struct {
Token *token
}
-func transformHeader(hdr *wire.Header) *packetHeader {
+func transformHeader(hdr *logging.Header) *packetHeader {
h := &packetHeader{
PacketType: logging.PacketTypeFromHeader(hdr),
SrcConnectionID: hdr.SrcConnectionID,
@@ -63,7 +63,7 @@ func transformHeader(hdr *wire.Header) *packetHeader {
return h
}
-func transformExtendedHeader(hdr *wire.ExtendedHeader) *packetHeader {
+func transformLongHeader(hdr *logging.ExtendedHeader) *packetHeader {
h := transformHeader(&hdr.Header)
h.PacketNumber = hdr.PacketNumber
h.KeyPhaseBit = hdr.KeyPhase
@@ -72,7 +72,7 @@ func transformExtendedHeader(hdr *wire.ExtendedHeader) *packetHeader {
func (h packetHeader) MarshalJSONObject(enc *gojay.Encoder) {
enc.StringKey("packet_type", packetType(h.PacketType).String())
- if h.PacketType != logging.PacketTypeRetry && h.PacketType != logging.PacketTypeVersionNegotiation {
+ if h.PacketType != logging.PacketTypeRetry {
enc.Int64Key("packet_number", int64(h.PacketNumber))
}
if h.Version != 0 {
@@ -81,12 +81,12 @@ func (h packetHeader) MarshalJSONObject(enc *gojay.Encoder) {
if h.PacketType != logging.PacketType1RTT {
enc.IntKey("scil", h.SrcConnectionID.Len())
if h.SrcConnectionID.Len() > 0 {
- enc.StringKey("scid", connectionID(h.SrcConnectionID).String())
+ enc.StringKey("scid", h.SrcConnectionID.String())
}
}
enc.IntKey("dcil", h.DestConnectionID.Len())
if h.DestConnectionID.Len() > 0 {
- enc.StringKey("dcid", connectionID(h.DestConnectionID).String())
+ enc.StringKey("dcid", h.DestConnectionID.String())
}
if h.KeyPhaseBit == logging.KeyPhaseZero || h.KeyPhaseBit == logging.KeyPhaseOne {
enc.StringKey("key_phase_bit", h.KeyPhaseBit.String())
@@ -96,6 +96,20 @@ func (h packetHeader) MarshalJSONObject(enc *gojay.Encoder) {
}
}
+type packetHeaderVersionNegotiation struct {
+ SrcConnectionID logging.ArbitraryLenConnectionID
+ DestConnectionID logging.ArbitraryLenConnectionID
+}
+
+func (h packetHeaderVersionNegotiation) IsNil() bool { return false }
+func (h packetHeaderVersionNegotiation) MarshalJSONObject(enc *gojay.Encoder) {
+ enc.StringKey("packet_type", "version_negotiation")
+ enc.IntKey("scil", h.SrcConnectionID.Len())
+ enc.StringKey("scid", h.SrcConnectionID.String())
+ enc.IntKey("dcil", h.DestConnectionID.Len())
+ enc.StringKey("dcid", h.DestConnectionID.String())
+}
+
// a minimal header that only outputs the packet type
type packetHeaderWithType struct {
PacketType logging.PacketType
@@ -117,3 +131,27 @@ func (h packetHeaderWithTypeAndPacketNumber) MarshalJSONObject(enc *gojay.Encode
enc.StringKey("packet_type", packetType(h.PacketType).String())
enc.Int64Key("packet_number", int64(h.PacketNumber))
}
+
+type shortHeader struct {
+ DestConnectionID logging.ConnectionID
+ PacketNumber logging.PacketNumber
+ KeyPhaseBit logging.KeyPhaseBit
+}
+
+func transformShortHeader(hdr *logging.ShortHeader) *shortHeader {
+ return &shortHeader{
+ DestConnectionID: hdr.DestConnectionID,
+ PacketNumber: hdr.PacketNumber,
+ KeyPhaseBit: hdr.KeyPhase,
+ }
+}
+
+func (h shortHeader) IsNil() bool { return false }
+func (h shortHeader) MarshalJSONObject(enc *gojay.Encoder) {
+ enc.StringKey("packet_type", packetType(logging.PacketType1RTT).String())
+ if h.DestConnectionID.Len() > 0 {
+ enc.StringKey("dcid", h.DestConnectionID.String())
+ }
+ enc.Int64Key("packet_number", int64(h.PacketNumber))
+ enc.StringKey("key_phase_bit", h.KeyPhaseBit.String())
+}
diff --git a/vendor/github.com/lucas-clemente/quic-go/qlog/qlog.go b/vendor/github.com/quic-go/quic-go/qlog/qlog.go
similarity index 82%
rename from vendor/github.com/lucas-clemente/quic-go/qlog/qlog.go
rename to vendor/github.com/quic-go/quic-go/qlog/qlog.go
index 5b4117428..bc2bb233d 100644
--- a/vendor/github.com/lucas-clemente/quic-go/qlog/qlog.go
+++ b/vendor/github.com/quic-go/quic-go/qlog/qlog.go
@@ -11,17 +11,17 @@ import (
"sync"
"time"
- "github.com/lucas-clemente/quic-go/internal/protocol"
- "github.com/lucas-clemente/quic-go/internal/utils"
- "github.com/lucas-clemente/quic-go/internal/wire"
- "github.com/lucas-clemente/quic-go/logging"
+ "github.com/quic-go/quic-go/internal/protocol"
+ "github.com/quic-go/quic-go/internal/utils"
+ "github.com/quic-go/quic-go/internal/wire"
+ "github.com/quic-go/quic-go/logging"
"github.com/francoispqt/gojay"
)
// Setting of this only works when quic-go is used as a library.
// When building a binary from this repository, the version can be set using the following go build flag:
-// -ldflags="-X github.com/lucas-clemente/quic-go/qlog.quicGoVersion=foobar"
+// -ldflags="-X github.com/quic-go/quic-go/qlog.quicGoVersion=foobar"
var quicGoVersion = "(devel)"
func init() {
@@ -33,7 +33,7 @@ func init() {
return
}
for _, d := range info.Deps {
- if d.Path == "github.com/lucas-clemente/quic-go" {
+ if d.Path == "github.com/quic-go/quic-go" {
quicGoVersion = d.Version
if d.Replace != nil {
if len(d.Replace.Version) > 0 {
@@ -50,6 +50,8 @@ func init() {
const eventChanSize = 50
type tracer struct {
+ logging.NullTracer
+
getLogWriter func(p logging.Perspective, connectionID []byte) io.WriteCloser
}
@@ -67,10 +69,6 @@ func (t *tracer) TracerForConnection(_ context.Context, p logging.Perspective, o
return nil
}
-func (t *tracer) SentPacket(net.Addr, *logging.Header, protocol.ByteCount, []logging.Frame) {}
-func (t *tracer) DroppedPacket(net.Addr, logging.PacketType, protocol.ByteCount, logging.PacketDropReason) {
-}
-
type connectionTracer struct {
mutex sync.Mutex
@@ -110,8 +108,8 @@ func (t *connectionTracer) run() {
trace: trace{
VantagePoint: vantagePoint{Type: t.perspective},
CommonFields: commonFields{
- ODCID: connectionID(t.odcid),
- GroupID: connectionID(t.odcid),
+ ODCID: t.odcid,
+ GroupID: t.odcid,
ReferenceTime: t.referenceTime,
},
},
@@ -276,7 +274,15 @@ func (t *connectionTracer) toTransportParameters(tp *wire.TransportParameters) *
}
}
-func (t *connectionTracer) SentPacket(hdr *wire.ExtendedHeader, packetSize logging.ByteCount, ack *logging.AckFrame, frames []logging.Frame) {
+func (t *connectionTracer) SentLongHeaderPacket(hdr *logging.ExtendedHeader, packetSize logging.ByteCount, ack *logging.AckFrame, frames []logging.Frame) {
+ t.sentPacket(*transformLongHeader(hdr), packetSize, hdr.Length, ack, frames)
+}
+
+func (t *connectionTracer) SentShortHeaderPacket(hdr *logging.ShortHeader, packetSize logging.ByteCount, ack *logging.AckFrame, frames []logging.Frame) {
+ t.sentPacket(*transformShortHeader(hdr), packetSize, 0, ack, frames)
+}
+
+func (t *connectionTracer) sentPacket(hdr gojay.MarshalerJSONObject, packetSize, payloadLen logging.ByteCount, ack *logging.AckFrame, frames []logging.Frame) {
numFrames := len(frames)
if ack != nil {
numFrames++
@@ -288,9 +294,24 @@ func (t *connectionTracer) SentPacket(hdr *wire.ExtendedHeader, packetSize loggi
for _, f := range frames {
fs = append(fs, frame{Frame: f})
}
- header := *transformExtendedHeader(hdr)
t.mutex.Lock()
t.recordEvent(time.Now(), &eventPacketSent{
+ Header: hdr,
+ Length: packetSize,
+ PayloadLength: payloadLen,
+ Frames: fs,
+ })
+ t.mutex.Unlock()
+}
+
+func (t *connectionTracer) ReceivedLongHeaderPacket(hdr *logging.ExtendedHeader, packetSize logging.ByteCount, frames []logging.Frame) {
+ fs := make([]frame, len(frames))
+ for i, f := range frames {
+ fs[i] = frame{Frame: f}
+ }
+ header := *transformLongHeader(hdr)
+ t.mutex.Lock()
+ t.recordEvent(time.Now(), &eventPacketReceived{
Header: header,
Length: packetSize,
PayloadLength: hdr.Length,
@@ -299,17 +320,17 @@ func (t *connectionTracer) SentPacket(hdr *wire.ExtendedHeader, packetSize loggi
t.mutex.Unlock()
}
-func (t *connectionTracer) ReceivedPacket(hdr *wire.ExtendedHeader, packetSize logging.ByteCount, frames []logging.Frame) {
+func (t *connectionTracer) ReceivedShortHeaderPacket(hdr *logging.ShortHeader, packetSize logging.ByteCount, frames []logging.Frame) {
fs := make([]frame, len(frames))
for i, f := range frames {
fs[i] = frame{Frame: f}
}
- header := *transformExtendedHeader(hdr)
+ header := *transformShortHeader(hdr)
t.mutex.Lock()
t.recordEvent(time.Now(), &eventPacketReceived{
Header: header,
Length: packetSize,
- PayloadLength: hdr.Length,
+ PayloadLength: packetSize - wire.ShortHeaderLen(hdr.DestConnectionID, hdr.PacketNumberLen),
Frames: fs,
})
t.mutex.Unlock()
@@ -323,22 +344,28 @@ func (t *connectionTracer) ReceivedRetry(hdr *wire.Header) {
t.mutex.Unlock()
}
-func (t *connectionTracer) ReceivedVersionNegotiationPacket(hdr *wire.Header, versions []logging.VersionNumber) {
+func (t *connectionTracer) ReceivedVersionNegotiationPacket(dest, src logging.ArbitraryLenConnectionID, versions []logging.VersionNumber) {
ver := make([]versionNumber, len(versions))
for i, v := range versions {
ver[i] = versionNumber(v)
}
t.mutex.Lock()
t.recordEvent(time.Now(), &eventVersionNegotiationReceived{
- Header: *transformHeader(hdr),
+ Header: packetHeaderVersionNegotiation{
+ SrcConnectionID: src,
+ DestConnectionID: dest,
+ },
SupportedVersions: ver,
})
t.mutex.Unlock()
}
-func (t *connectionTracer) BufferedPacket(pt logging.PacketType) {
+func (t *connectionTracer) BufferedPacket(pt logging.PacketType, size protocol.ByteCount) {
t.mutex.Lock()
- t.recordEvent(time.Now(), &eventPacketBuffered{PacketType: pt})
+ t.recordEvent(time.Now(), &eventPacketBuffered{
+ PacketType: pt,
+ PacketSize: size,
+ })
t.mutex.Unlock()
}
@@ -428,10 +455,10 @@ func (t *connectionTracer) DroppedEncryptionLevel(encLevel protocol.EncryptionLe
t.mutex.Lock()
now := time.Now()
if encLevel == protocol.Encryption0RTT {
- t.recordEvent(now, &eventKeyRetired{KeyType: encLevelToKeyType(encLevel, t.perspective)})
+ t.recordEvent(now, &eventKeyDiscarded{KeyType: encLevelToKeyType(encLevel, t.perspective)})
} else {
- t.recordEvent(now, &eventKeyRetired{KeyType: encLevelToKeyType(encLevel, protocol.PerspectiveServer)})
- t.recordEvent(now, &eventKeyRetired{KeyType: encLevelToKeyType(encLevel, protocol.PerspectiveClient)})
+ t.recordEvent(now, &eventKeyDiscarded{KeyType: encLevelToKeyType(encLevel, protocol.PerspectiveServer)})
+ t.recordEvent(now, &eventKeyDiscarded{KeyType: encLevelToKeyType(encLevel, protocol.PerspectiveClient)})
}
t.mutex.Unlock()
}
@@ -439,11 +466,11 @@ func (t *connectionTracer) DroppedEncryptionLevel(encLevel protocol.EncryptionLe
func (t *connectionTracer) DroppedKey(generation protocol.KeyPhase) {
t.mutex.Lock()
now := time.Now()
- t.recordEvent(now, &eventKeyRetired{
+ t.recordEvent(now, &eventKeyDiscarded{
KeyType: encLevelToKeyType(protocol.Encryption1RTT, protocol.PerspectiveServer),
Generation: generation,
})
- t.recordEvent(now, &eventKeyRetired{
+ t.recordEvent(now, &eventKeyDiscarded{
KeyType: encLevelToKeyType(protocol.Encryption1RTT, protocol.PerspectiveClient),
Generation: generation,
})
diff --git a/vendor/github.com/lucas-clemente/quic-go/qlog/trace.go b/vendor/github.com/quic-go/quic-go/qlog/trace.go
similarity index 90%
rename from vendor/github.com/lucas-clemente/quic-go/qlog/trace.go
rename to vendor/github.com/quic-go/quic-go/qlog/trace.go
index 4f0b5e64e..fbd7e7399 100644
--- a/vendor/github.com/lucas-clemente/quic-go/qlog/trace.go
+++ b/vendor/github.com/quic-go/quic-go/qlog/trace.go
@@ -3,9 +3,10 @@ package qlog
import (
"time"
- "github.com/francoispqt/gojay"
+ "github.com/quic-go/quic-go/internal/protocol"
+ "github.com/quic-go/quic-go/logging"
- "github.com/lucas-clemente/quic-go/internal/protocol"
+ "github.com/francoispqt/gojay"
)
type topLevel struct {
@@ -38,8 +39,8 @@ func (p vantagePoint) MarshalJSONObject(enc *gojay.Encoder) {
}
type commonFields struct {
- ODCID connectionID
- GroupID connectionID
+ ODCID logging.ConnectionID
+ GroupID logging.ConnectionID
ProtocolType string
ReferenceTime time.Time
}
diff --git a/vendor/github.com/lucas-clemente/quic-go/qlog/types.go b/vendor/github.com/quic-go/quic-go/qlog/types.go
similarity index 96%
rename from vendor/github.com/lucas-clemente/quic-go/qlog/types.go
rename to vendor/github.com/quic-go/quic-go/qlog/types.go
index b485e17da..c47ad481e 100644
--- a/vendor/github.com/lucas-clemente/quic-go/qlog/types.go
+++ b/vendor/github.com/quic-go/quic-go/qlog/types.go
@@ -3,9 +3,9 @@ package qlog
import (
"fmt"
- "github.com/lucas-clemente/quic-go/internal/protocol"
- "github.com/lucas-clemente/quic-go/internal/qerr"
- "github.com/lucas-clemente/quic-go/logging"
+ "github.com/quic-go/quic-go/internal/protocol"
+ "github.com/quic-go/quic-go/internal/qerr"
+ "github.com/quic-go/quic-go/logging"
)
type owner uint8
@@ -39,12 +39,6 @@ func (s streamType) String() string {
}
}
-type connectionID protocol.ConnectionID
-
-func (c connectionID) String() string {
- return fmt.Sprintf("%x", []byte(c))
-}
-
// category is the qlog event category.
type category uint8
diff --git a/vendor/github.com/lucas-clemente/quic-go/quicvarint/io.go b/vendor/github.com/quic-go/quic-go/quicvarint/io.go
similarity index 100%
rename from vendor/github.com/lucas-clemente/quic-go/quicvarint/io.go
rename to vendor/github.com/quic-go/quic-go/quicvarint/io.go
diff --git a/vendor/github.com/lucas-clemente/quic-go/quicvarint/varint.go b/vendor/github.com/quic-go/quic-go/quicvarint/varint.go
similarity index 73%
rename from vendor/github.com/lucas-clemente/quic-go/quicvarint/varint.go
rename to vendor/github.com/quic-go/quic-go/quicvarint/varint.go
index 66e0d39e6..cbebfe61f 100644
--- a/vendor/github.com/lucas-clemente/quic-go/quicvarint/varint.go
+++ b/vendor/github.com/quic-go/quic-go/quicvarint/varint.go
@@ -4,7 +4,7 @@ import (
"fmt"
"io"
- "github.com/lucas-clemente/quic-go/internal/protocol"
+ "github.com/quic-go/quic-go/internal/protocol"
)
// taken from the QUIC draft
@@ -71,6 +71,7 @@ func Read(r io.ByteReader) (uint64, error) {
}
// Write writes i in the QUIC varint format to w.
+// Deprecated: use Append instead.
func Write(w Writer, i uint64) {
if i <= maxVarInt1 {
w.WriteByte(uint8(i))
@@ -88,32 +89,52 @@ func Write(w Writer, i uint64) {
}
}
-// WriteWithLen writes i in the QUIC varint format with the desired length to w.
-func WriteWithLen(w Writer, i uint64, length protocol.ByteCount) {
+// Append appends i in the QUIC varint format.
+func Append(b []byte, i uint64) []byte {
+ if i <= maxVarInt1 {
+ return append(b, uint8(i))
+ }
+ if i <= maxVarInt2 {
+ return append(b, []byte{uint8(i>>8) | 0x40, uint8(i)}...)
+ }
+ if i <= maxVarInt4 {
+ return append(b, []byte{uint8(i>>24) | 0x80, uint8(i >> 16), uint8(i >> 8), uint8(i)}...)
+ }
+ if i <= maxVarInt8 {
+ return append(b, []byte{
+ uint8(i>>56) | 0xc0, uint8(i >> 48), uint8(i >> 40), uint8(i >> 32),
+ uint8(i >> 24), uint8(i >> 16), uint8(i >> 8), uint8(i),
+ }...)
+ }
+ panic(fmt.Sprintf("%#x doesn't fit into 62 bits", i))
+}
+
+// AppendWithLen append i in the QUIC varint format with the desired length.
+func AppendWithLen(b []byte, i uint64, length protocol.ByteCount) []byte {
if length != 1 && length != 2 && length != 4 && length != 8 {
panic("invalid varint length")
}
l := Len(i)
if l == length {
- Write(w, i)
- return
+ return Append(b, i)
}
if l > length {
panic(fmt.Sprintf("cannot encode %d in %d bytes", i, length))
}
if length == 2 {
- w.WriteByte(0b01000000)
+ b = append(b, 0b01000000)
} else if length == 4 {
- w.WriteByte(0b10000000)
+ b = append(b, 0b10000000)
} else if length == 8 {
- w.WriteByte(0b11000000)
+ b = append(b, 0b11000000)
}
for j := protocol.ByteCount(1); j < length-l; j++ {
- w.WriteByte(0)
+ b = append(b, 0)
}
for j := protocol.ByteCount(0); j < l; j++ {
- w.WriteByte(uint8(i >> (8 * (l - 1 - j))))
+ b = append(b, uint8(i>>(8*(l-1-j))))
}
+ return b
}
// Len determines the number of bytes that will be needed to write the number i.
diff --git a/vendor/github.com/lucas-clemente/quic-go/receive_stream.go b/vendor/github.com/quic-go/quic-go/receive_stream.go
similarity index 86%
rename from vendor/github.com/lucas-clemente/quic-go/receive_stream.go
rename to vendor/github.com/quic-go/quic-go/receive_stream.go
index ae6a449b5..0a7e94167 100644
--- a/vendor/github.com/lucas-clemente/quic-go/receive_stream.go
+++ b/vendor/github.com/quic-go/quic-go/receive_stream.go
@@ -6,11 +6,11 @@ import (
"sync"
"time"
- "github.com/lucas-clemente/quic-go/internal/flowcontrol"
- "github.com/lucas-clemente/quic-go/internal/protocol"
- "github.com/lucas-clemente/quic-go/internal/qerr"
- "github.com/lucas-clemente/quic-go/internal/utils"
- "github.com/lucas-clemente/quic-go/internal/wire"
+ "github.com/quic-go/quic-go/internal/flowcontrol"
+ "github.com/quic-go/quic-go/internal/protocol"
+ "github.com/quic-go/quic-go/internal/qerr"
+ "github.com/quic-go/quic-go/internal/utils"
+ "github.com/quic-go/quic-go/internal/wire"
)
type receiveStreamI interface {
@@ -34,24 +34,19 @@ type receiveStream struct {
currentFrame []byte
currentFrameDone func()
- currentFrameIsLast bool // is the currentFrame the last frame on this stream
readPosInFrame int
+ currentFrameIsLast bool // is the currentFrame the last frame on this stream
+ finRead bool // set once we read a frame with a Fin
closeForShutdownErr error
cancelReadErr error
resetRemotelyErr *StreamError
- closedForShutdown bool // set when CloseForShutdown() is called
- finRead bool // set once we read a frame with a Fin
- canceledRead bool // set when CancelRead() is called
- resetRemotely bool // set when HandleResetStreamFrame() is called
-
readChan chan struct{}
readOnce chan struct{} // cap: 1, to protect against concurrent use of Read
deadline time.Time
flowController flowcontrol.StreamFlowController
- version protocol.VersionNumber
}
var (
@@ -63,7 +58,6 @@ func newReceiveStream(
streamID protocol.StreamID,
sender streamSender,
flowController flowcontrol.StreamFlowController,
- version protocol.VersionNumber,
) *receiveStream {
return &receiveStream{
streamID: streamID,
@@ -73,7 +67,6 @@ func newReceiveStream(
readChan: make(chan struct{}, 1),
readOnce: make(chan struct{}, 1),
finalOffset: protocol.MaxByteCount,
- version: version,
}
}
@@ -103,13 +96,13 @@ func (s *receiveStream) readImpl(p []byte) (bool /*stream completed */, int, err
if s.finRead {
return false, 0, io.EOF
}
- if s.canceledRead {
+ if s.cancelReadErr != nil {
return false, 0, s.cancelReadErr
}
- if s.resetRemotely {
+ if s.resetRemotelyErr != nil {
return false, 0, s.resetRemotelyErr
}
- if s.closedForShutdown {
+ if s.closeForShutdownErr != nil {
return false, 0, s.closeForShutdownErr
}
@@ -125,13 +118,13 @@ func (s *receiveStream) readImpl(p []byte) (bool /*stream completed */, int, err
for {
// Stop waiting on errors
- if s.closedForShutdown {
+ if s.closeForShutdownErr != nil {
return false, bytesRead, s.closeForShutdownErr
}
- if s.canceledRead {
+ if s.cancelReadErr != nil {
return false, bytesRead, s.cancelReadErr
}
- if s.resetRemotely {
+ if s.resetRemotelyErr != nil {
return false, bytesRead, s.resetRemotelyErr
}
@@ -178,8 +171,9 @@ func (s *receiveStream) readImpl(p []byte) (bool /*stream completed */, int, err
s.readPosInFrame += m
bytesRead += m
- // when a RESET_STREAM was received, the was already informed about the final byteOffset for this stream
- if !s.resetRemotely {
+ // when a RESET_STREAM was received, the flow controller was already
+ // informed about the final byteOffset for this stream
+ if s.resetRemotelyErr == nil {
s.flowController.AddBytesRead(protocol.ByteCount(m))
}
@@ -214,11 +208,10 @@ func (s *receiveStream) CancelRead(errorCode StreamErrorCode) {
}
func (s *receiveStream) cancelReadImpl(errorCode qerr.StreamErrorCode) bool /* completed */ {
- if s.finRead || s.canceledRead || s.resetRemotely {
+ if s.finRead || s.cancelReadErr != nil || s.resetRemotelyErr != nil {
return false
}
- s.canceledRead = true
- s.cancelReadErr = fmt.Errorf("Read on stream %d canceled with error code %d", s.streamID, errorCode)
+ s.cancelReadErr = &StreamError{StreamID: s.streamID, ErrorCode: errorCode, Remote: false}
s.signalRead()
s.sender.queueControlFrame(&wire.StopSendingFrame{
StreamID: s.streamID,
@@ -250,7 +243,7 @@ func (s *receiveStream) handleStreamFrameImpl(frame *wire.StreamFrame) (bool /*
newlyRcvdFinalOffset = s.finalOffset == protocol.MaxByteCount
s.finalOffset = maxOffset
}
- if s.canceledRead {
+ if s.cancelReadErr != nil {
return newlyRcvdFinalOffset, nil
}
if err := s.frameQueue.Push(frame.Data, frame.Offset, frame.PutBack); err != nil {
@@ -273,7 +266,7 @@ func (s *receiveStream) handleResetStreamFrame(frame *wire.ResetStreamFrame) err
}
func (s *receiveStream) handleResetStreamFrameImpl(frame *wire.ResetStreamFrame) (bool /*completed */, error) {
- if s.closedForShutdown {
+ if s.closeForShutdownErr != nil {
return false, nil
}
if err := s.flowController.UpdateHighestReceived(frame.FinalSize, true); err != nil {
@@ -283,13 +276,13 @@ func (s *receiveStream) handleResetStreamFrameImpl(frame *wire.ResetStreamFrame)
s.finalOffset = frame.FinalSize
// ignore duplicate RESET_STREAM frames for this stream (after checking their final offset)
- if s.resetRemotely {
+ if s.resetRemotelyErr != nil {
return false, nil
}
- s.resetRemotely = true
s.resetRemotelyErr = &StreamError{
StreamID: s.streamID,
ErrorCode: frame.ErrorCode,
+ Remote: true,
}
s.signalRead()
return newlyRcvdFinalOffset, nil
@@ -312,7 +305,6 @@ func (s *receiveStream) SetReadDeadline(t time.Time) error {
// The peer will NOT be informed about this: the stream is closed without sending a FIN or RESET.
func (s *receiveStream) closeForShutdown(err error) {
s.mutex.Lock()
- s.closedForShutdown = true
s.closeForShutdownErr = err
s.mutex.Unlock()
s.signalRead()
diff --git a/vendor/github.com/lucas-clemente/quic-go/retransmission_queue.go b/vendor/github.com/quic-go/quic-go/retransmission_queue.go
similarity index 83%
rename from vendor/github.com/lucas-clemente/quic-go/retransmission_queue.go
rename to vendor/github.com/quic-go/quic-go/retransmission_queue.go
index 0cfbbc4de..2ce0b8931 100644
--- a/vendor/github.com/lucas-clemente/quic-go/retransmission_queue.go
+++ b/vendor/github.com/quic-go/quic-go/retransmission_queue.go
@@ -3,8 +3,8 @@ package quic
import (
"fmt"
- "github.com/lucas-clemente/quic-go/internal/protocol"
- "github.com/lucas-clemente/quic-go/internal/wire"
+ "github.com/quic-go/quic-go/internal/protocol"
+ "github.com/quic-go/quic-go/internal/wire"
)
type retransmissionQueue struct {
@@ -15,12 +15,10 @@ type retransmissionQueue struct {
handshakeCryptoData []*wire.CryptoFrame
appData []wire.Frame
-
- version protocol.VersionNumber
}
-func newRetransmissionQueue(ver protocol.VersionNumber) *retransmissionQueue {
- return &retransmissionQueue{version: ver}
+func newRetransmissionQueue() *retransmissionQueue {
+ return &retransmissionQueue{}
}
func (q *retransmissionQueue) AddInitial(f wire.Frame) {
@@ -58,10 +56,10 @@ func (q *retransmissionQueue) AddAppData(f wire.Frame) {
q.appData = append(q.appData, f)
}
-func (q *retransmissionQueue) GetInitialFrame(maxLen protocol.ByteCount) wire.Frame {
+func (q *retransmissionQueue) GetInitialFrame(maxLen protocol.ByteCount, v protocol.VersionNumber) wire.Frame {
if len(q.initialCryptoData) > 0 {
f := q.initialCryptoData[0]
- newFrame, needsSplit := f.MaybeSplitOffFrame(maxLen, q.version)
+ newFrame, needsSplit := f.MaybeSplitOffFrame(maxLen, v)
if newFrame == nil && !needsSplit { // the whole frame fits
q.initialCryptoData = q.initialCryptoData[1:]
return f
@@ -74,17 +72,17 @@ func (q *retransmissionQueue) GetInitialFrame(maxLen protocol.ByteCount) wire.Fr
return nil
}
f := q.initial[0]
- if f.Length(q.version) > maxLen {
+ if f.Length(v) > maxLen {
return nil
}
q.initial = q.initial[1:]
return f
}
-func (q *retransmissionQueue) GetHandshakeFrame(maxLen protocol.ByteCount) wire.Frame {
+func (q *retransmissionQueue) GetHandshakeFrame(maxLen protocol.ByteCount, v protocol.VersionNumber) wire.Frame {
if len(q.handshakeCryptoData) > 0 {
f := q.handshakeCryptoData[0]
- newFrame, needsSplit := f.MaybeSplitOffFrame(maxLen, q.version)
+ newFrame, needsSplit := f.MaybeSplitOffFrame(maxLen, v)
if newFrame == nil && !needsSplit { // the whole frame fits
q.handshakeCryptoData = q.handshakeCryptoData[1:]
return f
@@ -97,19 +95,19 @@ func (q *retransmissionQueue) GetHandshakeFrame(maxLen protocol.ByteCount) wire.
return nil
}
f := q.handshake[0]
- if f.Length(q.version) > maxLen {
+ if f.Length(v) > maxLen {
return nil
}
q.handshake = q.handshake[1:]
return f
}
-func (q *retransmissionQueue) GetAppDataFrame(maxLen protocol.ByteCount) wire.Frame {
+func (q *retransmissionQueue) GetAppDataFrame(maxLen protocol.ByteCount, v protocol.VersionNumber) wire.Frame {
if len(q.appData) == 0 {
return nil
}
f := q.appData[0]
- if f.Length(q.version) > maxLen {
+ if f.Length(v) > maxLen {
return nil
}
q.appData = q.appData[1:]
diff --git a/vendor/github.com/lucas-clemente/quic-go/send_conn.go b/vendor/github.com/quic-go/quic-go/send_conn.go
similarity index 100%
rename from vendor/github.com/lucas-clemente/quic-go/send_conn.go
rename to vendor/github.com/quic-go/quic-go/send_conn.go
diff --git a/vendor/github.com/lucas-clemente/quic-go/send_queue.go b/vendor/github.com/quic-go/quic-go/send_queue.go
similarity index 93%
rename from vendor/github.com/lucas-clemente/quic-go/send_queue.go
rename to vendor/github.com/quic-go/quic-go/send_queue.go
index 1fc8c1bf8..9eafcd374 100644
--- a/vendor/github.com/lucas-clemente/quic-go/send_queue.go
+++ b/vendor/github.com/quic-go/quic-go/send_queue.go
@@ -36,6 +36,13 @@ func newSendQueue(conn sendConn) sender {
func (h *sendQueue) Send(p *packetBuffer) {
select {
case h.queue <- p:
+ // clear available channel if we've reached capacity
+ if len(h.queue) == sendQueueCapacity {
+ select {
+ case <-h.available:
+ default:
+ }
+ }
case <-h.runStopped:
default:
panic("sendQueue.Send would have blocked")
diff --git a/vendor/github.com/lucas-clemente/quic-go/send_stream.go b/vendor/github.com/quic-go/quic-go/send_stream.go
similarity index 81%
rename from vendor/github.com/lucas-clemente/quic-go/send_stream.go
rename to vendor/github.com/quic-go/quic-go/send_stream.go
index b23df00b3..cebe30ef0 100644
--- a/vendor/github.com/lucas-clemente/quic-go/send_stream.go
+++ b/vendor/github.com/quic-go/quic-go/send_stream.go
@@ -6,19 +6,19 @@ import (
"sync"
"time"
- "github.com/lucas-clemente/quic-go/internal/ackhandler"
- "github.com/lucas-clemente/quic-go/internal/flowcontrol"
- "github.com/lucas-clemente/quic-go/internal/protocol"
- "github.com/lucas-clemente/quic-go/internal/qerr"
- "github.com/lucas-clemente/quic-go/internal/utils"
- "github.com/lucas-clemente/quic-go/internal/wire"
+ "github.com/quic-go/quic-go/internal/ackhandler"
+ "github.com/quic-go/quic-go/internal/flowcontrol"
+ "github.com/quic-go/quic-go/internal/protocol"
+ "github.com/quic-go/quic-go/internal/qerr"
+ "github.com/quic-go/quic-go/internal/utils"
+ "github.com/quic-go/quic-go/internal/wire"
)
type sendStreamI interface {
SendStream
handleStopSendingFrame(*wire.StopSendingFrame)
hasData() bool
- popStreamFrame(maxBytes protocol.ByteCount) (*ackhandler.Frame, bool)
+ popStreamFrame(maxBytes protocol.ByteCount, v protocol.VersionNumber) (*ackhandler.Frame, bool)
closeForShutdown(error)
updateSendWindow(protocol.ByteCount)
}
@@ -40,11 +40,9 @@ type sendStream struct {
cancelWriteErr error
closeForShutdownErr error
- closedForShutdown bool // set when CloseForShutdown() is called
- finishedWriting bool // set once Close() is called
- canceledWrite bool // set when CancelWrite() is called, or a STOP_SENDING frame is received
- finSent bool // set when a STREAM_FRAME with FIN bit has been sent
- completed bool // set when this stream has been reported to the streamSender as completed
+ finishedWriting bool // set once Close() is called
+ finSent bool // set when a STREAM_FRAME with FIN bit has been sent
+ completed bool // set when this stream has been reported to the streamSender as completed
dataForWriting []byte // during a Write() call, this slice is the part of p that still needs to be sent out
nextFrame *wire.StreamFrame
@@ -54,8 +52,6 @@ type sendStream struct {
deadline time.Time
flowController flowcontrol.StreamFlowController
-
- version protocol.VersionNumber
}
var (
@@ -67,7 +63,6 @@ func newSendStream(
streamID protocol.StreamID,
sender streamSender,
flowController flowcontrol.StreamFlowController,
- version protocol.VersionNumber,
) *sendStream {
s := &sendStream{
streamID: streamID,
@@ -75,7 +70,6 @@ func newSendStream(
flowController: flowController,
writeChan: make(chan struct{}, 1),
writeOnce: make(chan struct{}, 1), // cap: 1, to protect against concurrent use of Write
- version: version,
}
s.ctx, s.ctxCancel = context.WithCancel(context.Background())
return s
@@ -98,7 +92,7 @@ func (s *sendStream) Write(p []byte) (int, error) {
if s.finishedWriting {
return 0, fmt.Errorf("write on closed stream %d", s.streamID)
}
- if s.canceledWrite {
+ if s.cancelWriteErr != nil {
return 0, s.cancelWriteErr
}
if s.closeForShutdownErr != nil {
@@ -122,7 +116,7 @@ func (s *sendStream) Write(p []byte) (int, error) {
var copied bool
var deadline time.Time
// As soon as dataForWriting becomes smaller than a certain size x, we copy all the data to a STREAM frame (s.nextFrame),
- // which can the be popped the next time we assemble a packet.
+ // which can then be popped the next time we assemble a packet.
// This allows us to return Write() when all data but x bytes have been sent out.
// When the user now calls Close(), this is much more likely to happen before we popped that last STREAM frame,
// allowing us to set the FIN bit on that frame (instead of sending an empty STREAM frame with FIN).
@@ -157,7 +151,7 @@ func (s *sendStream) Write(p []byte) (int, error) {
}
deadlineTimer.Reset(deadline)
}
- if s.dataForWriting == nil || s.canceledWrite || s.closedForShutdown {
+ if s.dataForWriting == nil || s.cancelWriteErr != nil || s.closeForShutdownErr != nil {
break
}
}
@@ -204,9 +198,9 @@ func (s *sendStream) canBufferStreamFrame() bool {
// popStreamFrame returns the next STREAM frame that is supposed to be sent on this stream
// maxBytes is the maximum length this frame (including frame header) will have.
-func (s *sendStream) popStreamFrame(maxBytes protocol.ByteCount) (*ackhandler.Frame, bool /* has more data to send */) {
+func (s *sendStream) popStreamFrame(maxBytes protocol.ByteCount, v protocol.VersionNumber) (*ackhandler.Frame, bool /* has more data to send */) {
s.mutex.Lock()
- f, hasMoreData := s.popNewOrRetransmittedStreamFrame(maxBytes)
+ f, hasMoreData := s.popNewOrRetransmittedStreamFrame(maxBytes, v)
if f != nil {
s.numOutstandingFrames++
}
@@ -215,16 +209,20 @@ func (s *sendStream) popStreamFrame(maxBytes protocol.ByteCount) (*ackhandler.Fr
if f == nil {
return nil, hasMoreData
}
- return &ackhandler.Frame{Frame: f, OnLost: s.queueRetransmission, OnAcked: s.frameAcked}, hasMoreData
+ af := ackhandler.GetFrame()
+ af.Frame = f
+ af.OnLost = s.queueRetransmission
+ af.OnAcked = s.frameAcked
+ return af, hasMoreData
}
-func (s *sendStream) popNewOrRetransmittedStreamFrame(maxBytes protocol.ByteCount) (*wire.StreamFrame, bool /* has more data to send */) {
- if s.canceledWrite || s.closeForShutdownErr != nil {
+func (s *sendStream) popNewOrRetransmittedStreamFrame(maxBytes protocol.ByteCount, v protocol.VersionNumber) (*wire.StreamFrame, bool /* has more data to send */) {
+ if s.cancelWriteErr != nil || s.closeForShutdownErr != nil {
return nil, false
}
if len(s.retransmissionQueue) > 0 {
- f, hasMoreRetransmissions := s.maybeGetRetransmission(maxBytes)
+ f, hasMoreRetransmissions := s.maybeGetRetransmission(maxBytes, v)
if f != nil || hasMoreRetransmissions {
if f == nil {
return nil, true
@@ -260,7 +258,7 @@ func (s *sendStream) popNewOrRetransmittedStreamFrame(maxBytes protocol.ByteCoun
return nil, true
}
- f, hasMoreData := s.popNewStreamFrame(maxBytes, sendWindow)
+ f, hasMoreData := s.popNewStreamFrame(maxBytes, sendWindow, v)
if dataLen := f.DataLen(); dataLen > 0 {
s.writeOffset += f.DataLen()
s.flowController.AddBytesSent(f.DataLen())
@@ -272,12 +270,12 @@ func (s *sendStream) popNewOrRetransmittedStreamFrame(maxBytes protocol.ByteCoun
return f, hasMoreData
}
-func (s *sendStream) popNewStreamFrame(maxBytes, sendWindow protocol.ByteCount) (*wire.StreamFrame, bool) {
+func (s *sendStream) popNewStreamFrame(maxBytes, sendWindow protocol.ByteCount, v protocol.VersionNumber) (*wire.StreamFrame, bool) {
if s.nextFrame != nil {
nextFrame := s.nextFrame
s.nextFrame = nil
- maxDataLen := utils.MinByteCount(sendWindow, nextFrame.MaxDataLen(maxBytes, s.version))
+ maxDataLen := utils.Min(sendWindow, nextFrame.MaxDataLen(maxBytes, v))
if nextFrame.DataLen() > maxDataLen {
s.nextFrame = wire.GetStreamFrame()
s.nextFrame.StreamID = s.streamID
@@ -299,7 +297,7 @@ func (s *sendStream) popNewStreamFrame(maxBytes, sendWindow protocol.ByteCount)
f.DataLenPresent = true
f.Data = f.Data[:0]
- hasMoreData := s.popNewStreamFrameWithoutBuffer(f, maxBytes, sendWindow)
+ hasMoreData := s.popNewStreamFrameWithoutBuffer(f, maxBytes, sendWindow, v)
if len(f.Data) == 0 && !f.Fin {
f.PutBack()
return nil, hasMoreData
@@ -307,19 +305,19 @@ func (s *sendStream) popNewStreamFrame(maxBytes, sendWindow protocol.ByteCount)
return f, hasMoreData
}
-func (s *sendStream) popNewStreamFrameWithoutBuffer(f *wire.StreamFrame, maxBytes, sendWindow protocol.ByteCount) bool {
- maxDataLen := f.MaxDataLen(maxBytes, s.version)
+func (s *sendStream) popNewStreamFrameWithoutBuffer(f *wire.StreamFrame, maxBytes, sendWindow protocol.ByteCount, v protocol.VersionNumber) bool {
+ maxDataLen := f.MaxDataLen(maxBytes, v)
if maxDataLen == 0 { // a STREAM frame must have at least one byte of data
return s.dataForWriting != nil || s.nextFrame != nil || s.finishedWriting
}
- s.getDataForWriting(f, utils.MinByteCount(maxDataLen, sendWindow))
+ s.getDataForWriting(f, utils.Min(maxDataLen, sendWindow))
return s.dataForWriting != nil || s.nextFrame != nil || s.finishedWriting
}
-func (s *sendStream) maybeGetRetransmission(maxBytes protocol.ByteCount) (*wire.StreamFrame, bool /* has more retransmissions */) {
+func (s *sendStream) maybeGetRetransmission(maxBytes protocol.ByteCount, v protocol.VersionNumber) (*wire.StreamFrame, bool /* has more retransmissions */) {
f := s.retransmissionQueue[0]
- newFrame, needsSplit := f.MaybeSplitOffFrame(maxBytes, s.version)
+ newFrame, needsSplit := f.MaybeSplitOffFrame(maxBytes, v)
if needsSplit {
return newFrame, true
}
@@ -354,7 +352,7 @@ func (s *sendStream) frameAcked(f wire.Frame) {
f.(*wire.StreamFrame).PutBack()
s.mutex.Lock()
- if s.canceledWrite {
+ if s.cancelWriteErr != nil {
s.mutex.Unlock()
return
}
@@ -371,7 +369,7 @@ func (s *sendStream) frameAcked(f wire.Frame) {
}
func (s *sendStream) isNewlyCompleted() bool {
- completed := (s.finSent || s.canceledWrite) && s.numOutstandingFrames == 0 && len(s.retransmissionQueue) == 0
+ completed := (s.finSent || s.cancelWriteErr != nil) && s.numOutstandingFrames == 0 && len(s.retransmissionQueue) == 0
if completed && !s.completed {
s.completed = true
return true
@@ -383,7 +381,7 @@ func (s *sendStream) queueRetransmission(f wire.Frame) {
sf := f.(*wire.StreamFrame)
sf.DataLenPresent = true
s.mutex.Lock()
- if s.canceledWrite {
+ if s.cancelWriteErr != nil {
s.mutex.Unlock()
return
}
@@ -399,11 +397,11 @@ func (s *sendStream) queueRetransmission(f wire.Frame) {
func (s *sendStream) Close() error {
s.mutex.Lock()
- if s.closedForShutdown {
+ if s.closeForShutdownErr != nil {
s.mutex.Unlock()
return nil
}
- if s.canceledWrite {
+ if s.cancelWriteErr != nil {
s.mutex.Unlock()
return fmt.Errorf("close called for canceled stream %d", s.streamID)
}
@@ -416,19 +414,18 @@ func (s *sendStream) Close() error {
}
func (s *sendStream) CancelWrite(errorCode StreamErrorCode) {
- s.cancelWriteImpl(errorCode, fmt.Errorf("Write on stream %d canceled with error code %d", s.streamID, errorCode))
+ s.cancelWriteImpl(errorCode, false)
}
// must be called after locking the mutex
-func (s *sendStream) cancelWriteImpl(errorCode qerr.StreamErrorCode, writeErr error) {
+func (s *sendStream) cancelWriteImpl(errorCode qerr.StreamErrorCode, remote bool) {
s.mutex.Lock()
- if s.canceledWrite {
+ if s.cancelWriteErr != nil {
s.mutex.Unlock()
return
}
s.ctxCancel()
- s.canceledWrite = true
- s.cancelWriteErr = writeErr
+ s.cancelWriteErr = &StreamError{StreamID: s.streamID, ErrorCode: errorCode, Remote: remote}
s.numOutstandingFrames = 0
s.retransmissionQueue = nil
newlyCompleted := s.isNewlyCompleted()
@@ -457,10 +454,7 @@ func (s *sendStream) updateSendWindow(limit protocol.ByteCount) {
}
func (s *sendStream) handleStopSendingFrame(frame *wire.StopSendingFrame) {
- s.cancelWriteImpl(frame.ErrorCode, &StreamError{
- StreamID: s.streamID,
- ErrorCode: frame.ErrorCode,
- })
+ s.cancelWriteImpl(frame.ErrorCode, true)
}
func (s *sendStream) Context() context.Context {
@@ -481,7 +475,6 @@ func (s *sendStream) SetWriteDeadline(t time.Time) error {
func (s *sendStream) closeForShutdown(err error) {
s.mutex.Lock()
s.ctxCancel()
- s.closedForShutdown = true
s.closeForShutdownErr = err
s.mutex.Unlock()
s.signalWrite()
diff --git a/vendor/github.com/lucas-clemente/quic-go/server.go b/vendor/github.com/quic-go/quic-go/server.go
similarity index 80%
rename from vendor/github.com/lucas-clemente/quic-go/server.go
rename to vendor/github.com/quic-go/quic-go/server.go
index 0e6429705..734d617f7 100644
--- a/vendor/github.com/lucas-clemente/quic-go/server.go
+++ b/vendor/github.com/quic-go/quic-go/server.go
@@ -1,7 +1,6 @@
package quic
import (
- "bytes"
"context"
"crypto/rand"
"crypto/tls"
@@ -12,12 +11,12 @@ import (
"sync/atomic"
"time"
- "github.com/lucas-clemente/quic-go/internal/handshake"
- "github.com/lucas-clemente/quic-go/internal/protocol"
- "github.com/lucas-clemente/quic-go/internal/qerr"
- "github.com/lucas-clemente/quic-go/internal/utils"
- "github.com/lucas-clemente/quic-go/internal/wire"
- "github.com/lucas-clemente/quic-go/logging"
+ "github.com/quic-go/quic-go/internal/handshake"
+ "github.com/quic-go/quic-go/internal/protocol"
+ "github.com/quic-go/quic-go/internal/qerr"
+ "github.com/quic-go/quic-go/internal/utils"
+ "github.com/quic-go/quic-go/internal/wire"
+ "github.com/quic-go/quic-go/logging"
)
// ErrServerClosed is returned by the Listener or EarlyListener's Accept method after a call to Close.
@@ -88,7 +87,7 @@ type baseServer struct {
*Config,
*tls.Config,
*handshake.TokenGenerator,
- bool, /* enable 0-RTT */
+ bool, /* client address validated by an address validation token */
logging.ConnectionTracer,
uint64,
utils.Logger,
@@ -190,7 +189,7 @@ func listen(conn net.PacketConn, tlsConf *tls.Config, config *Config, acceptEarl
}
}
- connHandler, err := getMultiplexer().AddConn(conn, config.ConnectionIDLength, config.StatelessResetKey, config.Tracer)
+ connHandler, err := getMultiplexer().AddConn(conn, config.ConnectionIDGenerator.ConnectionIDLen(), config.StatelessResetKey, config.Tracer)
if err != nil {
return nil, err
}
@@ -241,26 +240,6 @@ func (s *baseServer) run() {
}
}
-var defaultAcceptToken = func(clientAddr net.Addr, token *Token) bool {
- if token == nil {
- return false
- }
- validity := protocol.TokenValidity
- if token.IsRetryToken {
- validity = protocol.RetryTokenValidity
- }
- if time.Now().After(token.SentTime.Add(validity)) {
- return false
- }
- var sourceAddr string
- if udpAddr, ok := clientAddr.(*net.UDPAddr); ok {
- sourceAddr = udpAddr.IP.String()
- } else {
- sourceAddr = clientAddr.String()
- }
- return sourceAddr == token.RemoteAddr
-}
-
// Accept returns connections that already completed the handshake.
// It is only valid if acceptEarlyConns is false.
func (s *baseServer) Accept(ctx context.Context) (Connection, error) {
@@ -339,20 +318,43 @@ func (s *baseServer) handlePacketImpl(p *receivedPacket) bool /* is the buffer s
}
return false
}
+ // Short header packets should never end up here in the first place
+ if !wire.IsLongHeaderPacket(p.data[0]) {
+ panic(fmt.Sprintf("misrouted packet: %#v", p.data))
+ }
+ v, err := wire.ParseVersion(p.data)
+ // send a Version Negotiation Packet if the client is speaking a different protocol version
+ if err != nil || !protocol.IsSupportedVersion(s.config.Versions, v) {
+ if err != nil || p.Size() < protocol.MinUnknownVersionPacketSize {
+ s.logger.Debugf("Dropping a packet with an unknown version that is too small (%d bytes)", p.Size())
+ if s.config.Tracer != nil {
+ s.config.Tracer.DroppedPacket(p.remoteAddr, logging.PacketTypeNotDetermined, p.Size(), logging.PacketDropUnexpectedPacket)
+ }
+ return false
+ }
+ _, src, dest, err := wire.ParseArbitraryLenConnectionIDs(p.data)
+ if err != nil { // should never happen
+ s.logger.Debugf("Dropping a packet with an unknown version for which we failed to parse connection IDs")
+ if s.config.Tracer != nil {
+ s.config.Tracer.DroppedPacket(p.remoteAddr, logging.PacketTypeNotDetermined, p.Size(), logging.PacketDropUnexpectedPacket)
+ }
+ return false
+ }
+ if !s.config.DisableVersionNegotiationPackets {
+ go s.sendVersionNegotiationPacket(p.remoteAddr, src, dest, p.info.OOB(), v)
+ }
+ return false
+ }
// If we're creating a new connection, the packet will be passed to the connection.
// The header will then be parsed again.
- hdr, _, _, err := wire.ParsePacket(p.data, s.config.ConnectionIDLength)
- if err != nil && err != wire.ErrUnsupportedVersion {
+ hdr, _, _, err := wire.ParsePacket(p.data)
+ if err != nil {
if s.config.Tracer != nil {
s.config.Tracer.DroppedPacket(p.remoteAddr, logging.PacketTypeNotDetermined, p.Size(), logging.PacketDropHeaderParseError)
}
s.logger.Debugf("Error parsing packet: %s", err)
return false
}
- // Short header packets should never end up here in the first place
- if !hdr.IsLongHeader {
- panic(fmt.Sprintf("misrouted packet: %#v", hdr))
- }
if hdr.Type == protocol.PacketTypeInitial && p.Size() < protocol.MinInitialPacketSize {
s.logger.Debugf("Dropping a packet that is too small to be a valid Initial (%d bytes)", p.Size())
if s.config.Tracer != nil {
@@ -360,21 +362,8 @@ func (s *baseServer) handlePacketImpl(p *receivedPacket) bool /* is the buffer s
}
return false
}
- // send a Version Negotiation Packet if the client is speaking a different protocol version
- if !protocol.IsSupportedVersion(s.config.Versions, hdr.Version) {
- if p.Size() < protocol.MinUnknownVersionPacketSize {
- s.logger.Debugf("Dropping a packet with an unknown version that is too small (%d bytes)", p.Size())
- if s.config.Tracer != nil {
- s.config.Tracer.DroppedPacket(p.remoteAddr, logging.PacketTypeNotDetermined, p.Size(), logging.PacketDropUnexpectedPacket)
- }
- return false
- }
- if !s.config.DisableVersionNegotiationPackets {
- go s.sendVersionNegotiationPacket(p, hdr)
- }
- return false
- }
- if hdr.IsLongHeader && hdr.Type != protocol.PacketTypeInitial {
+
+ if hdr.Type != protocol.PacketTypeInitial {
// Drop long header packets.
// There's little point in sending a Stateless Reset, since the client
// might not have received the token yet.
@@ -395,6 +384,26 @@ func (s *baseServer) handlePacketImpl(p *receivedPacket) bool /* is the buffer s
return true
}
+// validateToken returns false if:
+// - address is invalid
+// - token is expired
+// - token is null
+func (s *baseServer) validateToken(token *handshake.Token, addr net.Addr) bool {
+ if token == nil {
+ return false
+ }
+ if !token.ValidateRemoteAddr(addr) {
+ return false
+ }
+ if !token.IsRetryToken && time.Since(token.SentTime) > s.config.MaxTokenAge {
+ return false
+ }
+ if token.IsRetryToken && time.Since(token.SentTime) > s.config.MaxRetryTokenAge {
+ return false
+ }
+ return true
+}
+
func (s *baseServer) handleInitialImpl(p *receivedPacket, hdr *wire.Header) error {
if len(hdr.Token) == 0 && hdr.DestConnectionID.Len() < protocol.MinConnectionIDLenInitial {
p.buffer.Release()
@@ -405,33 +414,45 @@ func (s *baseServer) handleInitialImpl(p *receivedPacket, hdr *wire.Header) erro
}
var (
- token *Token
+ token *handshake.Token
retrySrcConnID *protocol.ConnectionID
)
origDestConnID := hdr.DestConnectionID
if len(hdr.Token) > 0 {
- c, err := s.tokenGenerator.DecodeToken(hdr.Token)
+ tok, err := s.tokenGenerator.DecodeToken(hdr.Token)
if err == nil {
- token = &Token{
- IsRetryToken: c.IsRetryToken,
- RemoteAddr: c.RemoteAddr,
- SentTime: c.SentTime,
- }
- if token.IsRetryToken {
- origDestConnID = c.OriginalDestConnectionID
- retrySrcConnID = &c.RetrySrcConnectionID
+ if tok.IsRetryToken {
+ origDestConnID = tok.OriginalDestConnectionID
+ retrySrcConnID = &tok.RetrySrcConnectionID
}
+ token = tok
}
}
- if !s.config.AcceptToken(p.remoteAddr, token) {
- go func() {
- defer p.buffer.Release()
- if token != nil && token.IsRetryToken {
+
+ clientAddrIsValid := s.validateToken(token, p.remoteAddr)
+
+ if token != nil && !clientAddrIsValid {
+ // For invalid and expired non-retry tokens, we don't send an INVALID_TOKEN error.
+ // We just ignore them, and act as if there was no token on this packet at all.
+ // This also means we might send a Retry later.
+ if !token.IsRetryToken {
+ token = nil
+ } else {
+ // For Retry tokens, we send an INVALID_ERROR if
+ // * the token is too old, or
+ // * the token is invalid, in case of a retry token.
+ go func() {
+ defer p.buffer.Release()
if err := s.maybeSendInvalidToken(p, hdr); err != nil {
s.logger.Debugf("Error sending INVALID_TOKEN error: %s", err)
}
- return
- }
+ }()
+ return nil
+ }
+ }
+ if token == nil && s.config.RequireAddressValidation(p.remoteAddr) {
+ go func() {
+ defer p.buffer.Release()
if err := s.sendRetry(p.remoteAddr, hdr, p.info); err != nil {
s.logger.Debugf("Error sending Retry: %s", err)
}
@@ -450,7 +471,7 @@ func (s *baseServer) handleInitialImpl(p *receivedPacket, hdr *wire.Header) erro
return nil
}
- connID, err := protocol.GenerateConnectionID(s.config.ConnectionIDLength)
+ connID, err := s.config.ConnectionIDGenerator.GenerateConnectionID()
if err != nil {
return err
}
@@ -483,7 +504,7 @@ func (s *baseServer) handleInitialImpl(p *receivedPacket, hdr *wire.Header) erro
s.config,
s.tlsConf,
s.tokenGenerator,
- s.acceptEarlyConns,
+ clientAddrIsValid,
tracer,
tracingID,
s.logger,
@@ -535,7 +556,7 @@ func (s *baseServer) sendRetry(remoteAddr net.Addr, hdr *wire.Header, info *pack
// Log the Initial packet now.
// If no Retry is sent, the packet will be logged by the connection.
(&wire.ExtendedHeader{Header: *hdr}).Log(s.logger)
- srcConnID, err := protocol.GenerateConnectionID(s.config.ConnectionIDLength)
+ srcConnID, err := s.config.ConnectionIDGenerator.GenerateConnectionID()
if err != nil {
return err
}
@@ -544,7 +565,6 @@ func (s *baseServer) sendRetry(remoteAddr net.Addr, hdr *wire.Header, info *pack
return err
}
replyHdr := &wire.ExtendedHeader{}
- replyHdr.IsLongHeader = true
replyHdr.Type = protocol.PacketTypeRetry
replyHdr.Version = hdr.Version
replyHdr.SrcConnectionID = srcConnID
@@ -556,19 +576,19 @@ func (s *baseServer) sendRetry(remoteAddr net.Addr, hdr *wire.Header, info *pack
replyHdr.Log(s.logger)
}
- packetBuffer := getPacketBuffer()
- defer packetBuffer.Release()
- buf := bytes.NewBuffer(packetBuffer.Data)
- if err := replyHdr.Write(buf, hdr.Version); err != nil {
+ buf := getPacketBuffer()
+ defer buf.Release()
+ buf.Data, err = replyHdr.Append(buf.Data, hdr.Version)
+ if err != nil {
return err
}
// append the Retry integrity tag
- tag := handshake.GetRetryIntegrityTag(buf.Bytes(), hdr.DestConnectionID, hdr.Version)
- buf.Write(tag[:])
+ tag := handshake.GetRetryIntegrityTag(buf.Data, hdr.DestConnectionID, hdr.Version)
+ buf.Data = append(buf.Data, tag[:]...)
if s.config.Tracer != nil {
- s.config.Tracer.SentPacket(remoteAddr, &replyHdr.Header, protocol.ByteCount(buf.Len()), nil)
+ s.config.Tracer.SentPacket(remoteAddr, &replyHdr.Header, protocol.ByteCount(len(buf.Data)), nil)
}
- _, err = s.conn.WritePacket(buf.Bytes(), remoteAddr, info.OOB())
+ _, err = s.conn.WritePacket(buf.Data, remoteAddr, info.OOB())
return err
}
@@ -577,7 +597,7 @@ func (s *baseServer) maybeSendInvalidToken(p *receivedPacket, hdr *wire.Header)
// This makes sure that we won't send it for packets that were corrupted.
sealer, opener := handshake.NewInitialAEAD(hdr.DestConnectionID, protocol.PerspectiveServer, hdr.Version)
data := p.data[:hdr.ParsedLen()+hdr.Length]
- extHdr, err := unpackHeader(opener, hdr, data, hdr.Version)
+ extHdr, err := unpackLongHeader(opener, hdr, data, hdr.Version)
if err != nil {
if s.config.Tracer != nil {
s.config.Tracer.DroppedPacket(p.remoteAddr, logging.PacketTypeInitial, p.Size(), logging.PacketDropHeaderParseError)
@@ -606,65 +626,57 @@ func (s *baseServer) sendConnectionRefused(remoteAddr net.Addr, hdr *wire.Header
// sendError sends the error as a response to the packet received with header hdr
func (s *baseServer) sendError(remoteAddr net.Addr, hdr *wire.Header, sealer handshake.LongHeaderSealer, errorCode qerr.TransportErrorCode, info *packetInfo) error {
- packetBuffer := getPacketBuffer()
- defer packetBuffer.Release()
- buf := bytes.NewBuffer(packetBuffer.Data)
+ b := getPacketBuffer()
+ defer b.Release()
ccf := &wire.ConnectionCloseFrame{ErrorCode: uint64(errorCode)}
replyHdr := &wire.ExtendedHeader{}
- replyHdr.IsLongHeader = true
replyHdr.Type = protocol.PacketTypeInitial
replyHdr.Version = hdr.Version
replyHdr.SrcConnectionID = hdr.DestConnectionID
replyHdr.DestConnectionID = hdr.SrcConnectionID
replyHdr.PacketNumberLen = protocol.PacketNumberLen4
replyHdr.Length = 4 /* packet number len */ + ccf.Length(hdr.Version) + protocol.ByteCount(sealer.Overhead())
- if err := replyHdr.Write(buf, hdr.Version); err != nil {
+ var err error
+ b.Data, err = replyHdr.Append(b.Data, hdr.Version)
+ if err != nil {
return err
}
- payloadOffset := buf.Len()
+ payloadOffset := len(b.Data)
- if err := ccf.Write(buf, hdr.Version); err != nil {
+ b.Data, err = ccf.Append(b.Data, hdr.Version)
+ if err != nil {
return err
}
- raw := buf.Bytes()
- _ = sealer.Seal(raw[payloadOffset:payloadOffset], raw[payloadOffset:], replyHdr.PacketNumber, raw[:payloadOffset])
- raw = raw[0 : buf.Len()+sealer.Overhead()]
+ _ = sealer.Seal(b.Data[payloadOffset:payloadOffset], b.Data[payloadOffset:], replyHdr.PacketNumber, b.Data[:payloadOffset])
+ b.Data = b.Data[0 : len(b.Data)+sealer.Overhead()]
pnOffset := payloadOffset - int(replyHdr.PacketNumberLen)
sealer.EncryptHeader(
- raw[pnOffset+4:pnOffset+4+16],
- &raw[0],
- raw[pnOffset:payloadOffset],
+ b.Data[pnOffset+4:pnOffset+4+16],
+ &b.Data[0],
+ b.Data[pnOffset:payloadOffset],
)
replyHdr.Log(s.logger)
wire.LogFrame(s.logger, ccf, true)
if s.config.Tracer != nil {
- s.config.Tracer.SentPacket(remoteAddr, &replyHdr.Header, protocol.ByteCount(len(raw)), []logging.Frame{ccf})
+ s.config.Tracer.SentPacket(remoteAddr, &replyHdr.Header, protocol.ByteCount(len(b.Data)), []logging.Frame{ccf})
}
- _, err := s.conn.WritePacket(raw, remoteAddr, info.OOB())
+ _, err = s.conn.WritePacket(b.Data, remoteAddr, info.OOB())
return err
}
-func (s *baseServer) sendVersionNegotiationPacket(p *receivedPacket, hdr *wire.Header) {
- s.logger.Debugf("Client offered version %s, sending Version Negotiation", hdr.Version)
- data := wire.ComposeVersionNegotiation(hdr.SrcConnectionID, hdr.DestConnectionID, s.config.Versions)
+func (s *baseServer) sendVersionNegotiationPacket(remote net.Addr, src, dest protocol.ArbitraryLenConnectionID, oob []byte, v protocol.VersionNumber) {
+ s.logger.Debugf("Client offered version %s, sending Version Negotiation", v)
+
+ data := wire.ComposeVersionNegotiation(dest, src, s.config.Versions)
if s.config.Tracer != nil {
- s.config.Tracer.SentPacket(
- p.remoteAddr,
- &wire.Header{
- IsLongHeader: true,
- DestConnectionID: hdr.SrcConnectionID,
- SrcConnectionID: hdr.DestConnectionID,
- },
- protocol.ByteCount(len(data)),
- nil,
- )
+ s.config.Tracer.SentVersionNegotiationPacket(remote, src, dest, s.config.Versions)
}
- if _, err := s.conn.WritePacket(data, p.remoteAddr, p.info.OOB()); err != nil {
+ if _, err := s.conn.WritePacket(data, remote, oob); err != nil {
s.logger.Debugf("Error sending Version Negotiation: %s", err)
}
}
diff --git a/vendor/github.com/lucas-clemente/quic-go/stream.go b/vendor/github.com/quic-go/quic-go/stream.go
similarity index 89%
rename from vendor/github.com/lucas-clemente/quic-go/stream.go
rename to vendor/github.com/quic-go/quic-go/stream.go
index 95bbcb356..98d2fc6e4 100644
--- a/vendor/github.com/lucas-clemente/quic-go/stream.go
+++ b/vendor/github.com/quic-go/quic-go/stream.go
@@ -6,10 +6,10 @@ import (
"sync"
"time"
- "github.com/lucas-clemente/quic-go/internal/ackhandler"
- "github.com/lucas-clemente/quic-go/internal/flowcontrol"
- "github.com/lucas-clemente/quic-go/internal/protocol"
- "github.com/lucas-clemente/quic-go/internal/wire"
+ "github.com/quic-go/quic-go/internal/ackhandler"
+ "github.com/quic-go/quic-go/internal/flowcontrol"
+ "github.com/quic-go/quic-go/internal/protocol"
+ "github.com/quic-go/quic-go/internal/wire"
)
type deadlineError struct{}
@@ -60,7 +60,7 @@ type streamI interface {
// for sending
hasData() bool
handleStopSendingFrame(*wire.StopSendingFrame)
- popStreamFrame(maxBytes protocol.ByteCount) (*ackhandler.Frame, bool)
+ popStreamFrame(maxBytes protocol.ByteCount, v protocol.VersionNumber) (*ackhandler.Frame, bool)
updateSendWindow(protocol.ByteCount)
}
@@ -80,8 +80,6 @@ type stream struct {
sender streamSender
receiveStreamCompleted bool
sendStreamCompleted bool
-
- version protocol.VersionNumber
}
var _ Stream = &stream{}
@@ -90,9 +88,8 @@ var _ Stream = &stream{}
func newStream(streamID protocol.StreamID,
sender streamSender,
flowController flowcontrol.StreamFlowController,
- version protocol.VersionNumber,
) *stream {
- s := &stream{sender: sender, version: version}
+ s := &stream{sender: sender}
senderForSendStream := &uniStreamSender{
streamSender: sender,
onStreamCompletedImpl: func() {
@@ -102,7 +99,7 @@ func newStream(streamID protocol.StreamID,
s.completedMutex.Unlock()
},
}
- s.sendStream = *newSendStream(streamID, senderForSendStream, flowController, version)
+ s.sendStream = *newSendStream(streamID, senderForSendStream, flowController)
senderForReceiveStream := &uniStreamSender{
streamSender: sender,
onStreamCompletedImpl: func() {
@@ -112,7 +109,7 @@ func newStream(streamID protocol.StreamID,
s.completedMutex.Unlock()
},
}
- s.receiveStream = *newReceiveStream(streamID, senderForReceiveStream, flowController, version)
+ s.receiveStream = *newReceiveStream(streamID, senderForReceiveStream, flowController)
return s
}
diff --git a/vendor/github.com/lucas-clemente/quic-go/streams_map.go b/vendor/github.com/quic-go/quic-go/streams_map.go
similarity index 90%
rename from vendor/github.com/lucas-clemente/quic-go/streams_map.go
rename to vendor/github.com/quic-go/quic-go/streams_map.go
index 79c1ee91a..b1a80eb36 100644
--- a/vendor/github.com/lucas-clemente/quic-go/streams_map.go
+++ b/vendor/github.com/quic-go/quic-go/streams_map.go
@@ -7,10 +7,10 @@ import (
"net"
"sync"
- "github.com/lucas-clemente/quic-go/internal/flowcontrol"
- "github.com/lucas-clemente/quic-go/internal/protocol"
- "github.com/lucas-clemente/quic-go/internal/qerr"
- "github.com/lucas-clemente/quic-go/internal/wire"
+ "github.com/quic-go/quic-go/internal/flowcontrol"
+ "github.com/quic-go/quic-go/internal/protocol"
+ "github.com/quic-go/quic-go/internal/qerr"
+ "github.com/quic-go/quic-go/internal/wire"
)
type streamError struct {
@@ -46,7 +46,6 @@ var errTooManyOpenStreams = errors.New("too many open streams")
type streamsMap struct {
perspective protocol.Perspective
- version protocol.VersionNumber
maxIncomingBidiStreams uint64
maxIncomingUniStreams uint64
@@ -55,10 +54,10 @@ type streamsMap struct {
newFlowController func(protocol.StreamID) flowcontrol.StreamFlowController
mutex sync.Mutex
- outgoingBidiStreams *outgoingBidiStreamsMap
- outgoingUniStreams *outgoingUniStreamsMap
- incomingBidiStreams *incomingBidiStreamsMap
- incomingUniStreams *incomingUniStreamsMap
+ outgoingBidiStreams *outgoingStreamsMap[streamI]
+ outgoingUniStreams *outgoingStreamsMap[sendStreamI]
+ incomingBidiStreams *incomingStreamsMap[streamI]
+ incomingUniStreams *incomingStreamsMap[receiveStreamI]
reset bool
}
@@ -70,7 +69,6 @@ func newStreamsMap(
maxIncomingBidiStreams uint64,
maxIncomingUniStreams uint64,
perspective protocol.Perspective,
- version protocol.VersionNumber,
) streamManager {
m := &streamsMap{
perspective: perspective,
@@ -78,39 +76,42 @@ func newStreamsMap(
maxIncomingBidiStreams: maxIncomingBidiStreams,
maxIncomingUniStreams: maxIncomingUniStreams,
sender: sender,
- version: version,
}
m.initMaps()
return m
}
func (m *streamsMap) initMaps() {
- m.outgoingBidiStreams = newOutgoingBidiStreamsMap(
+ m.outgoingBidiStreams = newOutgoingStreamsMap(
+ protocol.StreamTypeBidi,
func(num protocol.StreamNum) streamI {
id := num.StreamID(protocol.StreamTypeBidi, m.perspective)
- return newStream(id, m.sender, m.newFlowController(id), m.version)
+ return newStream(id, m.sender, m.newFlowController(id))
},
m.sender.queueControlFrame,
)
- m.incomingBidiStreams = newIncomingBidiStreamsMap(
+ m.incomingBidiStreams = newIncomingStreamsMap(
+ protocol.StreamTypeBidi,
func(num protocol.StreamNum) streamI {
id := num.StreamID(protocol.StreamTypeBidi, m.perspective.Opposite())
- return newStream(id, m.sender, m.newFlowController(id), m.version)
+ return newStream(id, m.sender, m.newFlowController(id))
},
m.maxIncomingBidiStreams,
m.sender.queueControlFrame,
)
- m.outgoingUniStreams = newOutgoingUniStreamsMap(
+ m.outgoingUniStreams = newOutgoingStreamsMap(
+ protocol.StreamTypeUni,
func(num protocol.StreamNum) sendStreamI {
id := num.StreamID(protocol.StreamTypeUni, m.perspective)
- return newSendStream(id, m.sender, m.newFlowController(id), m.version)
+ return newSendStream(id, m.sender, m.newFlowController(id))
},
m.sender.queueControlFrame,
)
- m.incomingUniStreams = newIncomingUniStreamsMap(
+ m.incomingUniStreams = newIncomingStreamsMap(
+ protocol.StreamTypeUni,
func(num protocol.StreamNum) receiveStreamI {
id := num.StreamID(protocol.StreamTypeUni, m.perspective.Opposite())
- return newReceiveStream(id, m.sender, m.newFlowController(id), m.version)
+ return newReceiveStream(id, m.sender, m.newFlowController(id))
},
m.maxIncomingUniStreams,
m.sender.queueControlFrame,
diff --git a/vendor/github.com/lucas-clemente/quic-go/streams_map_incoming_uni.go b/vendor/github.com/quic-go/quic-go/streams_map_incoming.go
similarity index 76%
rename from vendor/github.com/lucas-clemente/quic-go/streams_map_incoming_uni.go
rename to vendor/github.com/quic-go/quic-go/streams_map_incoming.go
index 5bddec00b..18ec6f998 100644
--- a/vendor/github.com/lucas-clemente/quic-go/streams_map_incoming_uni.go
+++ b/vendor/github.com/quic-go/quic-go/streams_map_incoming.go
@@ -1,49 +1,52 @@
-// This file was automatically generated by genny.
-// Any changes will be lost if this file is regenerated.
-// see https://github.com/cheekybits/genny
-
package quic
import (
"context"
"sync"
- "github.com/lucas-clemente/quic-go/internal/protocol"
- "github.com/lucas-clemente/quic-go/internal/wire"
+ "github.com/quic-go/quic-go/internal/protocol"
+ "github.com/quic-go/quic-go/internal/wire"
)
+type incomingStream interface {
+ closeForShutdown(error)
+}
+
// When a stream is deleted before it was accepted, we can't delete it from the map immediately.
// We need to wait until the application accepts it, and delete it then.
-type receiveStreamIEntry struct {
- stream receiveStreamI
+type incomingStreamEntry[T incomingStream] struct {
+ stream T
shouldDelete bool
}
-type incomingUniStreamsMap struct {
+type incomingStreamsMap[T incomingStream] struct {
mutex sync.RWMutex
newStreamChan chan struct{}
- streams map[protocol.StreamNum]receiveStreamIEntry
+ streamType protocol.StreamType
+ streams map[protocol.StreamNum]incomingStreamEntry[T]
nextStreamToAccept protocol.StreamNum // the next stream that will be returned by AcceptStream()
nextStreamToOpen protocol.StreamNum // the highest stream that the peer opened
maxStream protocol.StreamNum // the highest stream that the peer is allowed to open
maxNumStreams uint64 // maximum number of streams
- newStream func(protocol.StreamNum) receiveStreamI
+ newStream func(protocol.StreamNum) T
queueMaxStreamID func(*wire.MaxStreamsFrame)
closeErr error
}
-func newIncomingUniStreamsMap(
- newStream func(protocol.StreamNum) receiveStreamI,
+func newIncomingStreamsMap[T incomingStream](
+ streamType protocol.StreamType,
+ newStream func(protocol.StreamNum) T,
maxStreams uint64,
queueControlFrame func(wire.Frame),
-) *incomingUniStreamsMap {
- return &incomingUniStreamsMap{
+) *incomingStreamsMap[T] {
+ return &incomingStreamsMap[T]{
newStreamChan: make(chan struct{}, 1),
- streams: make(map[protocol.StreamNum]receiveStreamIEntry),
+ streamType: streamType,
+ streams: make(map[protocol.StreamNum]incomingStreamEntry[T]),
maxStream: protocol.StreamNum(maxStreams),
maxNumStreams: maxStreams,
newStream: newStream,
@@ -53,7 +56,7 @@ func newIncomingUniStreamsMap(
}
}
-func (m *incomingUniStreamsMap) AcceptStream(ctx context.Context) (receiveStreamI, error) {
+func (m *incomingStreamsMap[T]) AcceptStream(ctx context.Context) (T, error) {
// drain the newStreamChan, so we don't check the map twice if the stream doesn't exist
select {
case <-m.newStreamChan:
@@ -63,12 +66,12 @@ func (m *incomingUniStreamsMap) AcceptStream(ctx context.Context) (receiveStream
m.mutex.Lock()
var num protocol.StreamNum
- var entry receiveStreamIEntry
+ var entry incomingStreamEntry[T]
for {
num = m.nextStreamToAccept
if m.closeErr != nil {
m.mutex.Unlock()
- return nil, m.closeErr
+ return *new(T), m.closeErr
}
var ok bool
entry, ok = m.streams[num]
@@ -78,7 +81,7 @@ func (m *incomingUniStreamsMap) AcceptStream(ctx context.Context) (receiveStream
m.mutex.Unlock()
select {
case <-ctx.Done():
- return nil, ctx.Err()
+ return *new(T), ctx.Err()
case <-m.newStreamChan:
}
m.mutex.Lock()
@@ -88,18 +91,18 @@ func (m *incomingUniStreamsMap) AcceptStream(ctx context.Context) (receiveStream
if entry.shouldDelete {
if err := m.deleteStream(num); err != nil {
m.mutex.Unlock()
- return nil, err
+ return *new(T), err
}
}
m.mutex.Unlock()
return entry.stream, nil
}
-func (m *incomingUniStreamsMap) GetOrOpenStream(num protocol.StreamNum) (receiveStreamI, error) {
+func (m *incomingStreamsMap[T]) GetOrOpenStream(num protocol.StreamNum) (T, error) {
m.mutex.RLock()
if num > m.maxStream {
m.mutex.RUnlock()
- return nil, streamError{
+ return *new(T), streamError{
message: "peer tried to open stream %d (current limit: %d)",
nums: []protocol.StreamNum{num, m.maxStream},
}
@@ -108,7 +111,7 @@ func (m *incomingUniStreamsMap) GetOrOpenStream(num protocol.StreamNum) (receive
// * this stream exists in the map, and we can return it, or
// * this stream was already closed, then we can return the nil
if num < m.nextStreamToOpen {
- var s receiveStreamI
+ var s T
// If the stream was already queued for deletion, and is just waiting to be accepted, don't return it.
if entry, ok := m.streams[num]; ok && !entry.shouldDelete {
s = entry.stream
@@ -123,7 +126,7 @@ func (m *incomingUniStreamsMap) GetOrOpenStream(num protocol.StreamNum) (receive
// * maxStream can only increase, so if the id was valid before, it definitely is valid now
// * highestStream is only modified by this function
for newNum := m.nextStreamToOpen; newNum <= num; newNum++ {
- m.streams[newNum] = receiveStreamIEntry{stream: m.newStream(newNum)}
+ m.streams[newNum] = incomingStreamEntry[T]{stream: m.newStream(newNum)}
select {
case m.newStreamChan <- struct{}{}:
default:
@@ -135,14 +138,14 @@ func (m *incomingUniStreamsMap) GetOrOpenStream(num protocol.StreamNum) (receive
return entry.stream, nil
}
-func (m *incomingUniStreamsMap) DeleteStream(num protocol.StreamNum) error {
+func (m *incomingStreamsMap[T]) DeleteStream(num protocol.StreamNum) error {
m.mutex.Lock()
defer m.mutex.Unlock()
return m.deleteStream(num)
}
-func (m *incomingUniStreamsMap) deleteStream(num protocol.StreamNum) error {
+func (m *incomingStreamsMap[T]) deleteStream(num protocol.StreamNum) error {
if _, ok := m.streams[num]; !ok {
return streamError{
message: "tried to delete unknown incoming stream %d",
@@ -173,7 +176,7 @@ func (m *incomingUniStreamsMap) deleteStream(num protocol.StreamNum) error {
if maxStream <= protocol.MaxStreamCount {
m.maxStream = maxStream
m.queueMaxStreamID(&wire.MaxStreamsFrame{
- Type: protocol.StreamTypeUni,
+ Type: m.streamType,
MaxStreamNum: m.maxStream,
})
}
@@ -181,7 +184,7 @@ func (m *incomingUniStreamsMap) deleteStream(num protocol.StreamNum) error {
return nil
}
-func (m *incomingUniStreamsMap) CloseWithError(err error) {
+func (m *incomingStreamsMap[T]) CloseWithError(err error) {
m.mutex.Lock()
m.closeErr = err
for _, entry := range m.streams {
diff --git a/vendor/github.com/lucas-clemente/quic-go/streams_map_outgoing_uni.go b/vendor/github.com/quic-go/quic-go/streams_map_outgoing.go
similarity index 73%
rename from vendor/github.com/lucas-clemente/quic-go/streams_map_outgoing_uni.go
rename to vendor/github.com/quic-go/quic-go/streams_map_outgoing.go
index 8782364a5..fd45f4e7c 100644
--- a/vendor/github.com/lucas-clemente/quic-go/streams_map_outgoing_uni.go
+++ b/vendor/github.com/quic-go/quic-go/streams_map_outgoing.go
@@ -1,21 +1,23 @@
-// This file was automatically generated by genny.
-// Any changes will be lost if this file is regenerated.
-// see https://github.com/cheekybits/genny
-
package quic
import (
"context"
"sync"
- "github.com/lucas-clemente/quic-go/internal/protocol"
- "github.com/lucas-clemente/quic-go/internal/wire"
+ "github.com/quic-go/quic-go/internal/protocol"
+ "github.com/quic-go/quic-go/internal/wire"
)
-type outgoingUniStreamsMap struct {
+type outgoingStream interface {
+ updateSendWindow(protocol.ByteCount)
+ closeForShutdown(error)
+}
+
+type outgoingStreamsMap[T outgoingStream] struct {
mutex sync.RWMutex
- streams map[protocol.StreamNum]sendStreamI
+ streamType protocol.StreamType
+ streams map[protocol.StreamNum]T
openQueue map[uint64]chan struct{}
lowestInQueue uint64
@@ -25,18 +27,20 @@ type outgoingUniStreamsMap struct {
maxStream protocol.StreamNum // the maximum stream ID we're allowed to open
blockedSent bool // was a STREAMS_BLOCKED sent for the current maxStream
- newStream func(protocol.StreamNum) sendStreamI
+ newStream func(protocol.StreamNum) T
queueStreamIDBlocked func(*wire.StreamsBlockedFrame)
closeErr error
}
-func newOutgoingUniStreamsMap(
- newStream func(protocol.StreamNum) sendStreamI,
+func newOutgoingStreamsMap[T outgoingStream](
+ streamType protocol.StreamType,
+ newStream func(protocol.StreamNum) T,
queueControlFrame func(wire.Frame),
-) *outgoingUniStreamsMap {
- return &outgoingUniStreamsMap{
- streams: make(map[protocol.StreamNum]sendStreamI),
+) *outgoingStreamsMap[T] {
+ return &outgoingStreamsMap[T]{
+ streamType: streamType,
+ streams: make(map[protocol.StreamNum]T),
openQueue: make(map[uint64]chan struct{}),
maxStream: protocol.InvalidStreamNum,
nextStream: 1,
@@ -45,32 +49,32 @@ func newOutgoingUniStreamsMap(
}
}
-func (m *outgoingUniStreamsMap) OpenStream() (sendStreamI, error) {
+func (m *outgoingStreamsMap[T]) OpenStream() (T, error) {
m.mutex.Lock()
defer m.mutex.Unlock()
if m.closeErr != nil {
- return nil, m.closeErr
+ return *new(T), m.closeErr
}
// if there are OpenStreamSync calls waiting, return an error here
if len(m.openQueue) > 0 || m.nextStream > m.maxStream {
m.maybeSendBlockedFrame()
- return nil, streamOpenErr{errTooManyOpenStreams}
+ return *new(T), streamOpenErr{errTooManyOpenStreams}
}
return m.openStream(), nil
}
-func (m *outgoingUniStreamsMap) OpenStreamSync(ctx context.Context) (sendStreamI, error) {
+func (m *outgoingStreamsMap[T]) OpenStreamSync(ctx context.Context) (T, error) {
m.mutex.Lock()
defer m.mutex.Unlock()
if m.closeErr != nil {
- return nil, m.closeErr
+ return *new(T), m.closeErr
}
if err := ctx.Err(); err != nil {
- return nil, err
+ return *new(T), err
}
if len(m.openQueue) == 0 && m.nextStream <= m.maxStream {
@@ -92,13 +96,13 @@ func (m *outgoingUniStreamsMap) OpenStreamSync(ctx context.Context) (sendStreamI
case <-ctx.Done():
m.mutex.Lock()
delete(m.openQueue, queuePos)
- return nil, ctx.Err()
+ return *new(T), ctx.Err()
case <-waitChan:
}
m.mutex.Lock()
if m.closeErr != nil {
- return nil, m.closeErr
+ return *new(T), m.closeErr
}
if m.nextStream > m.maxStream {
// no stream available. Continue waiting
@@ -112,7 +116,7 @@ func (m *outgoingUniStreamsMap) OpenStreamSync(ctx context.Context) (sendStreamI
}
}
-func (m *outgoingUniStreamsMap) openStream() sendStreamI {
+func (m *outgoingStreamsMap[T]) openStream() T {
s := m.newStream(m.nextStream)
m.streams[m.nextStream] = s
m.nextStream++
@@ -121,7 +125,7 @@ func (m *outgoingUniStreamsMap) openStream() sendStreamI {
// maybeSendBlockedFrame queues a STREAMS_BLOCKED frame for the current stream offset,
// if we haven't sent one for this offset yet
-func (m *outgoingUniStreamsMap) maybeSendBlockedFrame() {
+func (m *outgoingStreamsMap[T]) maybeSendBlockedFrame() {
if m.blockedSent {
return
}
@@ -131,17 +135,17 @@ func (m *outgoingUniStreamsMap) maybeSendBlockedFrame() {
streamNum = m.maxStream
}
m.queueStreamIDBlocked(&wire.StreamsBlockedFrame{
- Type: protocol.StreamTypeUni,
+ Type: m.streamType,
StreamLimit: streamNum,
})
m.blockedSent = true
}
-func (m *outgoingUniStreamsMap) GetStream(num protocol.StreamNum) (sendStreamI, error) {
+func (m *outgoingStreamsMap[T]) GetStream(num protocol.StreamNum) (T, error) {
m.mutex.RLock()
if num >= m.nextStream {
m.mutex.RUnlock()
- return nil, streamError{
+ return *new(T), streamError{
message: "peer attempted to open stream %d",
nums: []protocol.StreamNum{num},
}
@@ -151,7 +155,7 @@ func (m *outgoingUniStreamsMap) GetStream(num protocol.StreamNum) (sendStreamI,
return s, nil
}
-func (m *outgoingUniStreamsMap) DeleteStream(num protocol.StreamNum) error {
+func (m *outgoingStreamsMap[T]) DeleteStream(num protocol.StreamNum) error {
m.mutex.Lock()
defer m.mutex.Unlock()
@@ -165,7 +169,7 @@ func (m *outgoingUniStreamsMap) DeleteStream(num protocol.StreamNum) error {
return nil
}
-func (m *outgoingUniStreamsMap) SetMaxStream(num protocol.StreamNum) {
+func (m *outgoingStreamsMap[T]) SetMaxStream(num protocol.StreamNum) {
m.mutex.Lock()
defer m.mutex.Unlock()
@@ -183,7 +187,7 @@ func (m *outgoingUniStreamsMap) SetMaxStream(num protocol.StreamNum) {
// UpdateSendWindow is called when the peer's transport parameters are received.
// Only in the case of a 0-RTT handshake will we have open streams at this point.
// We might need to update the send window, in case the server increased it.
-func (m *outgoingUniStreamsMap) UpdateSendWindow(limit protocol.ByteCount) {
+func (m *outgoingStreamsMap[T]) UpdateSendWindow(limit protocol.ByteCount) {
m.mutex.Lock()
for _, str := range m.streams {
str.updateSendWindow(limit)
@@ -192,7 +196,7 @@ func (m *outgoingUniStreamsMap) UpdateSendWindow(limit protocol.ByteCount) {
}
// unblockOpenSync unblocks the next OpenStreamSync go-routine to open a new stream
-func (m *outgoingUniStreamsMap) unblockOpenSync() {
+func (m *outgoingStreamsMap[T]) unblockOpenSync() {
if len(m.openQueue) == 0 {
return
}
@@ -211,7 +215,7 @@ func (m *outgoingUniStreamsMap) unblockOpenSync() {
}
}
-func (m *outgoingUniStreamsMap) CloseWithError(err error) {
+func (m *outgoingStreamsMap[T]) CloseWithError(err error) {
m.mutex.Lock()
m.closeErr = err
for _, str := range m.streams {
diff --git a/vendor/github.com/lucas-clemente/quic-go/sys_conn.go b/vendor/github.com/quic-go/quic-go/sys_conn.go
similarity index 95%
rename from vendor/github.com/lucas-clemente/quic-go/sys_conn.go
rename to vendor/github.com/quic-go/quic-go/sys_conn.go
index 7cc054658..d6c1d6164 100644
--- a/vendor/github.com/lucas-clemente/quic-go/sys_conn.go
+++ b/vendor/github.com/quic-go/quic-go/sys_conn.go
@@ -5,8 +5,8 @@ import (
"syscall"
"time"
- "github.com/lucas-clemente/quic-go/internal/protocol"
- "github.com/lucas-clemente/quic-go/internal/utils"
+ "github.com/quic-go/quic-go/internal/protocol"
+ "github.com/quic-go/quic-go/internal/utils"
)
// OOBCapablePacketConn is a connection that allows the reading of ECN bits from the IP header.
diff --git a/vendor/github.com/lucas-clemente/quic-go/sys_conn_df.go b/vendor/github.com/quic-go/quic-go/sys_conn_df.go
similarity index 90%
rename from vendor/github.com/lucas-clemente/quic-go/sys_conn_df.go
rename to vendor/github.com/quic-go/quic-go/sys_conn_df.go
index ae9274d97..ef9f981ac 100644
--- a/vendor/github.com/lucas-clemente/quic-go/sys_conn_df.go
+++ b/vendor/github.com/quic-go/quic-go/sys_conn_df.go
@@ -1,5 +1,4 @@
//go:build !linux && !windows
-// +build !linux,!windows
package quic
diff --git a/vendor/github.com/lucas-clemente/quic-go/sys_conn_df_linux.go b/vendor/github.com/quic-go/quic-go/sys_conn_df_linux.go
similarity index 94%
rename from vendor/github.com/lucas-clemente/quic-go/sys_conn_df_linux.go
rename to vendor/github.com/quic-go/quic-go/sys_conn_df_linux.go
index 17ac67f12..98542b410 100644
--- a/vendor/github.com/lucas-clemente/quic-go/sys_conn_df_linux.go
+++ b/vendor/github.com/quic-go/quic-go/sys_conn_df_linux.go
@@ -1,5 +1,4 @@
//go:build linux
-// +build linux
package quic
@@ -7,8 +6,9 @@ import (
"errors"
"syscall"
- "github.com/lucas-clemente/quic-go/internal/utils"
"golang.org/x/sys/unix"
+
+ "github.com/quic-go/quic-go/internal/utils"
)
func setDF(rawConn syscall.RawConn) error {
diff --git a/vendor/github.com/lucas-clemente/quic-go/sys_conn_df_windows.go b/vendor/github.com/quic-go/quic-go/sys_conn_df_windows.go
similarity index 95%
rename from vendor/github.com/lucas-clemente/quic-go/sys_conn_df_windows.go
rename to vendor/github.com/quic-go/quic-go/sys_conn_df_windows.go
index 4649f6463..9855e8de8 100644
--- a/vendor/github.com/lucas-clemente/quic-go/sys_conn_df_windows.go
+++ b/vendor/github.com/quic-go/quic-go/sys_conn_df_windows.go
@@ -1,5 +1,4 @@
//go:build windows
-// +build windows
package quic
@@ -7,8 +6,9 @@ import (
"errors"
"syscall"
- "github.com/lucas-clemente/quic-go/internal/utils"
"golang.org/x/sys/windows"
+
+ "github.com/quic-go/quic-go/internal/utils"
)
const (
diff --git a/vendor/github.com/lucas-clemente/quic-go/sys_conn_helper_darwin.go b/vendor/github.com/quic-go/quic-go/sys_conn_helper_darwin.go
similarity index 95%
rename from vendor/github.com/lucas-clemente/quic-go/sys_conn_helper_darwin.go
rename to vendor/github.com/quic-go/quic-go/sys_conn_helper_darwin.go
index eabf489f1..7ad5f3af1 100644
--- a/vendor/github.com/lucas-clemente/quic-go/sys_conn_helper_darwin.go
+++ b/vendor/github.com/quic-go/quic-go/sys_conn_helper_darwin.go
@@ -1,5 +1,4 @@
//go:build darwin
-// +build darwin
package quic
diff --git a/vendor/github.com/lucas-clemente/quic-go/sys_conn_helper_freebsd.go b/vendor/github.com/quic-go/quic-go/sys_conn_helper_freebsd.go
similarity index 93%
rename from vendor/github.com/lucas-clemente/quic-go/sys_conn_helper_freebsd.go
rename to vendor/github.com/quic-go/quic-go/sys_conn_helper_freebsd.go
index 0b3e8434b..8d16d0b91 100644
--- a/vendor/github.com/lucas-clemente/quic-go/sys_conn_helper_freebsd.go
+++ b/vendor/github.com/quic-go/quic-go/sys_conn_helper_freebsd.go
@@ -1,5 +1,4 @@
//go:build freebsd
-// +build freebsd
package quic
diff --git a/vendor/github.com/lucas-clemente/quic-go/sys_conn_helper_linux.go b/vendor/github.com/quic-go/quic-go/sys_conn_helper_linux.go
similarity index 96%
rename from vendor/github.com/lucas-clemente/quic-go/sys_conn_helper_linux.go
rename to vendor/github.com/quic-go/quic-go/sys_conn_helper_linux.go
index 51bec9002..61c3f54ba 100644
--- a/vendor/github.com/lucas-clemente/quic-go/sys_conn_helper_linux.go
+++ b/vendor/github.com/quic-go/quic-go/sys_conn_helper_linux.go
@@ -1,5 +1,4 @@
//go:build linux
-// +build linux
package quic
diff --git a/vendor/github.com/lucas-clemente/quic-go/sys_conn_no_oob.go b/vendor/github.com/quic-go/quic-go/sys_conn_no_oob.go
similarity index 87%
rename from vendor/github.com/lucas-clemente/quic-go/sys_conn_no_oob.go
rename to vendor/github.com/quic-go/quic-go/sys_conn_no_oob.go
index e3b0d11f6..7ab5040aa 100644
--- a/vendor/github.com/lucas-clemente/quic-go/sys_conn_no_oob.go
+++ b/vendor/github.com/quic-go/quic-go/sys_conn_no_oob.go
@@ -1,5 +1,4 @@
//go:build !darwin && !linux && !freebsd && !windows
-// +build !darwin,!linux,!freebsd,!windows
package quic
diff --git a/vendor/github.com/lucas-clemente/quic-go/sys_conn_oob.go b/vendor/github.com/quic-go/quic-go/sys_conn_oob.go
similarity index 87%
rename from vendor/github.com/lucas-clemente/quic-go/sys_conn_oob.go
rename to vendor/github.com/quic-go/quic-go/sys_conn_oob.go
index acd74d023..806dfb81a 100644
--- a/vendor/github.com/lucas-clemente/quic-go/sys_conn_oob.go
+++ b/vendor/github.com/quic-go/quic-go/sys_conn_oob.go
@@ -1,5 +1,4 @@
//go:build darwin || linux || freebsd
-// +build darwin linux freebsd
package quic
@@ -15,8 +14,8 @@ import (
"golang.org/x/net/ipv6"
"golang.org/x/sys/unix"
- "github.com/lucas-clemente/quic-go/internal/protocol"
- "github.com/lucas-clemente/quic-go/internal/utils"
+ "github.com/quic-go/quic-go/internal/protocol"
+ "github.com/quic-go/quic-go/internal/utils"
)
const (
@@ -123,10 +122,15 @@ func newConn(c OOBCapablePacketConn) (*oobConn, error) {
bc = ipv4.NewPacketConn(c)
}
+ msgs := make([]ipv4.Message, batchSize)
+ for i := range msgs {
+ // preallocate the [][]byte
+ msgs[i].Buffers = make([][]byte, 1)
+ }
oobConn := &oobConn{
OOBCapablePacketConn: c,
batchConn: bc,
- messages: make([]ipv4.Message, batchSize),
+ messages: msgs,
readPos: batchSize,
}
for i := 0; i < batchSize; i++ {
@@ -143,7 +147,7 @@ func (c *oobConn) ReadPacket() (*receivedPacket, error) {
buffer := getPacketBuffer()
buffer.Data = buffer.Data[:protocol.MaxPacketBufferSize]
c.buffers[i] = buffer
- c.messages[i].Buffers = [][]byte{c.buffers[i].Data}
+ c.messages[i].Buffers[0] = c.buffers[i].Data
}
c.readPos = 0
@@ -157,18 +161,20 @@ func (c *oobConn) ReadPacket() (*receivedPacket, error) {
msg := c.messages[c.readPos]
buffer := c.buffers[c.readPos]
c.readPos++
- ctrlMsgs, err := unix.ParseSocketControlMessage(msg.OOB[:msg.NN])
- if err != nil {
- return nil, err
- }
+
+ data := msg.OOB[:msg.NN]
var ecn protocol.ECN
var destIP net.IP
var ifIndex uint32
- for _, ctrlMsg := range ctrlMsgs {
- if ctrlMsg.Header.Level == unix.IPPROTO_IP {
- switch ctrlMsg.Header.Type {
+ for len(data) > 0 {
+ hdr, body, remainder, err := unix.ParseOneSocketControlMessage(data)
+ if err != nil {
+ return nil, err
+ }
+ if hdr.Level == unix.IPPROTO_IP {
+ switch hdr.Type {
case msgTypeIPTOS:
- ecn = protocol.ECN(ctrlMsg.Data[0] & ecnMask)
+ ecn = protocol.ECN(body[0] & ecnMask)
case msgTypeIPv4PKTINFO:
// struct in_pktinfo {
// unsigned int ipi_ifindex; /* Interface index */
@@ -177,33 +183,34 @@ func (c *oobConn) ReadPacket() (*receivedPacket, error) {
// address */
// };
ip := make([]byte, 4)
- if len(ctrlMsg.Data) == 12 {
- ifIndex = binary.LittleEndian.Uint32(ctrlMsg.Data)
- copy(ip, ctrlMsg.Data[8:12])
- } else if len(ctrlMsg.Data) == 4 {
+ if len(body) == 12 {
+ ifIndex = binary.LittleEndian.Uint32(body)
+ copy(ip, body[8:12])
+ } else if len(body) == 4 {
// FreeBSD
- copy(ip, ctrlMsg.Data)
+ copy(ip, body)
}
destIP = net.IP(ip)
}
}
- if ctrlMsg.Header.Level == unix.IPPROTO_IPV6 {
- switch ctrlMsg.Header.Type {
+ if hdr.Level == unix.IPPROTO_IPV6 {
+ switch hdr.Type {
case unix.IPV6_TCLASS:
- ecn = protocol.ECN(ctrlMsg.Data[0] & ecnMask)
+ ecn = protocol.ECN(body[0] & ecnMask)
case msgTypeIPv6PKTINFO:
// struct in6_pktinfo {
// struct in6_addr ipi6_addr; /* src/dst IPv6 address */
// unsigned int ipi6_ifindex; /* send/recv interface index */
// };
- if len(ctrlMsg.Data) == 20 {
+ if len(body) == 20 {
ip := make([]byte, 16)
- copy(ip, ctrlMsg.Data[:16])
+ copy(ip, body[:16])
destIP = net.IP(ip)
- ifIndex = binary.LittleEndian.Uint32(ctrlMsg.Data[16:])
+ ifIndex = binary.LittleEndian.Uint32(body[16:])
}
}
}
+ data = remainder
}
var info *packetInfo
if destIP != nil {
diff --git a/vendor/github.com/lucas-clemente/quic-go/sys_conn_windows.go b/vendor/github.com/quic-go/quic-go/sys_conn_windows.go
similarity index 97%
rename from vendor/github.com/lucas-clemente/quic-go/sys_conn_windows.go
rename to vendor/github.com/quic-go/quic-go/sys_conn_windows.go
index f2cc22ab7..b003fe94a 100644
--- a/vendor/github.com/lucas-clemente/quic-go/sys_conn_windows.go
+++ b/vendor/github.com/quic-go/quic-go/sys_conn_windows.go
@@ -1,5 +1,4 @@
//go:build windows
-// +build windows
package quic
diff --git a/vendor/github.com/lucas-clemente/quic-go/token_store.go b/vendor/github.com/quic-go/quic-go/token_store.go
similarity index 84%
rename from vendor/github.com/lucas-clemente/quic-go/token_store.go
rename to vendor/github.com/quic-go/quic-go/token_store.go
index 9641dc5a7..00460e502 100644
--- a/vendor/github.com/lucas-clemente/quic-go/token_store.go
+++ b/vendor/github.com/quic-go/quic-go/token_store.go
@@ -1,10 +1,10 @@
package quic
import (
- "container/list"
"sync"
- "github.com/lucas-clemente/quic-go/internal/utils"
+ "github.com/quic-go/quic-go/internal/utils"
+ list "github.com/quic-go/quic-go/internal/utils/linkedlist"
)
type singleOriginTokenStore struct {
@@ -48,8 +48,8 @@ type lruTokenStoreEntry struct {
type lruTokenStore struct {
mutex sync.Mutex
- m map[string]*list.Element
- q *list.List
+ m map[string]*list.Element[*lruTokenStoreEntry]
+ q *list.List[*lruTokenStoreEntry]
capacity int
singleOriginSize int
}
@@ -61,8 +61,8 @@ var _ TokenStore = &lruTokenStore{}
// tokensPerOrigin specifies the maximum number of tokens per origin.
func NewLRUTokenStore(maxOrigins, tokensPerOrigin int) TokenStore {
return &lruTokenStore{
- m: make(map[string]*list.Element),
- q: list.New(),
+ m: make(map[string]*list.Element[*lruTokenStoreEntry]),
+ q: list.New[*lruTokenStoreEntry](),
capacity: maxOrigins,
singleOriginSize: tokensPerOrigin,
}
@@ -73,7 +73,7 @@ func (s *lruTokenStore) Put(key string, token *ClientToken) {
defer s.mutex.Unlock()
if el, ok := s.m[key]; ok {
- entry := el.Value.(*lruTokenStoreEntry)
+ entry := el.Value
entry.cache.Add(token)
s.q.MoveToFront(el)
return
@@ -90,7 +90,7 @@ func (s *lruTokenStore) Put(key string, token *ClientToken) {
}
elem := s.q.Back()
- entry := elem.Value.(*lruTokenStoreEntry)
+ entry := elem.Value
delete(s.m, entry.key)
entry.key = key
entry.cache = newSingleOriginTokenStore(s.singleOriginSize)
@@ -106,7 +106,7 @@ func (s *lruTokenStore) Pop(key string) *ClientToken {
var token *ClientToken
if el, ok := s.m[key]; ok {
s.q.MoveToFront(el)
- cache := el.Value.(*lruTokenStoreEntry).cache
+ cache := el.Value.cache
token = cache.Pop()
if cache.Len() == 0 {
s.q.Remove(el)
diff --git a/vendor/github.com/quic-go/quic-go/tools.go b/vendor/github.com/quic-go/quic-go/tools.go
new file mode 100644
index 000000000..e848317f1
--- /dev/null
+++ b/vendor/github.com/quic-go/quic-go/tools.go
@@ -0,0 +1,8 @@
+//go:build tools
+
+package quic
+
+import (
+ _ "github.com/golang/mock/mockgen"
+ _ "github.com/onsi/ginkgo/v2/ginkgo"
+)
diff --git a/vendor/github.com/lucas-clemente/quic-go/window_update_queue.go b/vendor/github.com/quic-go/quic-go/window_update_queue.go
similarity index 91%
rename from vendor/github.com/lucas-clemente/quic-go/window_update_queue.go
rename to vendor/github.com/quic-go/quic-go/window_update_queue.go
index 2abcf6739..9ed121430 100644
--- a/vendor/github.com/lucas-clemente/quic-go/window_update_queue.go
+++ b/vendor/github.com/quic-go/quic-go/window_update_queue.go
@@ -3,9 +3,9 @@ package quic
import (
"sync"
- "github.com/lucas-clemente/quic-go/internal/flowcontrol"
- "github.com/lucas-clemente/quic-go/internal/protocol"
- "github.com/lucas-clemente/quic-go/internal/wire"
+ "github.com/quic-go/quic-go/internal/flowcontrol"
+ "github.com/quic-go/quic-go/internal/protocol"
+ "github.com/quic-go/quic-go/internal/wire"
)
type windowUpdateQueue struct {
diff --git a/vendor/github.com/quic-go/quic-go/zero_rtt_queue.go b/vendor/github.com/quic-go/quic-go/zero_rtt_queue.go
new file mode 100644
index 000000000..b81a936e0
--- /dev/null
+++ b/vendor/github.com/quic-go/quic-go/zero_rtt_queue.go
@@ -0,0 +1,34 @@
+package quic
+
+import (
+ "time"
+
+ "github.com/quic-go/quic-go/internal/protocol"
+)
+
+type zeroRTTQueue struct {
+ queue []*receivedPacket
+ retireTimer *time.Timer
+}
+
+var _ packetHandler = &zeroRTTQueue{}
+
+func (h *zeroRTTQueue) handlePacket(p *receivedPacket) {
+ if len(h.queue) < protocol.Max0RTTQueueLen {
+ h.queue = append(h.queue, p)
+ }
+}
+func (h *zeroRTTQueue) shutdown() {}
+func (h *zeroRTTQueue) destroy(error) {}
+func (h *zeroRTTQueue) getPerspective() protocol.Perspective { return protocol.PerspectiveClient }
+func (h *zeroRTTQueue) EnqueueAll(sess packetHandler) {
+ for _, p := range h.queue {
+ sess.handlePacket(p)
+ }
+}
+
+func (h *zeroRTTQueue) Clear() {
+ for _, p := range h.queue {
+ p.buffer.Release()
+ }
+}
diff --git a/vendor/github.com/quic-go/webtransport-go/.gitignore b/vendor/github.com/quic-go/webtransport-go/.gitignore
new file mode 100644
index 000000000..8cbead44b
--- /dev/null
+++ b/vendor/github.com/quic-go/webtransport-go/.gitignore
@@ -0,0 +1 @@
+qlog/
diff --git a/vendor/github.com/onsi/ginkgo/reporters/stenographer/support/go-isatty/LICENSE b/vendor/github.com/quic-go/webtransport-go/LICENSE
similarity index 93%
rename from vendor/github.com/onsi/ginkgo/reporters/stenographer/support/go-isatty/LICENSE
rename to vendor/github.com/quic-go/webtransport-go/LICENSE
index 65dc692b6..0e78adcc1 100644
--- a/vendor/github.com/onsi/ginkgo/reporters/stenographer/support/go-isatty/LICENSE
+++ b/vendor/github.com/quic-go/webtransport-go/LICENSE
@@ -1,6 +1,4 @@
-Copyright (c) Yasuhiro MATSUMOTO
-
-MIT License (Expat)
+Copyright 2022 Marten Seemann
Permission is hereby granted, free of charge, to any person obtaining a copy of this software and associated documentation files (the "Software"), to deal in the Software without restriction, including without limitation the rights to use, copy, modify, merge, publish, distribute, sublicense, and/or sell copies of the Software, and to permit persons to whom the Software is furnished to do so, subject to the following conditions:
diff --git a/vendor/github.com/quic-go/webtransport-go/README.md b/vendor/github.com/quic-go/webtransport-go/README.md
new file mode 100644
index 000000000..c655847bc
--- /dev/null
+++ b/vendor/github.com/quic-go/webtransport-go/README.md
@@ -0,0 +1,39 @@
+# webtransport-go
+
+[![PkgGoDev](https://pkg.go.dev/badge/github.com/quic-go/webtransport-go)](https://pkg.go.dev/github.com/quic-go/webtransport-go)
+[![Code Coverage](https://img.shields.io/codecov/c/github/quic-go/webtransport-go/master.svg?style=flat-square)](https://codecov.io/gh/quic-go/webtransport-go/)
+
+webtransport-go is an implementation of the WebTransport protocol, based on [quic-go](https://github.com/quic-go/quic-go). It currently implements [draft-02](https://www.ietf.org/archive/id/draft-ietf-webtrans-http3-02.html) of the specification.
+
+## Running a Server
+
+```go
+// create a new webtransport.Server, listening on (UDP) port 443
+s := webtransport.Server{
+ H3: http3.Server{Addr: ":443"},
+}
+
+// Create a new HTTP endpoint /webtransport.
+http.HandleFunc("/webtransport", func(w http.ResponseWriter, r *http.Request) {
+ conn, err := s.Upgrade(w, r)
+ if err != nil {
+ log.Printf("upgrading failed: %s", err)
+ w.WriteHeader(500)
+ return
+ }
+ // Handle the connection. Here goes the application logic.
+})
+
+s.ListenAndServeTLS(certFile, keyFile)
+```
+
+Now that the server is running, Chrome can be used to establish a new WebTransport session as described in [this tutorial](https://web.dev/webtransport/).
+
+## Running a Client
+
+```go
+var d webtransport.Dialer
+rsp, conn, err := d.Dial(ctx, "https://example.com/webtransport", nil)
+// err is only nil if rsp.StatusCode is a 2xx
+// Handle the connection. Here goes the application logic.
+```
diff --git a/vendor/github.com/quic-go/webtransport-go/client.go b/vendor/github.com/quic-go/webtransport-go/client.go
new file mode 100644
index 000000000..ffcb7497d
--- /dev/null
+++ b/vendor/github.com/quic-go/webtransport-go/client.go
@@ -0,0 +1,124 @@
+package webtransport
+
+import (
+ "context"
+ "fmt"
+ "net/http"
+ "net/url"
+ "sync"
+ "time"
+
+ "github.com/quic-go/quic-go"
+ "github.com/quic-go/quic-go/http3"
+ "github.com/quic-go/quic-go/quicvarint"
+)
+
+type Dialer struct {
+ // If not set, reasonable defaults will be used.
+ // In order for WebTransport to function, this implementation will:
+ // * overwrite the StreamHijacker and UniStreamHijacker
+ // * enable datagram support
+ // * set the MaxIncomingStreams to 100 on the quic.Config, if unset
+ *http3.RoundTripper
+
+ // StreamReorderingTime is the time an incoming WebTransport stream that cannot be associated
+ // with a session is buffered.
+ // This can happen if the response to a CONNECT request (that creates a new session) is reordered,
+ // and arrives after the first WebTransport stream(s) for that session.
+ // Defaults to 5 seconds.
+ StreamReorderingTimeout time.Duration
+
+ ctx context.Context
+ ctxCancel context.CancelFunc
+
+ initOnce sync.Once
+
+ conns sessionManager
+}
+
+func (d *Dialer) init() {
+ timeout := d.StreamReorderingTimeout
+ if timeout == 0 {
+ timeout = 5 * time.Second
+ }
+ d.conns = *newSessionManager(timeout)
+ d.ctx, d.ctxCancel = context.WithCancel(context.Background())
+ if d.RoundTripper == nil {
+ d.RoundTripper = &http3.RoundTripper{}
+ }
+ d.RoundTripper.EnableDatagrams = true
+ if d.RoundTripper.AdditionalSettings == nil {
+ d.RoundTripper.AdditionalSettings = make(map[uint64]uint64)
+ }
+ d.RoundTripper.StreamHijacker = func(ft http3.FrameType, conn quic.Connection, str quic.Stream, e error) (hijacked bool, err error) {
+ if isWebTransportError(e) {
+ return true, nil
+ }
+ if ft != webTransportFrameType {
+ return false, nil
+ }
+ id, err := quicvarint.Read(quicvarint.NewReader(str))
+ if err != nil {
+ if isWebTransportError(err) {
+ return true, nil
+ }
+ return false, err
+ }
+ d.conns.AddStream(conn, str, sessionID(id))
+ return true, nil
+ }
+ d.RoundTripper.UniStreamHijacker = func(st http3.StreamType, conn quic.Connection, str quic.ReceiveStream, err error) (hijacked bool) {
+ if st != webTransportUniStreamType && !isWebTransportError(err) {
+ return false
+ }
+ d.conns.AddUniStream(conn, str)
+ return true
+ }
+ if d.QuicConfig == nil {
+ d.QuicConfig = &quic.Config{}
+ }
+ if d.QuicConfig.MaxIncomingStreams == 0 {
+ d.QuicConfig.MaxIncomingStreams = 100
+ }
+}
+
+func (d *Dialer) Dial(ctx context.Context, urlStr string, reqHdr http.Header) (*http.Response, *Session, error) {
+ d.initOnce.Do(func() { d.init() })
+
+ u, err := url.Parse(urlStr)
+ if err != nil {
+ return nil, nil, err
+ }
+ if reqHdr == nil {
+ reqHdr = http.Header{}
+ }
+ reqHdr.Add(webTransportDraftOfferHeaderKey, "1")
+ req := &http.Request{
+ Method: http.MethodConnect,
+ Header: reqHdr,
+ Proto: "webtransport",
+ Host: u.Host,
+ URL: u,
+ }
+ req = req.WithContext(ctx)
+
+ rsp, err := d.RoundTripper.RoundTripOpt(req, http3.RoundTripOpt{DontCloseRequestStream: true})
+ if err != nil {
+ return nil, nil, err
+ }
+ if rsp.StatusCode < 200 || rsp.StatusCode >= 300 {
+ return rsp, nil, fmt.Errorf("received status %d", rsp.StatusCode)
+ }
+ str := rsp.Body.(http3.HTTPStreamer).HTTPStream()
+ conn := d.conns.AddSession(
+ rsp.Body.(http3.Hijacker).StreamCreator(),
+ sessionID(str.StreamID()),
+ str,
+ )
+ return rsp, conn, nil
+}
+
+func (d *Dialer) Close() error {
+ d.ctxCancel()
+ return nil
+}
diff --git a/vendor/github.com/quic-go/webtransport-go/codecov.yml b/vendor/github.com/quic-go/webtransport-go/codecov.yml
new file mode 100644
index 000000000..ad40e7571
--- /dev/null
+++ b/vendor/github.com/quic-go/webtransport-go/codecov.yml
@@ -0,0 +1,9 @@
+comment: false
+coverage:
+ status:
+ patch:
+ default:
+ informational: true
+ project:
+ default:
+ informational: true
diff --git a/vendor/github.com/quic-go/webtransport-go/errors.go b/vendor/github.com/quic-go/webtransport-go/errors.go
new file mode 100644
index 000000000..9929513e4
--- /dev/null
+++ b/vendor/github.com/quic-go/webtransport-go/errors.go
@@ -0,0 +1,78 @@
+package webtransport
+
+import (
+ "errors"
+ "fmt"
+
+ "github.com/quic-go/quic-go"
+)
+
+// StreamErrorCode is an error code used for stream termination.
+type StreamErrorCode uint8
+
+// SessionErrorCode is an error code for session termination.
+type SessionErrorCode uint32
+
+const (
+ firstErrorCode = 0x52e4a40fa8db
+ lastErrorCode = 0x52e4a40fa9e2
+)
+
+func webtransportCodeToHTTPCode(n StreamErrorCode) quic.StreamErrorCode {
+ return quic.StreamErrorCode(firstErrorCode) + quic.StreamErrorCode(n) + quic.StreamErrorCode(n/0x1e)
+}
+
+func httpCodeToWebtransportCode(h quic.StreamErrorCode) (StreamErrorCode, error) {
+ if h < firstErrorCode || h > lastErrorCode {
+ return 0, errors.New("error code outside of expected range")
+ }
+ if (h-0x21)%0x1f == 0 {
+ return 0, errors.New("invalid error code")
+ }
+ shifted := h - firstErrorCode
+ return StreamErrorCode(shifted - shifted/0x1f), nil
+}
+
+func isWebTransportError(e error) bool {
+ if e == nil {
+ return false
+ }
+ var strErr *quic.StreamError
+ if !errors.As(e, &strErr) {
+ return false
+ }
+ if strErr.ErrorCode == sessionCloseErrorCode {
+ return true
+ }
+ _, err := httpCodeToWebtransportCode(strErr.ErrorCode)
+ return err == nil
+}
+
+// WebTransportBufferedStreamRejectedErrorCode is the error code of the
+// H3_WEBTRANSPORT_BUFFERED_STREAM_REJECTED error.
+const WebTransportBufferedStreamRejectedErrorCode quic.StreamErrorCode = 0x3994bd84
+
+// StreamError is the error that is returned from stream operations (Read, Write) when the stream is canceled.
+type StreamError struct {
+ ErrorCode StreamErrorCode
+}
+
+func (e *StreamError) Is(target error) bool {
+ _, ok := target.(*StreamError)
+ return ok
+}
+
+func (e *StreamError) Error() string {
+ return fmt.Sprintf("stream canceled with error code %d", e.ErrorCode)
+}
+
+// ConnectionError is a WebTransport connection error.
+type ConnectionError struct {
+ Remote bool
+ ErrorCode SessionErrorCode
+ Message string
+}
+
+var _ error = &ConnectionError{}
+
+func (e *ConnectionError) Error() string { return e.Message }
diff --git a/vendor/github.com/quic-go/webtransport-go/protocol.go b/vendor/github.com/quic-go/webtransport-go/protocol.go
new file mode 100644
index 000000000..1770f26e0
--- /dev/null
+++ b/vendor/github.com/quic-go/webtransport-go/protocol.go
@@ -0,0 +1,5 @@
+package webtransport
+
+const settingsEnableWebtransport = 0x2b603742
+
+const protocolHeader = "webtransport"
diff --git a/vendor/github.com/quic-go/webtransport-go/server.go b/vendor/github.com/quic-go/webtransport-go/server.go
new file mode 100644
index 000000000..7b61346aa
--- /dev/null
+++ b/vendor/github.com/quic-go/webtransport-go/server.go
@@ -0,0 +1,227 @@
+package webtransport
+
+import (
+ "context"
+ "errors"
+ "fmt"
+ "net"
+ "net/http"
+ "net/url"
+ "sync"
+ "time"
+ "unicode/utf8"
+
+ "github.com/quic-go/quic-go"
+ "github.com/quic-go/quic-go/http3"
+ "github.com/quic-go/quic-go/quicvarint"
+)
+
+const (
+ webTransportDraftOfferHeaderKey = "Sec-Webtransport-Http3-Draft02"
+ webTransportDraftHeaderKey = "Sec-Webtransport-Http3-Draft"
+ webTransportDraftHeaderValue = "draft02"
+)
+
+const (
+ webTransportFrameType = 0x41
+ webTransportUniStreamType = 0x54
+)
+
+type Server struct {
+ H3 http3.Server
+
+ // StreamReorderingTime is the time an incoming WebTransport stream that cannot be associated
+ // with a session is buffered.
+ // This can happen if the CONNECT request (that creates a new session) is reordered, and arrives
+ // after the first WebTransport stream(s) for that session.
+ // Defaults to 5 seconds.
+ StreamReorderingTimeout time.Duration
+
+ // CheckOrigin is used to validate the request origin, thereby preventing cross-site request forgery.
+ // CheckOrigin returns true if the request Origin header is acceptable.
+ // If unset, a safe default is used: If the Origin header is set, it is checked that it
+ // matches the request's Host header.
+ CheckOrigin func(r *http.Request) bool
+
+ ctx context.Context // is closed when Close is called
+ ctxCancel context.CancelFunc
+ refCount sync.WaitGroup
+
+ initOnce sync.Once
+ initErr error
+
+ conns *sessionManager
+}
+
+func (s *Server) initialize() error {
+ s.initOnce.Do(func() {
+ s.initErr = s.init()
+ })
+ return s.initErr
+}
+
+func (s *Server) init() error {
+ s.ctx, s.ctxCancel = context.WithCancel(context.Background())
+ timeout := s.StreamReorderingTimeout
+ if timeout == 0 {
+ timeout = 5 * time.Second
+ }
+ s.conns = newSessionManager(timeout)
+ if s.CheckOrigin == nil {
+ s.CheckOrigin = checkSameOrigin
+ }
+
+ // configure the http3.Server
+ if s.H3.AdditionalSettings == nil {
+ s.H3.AdditionalSettings = make(map[uint64]uint64)
+ }
+ s.H3.AdditionalSettings[settingsEnableWebtransport] = 1
+ s.H3.EnableDatagrams = true
+ if s.H3.StreamHijacker != nil {
+ return errors.New("StreamHijacker already set")
+ }
+ s.H3.StreamHijacker = func(ft http3.FrameType, qconn quic.Connection, str quic.Stream, err error) (bool /* hijacked */, error) {
+ if isWebTransportError(err) {
+ return true, nil
+ }
+ if ft != webTransportFrameType {
+ return false, nil
+ }
+ // Reading the varint might block if the peer sends really small frames, but this is fine.
+ // This function is called from the HTTP/3 request handler, which runs in its own Go routine.
+ id, err := quicvarint.Read(quicvarint.NewReader(str))
+ if err != nil {
+ if isWebTransportError(err) {
+ return true, nil
+ }
+ return false, err
+ }
+ s.conns.AddStream(qconn, str, sessionID(id))
+ return true, nil
+ }
+ s.H3.UniStreamHijacker = func(st http3.StreamType, qconn quic.Connection, str quic.ReceiveStream, err error) (hijacked bool) {
+ if st != webTransportUniStreamType && !isWebTransportError(err) {
+ return false
+ }
+ s.conns.AddUniStream(qconn, str)
+ return true
+ }
+ return nil
+}
+
+func (s *Server) Serve(conn net.PacketConn) error {
+ if err := s.initialize(); err != nil {
+ return err
+ }
+ return s.H3.Serve(conn)
+}
+
+// ServeQUICConn serves a single QUIC connection.
+func (s *Server) ServeQUICConn(conn quic.Connection) error {
+ if err := s.initialize(); err != nil {
+ return err
+ }
+ return s.H3.ServeQUICConn(conn)
+}
+
+func (s *Server) ListenAndServe() error {
+ if err := s.initialize(); err != nil {
+ return err
+ }
+ return s.H3.ListenAndServe()
+}
+
+func (s *Server) ListenAndServeTLS(certFile, keyFile string) error {
+ if err := s.initialize(); err != nil {
+ return err
+ }
+ return s.H3.ListenAndServeTLS(certFile, keyFile)
+}
+
+func (s *Server) Close() error {
+ // Make sure that ctxCancel is defined.
+ // This is expected to be uncommon.
+ // It only happens if the server is closed without Serve / ListenAndServe having been called.
+ s.initOnce.Do(func() {})
+
+ if s.ctxCancel != nil {
+ s.ctxCancel()
+ }
+ if s.conns != nil {
+ s.conns.Close()
+ }
+ err := s.H3.Close()
+ s.refCount.Wait()
+ return err
+}
+
+func (s *Server) Upgrade(w http.ResponseWriter, r *http.Request) (*Session, error) {
+ if r.Method != http.MethodConnect {
+ return nil, fmt.Errorf("expected CONNECT request, got %s", r.Method)
+ }
+ if r.Proto != protocolHeader {
+ return nil, fmt.Errorf("unexpected protocol: %s", r.Proto)
+ }
+ if v, ok := r.Header[webTransportDraftOfferHeaderKey]; !ok || len(v) != 1 || v[0] != "1" {
+ return nil, fmt.Errorf("missing or invalid %s header", webTransportDraftOfferHeaderKey)
+ }
+ if !s.CheckOrigin(r) {
+ return nil, errors.New("webtransport: request origin not allowed")
+ }
+ w.Header().Add(webTransportDraftHeaderKey, webTransportDraftHeaderValue)
+ w.WriteHeader(http.StatusOK)
+ w.(http.Flusher).Flush()
+
+ httpStreamer, ok := r.Body.(http3.HTTPStreamer)
+ if !ok { // should never happen, unless quic-go changed the API
+ return nil, errors.New("failed to take over HTTP stream")
+ }
+ str := httpStreamer.HTTPStream()
+ sID := sessionID(str.StreamID())
+
+ hijacker, ok := w.(http3.Hijacker)
+ if !ok { // should never happen, unless quic-go changed the API
+ return nil, errors.New("failed to hijack")
+ }
+ return s.conns.AddSession(
+ hijacker.StreamCreator(),
+ sID,
+ r.Body.(http3.HTTPStreamer).HTTPStream(),
+ ), nil
+}
+
+// copied from https://github.com/gorilla/websocket
+func checkSameOrigin(r *http.Request) bool {
+ origin := r.Header.Get("Origin")
+ if origin == "" {
+ return true
+ }
+ u, err := url.Parse(origin)
+ if err != nil {
+ return false
+ }
+ return equalASCIIFold(u.Host, r.Host)
+}
+
+// copied from https://github.com/gorilla/websocket
+func equalASCIIFold(s, t string) bool {
+ for s != "" && t != "" {
+ sr, size := utf8.DecodeRuneInString(s)
+ s = s[size:]
+ tr, size := utf8.DecodeRuneInString(t)
+ t = t[size:]
+ if sr == tr {
+ continue
+ }
+ if 'A' <= sr && sr <= 'Z' {
+ sr = sr + 'a' - 'A'
+ }
+ if 'A' <= tr && tr <= 'Z' {
+ tr = tr + 'a' - 'A'
+ }
+ if sr != tr {
+ return false
+ }
+ }
+ return s == t
+}
diff --git a/vendor/github.com/quic-go/webtransport-go/session.go b/vendor/github.com/quic-go/webtransport-go/session.go
new file mode 100644
index 000000000..f440059c3
--- /dev/null
+++ b/vendor/github.com/quic-go/webtransport-go/session.go
@@ -0,0 +1,418 @@
+package webtransport
+
+import (
+ "context"
+ "encoding/binary"
+ "errors"
+ "io"
+ "math/rand"
+ "net"
+ "sync"
+
+ "github.com/quic-go/quic-go"
+ "github.com/quic-go/quic-go/http3"
+ "github.com/quic-go/quic-go/quicvarint"
+)
+
+// sessionID is the WebTransport Session ID
+type sessionID uint64
+
+const closeWebtransportSessionCapsuleType http3.CapsuleType = 0x2843
+
+type acceptQueue[T any] struct {
+ mx sync.Mutex
+ // The channel is used to notify consumers (via Chan) about new incoming items.
+ // Needs to be buffered to preserve the notification if an item is enqueued
+ // between a call to Next and to Chan.
+ c chan struct{}
+ // Contains all the streams waiting to be accepted.
+ // There's no explicit limit to the length of the queue, but it is implicitly
+ // limited by the stream flow control provided by QUIC.
+ queue []T
+}
+
+func newAcceptQueue[T any]() *acceptQueue[T] {
+ return &acceptQueue[T]{c: make(chan struct{}, 1)}
+}
+
+func (q *acceptQueue[T]) Add(str T) {
+ q.mx.Lock()
+ q.queue = append(q.queue, str)
+ q.mx.Unlock()
+
+ select {
+ case q.c <- struct{}{}:
+ default:
+ }
+}
+
+func (q *acceptQueue[T]) Next() T {
+ q.mx.Lock()
+ defer q.mx.Unlock()
+
+ if len(q.queue) == 0 {
+ return *new(T)
+ }
+ str := q.queue[0]
+ q.queue = q.queue[1:]
+ return str
+}
+
+func (q *acceptQueue[T]) Chan() <-chan struct{} { return q.c }
+
+type Session struct {
+ sessionID sessionID
+ qconn http3.StreamCreator
+ requestStr quic.Stream
+
+ streamHdr []byte
+ uniStreamHdr []byte
+
+ ctx context.Context
+ closeMx sync.Mutex
+ closeErr error // not nil once the session is closed
+ // streamCtxs holds all the context.CancelFuncs of calls to Open{Uni}StreamSync calls currently active.
+ // When the session is closed, this allows us to cancel all these contexts and make those calls return.
+ streamCtxs map[int]context.CancelFunc
+
+ bidiAcceptQueue acceptQueue[Stream]
+ uniAcceptQueue acceptQueue[ReceiveStream]
+
+ // TODO: garbage collect streams from when they are closed
+ streams streamsMap
+}
+
+func newSession(sessionID sessionID, qconn http3.StreamCreator, requestStr quic.Stream) *Session {
+ tracingID := qconn.Context().Value(quic.ConnectionTracingKey).(uint64)
+ ctx, ctxCancel := context.WithCancel(context.WithValue(context.Background(), quic.ConnectionTracingKey, tracingID))
+ c := &Session{
+ sessionID: sessionID,
+ qconn: qconn,
+ requestStr: requestStr,
+ ctx: ctx,
+ streamCtxs: make(map[int]context.CancelFunc),
+ bidiAcceptQueue: *newAcceptQueue[Stream](),
+ uniAcceptQueue: *newAcceptQueue[ReceiveStream](),
+ streams: *newStreamsMap(),
+ }
+ // precompute the headers for unidirectional streams
+ c.uniStreamHdr = make([]byte, 0, 2+quicvarint.Len(uint64(c.sessionID)))
+ c.uniStreamHdr = quicvarint.Append(c.uniStreamHdr, webTransportUniStreamType)
+ c.uniStreamHdr = quicvarint.Append(c.uniStreamHdr, uint64(c.sessionID))
+ // precompute the headers for bidirectional streams
+ c.streamHdr = make([]byte, 0, 2+quicvarint.Len(uint64(c.sessionID)))
+ c.streamHdr = quicvarint.Append(c.streamHdr, webTransportFrameType)
+ c.streamHdr = quicvarint.Append(c.streamHdr, uint64(c.sessionID))
+
+ go func() {
+ defer ctxCancel()
+ c.handleConn()
+ }()
+ return c
+}
+
+func (s *Session) handleConn() {
+ var closeErr *ConnectionError
+ err := s.parseNextCapsule()
+ if !errors.As(err, &closeErr) {
+ closeErr = &ConnectionError{Remote: true}
+ }
+
+ s.closeMx.Lock()
+ defer s.closeMx.Unlock()
+ // If we closed the connection, the closeErr will be set in Close.
+ if s.closeErr == nil {
+ s.closeErr = closeErr
+ }
+ for _, cancel := range s.streamCtxs {
+ cancel()
+ }
+ s.streams.CloseSession()
+}
+
+// parseNextCapsule parses the next Capsule sent on the request stream.
+// It returns a ConnectionError, if the capsule received is a CLOSE_WEBTRANSPORT_SESSION Capsule.
+func (s *Session) parseNextCapsule() error {
+ for {
+ // TODO: enforce max size
+ typ, r, err := http3.ParseCapsule(quicvarint.NewReader(s.requestStr))
+ if err != nil {
+ return err
+ }
+ switch typ {
+ case closeWebtransportSessionCapsuleType:
+ b := make([]byte, 4)
+ if _, err := io.ReadFull(r, b); err != nil {
+ return err
+ }
+ appErrCode := binary.BigEndian.Uint32(b)
+ appErrMsg, err := io.ReadAll(r)
+ if err != nil {
+ return err
+ }
+ return &ConnectionError{
+ Remote: true,
+ ErrorCode: SessionErrorCode(appErrCode),
+ Message: string(appErrMsg),
+ }
+ default:
+ // unknown capsule, skip it
+ if _, err := io.ReadAll(r); err != nil {
+ return err
+ }
+ }
+ }
+}
+
+func (s *Session) addStream(qstr quic.Stream, addStreamHeader bool) Stream {
+ var hdr []byte
+ if addStreamHeader {
+ hdr = s.streamHdr
+ }
+ str := newStream(qstr, hdr, func() { s.streams.RemoveStream(qstr.StreamID()) })
+ s.streams.AddStream(qstr.StreamID(), str.closeWithSession)
+ return str
+}
+
+func (s *Session) addReceiveStream(qstr quic.ReceiveStream) ReceiveStream {
+ str := newReceiveStream(qstr, func() { s.streams.RemoveStream(qstr.StreamID()) })
+ s.streams.AddStream(qstr.StreamID(), func() {
+ str.closeWithSession()
+ })
+ return str
+}
+
+func (s *Session) addSendStream(qstr quic.SendStream) SendStream {
+ str := newSendStream(qstr, s.uniStreamHdr, func() { s.streams.RemoveStream(qstr.StreamID()) })
+ s.streams.AddStream(qstr.StreamID(), str.closeWithSession)
+ return str
+}
+
+// addIncomingStream adds a bidirectional stream that the remote peer opened
+func (s *Session) addIncomingStream(qstr quic.Stream) {
+ s.closeMx.Lock()
+ closeErr := s.closeErr
+ if closeErr != nil {
+ s.closeMx.Unlock()
+ qstr.CancelRead(sessionCloseErrorCode)
+ qstr.CancelWrite(sessionCloseErrorCode)
+ return
+ }
+ str := s.addStream(qstr, false)
+ s.closeMx.Unlock()
+
+ s.bidiAcceptQueue.Add(str)
+}
+
+// addIncomingUniStream adds a unidirectional stream that the remote peer opened
+func (s *Session) addIncomingUniStream(qstr quic.ReceiveStream) {
+ s.closeMx.Lock()
+ closeErr := s.closeErr
+ if closeErr != nil {
+ s.closeMx.Unlock()
+ qstr.CancelRead(sessionCloseErrorCode)
+ return
+ }
+ str := s.addReceiveStream(qstr)
+ s.closeMx.Unlock()
+
+ s.uniAcceptQueue.Add(str)
+}
+
+// Context returns a context that is closed when the session is closed.
+func (s *Session) Context() context.Context {
+ return s.ctx
+}
+
+func (s *Session) AcceptStream(ctx context.Context) (Stream, error) {
+ s.closeMx.Lock()
+ closeErr := s.closeErr
+ s.closeMx.Unlock()
+ if closeErr != nil {
+ return nil, closeErr
+ }
+
+ for {
+ // If there's a stream in the accept queue, return it immediately.
+ if str := s.bidiAcceptQueue.Next(); str != nil {
+ return str, nil
+ }
+ // No stream in the accept queue. Wait until we accept one.
+ select {
+ case <-s.ctx.Done():
+ return nil, s.closeErr
+ case <-ctx.Done():
+ return nil, ctx.Err()
+ case <-s.bidiAcceptQueue.Chan():
+ }
+ }
+}
+
+func (s *Session) AcceptUniStream(ctx context.Context) (ReceiveStream, error) {
+ s.closeMx.Lock()
+ closeErr := s.closeErr
+ s.closeMx.Unlock()
+ if closeErr != nil {
+ return nil, s.closeErr
+ }
+
+ for {
+ // If there's a stream in the accept queue, return it immediately.
+ if str := s.uniAcceptQueue.Next(); str != nil {
+ return str, nil
+ }
+ // No stream in the accept queue. Wait until we accept one.
+ select {
+ case <-s.ctx.Done():
+ return nil, s.closeErr
+ case <-ctx.Done():
+ return nil, ctx.Err()
+ case <-s.uniAcceptQueue.Chan():
+ }
+ }
+}
+
+func (s *Session) OpenStream() (Stream, error) {
+ s.closeMx.Lock()
+ defer s.closeMx.Unlock()
+
+ if s.closeErr != nil {
+ return nil, s.closeErr
+ }
+
+ qstr, err := s.qconn.OpenStream()
+ if err != nil {
+ return nil, err
+ }
+ return s.addStream(qstr, true), nil
+}
+
+func (s *Session) addStreamCtxCancel(cancel context.CancelFunc) (id int) {
+rand:
+ id = rand.Int()
+ if _, ok := s.streamCtxs[id]; ok {
+ goto rand
+ }
+ s.streamCtxs[id] = cancel
+ return id
+}
+
+func (s *Session) OpenStreamSync(ctx context.Context) (Stream, error) {
+ s.closeMx.Lock()
+ if s.closeErr != nil {
+ s.closeMx.Unlock()
+ return nil, s.closeErr
+ }
+ ctx, cancel := context.WithCancel(ctx)
+ id := s.addStreamCtxCancel(cancel)
+ s.closeMx.Unlock()
+
+ qstr, err := s.qconn.OpenStreamSync(ctx)
+ if err != nil {
+ if s.closeErr != nil {
+ return nil, s.closeErr
+ }
+ return nil, err
+ }
+
+ s.closeMx.Lock()
+ defer s.closeMx.Unlock()
+ delete(s.streamCtxs, id)
+ // Some time might have passed. Check if the session is still alive
+ if s.closeErr != nil {
+ qstr.CancelWrite(sessionCloseErrorCode)
+ qstr.CancelRead(sessionCloseErrorCode)
+ return nil, s.closeErr
+ }
+ return s.addStream(qstr, true), nil
+}
+
+func (s *Session) OpenUniStream() (SendStream, error) {
+ s.closeMx.Lock()
+ defer s.closeMx.Unlock()
+
+ if s.closeErr != nil {
+ return nil, s.closeErr
+ }
+ qstr, err := s.qconn.OpenUniStream()
+ if err != nil {
+ return nil, err
+ }
+ return s.addSendStream(qstr), nil
+}
+
+func (s *Session) OpenUniStreamSync(ctx context.Context) (str SendStream, err error) {
+ s.closeMx.Lock()
+ if s.closeErr != nil {
+ s.closeMx.Unlock()
+ return nil, s.closeErr
+ }
+ ctx, cancel := context.WithCancel(ctx)
+ id := s.addStreamCtxCancel(cancel)
+ s.closeMx.Unlock()
+
+ qstr, err := s.qconn.OpenUniStreamSync(ctx)
+ if err != nil {
+ if s.closeErr != nil {
+ return nil, s.closeErr
+ }
+ return nil, err
+ }
+
+ s.closeMx.Lock()
+ defer s.closeMx.Unlock()
+ delete(s.streamCtxs, id)
+ // Some time might have passed. Check if the session is still alive
+ if s.closeErr != nil {
+ qstr.CancelWrite(sessionCloseErrorCode)
+ return nil, s.closeErr
+ }
+ return s.addSendStream(qstr), nil
+}
+
+func (s *Session) LocalAddr() net.Addr {
+ return s.qconn.LocalAddr()
+}
+
+func (s *Session) RemoteAddr() net.Addr {
+ return s.qconn.RemoteAddr()
+}
+
+func (s *Session) CloseWithError(code SessionErrorCode, msg string) error {
+ first, err := s.closeWithError(code, msg)
+ if err != nil || !first {
+ return err
+ }
+
+ s.requestStr.CancelRead(1337)
+ err = s.requestStr.Close()
+ <-s.ctx.Done()
+ return err
+}
+
+func (s *Session) closeWithError(code SessionErrorCode, msg string) (bool /* first call to close session */, error) {
+ s.closeMx.Lock()
+ defer s.closeMx.Unlock()
+ // Duplicate call, or the remote already closed this session.
+ if s.closeErr != nil {
+ return false, nil
+ }
+ s.closeErr = &ConnectionError{
+ ErrorCode: code,
+ Message: msg,
+ }
+
+ b := make([]byte, 4, 4+len(msg))
+ binary.BigEndian.PutUint32(b, uint32(code))
+ b = append(b, []byte(msg)...)
+
+ return true, http3.WriteCapsule(
+ quicvarint.NewWriter(s.requestStr),
+ closeWebtransportSessionCapsuleType,
+ b,
+ )
+}
+
+func (c *Session) ConnectionState() quic.ConnectionState {
+ return c.qconn.ConnectionState()
+}
diff --git a/vendor/github.com/quic-go/webtransport-go/session_manager.go b/vendor/github.com/quic-go/webtransport-go/session_manager.go
new file mode 100644
index 000000000..2dbb73816
--- /dev/null
+++ b/vendor/github.com/quic-go/webtransport-go/session_manager.go
@@ -0,0 +1,195 @@
+package webtransport
+
+import (
+ "context"
+ "sync"
+ "time"
+
+ "github.com/quic-go/quic-go"
+ "github.com/quic-go/quic-go/http3"
+ "github.com/quic-go/quic-go/quicvarint"
+)
+
+// session is the map value in the conns map
+type session struct {
+ created chan struct{} // is closed once the session map has been initialized
+ counter int // how many streams are waiting for this session to be established
+ conn *Session
+}
+
+type sessionManager struct {
+ refCount sync.WaitGroup
+ ctx context.Context
+ ctxCancel context.CancelFunc
+
+ timeout time.Duration
+
+ mx sync.Mutex
+ conns map[http3.StreamCreator]map[sessionID]*session
+}
+
+func newSessionManager(timeout time.Duration) *sessionManager {
+ m := &sessionManager{
+ timeout: timeout,
+ conns: make(map[http3.StreamCreator]map[sessionID]*session),
+ }
+ m.ctx, m.ctxCancel = context.WithCancel(context.Background())
+ return m
+}
+
+// AddStream adds a new bidirectional stream to a WebTransport session.
+// If the WebTransport session has not yet been established,
+// it starts a new go routine and waits for establishment of the session.
+// If that takes longer than timeout, the stream is reset.
+func (m *sessionManager) AddStream(qconn http3.StreamCreator, str quic.Stream, id sessionID) {
+ sess, isExisting := m.getOrCreateSession(qconn, id)
+ if isExisting {
+ sess.conn.addIncomingStream(str)
+ return
+ }
+
+ m.refCount.Add(1)
+ go func() {
+ defer m.refCount.Done()
+ m.handleStream(str, sess)
+
+ m.mx.Lock()
+ defer m.mx.Unlock()
+
+ sess.counter--
+ // Once no more streams are waiting for this session to be established,
+ // and this session is still outstanding, delete it from the map.
+ if sess.counter == 0 && sess.conn == nil {
+ m.maybeDelete(qconn, id)
+ }
+ }()
+}
+
+func (m *sessionManager) maybeDelete(qconn http3.StreamCreator, id sessionID) {
+ sessions, ok := m.conns[qconn]
+ if !ok { // should never happen
+ return
+ }
+ delete(sessions, id)
+ if len(sessions) == 0 {
+ delete(m.conns, qconn)
+ }
+}
+
+// AddUniStream adds a new unidirectional stream to a WebTransport session.
+// If the WebTransport session has not yet been established,
+// it starts a new go routine and waits for establishment of the session.
+// If that takes longer than timeout, the stream is reset.
+func (m *sessionManager) AddUniStream(qconn http3.StreamCreator, str quic.ReceiveStream) {
+ idv, err := quicvarint.Read(quicvarint.NewReader(str))
+ if err != nil {
+ str.CancelRead(1337)
+ }
+ id := sessionID(idv)
+
+ sess, isExisting := m.getOrCreateSession(qconn, id)
+ if isExisting {
+ sess.conn.addIncomingUniStream(str)
+ return
+ }
+
+ m.refCount.Add(1)
+ go func() {
+ defer m.refCount.Done()
+ m.handleUniStream(str, sess)
+
+ m.mx.Lock()
+ defer m.mx.Unlock()
+
+ sess.counter--
+ // Once no more streams are waiting for this session to be established,
+ // and this session is still outstanding, delete it from the map.
+ if sess.counter == 0 && sess.conn == nil {
+ m.maybeDelete(qconn, id)
+ }
+ }()
+}
+
+func (m *sessionManager) getOrCreateSession(qconn http3.StreamCreator, id sessionID) (sess *session, existed bool) {
+ m.mx.Lock()
+ defer m.mx.Unlock()
+
+ sessions, ok := m.conns[qconn]
+ if !ok {
+ sessions = make(map[sessionID]*session)
+ m.conns[qconn] = sessions
+ }
+
+ sess, ok = sessions[id]
+ if ok && sess.conn != nil {
+ return sess, true
+ }
+ if !ok {
+ sess = &session{created: make(chan struct{})}
+ sessions[id] = sess
+ }
+ sess.counter++
+ return sess, false
+}
+
+func (m *sessionManager) handleStream(str quic.Stream, sess *session) {
+ t := time.NewTimer(m.timeout)
+ defer t.Stop()
+
+ // When multiple streams are waiting for the same session to be established,
+ // the timeout is calculated for every stream separately.
+ select {
+ case <-sess.created:
+ sess.conn.addIncomingStream(str)
+ case <-t.C:
+ str.CancelRead(WebTransportBufferedStreamRejectedErrorCode)
+ str.CancelWrite(WebTransportBufferedStreamRejectedErrorCode)
+ case <-m.ctx.Done():
+ }
+}
+
+func (m *sessionManager) handleUniStream(str quic.ReceiveStream, sess *session) {
+ t := time.NewTimer(m.timeout)
+ defer t.Stop()
+
+ // When multiple streams are waiting for the same session to be established,
+ // the timeout is calculated for every stream separately.
+ select {
+ case <-sess.created:
+ sess.conn.addIncomingUniStream(str)
+ case <-t.C:
+ str.CancelRead(WebTransportBufferedStreamRejectedErrorCode)
+ case <-m.ctx.Done():
+ }
+}
+
+// AddSession adds a new WebTransport session.
+func (m *sessionManager) AddSession(qconn http3.StreamCreator, id sessionID, requestStr quic.Stream) *Session {
+ conn := newSession(id, qconn, requestStr)
+
+ m.mx.Lock()
+ defer m.mx.Unlock()
+
+ sessions, ok := m.conns[qconn]
+ if !ok {
+ sessions = make(map[sessionID]*session)
+ m.conns[qconn] = sessions
+ }
+ if sess, ok := sessions[id]; ok {
+ // We might already have an entry of this session.
+ // This can happen when we receive a stream for this WebTransport session before we complete the HTTP request
+ // that establishes the session.
+ sess.conn = conn
+ close(sess.created)
+ return conn
+ }
+ c := make(chan struct{})
+ close(c)
+ sessions[id] = &session{created: c, conn: conn}
+ return conn
+}
+
+func (m *sessionManager) Close() {
+ m.ctxCancel()
+ m.refCount.Wait()
+}
diff --git a/vendor/github.com/quic-go/webtransport-go/stream.go b/vendor/github.com/quic-go/webtransport-go/stream.go
new file mode 100644
index 000000000..d64472a79
--- /dev/null
+++ b/vendor/github.com/quic-go/webtransport-go/stream.go
@@ -0,0 +1,214 @@
+package webtransport
+
+import (
+ "errors"
+ "fmt"
+ "io"
+ "net"
+ "sync"
+ "time"
+
+ "github.com/quic-go/quic-go"
+)
+
+const sessionCloseErrorCode quic.StreamErrorCode = 0x170d7b68
+
+type SendStream interface {
+ io.Writer
+ io.Closer
+
+ StreamID() quic.StreamID
+ CancelWrite(StreamErrorCode)
+
+ SetWriteDeadline(time.Time) error
+}
+
+type ReceiveStream interface {
+ io.Reader
+
+ StreamID() quic.StreamID
+ CancelRead(StreamErrorCode)
+
+ SetReadDeadline(time.Time) error
+}
+
+type Stream interface {
+ SendStream
+ ReceiveStream
+ SetDeadline(time.Time) error
+}
+
+type sendStream struct {
+ str quic.SendStream
+ // WebTransport stream header.
+ // Set by the constructor, set to nil once sent out.
+ // Might be initialized to nil if this sendStream is part of an incoming bidirectional stream.
+ streamHdr []byte
+
+ onClose func()
+
+ once sync.Once
+}
+
+var _ SendStream = &sendStream{}
+
+func newSendStream(str quic.SendStream, hdr []byte, onClose func()) *sendStream {
+ return &sendStream{str: str, streamHdr: hdr, onClose: onClose}
+}
+
+func (s *sendStream) maybeSendStreamHeader() (err error) {
+ s.once.Do(func() {
+ if _, e := s.str.Write(s.streamHdr); e != nil {
+ err = e
+ return
+ }
+ s.streamHdr = nil
+ })
+ return
+}
+
+func (s *sendStream) Write(b []byte) (int, error) {
+ if err := s.maybeSendStreamHeader(); err != nil {
+ return 0, err
+ }
+ n, err := s.str.Write(b)
+ if err != nil && !isTimeoutError(err) {
+ s.onClose()
+ }
+ return n, maybeConvertStreamError(err)
+}
+
+func (s *sendStream) CancelWrite(e StreamErrorCode) {
+ s.str.CancelWrite(webtransportCodeToHTTPCode(e))
+ s.onClose()
+}
+
+func (s *sendStream) closeWithSession() {
+ s.str.CancelWrite(sessionCloseErrorCode)
+}
+
+func (s *sendStream) Close() error {
+ if err := s.maybeSendStreamHeader(); err != nil {
+ return err
+ }
+ s.onClose()
+ return maybeConvertStreamError(s.str.Close())
+}
+
+func (s *sendStream) SetWriteDeadline(t time.Time) error {
+ return maybeConvertStreamError(s.str.SetWriteDeadline(t))
+}
+
+func (s *sendStream) StreamID() quic.StreamID {
+ return s.str.StreamID()
+}
+
+type receiveStream struct {
+ str quic.ReceiveStream
+ onClose func()
+}
+
+var _ ReceiveStream = &receiveStream{}
+
+func newReceiveStream(str quic.ReceiveStream, onClose func()) *receiveStream {
+ return &receiveStream{str: str, onClose: onClose}
+}
+
+func (s *receiveStream) Read(b []byte) (int, error) {
+ n, err := s.str.Read(b)
+ if err != nil && !isTimeoutError(err) {
+ s.onClose()
+ }
+ return n, maybeConvertStreamError(err)
+}
+
+func (s *receiveStream) CancelRead(e StreamErrorCode) {
+ s.str.CancelRead(webtransportCodeToHTTPCode(e))
+ s.onClose()
+}
+
+func (s *receiveStream) closeWithSession() {
+ s.str.CancelRead(sessionCloseErrorCode)
+}
+
+func (s *receiveStream) SetReadDeadline(t time.Time) error {
+ return maybeConvertStreamError(s.str.SetReadDeadline(t))
+}
+
+func (s *receiveStream) StreamID() quic.StreamID {
+ return s.str.StreamID()
+}
+
+type stream struct {
+ *sendStream
+ *receiveStream
+
+ mx sync.Mutex
+ sendSideClosed, recvSideClosed bool
+ onClose func()
+}
+
+var _ Stream = &stream{}
+
+func newStream(str quic.Stream, hdr []byte, onClose func()) *stream {
+ s := &stream{onClose: onClose}
+ s.sendStream = newSendStream(str, hdr, func() { s.registerClose(true) })
+ s.receiveStream = newReceiveStream(str, func() { s.registerClose(false) })
+ return s
+}
+
+func (s *stream) registerClose(isSendSide bool) {
+ s.mx.Lock()
+ if isSendSide {
+ s.sendSideClosed = true
+ } else {
+ s.recvSideClosed = true
+ }
+ isClosed := s.sendSideClosed && s.recvSideClosed
+ s.mx.Unlock()
+
+ if isClosed {
+ s.onClose()
+ }
+}
+
+func (s *stream) closeWithSession() {
+ s.sendStream.closeWithSession()
+ s.receiveStream.closeWithSession()
+}
+
+func (s *stream) SetDeadline(t time.Time) error {
+ err1 := s.sendStream.SetWriteDeadline(t)
+ err2 := s.receiveStream.SetReadDeadline(t)
+ if err1 != nil {
+ return err1
+ }
+ return err2
+}
+
+func (s *stream) StreamID() quic.StreamID {
+ return s.receiveStream.StreamID()
+}
+
+func maybeConvertStreamError(err error) error {
+ if err == nil {
+ return nil
+ }
+ var streamErr *quic.StreamError
+ if errors.As(err, &streamErr) {
+ errorCode, cerr := httpCodeToWebtransportCode(streamErr.ErrorCode)
+ if cerr != nil {
+ return fmt.Errorf("stream reset, but failed to convert stream error %d: %w", streamErr.ErrorCode, cerr)
+ }
+ return &StreamError{ErrorCode: errorCode}
+ }
+ return err
+}
+
+func isTimeoutError(err error) bool {
+ nerr, ok := err.(net.Error)
+ if !ok {
+ return false
+ }
+ return nerr.Timeout()
+}
diff --git a/vendor/github.com/quic-go/webtransport-go/streams_map.go b/vendor/github.com/quic-go/webtransport-go/streams_map.go
new file mode 100644
index 000000000..c1c78323e
--- /dev/null
+++ b/vendor/github.com/quic-go/webtransport-go/streams_map.go
@@ -0,0 +1,42 @@
+package webtransport
+
+import (
+ "sync"
+
+ "github.com/quic-go/quic-go"
+)
+
+type closeFunc func()
+
+// The streamsMap manages the streams of a single QUIC connection.
+// Note that several WebTransport sessions can share one QUIC connection.
+type streamsMap struct {
+ mx sync.Mutex
+ m map[quic.StreamID]closeFunc
+}
+
+func newStreamsMap() *streamsMap {
+ return &streamsMap{m: make(map[quic.StreamID]closeFunc)}
+}
+
+func (s *streamsMap) AddStream(id quic.StreamID, close closeFunc) {
+ s.mx.Lock()
+ s.m[id] = close
+ s.mx.Unlock()
+}
+
+func (s *streamsMap) RemoveStream(id quic.StreamID) {
+ s.mx.Lock()
+ delete(s.m, id)
+ s.mx.Unlock()
+}
+
+func (s *streamsMap) CloseSession() {
+ s.mx.Lock()
+ defer s.mx.Unlock()
+
+ for _, cl := range s.m {
+ cl()
+ }
+ s.m = nil
+}
diff --git a/vendor/github.com/quic-go/webtransport-go/version.json b/vendor/github.com/quic-go/webtransport-go/version.json
new file mode 100644
index 000000000..ef97c9ca1
--- /dev/null
+++ b/vendor/github.com/quic-go/webtransport-go/version.json
@@ -0,0 +1,3 @@
+{
+ "version": "v0.5.3"
+}
diff --git a/vendor/github.com/satori/go.uuid/.travis.yml b/vendor/github.com/satori/go.uuid/.travis.yml
deleted file mode 100644
index 20dd53b8d..000000000
--- a/vendor/github.com/satori/go.uuid/.travis.yml
+++ /dev/null
@@ -1,23 +0,0 @@
-language: go
-sudo: false
-go:
- - 1.2
- - 1.3
- - 1.4
- - 1.5
- - 1.6
- - 1.7
- - 1.8
- - 1.9
- - tip
-matrix:
- allow_failures:
- - go: tip
- fast_finish: true
-before_install:
- - go get github.com/mattn/goveralls
- - go get golang.org/x/tools/cmd/cover
-script:
- - $HOME/gopath/bin/goveralls -service=travis-ci
-notifications:
- email: false
diff --git a/vendor/github.com/satori/go.uuid/LICENSE b/vendor/github.com/satori/go.uuid/LICENSE
deleted file mode 100644
index 926d54987..000000000
--- a/vendor/github.com/satori/go.uuid/LICENSE
+++ /dev/null
@@ -1,20 +0,0 @@
-Copyright (C) 2013-2018 by Maxim Bublis
-
-Permission is hereby granted, free of charge, to any person obtaining
-a copy of this software and associated documentation files (the
-"Software"), to deal in the Software without restriction, including
-without limitation the rights to use, copy, modify, merge, publish,
-distribute, sublicense, and/or sell copies of the Software, and to
-permit persons to whom the Software is furnished to do so, subject to
-the following conditions:
-
-The above copyright notice and this permission notice shall be
-included in all copies or substantial portions of the Software.
-
-THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
-EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
-MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
-NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS BE
-LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION
-OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION
-WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
diff --git a/vendor/github.com/satori/go.uuid/README.md b/vendor/github.com/satori/go.uuid/README.md
deleted file mode 100644
index 7b1a722df..000000000
--- a/vendor/github.com/satori/go.uuid/README.md
+++ /dev/null
@@ -1,65 +0,0 @@
-# UUID package for Go language
-
-[![Build Status](https://travis-ci.org/satori/go.uuid.png?branch=master)](https://travis-ci.org/satori/go.uuid)
-[![Coverage Status](https://coveralls.io/repos/github/satori/go.uuid/badge.svg?branch=master)](https://coveralls.io/github/satori/go.uuid)
-[![GoDoc](http://godoc.org/github.com/satori/go.uuid?status.png)](http://godoc.org/github.com/satori/go.uuid)
-
-This package provides pure Go implementation of Universally Unique Identifier (UUID). Supported both creation and parsing of UUIDs.
-
-With 100% test coverage and benchmarks out of box.
-
-Supported versions:
-* Version 1, based on timestamp and MAC address (RFC 4122)
-* Version 2, based on timestamp, MAC address and POSIX UID/GID (DCE 1.1)
-* Version 3, based on MD5 hashing (RFC 4122)
-* Version 4, based on random numbers (RFC 4122)
-* Version 5, based on SHA-1 hashing (RFC 4122)
-
-## Installation
-
-Use the `go` command:
-
- $ go get github.com/satori/go.uuid
-
-## Requirements
-
-UUID package requires Go >= 1.2.
-
-## Example
-
-```go
-package main
-
-import (
- "fmt"
- "github.com/satori/go.uuid"
-)
-
-func main() {
- // Creating UUID Version 4
- u1 := uuid.NewV4()
- fmt.Printf("UUIDv4: %s\n", u1)
-
- // Parsing UUID from string input
- u2, err := uuid.FromString("6ba7b810-9dad-11d1-80b4-00c04fd430c8")
- if err != nil {
- fmt.Printf("Something gone wrong: %s", err)
- }
- fmt.Printf("Successfully parsed: %s", u2)
-}
-```
-
-## Documentation
-
-[Documentation](http://godoc.org/github.com/satori/go.uuid) is hosted at GoDoc project.
-
-## Links
-* [RFC 4122](http://tools.ietf.org/html/rfc4122)
-* [DCE 1.1: Authentication and Security Services](http://pubs.opengroup.org/onlinepubs/9696989899/chap5.htm#tagcjh_08_02_01_01)
-
-## Copyright
-
-Copyright (C) 2013-2018 by Maxim Bublis .
-
-UUID package released under MIT License.
-See [LICENSE](https://github.com/satori/go.uuid/blob/master/LICENSE) for details.
diff --git a/vendor/github.com/satori/go.uuid/codec.go b/vendor/github.com/satori/go.uuid/codec.go
deleted file mode 100644
index 656892c53..000000000
--- a/vendor/github.com/satori/go.uuid/codec.go
+++ /dev/null
@@ -1,206 +0,0 @@
-// Copyright (C) 2013-2018 by Maxim Bublis
-//
-// Permission is hereby granted, free of charge, to any person obtaining
-// a copy of this software and associated documentation files (the
-// "Software"), to deal in the Software without restriction, including
-// without limitation the rights to use, copy, modify, merge, publish,
-// distribute, sublicense, and/or sell copies of the Software, and to
-// permit persons to whom the Software is furnished to do so, subject to
-// the following conditions:
-//
-// The above copyright notice and this permission notice shall be
-// included in all copies or substantial portions of the Software.
-//
-// THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
-// EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
-// MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
-// NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS BE
-// LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION
-// OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION
-// WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
-
-package uuid
-
-import (
- "bytes"
- "encoding/hex"
- "fmt"
-)
-
-// FromBytes returns UUID converted from raw byte slice input.
-// It will return error if the slice isn't 16 bytes long.
-func FromBytes(input []byte) (u UUID, err error) {
- err = u.UnmarshalBinary(input)
- return
-}
-
-// FromBytesOrNil returns UUID converted from raw byte slice input.
-// Same behavior as FromBytes, but returns a Nil UUID on error.
-func FromBytesOrNil(input []byte) UUID {
- uuid, err := FromBytes(input)
- if err != nil {
- return Nil
- }
- return uuid
-}
-
-// FromString returns UUID parsed from string input.
-// Input is expected in a form accepted by UnmarshalText.
-func FromString(input string) (u UUID, err error) {
- err = u.UnmarshalText([]byte(input))
- return
-}
-
-// FromStringOrNil returns UUID parsed from string input.
-// Same behavior as FromString, but returns a Nil UUID on error.
-func FromStringOrNil(input string) UUID {
- uuid, err := FromString(input)
- if err != nil {
- return Nil
- }
- return uuid
-}
-
-// MarshalText implements the encoding.TextMarshaler interface.
-// The encoding is the same as returned by String.
-func (u UUID) MarshalText() (text []byte, err error) {
- text = []byte(u.String())
- return
-}
-
-// UnmarshalText implements the encoding.TextUnmarshaler interface.
-// Following formats are supported:
-// "6ba7b810-9dad-11d1-80b4-00c04fd430c8",
-// "{6ba7b810-9dad-11d1-80b4-00c04fd430c8}",
-// "urn:uuid:6ba7b810-9dad-11d1-80b4-00c04fd430c8"
-// "6ba7b8109dad11d180b400c04fd430c8"
-// ABNF for supported UUID text representation follows:
-// uuid := canonical | hashlike | braced | urn
-// plain := canonical | hashlike
-// canonical := 4hexoct '-' 2hexoct '-' 2hexoct '-' 6hexoct
-// hashlike := 12hexoct
-// braced := '{' plain '}'
-// urn := URN ':' UUID-NID ':' plain
-// URN := 'urn'
-// UUID-NID := 'uuid'
-// 12hexoct := 6hexoct 6hexoct
-// 6hexoct := 4hexoct 2hexoct
-// 4hexoct := 2hexoct 2hexoct
-// 2hexoct := hexoct hexoct
-// hexoct := hexdig hexdig
-// hexdig := '0' | '1' | '2' | '3' | '4' | '5' | '6' | '7' | '8' | '9' |
-// 'a' | 'b' | 'c' | 'd' | 'e' | 'f' |
-// 'A' | 'B' | 'C' | 'D' | 'E' | 'F'
-func (u *UUID) UnmarshalText(text []byte) (err error) {
- switch len(text) {
- case 32:
- return u.decodeHashLike(text)
- case 36:
- return u.decodeCanonical(text)
- case 38:
- return u.decodeBraced(text)
- case 41:
- fallthrough
- case 45:
- return u.decodeURN(text)
- default:
- return fmt.Errorf("uuid: incorrect UUID length: %s", text)
- }
-}
-
-// decodeCanonical decodes UUID string in format
-// "6ba7b810-9dad-11d1-80b4-00c04fd430c8".
-func (u *UUID) decodeCanonical(t []byte) (err error) {
- if t[8] != '-' || t[13] != '-' || t[18] != '-' || t[23] != '-' {
- return fmt.Errorf("uuid: incorrect UUID format %s", t)
- }
-
- src := t[:]
- dst := u[:]
-
- for i, byteGroup := range byteGroups {
- if i > 0 {
- src = src[1:] // skip dash
- }
- _, err = hex.Decode(dst[:byteGroup/2], src[:byteGroup])
- if err != nil {
- return
- }
- src = src[byteGroup:]
- dst = dst[byteGroup/2:]
- }
-
- return
-}
-
-// decodeHashLike decodes UUID string in format
-// "6ba7b8109dad11d180b400c04fd430c8".
-func (u *UUID) decodeHashLike(t []byte) (err error) {
- src := t[:]
- dst := u[:]
-
- if _, err = hex.Decode(dst, src); err != nil {
- return err
- }
- return
-}
-
-// decodeBraced decodes UUID string in format
-// "{6ba7b810-9dad-11d1-80b4-00c04fd430c8}" or in format
-// "{6ba7b8109dad11d180b400c04fd430c8}".
-func (u *UUID) decodeBraced(t []byte) (err error) {
- l := len(t)
-
- if t[0] != '{' || t[l-1] != '}' {
- return fmt.Errorf("uuid: incorrect UUID format %s", t)
- }
-
- return u.decodePlain(t[1 : l-1])
-}
-
-// decodeURN decodes UUID string in format
-// "urn:uuid:6ba7b810-9dad-11d1-80b4-00c04fd430c8" or in format
-// "urn:uuid:6ba7b8109dad11d180b400c04fd430c8".
-func (u *UUID) decodeURN(t []byte) (err error) {
- total := len(t)
-
- urn_uuid_prefix := t[:9]
-
- if !bytes.Equal(urn_uuid_prefix, urnPrefix) {
- return fmt.Errorf("uuid: incorrect UUID format: %s", t)
- }
-
- return u.decodePlain(t[9:total])
-}
-
-// decodePlain decodes UUID string in canonical format
-// "6ba7b810-9dad-11d1-80b4-00c04fd430c8" or in hash-like format
-// "6ba7b8109dad11d180b400c04fd430c8".
-func (u *UUID) decodePlain(t []byte) (err error) {
- switch len(t) {
- case 32:
- return u.decodeHashLike(t)
- case 36:
- return u.decodeCanonical(t)
- default:
- return fmt.Errorf("uuid: incorrrect UUID length: %s", t)
- }
-}
-
-// MarshalBinary implements the encoding.BinaryMarshaler interface.
-func (u UUID) MarshalBinary() (data []byte, err error) {
- data = u.Bytes()
- return
-}
-
-// UnmarshalBinary implements the encoding.BinaryUnmarshaler interface.
-// It will return error if the slice isn't 16 bytes long.
-func (u *UUID) UnmarshalBinary(data []byte) (err error) {
- if len(data) != Size {
- err = fmt.Errorf("uuid: UUID must be exactly 16 bytes long, got %d bytes", len(data))
- return
- }
- copy(u[:], data)
-
- return
-}
diff --git a/vendor/github.com/satori/go.uuid/generator.go b/vendor/github.com/satori/go.uuid/generator.go
deleted file mode 100644
index 3f2f1da2d..000000000
--- a/vendor/github.com/satori/go.uuid/generator.go
+++ /dev/null
@@ -1,239 +0,0 @@
-// Copyright (C) 2013-2018 by Maxim Bublis
-//
-// Permission is hereby granted, free of charge, to any person obtaining
-// a copy of this software and associated documentation files (the
-// "Software"), to deal in the Software without restriction, including
-// without limitation the rights to use, copy, modify, merge, publish,
-// distribute, sublicense, and/or sell copies of the Software, and to
-// permit persons to whom the Software is furnished to do so, subject to
-// the following conditions:
-//
-// The above copyright notice and this permission notice shall be
-// included in all copies or substantial portions of the Software.
-//
-// THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
-// EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
-// MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
-// NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS BE
-// LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION
-// OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION
-// WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
-
-package uuid
-
-import (
- "crypto/md5"
- "crypto/rand"
- "crypto/sha1"
- "encoding/binary"
- "hash"
- "net"
- "os"
- "sync"
- "time"
-)
-
-// Difference in 100-nanosecond intervals between
-// UUID epoch (October 15, 1582) and Unix epoch (January 1, 1970).
-const epochStart = 122192928000000000
-
-var (
- global = newDefaultGenerator()
-
- epochFunc = unixTimeFunc
- posixUID = uint32(os.Getuid())
- posixGID = uint32(os.Getgid())
-)
-
-// NewV1 returns UUID based on current timestamp and MAC address.
-func NewV1() UUID {
- return global.NewV1()
-}
-
-// NewV2 returns DCE Security UUID based on POSIX UID/GID.
-func NewV2(domain byte) UUID {
- return global.NewV2(domain)
-}
-
-// NewV3 returns UUID based on MD5 hash of namespace UUID and name.
-func NewV3(ns UUID, name string) UUID {
- return global.NewV3(ns, name)
-}
-
-// NewV4 returns random generated UUID.
-func NewV4() UUID {
- return global.NewV4()
-}
-
-// NewV5 returns UUID based on SHA-1 hash of namespace UUID and name.
-func NewV5(ns UUID, name string) UUID {
- return global.NewV5(ns, name)
-}
-
-// Generator provides interface for generating UUIDs.
-type Generator interface {
- NewV1() UUID
- NewV2(domain byte) UUID
- NewV3(ns UUID, name string) UUID
- NewV4() UUID
- NewV5(ns UUID, name string) UUID
-}
-
-// Default generator implementation.
-type generator struct {
- storageOnce sync.Once
- storageMutex sync.Mutex
-
- lastTime uint64
- clockSequence uint16
- hardwareAddr [6]byte
-}
-
-func newDefaultGenerator() Generator {
- return &generator{}
-}
-
-// NewV1 returns UUID based on current timestamp and MAC address.
-func (g *generator) NewV1() UUID {
- u := UUID{}
-
- timeNow, clockSeq, hardwareAddr := g.getStorage()
-
- binary.BigEndian.PutUint32(u[0:], uint32(timeNow))
- binary.BigEndian.PutUint16(u[4:], uint16(timeNow>>32))
- binary.BigEndian.PutUint16(u[6:], uint16(timeNow>>48))
- binary.BigEndian.PutUint16(u[8:], clockSeq)
-
- copy(u[10:], hardwareAddr)
-
- u.SetVersion(V1)
- u.SetVariant(VariantRFC4122)
-
- return u
-}
-
-// NewV2 returns DCE Security UUID based on POSIX UID/GID.
-func (g *generator) NewV2(domain byte) UUID {
- u := UUID{}
-
- timeNow, clockSeq, hardwareAddr := g.getStorage()
-
- switch domain {
- case DomainPerson:
- binary.BigEndian.PutUint32(u[0:], posixUID)
- case DomainGroup:
- binary.BigEndian.PutUint32(u[0:], posixGID)
- }
-
- binary.BigEndian.PutUint16(u[4:], uint16(timeNow>>32))
- binary.BigEndian.PutUint16(u[6:], uint16(timeNow>>48))
- binary.BigEndian.PutUint16(u[8:], clockSeq)
- u[9] = domain
-
- copy(u[10:], hardwareAddr)
-
- u.SetVersion(V2)
- u.SetVariant(VariantRFC4122)
-
- return u
-}
-
-// NewV3 returns UUID based on MD5 hash of namespace UUID and name.
-func (g *generator) NewV3(ns UUID, name string) UUID {
- u := newFromHash(md5.New(), ns, name)
- u.SetVersion(V3)
- u.SetVariant(VariantRFC4122)
-
- return u
-}
-
-// NewV4 returns random generated UUID.
-func (g *generator) NewV4() UUID {
- u := UUID{}
- g.safeRandom(u[:])
- u.SetVersion(V4)
- u.SetVariant(VariantRFC4122)
-
- return u
-}
-
-// NewV5 returns UUID based on SHA-1 hash of namespace UUID and name.
-func (g *generator) NewV5(ns UUID, name string) UUID {
- u := newFromHash(sha1.New(), ns, name)
- u.SetVersion(V5)
- u.SetVariant(VariantRFC4122)
-
- return u
-}
-
-func (g *generator) initStorage() {
- g.initClockSequence()
- g.initHardwareAddr()
-}
-
-func (g *generator) initClockSequence() {
- buf := make([]byte, 2)
- g.safeRandom(buf)
- g.clockSequence = binary.BigEndian.Uint16(buf)
-}
-
-func (g *generator) initHardwareAddr() {
- interfaces, err := net.Interfaces()
- if err == nil {
- for _, iface := range interfaces {
- if len(iface.HardwareAddr) >= 6 {
- copy(g.hardwareAddr[:], iface.HardwareAddr)
- return
- }
- }
- }
-
- // Initialize hardwareAddr randomly in case
- // of real network interfaces absence
- g.safeRandom(g.hardwareAddr[:])
-
- // Set multicast bit as recommended in RFC 4122
- g.hardwareAddr[0] |= 0x01
-}
-
-func (g *generator) safeRandom(dest []byte) {
- if _, err := rand.Read(dest); err != nil {
- panic(err)
- }
-}
-
-// Returns UUID v1/v2 storage state.
-// Returns epoch timestamp, clock sequence, and hardware address.
-func (g *generator) getStorage() (uint64, uint16, []byte) {
- g.storageOnce.Do(g.initStorage)
-
- g.storageMutex.Lock()
- defer g.storageMutex.Unlock()
-
- timeNow := epochFunc()
- // Clock changed backwards since last UUID generation.
- // Should increase clock sequence.
- if timeNow <= g.lastTime {
- g.clockSequence++
- }
- g.lastTime = timeNow
-
- return timeNow, g.clockSequence, g.hardwareAddr[:]
-}
-
-// Returns difference in 100-nanosecond intervals between
-// UUID epoch (October 15, 1582) and current time.
-// This is default epoch calculation function.
-func unixTimeFunc() uint64 {
- return epochStart + uint64(time.Now().UnixNano()/100)
-}
-
-// Returns UUID based on hashing of namespace UUID and name.
-func newFromHash(h hash.Hash, ns UUID, name string) UUID {
- u := UUID{}
- h.Write(ns[:])
- h.Write([]byte(name))
- copy(u[:], h.Sum(nil))
-
- return u
-}
diff --git a/vendor/github.com/satori/go.uuid/sql.go b/vendor/github.com/satori/go.uuid/sql.go
deleted file mode 100644
index 56759d390..000000000
--- a/vendor/github.com/satori/go.uuid/sql.go
+++ /dev/null
@@ -1,78 +0,0 @@
-// Copyright (C) 2013-2018 by Maxim Bublis
-//
-// Permission is hereby granted, free of charge, to any person obtaining
-// a copy of this software and associated documentation files (the
-// "Software"), to deal in the Software without restriction, including
-// without limitation the rights to use, copy, modify, merge, publish,
-// distribute, sublicense, and/or sell copies of the Software, and to
-// permit persons to whom the Software is furnished to do so, subject to
-// the following conditions:
-//
-// The above copyright notice and this permission notice shall be
-// included in all copies or substantial portions of the Software.
-//
-// THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
-// EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
-// MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
-// NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS BE
-// LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION
-// OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION
-// WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
-
-package uuid
-
-import (
- "database/sql/driver"
- "fmt"
-)
-
-// Value implements the driver.Valuer interface.
-func (u UUID) Value() (driver.Value, error) {
- return u.String(), nil
-}
-
-// Scan implements the sql.Scanner interface.
-// A 16-byte slice is handled by UnmarshalBinary, while
-// a longer byte slice or a string is handled by UnmarshalText.
-func (u *UUID) Scan(src interface{}) error {
- switch src := src.(type) {
- case []byte:
- if len(src) == Size {
- return u.UnmarshalBinary(src)
- }
- return u.UnmarshalText(src)
-
- case string:
- return u.UnmarshalText([]byte(src))
- }
-
- return fmt.Errorf("uuid: cannot convert %T to UUID", src)
-}
-
-// NullUUID can be used with the standard sql package to represent a
-// UUID value that can be NULL in the database
-type NullUUID struct {
- UUID UUID
- Valid bool
-}
-
-// Value implements the driver.Valuer interface.
-func (u NullUUID) Value() (driver.Value, error) {
- if !u.Valid {
- return nil, nil
- }
- // Delegate to UUID Value function
- return u.UUID.Value()
-}
-
-// Scan implements the sql.Scanner interface.
-func (u *NullUUID) Scan(src interface{}) error {
- if src == nil {
- u.UUID, u.Valid = Nil, false
- return nil
- }
-
- // Delegate to UUID Scan function
- u.Valid = true
- return u.UUID.Scan(src)
-}
diff --git a/vendor/github.com/satori/go.uuid/uuid.go b/vendor/github.com/satori/go.uuid/uuid.go
deleted file mode 100644
index a2b8e2ca2..000000000
--- a/vendor/github.com/satori/go.uuid/uuid.go
+++ /dev/null
@@ -1,161 +0,0 @@
-// Copyright (C) 2013-2018 by Maxim Bublis
-//
-// Permission is hereby granted, free of charge, to any person obtaining
-// a copy of this software and associated documentation files (the
-// "Software"), to deal in the Software without restriction, including
-// without limitation the rights to use, copy, modify, merge, publish,
-// distribute, sublicense, and/or sell copies of the Software, and to
-// permit persons to whom the Software is furnished to do so, subject to
-// the following conditions:
-//
-// The above copyright notice and this permission notice shall be
-// included in all copies or substantial portions of the Software.
-//
-// THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
-// EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
-// MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
-// NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS BE
-// LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION
-// OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION
-// WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
-
-// Package uuid provides implementation of Universally Unique Identifier (UUID).
-// Supported versions are 1, 3, 4 and 5 (as specified in RFC 4122) and
-// version 2 (as specified in DCE 1.1).
-package uuid
-
-import (
- "bytes"
- "encoding/hex"
-)
-
-// Size of a UUID in bytes.
-const Size = 16
-
-// UUID representation compliant with specification
-// described in RFC 4122.
-type UUID [Size]byte
-
-// UUID versions
-const (
- _ byte = iota
- V1
- V2
- V3
- V4
- V5
-)
-
-// UUID layout variants.
-const (
- VariantNCS byte = iota
- VariantRFC4122
- VariantMicrosoft
- VariantFuture
-)
-
-// UUID DCE domains.
-const (
- DomainPerson = iota
- DomainGroup
- DomainOrg
-)
-
-// String parse helpers.
-var (
- urnPrefix = []byte("urn:uuid:")
- byteGroups = []int{8, 4, 4, 4, 12}
-)
-
-// Nil is special form of UUID that is specified to have all
-// 128 bits set to zero.
-var Nil = UUID{}
-
-// Predefined namespace UUIDs.
-var (
- NamespaceDNS = Must(FromString("6ba7b810-9dad-11d1-80b4-00c04fd430c8"))
- NamespaceURL = Must(FromString("6ba7b811-9dad-11d1-80b4-00c04fd430c8"))
- NamespaceOID = Must(FromString("6ba7b812-9dad-11d1-80b4-00c04fd430c8"))
- NamespaceX500 = Must(FromString("6ba7b814-9dad-11d1-80b4-00c04fd430c8"))
-)
-
-// Equal returns true if u1 and u2 equals, otherwise returns false.
-func Equal(u1 UUID, u2 UUID) bool {
- return bytes.Equal(u1[:], u2[:])
-}
-
-// Version returns algorithm version used to generate UUID.
-func (u UUID) Version() byte {
- return u[6] >> 4
-}
-
-// Variant returns UUID layout variant.
-func (u UUID) Variant() byte {
- switch {
- case (u[8] >> 7) == 0x00:
- return VariantNCS
- case (u[8] >> 6) == 0x02:
- return VariantRFC4122
- case (u[8] >> 5) == 0x06:
- return VariantMicrosoft
- case (u[8] >> 5) == 0x07:
- fallthrough
- default:
- return VariantFuture
- }
-}
-
-// Bytes returns bytes slice representation of UUID.
-func (u UUID) Bytes() []byte {
- return u[:]
-}
-
-// Returns canonical string representation of UUID:
-// xxxxxxxx-xxxx-xxxx-xxxx-xxxxxxxxxxxx.
-func (u UUID) String() string {
- buf := make([]byte, 36)
-
- hex.Encode(buf[0:8], u[0:4])
- buf[8] = '-'
- hex.Encode(buf[9:13], u[4:6])
- buf[13] = '-'
- hex.Encode(buf[14:18], u[6:8])
- buf[18] = '-'
- hex.Encode(buf[19:23], u[8:10])
- buf[23] = '-'
- hex.Encode(buf[24:], u[10:])
-
- return string(buf)
-}
-
-// SetVersion sets version bits.
-func (u *UUID) SetVersion(v byte) {
- u[6] = (u[6] & 0x0f) | (v << 4)
-}
-
-// SetVariant sets variant bits.
-func (u *UUID) SetVariant(v byte) {
- switch v {
- case VariantNCS:
- u[8] = (u[8]&(0xff>>1) | (0x00 << 7))
- case VariantRFC4122:
- u[8] = (u[8]&(0xff>>2) | (0x02 << 6))
- case VariantMicrosoft:
- u[8] = (u[8]&(0xff>>3) | (0x06 << 5))
- case VariantFuture:
- fallthrough
- default:
- u[8] = (u[8]&(0xff>>3) | (0x07 << 5))
- }
-}
-
-// Must is a helper that wraps a call to a function returning (UUID, error)
-// and panics if the error is non-nil. It is intended for use in variable
-// initializations such as
-// var packageUUID = uuid.Must(uuid.FromString("123e4567-e89b-12d3-a456-426655440000"));
-func Must(u UUID, err error) UUID {
- if err != nil {
- panic(err)
- }
- return u
-}
diff --git a/vendor/github.com/spacemonkeygo/spacelog/.travis.yml b/vendor/github.com/spacemonkeygo/spacelog/.travis.yml
deleted file mode 100644
index d2b67f69c..000000000
--- a/vendor/github.com/spacemonkeygo/spacelog/.travis.yml
+++ /dev/null
@@ -1,6 +0,0 @@
-language: go
-
-go:
- - 1.7
- - 1.8
- - tip
diff --git a/vendor/github.com/spacemonkeygo/spacelog/LICENSE b/vendor/github.com/spacemonkeygo/spacelog/LICENSE
deleted file mode 100644
index 37ec93a14..000000000
--- a/vendor/github.com/spacemonkeygo/spacelog/LICENSE
+++ /dev/null
@@ -1,191 +0,0 @@
-Apache License
-Version 2.0, January 2004
-http://www.apache.org/licenses/
-
-TERMS AND CONDITIONS FOR USE, REPRODUCTION, AND DISTRIBUTION
-
-1. Definitions.
-
-"License" shall mean the terms and conditions for use, reproduction, and
-distribution as defined by Sections 1 through 9 of this document.
-
-"Licensor" shall mean the copyright owner or entity authorized by the copyright
-owner that is granting the License.
-
-"Legal Entity" shall mean the union of the acting entity and all other entities
-that control, are controlled by, or are under common control with that entity.
-For the purposes of this definition, "control" means (i) the power, direct or
-indirect, to cause the direction or management of such entity, whether by
-contract or otherwise, or (ii) ownership of fifty percent (50%) or more of the
-outstanding shares, or (iii) beneficial ownership of such entity.
-
-"You" (or "Your") shall mean an individual or Legal Entity exercising
-permissions granted by this License.
-
-"Source" form shall mean the preferred form for making modifications, including
-but not limited to software source code, documentation source, and configuration
-files.
-
-"Object" form shall mean any form resulting from mechanical transformation or
-translation of a Source form, including but not limited to compiled object code,
-generated documentation, and conversions to other media types.
-
-"Work" shall mean the work of authorship, whether in Source or Object form, made
-available under the License, as indicated by a copyright notice that is included
-in or attached to the work (an example is provided in the Appendix below).
-
-"Derivative Works" shall mean any work, whether in Source or Object form, that
-is based on (or derived from) the Work and for which the editorial revisions,
-annotations, elaborations, or other modifications represent, as a whole, an
-original work of authorship. For the purposes of this License, Derivative Works
-shall not include works that remain separable from, or merely link (or bind by
-name) to the interfaces of, the Work and Derivative Works thereof.
-
-"Contribution" shall mean any work of authorship, including the original version
-of the Work and any modifications or additions to that Work or Derivative Works
-thereof, that is intentionally submitted to Licensor for inclusion in the Work
-by the copyright owner or by an individual or Legal Entity authorized to submit
-on behalf of the copyright owner. For the purposes of this definition,
-"submitted" means any form of electronic, verbal, or written communication sent
-to the Licensor or its representatives, including but not limited to
-communication on electronic mailing lists, source code control systems, and
-issue tracking systems that are managed by, or on behalf of, the Licensor for
-the purpose of discussing and improving the Work, but excluding communication
-that is conspicuously marked or otherwise designated in writing by the copyright
-owner as "Not a Contribution."
-
-"Contributor" shall mean Licensor and any individual or Legal Entity on behalf
-of whom a Contribution has been received by Licensor and subsequently
-incorporated within the Work.
-
-2. Grant of Copyright License.
-
-Subject to the terms and conditions of this License, each Contributor hereby
-grants to You a perpetual, worldwide, non-exclusive, no-charge, royalty-free,
-irrevocable copyright license to reproduce, prepare Derivative Works of,
-publicly display, publicly perform, sublicense, and distribute the Work and such
-Derivative Works in Source or Object form.
-
-3. Grant of Patent License.
-
-Subject to the terms and conditions of this License, each Contributor hereby
-grants to You a perpetual, worldwide, non-exclusive, no-charge, royalty-free,
-irrevocable (except as stated in this section) patent license to make, have
-made, use, offer to sell, sell, import, and otherwise transfer the Work, where
-such license applies only to those patent claims licensable by such Contributor
-that are necessarily infringed by their Contribution(s) alone or by combination
-of their Contribution(s) with the Work to which such Contribution(s) was
-submitted. If You institute patent litigation against any entity (including a
-cross-claim or counterclaim in a lawsuit) alleging that the Work or a
-Contribution incorporated within the Work constitutes direct or contributory
-patent infringement, then any patent licenses granted to You under this License
-for that Work shall terminate as of the date such litigation is filed.
-
-4. Redistribution.
-
-You may reproduce and distribute copies of the Work or Derivative Works thereof
-in any medium, with or without modifications, and in Source or Object form,
-provided that You meet the following conditions:
-
-You must give any other recipients of the Work or Derivative Works a copy of
-this License; and
-You must cause any modified files to carry prominent notices stating that You
-changed the files; and
-You must retain, in the Source form of any Derivative Works that You distribute,
-all copyright, patent, trademark, and attribution notices from the Source form
-of the Work, excluding those notices that do not pertain to any part of the
-Derivative Works; and
-If the Work includes a "NOTICE" text file as part of its distribution, then any
-Derivative Works that You distribute must include a readable copy of the
-attribution notices contained within such NOTICE file, excluding those notices
-that do not pertain to any part of the Derivative Works, in at least one of the
-following places: within a NOTICE text file distributed as part of the
-Derivative Works; within the Source form or documentation, if provided along
-with the Derivative Works; or, within a display generated by the Derivative
-Works, if and wherever such third-party notices normally appear. The contents of
-the NOTICE file are for informational purposes only and do not modify the
-License. You may add Your own attribution notices within Derivative Works that
-You distribute, alongside or as an addendum to the NOTICE text from the Work,
-provided that such additional attribution notices cannot be construed as
-modifying the License.
-You may add Your own copyright statement to Your modifications and may provide
-additional or different license terms and conditions for use, reproduction, or
-distribution of Your modifications, or for any such Derivative Works as a whole,
-provided Your use, reproduction, and distribution of the Work otherwise complies
-with the conditions stated in this License.
-
-5. Submission of Contributions.
-
-Unless You explicitly state otherwise, any Contribution intentionally submitted
-for inclusion in the Work by You to the Licensor shall be under the terms and
-conditions of this License, without any additional terms or conditions.
-Notwithstanding the above, nothing herein shall supersede or modify the terms of
-any separate license agreement you may have executed with Licensor regarding
-such Contributions.
-
-6. Trademarks.
-
-This License does not grant permission to use the trade names, trademarks,
-service marks, or product names of the Licensor, except as required for
-reasonable and customary use in describing the origin of the Work and
-reproducing the content of the NOTICE file.
-
-7. Disclaimer of Warranty.
-
-Unless required by applicable law or agreed to in writing, Licensor provides the
-Work (and each Contributor provides its Contributions) on an "AS IS" BASIS,
-WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied,
-including, without limitation, any warranties or conditions of TITLE,
-NON-INFRINGEMENT, MERCHANTABILITY, or FITNESS FOR A PARTICULAR PURPOSE. You are
-solely responsible for determining the appropriateness of using or
-redistributing the Work and assume any risks associated with Your exercise of
-permissions under this License.
-
-8. Limitation of Liability.
-
-In no event and under no legal theory, whether in tort (including negligence),
-contract, or otherwise, unless required by applicable law (such as deliberate
-and grossly negligent acts) or agreed to in writing, shall any Contributor be
-liable to You for damages, including any direct, indirect, special, incidental,
-or consequential damages of any character arising as a result of this License or
-out of the use or inability to use the Work (including but not limited to
-damages for loss of goodwill, work stoppage, computer failure or malfunction, or
-any and all other commercial damages or losses), even if such Contributor has
-been advised of the possibility of such damages.
-
-9. Accepting Warranty or Additional Liability.
-
-While redistributing the Work or Derivative Works thereof, You may choose to
-offer, and charge a fee for, acceptance of support, warranty, indemnity, or
-other liability obligations and/or rights consistent with this License. However,
-in accepting such obligations, You may act only on Your own behalf and on Your
-sole responsibility, not on behalf of any other Contributor, and only if You
-agree to indemnify, defend, and hold each Contributor harmless for any liability
-incurred by, or claims asserted against, such Contributor by reason of your
-accepting any such warranty or additional liability.
-
-END OF TERMS AND CONDITIONS
-
-APPENDIX: How to apply the Apache License to your work
-
-To apply the Apache License to your work, attach the following boilerplate
-notice, with the fields enclosed by brackets "[]" replaced with your own
-identifying information. (Don't include the brackets!) The text should be
-enclosed in the appropriate comment syntax for the file format. We also
-recommend that a file or class name and description of purpose be included on
-the same "printed page" as the copyright notice for easier identification within
-third-party archives.
-
- Copyright [yyyy] [name of copyright owner]
-
- Licensed under the Apache License, Version 2.0 (the "License");
- you may not use this file except in compliance with the License.
- You may obtain a copy of the License at
-
- http://www.apache.org/licenses/LICENSE-2.0
-
- Unless required by applicable law or agreed to in writing, software
- distributed under the License is distributed on an "AS IS" BASIS,
- WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- See the License for the specific language governing permissions and
- limitations under the License.
diff --git a/vendor/github.com/spacemonkeygo/spacelog/README.md b/vendor/github.com/spacemonkeygo/spacelog/README.md
deleted file mode 100644
index 28033f68d..000000000
--- a/vendor/github.com/spacemonkeygo/spacelog/README.md
+++ /dev/null
@@ -1,19 +0,0 @@
-# spacelog [![Build Status](https://api.travis-ci.org/spacemonkeygo/spacelog.svg?branch=master)](https://travis-ci.org/spacemonkeygo/spacelog)
-
-Please see http://godoc.org/github.com/spacemonkeygo/spacelog for info
-
-### License
-
-Copyright (C) 2014 Space Monkey, Inc.
-
-Licensed under the Apache License, Version 2.0 (the "License");
-you may not use this file except in compliance with the License.
-You may obtain a copy of the License at
-
- http://www.apache.org/licenses/LICENSE-2.0
-
-Unless required by applicable law or agreed to in writing, software
-distributed under the License is distributed on an "AS IS" BASIS,
-WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-See the License for the specific language governing permissions and
-limitations under the License.
diff --git a/vendor/github.com/spacemonkeygo/spacelog/capture.go b/vendor/github.com/spacemonkeygo/spacelog/capture.go
deleted file mode 100644
index d7ea1ca31..000000000
--- a/vendor/github.com/spacemonkeygo/spacelog/capture.go
+++ /dev/null
@@ -1,67 +0,0 @@
-// Copyright (C) 2014 Space Monkey, Inc.
-//
-// Licensed under the Apache License, Version 2.0 (the "License");
-// you may not use this file except in compliance with the License.
-// You may obtain a copy of the License at
-//
-// http://www.apache.org/licenses/LICENSE-2.0
-//
-// Unless required by applicable law or agreed to in writing, software
-// distributed under the License is distributed on an "AS IS" BASIS,
-// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-// See the License for the specific language governing permissions and
-// limitations under the License.
-
-package spacelog
-
-import (
- "fmt"
- "os"
- "os/exec"
-)
-
-// CaptureOutputToFile opens a filehandle using the given path, then calls
-// CaptureOutputToFd on the associated filehandle.
-func CaptureOutputToFile(path string) error {
- fh, err := os.OpenFile(path, os.O_RDWR|os.O_CREATE, 0644)
- if err != nil {
- return err
- }
- defer fh.Close()
- return CaptureOutputToFd(int(fh.Fd()))
-}
-
-// CaptureOutputToProcess starts a process and using CaptureOutputToFd,
-// redirects stdout and stderr to the subprocess' stdin.
-// CaptureOutputToProcess expects the subcommand to last the lifetime of the
-// process, and if the subprocess dies, will panic.
-func CaptureOutputToProcess(command string, args ...string) error {
- cmd := exec.Command(command, args...)
- out, err := cmd.StdinPipe()
- if err != nil {
- return err
- }
- defer out.Close()
- type fder interface {
- Fd() uintptr
- }
- out_fder, ok := out.(fder)
- if !ok {
- return fmt.Errorf("unable to get underlying pipe")
- }
- err = CaptureOutputToFd(int(out_fder.Fd()))
- if err != nil {
- return err
- }
- err = cmd.Start()
- if err != nil {
- return err
- }
- go func() {
- err := cmd.Wait()
- if err != nil {
- panic(fmt.Errorf("captured output process died! %s", err))
- }
- }()
- return nil
-}
diff --git a/vendor/github.com/spacemonkeygo/spacelog/capture_ae.go b/vendor/github.com/spacemonkeygo/spacelog/capture_ae.go
deleted file mode 100644
index f759b6f13..000000000
--- a/vendor/github.com/spacemonkeygo/spacelog/capture_ae.go
+++ /dev/null
@@ -1,25 +0,0 @@
-// Copyright (C) 2016 Space Monkey, Inc.
-//
-// Licensed under the Apache License, Version 2.0 (the "License");
-// you may not use this file except in compliance with the License.
-// You may obtain a copy of the License at
-//
-// http://www.apache.org/licenses/LICENSE-2.0
-//
-// Unless required by applicable law or agreed to in writing, software
-// distributed under the License is distributed on an "AS IS" BASIS,
-// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-// See the License for the specific language governing permissions and
-// limitations under the License.
-
-// +build appengine
-
-package spacelog
-
-import (
- "fmt"
-)
-
-func CaptureOutputToFd(fd int) error {
- return fmt.Errorf("CaptureOutputToFd not supported on App Engine")
-}
diff --git a/vendor/github.com/spacemonkeygo/spacelog/capture_linux.go b/vendor/github.com/spacemonkeygo/spacelog/capture_linux.go
deleted file mode 100644
index 34a9c0898..000000000
--- a/vendor/github.com/spacemonkeygo/spacelog/capture_linux.go
+++ /dev/null
@@ -1,35 +0,0 @@
-// Copyright (C) 2014 Space Monkey, Inc.
-//
-// Licensed under the Apache License, Version 2.0 (the "License");
-// you may not use this file except in compliance with the License.
-// You may obtain a copy of the License at
-//
-// http://www.apache.org/licenses/LICENSE-2.0
-//
-// Unless required by applicable law or agreed to in writing, software
-// distributed under the License is distributed on an "AS IS" BASIS,
-// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-// See the License for the specific language governing permissions and
-// limitations under the License.
-
-// +build !appengine
-
-package spacelog
-
-import (
- "syscall"
-)
-
-// CaptureOutputToFd redirects the current process' stdout and stderr file
-// descriptors to the given file descriptor, using the dup3 syscall.
-func CaptureOutputToFd(fd int) error {
- err := syscall.Dup3(fd, syscall.Stdout, 0)
- if err != nil {
- return err
- }
- err = syscall.Dup3(fd, syscall.Stderr, 0)
- if err != nil {
- return err
- }
- return nil
-}
diff --git a/vendor/github.com/spacemonkeygo/spacelog/capture_other.go b/vendor/github.com/spacemonkeygo/spacelog/capture_other.go
deleted file mode 100644
index 6c65051a1..000000000
--- a/vendor/github.com/spacemonkeygo/spacelog/capture_other.go
+++ /dev/null
@@ -1,38 +0,0 @@
-// Copyright (C) 2014 Space Monkey, Inc.
-//
-// Licensed under the Apache License, Version 2.0 (the "License");
-// you may not use this file except in compliance with the License.
-// You may obtain a copy of the License at
-//
-// http://www.apache.org/licenses/LICENSE-2.0
-//
-// Unless required by applicable law or agreed to in writing, software
-// distributed under the License is distributed on an "AS IS" BASIS,
-// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-// See the License for the specific language governing permissions and
-// limitations under the License.
-
-// +build !windows
-// +build !linux
-// +build !appengine
-// +build !solaris
-
-package spacelog
-
-import (
- "syscall"
-)
-
-// CaptureOutputToFd redirects the current process' stdout and stderr file
-// descriptors to the given file descriptor, using the dup2 syscall.
-func CaptureOutputToFd(fd int) error {
- err := syscall.Dup2(fd, syscall.Stdout)
- if err != nil {
- return err
- }
- err = syscall.Dup2(fd, syscall.Stderr)
- if err != nil {
- return err
- }
- return nil
-}
diff --git a/vendor/github.com/spacemonkeygo/spacelog/capture_solaris.go b/vendor/github.com/spacemonkeygo/spacelog/capture_solaris.go
deleted file mode 100644
index d77e4f2d1..000000000
--- a/vendor/github.com/spacemonkeygo/spacelog/capture_solaris.go
+++ /dev/null
@@ -1,33 +0,0 @@
-// Copyright (C) 2014 Space Monkey, Inc.
-//
-// Licensed under the Apache License, Version 2.0 (the "License");
-// you may not use this file except in compliance with the License.
-// You may obtain a copy of the License at
-//
-// http://www.apache.org/licenses/LICENSE-2.0
-//
-// Unless required by applicable law or agreed to in writing, software
-// distributed under the License is distributed on an "AS IS" BASIS,
-// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-// See the License for the specific language governing permissions and
-// limitations under the License.
-
-package spacelog
-
-import (
- "golang.org/x/sys/unix"
-)
-
-// CaptureOutputToFd redirects the current process' stdout and stderr file
-// descriptors to the given file descriptor, using the dup2 syscall.
-func CaptureOutputToFd(fd int) error {
- err := unix.Dup2(fd, unix.Stdout)
- if err != nil {
- return err
- }
- err = unix.Dup2(fd, unix.Stderr)
- if err != nil {
- return err
- }
- return nil
-}
diff --git a/vendor/github.com/spacemonkeygo/spacelog/collection.go b/vendor/github.com/spacemonkeygo/spacelog/collection.go
deleted file mode 100644
index 8231b4a53..000000000
--- a/vendor/github.com/spacemonkeygo/spacelog/collection.go
+++ /dev/null
@@ -1,271 +0,0 @@
-// Copyright (C) 2014 Space Monkey, Inc.
-//
-// Licensed under the Apache License, Version 2.0 (the "License");
-// you may not use this file except in compliance with the License.
-// You may obtain a copy of the License at
-//
-// http://www.apache.org/licenses/LICENSE-2.0
-//
-// Unless required by applicable law or agreed to in writing, software
-// distributed under the License is distributed on an "AS IS" BASIS,
-// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-// See the License for the specific language governing permissions and
-// limitations under the License.
-
-package spacelog
-
-import (
- "regexp"
- "runtime"
- "strings"
- "sync"
- "text/template"
-)
-
-var (
- // If set, these prefixes will be stripped out of automatic logger names.
- IgnoredPrefixes []string
-
- badChars = regexp.MustCompile("[^a-zA-Z0-9_.-]")
- slashes = regexp.MustCompile("[/]")
-)
-
-func callerName() string {
- pc, _, _, ok := runtime.Caller(2)
- if !ok {
- return "unknown.unknown"
- }
- f := runtime.FuncForPC(pc)
- if f == nil {
- return "unknown.unknown"
- }
- name := f.Name()
- for _, prefix := range IgnoredPrefixes {
- name = strings.TrimPrefix(name, prefix)
- }
- return badChars.ReplaceAllLiteralString(
- slashes.ReplaceAllLiteralString(name, "."), "_")
-}
-
-// LoggerCollections contain all of the loggers a program might use. Typically
-// a codebase will just use the default logger collection.
-type LoggerCollection struct {
- mtx sync.Mutex
- loggers map[string]*Logger
- level LogLevel
- handler Handler
-}
-
-// NewLoggerCollection creates a new logger collection. It's unlikely you will
-// ever practically need this method. Use the DefaultLoggerCollection instead.
-func NewLoggerCollection() *LoggerCollection {
- return &LoggerCollection{
- loggers: make(map[string]*Logger),
- level: DefaultLevel,
- handler: defaultHandler}
-}
-
-// GetLogger returns a new Logger with a name automatically generated using
-// the callstack. If you want to avoid automatic name generation check out
-// GetLoggerNamed
-func (c *LoggerCollection) GetLogger() *Logger {
- return c.GetLoggerNamed(callerName())
-}
-
-func (c *LoggerCollection) getLogger(name string, level LogLevel,
- handler Handler) *Logger {
- c.mtx.Lock()
- defer c.mtx.Unlock()
-
- logger, exists := c.loggers[name]
- if !exists {
- logger = &Logger{level: level,
- collection: c,
- name: name,
- handler: handler}
- c.loggers[name] = logger
- }
- return logger
-}
-
-// ConfigureLoggers configures loggers according to the given string
-// specification, which specifies a set of loggers and their associated
-// logging levels. Loggers are semicolon-separated; each
-// configuration is specified as =. White space outside of
-// logger names and levels is ignored. The default level is specified
-// with the name "DEFAULT".
-//
-// An example specification:
-// `DEFAULT=ERROR; foo.bar=WARNING`
-func (c *LoggerCollection) ConfigureLoggers(specification string) error {
- confs := strings.Split(strings.TrimSpace(specification), ";")
- for i := range confs {
- conf := strings.SplitN(confs[i], "=", 2)
- levelstr := strings.TrimSpace(conf[1])
- name := strings.TrimSpace(conf[0])
- level, err := LevelFromString(levelstr)
- if err != nil {
- return err
- }
- if name == "DEFAULT" {
- c.SetLevel(nil, level)
- continue
- }
- logger := c.GetLoggerNamed(name)
- logger.setLevel(level)
- }
- return nil
-}
-
-// GetLoggerNamed returns a new Logger with the provided name. GetLogger is
-// more frequently used.
-func (c *LoggerCollection) GetLoggerNamed(name string) *Logger {
- c.mtx.Lock()
- defer c.mtx.Unlock()
-
- logger, exists := c.loggers[name]
- if !exists {
- logger = &Logger{level: c.level,
- collection: c,
- name: name,
- handler: c.handler}
- c.loggers[name] = logger
- }
- return logger
-}
-
-// SetLevel will set the current log level for all loggers with names that
-// match a provided regular expression. If the regular expression is nil, then
-// all loggers match.
-func (c *LoggerCollection) SetLevel(re *regexp.Regexp, level LogLevel) {
- c.mtx.Lock()
- defer c.mtx.Unlock()
-
- if re == nil {
- c.level = level
- }
- for name, logger := range c.loggers {
- if re == nil || re.MatchString(name) {
- logger.setLevel(level)
- }
- }
-}
-
-// SetHandler will set the current log handler for all loggers with names that
-// match a provided regular expression. If the regular expression is nil, then
-// all loggers match.
-func (c *LoggerCollection) SetHandler(re *regexp.Regexp, handler Handler) {
- c.mtx.Lock()
- defer c.mtx.Unlock()
-
- if re == nil {
- c.handler = handler
- }
- for name, logger := range c.loggers {
- if re == nil || re.MatchString(name) {
- logger.setHandler(handler)
- }
- }
-}
-
-// SetTextTemplate will set the current text template for all loggers with
-// names that match a provided regular expression. If the regular expression
-// is nil, then all loggers match. Note that not every handler is guaranteed
-// to support text templates and a text template will only apply to
-// text-oriented and unstructured handlers.
-func (c *LoggerCollection) SetTextTemplate(re *regexp.Regexp,
- t *template.Template) {
- c.mtx.Lock()
- defer c.mtx.Unlock()
-
- if re == nil {
- c.handler.SetTextTemplate(t)
- }
- for name, logger := range c.loggers {
- if re == nil || re.MatchString(name) {
- logger.getHandler().SetTextTemplate(t)
- }
- }
-}
-
-// SetTextOutput will set the current output interface for all loggers with
-// names that match a provided regular expression. If the regular expression
-// is nil, then all loggers match. Note that not every handler is guaranteed
-// to support text output and a text output interface will only apply to
-// text-oriented and unstructured handlers.
-func (c *LoggerCollection) SetTextOutput(re *regexp.Regexp,
- output TextOutput) {
- c.mtx.Lock()
- defer c.mtx.Unlock()
-
- if re == nil {
- c.handler.SetTextOutput(output)
- }
- for name, logger := range c.loggers {
- if re == nil || re.MatchString(name) {
- logger.getHandler().SetTextOutput(output)
- }
- }
-}
-
-var (
- // It's unlikely you'll need to use this directly
- DefaultLoggerCollection = NewLoggerCollection()
-)
-
-// GetLogger returns an automatically-named logger on the default logger
-// collection.
-func GetLogger() *Logger {
- return DefaultLoggerCollection.GetLoggerNamed(callerName())
-}
-
-// GetLoggerNamed returns a new Logger with the provided name on the default
-// logger collection. GetLogger is more frequently used.
-func GetLoggerNamed(name string) *Logger {
- return DefaultLoggerCollection.GetLoggerNamed(name)
-}
-
-// ConfigureLoggers configures loggers according to the given string
-// specification, which specifies a set of loggers and their associated
-// logging levels. Loggers are colon- or semicolon-separated; each
-// configuration is specified as =. White space outside of
-// logger names and levels is ignored. The DEFAULT module is specified
-// with the name "DEFAULT".
-//
-// An example specification:
-// `DEFAULT=ERROR; foo.bar=WARNING`
-func ConfigureLoggers(specification string) error {
- return DefaultLoggerCollection.ConfigureLoggers(specification)
-}
-
-// SetLevel will set the current log level for all loggers on the default
-// collection with names that match a provided regular expression. If the
-// regular expression is nil, then all loggers match.
-func SetLevel(re *regexp.Regexp, level LogLevel) {
- DefaultLoggerCollection.SetLevel(re, level)
-}
-
-// SetHandler will set the current log handler for all loggers on the default
-// collection with names that match a provided regular expression. If the
-// regular expression is nil, then all loggers match.
-func SetHandler(re *regexp.Regexp, handler Handler) {
- DefaultLoggerCollection.SetHandler(re, handler)
-}
-
-// SetTextTemplate will set the current text template for all loggers on the
-// default collection with names that match a provided regular expression. If
-// the regular expression is nil, then all loggers match. Note that not every
-// handler is guaranteed to support text templates and a text template will
-// only apply to text-oriented and unstructured handlers.
-func SetTextTemplate(re *regexp.Regexp, t *template.Template) {
- DefaultLoggerCollection.SetTextTemplate(re, t)
-}
-
-// SetTextOutput will set the current output interface for all loggers on the
-// default collection with names that match a provided regular expression. If
-// the regular expression is nil, then all loggers match. Note that not every
-// handler is guaranteed to support text output and a text output interface
-// will only apply to text-oriented and unstructured handlers.
-func SetTextOutput(re *regexp.Regexp, output TextOutput) {
- DefaultLoggerCollection.SetTextOutput(re, output)
-}
diff --git a/vendor/github.com/spacemonkeygo/spacelog/convenience.go b/vendor/github.com/spacemonkeygo/spacelog/convenience.go
deleted file mode 100644
index b3056329a..000000000
--- a/vendor/github.com/spacemonkeygo/spacelog/convenience.go
+++ /dev/null
@@ -1,296 +0,0 @@
-// Copyright (C) 2014 Space Monkey, Inc.
-//
-// Licensed under the Apache License, Version 2.0 (the "License");
-// you may not use this file except in compliance with the License.
-// You may obtain a copy of the License at
-//
-// http://www.apache.org/licenses/LICENSE-2.0
-//
-// Unless required by applicable law or agreed to in writing, software
-// distributed under the License is distributed on an "AS IS" BASIS,
-// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-// See the License for the specific language governing permissions and
-// limitations under the License.
-
-package spacelog
-
-import (
- "fmt"
- "io"
-)
-
-// Trace logs a collection of values if the logger's level is trace or even
-// more permissive.
-func (l *Logger) Trace(v ...interface{}) {
- if l.getLevel() <= Trace {
- l.getHandler().Log(l.name, Trace, fmt.Sprint(v...), 1)
- }
-}
-
-// Tracef logs a format string with values if the logger's level is trace or
-// even more permissive.
-func (l *Logger) Tracef(format string, v ...interface{}) {
- if l.getLevel() <= Trace {
- l.getHandler().Log(l.name, Trace, fmt.Sprintf(format, v...), 1)
- }
-}
-
-// Tracee logs an error value if the error is not nil and the logger's level
-// is trace or even more permissive.
-func (l *Logger) Tracee(err error) {
- if l.getLevel() <= Trace && err != nil {
- l.getHandler().Log(l.name, Trace, err.Error(), 1)
- }
-}
-
-// TraceEnabled returns true if the logger's level is trace or even more
-// permissive.
-func (l *Logger) TraceEnabled() bool {
- return l.getLevel() <= Trace
-}
-
-// Debug logs a collection of values if the logger's level is debug or even
-// more permissive.
-func (l *Logger) Debug(v ...interface{}) {
- if l.getLevel() <= Debug {
- l.getHandler().Log(l.name, Debug, fmt.Sprint(v...), 1)
- }
-}
-
-// Debugf logs a format string with values if the logger's level is debug or
-// even more permissive.
-func (l *Logger) Debugf(format string, v ...interface{}) {
- if l.getLevel() <= Debug {
- l.getHandler().Log(l.name, Debug, fmt.Sprintf(format, v...), 1)
- }
-}
-
-// Debuge logs an error value if the error is not nil and the logger's level
-// is debug or even more permissive.
-func (l *Logger) Debuge(err error) {
- if l.getLevel() <= Debug && err != nil {
- l.getHandler().Log(l.name, Debug, err.Error(), 1)
- }
-}
-
-// DebugEnabled returns true if the logger's level is debug or even more
-// permissive.
-func (l *Logger) DebugEnabled() bool {
- return l.getLevel() <= Debug
-}
-
-// Info logs a collection of values if the logger's level is info or even
-// more permissive.
-func (l *Logger) Info(v ...interface{}) {
- if l.getLevel() <= Info {
- l.getHandler().Log(l.name, Info, fmt.Sprint(v...), 1)
- }
-}
-
-// Infof logs a format string with values if the logger's level is info or
-// even more permissive.
-func (l *Logger) Infof(format string, v ...interface{}) {
- if l.getLevel() <= Info {
- l.getHandler().Log(l.name, Info, fmt.Sprintf(format, v...), 1)
- }
-}
-
-// Infoe logs an error value if the error is not nil and the logger's level
-// is info or even more permissive.
-func (l *Logger) Infoe(err error) {
- if l.getLevel() <= Info && err != nil {
- l.getHandler().Log(l.name, Info, err.Error(), 1)
- }
-}
-
-// InfoEnabled returns true if the logger's level is info or even more
-// permissive.
-func (l *Logger) InfoEnabled() bool {
- return l.getLevel() <= Info
-}
-
-// Notice logs a collection of values if the logger's level is notice or even
-// more permissive.
-func (l *Logger) Notice(v ...interface{}) {
- if l.getLevel() <= Notice {
- l.getHandler().Log(l.name, Notice, fmt.Sprint(v...), 1)
- }
-}
-
-// Noticef logs a format string with values if the logger's level is notice or
-// even more permissive.
-func (l *Logger) Noticef(format string, v ...interface{}) {
- if l.getLevel() <= Notice {
- l.getHandler().Log(l.name, Notice, fmt.Sprintf(format, v...), 1)
- }
-}
-
-// Noticee logs an error value if the error is not nil and the logger's level
-// is notice or even more permissive.
-func (l *Logger) Noticee(err error) {
- if l.getLevel() <= Notice && err != nil {
- l.getHandler().Log(l.name, Notice, err.Error(), 1)
- }
-}
-
-// NoticeEnabled returns true if the logger's level is notice or even more
-// permissive.
-func (l *Logger) NoticeEnabled() bool {
- return l.getLevel() <= Notice
-}
-
-// Warn logs a collection of values if the logger's level is warning or even
-// more permissive.
-func (l *Logger) Warn(v ...interface{}) {
- if l.getLevel() <= Warning {
- l.getHandler().Log(l.name, Warning, fmt.Sprint(v...), 1)
- }
-}
-
-// Warnf logs a format string with values if the logger's level is warning or
-// even more permissive.
-func (l *Logger) Warnf(format string, v ...interface{}) {
- if l.getLevel() <= Warning {
- l.getHandler().Log(l.name, Warning, fmt.Sprintf(format, v...), 1)
- }
-}
-
-// Warne logs an error value if the error is not nil and the logger's level
-// is warning or even more permissive.
-func (l *Logger) Warne(err error) {
- if l.getLevel() <= Warning && err != nil {
- l.getHandler().Log(l.name, Warning, err.Error(), 1)
- }
-}
-
-// WarnEnabled returns true if the logger's level is warning or even more
-// permissive.
-func (l *Logger) WarnEnabled() bool {
- return l.getLevel() <= Warning
-}
-
-// Error logs a collection of values if the logger's level is error or even
-// more permissive.
-func (l *Logger) Error(v ...interface{}) {
- if l.getLevel() <= Error {
- l.getHandler().Log(l.name, Error, fmt.Sprint(v...), 1)
- }
-}
-
-// Errorf logs a format string with values if the logger's level is error or
-// even more permissive.
-func (l *Logger) Errorf(format string, v ...interface{}) {
- if l.getLevel() <= Error {
- l.getHandler().Log(l.name, Error, fmt.Sprintf(format, v...), 1)
- }
-}
-
-// Errore logs an error value if the error is not nil and the logger's level
-// is error or even more permissive.
-func (l *Logger) Errore(err error) {
- if l.getLevel() <= Error && err != nil {
- l.getHandler().Log(l.name, Error, err.Error(), 1)
- }
-}
-
-// ErrorEnabled returns true if the logger's level is error or even more
-// permissive.
-func (l *Logger) ErrorEnabled() bool {
- return l.getLevel() <= Error
-}
-
-// Crit logs a collection of values if the logger's level is critical or even
-// more permissive.
-func (l *Logger) Crit(v ...interface{}) {
- if l.getLevel() <= Critical {
- l.getHandler().Log(l.name, Critical, fmt.Sprint(v...), 1)
- }
-}
-
-// Critf logs a format string with values if the logger's level is critical or
-// even more permissive.
-func (l *Logger) Critf(format string, v ...interface{}) {
- if l.getLevel() <= Critical {
- l.getHandler().Log(l.name, Critical, fmt.Sprintf(format, v...), 1)
- }
-}
-
-// Crite logs an error value if the error is not nil and the logger's level
-// is critical or even more permissive.
-func (l *Logger) Crite(err error) {
- if l.getLevel() <= Critical && err != nil {
- l.getHandler().Log(l.name, Critical, err.Error(), 1)
- }
-}
-
-// CritEnabled returns true if the logger's level is critical or even more
-// permissive.
-func (l *Logger) CritEnabled() bool {
- return l.getLevel() <= Critical
-}
-
-// Log logs a collection of values if the logger's level is the provided level
-// or even more permissive.
-func (l *Logger) Log(level LogLevel, v ...interface{}) {
- if l.getLevel() <= level {
- l.getHandler().Log(l.name, level, fmt.Sprint(v...), 1)
- }
-}
-
-// Logf logs a format string with values if the logger's level is the provided
-// level or even more permissive.
-func (l *Logger) Logf(level LogLevel, format string, v ...interface{}) {
- if l.getLevel() <= level {
- l.getHandler().Log(l.name, level, fmt.Sprintf(format, v...), 1)
- }
-}
-
-// Loge logs an error value if the error is not nil and the logger's level
-// is the provided level or even more permissive.
-func (l *Logger) Loge(level LogLevel, err error) {
- if l.getLevel() <= level && err != nil {
- l.getHandler().Log(l.name, level, err.Error(), 1)
- }
-}
-
-// LevelEnabled returns true if the logger's level is the provided level or
-// even more permissive.
-func (l *Logger) LevelEnabled(level LogLevel) bool {
- return l.getLevel() <= level
-}
-
-type writer struct {
- l *Logger
- level LogLevel
-}
-
-func (w *writer) Write(data []byte) (int, error) {
- if w.l.getLevel() <= w.level {
- w.l.getHandler().Log(w.l.name, w.level, string(data), 1)
- }
- return len(data), nil
-}
-
-// Writer returns an io.Writer that writes messages at the given log level.
-func (l *Logger) Writer(level LogLevel) io.Writer {
- return &writer{l: l, level: level}
-}
-
-type writerNoCaller struct {
- l *Logger
- level LogLevel
-}
-
-func (w *writerNoCaller) Write(data []byte) (int, error) {
- if w.l.getLevel() <= w.level {
- w.l.getHandler().Log(w.l.name, w.level, string(data), -1)
- }
- return len(data), nil
-}
-
-// WriterWithoutCaller returns an io.Writer that writes messages at the given
-// log level, but does not attempt to collect the Write caller, and provides
-// no caller information to the log event.
-func (l *Logger) WriterWithoutCaller(level LogLevel) io.Writer {
- return &writerNoCaller{l: l, level: level}
-}
diff --git a/vendor/github.com/spacemonkeygo/spacelog/doc.go b/vendor/github.com/spacemonkeygo/spacelog/doc.go
deleted file mode 100644
index 28c25b4db..000000000
--- a/vendor/github.com/spacemonkeygo/spacelog/doc.go
+++ /dev/null
@@ -1,39 +0,0 @@
-// Copyright (C) 2014 Space Monkey, Inc.
-//
-// Licensed under the Apache License, Version 2.0 (the "License");
-// you may not use this file except in compliance with the License.
-// You may obtain a copy of the License at
-//
-// http://www.apache.org/licenses/LICENSE-2.0
-//
-// Unless required by applicable law or agreed to in writing, software
-// distributed under the License is distributed on an "AS IS" BASIS,
-// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-// See the License for the specific language governing permissions and
-// limitations under the License.
-
-/*
-Package spacelog is a collection of interface lego bricks designed to help you
-build a flexible logging system.
-
-spacelog is loosely inspired by the Python logging library.
-
-The basic interaction is between a Logger and a Handler. A Logger is
-what the programmer typically interacts with for creating log messages. A
-Logger will be at a given log level, and if log messages can clear that
-specific logger's log level filter, they will be passed off to the Handler.
-
-Loggers are instantiated from GetLogger and GetLoggerNamed.
-
-A Handler is a very generic interface for handling log events. You can provide
-your own Handler for doing structured JSON output or colorized output or
-countless other things.
-
-Provided are a simple TextHandler with a variety of log event templates and
-TextOutput sinks, such as io.Writer, Syslog, and so forth.
-
-Make sure to see the source of the setup subpackage for an example of easy and
-configurable logging setup at process start:
- http://godoc.org/github.com/spacemonkeygo/spacelog/setup
-*/
-package spacelog
diff --git a/vendor/github.com/spacemonkeygo/spacelog/event.go b/vendor/github.com/spacemonkeygo/spacelog/event.go
deleted file mode 100644
index da863cbf2..000000000
--- a/vendor/github.com/spacemonkeygo/spacelog/event.go
+++ /dev/null
@@ -1,75 +0,0 @@
-// Copyright (C) 2014 Space Monkey, Inc.
-//
-// Licensed under the Apache License, Version 2.0 (the "License");
-// you may not use this file except in compliance with the License.
-// You may obtain a copy of the License at
-//
-// http://www.apache.org/licenses/LICENSE-2.0
-//
-// Unless required by applicable law or agreed to in writing, software
-// distributed under the License is distributed on an "AS IS" BASIS,
-// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-// See the License for the specific language governing permissions and
-// limitations under the License.
-
-package spacelog
-
-import (
- "path/filepath"
- "strings"
- "time"
-)
-
-// TermColors is a type that knows how to output terminal colors and formatting
-type TermColors struct{}
-
-// LogEvent is a type made by the default text handler for feeding to log
-// templates. It has as much contextual data about the log event as possible.
-type LogEvent struct {
- LoggerName string
- Level LogLevel
- Message string
- Filepath string
- Line int
- Timestamp time.Time
-
- TermColors
-}
-
-// Reset resets the color palette for terminals that support color
-func (TermColors) Reset() string { return "\x1b[0m" }
-func (TermColors) Bold() string { return "\x1b[1m" }
-func (TermColors) Underline() string { return "\x1b[4m" }
-func (TermColors) Black() string { return "\x1b[30m" }
-func (TermColors) Red() string { return "\x1b[31m" }
-func (TermColors) Green() string { return "\x1b[32m" }
-func (TermColors) Yellow() string { return "\x1b[33m" }
-func (TermColors) Blue() string { return "\x1b[34m" }
-func (TermColors) Magenta() string { return "\x1b[35m" }
-func (TermColors) Cyan() string { return "\x1b[36m" }
-func (TermColors) White() string { return "\x1b[37m" }
-
-func (l *LogEvent) Filename() string {
- if l.Filepath == "" {
- return ""
- }
- return filepath.Base(l.Filepath)
-}
-
-func (l *LogEvent) Time() string {
- return l.Timestamp.Format("15:04:05")
-}
-
-func (l *LogEvent) Date() string {
- return l.Timestamp.Format("2006/01/02")
-}
-
-// LevelJustified returns the log level in string form justified so that all
-// log levels take the same text width.
-func (l *LogEvent) LevelJustified() (rv string) {
- rv = l.Level.String()
- if len(rv) < 5 {
- rv += strings.Repeat(" ", 5-len(rv))
- }
- return rv
-}
diff --git a/vendor/github.com/spacemonkeygo/spacelog/handler.go b/vendor/github.com/spacemonkeygo/spacelog/handler.go
deleted file mode 100644
index e3db08654..000000000
--- a/vendor/github.com/spacemonkeygo/spacelog/handler.go
+++ /dev/null
@@ -1,53 +0,0 @@
-// Copyright (C) 2014 Space Monkey, Inc.
-//
-// Licensed under the Apache License, Version 2.0 (the "License");
-// you may not use this file except in compliance with the License.
-// You may obtain a copy of the License at
-//
-// http://www.apache.org/licenses/LICENSE-2.0
-//
-// Unless required by applicable law or agreed to in writing, software
-// distributed under the License is distributed on an "AS IS" BASIS,
-// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-// See the License for the specific language governing permissions and
-// limitations under the License.
-
-package spacelog
-
-import (
- "text/template"
-)
-
-// Handler is an interface that knows how to process log events. This is the
-// basic interface type for building a logging system. If you want to route
-// structured log data somewhere, you would implement this interface.
-type Handler interface {
- // Log is called for every message. if calldepth is negative, caller
- // information is missing
- Log(logger_name string, level LogLevel, msg string, calldepth int)
-
- // These two calls are expected to be no-ops on non-text-output handlers
- SetTextTemplate(t *template.Template)
- SetTextOutput(output TextOutput)
-}
-
-// HandlerFunc is a type to make implementation of the Handler interface easier
-type HandlerFunc func(logger_name string, level LogLevel, msg string,
- calldepth int)
-
-// Log simply calls f(logger_name, level, msg, calldepth)
-func (f HandlerFunc) Log(logger_name string, level LogLevel, msg string,
- calldepth int) {
- f(logger_name, level, msg, calldepth)
-}
-
-// SetTextTemplate is a no-op
-func (HandlerFunc) SetTextTemplate(t *template.Template) {}
-
-// SetTextOutput is a no-op
-func (HandlerFunc) SetTextOutput(output TextOutput) {}
-
-var (
- defaultHandler = NewTextHandler(StdlibTemplate,
- &StdlibOutput{})
-)
diff --git a/vendor/github.com/spacemonkeygo/spacelog/level.go b/vendor/github.com/spacemonkeygo/spacelog/level.go
deleted file mode 100644
index bf5070752..000000000
--- a/vendor/github.com/spacemonkeygo/spacelog/level.go
+++ /dev/null
@@ -1,136 +0,0 @@
-// Copyright (C) 2014 Space Monkey, Inc.
-//
-// Licensed under the Apache License, Version 2.0 (the "License");
-// you may not use this file except in compliance with the License.
-// You may obtain a copy of the License at
-//
-// http://www.apache.org/licenses/LICENSE-2.0
-//
-// Unless required by applicable law or agreed to in writing, software
-// distributed under the License is distributed on an "AS IS" BASIS,
-// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-// See the License for the specific language governing permissions and
-// limitations under the License.
-
-package spacelog
-
-import (
- "fmt"
- "strconv"
- "strings"
-)
-
-type LogLevel int32
-
-const (
- Trace LogLevel = 5
- Debug LogLevel = 10
- Info LogLevel = 20
- Notice LogLevel = 30
- Warning LogLevel = 40
- Error LogLevel = 50
- Critical LogLevel = 60
- // syslog has Alert
- // syslog has Emerg
-
- DefaultLevel = Notice
-)
-
-// String returns the log level name in short form
-func (l LogLevel) String() string {
- switch l.Match() {
- case Critical:
- return "CRIT"
- case Error:
- return "ERR"
- case Warning:
- return "WARN"
- case Notice:
- return "NOTE"
- case Info:
- return "INFO"
- case Debug:
- return "DEBUG"
- case Trace:
- return "TRACE"
- default:
- return "UNSET"
- }
-}
-
-// String returns the log level name in long human readable form
-func (l LogLevel) Name() string {
- switch l.Match() {
- case Critical:
- return "critical"
- case Error:
- return "error"
- case Warning:
- return "warning"
- case Notice:
- return "notice"
- case Info:
- return "info"
- case Debug:
- return "debug"
- case Trace:
- return "trace"
- default:
- return "unset"
- }
-}
-
-// Match returns the greatest named log level that is less than or equal to
-// the receiver log level. For example, if the log level is 43, Match() will
-// return 40 (Warning)
-func (l LogLevel) Match() LogLevel {
- if l >= Critical {
- return Critical
- }
- if l >= Error {
- return Error
- }
- if l >= Warning {
- return Warning
- }
- if l >= Notice {
- return Notice
- }
- if l >= Info {
- return Info
- }
- if l >= Debug {
- return Debug
- }
- if l >= Trace {
- return Trace
- }
- return 0
-}
-
-// LevelFromString will convert a named log level to its corresponding value
-// type, or error if both the name was unknown and an integer value was unable
-// to be parsed.
-func LevelFromString(str string) (LogLevel, error) {
- switch strings.ToLower(str) {
- case "crit", "critical":
- return Critical, nil
- case "err", "error":
- return Error, nil
- case "warn", "warning":
- return Warning, nil
- case "note", "notice":
- return Notice, nil
- case "info":
- return Info, nil
- case "debug":
- return Debug, nil
- case "trace":
- return Trace, nil
- }
- val, err := strconv.ParseInt(str, 10, 32)
- if err == nil {
- return LogLevel(val), nil
- }
- return 0, fmt.Errorf("Invalid log level: %s", str)
-}
diff --git a/vendor/github.com/spacemonkeygo/spacelog/logger.go b/vendor/github.com/spacemonkeygo/spacelog/logger.go
deleted file mode 100644
index ae1734b27..000000000
--- a/vendor/github.com/spacemonkeygo/spacelog/logger.go
+++ /dev/null
@@ -1,61 +0,0 @@
-// Copyright (C) 2014 Space Monkey, Inc.
-//
-// Licensed under the Apache License, Version 2.0 (the "License");
-// you may not use this file except in compliance with the License.
-// You may obtain a copy of the License at
-//
-// http://www.apache.org/licenses/LICENSE-2.0
-//
-// Unless required by applicable law or agreed to in writing, software
-// distributed under the License is distributed on an "AS IS" BASIS,
-// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-// See the License for the specific language governing permissions and
-// limitations under the License.
-
-package spacelog
-
-import (
- "sync"
- "sync/atomic"
-)
-
-// Logger is the basic type that allows for logging. A logger has an associated
-// name, given to it during construction, either through a logger collection,
-// GetLogger, GetLoggerNamed, or another Logger's Scope method. A logger also
-// has an associated level and handler, typically configured through the logger
-// collection to which it belongs.
-type Logger struct {
- level LogLevel
- name string
- collection *LoggerCollection
-
- handler_mtx sync.RWMutex
- handler Handler
-}
-
-// Scope returns a new Logger with the same level and handler, using the
-// receiver Logger's name as a prefix.
-func (l *Logger) Scope(name string) *Logger {
- return l.collection.getLogger(l.name+"."+name, l.getLevel(),
- l.getHandler())
-}
-
-func (l *Logger) setLevel(level LogLevel) {
- atomic.StoreInt32((*int32)(&l.level), int32(level))
-}
-
-func (l *Logger) getLevel() LogLevel {
- return LogLevel(atomic.LoadInt32((*int32)(&l.level)))
-}
-
-func (l *Logger) setHandler(handler Handler) {
- l.handler_mtx.Lock()
- defer l.handler_mtx.Unlock()
- l.handler = handler
-}
-
-func (l *Logger) getHandler() Handler {
- l.handler_mtx.RLock()
- defer l.handler_mtx.RUnlock()
- return l.handler
-}
diff --git a/vendor/github.com/spacemonkeygo/spacelog/output.go b/vendor/github.com/spacemonkeygo/spacelog/output.go
deleted file mode 100644
index 8751268fb..000000000
--- a/vendor/github.com/spacemonkeygo/spacelog/output.go
+++ /dev/null
@@ -1,178 +0,0 @@
-// Copyright (C) 2014 Space Monkey, Inc.
-//
-// Licensed under the Apache License, Version 2.0 (the "License");
-// you may not use this file except in compliance with the License.
-// You may obtain a copy of the License at
-//
-// http://www.apache.org/licenses/LICENSE-2.0
-//
-// Unless required by applicable law or agreed to in writing, software
-// distributed under the License is distributed on an "AS IS" BASIS,
-// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-// See the License for the specific language governing permissions and
-// limitations under the License.
-
-package spacelog
-
-import (
- "bytes"
- "fmt"
- "io"
- "log"
- "os"
- "sync"
-)
-
-type TextOutput interface {
- Output(LogLevel, []byte)
-}
-
-// WriterOutput is an io.Writer wrapper that matches the TextOutput interface
-type WriterOutput struct {
- w io.Writer
-}
-
-// NewWriterOutput returns a TextOutput that writes messages to an io.Writer
-func NewWriterOutput(w io.Writer) *WriterOutput {
- return &WriterOutput{w: w}
-}
-
-func (o *WriterOutput) Output(_ LogLevel, message []byte) {
- o.w.Write(append(bytes.TrimRight(message, "\r\n"), platformNewline...))
-}
-
-// StdlibOutput is a TextOutput that simply writes to the default Go stdlib
-// logging system. It is the default. If you configure the Go stdlib to write
-// to spacelog, make sure to provide a new TextOutput to your logging
-// collection
-type StdlibOutput struct{}
-
-func (*StdlibOutput) Output(_ LogLevel, message []byte) {
- log.Print(string(message))
-}
-
-type bufferMsg struct {
- level LogLevel
- message []byte
-}
-
-// BufferedOutput uses a channel to synchronize writes to a wrapped TextOutput
-// and allows for buffering a limited amount of log events.
-type BufferedOutput struct {
- o TextOutput
- c chan bufferMsg
- running sync.Mutex
- close_once sync.Once
-}
-
-// NewBufferedOutput returns a BufferedOutput wrapping output with a buffer
-// size of buffer.
-func NewBufferedOutput(output TextOutput, buffer int) *BufferedOutput {
- if buffer < 0 {
- buffer = 0
- }
- b := &BufferedOutput{
- o: output,
- c: make(chan bufferMsg, buffer)}
- go b.process()
- return b
-}
-
-// Close shuts down the BufferedOutput's processing
-func (b *BufferedOutput) Close() {
- b.close_once.Do(func() {
- close(b.c)
- })
- b.running.Lock()
- b.running.Unlock()
-}
-
-func (b *BufferedOutput) Output(level LogLevel, message []byte) {
- b.c <- bufferMsg{level: level, message: message}
-}
-
-func (b *BufferedOutput) process() {
- b.running.Lock()
- defer b.running.Unlock()
- for {
- msg, open := <-b.c
- if !open {
- break
- }
- b.o.Output(msg.level, msg.message)
- }
-}
-
-// A TextOutput object that also implements HupHandlingTextOutput may have its
-// OnHup() method called when an administrative signal is sent to this process.
-type HupHandlingTextOutput interface {
- TextOutput
- OnHup()
-}
-
-// FileWriterOutput is like WriterOutput with a plain file handle, but it
-// knows how to reopen the file (or try to reopen it) if it hasn't been able
-// to open the file previously, or if an appropriate signal has been received.
-type FileWriterOutput struct {
- *WriterOutput
- path string
-}
-
-// Creates a new FileWriterOutput object. This is the only case where an
-// error opening the file will be reported to the caller; if we try to
-// reopen it later and the reopen fails, we'll just keep trying until it
-// works.
-func NewFileWriterOutput(path string) (*FileWriterOutput, error) {
- fo := &FileWriterOutput{path: path}
- fh, err := fo.openFile()
- if err != nil {
- return nil, err
- }
- fo.WriterOutput = NewWriterOutput(fh)
- return fo, nil
-}
-
-// Try to open the file with the path associated with this object.
-func (fo *FileWriterOutput) openFile() (*os.File, error) {
- return os.OpenFile(fo.path, os.O_WRONLY|os.O_CREATE|os.O_APPEND, 0644)
-}
-
-// Try to communicate a message without using our log file. In all likelihood,
-// stderr is closed or redirected to /dev/null, but at least we can try
-// writing there. In the very worst case, if an admin attaches a ptrace to
-// this process, it will be more clear what the problem is.
-func (fo *FileWriterOutput) fallbackLog(tmpl string, args ...interface{}) {
- fmt.Fprintf(os.Stderr, tmpl, args...)
-}
-
-// Output a log line by writing it to the file. If the file has been
-// released, try to open it again. If that fails, cry for a little
-// while, then throw away the message and carry on.
-func (fo *FileWriterOutput) Output(ll LogLevel, message []byte) {
- if fo.WriterOutput == nil {
- fh, err := fo.openFile()
- if err != nil {
- fo.fallbackLog("Could not open %#v: %s", fo.path, err)
- return
- }
- fo.WriterOutput = NewWriterOutput(fh)
- }
- fo.WriterOutput.Output(ll, message)
-}
-
-// Throw away any references/handles to the output file. This probably
-// means the admin wants to rotate the file out and have this process
-// open a new one. Close the underlying io.Writer if that is a thing
-// that it knows how to do.
-func (fo *FileWriterOutput) OnHup() {
- if fo.WriterOutput != nil {
- wc, ok := fo.WriterOutput.w.(io.Closer)
- if ok {
- err := wc.Close()
- if err != nil {
- fo.fallbackLog("Closing %#v failed: %s", fo.path, err)
- }
- }
- fo.WriterOutput = nil
- }
-}
diff --git a/vendor/github.com/spacemonkeygo/spacelog/output_other.go b/vendor/github.com/spacemonkeygo/spacelog/output_other.go
deleted file mode 100644
index 2be240a17..000000000
--- a/vendor/github.com/spacemonkeygo/spacelog/output_other.go
+++ /dev/null
@@ -1,19 +0,0 @@
-// Copyright (C) 2014 Space Monkey, Inc.
-//
-// Licensed under the Apache License, Version 2.0 (the "License");
-// you may not use this file except in compliance with the License.
-// You may obtain a copy of the License at
-//
-// http://www.apache.org/licenses/LICENSE-2.0
-//
-// Unless required by applicable law or agreed to in writing, software
-// distributed under the License is distributed on an "AS IS" BASIS,
-// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-// See the License for the specific language governing permissions and
-// limitations under the License.
-
-// +build !windows
-
-package spacelog
-
-var platformNewline = []byte("\n")
diff --git a/vendor/github.com/spacemonkeygo/spacelog/output_windows.go b/vendor/github.com/spacemonkeygo/spacelog/output_windows.go
deleted file mode 100644
index 58b71daba..000000000
--- a/vendor/github.com/spacemonkeygo/spacelog/output_windows.go
+++ /dev/null
@@ -1,17 +0,0 @@
-// Copyright (C) 2014 Space Monkey, Inc.
-//
-// Licensed under the Apache License, Version 2.0 (the "License");
-// you may not use this file except in compliance with the License.
-// You may obtain a copy of the License at
-//
-// http://www.apache.org/licenses/LICENSE-2.0
-//
-// Unless required by applicable law or agreed to in writing, software
-// distributed under the License is distributed on an "AS IS" BASIS,
-// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-// See the License for the specific language governing permissions and
-// limitations under the License.
-
-package spacelog
-
-var platformNewline = []byte("\r\n")
diff --git a/vendor/github.com/spacemonkeygo/spacelog/setup.go b/vendor/github.com/spacemonkeygo/spacelog/setup.go
deleted file mode 100644
index 2c1cbcee6..000000000
--- a/vendor/github.com/spacemonkeygo/spacelog/setup.go
+++ /dev/null
@@ -1,189 +0,0 @@
-// Copyright (C) 2014 Space Monkey, Inc.
-//
-// Licensed under the Apache License, Version 2.0 (the "License");
-// you may not use this file except in compliance with the License.
-// You may obtain a copy of the License at
-//
-// http://www.apache.org/licenses/LICENSE-2.0
-//
-// Unless required by applicable law or agreed to in writing, software
-// distributed under the License is distributed on an "AS IS" BASIS,
-// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-// See the License for the specific language governing permissions and
-// limitations under the License.
-
-package spacelog
-
-import (
- "bytes"
- "fmt"
- "log"
- "math"
- "os"
- "os/signal"
- "regexp"
- "strings"
- "text/template"
-)
-
-// SetupConfig is a configuration struct meant to be used with
-// github.com/spacemonkeygo/flagfile/utils.Setup
-// but can be used independently.
-type SetupConfig struct {
- Output string `default:"stderr" usage:"log output. can be stdout, stderr, syslog, or a path"`
- Level string `default:"" usage:"base logger level"`
- Filter string `default:"" usage:"sets loggers matching this regular expression to the lowest level"`
- Format string `default:"" usage:"format string to use"`
- Stdlevel string `default:"warn" usage:"logger level for stdlib log integration"`
- Subproc string `default:"" usage:"process to run for stdout/stderr-captured logging. The command is first processed as a Go template that supports {{.Facility}}, {{.Level}}, and {{.Name}} fields, and then passed to sh. If set, will redirect stdout and stderr to the given process. A good default is 'setsid logger --priority {{.Facility}}.{{.Level}} --tag {{.Name}}'"`
- Buffer int `default:"0" usage:"the number of messages to buffer. 0 for no buffer"`
- // Facility defaults to syslog.LOG_USER (which is 8)
- Facility int `default:"8" usage:"the syslog facility to use if syslog output is configured"`
- HupRotate bool `default:"false" usage:"if true, sending a HUP signal will reopen log files"`
- Config string `default:"" usage:"a semicolon separated list of logger=level; sets each log to the corresponding level"`
-}
-
-var (
- stdlog = GetLoggerNamed("stdlog")
- funcmap = template.FuncMap{"ColorizeLevel": ColorizeLevel}
-)
-
-// SetFormatMethod adds functions to the template function map, such that
-// command-line and Setup provided templates can call methods added to the map
-// via this method. The map comes prepopulated with ColorizeLevel, but can be
-// overridden. SetFormatMethod should be called (if at all) before one of
-// this package's Setup methods.
-func SetFormatMethod(name string, fn interface{}) {
- funcmap[name] = fn
-}
-
-// MustSetup is the same as Setup, but panics instead of returning an error
-func MustSetup(procname string, config SetupConfig) {
- err := Setup(procname, config)
- if err != nil {
- panic(err)
- }
-}
-
-type subprocInfo struct {
- Facility string
- Level string
- Name string
-}
-
-// Setup takes a given procname and sets spacelog up with the given
-// configuration. Setup supports:
-// * capturing stdout and stderr to a subprocess
-// * configuring the default level
-// * configuring log filters (enabling only some loggers)
-// * configuring the logging template
-// * configuring the output (a file, syslog, stdout, stderr)
-// * configuring log event buffering
-// * capturing all standard library logging with configurable log level
-// It is expected that this method will be called once at process start.
-func Setup(procname string, config SetupConfig) error {
- if config.Subproc != "" {
- t, err := template.New("subproc").Parse(config.Subproc)
- if err != nil {
- return err
- }
- var buf bytes.Buffer
- err = t.Execute(&buf, &subprocInfo{
- Facility: fmt.Sprintf("%d", config.Facility),
- Level: fmt.Sprintf("%d", 2), // syslog.LOG_CRIT
- Name: procname})
- if err != nil {
- return err
- }
- err = CaptureOutputToProcess("sh", "-c", string(buf.Bytes()))
- if err != nil {
- return err
- }
- }
- if config.Config != "" {
- err := ConfigureLoggers(config.Config)
- if err != nil {
- return err
- }
- }
- if config.Level != "" {
- level_val, err := LevelFromString(config.Level)
- if err != nil {
- return err
- }
- if level_val != DefaultLevel {
- SetLevel(nil, level_val)
- }
- }
- if config.Filter != "" {
- re, err := regexp.Compile(config.Filter)
- if err != nil {
- return err
- }
- SetLevel(re, LogLevel(math.MinInt32))
- }
- var t *template.Template
- if config.Format != "" {
- var err error
- t, err = template.New("user").Funcs(funcmap).Parse(config.Format)
- if err != nil {
- return err
- }
- }
- var textout TextOutput
- switch strings.ToLower(config.Output) {
- case "syslog":
- w, err := NewSyslogOutput(SyslogPriority(config.Facility), procname)
- if err != nil {
- return err
- }
- if t == nil {
- t = SyslogTemplate
- }
- textout = w
- case "stdout":
- if t == nil {
- t = DefaultTemplate
- }
- textout = NewWriterOutput(os.Stdout)
- case "stderr", "":
- if t == nil {
- t = DefaultTemplate
- }
- textout = NewWriterOutput(os.Stderr)
- default:
- if t == nil {
- t = StandardTemplate
- }
- var err error
- textout, err = NewFileWriterOutput(config.Output)
- if err != nil {
- return err
- }
- }
- if config.HupRotate {
- if hh, ok := textout.(HupHandlingTextOutput); ok {
- sigchan := make(chan os.Signal)
- signal.Notify(sigchan, sigHUP)
- go func() {
- for _ = range sigchan {
- hh.OnHup()
- }
- }()
- }
- }
- if config.Buffer > 0 {
- textout = NewBufferedOutput(textout, config.Buffer)
- }
- SetHandler(nil, NewTextHandler(t, textout))
- log.SetFlags(log.Lshortfile)
- if config.Stdlevel == "" {
- config.Stdlevel = "warn"
- }
- stdlog_level_val, err := LevelFromString(config.Stdlevel)
- if err != nil {
- return err
- }
- log.SetOutput(stdlog.WriterWithoutCaller(stdlog_level_val))
- return nil
-}
diff --git a/vendor/github.com/spacemonkeygo/spacelog/sighup_appengine.go b/vendor/github.com/spacemonkeygo/spacelog/sighup_appengine.go
deleted file mode 100644
index c12ed9617..000000000
--- a/vendor/github.com/spacemonkeygo/spacelog/sighup_appengine.go
+++ /dev/null
@@ -1,37 +0,0 @@
-// Copyright (C) 2017 Space Monkey, Inc.
-//
-// Licensed under the Apache License, Version 2.0 (the "License");
-// you may not use this file except in compliance with the License.
-// You may obtain a copy of the License at
-//
-// http://www.apache.org/licenses/LICENSE-2.0
-//
-// Unless required by applicable law or agreed to in writing, software
-// distributed under the License is distributed on an "AS IS" BASIS,
-// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-// See the License for the specific language governing permissions and
-// limitations under the License.
-
-// +build appengine
-
-package spacelog
-
-import (
- "strconv"
-)
-
-const (
- sigHUP = syscallSignal(0x1)
-)
-
-type syscallSignal int
-
-func (s syscallSignal) Signal() {}
-
-func (s syscallSignal) String() string {
- switch s {
- case sigHUP:
- return "hangup"
- }
- return "signal " + strconv.Itoa(int(s))
-}
diff --git a/vendor/github.com/spacemonkeygo/spacelog/sighup_other.go b/vendor/github.com/spacemonkeygo/spacelog/sighup_other.go
deleted file mode 100644
index 0e033a8de..000000000
--- a/vendor/github.com/spacemonkeygo/spacelog/sighup_other.go
+++ /dev/null
@@ -1,23 +0,0 @@
-// Copyright (C) 2017 Space Monkey, Inc.
-//
-// Licensed under the Apache License, Version 2.0 (the "License");
-// you may not use this file except in compliance with the License.
-// You may obtain a copy of the License at
-//
-// http://www.apache.org/licenses/LICENSE-2.0
-//
-// Unless required by applicable law or agreed to in writing, software
-// distributed under the License is distributed on an "AS IS" BASIS,
-// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-// See the License for the specific language governing permissions and
-// limitations under the License.
-
-// +build !appengine
-
-package spacelog
-
-import "syscall"
-
-const (
- sigHUP = syscall.SIGHUP
-)
diff --git a/vendor/github.com/spacemonkeygo/spacelog/syslog.go b/vendor/github.com/spacemonkeygo/spacelog/syslog.go
deleted file mode 100644
index c2317b6c8..000000000
--- a/vendor/github.com/spacemonkeygo/spacelog/syslog.go
+++ /dev/null
@@ -1,65 +0,0 @@
-// Copyright (C) 2014 Space Monkey, Inc.
-//
-// Licensed under the Apache License, Version 2.0 (the "License");
-// you may not use this file except in compliance with the License.
-// You may obtain a copy of the License at
-//
-// http://www.apache.org/licenses/LICENSE-2.0
-//
-// Unless required by applicable law or agreed to in writing, software
-// distributed under the License is distributed on an "AS IS" BASIS,
-// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-// See the License for the specific language governing permissions and
-// limitations under the License.
-
-// +build !windows
-
-package spacelog
-
-import (
- "bytes"
- "log/syslog"
-)
-
-type SyslogPriority syslog.Priority
-
-// SyslogOutput is a syslog client that matches the TextOutput interface
-type SyslogOutput struct {
- w *syslog.Writer
-}
-
-// NewSyslogOutput returns a TextOutput object that writes to syslog using
-// the given facility and tag. The log level will be determined by the log
-// event.
-func NewSyslogOutput(facility SyslogPriority, tag string) (
- TextOutput, error) {
- w, err := syslog.New(syslog.Priority(facility), tag)
- if err != nil {
- return nil, err
- }
- return &SyslogOutput{w: w}, nil
-}
-
-func (o *SyslogOutput) Output(level LogLevel, message []byte) {
- level = level.Match()
- for _, msg := range bytes.Split(message, []byte{'\n'}) {
- switch level {
- case Critical:
- o.w.Crit(string(msg))
- case Error:
- o.w.Err(string(msg))
- case Warning:
- o.w.Warning(string(msg))
- case Notice:
- o.w.Notice(string(msg))
- case Info:
- o.w.Info(string(msg))
- case Debug:
- fallthrough
- case Trace:
- fallthrough
- default:
- o.w.Debug(string(msg))
- }
- }
-}
diff --git a/vendor/github.com/spacemonkeygo/spacelog/templates.go b/vendor/github.com/spacemonkeygo/spacelog/templates.go
deleted file mode 100644
index 959033dad..000000000
--- a/vendor/github.com/spacemonkeygo/spacelog/templates.go
+++ /dev/null
@@ -1,69 +0,0 @@
-// Copyright (C) 2014 Space Monkey, Inc.
-//
-// Licensed under the Apache License, Version 2.0 (the "License");
-// you may not use this file except in compliance with the License.
-// You may obtain a copy of the License at
-//
-// http://www.apache.org/licenses/LICENSE-2.0
-//
-// Unless required by applicable law or agreed to in writing, software
-// distributed under the License is distributed on an "AS IS" BASIS,
-// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-// See the License for the specific language governing permissions and
-// limitations under the License.
-
-package spacelog
-
-import (
- "text/template"
-)
-
-// ColorizeLevel returns a TermColor byte sequence for the appropriate color
-// for the level. If you'd like to configure your own color choices, you can
-// make your own template with its own function map to your own colorize
-// function.
-func ColorizeLevel(level LogLevel) string {
- switch level.Match() {
- case Critical, Error:
- return TermColors{}.Red()
- case Warning:
- return TermColors{}.Magenta()
- case Notice:
- return TermColors{}.Yellow()
- case Info, Debug, Trace:
- return TermColors{}.Green()
- }
- return ""
-}
-
-var (
- // ColorTemplate uses the default ColorizeLevel method for color choices.
- ColorTemplate = template.Must(template.New("color").Funcs(template.FuncMap{
- "ColorizeLevel": ColorizeLevel}).Parse(
- `{{.Blue}}{{.Date}} {{.Time}}{{.Reset}} ` +
- `{{.Bold}}{{ColorizeLevel .Level}}{{.LevelJustified}}{{.Reset}} ` +
- `{{.Underline}}{{.LoggerName}}{{.Reset}} ` +
- `{{if .Filename}}{{.Filename}}:{{.Line}} {{end}}- ` +
- `{{ColorizeLevel .Level}}{{.Message}}{{.Reset}}`))
-
- // StandardTemplate is like ColorTemplate with no color.
- StandardTemplate = template.Must(template.New("standard").Parse(
- `{{.Date}} {{.Time}} ` +
- `{{.Level}} {{.LoggerName}} ` +
- `{{if .Filename}}{{.Filename}}:{{.Line}} {{end}}` +
- `- {{.Message}}`))
-
- // SyslogTemplate is missing the date and time as syslog adds those
- // things.
- SyslogTemplate = template.Must(template.New("syslog").Parse(
- `{{.Level}} {{.LoggerName}} ` +
- `{{if .Filename}}{{.Filename}}:{{.Line}} {{end}}` +
- `- {{.Message}}`))
-
- // StdlibTemplate is missing the date and time as the stdlib logger often
- // adds those things.
- StdlibTemplate = template.Must(template.New("stdlib").Parse(
- `{{.Level}} {{.LoggerName}} ` +
- `{{if .Filename}}{{.Filename}}:{{.Line}} {{end}}` +
- `- {{.Message}}`))
-)
diff --git a/vendor/github.com/spacemonkeygo/spacelog/templates_others.go b/vendor/github.com/spacemonkeygo/spacelog/templates_others.go
deleted file mode 100644
index 114e2e143..000000000
--- a/vendor/github.com/spacemonkeygo/spacelog/templates_others.go
+++ /dev/null
@@ -1,22 +0,0 @@
-// Copyright (C) 2014 Space Monkey, Inc.
-//
-// Licensed under the Apache License, Version 2.0 (the "License");
-// you may not use this file except in compliance with the License.
-// You may obtain a copy of the License at
-//
-// http://www.apache.org/licenses/LICENSE-2.0
-//
-// Unless required by applicable law or agreed to in writing, software
-// distributed under the License is distributed on an "AS IS" BASIS,
-// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-// See the License for the specific language governing permissions and
-// limitations under the License.
-
-// +build !windows
-
-package spacelog
-
-var (
- // DefaultTemplate is default template for stdout/stderr for the platform
- DefaultTemplate = ColorTemplate
-)
diff --git a/vendor/github.com/spacemonkeygo/spacelog/templates_windows.go b/vendor/github.com/spacemonkeygo/spacelog/templates_windows.go
deleted file mode 100644
index 512b60048..000000000
--- a/vendor/github.com/spacemonkeygo/spacelog/templates_windows.go
+++ /dev/null
@@ -1,20 +0,0 @@
-// Copyright (C) 2014 Space Monkey, Inc.
-//
-// Licensed under the Apache License, Version 2.0 (the "License");
-// you may not use this file except in compliance with the License.
-// You may obtain a copy of the License at
-//
-// http://www.apache.org/licenses/LICENSE-2.0
-//
-// Unless required by applicable law or agreed to in writing, software
-// distributed under the License is distributed on an "AS IS" BASIS,
-// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-// See the License for the specific language governing permissions and
-// limitations under the License.
-
-package spacelog
-
-var (
- // DefaultTemplate is default template for stdout/stderr for the platform
- DefaultTemplate = StandardTemplate
-)
diff --git a/vendor/github.com/spacemonkeygo/spacelog/text.go b/vendor/github.com/spacemonkeygo/spacelog/text.go
deleted file mode 100644
index 8b36ce99f..000000000
--- a/vendor/github.com/spacemonkeygo/spacelog/text.go
+++ /dev/null
@@ -1,80 +0,0 @@
-// Copyright (C) 2014 Space Monkey, Inc.
-//
-// Licensed under the Apache License, Version 2.0 (the "License");
-// you may not use this file except in compliance with the License.
-// You may obtain a copy of the License at
-//
-// http://www.apache.org/licenses/LICENSE-2.0
-//
-// Unless required by applicable law or agreed to in writing, software
-// distributed under the License is distributed on an "AS IS" BASIS,
-// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-// See the License for the specific language governing permissions and
-// limitations under the License.
-
-package spacelog
-
-import (
- "bytes"
- "fmt"
- "runtime"
- "strings"
- "sync"
- "text/template"
- "time"
-)
-
-// TextHandler is the default implementation of the Handler interface. A
-// TextHandler, on log events, makes LogEvent structures, passes them to the
-// configured template, and then passes that output to a configured TextOutput
-// interface.
-type TextHandler struct {
- mtx sync.RWMutex
- template *template.Template
- output TextOutput
-}
-
-// NewTextHandler creates a Handler that creates LogEvents, passes them to
-// the given template, and passes the result to output
-func NewTextHandler(t *template.Template, output TextOutput) *TextHandler {
- return &TextHandler{template: t, output: output}
-}
-
-// Log makes a LogEvent, formats it with the configured template, then passes
-// the output to configured output sink
-func (h *TextHandler) Log(logger_name string, level LogLevel, msg string,
- calldepth int) {
- h.mtx.RLock()
- output, template := h.output, h.template
- h.mtx.RUnlock()
- event := LogEvent{
- LoggerName: logger_name,
- Level: level,
- Message: strings.TrimRight(msg, "\n\r"),
- Timestamp: time.Now()}
- if calldepth >= 0 {
- _, event.Filepath, event.Line, _ = runtime.Caller(calldepth + 1)
- }
- var buf bytes.Buffer
- err := template.Execute(&buf, &event)
- if err != nil {
- output.Output(level, []byte(
- fmt.Sprintf("log format template failed: %s", err)))
- return
- }
- output.Output(level, buf.Bytes())
-}
-
-// SetTextTemplate changes the TextHandler's text formatting template
-func (h *TextHandler) SetTextTemplate(t *template.Template) {
- h.mtx.Lock()
- defer h.mtx.Unlock()
- h.template = t
-}
-
-// SetTextOutput changes the TextHandler's TextOutput sink
-func (h *TextHandler) SetTextOutput(output TextOutput) {
- h.mtx.Lock()
- defer h.mtx.Unlock()
- h.output = output
-}
diff --git a/vendor/go.opencensus.io/Makefile b/vendor/go.opencensus.io/Makefile
index b3ce3df30..d896edc99 100644
--- a/vendor/go.opencensus.io/Makefile
+++ b/vendor/go.opencensus.io/Makefile
@@ -91,7 +91,7 @@ embedmd:
.PHONY: install-tools
install-tools:
- go get -u golang.org/x/lint/golint
- go get -u golang.org/x/tools/cmd/cover
- go get -u golang.org/x/tools/cmd/goimports
- go get -u github.com/rakyll/embedmd
+ go install golang.org/x/lint/golint@latest
+ go install golang.org/x/tools/cmd/cover@latest
+ go install golang.org/x/tools/cmd/goimports@latest
+ go install github.com/rakyll/embedmd@latest
diff --git a/vendor/go.opencensus.io/opencensus.go b/vendor/go.opencensus.io/opencensus.go
index e5e4b4368..11e31f421 100644
--- a/vendor/go.opencensus.io/opencensus.go
+++ b/vendor/go.opencensus.io/opencensus.go
@@ -17,5 +17,5 @@ package opencensus // import "go.opencensus.io"
// Version is the current release version of OpenCensus in use.
func Version() string {
- return "0.23.0"
+ return "0.24.0"
}
diff --git a/vendor/go.opencensus.io/stats/doc.go b/vendor/go.opencensus.io/stats/doc.go
index 00d473ee0..31477a464 100644
--- a/vendor/go.opencensus.io/stats/doc.go
+++ b/vendor/go.opencensus.io/stats/doc.go
@@ -19,7 +19,7 @@ Package stats contains support for OpenCensus stats recording.
OpenCensus allows users to create typed measures, record measurements,
aggregate the collected data, and export the aggregated data.
-Measures
+# Measures
A measure represents a type of data point to be tracked and recorded.
For example, latency, request Mb/s, and response Mb/s are measures
@@ -33,7 +33,7 @@ Libraries can define and export measures. Application authors can then
create views and collect and break down measures by the tags they are
interested in.
-Recording measurements
+# Recording measurements
Measurement is a data point to be collected for a measure. For example,
for a latency (ms) measure, 100 is a measurement that represents a 100ms
@@ -49,7 +49,7 @@ Libraries can always record measurements, and applications can later decide
on which measurements they want to collect by registering views. This allows
libraries to turn on the instrumentation by default.
-Exemplars
+# Exemplars
For a given recorded measurement, the associated exemplar is a diagnostic map
that gives more information about the measurement.
@@ -64,6 +64,5 @@ then the trace span will be added to the exemplar associated with the measuremen
When exported to a supporting back end, you should be able to easily navigate
to example traces that fell into each bucket in the Distribution.
-
*/
package stats // import "go.opencensus.io/stats"
diff --git a/vendor/go.opencensus.io/stats/internal/record.go b/vendor/go.opencensus.io/stats/internal/record.go
index 36935e629..436dc791f 100644
--- a/vendor/go.opencensus.io/stats/internal/record.go
+++ b/vendor/go.opencensus.io/stats/internal/record.go
@@ -21,5 +21,11 @@ import (
// DefaultRecorder will be called for each Record call.
var DefaultRecorder func(tags *tag.Map, measurement interface{}, attachments map[string]interface{})
+// MeasurementRecorder will be called for each Record call. This is the same as DefaultRecorder but
+// avoids interface{} conversion.
+// This will be a func(tags *tag.Map, measurement []Measurement, attachments map[string]interface{}) type,
+// but is interface{} here to avoid import loops
+var MeasurementRecorder interface{}
+
// SubscriptionReporter reports when a view subscribed with a measure.
var SubscriptionReporter func(measure string)
diff --git a/vendor/go.opencensus.io/stats/record.go b/vendor/go.opencensus.io/stats/record.go
index 2b9728346..8b5b99803 100644
--- a/vendor/go.opencensus.io/stats/record.go
+++ b/vendor/go.opencensus.io/stats/record.go
@@ -86,10 +86,29 @@ func createRecordOption(ros ...Options) *recordOptions {
return o
}
+type measurementRecorder = func(tags *tag.Map, measurement []Measurement, attachments map[string]interface{})
+
// Record records one or multiple measurements with the same context at once.
// If there are any tags in the context, measurements will be tagged with them.
func Record(ctx context.Context, ms ...Measurement) {
- RecordWithOptions(ctx, WithMeasurements(ms...))
+ // Record behaves the same as RecordWithOptions, but because we do not have to handle generic functionality
+ // (RecordOptions) we can reduce some allocations to speed up this hot path
+ if len(ms) == 0 {
+ return
+ }
+ recorder := internal.MeasurementRecorder.(measurementRecorder)
+ record := false
+ for _, m := range ms {
+ if m.desc.subscribed() {
+ record = true
+ break
+ }
+ }
+ if !record {
+ return
+ }
+ recorder(tag.FromContext(ctx), ms, nil)
+ return
}
// RecordWithTags records one or multiple measurements at once.
diff --git a/vendor/go.opencensus.io/stats/view/aggregation.go b/vendor/go.opencensus.io/stats/view/aggregation.go
index 748bd568c..61f72d20d 100644
--- a/vendor/go.opencensus.io/stats/view/aggregation.go
+++ b/vendor/go.opencensus.io/stats/view/aggregation.go
@@ -90,9 +90,9 @@ func Sum() *Aggregation {
//
// If len(bounds) >= 2 then the boundaries for bucket index i are:
//
-// [-infinity, bounds[i]) for i = 0
-// [bounds[i-1], bounds[i]) for 0 < i < length
-// [bounds[i-1], +infinity) for i = length
+// [-infinity, bounds[i]) for i = 0
+// [bounds[i-1], bounds[i]) for 0 < i < length
+// [bounds[i-1], +infinity) for i = length
//
// If len(bounds) is 0 then there is no histogram associated with the
// distribution. There will be a single bucket with boundaries
diff --git a/vendor/go.opencensus.io/stats/view/collector.go b/vendor/go.opencensus.io/stats/view/collector.go
index ac22c93a2..bcd6e08c7 100644
--- a/vendor/go.opencensus.io/stats/view/collector.go
+++ b/vendor/go.opencensus.io/stats/view/collector.go
@@ -59,8 +59,15 @@ func (c *collector) clearRows() {
// encodeWithKeys encodes the map by using values
// only associated with the keys provided.
func encodeWithKeys(m *tag.Map, keys []tag.Key) []byte {
+ // Compute the buffer length we will need ahead of time to avoid resizing later
+ reqLen := 0
+ for _, k := range keys {
+ s, _ := m.Value(k)
+ // We will store each key + its length
+ reqLen += len(s) + 1
+ }
vb := &tagencoding.Values{
- Buffer: make([]byte, len(keys)),
+ Buffer: make([]byte, reqLen),
}
for _, k := range keys {
v, _ := m.Value(k)
diff --git a/vendor/go.opencensus.io/stats/view/doc.go b/vendor/go.opencensus.io/stats/view/doc.go
index 7bbedfe1f..60bf0e392 100644
--- a/vendor/go.opencensus.io/stats/view/doc.go
+++ b/vendor/go.opencensus.io/stats/view/doc.go
@@ -34,7 +34,7 @@
// Libraries can define views but it is recommended that in most cases registering
// views be left up to applications.
//
-// Exporting
+// # Exporting
//
// Collected and aggregated data can be exported to a metric collection
// backend by registering its exporter.
diff --git a/vendor/go.opencensus.io/stats/view/worker.go b/vendor/go.opencensus.io/stats/view/worker.go
index 6e8d18b7f..6a79cd8a3 100644
--- a/vendor/go.opencensus.io/stats/view/worker.go
+++ b/vendor/go.opencensus.io/stats/view/worker.go
@@ -33,6 +33,7 @@ func init() {
defaultWorker = NewMeter().(*worker)
go defaultWorker.start()
internal.DefaultRecorder = record
+ internal.MeasurementRecorder = recordMeasurement
}
type measureRef struct {
@@ -199,11 +200,21 @@ func record(tags *tag.Map, ms interface{}, attachments map[string]interface{}) {
defaultWorker.Record(tags, ms, attachments)
}
+func recordMeasurement(tags *tag.Map, ms []stats.Measurement, attachments map[string]interface{}) {
+ defaultWorker.recordMeasurement(tags, ms, attachments)
+}
+
// Record records a set of measurements ms associated with the given tags and attachments.
func (w *worker) Record(tags *tag.Map, ms interface{}, attachments map[string]interface{}) {
+ w.recordMeasurement(tags, ms.([]stats.Measurement), attachments)
+}
+
+// recordMeasurement records a set of measurements ms associated with the given tags and attachments.
+// This is the same as Record but without an interface{} type to avoid allocations
+func (w *worker) recordMeasurement(tags *tag.Map, ms []stats.Measurement, attachments map[string]interface{}) {
req := &recordReq{
tm: tags,
- ms: ms.([]stats.Measurement),
+ ms: ms,
attachments: attachments,
t: time.Now(),
}
@@ -221,6 +232,11 @@ func SetReportingPeriod(d time.Duration) {
defaultWorker.SetReportingPeriod(d)
}
+// Stop stops the default worker.
+func Stop() {
+ defaultWorker.Stop()
+}
+
// SetReportingPeriod sets the interval between reporting aggregated views in
// the program. If duration is less than or equal to zero, it enables the
// default behavior.
@@ -281,7 +297,7 @@ func (w *worker) start() {
case <-w.quit:
w.timer.Stop()
close(w.c)
- w.done <- true
+ close(w.done)
return
}
}
@@ -290,8 +306,11 @@ func (w *worker) start() {
func (w *worker) Stop() {
prodMgr := metricproducer.GlobalManager()
prodMgr.DeleteProducer(w)
-
- w.quit <- true
+ select {
+ case <-w.quit:
+ default:
+ close(w.quit)
+ }
<-w.done
}
diff --git a/vendor/go.opencensus.io/tag/profile_19.go b/vendor/go.opencensus.io/tag/profile_19.go
index b34d95e34..8fb17226f 100644
--- a/vendor/go.opencensus.io/tag/profile_19.go
+++ b/vendor/go.opencensus.io/tag/profile_19.go
@@ -12,6 +12,7 @@
// See the License for the specific language governing permissions and
// limitations under the License.
+//go:build go1.9
// +build go1.9
package tag
diff --git a/vendor/go.opencensus.io/tag/profile_not19.go b/vendor/go.opencensus.io/tag/profile_not19.go
index 83adbce56..e28cf13cd 100644
--- a/vendor/go.opencensus.io/tag/profile_not19.go
+++ b/vendor/go.opencensus.io/tag/profile_not19.go
@@ -12,6 +12,7 @@
// See the License for the specific language governing permissions and
// limitations under the License.
+//go:build !go1.9
// +build !go1.9
package tag
diff --git a/vendor/go.opencensus.io/trace/doc.go b/vendor/go.opencensus.io/trace/doc.go
index 04b1ee4f3..7a1616a55 100644
--- a/vendor/go.opencensus.io/trace/doc.go
+++ b/vendor/go.opencensus.io/trace/doc.go
@@ -18,24 +18,23 @@ Package trace contains support for OpenCensus distributed tracing.
The following assumes a basic familiarity with OpenCensus concepts.
See http://opencensus.io
-
-Exporting Traces
+# Exporting Traces
To export collected tracing data, register at least one exporter. You can use
one of the provided exporters or write your own.
- trace.RegisterExporter(exporter)
+ trace.RegisterExporter(exporter)
By default, traces will be sampled relatively rarely. To change the sampling
frequency for your entire program, call ApplyConfig. Use a ProbabilitySampler
to sample a subset of traces, or use AlwaysSample to collect a trace on every run:
- trace.ApplyConfig(trace.Config{DefaultSampler: trace.AlwaysSample()})
+ trace.ApplyConfig(trace.Config{DefaultSampler: trace.AlwaysSample()})
Be careful about using trace.AlwaysSample in a production application with
significant traffic: a new trace will be started and exported for every request.
-Adding Spans to a Trace
+# Adding Spans to a Trace
A trace consists of a tree of spans. In Go, the current span is carried in a
context.Context.
@@ -44,8 +43,8 @@ It is common to want to capture all the activity of a function call in a span. F
this to work, the function must take a context.Context as a parameter. Add these two
lines to the top of the function:
- ctx, span := trace.StartSpan(ctx, "example.com/Run")
- defer span.End()
+ ctx, span := trace.StartSpan(ctx, "example.com/Run")
+ defer span.End()
StartSpan will create a new top-level span if the context
doesn't contain another span, otherwise it will create a child span.
diff --git a/vendor/go.opencensus.io/trace/lrumap.go b/vendor/go.opencensus.io/trace/lrumap.go
index 908c2497e..80095a5f6 100644
--- a/vendor/go.opencensus.io/trace/lrumap.go
+++ b/vendor/go.opencensus.io/trace/lrumap.go
@@ -44,7 +44,7 @@ func (lm lruMap) len() int {
}
func (lm lruMap) keys() []interface{} {
- keys := make([]interface{}, len(lm.cacheKeys))
+ keys := make([]interface{}, 0, len(lm.cacheKeys))
for k := range lm.cacheKeys {
keys = append(keys, k)
}
diff --git a/vendor/go.opencensus.io/trace/trace_go11.go b/vendor/go.opencensus.io/trace/trace_go11.go
index b7d8aaf28..b8fc1e495 100644
--- a/vendor/go.opencensus.io/trace/trace_go11.go
+++ b/vendor/go.opencensus.io/trace/trace_go11.go
@@ -12,6 +12,7 @@
// See the License for the specific language governing permissions and
// limitations under the License.
+//go:build go1.11
// +build go1.11
package trace
diff --git a/vendor/go.opencensus.io/trace/trace_nongo11.go b/vendor/go.opencensus.io/trace/trace_nongo11.go
index e25419859..da488fc87 100644
--- a/vendor/go.opencensus.io/trace/trace_nongo11.go
+++ b/vendor/go.opencensus.io/trace/trace_nongo11.go
@@ -12,6 +12,7 @@
// See the License for the specific language governing permissions and
// limitations under the License.
+//go:build !go1.11
// +build !go1.11
package trace
diff --git a/vendor/go.uber.org/atomic/CHANGELOG.md b/vendor/go.uber.org/atomic/CHANGELOG.md
index 5fe03f21b..6f87f33fa 100644
--- a/vendor/go.uber.org/atomic/CHANGELOG.md
+++ b/vendor/go.uber.org/atomic/CHANGELOG.md
@@ -4,6 +4,16 @@ All notable changes to this project will be documented in this file.
The format is based on [Keep a Changelog](https://keepachangelog.com/en/1.0.0/),
and this project adheres to [Semantic Versioning](https://semver.org/spec/v2.0.0.html).
+## [1.11.0] - 2023-05-02
+### Fixed
+- Fix initialization of `Value` wrappers.
+
+### Added
+- Add `String` method to `atomic.Pointer[T]` type allowing users to safely print
+underlying values of pointers.
+
+[1.11.0]: https://github.com/uber-go/atomic/compare/v1.10.0...v1.11.0
+
## [1.10.0] - 2022-08-11
### Added
- Add `atomic.Float32` type for atomic operations on `float32`.
diff --git a/vendor/go.uber.org/atomic/bool.go b/vendor/go.uber.org/atomic/bool.go
index dfa2085f4..f0a2ddd14 100644
--- a/vendor/go.uber.org/atomic/bool.go
+++ b/vendor/go.uber.org/atomic/bool.go
@@ -1,6 +1,6 @@
// @generated Code generated by gen-atomicwrapper.
-// Copyright (c) 2020-2022 Uber Technologies, Inc.
+// Copyright (c) 2020-2023 Uber Technologies, Inc.
//
// Permission is hereby granted, free of charge, to any person obtaining a copy
// of this software and associated documentation files (the "Software"), to deal
diff --git a/vendor/go.uber.org/atomic/duration.go b/vendor/go.uber.org/atomic/duration.go
index 6f4157445..7c23868fc 100644
--- a/vendor/go.uber.org/atomic/duration.go
+++ b/vendor/go.uber.org/atomic/duration.go
@@ -1,6 +1,6 @@
// @generated Code generated by gen-atomicwrapper.
-// Copyright (c) 2020-2022 Uber Technologies, Inc.
+// Copyright (c) 2020-2023 Uber Technologies, Inc.
//
// Permission is hereby granted, free of charge, to any person obtaining a copy
// of this software and associated documentation files (the "Software"), to deal
diff --git a/vendor/go.uber.org/atomic/error.go b/vendor/go.uber.org/atomic/error.go
index 27b23ea16..b7e3f1291 100644
--- a/vendor/go.uber.org/atomic/error.go
+++ b/vendor/go.uber.org/atomic/error.go
@@ -1,6 +1,6 @@
// @generated Code generated by gen-atomicwrapper.
-// Copyright (c) 2020-2022 Uber Technologies, Inc.
+// Copyright (c) 2020-2023 Uber Technologies, Inc.
//
// Permission is hereby granted, free of charge, to any person obtaining a copy
// of this software and associated documentation files (the "Software"), to deal
@@ -52,7 +52,17 @@ func (x *Error) Store(val error) {
// CompareAndSwap is an atomic compare-and-swap for error values.
func (x *Error) CompareAndSwap(old, new error) (swapped bool) {
- return x.v.CompareAndSwap(packError(old), packError(new))
+ if x.v.CompareAndSwap(packError(old), packError(new)) {
+ return true
+ }
+
+ if old == _zeroError {
+ // If the old value is the empty value, then it's possible the
+ // underlying Value hasn't been set and is nil, so retry with nil.
+ return x.v.CompareAndSwap(nil, packError(new))
+ }
+
+ return false
}
// Swap atomically stores the given error and returns the old
diff --git a/vendor/go.uber.org/atomic/float32.go b/vendor/go.uber.org/atomic/float32.go
index 5d535a6d2..62c36334f 100644
--- a/vendor/go.uber.org/atomic/float32.go
+++ b/vendor/go.uber.org/atomic/float32.go
@@ -1,6 +1,6 @@
// @generated Code generated by gen-atomicwrapper.
-// Copyright (c) 2020-2022 Uber Technologies, Inc.
+// Copyright (c) 2020-2023 Uber Technologies, Inc.
//
// Permission is hereby granted, free of charge, to any person obtaining a copy
// of this software and associated documentation files (the "Software"), to deal
diff --git a/vendor/go.uber.org/atomic/float64.go b/vendor/go.uber.org/atomic/float64.go
index 11d5189a5..5bc11caab 100644
--- a/vendor/go.uber.org/atomic/float64.go
+++ b/vendor/go.uber.org/atomic/float64.go
@@ -1,6 +1,6 @@
// @generated Code generated by gen-atomicwrapper.
-// Copyright (c) 2020-2022 Uber Technologies, Inc.
+// Copyright (c) 2020-2023 Uber Technologies, Inc.
//
// Permission is hereby granted, free of charge, to any person obtaining a copy
// of this software and associated documentation files (the "Software"), to deal
diff --git a/vendor/go.uber.org/atomic/int32.go b/vendor/go.uber.org/atomic/int32.go
index b9a68f42c..5320eac10 100644
--- a/vendor/go.uber.org/atomic/int32.go
+++ b/vendor/go.uber.org/atomic/int32.go
@@ -1,6 +1,6 @@
// @generated Code generated by gen-atomicint.
-// Copyright (c) 2020-2022 Uber Technologies, Inc.
+// Copyright (c) 2020-2023 Uber Technologies, Inc.
//
// Permission is hereby granted, free of charge, to any person obtaining a copy
// of this software and associated documentation files (the "Software"), to deal
diff --git a/vendor/go.uber.org/atomic/int64.go b/vendor/go.uber.org/atomic/int64.go
index 78d260976..460821d00 100644
--- a/vendor/go.uber.org/atomic/int64.go
+++ b/vendor/go.uber.org/atomic/int64.go
@@ -1,6 +1,6 @@
// @generated Code generated by gen-atomicint.
-// Copyright (c) 2020-2022 Uber Technologies, Inc.
+// Copyright (c) 2020-2023 Uber Technologies, Inc.
//
// Permission is hereby granted, free of charge, to any person obtaining a copy
// of this software and associated documentation files (the "Software"), to deal
diff --git a/vendor/go.uber.org/atomic/pointer_go118.go b/vendor/go.uber.org/atomic/pointer_go118.go
index e0f47dba4..1fb6c03b2 100644
--- a/vendor/go.uber.org/atomic/pointer_go118.go
+++ b/vendor/go.uber.org/atomic/pointer_go118.go
@@ -18,43 +18,14 @@
// OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
// THE SOFTWARE.
-//go:build go1.18 && !go1.19
-// +build go1.18,!go1.19
+//go:build go1.18
+// +build go1.18
package atomic
-import "unsafe"
+import "fmt"
-type Pointer[T any] struct {
- _ nocmp // disallow non-atomic comparison
- p UnsafePointer
-}
-
-// NewPointer creates a new Pointer.
-func NewPointer[T any](v *T) *Pointer[T] {
- var p Pointer[T]
- if v != nil {
- p.p.Store(unsafe.Pointer(v))
- }
- return &p
-}
-
-// Load atomically loads the wrapped value.
-func (p *Pointer[T]) Load() *T {
- return (*T)(p.p.Load())
-}
-
-// Store atomically stores the passed value.
-func (p *Pointer[T]) Store(val *T) {
- p.p.Store(unsafe.Pointer(val))
-}
-
-// Swap atomically swaps the wrapped pointer and returns the old value.
-func (p *Pointer[T]) Swap(val *T) (old *T) {
- return (*T)(p.p.Swap(unsafe.Pointer(val)))
-}
-
-// CompareAndSwap is an atomic compare-and-swap.
-func (p *Pointer[T]) CompareAndSwap(old, new *T) (swapped bool) {
- return p.p.CompareAndSwap(unsafe.Pointer(old), unsafe.Pointer(new))
+// String returns a human readable representation of a Pointer's underlying value.
+func (p *Pointer[T]) String() string {
+ return fmt.Sprint(p.Load())
}
diff --git a/vendor/go.uber.org/atomic/pointer_go118_pre119.go b/vendor/go.uber.org/atomic/pointer_go118_pre119.go
new file mode 100644
index 000000000..e0f47dba4
--- /dev/null
+++ b/vendor/go.uber.org/atomic/pointer_go118_pre119.go
@@ -0,0 +1,60 @@
+// Copyright (c) 2022 Uber Technologies, Inc.
+//
+// Permission is hereby granted, free of charge, to any person obtaining a copy
+// of this software and associated documentation files (the "Software"), to deal
+// in the Software without restriction, including without limitation the rights
+// to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
+// copies of the Software, and to permit persons to whom the Software is
+// furnished to do so, subject to the following conditions:
+//
+// The above copyright notice and this permission notice shall be included in
+// all copies or substantial portions of the Software.
+//
+// THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+// IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+// FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
+// AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
+// LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
+// OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
+// THE SOFTWARE.
+
+//go:build go1.18 && !go1.19
+// +build go1.18,!go1.19
+
+package atomic
+
+import "unsafe"
+
+type Pointer[T any] struct {
+ _ nocmp // disallow non-atomic comparison
+ p UnsafePointer
+}
+
+// NewPointer creates a new Pointer.
+func NewPointer[T any](v *T) *Pointer[T] {
+ var p Pointer[T]
+ if v != nil {
+ p.p.Store(unsafe.Pointer(v))
+ }
+ return &p
+}
+
+// Load atomically loads the wrapped value.
+func (p *Pointer[T]) Load() *T {
+ return (*T)(p.p.Load())
+}
+
+// Store atomically stores the passed value.
+func (p *Pointer[T]) Store(val *T) {
+ p.p.Store(unsafe.Pointer(val))
+}
+
+// Swap atomically swaps the wrapped pointer and returns the old value.
+func (p *Pointer[T]) Swap(val *T) (old *T) {
+ return (*T)(p.p.Swap(unsafe.Pointer(val)))
+}
+
+// CompareAndSwap is an atomic compare-and-swap.
+func (p *Pointer[T]) CompareAndSwap(old, new *T) (swapped bool) {
+ return p.p.CompareAndSwap(unsafe.Pointer(old), unsafe.Pointer(new))
+}
diff --git a/vendor/go.uber.org/atomic/string.go b/vendor/go.uber.org/atomic/string.go
index c4bea70f4..061466c5b 100644
--- a/vendor/go.uber.org/atomic/string.go
+++ b/vendor/go.uber.org/atomic/string.go
@@ -1,6 +1,6 @@
// @generated Code generated by gen-atomicwrapper.
-// Copyright (c) 2020-2022 Uber Technologies, Inc.
+// Copyright (c) 2020-2023 Uber Technologies, Inc.
//
// Permission is hereby granted, free of charge, to any person obtaining a copy
// of this software and associated documentation files (the "Software"), to deal
@@ -42,24 +42,31 @@ func NewString(val string) *String {
// Load atomically loads the wrapped string.
func (x *String) Load() string {
- if v := x.v.Load(); v != nil {
- return v.(string)
- }
- return _zeroString
+ return unpackString(x.v.Load())
}
// Store atomically stores the passed string.
func (x *String) Store(val string) {
- x.v.Store(val)
+ x.v.Store(packString(val))
}
// CompareAndSwap is an atomic compare-and-swap for string values.
func (x *String) CompareAndSwap(old, new string) (swapped bool) {
- return x.v.CompareAndSwap(old, new)
+ if x.v.CompareAndSwap(packString(old), packString(new)) {
+ return true
+ }
+
+ if old == _zeroString {
+ // If the old value is the empty value, then it's possible the
+ // underlying Value hasn't been set and is nil, so retry with nil.
+ return x.v.CompareAndSwap(nil, packString(new))
+ }
+
+ return false
}
// Swap atomically stores the given string and returns the old
// value.
func (x *String) Swap(val string) (old string) {
- return x.v.Swap(val).(string)
+ return unpackString(x.v.Swap(packString(val)))
}
diff --git a/vendor/go.uber.org/atomic/string_ext.go b/vendor/go.uber.org/atomic/string_ext.go
index 1f63dfd5b..019109c86 100644
--- a/vendor/go.uber.org/atomic/string_ext.go
+++ b/vendor/go.uber.org/atomic/string_ext.go
@@ -1,4 +1,4 @@
-// Copyright (c) 2020-2022 Uber Technologies, Inc.
+// Copyright (c) 2020-2023 Uber Technologies, Inc.
//
// Permission is hereby granted, free of charge, to any person obtaining a copy
// of this software and associated documentation files (the "Software"), to deal
@@ -20,7 +20,18 @@
package atomic
-//go:generate bin/gen-atomicwrapper -name=String -type=string -wrapped=Value -compareandswap -swap -file=string.go
+//go:generate bin/gen-atomicwrapper -name=String -type=string -wrapped Value -pack packString -unpack unpackString -compareandswap -swap -file=string.go
+
+func packString(s string) interface{} {
+ return s
+}
+
+func unpackString(v interface{}) string {
+ if s, ok := v.(string); ok {
+ return s
+ }
+ return ""
+}
// String returns the wrapped value.
func (s *String) String() string {
diff --git a/vendor/go.uber.org/atomic/time.go b/vendor/go.uber.org/atomic/time.go
index 1660feb14..cc2a230c0 100644
--- a/vendor/go.uber.org/atomic/time.go
+++ b/vendor/go.uber.org/atomic/time.go
@@ -1,6 +1,6 @@
// @generated Code generated by gen-atomicwrapper.
-// Copyright (c) 2020-2022 Uber Technologies, Inc.
+// Copyright (c) 2020-2023 Uber Technologies, Inc.
//
// Permission is hereby granted, free of charge, to any person obtaining a copy
// of this software and associated documentation files (the "Software"), to deal
diff --git a/vendor/go.uber.org/atomic/uint32.go b/vendor/go.uber.org/atomic/uint32.go
index d6f04a96d..4adc294ac 100644
--- a/vendor/go.uber.org/atomic/uint32.go
+++ b/vendor/go.uber.org/atomic/uint32.go
@@ -1,6 +1,6 @@
// @generated Code generated by gen-atomicint.
-// Copyright (c) 2020-2022 Uber Technologies, Inc.
+// Copyright (c) 2020-2023 Uber Technologies, Inc.
//
// Permission is hereby granted, free of charge, to any person obtaining a copy
// of this software and associated documentation files (the "Software"), to deal
diff --git a/vendor/go.uber.org/atomic/uint64.go b/vendor/go.uber.org/atomic/uint64.go
index 2574bdd5e..0e2eddb30 100644
--- a/vendor/go.uber.org/atomic/uint64.go
+++ b/vendor/go.uber.org/atomic/uint64.go
@@ -1,6 +1,6 @@
// @generated Code generated by gen-atomicint.
-// Copyright (c) 2020-2022 Uber Technologies, Inc.
+// Copyright (c) 2020-2023 Uber Technologies, Inc.
//
// Permission is hereby granted, free of charge, to any person obtaining a copy
// of this software and associated documentation files (the "Software"), to deal
diff --git a/vendor/go.uber.org/atomic/uintptr.go b/vendor/go.uber.org/atomic/uintptr.go
index 81b275a7a..7d5b000d6 100644
--- a/vendor/go.uber.org/atomic/uintptr.go
+++ b/vendor/go.uber.org/atomic/uintptr.go
@@ -1,6 +1,6 @@
// @generated Code generated by gen-atomicint.
-// Copyright (c) 2020-2022 Uber Technologies, Inc.
+// Copyright (c) 2020-2023 Uber Technologies, Inc.
//
// Permission is hereby granted, free of charge, to any person obtaining a copy
// of this software and associated documentation files (the "Software"), to deal
diff --git a/vendor/go.uber.org/dig/.codecov.yml b/vendor/go.uber.org/dig/.codecov.yml
new file mode 100644
index 000000000..149f56c9e
--- /dev/null
+++ b/vendor/go.uber.org/dig/.codecov.yml
@@ -0,0 +1,19 @@
+coverage:
+ range: 70..98
+ round: down
+ precision: 2
+
+ status:
+ project: # measuring the overall project coverage
+ default: # context, you can create multiple ones with custom titles
+ enabled: yes # must be yes|true to enable this status
+ target: 97 # specify the target coverage for each commit status
+ # option: "auto" (must increase from parent commit or pull request base)
+ # option: "X%" a static target percentage to hit
+ if_not_found: success # if parent is not found report status as success, error, or failure
+ if_ci_failed: error # if ci fails report status as success, error, or failure
+
+ patch:
+ default:
+ enabled: yes
+ target: 70
diff --git a/vendor/go.uber.org/dig/.gitignore b/vendor/go.uber.org/dig/.gitignore
new file mode 100644
index 000000000..d651c3573
--- /dev/null
+++ b/vendor/go.uber.org/dig/.gitignore
@@ -0,0 +1,12 @@
+/bin
+/vendor
+/.bench
+*.mem
+*.cpu
+*.test
+*.log
+*.out
+*.html
+*.coverprofile
+coverage.txt
+*.pprof
diff --git a/vendor/go.uber.org/dig/CHANGELOG.md b/vendor/go.uber.org/dig/CHANGELOG.md
new file mode 100644
index 000000000..2989c1bba
--- /dev/null
+++ b/vendor/go.uber.org/dig/CHANGELOG.md
@@ -0,0 +1,294 @@
+# Changelog
+All notable changes to this project will be documented in this file.
+
+The format is based on [Keep a Changelog](http://keepachangelog.com/en/1.0.0/)
+and this project adheres to [Semantic Versioning](http://semver.org/spec/v2.0.0.html).
+
+## [1.17.0] - 2023-05-02
+### Added
+- Allow using `dig.As` with `dig.Group`.
+- Add `FillInvokeInfo` Option and `InvokeInfo` struct to help
+ extract the types requested by an `Invoke` statement.
+- To get visibility into constructor and decorator calls, introduce
+ `WithCallback` Option to provide callback functions.
+
+[1.17.0]: https://github.com/uber-go/dig/compare/v1.16.1...v1.17.0
+
+## [1.16.1] - 2023-01-10
+### Fixed
+- A panic when `DryRun` was used with `Decorate`.
+
+[1.16.1]: https://github.com/uber-go/dig/compare/v1.16.0...v1.16.1
+
+## [1.16.0] - 2023-01-03
+### Added
+- Add `RecoverFromPanics` option, which provides panic-recovery mechanism for Container.
+- Add `Error` interface which enables distinguishing errors from Dig using standard `errors`
+ package.
+
+Thanks to @mie998 for their contribution(s) to this release.
+
+[1.16.0]: https://github.com/uber-go/dig/compare/v1.15.0...v1.16.0
+
+## [1.15.0] - 2022-08-02
+### Added
+- Support for `soft` value groups, which specify a value group that only gets populated
+ with values from already-executed constructors.
+
+### Fixed
+- Fix an issue with invoke order affecting results provided by private provides
+
+Thanks to @hbdf for their contributions to this release.
+
+[1.15.0]: https://github.com/uber-go/dig/compare/v1.14.1...v1.15.0
+
+## [1.14.1] - 2022-03-22
+### Fixed
+- Fix an issue where a dependency for a decoration supplied by another decorator in the
+ same scope is ignored.
+- Fix a panic when submitting a single value as a value group in `Scope.Decorate`.
+- Upon a provide error, make the error message contain the function named specified
+ by LocationForPC Option.
+
+[1.14.1]: https://github.com/uber-go/dig/compare/v1.14.0...v1.14.1
+
+## [1.14.0] - 2022-02-23
+### Added
+- Introduce `dig.Scope` which creates a scoped dependency injection
+ container to scope dependencies.
+- Introduce `Scope.Decorate` and `Container.Decorate` which allows a
+ decorator to modify a dependency already provided in the dependency graph.
+- Add `FillDecorateInfo` Option and `DecorateInfo` struct which exposes
+ information on what Dig was able to understand from the decorator provided
+ with `Scope.Decorate` or `Container.Decorate`.
+
+### Changed
+- The error message that appears when a cycle is detected in the dependency graph
+ has been changed slightly.
+
+### Fixed
+- A stack overflow bug that happens when cycles are introduced via self-pointing
+ dependencies with DeferAcyclicVerification.
+
+[1.14.0]: https://github.com/uber-go/dig/compare/v1.13.0...v1.14.0
+
+## [1.13.0] - 2021-09-21
+### Added
+- Introduce `As` option which supports providing a type as interface(s)
+ it implements to the container.
+- Add `LocationForPC` option which overrides the function inspection
+ for a program counter address to a provided function info.
+
+[1.13.0]: https://github.com/uber-go/dig/compare/v1.12.0...v1.13.0
+
+## [1.12.0] - 2021-07-29
+### Added
+- Support for ProvideInfo and FillProvideInfo that allow the caller of
+ `Provide` to get info about what dig understood from the constructor.
+
+[1.12.0]: https://github.com/uber-go/dig/compare/v1.11.0...v1.12.0
+
+## [1.11.0] - 2021-06-09
+### Added
+- Support unexported fields on `dig.In` structs with the
+ `ignore-unexported:"true` struct tag.
+
+[1.11.0]: https://github.com/uber-go/dig/compare/v1.10.0...v1.11.0
+
+## [1.10.0] - 2020-06-16
+### Added
+- Introduce `DryRun` Option which, when set to true, disables invocation
+ of functions supplied to `Provide` and `Invoke`. This option will be
+ used to build no-op containers, for example for `fx.ValidateApp` method.
+
+[1.10.0]: https://github.com/uber-go/dig/compare/v1.9.0...v1.10.0
+
+## [1.9.0] - 2020-03-31
+### Added
+- GraphViz visualization of the graph now includes names of packages next to
+ constructors.
+- Added a `flatten` modifier to group tags for slices to allow providing
+ individual elements instead of the slice for a group value. See package
+ doucmentation for more information.
+
+### Changed
+- Drop library dependency on `golang.org/x/lint`.
+- Support printing multi-line error messages with `%+v`.
+
+[1.9.0]: https://github.com/uber-go/dig/compare/v1.8.0...v1.9.0
+
+## [1.8.0] - 2019-11-14
+### Changed
+- Migrated to Go modules.
+
+[1.8.0]: https://github.com/uber-go/dig/compare/v1.7.0...v1.8.0
+
+## [1.7.0] - 2019-01-04
+### Added
+- Added `Group` option for `Provide` to add value groups to the container without
+rewriting constructors. See package doucmentation for more information.
+
+[1.7.0]: https://github.com/uber-go/dig/compare/v1.6.0...v1.7.0
+
+## [1.6.0] - 2018-11-06
+### Changed
+- When an error graph is visualized, the graph is pruned so that the graph only
+ contains failure nodes.
+- Container visualization is now oriented from right to left.
+
+[1.6.0]: https://github.com/uber-go/dig/compare/v1.5.1...v1.6.0
+
+## [1.5.1] - 2018-11-01
+### Fixed
+- Fixed a test that was causing Dig to be unusable with Go Modules.
+
+[1.5.1]: https://github.com/uber-go/dig/compare/v1.5.0...v1.5.1
+
+## [1.5.0] - 2018-09-19
+### Added
+- Added a `DeferAcyclicVerification` container option that defers graph cycle
+ detection until the next Invoke.
+
+### Changed
+- Improved cycle-detection performance by 50x in certain degenerative cases.
+
+[1.5.0]: https://github.com/uber-go/dig/compare/v1.4.0...v1.5.0
+
+## [1.4.0] - 2018-08-16
+### Added
+- Added `Visualize` function to visualize the state of the container in the
+ GraphViz DOT format. This allows visualization of error types and the
+ dependency relationships of types in the container.
+- Added `CanVisualizeError` function to determine if an error can be visualized
+ in the graph.
+- Added `Name` option for `Provide` to add named values to the container
+ without rewriting constructors. See package documentation for more
+ information.
+
+### Changed
+- `name:"..."` tags on nested Result Objects will now cause errors instead of
+ being ignored.
+
+[1.4.0]: https://github.com/uber-go/dig/compare/v1.3.0...v1.4.0
+
+## [1.3.0] - 2017-12-04
+### Changed
+- Improved messages for errors thrown by Dig under a many scenarios to be more
+ informative.
+
+[1.3.0]: https://github.com/uber-go/dig/compare/v1.2.0...v1.3.0
+
+## [1.2.0] - 2017-11-07
+### Added
+- `dig.In` and `dig.Out` now support value groups, making it possible to
+ produce many values of the same type from different constructors. See package
+ documentation for more information.
+
+[1.2.0]: https://github.com/uber-go/dig/compare/v1.1.0...v1.2.0
+
+## [1.1.0] - 2017-09-15
+### Added
+- Added the `dig.RootCause` function which allows retrieving the original
+ constructor error that caused an `Invoke` failure.
+
+### Changed
+- Errors from `Invoke` now attempt to hint to the user a presence of a similar
+ type, for example a pointer to the requested type and vice versa.
+
+[1.1.0]: https://github.com/uber-go/dig/compare/v1.0.0...v1.1.0
+
+## [1.0.0] - 2017-07-31
+
+First stable release: no breaking changes will be made in the 1.x series.
+
+### Changed
+- `Provide` and `Invoke` will now fail if `dig.In` or `dig.Out` structs
+ contain unexported fields. Previously these fields were ignored which often
+ led to confusion.
+
+[1.0.0]: https://github.com/uber-go/dig/compare/v1.0.0-rc2...v1.0.0
+
+## [1.0.0-rc2] - 2017-07-21
+### Added
+- Exported `dig.IsIn` and `dig.IsOut` so that consuming libraries can check if
+ a params or return struct embeds the `dig.In` and `dig.Out` types, respectively.
+
+### Changed
+- Added variadic options to all public APIS so that new functionality can be
+ introduced post v1.0.0 without introducing breaking changes.
+- Functions with variadic arguments can now be passed to `dig.Provide` and
+ `dig.Invoke`. Previously this caused an error, whereas now the args will be ignored.
+
+[1.0.0-rc2]: https://github.com/uber-go/dig/compare/v1.0.0-rc1...v1.0.0-rc2
+
+## [1.0.0-rc1] - 2017-06-21
+
+First release candidate.
+
+[1.0.0-rc1]: https://github.com/uber-go/dig/compare/v0.5.0...v1.0.0-rc1
+
+
+## [0.5.0] - 2017-06-19
+### Added
+- `dig.In` and `dig.Out` now support named instances, i.e.:
+
+ ```go
+ type param struct {
+ dig.In
+
+ DB1 DB.Connection `name:"primary"`
+ DB2 DB.Connection `name:"secondary"`
+ }
+ ```
+
+### Fixed
+- Structs compatible with `dig.In` and `dig.Out` may now be generated using
+ `reflect.StructOf`.
+
+[0.5.0]: https://github.com/uber-go/dig/compare/v0.4.0...v0.5.0
+
+## [0.4.0] - 2017-06-12
+### Added
+- Add `dig.In` embeddable type for advanced use-cases of specifying dependencies.
+- Add `dig.Out` embeddable type for advanced use-cases of constructors
+ inserting types in the container.
+- Add support for optional parameters through `optional:"true"` tag on `dig.In` objects.
+- Add support for value types and many built-ins (maps, slices, channels).
+
+### Changed
+- **[Breaking]** Restrict the API surface to only `Provide` and `Invoke`.
+- **[Breaking]** Update `Provide` method to accept variadic arguments.
+
+### Removed
+- **[Breaking]** Remove `Must*` funcs to greatly reduce API surface area.
+- Providing constructors with common returned types results in an error.
+
+[0.4.0]: https://github.com/uber-go/dig/compare/v0.3...v0.4.0
+
+## [0.3] - 2017-05-02
+### Added
+- Add functionality to `Provide` to support constructor with `n` return
+ objects to be resolved into the `dig.Graph`
+- Add `Invoke` function to invoke provided function and insert return
+ objects into the `dig.Graph`
+
+### Changed
+- Rename `RegisterAll` and `MustRegisterAll` to `ProvideAll` and
+ `MustProvideAll`.
+
+[0.3]: https://github.com/uber-go/dig/compare/v0.2...v0.3
+
+## [0.2] - 2017-03-27
+### Changed
+- Rename `Register` to `Provide` for clarity and to recude clash with other
+ Register functions.
+- Rename `dig.Graph` to `dig.Container`.
+
+### Removed
+- Remove the package-level functions and the `DefaultGraph`.
+
+[0.2]: https://github.com/uber-go/dig/compare/v0.1...v0.2
+
+## 0.1 - 2017-03-23
+
+Initial release.
diff --git a/vendor/github.com/libp2p/go-libp2p/p2p/host/eventbus/LICENSE-MIT b/vendor/go.uber.org/dig/LICENSE
similarity index 95%
rename from vendor/github.com/libp2p/go-libp2p/p2p/host/eventbus/LICENSE-MIT
rename to vendor/go.uber.org/dig/LICENSE
index 72dc60d84..00a8d056d 100644
--- a/vendor/github.com/libp2p/go-libp2p/p2p/host/eventbus/LICENSE-MIT
+++ b/vendor/go.uber.org/dig/LICENSE
@@ -1,4 +1,4 @@
-The MIT License (MIT)
+Copyright (c) 2017-2018 Uber Technologies, Inc.
Permission is hereby granted, free of charge, to any person obtaining a copy
of this software and associated documentation files (the "Software"), to deal
diff --git a/vendor/go.uber.org/dig/Makefile b/vendor/go.uber.org/dig/Makefile
new file mode 100644
index 000000000..5eb38a8cd
--- /dev/null
+++ b/vendor/go.uber.org/dig/Makefile
@@ -0,0 +1,67 @@
+export GOBIN ?= $(shell pwd)/bin
+
+GOLINT = $(GOBIN)/golint
+STATICCHECK = $(GOBIN)/staticcheck
+
+BENCH_FLAGS ?= -cpuprofile=cpu.pprof -memprofile=mem.pprof -benchmem
+
+GO_FILES = $(shell \
+ find . '(' -path '*/.*' -o -path './vendor' ')' -prune \
+ -o -name '*.go' -print | cut -b3-)
+
+MODULES = . ./tools
+
+.PHONY: all
+all: build lint test
+
+.PHONY: build
+build:
+ go build ./...
+
+.PHONY: install
+install:
+ $(foreach dir,$(MODULES),( \
+ cd $(dir) && \
+ go mod download) && \
+ ) true
+
+.PHONY: lint
+lint: $(GOLINT) $(STATICCHECK)
+ @rm -rf lint.log
+ @echo "Checking formatting..."
+ @gofmt -d -s $(GO_FILES) 2>&1 | tee lint.log
+ @echo "Checking vet..."
+ @go vet ./... 2>&1 | tee -a lint.log
+ @echo "Checking lint..."
+ @$(GOLINT) ./... 2>&1 | tee -a lint.log
+ @echo "Checking staticcheck..."
+ @$(STATICCHECK) ./... 2>&1 | tee -a lint.log
+ @echo "Checking for unresolved FIXMEs..."
+ @git grep -i fixme | grep -v -e Makefile | tee -a lint.log
+ @echo "Checking for license headers..."
+ @./check_license.sh | tee -a lint.log
+ @[ ! -s lint.log ]
+
+$(GOLINT): tools/go.mod
+ cd tools && go install golang.org/x/lint/golint
+
+$(STATICCHECK): tools/go.mod
+ cd tools && go install honnef.co/go/tools/cmd/staticcheck
+
+.PHONY: test
+test:
+ go test -race ./...
+
+.PHONY: cover
+cover:
+ go test -race -coverprofile=cover.out -coverpkg=./... ./...
+ go tool cover -html=cover.out -o cover.html
+
+.PHONY: bench
+BENCH ?= .
+bench:
+ go list ./... | xargs -n1 go test -bench=$(BENCH) -run="^$$" $(BENCH_FLAGS)
+
+.PHONY: tidy
+tidy:
+ $(foreach dir,$(MODULES),(cd $(dir) && go mod tidy) &&) true
diff --git a/vendor/go.uber.org/dig/README.md b/vendor/go.uber.org/dig/README.md
new file mode 100644
index 000000000..39db90982
--- /dev/null
+++ b/vendor/go.uber.org/dig/README.md
@@ -0,0 +1,51 @@
+# :hammer_and_pick: dig [![GoDoc][doc-img]][doc] [![GitHub release][release-img]][release] [![Build Status][ci-img]][ci] [![Coverage Status][cov-img]][cov] [![Go Report Card][report-card-img]][report-card]
+
+A reflection based dependency injection toolkit for Go.
+
+### Good for:
+
+* Powering an application framework, e.g. [Fx](https://github.com/uber-go/fx).
+* Resolving the object graph during process startup.
+
+### Bad for:
+
+* Using in place of an application framework, e.g. [Fx](https://github.com/uber-go/fx).
+* Resolving dependencies after the process has already started.
+* Exposing to user-land code as a [Service Locator](https://martinfowler.com/articles/injection.html#UsingAServiceLocator).
+
+## Installation
+
+We recommend consuming [SemVer](http://semver.org/) major version `1` using
+your dependency manager of choice.
+
+```
+$ glide get 'go.uber.org/dig#^1'
+$ dep ensure -add "go.uber.org/dig@v1"
+$ go get 'go.uber.org/dig@v1'
+```
+
+## Stability
+
+This library is `v1` and follows [SemVer](http://semver.org/) strictly.
+
+No breaking changes will be made to exported APIs before `v2.0.0`.
+
+[doc-img]: http://img.shields.io/badge/GoDoc-Reference-blue.svg
+[doc]: https://godoc.org/go.uber.org/dig
+
+[release-img]: https://img.shields.io/github/release/uber-go/dig.svg
+[release]: https://github.com/uber-go/dig/releases
+
+[ci-img]: https://github.com/uber-go/dig/actions/workflows/go.yml/badge.svg
+[ci]: https://github.com/uber-go/dig/actions/workflows/go.yml
+
+[cov-img]: https://codecov.io/gh/uber-go/dig/branch/master/graph/badge.svg
+[cov]: https://codecov.io/gh/uber-go/dig/branch/master
+
+[report-card-img]: https://goreportcard.com/badge/github.com/uber-go/dig
+[report-card]: https://goreportcard.com/report/github.com/uber-go/dig
+
+## Stargazers over time
+
+[![Stargazers over time](https://starchart.cc/uber-go/dig.svg)](https://starchart.cc/uber-go/dig)
+
diff --git a/vendor/go.uber.org/dig/callback.go b/vendor/go.uber.org/dig/callback.go
new file mode 100644
index 000000000..c5caaec81
--- /dev/null
+++ b/vendor/go.uber.org/dig/callback.go
@@ -0,0 +1,108 @@
+// Copyright (c) 2023 Uber Technologies, Inc.
+//
+// Permission is hereby granted, free of charge, to any person obtaining a copy
+// of this software and associated documentation files (the "Software"), to deal
+// in the Software without restriction, including without limitation the rights
+// to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
+// copies of the Software, and to permit persons to whom the Software is
+// furnished to do so, subject to the following conditions:
+//
+// The above copyright notice and this permission notice shall be included in
+// all copies or substantial portions of the Software.
+//
+// THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+// IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+// FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
+// AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
+// LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
+// OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
+// THE SOFTWARE.
+
+package dig
+
+// CallbackInfo contains information about a provided function or decorator
+// called by Dig, and is passed to a [Callback] registered with
+// [WithProviderCallback] or [WithDecoratorCallback].
+type CallbackInfo struct {
+
+ // Name is the name of the function in the format:
+ // .
+ Name string
+
+ // Error contains the error returned by the [Callback]'s associated
+ // function, if any. When used in conjunction with [RecoverFromPanics],
+ // this will be set to a [PanicError] when the function panics.
+ Error error
+}
+
+// Callback is a function that can be registered with a provided function
+// or decorator with [WithCallback] to cause it to be called after the
+// provided function or decorator is run.
+type Callback func(CallbackInfo)
+
+// WithProviderCallback returns a [ProvideOption] which has Dig call
+// the passed in [Callback] after the corresponding constructor finishes running.
+//
+// For example, the following prints a completion message
+// after "myConstructor" finishes, including the error if any:
+//
+// c := dig.New()
+// myCallback := func(ci CallbackInfo) {
+// var errorAdd string
+// if ci.Error != nil {
+// errorAdd = fmt.Sprintf("with error: %v", ci.Error)
+// }
+// fmt.Printf("%q finished%v", ci.Name, errorAdd)
+// }
+// c.Provide(myConstructor, WithProviderCallback(myCallback)),
+//
+// Callbacks can also be specified for Decorators with [WithDecoratorCallback].
+//
+// See [CallbackInfo] for more info on the information passed to the [Callback].
+func WithProviderCallback(callback Callback) ProvideOption {
+ return withCallbackOption{
+ callback: callback,
+ }
+}
+
+// WithDecoratorCallback returns a [DecorateOption] which has Dig call
+// the passed in [Callback] after the corresponding decorator finishes running.
+//
+// For example, the following prints a completion message
+// after "myDecorator" finishes, including the error if any:
+//
+// c := dig.New()
+// myCallback := func(ci CallbackInfo) {
+// var errorAdd string
+// if ci.Error != nil {
+// errorAdd = fmt.Sprintf("with error: %v", ci.Error)
+// }
+// fmt.Printf("%q finished%v", ci.Name, errorAdd)
+// }
+// c.Decorate(myDecorator, WithDecoratorCallback(myCallback)),
+//
+// Callbacks can also be specified for Constructors with [WithProviderCallback].
+//
+// See [CallbackInfo] for more info on the information passed to the [Callback].
+func WithDecoratorCallback(callback Callback) DecorateOption {
+ return withCallbackOption{
+ callback: callback,
+ }
+}
+
+type withCallbackOption struct {
+ callback Callback
+}
+
+var (
+ _ ProvideOption = withCallbackOption{}
+ _ DecorateOption = withCallbackOption{}
+)
+
+func (o withCallbackOption) applyProvideOption(po *provideOptions) {
+ po.Callback = o.callback
+}
+
+func (o withCallbackOption) apply(do *decorateOptions) {
+ do.Callback = o.callback
+}
diff --git a/vendor/go.uber.org/dig/check_license.sh b/vendor/go.uber.org/dig/check_license.sh
new file mode 100644
index 000000000..345ac8b89
--- /dev/null
+++ b/vendor/go.uber.org/dig/check_license.sh
@@ -0,0 +1,17 @@
+#!/bin/bash -e
+
+ERROR_COUNT=0
+while read -r file
+do
+ case "$(head -1 "${file}")" in
+ *"Copyright (c) "*" Uber Technologies, Inc.")
+ # everything's cool
+ ;;
+ *)
+ echo "$file is missing license header."
+ (( ERROR_COUNT++ ))
+ ;;
+ esac
+done < <(git ls-files "*\.go")
+
+exit $ERROR_COUNT
diff --git a/vendor/go.uber.org/dig/constructor.go b/vendor/go.uber.org/dig/constructor.go
new file mode 100644
index 000000000..208659d98
--- /dev/null
+++ b/vendor/go.uber.org/dig/constructor.go
@@ -0,0 +1,238 @@
+// Copyright (c) 2021 Uber Technologies, Inc.
+//
+// Permission is hereby granted, free of charge, to any person obtaining a copy
+// of this software and associated documentation files (the "Software"), to deal
+// in the Software without restriction, including without limitation the rights
+// to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
+// copies of the Software, and to permit persons to whom the Software is
+// furnished to do so, subject to the following conditions:
+//
+// The above copyright notice and this permission notice shall be included in
+// all copies or substantial portions of the Software.
+//
+// THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+// IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+// FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
+// AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
+// LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
+// OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
+// THE SOFTWARE.
+
+package dig
+
+import (
+ "fmt"
+ "reflect"
+
+ "go.uber.org/dig/internal/digerror"
+ "go.uber.org/dig/internal/digreflect"
+ "go.uber.org/dig/internal/dot"
+)
+
+// constructorNode is a node in the dependency graph that represents
+// a constructor provided by the user.
+//
+// constructorNodes can produce zero or more values that they store into the container.
+// For the Provide path, we verify that constructorNodes produce at least one value,
+// otherwise the function will never be called.
+type constructorNode struct {
+ ctor interface{}
+ ctype reflect.Type
+
+ // Location where this function was defined.
+ location *digreflect.Func
+
+ // id uniquely identifies the constructor that produces a node.
+ id dot.CtorID
+
+ // Whether the constructor owned by this node was already called.
+ called bool
+
+ // Type information about constructor parameters.
+ paramList paramList
+
+ // Type information about constructor results.
+ resultList resultList
+
+ // Order of this node in each Scopes' graphHolders.
+ orders map[*Scope]int
+
+ // Scope this node is part of.
+ s *Scope
+
+ // Scope this node was originally provided to.
+ // This is different from s if and only if the constructor was Provided with ExportOption.
+ origS *Scope
+
+ // Callback for this provided function, if there is one.
+ callback Callback
+}
+
+type constructorOptions struct {
+ // If specified, all values produced by this constructor have the provided name
+ // belong to the specified value group or implement any of the interfaces.
+ ResultName string
+ ResultGroup string
+ ResultAs []interface{}
+ Location *digreflect.Func
+ Callback Callback
+}
+
+func newConstructorNode(ctor interface{}, s *Scope, origS *Scope, opts constructorOptions) (*constructorNode, error) {
+ cval := reflect.ValueOf(ctor)
+ ctype := cval.Type()
+ cptr := cval.Pointer()
+
+ params, err := newParamList(ctype, s)
+ if err != nil {
+ return nil, err
+ }
+
+ results, err := newResultList(
+ ctype,
+ resultOptions{
+ Name: opts.ResultName,
+ Group: opts.ResultGroup,
+ As: opts.ResultAs,
+ },
+ )
+ if err != nil {
+ return nil, err
+ }
+
+ location := opts.Location
+ if location == nil {
+ location = digreflect.InspectFunc(ctor)
+ }
+
+ n := &constructorNode{
+ ctor: ctor,
+ ctype: ctype,
+ location: location,
+ id: dot.CtorID(cptr),
+ paramList: params,
+ resultList: results,
+ orders: make(map[*Scope]int),
+ s: s,
+ origS: origS,
+ callback: opts.Callback,
+ }
+ s.newGraphNode(n, n.orders)
+ return n, nil
+}
+
+func (n *constructorNode) Location() *digreflect.Func { return n.location }
+func (n *constructorNode) ParamList() paramList { return n.paramList }
+func (n *constructorNode) ResultList() resultList { return n.resultList }
+func (n *constructorNode) ID() dot.CtorID { return n.id }
+func (n *constructorNode) CType() reflect.Type { return n.ctype }
+func (n *constructorNode) Order(s *Scope) int { return n.orders[s] }
+func (n *constructorNode) OrigScope() *Scope { return n.origS }
+
+func (n *constructorNode) String() string {
+ return fmt.Sprintf("deps: %v, ctor: %v", n.paramList, n.ctype)
+}
+
+// Call calls this constructor if it hasn't already been called and
+// injects any values produced by it into the provided container.
+func (n *constructorNode) Call(c containerStore) (err error) {
+ if n.called {
+ return nil
+ }
+
+ if err := shallowCheckDependencies(c, n.paramList); err != nil {
+ return errMissingDependencies{
+ Func: n.location,
+ Reason: err,
+ }
+ }
+
+ args, err := n.paramList.BuildList(c)
+ if err != nil {
+ return errArgumentsFailed{
+ Func: n.location,
+ Reason: err,
+ }
+ }
+
+ if n.callback != nil {
+ // Wrap in separate func to include PanicErrors
+ defer func() {
+ n.callback(CallbackInfo{
+ Name: fmt.Sprintf("%v.%v", n.location.Package, n.location.Name),
+ Error: err,
+ })
+ }()
+ }
+
+ if n.s.recoverFromPanics {
+ defer func() {
+ if p := recover(); p != nil {
+ err = PanicError{
+ fn: n.location,
+ Panic: p,
+ }
+ }
+ }()
+ }
+
+ receiver := newStagingContainerWriter()
+ results := c.invoker()(reflect.ValueOf(n.ctor), args)
+ if err = n.resultList.ExtractList(receiver, false /* decorating */, results); err != nil {
+ return errConstructorFailed{Func: n.location, Reason: err}
+ }
+
+ // Commit the result to the original container that this constructor
+ // was supplied to. The provided constructor is only used for a view of
+ // the rest of the graph to instantiate the dependencies of this
+ // container.
+ receiver.Commit(n.s)
+ n.called = true
+ return nil
+}
+
+// stagingContainerWriter is a containerWriter that records the changes that
+// would be made to a containerWriter and defers them until Commit is called.
+type stagingContainerWriter struct {
+ values map[key]reflect.Value
+ groups map[key][]reflect.Value
+}
+
+var _ containerWriter = (*stagingContainerWriter)(nil)
+
+func newStagingContainerWriter() *stagingContainerWriter {
+ return &stagingContainerWriter{
+ values: make(map[key]reflect.Value),
+ groups: make(map[key][]reflect.Value),
+ }
+}
+
+func (sr *stagingContainerWriter) setValue(name string, t reflect.Type, v reflect.Value) {
+ sr.values[key{t: t, name: name}] = v
+}
+
+func (sr *stagingContainerWriter) setDecoratedValue(_ string, _ reflect.Type, _ reflect.Value) {
+ digerror.BugPanicf("stagingContainerWriter.setDecoratedValue must never be called")
+}
+
+func (sr *stagingContainerWriter) submitGroupedValue(group string, t reflect.Type, v reflect.Value) {
+ k := key{t: t, group: group}
+ sr.groups[k] = append(sr.groups[k], v)
+}
+
+func (sr *stagingContainerWriter) submitDecoratedGroupedValue(_ string, _ reflect.Type, _ reflect.Value) {
+ digerror.BugPanicf("stagingContainerWriter.submitDecoratedGroupedValue must never be called")
+}
+
+// Commit commits the received results to the provided containerWriter.
+func (sr *stagingContainerWriter) Commit(cw containerWriter) {
+ for k, v := range sr.values {
+ cw.setValue(k.name, k.t, v)
+ }
+
+ for k, vs := range sr.groups {
+ for _, v := range vs {
+ cw.submitGroupedValue(k.group, k.t, v)
+ }
+ }
+}
diff --git a/vendor/go.uber.org/dig/container.go b/vendor/go.uber.org/dig/container.go
new file mode 100644
index 000000000..983fd3f99
--- /dev/null
+++ b/vendor/go.uber.org/dig/container.go
@@ -0,0 +1,282 @@
+// Copyright (c) 2021 Uber Technologies, Inc.
+//
+// Permission is hereby granted, free of charge, to any person obtaining a copy
+// of this software and associated documentation files (the "Software"), to deal
+// in the Software without restriction, including without limitation the rights
+// to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
+// copies of the Software, and to permit persons to whom the Software is
+// furnished to do so, subject to the following conditions:
+//
+// The above copyright notice and this permission notice shall be included in
+// all copies or substantial portions of the Software.
+//
+// THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+// IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+// FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
+// AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
+// LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
+// OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
+// THE SOFTWARE.
+
+package dig
+
+import (
+ "fmt"
+ "math/rand"
+ "reflect"
+
+ "go.uber.org/dig/internal/dot"
+)
+
+const (
+ _optionalTag = "optional"
+ _nameTag = "name"
+ _ignoreUnexportedTag = "ignore-unexported"
+)
+
+// Unique identification of an object in the graph.
+type key struct {
+ t reflect.Type
+
+ // Only one of name or group will be set.
+ name string
+ group string
+}
+
+func (k key) String() string {
+ if k.name != "" {
+ return fmt.Sprintf("%v[name=%q]", k.t, k.name)
+ }
+ if k.group != "" {
+ return fmt.Sprintf("%v[group=%q]", k.t, k.group)
+ }
+ return k.t.String()
+}
+
+// Option configures a Container.
+type Option interface {
+ applyOption(*Container)
+}
+
+// Container is a directed acyclic graph of types and their dependencies.
+// A Container is the root Scope that represents the top-level scoped
+// directed acyclic graph of the dependencies.
+type Container struct {
+ // this is the "root" Scope that represents the
+ // root of the scope tree.
+ scope *Scope
+}
+
+// containerWriter provides write access to the Container's underlying data
+// store.
+type containerWriter interface {
+ // setValue sets the value with the given name and type in the container.
+ // If a value with the same name and type already exists, it will be
+ // overwritten.
+ setValue(name string, t reflect.Type, v reflect.Value)
+
+ // setDecoratedValue sets a decorated value with the given name and type
+ // in the container. If a decorated value with the same name and type already
+ // exists, it will be overwritten.
+ setDecoratedValue(name string, t reflect.Type, v reflect.Value)
+
+ // submitGroupedValue submits a value to the value group with the provided
+ // name.
+ submitGroupedValue(name string, t reflect.Type, v reflect.Value)
+
+ // submitDecoratedGroupedValue submits a decorated value to the value group
+ // with the provided name.
+ submitDecoratedGroupedValue(name string, t reflect.Type, v reflect.Value)
+}
+
+// containerStore provides access to the Container's underlying data store.
+type containerStore interface {
+ containerWriter
+
+ // Adds a new graph node to the Container
+ newGraphNode(w interface{}, orders map[*Scope]int)
+
+ // Returns a slice containing all known types.
+ knownTypes() []reflect.Type
+
+ // Retrieves the value with the provided name and type, if any.
+ getValue(name string, t reflect.Type) (v reflect.Value, ok bool)
+
+ // Retrieves a decorated value with the provided name and type, if any.
+ getDecoratedValue(name string, t reflect.Type) (v reflect.Value, ok bool)
+
+ // Retrieves all values for the provided group and type.
+ //
+ // The order in which the values are returned is undefined.
+ getValueGroup(name string, t reflect.Type) []reflect.Value
+
+ // Retrieves all decorated values for the provided group and type, if any.
+ getDecoratedValueGroup(name string, t reflect.Type) (reflect.Value, bool)
+
+ // Returns the providers that can produce a value with the given name and
+ // type.
+ getValueProviders(name string, t reflect.Type) []provider
+
+ // Returns the providers that can produce values for the given group and
+ // type.
+ getGroupProviders(name string, t reflect.Type) []provider
+
+ // Returns the providers that can produce a value with the given name and
+ // type across all the Scopes that are in effect of this containerStore.
+ getAllValueProviders(name string, t reflect.Type) []provider
+
+ // Returns the decorator that can decorate values for the given name and
+ // type.
+ getValueDecorator(name string, t reflect.Type) (decorator, bool)
+
+ // Returns the decorator that can decorate values for the given group and
+ // type.
+ getGroupDecorator(name string, t reflect.Type) (decorator, bool)
+
+ // Reports a list of stores (starting at this store) up to the root
+ // store.
+ storesToRoot() []containerStore
+
+ createGraph() *dot.Graph
+
+ // Returns invokerFn function to use when calling arguments.
+ invoker() invokerFn
+}
+
+// New constructs a Container.
+func New(opts ...Option) *Container {
+ s := newScope()
+ c := &Container{scope: s}
+
+ for _, opt := range opts {
+ opt.applyOption(c)
+ }
+ return c
+}
+
+// DeferAcyclicVerification is an Option to override the default behavior
+// of container.Provide, deferring the dependency graph validation to no longer
+// run after each call to container.Provide. The container will instead verify
+// the graph on first `Invoke`.
+//
+// Applications adding providers to a container in a tight loop may experience
+// performance improvements by initializing the container with this option.
+func DeferAcyclicVerification() Option {
+ return deferAcyclicVerificationOption{}
+}
+
+type deferAcyclicVerificationOption struct{}
+
+func (deferAcyclicVerificationOption) String() string {
+ return "DeferAcyclicVerification()"
+}
+
+func (deferAcyclicVerificationOption) applyOption(c *Container) {
+ c.scope.deferAcyclicVerification = true
+}
+
+// RecoverFromPanics is an [Option] to recover from panics that occur while
+// running functions given to the container. When set, recovered panics
+// will be placed into a [PanicError], and returned at the invoke callsite.
+// See [PanicError] for an example on how to handle panics with this option
+// enabled, and distinguish them from errors.
+func RecoverFromPanics() Option {
+ return recoverFromPanicsOption{}
+}
+
+type recoverFromPanicsOption struct{}
+
+func (recoverFromPanicsOption) String() string {
+ return "RecoverFromPanics()"
+}
+
+func (recoverFromPanicsOption) applyOption(c *Container) {
+ c.scope.recoverFromPanics = true
+}
+
+// Changes the source of randomness for the container.
+//
+// This will help provide determinism during tests.
+func setRand(r *rand.Rand) Option {
+ return setRandOption{r: r}
+}
+
+type setRandOption struct{ r *rand.Rand }
+
+func (o setRandOption) String() string {
+ return fmt.Sprintf("setRand(%p)", o.r)
+}
+
+func (o setRandOption) applyOption(c *Container) {
+ c.scope.rand = o.r
+}
+
+// DryRun is an Option which, when set to true, disables invocation of functions supplied to
+// Provide and Invoke. Use this to build no-op containers.
+func DryRun(dry bool) Option {
+ return dryRunOption(dry)
+}
+
+type dryRunOption bool
+
+func (o dryRunOption) String() string {
+ return fmt.Sprintf("DryRun(%v)", bool(o))
+}
+
+func (o dryRunOption) applyOption(c *Container) {
+ if o {
+ c.scope.invokerFn = dryInvoker
+ } else {
+ c.scope.invokerFn = defaultInvoker
+ }
+}
+
+// invokerFn specifies how the container calls user-supplied functions.
+type invokerFn func(fn reflect.Value, args []reflect.Value) (results []reflect.Value)
+
+func defaultInvoker(fn reflect.Value, args []reflect.Value) []reflect.Value {
+ return fn.Call(args)
+}
+
+// Generates zero values for results without calling the supplied function.
+func dryInvoker(fn reflect.Value, _ []reflect.Value) []reflect.Value {
+ ft := fn.Type()
+ results := make([]reflect.Value, ft.NumOut())
+ for i := 0; i < ft.NumOut(); i++ {
+ results[i] = reflect.Zero(fn.Type().Out(i))
+ }
+
+ return results
+}
+
+// String representation of the entire Container
+func (c *Container) String() string {
+ return c.scope.String()
+}
+
+// Scope creates a child scope of the Container with the given name.
+func (c *Container) Scope(name string, opts ...ScopeOption) *Scope {
+ return c.scope.Scope(name, opts...)
+}
+
+type byTypeName []reflect.Type
+
+func (bs byTypeName) Len() int {
+ return len(bs)
+}
+
+func (bs byTypeName) Less(i int, j int) bool {
+ return fmt.Sprint(bs[i]) < fmt.Sprint(bs[j])
+}
+
+func (bs byTypeName) Swap(i int, j int) {
+ bs[i], bs[j] = bs[j], bs[i]
+}
+
+func shuffledCopy(rand *rand.Rand, items []reflect.Value) []reflect.Value {
+ newItems := make([]reflect.Value, len(items))
+ for i, j := range rand.Perm(len(items)) {
+ newItems[i] = items[j]
+ }
+ return newItems
+}
diff --git a/vendor/go.uber.org/dig/cycle_error.go b/vendor/go.uber.org/dig/cycle_error.go
new file mode 100644
index 000000000..a294aa979
--- /dev/null
+++ b/vendor/go.uber.org/dig/cycle_error.go
@@ -0,0 +1,79 @@
+// Copyright (c) 2019 Uber Technologies, Inc.
+//
+// Permission is hereby granted, free of charge, to any person obtaining a copy
+// of this software and associated documentation files (the "Software"), to deal
+// in the Software without restriction, including without limitation the rights
+// to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
+// copies of the Software, and to permit persons to whom the Software is
+// furnished to do so, subject to the following conditions:
+//
+// The above copyright notice and this permission notice shall be included in
+// all copies or substantial portions of the Software.
+//
+// THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+// IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+// FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
+// AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
+// LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
+// OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
+// THE SOFTWARE.
+
+package dig
+
+import (
+ "bytes"
+ "errors"
+ "fmt"
+ "io"
+
+ "go.uber.org/dig/internal/digreflect"
+)
+
+type cycleErrPathEntry struct {
+ Key key
+ Func *digreflect.Func
+}
+
+type errCycleDetected struct {
+ Path []cycleErrPathEntry
+ scope *Scope
+}
+
+var _ digError = errCycleDetected{}
+
+func (e errCycleDetected) Error() string {
+ // We get something like,
+ //
+ // [scope "foo"]
+ // func(*bar) *foo provided by "path/to/package".NewFoo (path/to/file.go:42)
+ // depends on func(*baz) *bar provided by "another/package".NewBar (somefile.go:1)
+ // depends on func(*foo) baz provided by "somepackage".NewBar (anotherfile.go:2)
+ // depends on func(*bar) *foo provided by "path/to/package".NewFoo (path/to/file.go:42)
+ //
+ b := new(bytes.Buffer)
+
+ if name := e.scope.name; len(name) > 0 {
+ fmt.Fprintf(b, "[scope %q]\n", name)
+ }
+ for i, entry := range e.Path {
+ if i > 0 {
+ b.WriteString("\n\tdepends on ")
+ }
+ fmt.Fprintf(b, "%v provided by %v", entry.Key, entry.Func)
+ }
+ return b.String()
+}
+
+func (e errCycleDetected) writeMessage(w io.Writer, v string) {
+ fmt.Fprint(w, e.Error())
+}
+
+func (e errCycleDetected) Format(w fmt.State, c rune) {
+ formatError(e, w, c)
+}
+
+// IsCycleDetected returns a boolean as to whether the provided error indicates
+// a cycle was detected in the container graph.
+func IsCycleDetected(err error) bool {
+ return errors.As(err, &errCycleDetected{})
+}
diff --git a/vendor/go.uber.org/dig/decorate.go b/vendor/go.uber.org/dig/decorate.go
new file mode 100644
index 000000000..df362e985
--- /dev/null
+++ b/vendor/go.uber.org/dig/decorate.go
@@ -0,0 +1,313 @@
+// Copyright (c) 2022 Uber Technologies, Inc.
+//
+// Permission is hereby granted, free of charge, to any person obtaining a copy
+// of this software and associated documentation files (the "Software"), to deal
+// in the Software without restriction, including without limitation the rights
+// to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
+// copies of the Software, and to permit persons to whom the Software is
+// furnished to do so, subject to the following conditions:
+//
+// The above copyright notice and this permission notice shall be included in
+// all copies or substantial portions of the Software.
+//
+// THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+// IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+// FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
+// AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
+// LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
+// OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
+// THE SOFTWARE.
+
+package dig
+
+import (
+ "fmt"
+ "reflect"
+
+ "go.uber.org/dig/internal/digreflect"
+ "go.uber.org/dig/internal/dot"
+)
+
+type decoratorState int
+
+const (
+ decoratorReady decoratorState = iota
+ decoratorOnStack
+ decoratorCalled
+)
+
+type decorator interface {
+ Call(c containerStore) error
+ ID() dot.CtorID
+ State() decoratorState
+}
+
+type decoratorNode struct {
+ dcor interface{}
+ dtype reflect.Type
+
+ id dot.CtorID
+
+ // Location where this function was defined.
+ location *digreflect.Func
+
+ // Current state of this decorator
+ state decoratorState
+
+ // Parameters of the decorator.
+ params paramList
+
+ // Results of the decorator.
+ results resultList
+
+ // Order of this node in each Scopes' graphHolders.
+ orders map[*Scope]int
+
+ // Scope this node was originally provided to.
+ s *Scope
+
+ // Callback for this decorator, if there is one.
+ callback Callback
+}
+
+func newDecoratorNode(dcor interface{}, s *Scope, opts decorateOptions) (*decoratorNode, error) {
+ dval := reflect.ValueOf(dcor)
+ dtype := dval.Type()
+ dptr := dval.Pointer()
+
+ pl, err := newParamList(dtype, s)
+ if err != nil {
+ return nil, err
+ }
+
+ rl, err := newResultList(dtype, resultOptions{})
+ if err != nil {
+ return nil, err
+ }
+
+ n := &decoratorNode{
+ dcor: dcor,
+ dtype: dtype,
+ id: dot.CtorID(dptr),
+ location: digreflect.InspectFunc(dcor),
+ orders: make(map[*Scope]int),
+ params: pl,
+ results: rl,
+ s: s,
+ callback: opts.Callback,
+ }
+ return n, nil
+}
+
+func (n *decoratorNode) Call(s containerStore) (err error) {
+ if n.state == decoratorCalled {
+ return nil
+ }
+
+ n.state = decoratorOnStack
+
+ if err := shallowCheckDependencies(s, n.params); err != nil {
+ return errMissingDependencies{
+ Func: n.location,
+ Reason: err,
+ }
+ }
+
+ args, err := n.params.BuildList(n.s)
+ if err != nil {
+ return errArgumentsFailed{
+ Func: n.location,
+ Reason: err,
+ }
+ }
+
+ if n.callback != nil {
+ // Wrap in separate func to include PanicErrors
+ defer func() {
+ n.callback(CallbackInfo{
+ Name: fmt.Sprintf("%v.%v", n.location.Package, n.location.Name),
+ Error: err,
+ })
+ }()
+ }
+
+ if n.s.recoverFromPanics {
+ defer func() {
+ if p := recover(); p != nil {
+ err = PanicError{
+ fn: n.location,
+ Panic: p,
+ }
+ }
+ }()
+ }
+
+ results := s.invoker()(reflect.ValueOf(n.dcor), args)
+ if err = n.results.ExtractList(n.s, true /* decorated */, results); err != nil {
+ return err
+ }
+ n.state = decoratorCalled
+ return nil
+}
+
+func (n *decoratorNode) ID() dot.CtorID { return n.id }
+
+func (n *decoratorNode) State() decoratorState { return n.state }
+
+// DecorateOption modifies the default behavior of Decorate.
+type DecorateOption interface {
+ apply(*decorateOptions)
+}
+
+type decorateOptions struct {
+ Info *DecorateInfo
+ Callback Callback
+}
+
+// FillDecorateInfo is a DecorateOption that writes info on what Dig was
+// able to get out of the provided decorator into the provided DecorateInfo.
+func FillDecorateInfo(info *DecorateInfo) DecorateOption {
+ return fillDecorateInfoOption{info: info}
+}
+
+type fillDecorateInfoOption struct{ info *DecorateInfo }
+
+func (o fillDecorateInfoOption) String() string {
+ return fmt.Sprintf("FillDecorateInfo(%p)", o.info)
+}
+
+func (o fillDecorateInfoOption) apply(opts *decorateOptions) {
+ opts.Info = o.info
+}
+
+// DecorateInfo provides information about the decorator's inputs and outputs
+// types as strings, as well as the ID of the decorator supplied to the Container.
+type DecorateInfo struct {
+ ID ID
+ Inputs []*Input
+ Outputs []*Output
+}
+
+// Decorate provides a decorator for a type that has already been provided in the Container.
+// Decorations at this level affect all scopes of the container.
+// See Scope.Decorate for information on how to use this method.
+func (c *Container) Decorate(decorator interface{}, opts ...DecorateOption) error {
+ return c.scope.Decorate(decorator, opts...)
+}
+
+// Decorate provides a decorator for a type that has already been provided in the Scope.
+//
+// Similar to Provide, Decorate takes in a function with zero or more dependencies and one
+// or more results. Decorate can be used to modify a type that was already introduced to the
+// Scope, or completely replace it with a new object.
+//
+// For example,
+//
+// s.Decorate(func(log *zap.Logger) *zap.Logger {
+// return log.Named("myapp")
+// })
+//
+// This takes in a value, augments it with a name, and returns a replacement for it. Functions
+// in the Scope's dependency graph that use *zap.Logger will now use the *zap.Logger
+// returned by this decorator.
+//
+// A decorator can also take in multiple parameters and replace one of them:
+//
+// s.Decorate(func(log *zap.Logger, cfg *Config) *zap.Logger {
+// return log.Named(cfg.Name)
+// })
+//
+// Or replace a subset of them:
+//
+// s.Decorate(func(
+// log *zap.Logger,
+// cfg *Config,
+// scope metrics.Scope
+// ) (*zap.Logger, metrics.Scope) {
+// log = log.Named(cfg.Name)
+// scope = scope.With(metrics.Tag("service", cfg.Name))
+// return log, scope
+// })
+//
+// Decorating a Scope affects all the child scopes of this Scope.
+//
+// Similar to a provider, the decorator function gets called *at most once*.
+func (s *Scope) Decorate(decorator interface{}, opts ...DecorateOption) error {
+ var options decorateOptions
+ for _, opt := range opts {
+ opt.apply(&options)
+ }
+
+ dn, err := newDecoratorNode(decorator, s, options)
+ if err != nil {
+ return err
+ }
+
+ keys, err := findResultKeys(dn.results)
+ if err != nil {
+ return err
+ }
+ for _, k := range keys {
+ if _, ok := s.decorators[k]; ok {
+ return newErrInvalidInput(
+ fmt.Sprintf("cannot decorate using function %v: %s already decorated", dn.dtype, k), nil)
+ }
+ s.decorators[k] = dn
+ }
+
+ if info := options.Info; info != nil {
+ params := dn.params.DotParam()
+ results := dn.results.DotResult()
+ info.ID = (ID)(dn.id)
+ info.Inputs = make([]*Input, len(params))
+ info.Outputs = make([]*Output, len(results))
+
+ for i, param := range params {
+ info.Inputs[i] = &Input{
+ t: param.Type,
+ optional: param.Optional,
+ name: param.Name,
+ group: param.Group,
+ }
+ }
+ for i, res := range results {
+ info.Outputs[i] = &Output{
+ t: res.Type,
+ name: res.Name,
+ group: res.Group,
+ }
+ }
+ }
+ return nil
+}
+
+func findResultKeys(r resultList) ([]key, error) {
+ // use BFS to search for all keys included in a resultList.
+ var (
+ q []result
+ keys []key
+ )
+ q = append(q, r)
+
+ for len(q) > 0 {
+ res := q[0]
+ q = q[1:]
+
+ switch innerResult := res.(type) {
+ case resultSingle:
+ keys = append(keys, key{t: innerResult.Type, name: innerResult.Name})
+ case resultGrouped:
+ if innerResult.Type.Kind() != reflect.Slice {
+ return nil, newErrInvalidInput("decorating a value group requires decorating the entire value group, not a single value", nil)
+ }
+ keys = append(keys, key{t: innerResult.Type.Elem(), group: innerResult.Group})
+ case resultObject:
+ for _, f := range innerResult.Fields {
+ q = append(q, f.Result)
+ }
+ case resultList:
+ q = append(q, innerResult.Results...)
+ }
+ }
+ return keys, nil
+}
diff --git a/vendor/go.uber.org/dig/doc.go b/vendor/go.uber.org/dig/doc.go
new file mode 100644
index 000000000..3f9ab85c1
--- /dev/null
+++ b/vendor/go.uber.org/dig/doc.go
@@ -0,0 +1,348 @@
+// Copyright (c) 2019 Uber Technologies, Inc.
+//
+// Permission is hereby granted, free of charge, to any person obtaining a copy
+// of this software and associated documentation files (the "Software"), to deal
+// in the Software without restriction, including without limitation the rights
+// to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
+// copies of the Software, and to permit persons to whom the Software is
+// furnished to do so, subject to the following conditions:
+//
+// The above copyright notice and this permission notice shall be included in
+// all copies or substantial portions of the Software.
+//
+// THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+// IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+// FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
+// AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
+// LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
+// OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
+// THE SOFTWARE.
+
+// Package dig provides an opinionated way of resolving object dependencies.
+//
+// # Status
+//
+// STABLE. No breaking changes will be made in this major version.
+//
+// # Container
+//
+// Dig exposes type Container as an object capable of resolving a directed
+// acyclic dependency graph. Use the New function to create one.
+//
+// c := dig.New()
+//
+// # Provide
+//
+// Constructors for different types are added to the container by using the
+// Provide method. A constructor can declare a dependency on another type by
+// simply adding it as a function parameter. Dependencies for a type can be
+// added to the graph both, before and after the type was added.
+//
+// err := c.Provide(func(conn *sql.DB) (*UserGateway, error) {
+// // ...
+// })
+// if err != nil {
+// // ...
+// }
+//
+// if err := c.Provide(newDBConnection); err != nil {
+// // ...
+// }
+//
+// Multiple constructors can rely on the same type. The container creates a
+// singleton for each retained type, instantiating it at most once when
+// requested directly or as a dependency of another type.
+//
+// err := c.Provide(func(conn *sql.DB) *CommentGateway {
+// // ...
+// })
+// if err != nil {
+// // ...
+// }
+//
+// Constructors can declare any number of dependencies as parameters and
+// optionally, return errors.
+//
+// err := c.Provide(func(u *UserGateway, c *CommentGateway) (*RequestHandler, error) {
+// // ...
+// })
+// if err != nil {
+// // ...
+// }
+//
+// if err := c.Provide(newHTTPServer); err != nil {
+// // ...
+// }
+//
+// Constructors can also return multiple results to add multiple types to the
+// container.
+//
+// err := c.Provide(func(conn *sql.DB) (*UserGateway, *CommentGateway, error) {
+// // ...
+// })
+// if err != nil {
+// // ...
+// }
+//
+// Constructors that accept a variadic number of arguments are treated as if
+// they don't have those arguments. That is,
+//
+// func NewVoteGateway(db *sql.DB, options ...Option) *VoteGateway
+//
+// Is treated the same as,
+//
+// func NewVoteGateway(db *sql.DB) *VoteGateway
+//
+// The constructor will be called with all other dependencies and no variadic
+// arguments.
+//
+// # Invoke
+//
+// Types added to the container may be consumed by using the Invoke method.
+// Invoke accepts any function that accepts one or more parameters and
+// optionally, returns an error. Dig calls the function with the requested
+// type, instantiating only those types that were requested by the function.
+// The call fails if any type or its dependencies (both direct and transitive)
+// were not available in the container.
+//
+// err := c.Invoke(func(l *log.Logger) {
+// // ...
+// })
+// if err != nil {
+// // ...
+// }
+//
+// err := c.Invoke(func(server *http.Server) error {
+// // ...
+// })
+// if err != nil {
+// // ...
+// }
+//
+// Any error returned by the invoked function is propagated back to the
+// caller.
+//
+// # Parameter Objects
+//
+// Constructors declare their dependencies as function parameters. This can
+// very quickly become unreadable if the constructor has a lot of
+// dependencies.
+//
+// func NewHandler(users *UserGateway, comments *CommentGateway, posts *PostGateway, votes *VoteGateway, authz *AuthZGateway) *Handler {
+// // ...
+// }
+//
+// A pattern employed to improve readability in a situation like this is to
+// create a struct that lists all the parameters of the function as fields and
+// changing the function to accept that struct instead. This is referred to as
+// a parameter object.
+//
+// Dig has first class support for parameter objects: any struct embedding
+// dig.In gets treated as a parameter object. The following is equivalent to
+// the constructor above.
+//
+// type HandlerParams struct {
+// dig.In
+//
+// Users *UserGateway
+// Comments *CommentGateway
+// Posts *PostGateway
+// Votes *VoteGateway
+// AuthZ *AuthZGateway
+// }
+//
+// func NewHandler(p HandlerParams) *Handler {
+// // ...
+// }
+//
+// Handlers can receive any combination of parameter objects and parameters.
+//
+// func NewHandler(p HandlerParams, l *log.Logger) *Handler {
+// // ...
+// }
+//
+// # Result Objects
+//
+// Result objects are the flip side of parameter objects. These are structs
+// that represent multiple outputs from a single function as fields in the
+// struct. Structs embedding dig.Out get treated as result objects.
+//
+// func SetupGateways(conn *sql.DB) (*UserGateway, *CommentGateway, *PostGateway, error) {
+// // ...
+// }
+//
+// The above is equivalent to,
+//
+// type Gateways struct {
+// dig.Out
+//
+// Users *UserGateway
+// Comments *CommentGateway
+// Posts *PostGateway
+// }
+//
+// func SetupGateways(conn *sql.DB) (Gateways, error) {
+// // ...
+// }
+//
+// # Optional Dependencies
+//
+// Constructors often don't have a hard dependency on some types and
+// are able to operate in a degraded state when that dependency is missing.
+// Dig supports declaring dependencies as optional by adding an
+// `optional:"true"` tag to fields of a dig.In struct.
+//
+// Fields in a dig.In structs that have the `optional:"true"` tag are treated
+// as optional by Dig.
+//
+// type UserGatewayParams struct {
+// dig.In
+//
+// Conn *sql.DB
+// Cache *redis.Client `optional:"true"`
+// }
+//
+// If an optional field is not available in the container, the constructor
+// will receive a zero value for the field.
+//
+// func NewUserGateway(p UserGatewayParams, log *log.Logger) (*UserGateway, error) {
+// if p.Cache == nil {
+// log.Print("Logging disabled")
+// }
+// // ...
+// }
+//
+// Constructors that declare dependencies as optional MUST handle the case of
+// those dependencies being absent.
+//
+// The optional tag also allows adding new dependencies without breaking
+// existing consumers of the constructor.
+//
+// # Named Values
+//
+// Some use cases call for multiple values of the same type. Dig allows adding
+// multiple values of the same type to the container with the use of Named
+// Values.
+//
+// Named Values can be produced by passing the dig.Name option when a
+// constructor is provided. All values produced by that constructor will have
+// the given name.
+//
+// Given the following constructors,
+//
+// func NewReadOnlyConnection(...) (*sql.DB, error)
+// func NewReadWriteConnection(...) (*sql.DB, error)
+//
+// You can provide *sql.DB into a Container under different names by passing
+// the dig.Name option.
+//
+// c.Provide(NewReadOnlyConnection, dig.Name("ro"))
+// c.Provide(NewReadWriteConnection, dig.Name("rw"))
+//
+// Alternatively, you can produce a dig.Out struct and tag its fields with
+// `name:".."` to have the corresponding value added to the graph under the
+// specified name.
+//
+// type ConnectionResult struct {
+// dig.Out
+//
+// ReadWrite *sql.DB `name:"rw"`
+// ReadOnly *sql.DB `name:"ro"`
+// }
+//
+// func ConnectToDatabase(...) (ConnectionResult, error) {
+// // ...
+// return ConnectionResult{ReadWrite: rw, ReadOnly: ro}, nil
+// }
+//
+// Regardless of how a Named Value was produced, it can be consumed by another
+// constructor by accepting a dig.In struct which has exported fields with the
+// same name AND type that you provided.
+//
+// type GatewayParams struct {
+// dig.In
+//
+// WriteToConn *sql.DB `name:"rw"`
+// ReadFromConn *sql.DB `name:"ro"`
+// }
+//
+// The name tag may be combined with the optional tag to declare the
+// dependency optional.
+//
+// type GatewayParams struct {
+// dig.In
+//
+// WriteToConn *sql.DB `name:"rw"`
+// ReadFromConn *sql.DB `name:"ro" optional:"true"`
+// }
+//
+// func NewCommentGateway(p GatewayParams, log *log.Logger) (*CommentGateway, error) {
+// if p.ReadFromConn == nil {
+// log.Print("Warning: Using RW connection for reads")
+// p.ReadFromConn = p.WriteToConn
+// }
+// // ...
+// }
+//
+// # Value Groups
+//
+// Added in Dig 1.2.
+//
+// Dig provides value groups to allow producing and consuming many values of
+// the same type. Value groups allow constructors to send values to a named,
+// unordered collection in the container. Other constructors can request all
+// values in this collection as a slice.
+//
+// Constructors can send values into value groups by returning a dig.Out
+// struct tagged with `group:".."`.
+//
+// type HandlerResult struct {
+// dig.Out
+//
+// Handler Handler `group:"server"`
+// }
+//
+// func NewHelloHandler() HandlerResult {
+// ..
+// }
+//
+// func NewEchoHandler() HandlerResult {
+// ..
+// }
+//
+// Any number of constructors may provide values to this named collection.
+// Other constructors can request all values for this collection by requesting
+// a slice tagged with `group:".."`. This will execute all constructors that
+// provide a value to that group in an unspecified order.
+//
+// type ServerParams struct {
+// dig.In
+//
+// Handlers []Handler `group:"server"`
+// }
+//
+// func NewServer(p ServerParams) *Server {
+// server := newServer()
+// for _, h := range p.Handlers {
+// server.Register(h)
+// }
+// return server
+// }
+//
+// Note that values in a value group are unordered. Dig makes no guarantees
+// about the order in which these values will be produced.
+//
+// Value groups can be used to provide multiple values for a group from a
+// dig.Out using slices, however considering groups are retrieved by requesting
+// a slice this implies that the values must be retrieved using a slice of
+// slices. As of dig v1.9.0, if you want to provide individual elements to the
+// group instead of the slice itself, you can add the `flatten` modifier to the
+// group from a dig.Out.
+//
+// type IntResult struct {
+// dig.Out
+//
+// Handler []int `group:"server"` // [][]int from dig.In
+// Handler []int `group:"server,flatten"` // []int from dig.In
+// }
+package dig // import "go.uber.org/dig"
diff --git a/vendor/go.uber.org/dig/error.go b/vendor/go.uber.org/dig/error.go
new file mode 100644
index 000000000..24c5685be
--- /dev/null
+++ b/vendor/go.uber.org/dig/error.go
@@ -0,0 +1,505 @@
+// Copyright (c) 2019 Uber Technologies, Inc.
+//
+// Permission is hereby granted, free of charge, to any person obtaining a copy
+// of this software and associated documentation files (the "Software"), to deal
+// in the Software without restriction, including without limitation the rights
+// to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
+// copies of the Software, and to permit persons to whom the Software is
+// furnished to do so, subject to the following conditions:
+//
+// The above copyright notice and this permission notice shall be included in
+// all copies or substantial portions of the Software.
+//
+// THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+// IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+// FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
+// AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
+// LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
+// OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
+// THE SOFTWARE.
+
+package dig
+
+import (
+ "errors"
+ "fmt"
+ "io"
+ "reflect"
+ "sort"
+
+ "go.uber.org/dig/internal/digreflect"
+ "go.uber.org/dig/internal/dot"
+)
+
+// Error is an interface implemented by all Dig errors.
+//
+// Use this interface, in conjunction with [RootCause], in order to
+// determine if errors you encounter come from Dig, or if they come
+// from provided constructors or invoked functions. See [RootCause]
+// for more info.
+type Error interface {
+ error
+
+ // Writes the message or context for this error in the chain.
+ //
+ // Note: the Error interface must always have a private function
+ // such as this one in order to maintain properly sealed.
+ //
+ // verb is either %v or %+v.
+ writeMessage(w io.Writer, v string)
+}
+
+// a digError is a dig.Error with additional functionality for
+// internal use - namely the ability to be formatted.
+type digError interface {
+ Error
+ fmt.Formatter
+}
+
+// A PanicError occurs when a panic occurs while running functions given to the container
+// with the [RecoverFromPanic] option being set. It contains the panic message from the
+// original panic. A PanicError does not wrap other errors, and it does not implement
+// dig.Error, meaning it will be returned from [RootCause]. With the [RecoverFromPanic]
+// option set, a panic can be distinguished from dig errors and errors from provided/
+// invoked/decorated functions like so:
+//
+// rootCause := dig.RootCause(err)
+//
+// var pe dig.PanicError
+// var de dig.Error
+// if errors.As(rootCause, &pe) {
+// // This is caused by a panic
+// } else if errors.As(err, &de) {
+// // This is a dig error
+// } else {
+// // This is an error from one of my provided/invoked functions or decorators
+// }
+//
+// Or, if only interested in distinguishing panics from errors:
+//
+// var pe dig.PanicError
+// if errors.As(err, &pe) {
+// // This is caused by a panic
+// } else {
+// // This is an error
+// }
+type PanicError struct {
+
+ // The function the panic occurred at
+ fn *digreflect.Func
+
+ // The panic that was returned from recover()
+ Panic any
+}
+
+// Format will format the PanicError, expanding the corresponding function if in +v mode.
+func (e PanicError) Format(w fmt.State, c rune) {
+ if w.Flag('+') && c == 'v' {
+ fmt.Fprintf(w, "panic: %q in func: %+v", e.Panic, e.fn)
+ } else {
+ fmt.Fprintf(w, "panic: %q in func: %v", e.Panic, e.fn)
+ }
+}
+
+func (e PanicError) Error() string {
+ return fmt.Sprint(e)
+}
+
+// formatError will call a dig.Error's writeMessage() method to print the error message
+// and then will automatically attempt to print errors wrapped underneath (which can create
+// a recursive effect if the wrapped error's Format() method then points back to this function).
+func formatError(e digError, w fmt.State, v rune) {
+ multiline := w.Flag('+') && v == 'v'
+ verb := "%v"
+ if multiline {
+ verb = "%+v"
+ }
+
+ // "context: " or "context:\n"
+ e.writeMessage(w, verb)
+
+ // Will route back to this function recursively if next error
+ // is also wrapped and points back here
+ wrappedError := errors.Unwrap(e)
+ if wrappedError == nil {
+ return
+ }
+ io.WriteString(w, ":")
+ if multiline {
+ io.WriteString(w, "\n")
+ } else {
+ io.WriteString(w, " ")
+ }
+ fmt.Fprintf(w, verb, wrappedError)
+}
+
+// RootCause returns the first non-dig.Error in a chain of wrapped
+// errors, if there is one. Otherwise, RootCause returns the error
+// on the bottom of the chain of wrapped errors.
+//
+// Use this function and errors.As to differentiate between Dig errors
+// and errors thrown by provided constructors or invoked functions:
+//
+// rootCause := dig.RootCause(err)
+// var de dig.Error
+// if errors.As(rootCause, &de) {
+// // Is a Dig error
+// } else {
+// // Is an error thrown by one of my provided/invoked/decorated functions
+// }
+//
+// See [PanicError] for an example showing how to additionally detect
+// and handle panics in provided/invoked/decorated functions.
+func RootCause(err error) error {
+ var de Error
+ // Dig down to first non dig.Error, or bottom of chain
+ for ; errors.As(err, &de); err = errors.Unwrap(de) {
+ }
+
+ if err == nil {
+ return de
+ }
+
+ return err
+}
+
+// errInvalidInput is returned whenever the user provides bad input when
+// interacting with the container. May optionally have a more detailed
+// error wrapped underneath.
+type errInvalidInput struct {
+ Message string
+ Cause error
+}
+
+var _ digError = errInvalidInput{}
+
+// newErrInvalidInput creates a new errInvalidInput, wrapping the given
+// other error that caused this error. If there is no underlying cause,
+// pass in nil. This will cause all attempts to unwrap this error to return
+// nil, replicating errors.Unwrap's behavior when passed an error without
+// an Unwrap() method.
+func newErrInvalidInput(msg string, cause error) errInvalidInput {
+ return errInvalidInput{msg, cause}
+}
+
+func (e errInvalidInput) Error() string { return fmt.Sprint(e) }
+
+func (e errInvalidInput) Unwrap() error { return e.Cause }
+
+func (e errInvalidInput) writeMessage(w io.Writer, _ string) {
+ fmt.Fprintf(w, e.Message)
+}
+
+func (e errInvalidInput) Format(w fmt.State, c rune) {
+ formatError(e, w, c)
+}
+
+// errProvide is returned when a constructor could not be Provided into the
+// container.
+type errProvide struct {
+ Func *digreflect.Func
+ Reason error
+}
+
+var _ digError = errProvide{}
+
+func (e errProvide) Error() string { return fmt.Sprint(e) }
+
+func (e errProvide) Unwrap() error { return e.Reason }
+
+func (e errProvide) writeMessage(w io.Writer, verb string) {
+ fmt.Fprintf(w, "cannot provide function "+verb, e.Func)
+}
+
+func (e errProvide) Format(w fmt.State, c rune) {
+ formatError(e, w, c)
+}
+
+// errConstructorFailed is returned when a user-provided constructor failed
+// with a non-nil error.
+type errConstructorFailed struct {
+ Func *digreflect.Func
+ Reason error
+}
+
+var _ digError = errConstructorFailed{}
+
+func (e errConstructorFailed) Error() string { return fmt.Sprint(e) }
+
+func (e errConstructorFailed) Unwrap() error { return e.Reason }
+
+func (e errConstructorFailed) writeMessage(w io.Writer, verb string) {
+ fmt.Fprintf(w, "received non-nil error from function "+verb, e.Func)
+}
+
+func (e errConstructorFailed) Format(w fmt.State, c rune) {
+ formatError(e, w, c)
+}
+
+// errArgumentsFailed is returned when a function could not be run because one
+// of its dependencies failed to build for any reason.
+type errArgumentsFailed struct {
+ Func *digreflect.Func
+ Reason error
+}
+
+var _ digError = errArgumentsFailed{}
+
+func (e errArgumentsFailed) Error() string { return fmt.Sprint(e) }
+
+func (e errArgumentsFailed) Unwrap() error { return e.Reason }
+
+func (e errArgumentsFailed) writeMessage(w io.Writer, verb string) {
+ fmt.Fprintf(w, "could not build arguments for function "+verb, e.Func)
+}
+
+func (e errArgumentsFailed) Format(w fmt.State, c rune) {
+ formatError(e, w, c)
+}
+
+// errMissingDependencies is returned when the dependencies of a function are
+// not available in the container.
+type errMissingDependencies struct {
+ Func *digreflect.Func
+ Reason error
+}
+
+var _ digError = errMissingDependencies{}
+
+func (e errMissingDependencies) Error() string { return fmt.Sprint(e) }
+
+func (e errMissingDependencies) Unwrap() error { return e.Reason }
+
+func (e errMissingDependencies) writeMessage(w io.Writer, verb string) {
+ fmt.Fprintf(w, "missing dependencies for function "+verb, e.Func)
+}
+
+func (e errMissingDependencies) Format(w fmt.State, c rune) {
+ formatError(e, w, c)
+}
+
+// errParamSingleFailed is returned when a paramSingle could not be built.
+type errParamSingleFailed struct {
+ Key key
+ Reason error
+ CtorID dot.CtorID
+}
+
+var _ digError = errParamSingleFailed{}
+
+func (e errParamSingleFailed) Error() string { return fmt.Sprint(e) }
+
+func (e errParamSingleFailed) Unwrap() error { return e.Reason }
+
+func (e errParamSingleFailed) writeMessage(w io.Writer, _ string) {
+ fmt.Fprintf(w, "failed to build %v", e.Key)
+}
+
+func (e errParamSingleFailed) Format(w fmt.State, c rune) {
+ formatError(e, w, c)
+}
+
+func (e errParamSingleFailed) updateGraph(g *dot.Graph) {
+ failed := &dot.Result{
+ Node: &dot.Node{
+ Name: e.Key.name,
+ Group: e.Key.group,
+ Type: e.Key.t,
+ },
+ }
+ g.FailNodes([]*dot.Result{failed}, e.CtorID)
+}
+
+// errParamGroupFailed is returned when a value group cannot be built because
+// any of the values in the group failed to build.
+type errParamGroupFailed struct {
+ Key key
+ Reason error
+ CtorID dot.CtorID
+}
+
+var _ digError = errParamGroupFailed{}
+
+func (e errParamGroupFailed) Error() string { return fmt.Sprint(e) }
+
+func (e errParamGroupFailed) Unwrap() error { return e.Reason }
+
+func (e errParamGroupFailed) writeMessage(w io.Writer, _ string) {
+ fmt.Fprintf(w, "could not build value group %v", e.Key)
+}
+
+func (e errParamGroupFailed) Format(w fmt.State, c rune) {
+ formatError(e, w, c)
+}
+
+func (e errParamGroupFailed) updateGraph(g *dot.Graph) {
+ g.FailGroupNodes(e.Key.group, e.Key.t, e.CtorID)
+}
+
+// missingType holds information about a type that was missing in the
+// container.
+type missingType struct {
+ Key key // item that was missing
+
+ // If non-empty, we will include suggestions for what the user may have
+ // meant.
+ suggestions []key
+}
+
+// Format prints a string representation of missingType.
+//
+// With %v, it prints a short representation ideal for an itemized list.
+//
+// io.Writer
+// io.Writer: did you mean *bytes.Buffer?
+// io.Writer: did you mean *bytes.Buffer, or *os.File?
+//
+// With %+v, it prints a longer representation ideal for standalone output.
+//
+// io.Writer: did you mean to Provide it?
+// io.Writer: did you mean to use *bytes.Buffer?
+// io.Writer: did you mean to use one of *bytes.Buffer, or *os.File?
+func (mt missingType) Format(w fmt.State, v rune) {
+ plusV := w.Flag('+') && v == 'v'
+
+ fmt.Fprint(w, mt.Key)
+ switch len(mt.suggestions) {
+ case 0:
+ if plusV {
+ io.WriteString(w, " (did you mean to Provide it?)")
+ }
+ case 1:
+ sug := mt.suggestions[0]
+ if plusV {
+ fmt.Fprintf(w, " (did you mean to use %v?)", sug)
+ } else {
+ fmt.Fprintf(w, " (did you mean %v?)", sug)
+ }
+ default:
+ if plusV {
+ io.WriteString(w, " (did you mean to use one of ")
+ } else {
+ io.WriteString(w, " (did you mean ")
+ }
+
+ lastIdx := len(mt.suggestions) - 1
+ for i, sug := range mt.suggestions {
+ if i > 0 {
+ io.WriteString(w, ", ")
+ if i == lastIdx {
+ io.WriteString(w, "or ")
+ }
+ }
+ fmt.Fprint(w, sug)
+ }
+ io.WriteString(w, "?)")
+ }
+}
+
+// errMissingType is returned when one or more values that were expected in
+// the container were not available.
+//
+// Multiple instances of this error may be merged together by appending them.
+type errMissingTypes []missingType // inv: len > 0
+
+var _ digError = errMissingTypes(nil)
+
+func newErrMissingTypes(c containerStore, k key) errMissingTypes {
+ // Possible types we will look for in the container. We will always look
+ // for pointers to the requested type and some extras on a per-Kind basis.
+ suggestions := []reflect.Type{reflect.PtrTo(k.t)}
+
+ if k.t.Kind() == reflect.Ptr {
+ // The user requested a pointer but maybe we have a value.
+ suggestions = append(suggestions, k.t.Elem())
+ }
+
+ knownTypes := c.knownTypes()
+ if k.t.Kind() == reflect.Interface {
+ // Maybe we have an implementation of the interface.
+ for _, t := range knownTypes {
+ if t.Implements(k.t) {
+ suggestions = append(suggestions, t)
+ }
+ }
+ } else {
+ // Maybe we have an interface that this type implements.
+ for _, t := range knownTypes {
+ if t.Kind() == reflect.Interface {
+ if k.t.Implements(t) {
+ suggestions = append(suggestions, t)
+ }
+ }
+ }
+ }
+
+ // range through c.providers is non-deterministic. Let's sort the list of
+ // suggestions.
+ sort.Sort(byTypeName(suggestions))
+
+ mt := missingType{Key: k}
+ for _, t := range suggestions {
+ if len(c.getValueProviders(k.name, t)) > 0 {
+ k.t = t
+ mt.suggestions = append(mt.suggestions, k)
+ }
+ }
+
+ return errMissingTypes{mt}
+}
+
+func (e errMissingTypes) Error() string { return fmt.Sprint(e) }
+
+func (e errMissingTypes) writeMessage(w io.Writer, v string) {
+
+ multiline := v == "%+v"
+
+ if len(e) == 1 {
+ io.WriteString(w, "missing type:")
+ } else {
+ io.WriteString(w, "missing types:")
+ }
+
+ if !multiline {
+ // With %v, we need a space between : since the error
+ // won't be on a new line.
+ io.WriteString(w, " ")
+ }
+
+ for i, mt := range e {
+ if multiline {
+ io.WriteString(w, "\n\t- ")
+ } else if i > 0 {
+ io.WriteString(w, "; ")
+ }
+
+ if multiline {
+ fmt.Fprintf(w, "%+v", mt)
+ } else {
+ fmt.Fprintf(w, "%v", mt)
+ }
+ }
+}
+
+func (e errMissingTypes) Format(w fmt.State, c rune) {
+ formatError(e, w, c)
+}
+
+func (e errMissingTypes) updateGraph(g *dot.Graph) {
+ missing := make([]*dot.Result, len(e))
+
+ for i, mt := range e {
+ missing[i] = &dot.Result{
+ Node: &dot.Node{
+ Name: mt.Key.name,
+ Group: mt.Key.group,
+ Type: mt.Key.t,
+ },
+ }
+ }
+ g.AddMissingNodes(missing)
+}
+
+type errVisualizer interface {
+ updateGraph(*dot.Graph)
+}
diff --git a/vendor/go.uber.org/dig/glide.yaml b/vendor/go.uber.org/dig/glide.yaml
new file mode 100644
index 000000000..972b804cd
--- /dev/null
+++ b/vendor/go.uber.org/dig/glide.yaml
@@ -0,0 +1,7 @@
+package: go.uber.org/dig
+license: MIT
+testImport:
+- package: github.com/stretchr/testify
+ subpackages:
+ - assert
+ - require
diff --git a/vendor/go.uber.org/dig/graph.go b/vendor/go.uber.org/dig/graph.go
new file mode 100644
index 000000000..e08f1f54b
--- /dev/null
+++ b/vendor/go.uber.org/dig/graph.go
@@ -0,0 +1,115 @@
+// Copyright (c) 2021 Uber Technologies, Inc.
+//
+// Permission is hereby granted, free of charge, to any person obtaining a copy
+// of this software and associated documentation files (the "Software"), to deal
+// in the Software without restriction, including without limitation the rights
+// to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
+// copies of the Software, and to permit persons to whom the Software is
+// furnished to do so, subject to the following conditions:
+//
+// The above copyright notice and this permission notice shall be included in
+// all copies or substantial portions of the Software.
+//
+// THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+// IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+// FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
+// AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
+// LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
+// OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
+// THE SOFTWARE.
+
+package dig
+
+import "go.uber.org/dig/internal/graph"
+
+// graphNode is a single node in the dependency graph.
+type graphNode struct {
+ Wrapped interface{}
+}
+
+// graphHolder is the dependency graph of the container.
+// It saves constructorNodes and paramGroupedSlice (value groups)
+// as nodes in the graph.
+// It implements the graph interface defined by internal/graph.
+// It has 1-1 correspondence with the Scope whose graph it represents.
+type graphHolder struct {
+ // all the nodes defined in the graph.
+ nodes []*graphNode
+
+ // Scope whose graph this holder contains.
+ s *Scope
+
+ // Number of nodes in the graph at last snapshot.
+ // -1 if no snapshot has been taken.
+ snap int
+}
+
+var _ graph.Graph = (*graphHolder)(nil)
+
+func newGraphHolder(s *Scope) *graphHolder {
+ return &graphHolder{s: s, snap: -1}
+}
+
+func (gh *graphHolder) Order() int { return len(gh.nodes) }
+
+// EdgesFrom returns the indices of nodes that are dependencies of node u.
+//
+// To do that, it needs to do one of the following:
+//
+// For constructor nodes, it retrieves the providers of the constructor's
+// parameters from the container and reports their orders.
+//
+// For value group nodes, it retrieves the group providers from the container
+// and reports their orders.
+func (gh *graphHolder) EdgesFrom(u int) []int {
+ var orders []int
+ switch w := gh.Lookup(u).(type) {
+ case *constructorNode:
+ for _, param := range w.paramList.Params {
+ orders = append(orders, getParamOrder(gh, param)...)
+ }
+ case *paramGroupedSlice:
+ providers := gh.s.getAllGroupProviders(w.Group, w.Type.Elem())
+ for _, provider := range providers {
+ orders = append(orders, provider.Order(gh.s))
+ }
+ }
+ return orders
+}
+
+// NewNode adds a new value to the graph and returns its order.
+func (gh *graphHolder) NewNode(wrapped interface{}) int {
+ order := len(gh.nodes)
+ gh.nodes = append(gh.nodes, &graphNode{
+ Wrapped: wrapped,
+ })
+ return order
+}
+
+// Lookup retrieves the value for the node with the given order.
+// Lookup panics if i is invalid.
+func (gh *graphHolder) Lookup(i int) interface{} {
+ return gh.nodes[i].Wrapped
+}
+
+// Snapshot takes a temporary snapshot of the current state of the graph.
+// Use with Rollback to undo changes to the graph.
+//
+// Only one snapshot is allowed at a time.
+// Multiple calls to snapshot will overwrite prior snapshots.
+func (gh *graphHolder) Snapshot() {
+ gh.snap = len(gh.nodes)
+}
+
+// Rollback rolls back a snapshot to a previously captured state.
+// This is a no-op if no snapshot was captured.
+func (gh *graphHolder) Rollback() {
+ if gh.snap < 0 {
+ return
+ }
+
+ // nodes is an append-only list. To rollback, we just drop the
+ // extraneous entries from the slice.
+ gh.nodes = gh.nodes[:gh.snap]
+ gh.snap = -1
+}
diff --git a/vendor/go.uber.org/dig/group.go b/vendor/go.uber.org/dig/group.go
new file mode 100644
index 000000000..b51361116
--- /dev/null
+++ b/vendor/go.uber.org/dig/group.go
@@ -0,0 +1,67 @@
+// Copyright (c) 2020 Uber Technologies, Inc.
+//
+// Permission is hereby granted, free of charge, to any person obtaining a copy
+// of this software and associated documentation files (the "Software"), to deal
+// in the Software without restriction, including without limitation the rights
+// to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
+// copies of the Software, and to permit persons to whom the Software is
+// furnished to do so, subject to the following conditions:
+//
+// The above copyright notice and this permission notice shall be included in
+// all copies or substantial portions of the Software.
+//
+// THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+// IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+// FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
+// AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
+// LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
+// OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
+// THE SOFTWARE.
+
+package dig
+
+import (
+ "fmt"
+ "io"
+ "strings"
+)
+
+const (
+ _groupTag = "group"
+)
+
+type group struct {
+ Name string
+ Flatten bool
+ Soft bool
+}
+
+type errInvalidGroupOption struct{ Option string }
+
+var _ digError = errInvalidGroupOption{}
+
+func (e errInvalidGroupOption) Error() string { return fmt.Sprint(e) }
+
+func (e errInvalidGroupOption) writeMessage(w io.Writer, v string) {
+ fmt.Fprintf(w, "invalid option %q", e.Option)
+}
+
+func (e errInvalidGroupOption) Format(w fmt.State, c rune) {
+ formatError(e, w, c)
+}
+
+func parseGroupString(s string) (group, error) {
+ components := strings.Split(s, ",")
+ g := group{Name: components[0]}
+ for _, c := range components[1:] {
+ switch c {
+ case "flatten":
+ g.Flatten = true
+ case "soft":
+ g.Soft = true
+ default:
+ return g, errInvalidGroupOption{Option: c}
+ }
+ }
+ return g, nil
+}
diff --git a/vendor/go.uber.org/dig/inout.go b/vendor/go.uber.org/dig/inout.go
new file mode 100644
index 000000000..3d5758429
--- /dev/null
+++ b/vendor/go.uber.org/dig/inout.go
@@ -0,0 +1,175 @@
+// Copyright (c) 2019 Uber Technologies, Inc.
+//
+// Permission is hereby granted, free of charge, to any person obtaining a copy
+// of this software and associated documentation files (the "Software"), to deal
+// in the Software without restriction, including without limitation the rights
+// to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
+// copies of the Software, and to permit persons to whom the Software is
+// furnished to do so, subject to the following conditions:
+//
+// The above copyright notice and this permission notice shall be included in
+// all copies or substantial portions of the Software.
+//
+// THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+// IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+// FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
+// AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
+// LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
+// OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
+// THE SOFTWARE.
+
+package dig
+
+import (
+ "container/list"
+ "fmt"
+ "reflect"
+ "strconv"
+)
+
+var (
+ _noValue reflect.Value
+ _errType = reflect.TypeOf((*error)(nil)).Elem()
+ _inPtrType = reflect.TypeOf((*In)(nil))
+ _inType = reflect.TypeOf(In{})
+ _outPtrType = reflect.TypeOf((*Out)(nil))
+ _outType = reflect.TypeOf(Out{})
+)
+
+// Placeholder type placed in dig.In/dig.Out to make their special nature
+// obvious in godocs.
+// Otherwise they will appear as plain empty structs.
+type digSentinel struct{}
+
+// In may be embedded into structs to request dig to treat them as special
+// parameter structs. When a constructor accepts such a struct, instead of the
+// struct becoming a dependency for that constructor, all its fields become
+// dependencies instead. See the section on Parameter Objects in the
+// package-level documentation for more information.
+//
+// Fields of the struct may optionally be tagged to customize the behavior of
+// dig. The following tags are supported,
+//
+// name Requests a value with the same name and type from the
+// container. See Named Values for more information.
+// optional If set to true, indicates that the dependency is optional and
+// the constructor gracefully handles its absence.
+// group Name of the Value Group from which this field will be filled.
+// The field must be a slice type. See Value Groups in the
+// package documentation for more information.
+type In struct{ _ digSentinel }
+
+// Out is an embeddable type that signals to dig that the returned
+// struct should be treated differently. Instead of the struct itself
+// becoming part of the container, all members of the struct will.
+//
+// Out may be embedded into structs to request dig to treat them as special
+// result structs. When a constructor returns such a struct, instead of the
+// struct becoming a result of the constructor, all its fields become results
+// of the constructor. See the section on Result Objects in the package-level
+// documentation for more information.
+//
+// Fields of the struct may optionally be tagged to customize the behavior of
+// dig. The following tags are supported,
+//
+// name Specifies the name of the value. Only a field on a dig.In
+// struct with the same 'name' annotation can receive this
+// value. See Named Values for more information.
+// group Name of the Value Group to which this field's value is being
+// sent. See Value Groups in the package documentation for more
+// information.
+type Out struct{ _ digSentinel }
+
+func isError(t reflect.Type) bool {
+ return t.Implements(_errType)
+}
+
+// IsIn checks whether the given struct is a dig.In struct. A struct qualifies
+// as a dig.In struct if it embeds the dig.In type or if any struct that it
+// embeds is a dig.In struct. The parameter may be the reflect.Type of the
+// struct rather than the struct itself.
+//
+// A struct MUST qualify as a dig.In struct for its fields to be treated
+// specially by dig.
+//
+// See the documentation for dig.In for a comprehensive list of supported
+// tags.
+func IsIn(o interface{}) bool {
+ return embedsType(o, _inType)
+}
+
+// IsOut checks whether the given struct is a dig.Out struct. A struct
+// qualifies as a dig.Out struct if it embeds the dig.Out type or if any
+// struct that it embeds is a dig.Out struct. The parameter may be the
+// reflect.Type of the struct rather than the struct itself.
+//
+// A struct MUST qualify as a dig.Out struct for its fields to be treated
+// specially by dig.
+//
+// See the documentation for dig.Out for a comprehensive list of supported
+// tags.
+func IsOut(o interface{}) bool {
+ return embedsType(o, _outType)
+}
+
+// Returns true if t embeds e or if any of the types embedded by t embed e.
+func embedsType(i interface{}, e reflect.Type) bool {
+ // TODO: this function doesn't consider e being a pointer.
+ // given `type A foo { *In }`, this function would return false for
+ // embedding dig.In, which makes for some extra error checking in places
+ // that call this function. Might be worthwhile to consider reflect.Indirect
+ // usage to clean up the callers.
+
+ if i == nil {
+ return false
+ }
+
+ // maybe it's already a reflect.Type
+ t, ok := i.(reflect.Type)
+ if !ok {
+ // take the type if it's not
+ t = reflect.TypeOf(i)
+ }
+
+ // We are going to do a breadth-first search of all embedded fields.
+ types := list.New()
+ types.PushBack(t)
+ for types.Len() > 0 {
+ t := types.Remove(types.Front()).(reflect.Type)
+
+ if t == e {
+ return true
+ }
+
+ if t.Kind() != reflect.Struct {
+ continue
+ }
+
+ for i := 0; i < t.NumField(); i++ {
+ f := t.Field(i)
+ if f.Anonymous {
+ types.PushBack(f.Type)
+ }
+ }
+ }
+
+ // If perf is an issue, we can cache known In objects and Out objects in a
+ // map[reflect.Type]struct{}.
+ return false
+}
+
+// Checks if a field of an In struct is optional.
+func isFieldOptional(f reflect.StructField) (bool, error) {
+ tag := f.Tag.Get(_optionalTag)
+ if tag == "" {
+ return false, nil
+ }
+
+ optional, err := strconv.ParseBool(tag)
+ if err != nil {
+ err = newErrInvalidInput(
+ fmt.Sprintf("invalid value %q for %q tag on field %v", tag, _optionalTag, f.Name), err)
+ }
+
+ return optional, err
+}
diff --git a/vendor/go.uber.org/dig/internal/digerror/errors.go b/vendor/go.uber.org/dig/internal/digerror/errors.go
new file mode 100644
index 000000000..13dde3dce
--- /dev/null
+++ b/vendor/go.uber.org/dig/internal/digerror/errors.go
@@ -0,0 +1,34 @@
+// Copyright (c) 2021 Uber Technologies, Inc.
+//
+// Permission is hereby granted, free of charge, to any person obtaining a copy
+// of this software and associated documentation files (the "Software"), to deal
+// in the Software without restriction, including without limitation the rights
+// to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
+// copies of the Software, and to permit persons to whom the Software is
+// furnished to do so, subject to the following conditions:
+//
+// The above copyright notice and this permission notice shall be included in
+// all copies or substantial portions of the Software.
+//
+// THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+// IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+// FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
+// AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
+// LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
+// OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
+// THE SOFTWARE.
+
+package digerror
+
+import (
+ "fmt"
+)
+
+// BugPanicf panics with the provided message directing users to GitHub issues
+// creation page.
+func BugPanicf(msg string, args ...interface{}) {
+ panic(fmt.Sprintf("It looks like you have found a bug in dig. "+
+ "Please file an issue at https://github.com/uber-go/dig/issues/new "+
+ "and provide the following message: "+
+ msg, args...))
+}
diff --git a/vendor/go.uber.org/dig/internal/digreflect/func.go b/vendor/go.uber.org/dig/internal/digreflect/func.go
new file mode 100644
index 000000000..8554ed8db
--- /dev/null
+++ b/vendor/go.uber.org/dig/internal/digreflect/func.go
@@ -0,0 +1,125 @@
+// Copyright (c) 2019 Uber Technologies, Inc.
+//
+// Permission is hereby granted, free of charge, to any person obtaining a copy
+// of this software and associated documentation files (the "Software"), to deal
+// in the Software without restriction, including without limitation the rights
+// to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
+// copies of the Software, and to permit persons to whom the Software is
+// furnished to do so, subject to the following conditions:
+//
+// The above copyright notice and this permission notice shall be included in
+// all copies or substantial portions of the Software.
+//
+// THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+// IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+// FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
+// AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
+// LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
+// OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
+// THE SOFTWARE.
+
+package digreflect
+
+import (
+ "fmt"
+ "net/url"
+ "reflect"
+ "runtime"
+ "strings"
+)
+
+// Func contains runtime information about a function.
+type Func struct {
+ // Name of the function.
+ Name string
+
+ // Name of the package in which this function is defined.
+ Package string
+
+ // Path to the file in which this function is defined.
+ File string
+
+ // Line number in the file at which this function is defined.
+ Line int
+}
+
+// String returns a string representation of the function.
+func (f *Func) String() string {
+ return fmt.Sprint(f)
+}
+
+// Format implements fmt.Formatter for Func, printing a single-line
+// representation for %v and a multi-line one for %+v.
+func (f *Func) Format(w fmt.State, c rune) {
+ if w.Flag('+') && c == 'v' {
+ // "path/to/package".MyFunction
+ // path/to/file.go:42
+ fmt.Fprintf(w, "%q.%v", f.Package, f.Name)
+ fmt.Fprintf(w, "\n\t%v:%v", f.File, f.Line)
+ } else {
+ // "path/to/package".MyFunction (path/to/file.go:42)
+ fmt.Fprintf(w, "%q.%v (%v:%v)", f.Package, f.Name, f.File, f.Line)
+ }
+}
+
+// InspectFunc inspects and returns runtime information about the given
+// function.
+func InspectFunc(function interface{}) *Func {
+ fptr := reflect.ValueOf(function).Pointer()
+ return InspectFuncPC(fptr)
+}
+
+// InspectFuncPC inspects and returns runtime information about the function
+// at the given program counter address.
+func InspectFuncPC(pc uintptr) *Func {
+ f := runtime.FuncForPC(pc)
+ if f == nil {
+ return nil
+ }
+ pkgName, funcName := splitFuncName(f.Name())
+ fileName, lineNum := f.FileLine(pc)
+ return &Func{
+ Name: funcName,
+ Package: pkgName,
+ File: fileName,
+ Line: lineNum,
+ }
+}
+
+const _vendor = "/vendor/"
+
+func splitFuncName(function string) (pname string, fname string) {
+ if len(function) == 0 {
+ return
+ }
+
+ // We have something like "path.to/my/pkg.MyFunction". If the function is
+ // a closure, it is something like, "path.to/my/pkg.MyFunction.func1".
+
+ idx := 0
+
+ // Everything up to the first "." after the last "/" is the package name.
+ // Everything after the "." is the full function name.
+ if i := strings.LastIndex(function, "/"); i >= 0 {
+ idx = i
+ }
+ if i := strings.Index(function[idx:], "."); i >= 0 {
+ idx += i
+ }
+ pname, fname = function[:idx], function[idx+1:]
+
+ // The package may be vendored.
+ if i := strings.Index(pname, _vendor); i > 0 {
+ pname = pname[i+len(_vendor):]
+ }
+
+ // Package names are URL-encoded to avoid ambiguity in the case where the
+ // package name contains ".git". Otherwise, "foo/bar.git.MyFunction" would
+ // mean that "git" is the top-level function and "MyFunction" is embedded
+ // inside it.
+ if unescaped, err := url.QueryUnescape(pname); err == nil {
+ pname = unescaped
+ }
+
+ return
+}
diff --git a/vendor/go.uber.org/dig/internal/dot/README.md b/vendor/go.uber.org/dig/internal/dot/README.md
new file mode 100644
index 000000000..1198ade54
--- /dev/null
+++ b/vendor/go.uber.org/dig/internal/dot/README.md
@@ -0,0 +1,61 @@
+# Dot
+
+The dot module generates a DOT file representation of a dependency graph.
+
+## Interpreting the graph
+
+The graph should be read from left to right. The leftmost node in the graph (the root node) depends
+on its dependency tree to the right. An arrow from node_a to node_b in the graph means that node_b
+is consumed by node_a and that node_b is a parameter of node_a. The rendered graph holds the
+following kinds of nodes,
+
+**Nodes:**
+
+- *Constructors* [Rectangles]: Takes parameters and produces results.
+- *Results* [Ovals]: Results inside a constructor are produced by that constructor. Results are consumed
+directly by other constructors and/or part of a group of results.
+- *Groups* [Diamonds]: Represent value groups in [fx](https://godoc.org/go.uber.org/fx). Multiple results can form a group. Any
+result linked to a group by an edge are members of that group. A group is a collection of results.
+Groups can also be parameters of constructors.
+
+**Edges:**
+
+- *Solid Arrows*: An arrow from node_a to node_b means that node_b is a parameter of node_a and that
+node_a depends on node_b.
+- *Dashed Arrows*: A dashed arrow from node_a to node_b represents an optional dependency that node_a
+has on node_b.
+
+**Graph Colors:**
+
+- *Red*: Graph nodes are the root cause failures.
+- *Orange*: Graph nodes are the transitive failures.
+
+## Testing and verifying changes
+
+Unit tests and visualize golden tests are run with
+
+```shell
+$ make test
+```
+
+You can visualize the effect of your code changes by visualizing generated test graphs as pngs.
+
+In the dig root directory, generate the graph DOT files with respect to your latest code changes.
+
+```shell
+$ go test -generate
+```
+
+Assuming that you have [graphviz](https://www.graphviz.org/) installed and are in the testdata directory,
+generate a png image representation of a graph for viewing.
+
+```shell
+$ dot -Tpng ${name_of_dot_file_in_testdata}.dot -o ${name_of_dot_file_in_testdata}.png
+$ open ${name_of_dot_file_in_testdata}.png
+```
+
+## Graph Pruning
+
+If dot.Visualize is used to visualize an error graph, non-failing nodes are pruned out of the graph
+to make the error graph more readable to the user. Pruning increases readability since successful
+nodes clutter the graph and do not help the user debug errors.
diff --git a/vendor/go.uber.org/dig/internal/dot/graph.go b/vendor/go.uber.org/dig/internal/dot/graph.go
new file mode 100644
index 000000000..3706fa119
--- /dev/null
+++ b/vendor/go.uber.org/dig/internal/dot/graph.go
@@ -0,0 +1,466 @@
+// Copyright (c) 2019 Uber Technologies, Inc.
+//
+// Permission is hereby granted, free of charge, to any person obtaining a copy
+// of this software and associated documentation files (the "Software"), to deal
+// in the Software without restriction, including without limitation the rights
+// to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
+// copies of the Software, and to permit persons to whom the Software is
+// furnished to do so, subject to the following conditions:
+//
+// The above copyright notice and this permission notice shall be included in
+// all copies or substantial portions of the Software.
+//
+// THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+// IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+// FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
+// AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
+// LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
+// OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
+// THE SOFTWARE.
+
+package dot
+
+import (
+ "fmt"
+ "reflect"
+)
+
+// ErrorType of a constructor or group is updated when they fail to build.
+type ErrorType int
+
+const (
+ noError ErrorType = iota
+ rootCause
+ transitiveFailure
+)
+
+// CtorID is a unique numeric identifier for constructors.
+type CtorID uintptr
+
+// Ctor encodes a constructor provided to the container for the DOT graph.
+type Ctor struct {
+ Name string
+ Package string
+ File string
+ Line int
+ ID CtorID
+ Params []*Param
+ GroupParams []*Group
+ Results []*Result
+ ErrorType ErrorType
+}
+
+// removeParam deletes the dependency on the provided result's nodeKey.
+// This is used to prune links to results of deleted constructors.
+func (c *Ctor) removeParam(k nodeKey) {
+ var pruned []*Param
+ for _, p := range c.Params {
+ if k != p.nodeKey() {
+ pruned = append(pruned, p)
+ }
+ }
+ c.Params = pruned
+}
+
+type nodeKey struct {
+ t reflect.Type
+ name string
+ group string
+}
+
+// Node is a single node in a graph and is embedded into Params and Results.
+type Node struct {
+ Type reflect.Type
+ Name string
+ Group string
+}
+
+func (n *Node) nodeKey() nodeKey {
+ return nodeKey{t: n.Type, name: n.Name, group: n.Group}
+}
+
+// Param is a parameter node in the graph. Parameters are the input to constructors.
+type Param struct {
+ *Node
+
+ Optional bool
+}
+
+// Result is a result node in the graph. Results are the output of constructors.
+type Result struct {
+ *Node
+
+ // GroupIndex is added to differentiate grouped values from one another.
+ // Since grouped values have the same type and group, their Node / string
+ // representations are the same so we need indices to uniquely identify
+ // the values.
+ GroupIndex int
+}
+
+// Group is a group node in the graph. Group represents an fx value group.
+type Group struct {
+ // Type is the type of values in the group.
+ Type reflect.Type
+ Name string
+ Results []*Result
+ ErrorType ErrorType
+}
+
+func (g *Group) nodeKey() nodeKey {
+ return nodeKey{t: g.Type, group: g.Name}
+}
+
+// TODO(rhang): Avoid linear search to discover group results that should be pruned.
+func (g *Group) removeResult(r *Result) {
+ var pruned []*Result
+ for _, rg := range g.Results {
+ if r.GroupIndex != rg.GroupIndex {
+ pruned = append(pruned, rg)
+ }
+ }
+ g.Results = pruned
+}
+
+// Graph is the DOT-format graph in a Container.
+type Graph struct {
+ Ctors []*Ctor
+ ctorMap map[CtorID]*Ctor
+
+ Groups []*Group
+ groupMap map[nodeKey]*Group
+
+ consumers map[nodeKey][]*Ctor
+
+ Failed *FailedNodes
+}
+
+// FailedNodes is the nodes that failed in the graph.
+type FailedNodes struct {
+ // RootCauses is a list of the point of failures. They are the root causes
+ // of failed invokes and can be either missing types (not provided) or
+ // error types (error providing).
+ RootCauses []*Result
+
+ // TransitiveFailures is the list of nodes that failed to build due to
+ // missing/failed dependencies.
+ TransitiveFailures []*Result
+
+ // ctors is a collection of failed constructors IDs that are populated as the graph is
+ // traversed for errors.
+ ctors map[CtorID]struct{}
+
+ // Groups is a collection of failed groupKeys that is populated as the graph is traversed
+ // for errors.
+ groups map[nodeKey]struct{}
+}
+
+// NewGraph creates an empty graph.
+func NewGraph() *Graph {
+ return &Graph{
+ ctorMap: make(map[CtorID]*Ctor),
+ groupMap: make(map[nodeKey]*Group),
+ consumers: make(map[nodeKey][]*Ctor),
+ Failed: &FailedNodes{
+ ctors: make(map[CtorID]struct{}),
+ groups: make(map[nodeKey]struct{}),
+ },
+ }
+}
+
+// NewGroup creates a new group with information in the groupKey.
+func NewGroup(k nodeKey) *Group {
+ return &Group{
+ Type: k.t,
+ Name: k.group,
+ }
+}
+
+// AddCtor adds the constructor with paramList and resultList into the graph.
+func (dg *Graph) AddCtor(c *Ctor, paramList []*Param, resultList []*Result) {
+ var (
+ params []*Param
+ groupParams []*Group
+ )
+
+ // Loop through the paramList to separate them into regular params and
+ // grouped params. For grouped params, we use getGroup to find the actual
+ // group.
+ for _, param := range paramList {
+ if param.Group == "" {
+ // Not a value group.
+ params = append(params, param)
+ continue
+ }
+
+ k := nodeKey{t: param.Type.Elem(), group: param.Group}
+ group := dg.getGroup(k)
+ groupParams = append(groupParams, group)
+ }
+
+ for _, result := range resultList {
+ // If the result is a grouped value, we want to update its GroupIndex
+ // and add it to the Group.
+ if result.Group != "" {
+ dg.addToGroup(result, c.ID)
+ }
+ }
+
+ c.Params = params
+ c.GroupParams = groupParams
+ c.Results = resultList
+
+ // Track which constructors consume a parameter.
+ for _, p := range paramList {
+ k := p.nodeKey()
+ dg.consumers[k] = append(dg.consumers[k], c)
+ }
+
+ dg.Ctors = append(dg.Ctors, c)
+ dg.ctorMap[c.ID] = c
+}
+
+func (dg *Graph) failNode(r *Result, isRootCause bool) {
+ if isRootCause {
+ dg.addRootCause(r)
+ } else {
+ dg.addTransitiveFailure(r)
+ }
+}
+
+// AddMissingNodes adds missing nodes to the list of failed Results in the graph.
+func (dg *Graph) AddMissingNodes(results []*Result) {
+ // The failure(s) are root causes if there are no other failures.
+ isRootCause := len(dg.Failed.RootCauses) == 0
+
+ for _, r := range results {
+ dg.failNode(r, isRootCause)
+ }
+}
+
+// FailNodes adds results to the list of failed Results in the graph, and
+// updates the state of the constructor with the given id accordingly.
+func (dg *Graph) FailNodes(results []*Result, id CtorID) {
+ // This failure is the root cause if there are no other failures.
+ isRootCause := len(dg.Failed.RootCauses) == 0
+ dg.Failed.ctors[id] = struct{}{}
+
+ for _, r := range results {
+ dg.failNode(r, isRootCause)
+ }
+
+ if c, ok := dg.ctorMap[id]; ok {
+ if isRootCause {
+ c.ErrorType = rootCause
+ } else {
+ c.ErrorType = transitiveFailure
+ }
+ }
+}
+
+// FailGroupNodes finds and adds the failed grouped nodes to the list of failed
+// Results in the graph, and updates the state of the group and constructor
+// with the given id accordingly.
+func (dg *Graph) FailGroupNodes(name string, t reflect.Type, id CtorID) {
+ // This failure is the root cause if there are no other failures.
+ isRootCause := len(dg.Failed.RootCauses) == 0
+
+ k := nodeKey{t: t, group: name}
+ group := dg.getGroup(k)
+
+ // If the ctor does not exist it cannot be failed.
+ if _, ok := dg.ctorMap[id]; !ok {
+ return
+ }
+
+ // Track which constructors and groups have failed.
+ dg.Failed.ctors[id] = struct{}{}
+ dg.Failed.groups[k] = struct{}{}
+
+ for _, r := range dg.ctorMap[id].Results {
+ if r.Type == t && r.Group == name {
+ dg.failNode(r, isRootCause)
+ }
+ }
+
+ if c, ok := dg.ctorMap[id]; ok {
+ if isRootCause {
+ group.ErrorType = rootCause
+ c.ErrorType = rootCause
+ } else {
+ group.ErrorType = transitiveFailure
+ c.ErrorType = transitiveFailure
+ }
+ }
+}
+
+// getGroup finds the group by nodeKey from the graph. If it is not available,
+// a new group is created and returned.
+func (dg *Graph) getGroup(k nodeKey) *Group {
+ g, ok := dg.groupMap[k]
+ if !ok {
+ g = NewGroup(k)
+ dg.groupMap[k] = g
+ dg.Groups = append(dg.Groups, g)
+ }
+ return g
+}
+
+// addToGroup adds a newly provided grouped result to the appropriate group.
+func (dg *Graph) addToGroup(r *Result, id CtorID) {
+ k := nodeKey{t: r.Type, group: r.Group}
+ group := dg.getGroup(k)
+
+ r.GroupIndex = len(group.Results)
+ group.Results = append(group.Results, r)
+}
+
+// PruneSuccess removes elements from the graph that do not have failed results.
+// Removing elements that do not have failing results makes the graph easier to debug,
+// since non-failing nodes and edges can clutter the graph and don't help the user debug.
+func (dg *Graph) PruneSuccess() {
+ dg.pruneCtors(dg.Failed.ctors)
+ dg.pruneGroups(dg.Failed.groups)
+}
+
+// pruneCtors removes constructors from the graph that do not have failing Results.
+func (dg *Graph) pruneCtors(failed map[CtorID]struct{}) {
+ var pruned []*Ctor
+ for _, c := range dg.Ctors {
+ if _, ok := failed[c.ID]; ok {
+ pruned = append(pruned, c)
+ continue
+ }
+ // If a constructor is deleted, the constructor's stale result references need to
+ // be removed from that result's Group and/or consuming constructor.
+ dg.pruneCtorParams(c, dg.consumers)
+ dg.pruneGroupResults(c, dg.groupMap)
+ delete(dg.ctorMap, c.ID)
+ }
+
+ dg.Ctors = pruned
+}
+
+// pruneGroups removes groups from the graph that do not have failing results.
+func (dg *Graph) pruneGroups(failed map[nodeKey]struct{}) {
+ var pruned []*Group
+ for _, g := range dg.Groups {
+ k := g.nodeKey()
+ if _, ok := failed[k]; ok {
+ pruned = append(pruned, g)
+ continue
+ }
+ delete(dg.groupMap, k)
+ }
+ dg.Groups = pruned
+
+ dg.pruneCtorGroupParams(dg.groupMap)
+}
+
+// pruneCtorParams removes the results of the given constructor from the Params of
+// every constructor that consumes those results, so that pruned constructors leave
+// no stale parameter references behind.
+func (dg *Graph) pruneCtorParams(c *Ctor, consumers map[nodeKey][]*Ctor) {
+ for _, r := range c.Results {
+ for _, ctor := range consumers[r.nodeKey()] {
+ ctor.removeParam(r.nodeKey())
+ }
+ }
+}
+
+// pruneCtorGroupParams removes constructor results that are still referenced in the GroupParams of
+// constructors that consume those results.
+func (dg *Graph) pruneCtorGroupParams(groups map[nodeKey]*Group) {
+ for _, c := range dg.Ctors {
+ var pruned []*Group
+ for _, gp := range c.GroupParams {
+ k := gp.nodeKey()
+ if _, ok := groups[k]; ok {
+ pruned = append(pruned, gp)
+ }
+ }
+ c.GroupParams = pruned
+ }
+}
+
+// pruneGroupResults removes results of the constructor argument that are still referenced in
+// the Group object that contains that result. If a group no longer exists references to that
+// should should be removed.
+func (dg *Graph) pruneGroupResults(c *Ctor, groups map[nodeKey]*Group) {
+ for _, r := range c.Results {
+ k := r.nodeKey()
+ if k.group == "" {
+ continue
+ }
+
+ g, ok := groups[k]
+ if ok {
+ g.removeResult(r)
+ }
+ }
+}
+
+// String implements fmt.Stringer for Param.
+func (p *Param) String() string {
+ if p.Name != "" {
+ return fmt.Sprintf("%v[name=%v]", p.Type.String(), p.Name)
+ }
+ return p.Type.String()
+}
+
+// String implements fmt.Stringer for Result.
+func (r *Result) String() string {
+ switch {
+ case r.Name != "":
+ return fmt.Sprintf("%v[name=%v]", r.Type.String(), r.Name)
+ case r.Group != "":
+ return fmt.Sprintf("%v[group=%v]%v", r.Type.String(), r.Group, r.GroupIndex)
+ default:
+ return r.Type.String()
+ }
+}
+
+// String implements fmt.Stringer for Group.
+func (g *Group) String() string {
+ return fmt.Sprintf("[type=%v group=%v]", g.Type.String(), g.Name)
+}
+
+// Attributes composes and returns a string of the Result node's attributes.
+func (r *Result) Attributes() string {
+ switch {
+ case r.Name != "":
+ return fmt.Sprintf(`label=<%v
Name: %v>`, r.Type, r.Name)
+ case r.Group != "":
+ return fmt.Sprintf(`label=<%v
Group: %v>`, r.Type, r.Group)
+ default:
+ return fmt.Sprintf(`label=<%v>`, r.Type)
+ }
+}
+
+// Attributes composes and returns a string of the Group node's attributes.
+func (g *Group) Attributes() string {
+ attr := fmt.Sprintf(`shape=diamond label=<%v
Group: %v>`, g.Type, g.Name)
+ if g.ErrorType != noError {
+ attr += " color=" + g.ErrorType.Color()
+ }
+ return attr
+}
+
+// Color returns the color representation of each ErrorType.
+func (s ErrorType) Color() string {
+ switch s {
+ case rootCause:
+ return "red"
+ case transitiveFailure:
+ return "orange"
+ default:
+ return "black"
+ }
+}
+
+func (dg *Graph) addRootCause(r *Result) {
+ dg.Failed.RootCauses = append(dg.Failed.RootCauses, r)
+}
+
+func (dg *Graph) addTransitiveFailure(r *Result) {
+ dg.Failed.TransitiveFailures = append(dg.Failed.TransitiveFailures, r)
+}
diff --git a/vendor/go.uber.org/dig/internal/graph/graph.go b/vendor/go.uber.org/dig/internal/graph/graph.go
new file mode 100644
index 000000000..abe6842f1
--- /dev/null
+++ b/vendor/go.uber.org/dig/internal/graph/graph.go
@@ -0,0 +1,118 @@
+// Copyright (c) 2021 Uber Technologies, Inc.
+//
+// Permission is hereby granted, free of charge, to any person obtaining a copy
+// of this software and associated documentation files (the "Software"), to deal
+// in the Software without restriction, including without limitation the rights
+// to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
+// copies of the Software, and to permit persons to whom the Software is
+// furnished to do so, subject to the following conditions:
+//
+// The above copyright notice and this permission notice shall be included in
+// all copies or substantial portions of the Software.
+//
+// THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+// IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+// FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
+// AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
+// LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
+// OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
+// THE SOFTWARE.
+
+package graph
+
+// Graph represents a simple interface for representation
+// of a directed graph.
+// It is assumed that each node in the graph is uniquely
+// identified with an incremental positive integer (i.e. 1, 2, 3...).
+// A value of 0 for a node represents a sentinel error value.
+type Graph interface {
+ // Order returns the total number of nodes in the graph
+ Order() int
+
+ // EdgesFrom returns a list of integers that each
+ // represents a node that has an edge from node u.
+ EdgesFrom(u int) []int
+}
+
+// IsAcyclic uses depth-first search to find cycles
+// in a generic graph represented by Graph interface.
+// If a cycle is found, it returns a list of nodes that
+// are in the cyclic path, identified by their orders.
+func IsAcyclic(g Graph) (bool, []int) {
+ // cycleStart is a node that introduces a cycle in
+ // the graph. Values in the range [1, g.Order()) mean
+ // that there exists a cycle in g.
+ info := newCycleInfo(g.Order())
+
+ for i := 0; i < g.Order(); i++ {
+ info.Reset()
+
+ cycle := isAcyclic(g, i, info, nil /* cycle path */)
+ if len(cycle) > 0 {
+ return false, cycle
+ }
+ }
+
+ return true, nil
+}
+
+// isAcyclic traverses the given graph starting from a specific node
+// using depth-first search using recursion. If a cycle is detected,
+// it returns the node that contains the "last" edge that introduces
+// a cycle.
+// For example, running isAcyclic starting from 1 on the following
+// graph will return 3.
+//
+// 1 -> 2 -> 3 -> 1
+func isAcyclic(g Graph, u int, info cycleInfo, path []int) []int {
+ // We've already verified that there are no cycles from this node.
+ if info[u].Visited {
+ return nil
+ }
+ info[u].Visited = true
+ info[u].OnStack = true
+
+ path = append(path, u)
+ for _, v := range g.EdgesFrom(u) {
+ if !info[v].Visited {
+ if cycle := isAcyclic(g, v, info, path); len(cycle) > 0 {
+ return cycle
+ }
+ } else if info[v].OnStack {
+ // We've found a cycle, and we have a full path back.
+ // Prune it down to just the cyclic nodes.
+ cycle := path
+ for i := len(cycle) - 1; i >= 0; i-- {
+ if cycle[i] == v {
+ cycle = cycle[i:]
+ break
+ }
+ }
+
+ // Complete the cycle by adding this node to it.
+ return append(cycle, v)
+ }
+ }
+ info[u].OnStack = false
+ return nil
+}
+
+// cycleNode keeps track of a single node's info for cycle detection.
+type cycleNode struct {
+ Visited bool
+ OnStack bool
+}
+
+// cycleInfo contains information about each node while we're trying to find
+// cycles.
+type cycleInfo []cycleNode
+
+func newCycleInfo(order int) cycleInfo {
+ return make(cycleInfo, order)
+}
+
+func (info cycleInfo) Reset() {
+ for i := range info {
+ info[i].OnStack = false
+ }
+}
diff --git a/vendor/go.uber.org/dig/invoke.go b/vendor/go.uber.org/dig/invoke.go
new file mode 100644
index 000000000..8a70121ae
--- /dev/null
+++ b/vendor/go.uber.org/dig/invoke.go
@@ -0,0 +1,211 @@
+// Copyright (c) 2021 Uber Technologies, Inc.
+//
+// Permission is hereby granted, free of charge, to any person obtaining a copy
+// of this software and associated documentation files (the "Software"), to deal
+// in the Software without restriction, including without limitation the rights
+// to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
+// copies of the Software, and to permit persons to whom the Software is
+// furnished to do so, subject to the following conditions:
+//
+// The above copyright notice and this permission notice shall be included in
+// all copies or substantial portions of the Software.
+//
+// THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+// IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+// FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
+// AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
+// LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
+// OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
+// THE SOFTWARE.
+
+package dig
+
+import (
+ "fmt"
+ "go.uber.org/dig/internal/digreflect"
+ "go.uber.org/dig/internal/graph"
+ "reflect"
+)
+
+// An InvokeOption modifies the default behavior of Invoke.
+type InvokeOption interface {
+ applyInvokeOption(*invokeOptions)
+}
+
+type invokeOptions struct {
+ Info *InvokeInfo
+}
+
+// InvokeInfo provides information about an Invoke.
+type InvokeInfo struct {
+ Inputs []*Input
+}
+
+// FillInvokeInfo is an InvokeOption that writes information on the types
+// accepted by the Invoke function into the specified InvokeInfo.
+// For example:
+//
+// var info dig.InvokeInfo
+// err := c.Invoke(func(string, int){}, dig.FillInvokeInfo(&info))
+//
+// info.Inputs[0].String() will be string.
+// info.Inputs[1].String() will be int.
+func FillInvokeInfo(info *InvokeInfo) InvokeOption {
+ return fillInvokeInfoOption{info: info}
+}
+
+type fillInvokeInfoOption struct {
+ info *InvokeInfo
+}
+
+func (o fillInvokeInfoOption) String() string {
+ return fmt.Sprintf("FillInvokeInfo(%p)", o.info)
+}
+
+func (o fillInvokeInfoOption) applyInvokeOption(opts *invokeOptions) {
+ opts.Info = o.info
+}
+
+// Invoke runs the given function after instantiating its dependencies.
+//
+// Any arguments that the function has are treated as its dependencies. The
+// dependencies are instantiated in an unspecified order along with any
+// dependencies that they might have.
+//
+// The function may return an error to indicate failure. The error will be
+// returned to the caller as-is.
+//
+// If the [RecoverFromPanics] option was given to the container and a panic
+// occurs when invoking, a [PanicError] with the panic contained will be
+// returned. See [PanicError] for more info.
+func (c *Container) Invoke(function interface{}, opts ...InvokeOption) error {
+ return c.scope.Invoke(function, opts...)
+}
+
+// Invoke runs the given function after instantiating its dependencies.
+//
+// Any arguments that the function has are treated as its dependencies. The
+// dependencies are instantiated in an unspecified order along with any
+// dependencies that they might have.
+//
+// The function may return an error to indicate failure. The error will be
+// returned to the caller as-is.
+func (s *Scope) Invoke(function interface{}, opts ...InvokeOption) (err error) {
+ ftype := reflect.TypeOf(function)
+ if ftype == nil {
+ return newErrInvalidInput("can't invoke an untyped nil", nil)
+ }
+ if ftype.Kind() != reflect.Func {
+ return newErrInvalidInput(
+ fmt.Sprintf("can't invoke non-function %v (type %v)", function, ftype), nil)
+ }
+
+ pl, err := newParamList(ftype, s)
+ if err != nil {
+ return err
+ }
+
+ if err := shallowCheckDependencies(s, pl); err != nil {
+ return errMissingDependencies{
+ Func: digreflect.InspectFunc(function),
+ Reason: err,
+ }
+ }
+
+ if !s.isVerifiedAcyclic {
+ if ok, cycle := graph.IsAcyclic(s.gh); !ok {
+ return newErrInvalidInput("cycle detected in dependency graph", s.cycleDetectedError(cycle))
+ }
+ s.isVerifiedAcyclic = true
+ }
+
+ args, err := pl.BuildList(s)
+ if err != nil {
+ return errArgumentsFailed{
+ Func: digreflect.InspectFunc(function),
+ Reason: err,
+ }
+ }
+ if s.recoverFromPanics {
+ defer func() {
+ if p := recover(); p != nil {
+ err = PanicError{
+ fn: digreflect.InspectFunc(function),
+ Panic: p,
+ }
+ }
+ }()
+ }
+
+ var options invokeOptions
+ for _, o := range opts {
+ o.applyInvokeOption(&options)
+ }
+
+ // Record info for the invoke if requested
+ if info := options.Info; info != nil {
+ params := pl.DotParam()
+ info.Inputs = make([]*Input, len(params))
+ for i, p := range params {
+ info.Inputs[i] = &Input{
+ t: p.Type,
+ optional: p.Optional,
+ name: p.Name,
+ group: p.Group,
+ }
+ }
+
+ }
+
+ returned := s.invokerFn(reflect.ValueOf(function), args)
+ if len(returned) == 0 {
+ return nil
+ }
+ if last := returned[len(returned)-1]; isError(last.Type()) {
+ if err, _ := last.Interface().(error); err != nil {
+ return err
+ }
+ }
+
+ return nil
+}
+
+// Checks that all direct dependencies of the provided parameters are present in
+// the container. Returns an error if not.
+func shallowCheckDependencies(c containerStore, pl paramList) error {
+ var err errMissingTypes
+
+ missingDeps := findMissingDependencies(c, pl.Params...)
+ for _, dep := range missingDeps {
+ err = append(err, newErrMissingTypes(c, key{name: dep.Name, t: dep.Type})...)
+ }
+
+ if len(err) > 0 {
+ return err
+ }
+ return nil
+}
+
+func findMissingDependencies(c containerStore, params ...param) []paramSingle {
+ var missingDeps []paramSingle
+
+ for _, param := range params {
+ switch p := param.(type) {
+ case paramSingle:
+ allProviders := c.getAllValueProviders(p.Name, p.Type)
+ _, hasDecoratedValue := c.getDecoratedValue(p.Name, p.Type)
+ // This means that there is no provider that provides this value,
+ // and it is NOT being decorated and is NOT optional.
+ // In the case that there is no providers but there is a decorated value
+ // of this type, it can be provided safely so we can safely skip this.
+ if len(allProviders) == 0 && !hasDecoratedValue && !p.Optional {
+ missingDeps = append(missingDeps, p)
+ }
+ case paramObject:
+ for _, f := range p.Fields {
+ missingDeps = append(missingDeps, findMissingDependencies(c, f.Param)...)
+ }
+ }
+ }
+ return missingDeps
+}
diff --git a/vendor/go.uber.org/dig/param.go b/vendor/go.uber.org/dig/param.go
new file mode 100644
index 000000000..d584fc237
--- /dev/null
+++ b/vendor/go.uber.org/dig/param.go
@@ -0,0 +1,668 @@
+// Copyright (c) 2019-2021 Uber Technologies, Inc.
+//
+// Permission is hereby granted, free of charge, to any person obtaining a copy
+// of this software and associated documentation files (the "Software"), to deal
+// in the Software without restriction, including without limitation the rights
+// to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
+// copies of the Software, and to permit persons to whom the Software is
+// furnished to do so, subject to the following conditions:
+//
+// The above copyright notice and this permission notice shall be included in
+// all copies or substantial portions of the Software.
+//
+// THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+// IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+// FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
+// AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
+// LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
+// OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
+// THE SOFTWARE.
+
+package dig
+
+import (
+ "fmt"
+ "reflect"
+ "strconv"
+ "strings"
+
+ "go.uber.org/dig/internal/digerror"
+ "go.uber.org/dig/internal/dot"
+)
+
+// The param interface represents a dependency for a constructor.
+//
+// The following implementations exist:
+//
+// paramList All arguments of the constructor.
+// paramSingle An explicitly requested type.
+// paramObject dig.In struct where each field in the struct can be another
+// param.
+// paramGroupedSlice
+// A slice consuming a value group. This will receive all
+// values produced with a `group:".."` tag with the same name
+// as a slice.
+type param interface {
+ fmt.Stringer
+
+ // Build this dependency and any of its dependencies from the provided
+ // Container.
+ //
+ // This MAY panic if the param does not produce a single value.
+ Build(store containerStore) (reflect.Value, error)
+
+ // DotParam returns a slice of dot.Param(s).
+ DotParam() []*dot.Param
+}
+
+var (
+ _ param = paramSingle{}
+ _ param = paramObject{}
+ _ param = paramList{}
+ _ param = paramGroupedSlice{}
+)
+
+// newParam builds a param from the given type. If the provided type is a
+// dig.In struct, an paramObject will be returned.
+func newParam(t reflect.Type, c containerStore) (param, error) {
+ switch {
+ case IsOut(t) || (t.Kind() == reflect.Ptr && IsOut(t.Elem())) || embedsType(t, _outPtrType):
+ return nil, newErrInvalidInput(fmt.Sprintf(
+ "cannot depend on result objects: %v embeds a dig.Out", t), nil)
+ case IsIn(t):
+ return newParamObject(t, c)
+ case embedsType(t, _inPtrType):
+ return nil, newErrInvalidInput(fmt.Sprintf(
+ "cannot build a parameter object by embedding *dig.In, embed dig.In instead: %v embeds *dig.In", t), nil)
+ case t.Kind() == reflect.Ptr && IsIn(t.Elem()):
+ return nil, newErrInvalidInput(fmt.Sprintf(
+ "cannot depend on a pointer to a parameter object, use a value instead: %v is a pointer to a struct that embeds dig.In", t), nil)
+ default:
+ return paramSingle{Type: t}, nil
+ }
+}
+
+// paramList holds all arguments of the constructor as params.
+//
+// NOTE: Build() MUST NOT be called on paramList. Instead, BuildList
+// must be called.
+type paramList struct {
+ ctype reflect.Type // type of the constructor
+
+ Params []param
+}
+
+func (pl paramList) DotParam() []*dot.Param {
+ var types []*dot.Param
+ for _, param := range pl.Params {
+ types = append(types, param.DotParam()...)
+ }
+ return types
+}
+
+func (pl paramList) String() string {
+ args := make([]string, len(pl.Params))
+ for i, p := range pl.Params {
+ args[i] = p.String()
+ }
+ return fmt.Sprint(args)
+}
+
+// newParamList builds a paramList from the provided constructor type.
+//
+// Variadic arguments of a constructor are ignored and not included as
+// dependencies.
+func newParamList(ctype reflect.Type, c containerStore) (paramList, error) {
+ numArgs := ctype.NumIn()
+ if ctype.IsVariadic() {
+ // NOTE: If the function is variadic, we skip the last argument
+ // because we're not filling variadic arguments yet. See #120.
+ numArgs--
+ }
+
+ pl := paramList{
+ ctype: ctype,
+ Params: make([]param, 0, numArgs),
+ }
+
+ for i := 0; i < numArgs; i++ {
+ p, err := newParam(ctype.In(i), c)
+ if err != nil {
+ return pl, newErrInvalidInput(fmt.Sprintf("bad argument %d", i+1), err)
+ }
+ pl.Params = append(pl.Params, p)
+ }
+
+ return pl, nil
+}
+
+func (pl paramList) Build(containerStore) (reflect.Value, error) {
+ digerror.BugPanicf("paramList.Build() must never be called")
+ panic("") // Unreachable, as BugPanicf above will panic.
+}
+
+// BuildList returns an ordered list of values which may be passed directly
+// to the underlying constructor.
+func (pl paramList) BuildList(c containerStore) ([]reflect.Value, error) {
+ args := make([]reflect.Value, len(pl.Params))
+ for i, p := range pl.Params {
+ var err error
+ args[i], err = p.Build(c)
+ if err != nil {
+ return nil, err
+ }
+ }
+ return args, nil
+}
+
+// paramSingle is an explicitly requested type, optionally with a name.
+//
+// This object must be present in the graph as-is unless it's specified as
+// optional.
+type paramSingle struct {
+ Name string
+ Optional bool
+ Type reflect.Type
+}
+
+func (ps paramSingle) DotParam() []*dot.Param {
+ return []*dot.Param{
+ {
+ Node: &dot.Node{
+ Type: ps.Type,
+ Name: ps.Name,
+ },
+ Optional: ps.Optional,
+ },
+ }
+}
+
+func (ps paramSingle) String() string {
+ // tally.Scope[optional] means optional
+ // tally.Scope[optional, name="foo"] means named optional
+
+ var opts []string
+ if ps.Optional {
+ opts = append(opts, "optional")
+ }
+ if ps.Name != "" {
+ opts = append(opts, fmt.Sprintf("name=%q", ps.Name))
+ }
+
+ if len(opts) == 0 {
+ return fmt.Sprint(ps.Type)
+ }
+
+ return fmt.Sprintf("%v[%v]", ps.Type, strings.Join(opts, ", "))
+}
+
+// search the given container and its ancestors for a decorated value.
+func (ps paramSingle) getDecoratedValue(c containerStore) (reflect.Value, bool) {
+ for _, c := range c.storesToRoot() {
+ if v, ok := c.getDecoratedValue(ps.Name, ps.Type); ok {
+ return v, ok
+ }
+ }
+ return _noValue, false
+}
+
+// builds the parameter using decorators in all scopes that affect the
+// current scope, if there are any. If there are multiple Scopes that decorates
+// this parameter, the closest one to the Scope that invoked this will be used.
+// If there are no decorators associated with this parameter, _noValue is returned.
+func (ps paramSingle) buildWithDecorators(c containerStore) (v reflect.Value, found bool, err error) {
+ var (
+ d decorator
+ decoratingScope containerStore
+ )
+ stores := c.storesToRoot()
+
+ for _, s := range stores {
+ if d, found = s.getValueDecorator(ps.Name, ps.Type); !found {
+ continue
+ }
+ if d.State() == decoratorOnStack {
+ // This decorator is already being run.
+ // Avoid a cycle and look further.
+ d = nil
+ continue
+ }
+ decoratingScope = s
+ break
+ }
+ if !found || d == nil {
+ return _noValue, false, nil
+ }
+ if err = d.Call(decoratingScope); err != nil {
+ v, err = _noValue, errParamSingleFailed{
+ CtorID: 1,
+ Key: key{t: ps.Type, name: ps.Name},
+ Reason: err,
+ }
+ return v, found, err
+ }
+ v, _ = decoratingScope.getDecoratedValue(ps.Name, ps.Type)
+ return
+}
+
+func (ps paramSingle) Build(c containerStore) (reflect.Value, error) {
+ v, found, err := ps.buildWithDecorators(c)
+ if found {
+ return v, err
+ }
+
+ // Check whether the value is a decorated value first.
+ if v, ok := ps.getDecoratedValue(c); ok {
+ return v, nil
+ }
+
+ // Starting at the given container and working our way up its parents,
+ // find one that provides this dependency.
+ //
+ // Once found, we'll use that container for the rest of the invocation.
+ // Dependencies of this type will begin searching at that container,
+ // rather than starting at base.
+ var providers []provider
+ var providingContainer containerStore
+ for _, container := range c.storesToRoot() {
+ // first check if the scope already has cached a value for the type.
+ if v, ok := container.getValue(ps.Name, ps.Type); ok {
+ return v, nil
+ }
+ providers = container.getValueProviders(ps.Name, ps.Type)
+ if len(providers) > 0 {
+ providingContainer = container
+ break
+ }
+ }
+
+ if len(providers) == 0 {
+ if ps.Optional {
+ return reflect.Zero(ps.Type), nil
+ }
+ return _noValue, newErrMissingTypes(c, key{name: ps.Name, t: ps.Type})
+ }
+
+ for _, n := range providers {
+ err := n.Call(n.OrigScope())
+ if err == nil {
+ continue
+ }
+
+ // If we're missing dependencies but the parameter itself is optional,
+ // we can just move on.
+ if _, ok := err.(errMissingDependencies); ok && ps.Optional {
+ return reflect.Zero(ps.Type), nil
+ }
+
+ return _noValue, errParamSingleFailed{
+ CtorID: n.ID(),
+ Key: key{t: ps.Type, name: ps.Name},
+ Reason: err,
+ }
+ }
+
+ // If we get here, it's impossible for the value to be absent from the
+ // container.
+ v, _ = providingContainer.getValue(ps.Name, ps.Type)
+ return v, nil
+}
+
+// paramObject is a dig.In struct where each field is another param.
+//
+// This object is not expected in the graph as-is.
+type paramObject struct {
+ Type reflect.Type
+ Fields []paramObjectField
+ FieldOrders []int
+}
+
+func (po paramObject) DotParam() []*dot.Param {
+ var types []*dot.Param
+ for _, field := range po.Fields {
+ types = append(types, field.DotParam()...)
+ }
+ return types
+}
+
+func (po paramObject) String() string {
+ fields := make([]string, len(po.Fields))
+ for i, f := range po.Fields {
+ fields[i] = f.Param.String()
+ }
+ return strings.Join(fields, " ")
+}
+
+// getParamOrder returns the order(s) of a parameter type.
+func getParamOrder(gh *graphHolder, param param) []int {
+ var orders []int
+ switch p := param.(type) {
+ case paramSingle:
+ providers := gh.s.getAllValueProviders(p.Name, p.Type)
+ for _, provider := range providers {
+ orders = append(orders, provider.Order(gh.s))
+ }
+ case paramGroupedSlice:
+ // value group parameters have nodes of their own.
+ // We can directly return that here.
+ orders = append(orders, p.orders[gh.s])
+ case paramObject:
+ for _, pf := range p.Fields {
+ orders = append(orders, getParamOrder(gh, pf.Param)...)
+ }
+ }
+ return orders
+}
+
+// newParamObject builds an paramObject from the provided type. The type MUST
+// be a dig.In struct.
+func newParamObject(t reflect.Type, c containerStore) (paramObject, error) {
+ po := paramObject{Type: t}
+
+ // Check if the In type supports ignoring unexported fields.
+ var ignoreUnexported bool
+ for i := 0; i < t.NumField(); i++ {
+ f := t.Field(i)
+ if f.Type == _inType {
+ var err error
+ ignoreUnexported, err = isIgnoreUnexportedSet(f)
+ if err != nil {
+ return po, err
+ }
+ break
+ }
+ }
+
+ for i := 0; i < t.NumField(); i++ {
+ f := t.Field(i)
+ if f.Type == _inType {
+ // Skip over the dig.In embed.
+ continue
+ }
+ if f.PkgPath != "" && ignoreUnexported {
+ // Skip over an unexported field if it is allowed.
+ continue
+ }
+ pof, err := newParamObjectField(i, f, c)
+ if err != nil {
+ return po, newErrInvalidInput(
+ fmt.Sprintf("bad field %q of %v", f.Name, t), err)
+ }
+ po.Fields = append(po.Fields, pof)
+ }
+ return po, nil
+}
+
+func (po paramObject) Build(c containerStore) (reflect.Value, error) {
+ dest := reflect.New(po.Type).Elem()
+ // We have to build soft groups after all other fields, to avoid cases
+ // when a field calls a provider for a soft value group, but the value is
+ // not provided to it because the value group is declared before the field
+ var softGroupsQueue []paramObjectField
+ var fields []paramObjectField
+ for _, f := range po.Fields {
+ if p, ok := f.Param.(paramGroupedSlice); ok && p.Soft {
+ softGroupsQueue = append(softGroupsQueue, f)
+ continue
+ }
+ fields = append(fields, f)
+ }
+ fields = append(fields, softGroupsQueue...)
+ for _, f := range fields {
+ v, err := f.Build(c)
+ if err != nil {
+ return dest, err
+ }
+ dest.Field(f.FieldIndex).Set(v)
+ }
+ return dest, nil
+}
+
+// paramObjectField is a single field of a dig.In struct.
+type paramObjectField struct {
+ // Name of the field in the struct.
+ FieldName string
+
+ // Index of this field in the target struct.
+ //
+ // We need to track this separately because not all fields of the
+ // struct map to params.
+ FieldIndex int
+
+ // The dependency requested by this field.
+ Param param
+}
+
+func (pof paramObjectField) DotParam() []*dot.Param {
+ return pof.Param.DotParam()
+}
+
+func newParamObjectField(idx int, f reflect.StructField, c containerStore) (paramObjectField, error) {
+ pof := paramObjectField{
+ FieldName: f.Name,
+ FieldIndex: idx,
+ }
+
+ var p param
+ switch {
+ case f.PkgPath != "":
+ return pof, newErrInvalidInput(
+ fmt.Sprintf("unexported fields not allowed in dig.In, did you mean to export %q (%v)?", f.Name, f.Type), nil)
+
+ case f.Tag.Get(_groupTag) != "":
+ var err error
+ p, err = newParamGroupedSlice(f, c)
+ if err != nil {
+ return pof, err
+ }
+
+ default:
+ var err error
+ p, err = newParam(f.Type, c)
+ if err != nil {
+ return pof, err
+ }
+ }
+
+ if ps, ok := p.(paramSingle); ok {
+ ps.Name = f.Tag.Get(_nameTag)
+
+ var err error
+ ps.Optional, err = isFieldOptional(f)
+ if err != nil {
+ return pof, err
+ }
+
+ p = ps
+ }
+
+ pof.Param = p
+ return pof, nil
+}
+
+func (pof paramObjectField) Build(c containerStore) (reflect.Value, error) {
+ v, err := pof.Param.Build(c)
+ if err != nil {
+ return v, err
+ }
+ return v, nil
+}
+
+// paramGroupedSlice is a param which produces a slice of values with the same
+// group name.
+type paramGroupedSlice struct {
+ // Name of the group as specified in the `group:".."` tag.
+ Group string
+
+ // Type of the slice.
+ Type reflect.Type
+
+ // Soft is used to denote a soft dependency between this param and its
+ // constructors, if it's true its constructors are only called if they
+ // provide another value requested in the graph
+ Soft bool
+
+ orders map[*Scope]int
+}
+
+func (pt paramGroupedSlice) String() string {
+ // io.Reader[group="foo"] refers to a group of io.Readers called 'foo'
+ return fmt.Sprintf("%v[group=%q]", pt.Type.Elem(), pt.Group)
+}
+
+func (pt paramGroupedSlice) DotParam() []*dot.Param {
+ return []*dot.Param{
+ {
+ Node: &dot.Node{
+ Type: pt.Type,
+ Group: pt.Group,
+ },
+ },
+ }
+}
+
+// newParamGroupedSlice builds a paramGroupedSlice from the provided type with
+// the given name.
+//
+// The type MUST be a slice type.
+func newParamGroupedSlice(f reflect.StructField, c containerStore) (paramGroupedSlice, error) {
+ g, err := parseGroupString(f.Tag.Get(_groupTag))
+ if err != nil {
+ return paramGroupedSlice{}, err
+ }
+ pg := paramGroupedSlice{
+ Group: g.Name,
+ Type: f.Type,
+ orders: make(map[*Scope]int),
+ Soft: g.Soft,
+ }
+
+ name := f.Tag.Get(_nameTag)
+ optional, _ := isFieldOptional(f)
+ switch {
+ case f.Type.Kind() != reflect.Slice:
+ return pg, newErrInvalidInput(
+ fmt.Sprintf("value groups may be consumed as slices only: field %q (%v) is not a slice", f.Name, f.Type), nil)
+ case g.Flatten:
+ return pg, newErrInvalidInput(
+ fmt.Sprintf("cannot use flatten in parameter value groups: field %q (%v) specifies flatten", f.Name, f.Type), nil)
+ case name != "":
+ return pg, newErrInvalidInput(
+ fmt.Sprintf("cannot use named values with value groups: name:%q requested with group:%q", name, pg.Group), nil)
+ case optional:
+ return pg, newErrInvalidInput("value groups cannot be optional", nil)
+ }
+ c.newGraphNode(&pg, pg.orders)
+ return pg, nil
+}
+
+// retrieves any decorated values that may be committed in this scope, or
+// any of the parent Scopes. In the case where there are multiple scopes that
+// are decorating the same type, the closest scope in effect will be replacing
+// any decorated value groups provided in further scopes.
+func (pt paramGroupedSlice) getDecoratedValues(c containerStore) (reflect.Value, bool) {
+ for _, c := range c.storesToRoot() {
+ if items, ok := c.getDecoratedValueGroup(pt.Group, pt.Type); ok {
+ return items, true
+ }
+ }
+ return _noValue, false
+}
+
+// search the given container and its parents for matching group decorators
+// and call them to commit values. If any decorators return an error,
+// that error is returned immediately. If all decorators succeeds, nil is returned.
+// The order in which the decorators are invoked is from the top level scope to
+// the current scope, to account for decorators that decorate values that were
+// already decorated.
+func (pt paramGroupedSlice) callGroupDecorators(c containerStore) error {
+ stores := c.storesToRoot()
+ for i := len(stores) - 1; i >= 0; i-- {
+ c := stores[i]
+ if d, found := c.getGroupDecorator(pt.Group, pt.Type.Elem()); found {
+ if d.State() == decoratorOnStack {
+ // This decorator is already being run. Avoid cycle
+ // and look further.
+ continue
+ }
+ if err := d.Call(c); err != nil {
+ return errParamGroupFailed{
+ CtorID: d.ID(),
+ Key: key{group: pt.Group, t: pt.Type.Elem()},
+ Reason: err,
+ }
+ }
+ }
+ }
+ return nil
+}
+
+// search the given container and its parent for matching group providers and
+// call them to commit values. If an error is encountered, return the number
+// of providers called and a non-nil error from the first provided.
+func (pt paramGroupedSlice) callGroupProviders(c containerStore) (int, error) {
+ itemCount := 0
+ for _, c := range c.storesToRoot() {
+ providers := c.getGroupProviders(pt.Group, pt.Type.Elem())
+ itemCount += len(providers)
+ for _, n := range providers {
+ if err := n.Call(c); err != nil {
+ return 0, errParamGroupFailed{
+ CtorID: n.ID(),
+ Key: key{group: pt.Group, t: pt.Type.Elem()},
+ Reason: err,
+ }
+ }
+ }
+ }
+ return itemCount, nil
+}
+
+func (pt paramGroupedSlice) Build(c containerStore) (reflect.Value, error) {
+ // do not call this if we are already inside a decorator since
+ // it will result in an infinite recursion. (i.e. decorate -> params.BuildList() -> Decorate -> params.BuildList...)
+ // this is safe since a value can be decorated at most once in a given scope.
+ if err := pt.callGroupDecorators(c); err != nil {
+ return _noValue, err
+ }
+
+ // Check if we have decorated values
+ if decoratedItems, ok := pt.getDecoratedValues(c); ok {
+ return decoratedItems, nil
+ }
+
+ // If we do not have any decorated values and the group isn't soft,
+ // find the providers and call them.
+ itemCount := 0
+ if !pt.Soft {
+ var err error
+ itemCount, err = pt.callGroupProviders(c)
+ if err != nil {
+ return _noValue, err
+ }
+ }
+
+ stores := c.storesToRoot()
+ result := reflect.MakeSlice(pt.Type, 0, itemCount)
+ for _, c := range stores {
+ result = reflect.Append(result, c.getValueGroup(pt.Group, pt.Type.Elem())...)
+ }
+ return result, nil
+}
+
+// Checks if ignoring unexported files in an In struct is allowed.
+// The struct field MUST be an _inType.
+func isIgnoreUnexportedSet(f reflect.StructField) (bool, error) {
+ tag := f.Tag.Get(_ignoreUnexportedTag)
+ if tag == "" {
+ return false, nil
+ }
+
+ allowed, err := strconv.ParseBool(tag)
+ if err != nil {
+ err = newErrInvalidInput(
+ fmt.Sprintf("invalid value %q for %q tag on field %v", tag, _ignoreUnexportedTag, f.Name), err)
+ }
+
+ return allowed, err
+}
diff --git a/vendor/go.uber.org/dig/provide.go b/vendor/go.uber.org/dig/provide.go
new file mode 100644
index 000000000..f565e7323
--- /dev/null
+++ b/vendor/go.uber.org/dig/provide.go
@@ -0,0 +1,665 @@
+// Copyright (c) 2021 Uber Technologies, Inc.
+//
+// Permission is hereby granted, free of charge, to any person obtaining a copy
+// of this software and associated documentation files (the "Software"), to deal
+// in the Software without restriction, including without limitation the rights
+// to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
+// copies of the Software, and to permit persons to whom the Software is
+// furnished to do so, subject to the following conditions:
+//
+// The above copyright notice and this permission notice shall be included in
+// all copies or substantial portions of the Software.
+//
+// THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+// IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+// FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
+// AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
+// LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
+// OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
+// THE SOFTWARE.
+
+package dig
+
+import (
+ "bytes"
+ "fmt"
+ "reflect"
+ "strings"
+
+ "go.uber.org/dig/internal/digreflect"
+ "go.uber.org/dig/internal/dot"
+ "go.uber.org/dig/internal/graph"
+)
+
+// A ProvideOption modifies the default behavior of Provide.
+type ProvideOption interface {
+ applyProvideOption(*provideOptions)
+}
+
+type provideOptions struct {
+ Name string
+ Group string
+ Info *ProvideInfo
+ As []interface{}
+ Location *digreflect.Func
+ Exported bool
+ Callback Callback
+}
+
+func (o *provideOptions) Validate() error {
+ if len(o.Group) > 0 {
+ if len(o.Name) > 0 {
+ return newErrInvalidInput(
+ fmt.Sprintf("cannot use named values with value groups: name:%q provided with group:%q", o.Name, o.Group), nil)
+ }
+ }
+
+ // Names must be representable inside a backquoted string. The only
+ // limitation for raw string literals as per
+ // https://golang.org/ref/spec#raw_string_lit is that they cannot contain
+ // backquotes.
+ if strings.ContainsRune(o.Name, '`') {
+ return newErrInvalidInput(
+ fmt.Sprintf("invalid dig.Name(%q): names cannot contain backquotes", o.Name), nil)
+ }
+ if strings.ContainsRune(o.Group, '`') {
+ return newErrInvalidInput(
+ fmt.Sprintf("invalid dig.Group(%q): group names cannot contain backquotes", o.Group), nil)
+ }
+
+ for _, i := range o.As {
+ t := reflect.TypeOf(i)
+
+ if t == nil {
+ return newErrInvalidInput("invalid dig.As(nil): argument must be a pointer to an interface", nil)
+ }
+
+ if t.Kind() != reflect.Ptr {
+ return newErrInvalidInput(
+ fmt.Sprintf("invalid dig.As(%v): argument must be a pointer to an interface", t), nil)
+ }
+
+ pointingTo := t.Elem()
+ if pointingTo.Kind() != reflect.Interface {
+ return newErrInvalidInput(
+ fmt.Sprintf("invalid dig.As(*%v): argument must be a pointer to an interface", pointingTo), nil)
+ }
+ }
+ return nil
+}
+
+// Name is a ProvideOption that specifies that all values produced by a
+// constructor should have the given name. See also the package documentation
+// about Named Values.
+//
+// Given,
+//
+// func NewReadOnlyConnection(...) (*Connection, error)
+// func NewReadWriteConnection(...) (*Connection, error)
+//
+// The following will provide two connections to the container: one under the
+// name "ro" and the other under the name "rw".
+//
+// c.Provide(NewReadOnlyConnection, dig.Name("ro"))
+// c.Provide(NewReadWriteConnection, dig.Name("rw"))
+//
+// This option cannot be provided for constructors which produce result
+// objects.
+func Name(name string) ProvideOption {
+ return provideNameOption(name)
+}
+
+type provideNameOption string
+
+func (o provideNameOption) String() string {
+ return fmt.Sprintf("Name(%q)", string(o))
+}
+
+func (o provideNameOption) applyProvideOption(opt *provideOptions) {
+ opt.Name = string(o)
+}
+
+// Group is a ProvideOption that specifies that all values produced by a
+// constructor should be added to the specified group. See also the package
+// documentation about Value Groups.
+//
+// This option cannot be provided for constructors which produce result
+// objects.
+func Group(group string) ProvideOption {
+ return provideGroupOption(group)
+}
+
+type provideGroupOption string
+
+func (o provideGroupOption) String() string {
+ return fmt.Sprintf("Group(%q)", string(o))
+}
+
+func (o provideGroupOption) applyProvideOption(opt *provideOptions) {
+ opt.Group = string(o)
+}
+
+// ID is a unique integer representing the constructor node in the dependency graph.
+type ID int
+
+// ProvideInfo provides information about the constructor's inputs and outputs
+// types as strings, as well as the ID of the constructor supplied to the Container.
+// It contains ID for the constructor, as well as slices of Input and Output types,
+// which are Stringers that report the types of the parameters and results respectively.
+type ProvideInfo struct {
+ ID ID
+ Inputs []*Input
+ Outputs []*Output
+}
+
+// Input contains information on an input parameter of a function.
+type Input struct {
+ t reflect.Type
+ optional bool
+ name, group string
+}
+
+func (i *Input) String() string {
+ toks := make([]string, 0, 3)
+ t := i.t.String()
+ if i.optional {
+ toks = append(toks, "optional")
+ }
+ if i.name != "" {
+ toks = append(toks, fmt.Sprintf("name = %q", i.name))
+ }
+ if i.group != "" {
+ toks = append(toks, fmt.Sprintf("group = %q", i.group))
+ }
+
+ if len(toks) == 0 {
+ return t
+ }
+ return fmt.Sprintf("%v[%v]", t, strings.Join(toks, ", "))
+}
+
+// Output contains information on an output produced by a function.
+type Output struct {
+ t reflect.Type
+ name, group string
+}
+
+func (o *Output) String() string {
+ toks := make([]string, 0, 2)
+ t := o.t.String()
+ if o.name != "" {
+ toks = append(toks, fmt.Sprintf("name = %q", o.name))
+ }
+ if o.group != "" {
+ toks = append(toks, fmt.Sprintf("group = %q", o.group))
+ }
+
+ if len(toks) == 0 {
+ return t
+ }
+ return fmt.Sprintf("%v[%v]", t, strings.Join(toks, ", "))
+}
+
+// FillProvideInfo is a ProvideOption that writes info on what Dig was able to get
+// out of the provided constructor into the provided ProvideInfo.
+func FillProvideInfo(info *ProvideInfo) ProvideOption {
+ return fillProvideInfoOption{info: info}
+}
+
+type fillProvideInfoOption struct{ info *ProvideInfo }
+
+func (o fillProvideInfoOption) String() string {
+ return fmt.Sprintf("FillProvideInfo(%p)", o.info)
+}
+
+func (o fillProvideInfoOption) applyProvideOption(opts *provideOptions) {
+ opts.Info = o.info
+}
+
+// As is a ProvideOption that specifies that the value produced by the
+// constructor implements one or more other interfaces and is provided
+// to the container as those interfaces.
+//
+// As expects one or more pointers to the implemented interfaces. Values
+// produced by constructors will be then available in the container as
+// implementations of all of those interfaces, but not as the value itself.
+//
+// For example, the following will make io.Reader and io.Writer available
+// in the container, but not buffer.
+//
+// c.Provide(newBuffer, dig.As(new(io.Reader), new(io.Writer)))
+//
+// That is, the above is equivalent to the following.
+//
+// c.Provide(func(...) (io.Reader, io.Writer) {
+// b := newBuffer(...)
+// return b, b
+// })
+//
+// If used with dig.Name, the type produced by the constructor and the types
+// specified with dig.As will all use the same name. For example,
+//
+// c.Provide(newFile, dig.As(new(io.Reader)), dig.Name("temp"))
+//
+// The above is equivalent to the following.
+//
+// type Result struct {
+// dig.Out
+//
+// Reader io.Reader `name:"temp"`
+// }
+//
+// c.Provide(func(...) Result {
+// f := newFile(...)
+// return Result{
+// Reader: f,
+// }
+// })
+//
+// This option cannot be provided for constructors which produce result
+// objects.
+func As(i ...interface{}) ProvideOption {
+ return provideAsOption(i)
+}
+
+type provideAsOption []interface{}
+
+func (o provideAsOption) String() string {
+ buf := bytes.NewBufferString("As(")
+ for i, iface := range o {
+ if i > 0 {
+ buf.WriteString(", ")
+ }
+ buf.WriteString(reflect.TypeOf(iface).Elem().String())
+ }
+ buf.WriteString(")")
+ return buf.String()
+}
+
+func (o provideAsOption) applyProvideOption(opts *provideOptions) {
+ opts.As = append(opts.As, o...)
+}
+
+// LocationForPC is a ProvideOption which specifies an alternate function program
+// counter address to be used for debug information. The package, name, file and
+// line number of this alternate function address will be used in error messages
+// and DOT graphs. This option is intended to be used with functions created
+// with the reflect.MakeFunc method whose error messages are otherwise hard to
+// understand.
+func LocationForPC(pc uintptr) ProvideOption {
+ return provideLocationOption{
+ loc: digreflect.InspectFuncPC(pc),
+ }
+}
+
+type provideLocationOption struct{ loc *digreflect.Func }
+
+func (o provideLocationOption) String() string {
+ return fmt.Sprintf("LocationForPC(%v)", o.loc)
+}
+
+func (o provideLocationOption) applyProvideOption(opts *provideOptions) {
+ opts.Location = o.loc
+}
+
+// Export is a ProvideOption which specifies that the provided function should
+// be made available to all Scopes available in the application, regardless
+// of which Scope it was provided from. By default, it is false.
+//
+// For example,
+//
+// c := New()
+// s1 := c.Scope("child 1")
+//	s2 := c.Scope("child 2")
+// s1.Provide(func() *bytes.Buffer { ... })
+//
+// does not allow the constructor returning *bytes.Buffer to be made available to
+// the root Container c or its sibling Scope s2.
+//
+// With Export, you can make this constructor available to all the Scopes:
+//
+// s1.Provide(func() *bytes.Buffer { ... }, Export(true))
+func Export(export bool) ProvideOption {
+ return provideExportOption{exported: export}
+}
+
+type provideExportOption struct{ exported bool }
+
+func (o provideExportOption) String() string {
+ return fmt.Sprintf("Export(%v)", o.exported)
+}
+
+func (o provideExportOption) applyProvideOption(opts *provideOptions) {
+ opts.Exported = o.exported
+}
+
+// provider encapsulates a user-provided constructor.
+type provider interface {
+ // ID is a unique numerical identifier for this provider.
+ ID() dot.CtorID
+
+ // Order reports the order of this provider in the graphHolder.
+ // This value is usually returned by the graphHolder.NewNode method.
+ Order(*Scope) int
+
+ // Location returns where this constructor was defined.
+ Location() *digreflect.Func
+
+ // ParamList returns information about the direct dependencies of this
+ // constructor.
+ ParamList() paramList
+
+ // ResultList returns information about the values produced by this
+ // constructor.
+ ResultList() resultList
+
+ // Calls the underlying constructor, reading values from the
+ // containerStore as needed.
+ //
+ // The values produced by this provider should be submitted into the
+ // containerStore.
+ Call(containerStore) error
+
+ CType() reflect.Type
+
+ OrigScope() *Scope
+}
+
+// Provide teaches the container how to build values of one or more types and
+// expresses their dependencies.
+//
+// The first argument of Provide is a function that accepts zero or more
+// parameters and returns one or more results. The function may optionally
+// return an error to indicate that it failed to build the value. This
+// function will be treated as the constructor for all the types it returns.
+// This function will be called AT MOST ONCE when a type produced by it, or a
+// type that consumes this function's output, is requested via Invoke. If the
+// same types are requested multiple times, the previously produced value will
+// be reused.
+//
+// Provide accepts argument types or dig.In structs as dependencies, and
+// separate return values or dig.Out structs for results.
+func (c *Container) Provide(constructor interface{}, opts ...ProvideOption) error {
+ return c.scope.Provide(constructor, opts...)
+}
+
+// Provide teaches the Scope how to build values of one or more types and
+// expresses their dependencies.
+//
+// The first argument of Provide is a function that accepts zero or more
+// parameters and returns one or more results. The function may optionally
+// return an error to indicate that it failed to build the value. This
+// function will be treated as the constructor for all the types it returns.
+// This function will be called AT MOST ONCE when a type produced by it, or a
+// type that consumes this function's output, is requested via Invoke. If the
+// same types are requested multiple times, the previously produced value will
+// be reused.
+//
+// Provide accepts argument types or dig.In structs as dependencies, and
+// separate return values or dig.Out structs for results.
+//
+// When a constructor is Provided to a Scope, it will propagate this to any
+// Scopes that are descendants, but not ancestors of this Scope.
+// To provide a constructor to all the Scopes available, provide it to
+// Container, which is the root Scope.
+func (s *Scope) Provide(constructor interface{}, opts ...ProvideOption) error {
+ ctype := reflect.TypeOf(constructor)
+ if ctype == nil {
+ return newErrInvalidInput("can't provide an untyped nil", nil)
+ }
+ if ctype.Kind() != reflect.Func {
+ return newErrInvalidInput(
+ fmt.Sprintf("must provide constructor function, got %v (type %v)", constructor, ctype), nil)
+ }
+
+ var options provideOptions
+ for _, o := range opts {
+ o.applyProvideOption(&options)
+ }
+ if err := options.Validate(); err != nil {
+ return err
+ }
+
+ if err := s.provide(constructor, options); err != nil {
+ var errFunc *digreflect.Func
+ if options.Location == nil {
+ errFunc = digreflect.InspectFunc(constructor)
+ } else {
+ errFunc = options.Location
+ }
+
+ return errProvide{
+ Func: errFunc,
+ Reason: err,
+ }
+ }
+ return nil
+}
+
+func (s *Scope) provide(ctor interface{}, opts provideOptions) (err error) {
+ // If Export option is provided to the constructor, this should be injected to the
+ // root-level Scope (Container) to allow it to propagate to all other Scopes.
+ origScope := s
+ if opts.Exported {
+ s = s.rootScope()
+ }
+
+ // For all scopes affected by this change,
+ // take a snapshot of the current graph state before
+ // we start making changes to it as we may need to
+ // undo them upon encountering errors.
+ allScopes := s.appendSubscopes(nil)
+ for _, s := range allScopes {
+ s := s
+ s.gh.Snapshot()
+ defer func() {
+ if err != nil {
+ s.gh.Rollback()
+ }
+ }()
+ }
+
+ n, err := newConstructorNode(
+ ctor,
+ s,
+ origScope,
+ constructorOptions{
+ ResultName: opts.Name,
+ ResultGroup: opts.Group,
+ ResultAs: opts.As,
+ Location: opts.Location,
+ Callback: opts.Callback,
+ },
+ )
+ if err != nil {
+ return err
+ }
+
+ keys, err := s.findAndValidateResults(n.ResultList())
+ if err != nil {
+ return err
+ }
+
+ ctype := reflect.TypeOf(ctor)
+ if len(keys) == 0 {
+ return newErrInvalidInput(
+ fmt.Sprintf("%v must provide at least one non-error type", ctype), nil)
+ }
+
+ oldProviders := make(map[key][]*constructorNode)
+ for k := range keys {
+ // Cache old providers before running cycle detection.
+ oldProviders[k] = s.providers[k]
+ s.providers[k] = append(s.providers[k], n)
+ }
+
+ for _, s := range allScopes {
+ s.isVerifiedAcyclic = false
+ if s.deferAcyclicVerification {
+ continue
+ }
+ if ok, cycle := graph.IsAcyclic(s.gh); !ok {
+ // When a cycle is detected, recover the old providers to reset
+ // the providers map back to what it was before this node was
+ // introduced.
+ for k, ops := range oldProviders {
+ s.providers[k] = ops
+ }
+
+ return newErrInvalidInput("this function introduces a cycle", s.cycleDetectedError(cycle))
+ }
+ s.isVerifiedAcyclic = true
+ }
+
+ s.nodes = append(s.nodes, n)
+
+ // Record introspection info for caller if Info option is specified
+ if info := opts.Info; info != nil {
+ params := n.ParamList().DotParam()
+ results := n.ResultList().DotResult()
+
+ info.ID = (ID)(n.id)
+ info.Inputs = make([]*Input, len(params))
+ info.Outputs = make([]*Output, len(results))
+
+ for i, param := range params {
+ info.Inputs[i] = &Input{
+ t: param.Type,
+ optional: param.Optional,
+ name: param.Name,
+ group: param.Group,
+ }
+ }
+
+ for i, res := range results {
+ info.Outputs[i] = &Output{
+ t: res.Type,
+ name: res.Name,
+ group: res.Group,
+ }
+ }
+ }
+ return nil
+}
+
+// Builds a collection of all result types produced by this constructor.
+func (s *Scope) findAndValidateResults(rl resultList) (map[key]struct{}, error) {
+ var err error
+ keyPaths := make(map[key]string)
+ walkResult(rl, connectionVisitor{
+ s: s,
+ err: &err,
+ keyPaths: keyPaths,
+ })
+
+ if err != nil {
+ return nil, err
+ }
+
+ keys := make(map[key]struct{}, len(keyPaths))
+ for k := range keyPaths {
+ keys[k] = struct{}{}
+ }
+ return keys, nil
+}
+
+// Visits the results of a node and compiles a collection of all the keys
+// produced by that node.
+type connectionVisitor struct {
+ s *Scope
+
+ // If this points to a non-nil value, we've already encountered an error
+ // and should stop traversing.
+ err *error
+
+ // Map of keys provided to path that provided this. The path is a string
+ // documenting which positional return value or dig.Out attribute is
+ // providing this particular key.
+ //
+ // For example, "[0].Foo" indicates that the value was provided by the Foo
+ // attribute of the dig.Out returned as the first result of the
+ // constructor.
+ keyPaths map[key]string
+
+ // We track the path to the current result here. For example, this will
+ // be, ["[1]", "Foo", "Bar"] when we're visiting Bar in,
+ //
+ // func() (io.Writer, struct {
+ // dig.Out
+ //
+ // Foo struct {
+ // dig.Out
+ //
+ // Bar io.Reader
+ // }
+ // })
+ currentResultPath []string
+}
+
+func (cv connectionVisitor) AnnotateWithField(f resultObjectField) resultVisitor {
+ cv.currentResultPath = append(cv.currentResultPath, f.FieldName)
+ return cv
+}
+
+func (cv connectionVisitor) AnnotateWithPosition(i int) resultVisitor {
+ cv.currentResultPath = append(cv.currentResultPath, fmt.Sprintf("[%d]", i))
+ return cv
+}
+
+func (cv connectionVisitor) Visit(res result) resultVisitor {
+ // Already failed. Stop looking.
+ if *cv.err != nil {
+ return nil
+ }
+
+ path := strings.Join(cv.currentResultPath, ".")
+
+ switch r := res.(type) {
+
+ case resultSingle:
+ k := key{name: r.Name, t: r.Type}
+
+ if err := cv.checkKey(k, path); err != nil {
+ *cv.err = err
+ return nil
+ }
+ for _, asType := range r.As {
+ k := key{name: r.Name, t: asType}
+ if err := cv.checkKey(k, path); err != nil {
+ *cv.err = err
+ return nil
+ }
+ }
+
+ case resultGrouped:
+ // we don't really care about the path for this since conflicts are
+ // okay for group results. We'll track it for the sake of having a
+ // value there.
+ k := key{group: r.Group, t: r.Type}
+ cv.keyPaths[k] = path
+ for _, asType := range r.As {
+ k := key{group: r.Group, t: asType}
+ cv.keyPaths[k] = path
+ }
+ }
+
+ return cv
+}
+
+func (cv connectionVisitor) checkKey(k key, path string) error {
+ defer func() { cv.keyPaths[k] = path }()
+ if conflict, ok := cv.keyPaths[k]; ok {
+ return newErrInvalidInput(fmt.Sprintf("cannot provide %v from %v", k, path),
+ newErrInvalidInput(fmt.Sprintf("already provided by %v", conflict), nil))
+ }
+ if ps := cv.s.providers[k]; len(ps) > 0 {
+ cons := make([]string, len(ps))
+ for i, p := range ps {
+ cons[i] = fmt.Sprint(p.Location())
+ }
+
+ return newErrInvalidInput(fmt.Sprintf("cannot provide %v from %v", k, path),
+ newErrInvalidInput(fmt.Sprintf("already provided by %v", strings.Join(cons, "; ")), nil))
+ }
+ return nil
+}
diff --git a/vendor/go.uber.org/dig/result.go b/vendor/go.uber.org/dig/result.go
new file mode 100644
index 000000000..369cd218b
--- /dev/null
+++ b/vendor/go.uber.org/dig/result.go
@@ -0,0 +1,535 @@
+// Copyright (c) 2019-2021 Uber Technologies, Inc.
+//
+// Permission is hereby granted, free of charge, to any person obtaining a copy
+// of this software and associated documentation files (the "Software"), to deal
+// in the Software without restriction, including without limitation the rights
+// to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
+// copies of the Software, and to permit persons to whom the Software is
+// furnished to do so, subject to the following conditions:
+//
+// The above copyright notice and this permission notice shall be included in
+// all copies or substantial portions of the Software.
+//
+// THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+// IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+// FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
+// AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
+// LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
+// OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
+// THE SOFTWARE.
+
+package dig
+
+import (
+ "fmt"
+ "reflect"
+
+ "go.uber.org/dig/internal/digerror"
+ "go.uber.org/dig/internal/dot"
+)
+
+// The result interface represents a result produced by a constructor.
+//
+// The following implementations exist:
+// resultList All values returned by the constructor.
+// resultSingle A single value produced by a constructor.
+// resultObject dig.Out struct where each field in the struct can be
+// another result.
+// resultGrouped A value produced by a constructor that is part of a value
+// group.
+
+type result interface {
+ // Extracts the values for this result from the provided value and
+ // stores them into the provided containerWriter.
+ //
+ // This MAY panic if the result does not consume a single value.
+ Extract(containerWriter, bool, reflect.Value)
+
+ // DotResult returns a slice of dot.Result(s).
+ DotResult() []*dot.Result
+}
+
+var (
+ _ result = resultSingle{}
+ _ result = resultObject{}
+ _ result = resultList{}
+ _ result = resultGrouped{}
+)
+
+type resultOptions struct {
+ // If set, this is the name of the associated result value.
+ //
+ // For Result Objects, name:".." tags on fields override this.
+ Name string
+ Group string
+ As []interface{}
+}
+
+// newResult builds a result from the given type.
+func newResult(t reflect.Type, opts resultOptions) (result, error) {
+ switch {
+ case IsIn(t) || (t.Kind() == reflect.Ptr && IsIn(t.Elem())) || embedsType(t, _inPtrType):
+ return nil, newErrInvalidInput(fmt.Sprintf(
+ "cannot provide parameter objects: %v embeds a dig.In", t), nil)
+ case isError(t):
+ return nil, newErrInvalidInput("cannot return an error here, return it from the constructor instead", nil)
+ case IsOut(t):
+ return newResultObject(t, opts)
+ case embedsType(t, _outPtrType):
+ return nil, newErrInvalidInput(fmt.Sprintf(
+ "cannot build a result object by embedding *dig.Out, embed dig.Out instead: %v embeds *dig.Out", t), nil)
+ case t.Kind() == reflect.Ptr && IsOut(t.Elem()):
+ return nil, newErrInvalidInput(fmt.Sprintf(
+ "cannot return a pointer to a result object, use a value instead: %v is a pointer to a struct that embeds dig.Out", t), nil)
+ case len(opts.Group) > 0:
+ g, err := parseGroupString(opts.Group)
+ if err != nil {
+ return nil, newErrInvalidInput(
+ fmt.Sprintf("cannot parse group %q", opts.Group), err)
+ }
+ rg := resultGrouped{Type: t, Group: g.Name, Flatten: g.Flatten}
+ if len(opts.As) > 0 {
+ var asTypes []reflect.Type
+ for _, as := range opts.As {
+ ifaceType := reflect.TypeOf(as).Elem()
+ if ifaceType == t {
+ continue
+ }
+ if !t.Implements(ifaceType) {
+ return nil, newErrInvalidInput(
+ fmt.Sprintf("invalid dig.As: %v does not implement %v", t, ifaceType), nil)
+ }
+ asTypes = append(asTypes, ifaceType)
+ }
+ if len(asTypes) > 0 {
+ rg.Type = asTypes[0]
+ rg.As = asTypes[1:]
+ }
+ }
+ if g.Soft {
+ return nil, newErrInvalidInput(fmt.Sprintf(
+ "cannot use soft with result value groups: soft was used with group:%q", g.Name), nil)
+ }
+ if g.Flatten {
+ if t.Kind() != reflect.Slice {
+ return nil, newErrInvalidInput(fmt.Sprintf(
+ "flatten can be applied to slices only: %v is not a slice", t), nil)
+ }
+ rg.Type = rg.Type.Elem()
+ }
+ return rg, nil
+ default:
+ return newResultSingle(t, opts)
+ }
+}
+
+// resultVisitor visits every result in a result tree, allowing tracking state
+// at each level.
+type resultVisitor interface {
+ // Visit is called on the result being visited.
+ //
+ // If Visit returns a non-nil resultVisitor, that resultVisitor visits all
+ // the child results of this result.
+ Visit(result) resultVisitor
+
+ // AnnotateWithField is called on each field of a resultObject after
+ // visiting it but before walking its descendants.
+ //
+ // The same resultVisitor is used for all fields: the one returned upon
+ // visiting the resultObject.
+ //
+ // For each visited field, if AnnotateWithField returns a non-nil
+ // resultVisitor, it will be used to walk the result of that field.
+ AnnotateWithField(resultObjectField) resultVisitor
+
+ // AnnotateWithPosition is called with the index of each result of a
+	// resultList after visiting it but before walking its descendants.
+ //
+ // The same resultVisitor is used for all results: the one returned upon
+ // visiting the resultList.
+ //
+ // For each position, if AnnotateWithPosition returns a non-nil
+ // resultVisitor, it will be used to walk the result at that index.
+ AnnotateWithPosition(idx int) resultVisitor
+}
+
+// walkResult walks the result tree for the given result with the provided
+// visitor.
+//
+// resultVisitor.Visit will be called on the provided result and if a non-nil
+// resultVisitor is received, it will be used to walk its descendants. If a
+// resultObject or resultList was visited, AnnotateWithField and
+// AnnotateWithPosition respectively will be called before visiting the
+// descendants of that resultObject/resultList.
+//
+// This is very similar to how go/ast.Walk works.
+func walkResult(r result, v resultVisitor) {
+ v = v.Visit(r)
+ if v == nil {
+ return
+ }
+
+ switch res := r.(type) {
+ case resultSingle, resultGrouped:
+ // No sub-results
+ case resultObject:
+ w := v
+ for _, f := range res.Fields {
+ if v := w.AnnotateWithField(f); v != nil {
+ walkResult(f.Result, v)
+ }
+ }
+ case resultList:
+ w := v
+ for i, r := range res.Results {
+ if v := w.AnnotateWithPosition(i); v != nil {
+ walkResult(r, v)
+ }
+ }
+ default:
+ digerror.BugPanicf("received unknown result type %T", res)
+ }
+}
+
+// resultList holds all values returned by the constructor as results.
+type resultList struct {
+ ctype reflect.Type
+
+ Results []result
+
+ // For each item at index i returned by the constructor, resultIndexes[i]
+ // is the index in .Results for the corresponding result object.
+ // resultIndexes[i] is -1 for errors returned by constructors.
+ resultIndexes []int
+}
+
+func (rl resultList) DotResult() []*dot.Result {
+ var types []*dot.Result
+ for _, result := range rl.Results {
+ types = append(types, result.DotResult()...)
+ }
+ return types
+}
+
+func newResultList(ctype reflect.Type, opts resultOptions) (resultList, error) {
+ numOut := ctype.NumOut()
+ rl := resultList{
+ ctype: ctype,
+ Results: make([]result, 0, numOut),
+ resultIndexes: make([]int, numOut),
+ }
+
+ resultIdx := 0
+ for i := 0; i < numOut; i++ {
+ t := ctype.Out(i)
+ if isError(t) {
+ rl.resultIndexes[i] = -1
+ continue
+ }
+
+ r, err := newResult(t, opts)
+ if err != nil {
+ return rl, newErrInvalidInput(fmt.Sprintf("bad result %d", i+1), err)
+ }
+
+ rl.Results = append(rl.Results, r)
+ rl.resultIndexes[i] = resultIdx
+ resultIdx++
+ }
+
+ return rl, nil
+}
+
+func (resultList) Extract(containerWriter, bool, reflect.Value) {
+ digerror.BugPanicf("resultList.Extract() must never be called")
+}
+
+func (rl resultList) ExtractList(cw containerWriter, decorated bool, values []reflect.Value) error {
+ for i, v := range values {
+ if resultIdx := rl.resultIndexes[i]; resultIdx >= 0 {
+ rl.Results[resultIdx].Extract(cw, decorated, v)
+ continue
+ }
+
+ if err, _ := v.Interface().(error); err != nil {
+ return err
+ }
+ }
+
+ return nil
+}
+
+// resultSingle is an explicit value produced by a constructor, optionally
+// with a name.
+//
+// This object will be added to the graph as-is.
+type resultSingle struct {
+ Name string
+ Type reflect.Type
+
+ // If specified, this is a list of types which the value will be made
+ // available as, in addition to its own type.
+ As []reflect.Type
+}
+
+func newResultSingle(t reflect.Type, opts resultOptions) (resultSingle, error) {
+ r := resultSingle{
+ Type: t,
+ Name: opts.Name,
+ }
+
+ var asTypes []reflect.Type
+
+ for _, as := range opts.As {
+ ifaceType := reflect.TypeOf(as).Elem()
+ if ifaceType == t {
+ // Special case:
+ // c.Provide(func() io.Reader, As(new(io.Reader)))
+ // Ignore instead of erroring out.
+ continue
+ }
+ if !t.Implements(ifaceType) {
+ return r, newErrInvalidInput(
+ fmt.Sprintf("invalid dig.As: %v does not implement %v", t, ifaceType), nil)
+ }
+ asTypes = append(asTypes, ifaceType)
+ }
+
+ if len(asTypes) == 0 {
+ return r, nil
+ }
+
+ return resultSingle{
+ Type: asTypes[0],
+ Name: opts.Name,
+ As: asTypes[1:],
+ }, nil
+}
+
+func (rs resultSingle) DotResult() []*dot.Result {
+ dotResults := make([]*dot.Result, 0, len(rs.As)+1)
+ dotResults = append(dotResults, &dot.Result{
+ Node: &dot.Node{
+ Type: rs.Type,
+ Name: rs.Name,
+ },
+ })
+
+ for _, asType := range rs.As {
+ dotResults = append(dotResults, &dot.Result{
+ Node: &dot.Node{Type: asType, Name: rs.Name},
+ })
+ }
+
+ return dotResults
+}
+
+func (rs resultSingle) Extract(cw containerWriter, decorated bool, v reflect.Value) {
+ if decorated {
+ cw.setDecoratedValue(rs.Name, rs.Type, v)
+ return
+ }
+ cw.setValue(rs.Name, rs.Type, v)
+
+ for _, asType := range rs.As {
+ cw.setValue(rs.Name, asType, v)
+ }
+}
+
+// resultObject is a dig.Out struct where each field is another result.
+//
+// This object is not added to the graph. Its fields are interpreted as
+// results and added to the graph if needed.
+type resultObject struct {
+ Type reflect.Type
+ Fields []resultObjectField
+}
+
+func (ro resultObject) DotResult() []*dot.Result {
+ var types []*dot.Result
+ for _, field := range ro.Fields {
+ types = append(types, field.DotResult()...)
+ }
+ return types
+}
+
+func newResultObject(t reflect.Type, opts resultOptions) (resultObject, error) {
+ ro := resultObject{Type: t}
+ if len(opts.Name) > 0 {
+ return ro, newErrInvalidInput(fmt.Sprintf(
+ "cannot specify a name for result objects: %v embeds dig.Out", t), nil)
+ }
+
+ if len(opts.Group) > 0 {
+ return ro, newErrInvalidInput(fmt.Sprintf(
+ "cannot specify a group for result objects: %v embeds dig.Out", t), nil)
+ }
+
+ for i := 0; i < t.NumField(); i++ {
+ f := t.Field(i)
+ if f.Type == _outType {
+ // Skip over the dig.Out embed.
+ continue
+ }
+
+ rof, err := newResultObjectField(i, f, opts)
+ if err != nil {
+ return ro, newErrInvalidInput(fmt.Sprintf("bad field %q of %v", f.Name, t), err)
+ }
+
+ ro.Fields = append(ro.Fields, rof)
+ }
+ return ro, nil
+}
+
+func (ro resultObject) Extract(cw containerWriter, decorated bool, v reflect.Value) {
+ for _, f := range ro.Fields {
+ f.Result.Extract(cw, decorated, v.Field(f.FieldIndex))
+ }
+}
+
+// resultObjectField is a single field inside a dig.Out struct.
+type resultObjectField struct {
+ // Name of the field in the struct.
+ FieldName string
+
+ // Index of the field in the struct.
+ //
+ // We need to track this separately because not all fields of the struct
+ // map to results.
+ FieldIndex int
+
+ // Result produced by this field.
+ Result result
+}
+
+func (rof resultObjectField) DotResult() []*dot.Result {
+ return rof.Result.DotResult()
+}
+
+// newResultObjectField(i, f, opts) builds a resultObjectField from the field
+// f at index i.
+func newResultObjectField(idx int, f reflect.StructField, opts resultOptions) (resultObjectField, error) {
+ rof := resultObjectField{
+ FieldName: f.Name,
+ FieldIndex: idx,
+ }
+
+ var r result
+ switch {
+ case f.PkgPath != "":
+ return rof, newErrInvalidInput(
+ fmt.Sprintf("unexported fields not allowed in dig.Out, did you mean to export %q (%v)?", f.Name, f.Type), nil)
+
+ case f.Tag.Get(_groupTag) != "":
+ var err error
+ r, err = newResultGrouped(f)
+ if err != nil {
+ return rof, err
+ }
+
+ default:
+ var err error
+ if name := f.Tag.Get(_nameTag); len(name) > 0 {
+ // can modify in-place because options are passed-by-value.
+ opts.Name = name
+ }
+ r, err = newResult(f.Type, opts)
+ if err != nil {
+ return rof, err
+ }
+ }
+
+ rof.Result = r
+ return rof, nil
+}
+
+// resultGrouped is a value produced by a constructor that is part of a result
+// group.
+//
+// These will be produced as fields of a dig.Out struct.
+type resultGrouped struct {
+ // Name of the group as specified in the `group:".."` tag.
+ Group string
+
+ // Type of value produced.
+ Type reflect.Type
+
+ // Indicates elements of a value are to be injected individually, instead of
+ // as a group. Requires the value's slice to be a group. If set, Type will be
+ // the type of individual elements rather than the group.
+ Flatten bool
+
+ // If specified, this is a list of types which the value will be made
+ // available as, in addition to its own type.
+ As []reflect.Type
+}
+
+func (rt resultGrouped) DotResult() []*dot.Result {
+ dotResults := make([]*dot.Result, 0, len(rt.As)+1)
+ dotResults = append(dotResults, &dot.Result{
+ Node: &dot.Node{
+ Type: rt.Type,
+ Group: rt.Group,
+ },
+ })
+
+ for _, asType := range rt.As {
+ dotResults = append(dotResults, &dot.Result{
+ Node: &dot.Node{Type: asType, Group: rt.Group},
+ })
+ }
+ return dotResults
+}
+
+// newResultGrouped(f) builds a new resultGrouped from the provided field.
+func newResultGrouped(f reflect.StructField) (resultGrouped, error) {
+ g, err := parseGroupString(f.Tag.Get(_groupTag))
+ if err != nil {
+ return resultGrouped{}, err
+ }
+ rg := resultGrouped{
+ Group: g.Name,
+ Flatten: g.Flatten,
+ Type: f.Type,
+ }
+ name := f.Tag.Get(_nameTag)
+ optional, _ := isFieldOptional(f)
+ switch {
+ case g.Flatten && f.Type.Kind() != reflect.Slice:
+ return rg, newErrInvalidInput(fmt.Sprintf(
+ "flatten can be applied to slices only: field %q (%v) is not a slice", f.Name, f.Type), nil)
+ case g.Soft:
+ return rg, newErrInvalidInput(fmt.Sprintf(
+ "cannot use soft with result value groups: soft was used with group %q", rg.Group), nil)
+ case name != "":
+ return rg, newErrInvalidInput(fmt.Sprintf(
+ "cannot use named values with value groups: name:%q provided with group:%q", name, rg.Group), nil)
+ case optional:
+ return rg, newErrInvalidInput("value groups cannot be optional", nil)
+ }
+ if g.Flatten {
+ rg.Type = f.Type.Elem()
+ }
+
+ return rg, nil
+}
+
+func (rt resultGrouped) Extract(cw containerWriter, decorated bool, v reflect.Value) {
+ // Decorated values are always flattened.
+ if !decorated && !rt.Flatten {
+ cw.submitGroupedValue(rt.Group, rt.Type, v)
+ for _, asType := range rt.As {
+ cw.submitGroupedValue(rt.Group, asType, v)
+ }
+ return
+ }
+
+ if decorated {
+ cw.submitDecoratedGroupedValue(rt.Group, rt.Type, v)
+ return
+ }
+ for i := 0; i < v.Len(); i++ {
+ cw.submitGroupedValue(rt.Group, rt.Type, v.Index(i))
+ }
+}
diff --git a/vendor/go.uber.org/dig/scope.go b/vendor/go.uber.org/dig/scope.go
new file mode 100644
index 000000000..216cf18a1
--- /dev/null
+++ b/vendor/go.uber.org/dig/scope.go
@@ -0,0 +1,321 @@
+// Copyright (c) 2021 Uber Technologies, Inc.
+//
+// Permission is hereby granted, free of charge, to any person obtaining a copy
+// of this software and associated documentation files (the "Software"), to deal
+// in the Software without restriction, including without limitation the rights
+// to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
+// copies of the Software, and to permit persons to whom the Software is
+// furnished to do so, subject to the following conditions:
+//
+// The above copyright notice and this permission notice shall be included in
+// all copies or substantial portions of the Software.
+//
+// THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+// IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+// FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
+// AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
+// LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
+// OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
+// THE SOFTWARE.
+
+package dig
+
+import (
+ "bytes"
+ "fmt"
+ "math/rand"
+ "reflect"
+ "sort"
+ "time"
+)
+
+// A ScopeOption modifies the default behavior of Scope; currently,
+// there are no implementations.
+type ScopeOption interface {
+ noScopeOption() //yet
+}
+
+// Scope is a scoped DAG of types and their dependencies.
+// A Scope may also have one or more child Scopes that inherit
+// from it.
+type Scope struct {
+ // This implements containerStore interface.
+
+ // Name of the Scope
+ name string
+ // Mapping from key to all the constructor node that can provide a value for that
+ // key.
+ providers map[key][]*constructorNode
+
+ // Mapping from key to the decorator that decorates a value for that key.
+ decorators map[key]*decoratorNode
+
+ // constructorNodes provided directly to this Scope. i.e. it does not include
+ // any nodes that were provided to the parent Scope this inherited from.
+ nodes []*constructorNode
+
+	// Values generated via decorators in the Scope.
+ decoratedValues map[key]reflect.Value
+
+	// Values generated directly in the Scope.
+ values map[key]reflect.Value
+
+	// Value groups generated directly in the Scope.
+ groups map[key][]reflect.Value
+
+	// Value groups generated via decorators in the Scope.
+ decoratedGroups map[key]reflect.Value
+
+ // Source of randomness.
+ rand *rand.Rand
+
+ // Flag indicating whether the graph has been checked for cycles.
+ isVerifiedAcyclic bool
+
+ // Defer acyclic check on provide until Invoke.
+ deferAcyclicVerification bool
+
+ // Recover from panics in user-provided code and wrap in an exported error type.
+ recoverFromPanics bool
+
+ // invokerFn calls a function with arguments provided to Provide or Invoke.
+ invokerFn invokerFn
+
+ // graph of this Scope. Note that this holds the dependency graph of all the
+ // nodes that affect this Scope, not just the ones provided directly to this Scope.
+ gh *graphHolder
+
+ // Parent of this Scope.
+ parentScope *Scope
+
+ // All the child scopes of this Scope.
+ childScopes []*Scope
+}
+
+func newScope() *Scope {
+ s := &Scope{
+ providers: make(map[key][]*constructorNode),
+ decorators: make(map[key]*decoratorNode),
+ values: make(map[key]reflect.Value),
+ decoratedValues: make(map[key]reflect.Value),
+ groups: make(map[key][]reflect.Value),
+ decoratedGroups: make(map[key]reflect.Value),
+ invokerFn: defaultInvoker,
+ rand: rand.New(rand.NewSource(time.Now().UnixNano())),
+ }
+ s.gh = newGraphHolder(s)
+ return s
+}
+
+// Scope creates a new Scope with the given name and options from current Scope.
+// Any constructors that the current Scope knows about, as well as any modifications
+// made to it in the future will be propagated to the child scope.
+// However, no modifications made to the child scope being created will be propagated
+// to the parent Scope.
+func (s *Scope) Scope(name string, opts ...ScopeOption) *Scope {
+ child := newScope()
+ child.name = name
+ child.parentScope = s
+ child.invokerFn = s.invokerFn
+ child.deferAcyclicVerification = s.deferAcyclicVerification
+ child.recoverFromPanics = s.recoverFromPanics
+
+ // child copies the parent's graph nodes.
+ child.gh.nodes = append(child.gh.nodes, s.gh.nodes...)
+
+ for _, opt := range opts {
+ opt.noScopeOption()
+ }
+
+ s.childScopes = append(s.childScopes, child)
+ return child
+}
+
+// ancestors returns a list of scopes of ancestors of this scope up to the
+// root. The scope at index 0 is this scope itself.
+func (s *Scope) ancestors() []*Scope {
+ var scopes []*Scope
+ for s := s; s != nil; s = s.parentScope {
+ scopes = append(scopes, s)
+ }
+ return scopes
+}
+
+func (s *Scope) appendSubscopes(dest []*Scope) []*Scope {
+ dest = append(dest, s)
+ for _, cs := range s.childScopes {
+ dest = cs.appendSubscopes(dest)
+ }
+ return dest
+}
+
+func (s *Scope) storesToRoot() []containerStore {
+ scopes := s.ancestors()
+ stores := make([]containerStore, len(scopes))
+ for i, s := range scopes {
+ stores[i] = s
+ }
+ return stores
+}
+
+func (s *Scope) knownTypes() []reflect.Type {
+ typeSet := make(map[reflect.Type]struct{}, len(s.providers))
+ for k := range s.providers {
+ typeSet[k.t] = struct{}{}
+ }
+
+ types := make([]reflect.Type, 0, len(typeSet))
+ for t := range typeSet {
+ types = append(types, t)
+ }
+ sort.Sort(byTypeName(types))
+ return types
+}
+
+func (s *Scope) getValue(name string, t reflect.Type) (v reflect.Value, ok bool) {
+ v, ok = s.values[key{name: name, t: t}]
+ return
+}
+
+func (s *Scope) getDecoratedValue(name string, t reflect.Type) (v reflect.Value, ok bool) {
+ v, ok = s.decoratedValues[key{name: name, t: t}]
+ return
+}
+
+func (s *Scope) setValue(name string, t reflect.Type, v reflect.Value) {
+ s.values[key{name: name, t: t}] = v
+}
+
+func (s *Scope) setDecoratedValue(name string, t reflect.Type, v reflect.Value) {
+ s.decoratedValues[key{name: name, t: t}] = v
+}
+
+func (s *Scope) getValueGroup(name string, t reflect.Type) []reflect.Value {
+ items := s.groups[key{group: name, t: t}]
+ // shuffle the list so users don't rely on the ordering of grouped values
+ return shuffledCopy(s.rand, items)
+}
+
+func (s *Scope) getDecoratedValueGroup(name string, t reflect.Type) (reflect.Value, bool) {
+ items, ok := s.decoratedGroups[key{group: name, t: t}]
+ return items, ok
+}
+
+func (s *Scope) submitGroupedValue(name string, t reflect.Type, v reflect.Value) {
+ k := key{group: name, t: t}
+ s.groups[k] = append(s.groups[k], v)
+}
+
+func (s *Scope) submitDecoratedGroupedValue(name string, t reflect.Type, v reflect.Value) {
+ k := key{group: name, t: t}
+ s.decoratedGroups[k] = v
+}
+
+func (s *Scope) getValueProviders(name string, t reflect.Type) []provider {
+ return s.getProviders(key{name: name, t: t})
+}
+
+func (s *Scope) getGroupProviders(name string, t reflect.Type) []provider {
+ return s.getProviders(key{group: name, t: t})
+}
+
+func (s *Scope) getValueDecorator(name string, t reflect.Type) (decorator, bool) {
+ return s.getDecorators(key{name: name, t: t})
+}
+
+func (s *Scope) getGroupDecorator(name string, t reflect.Type) (decorator, bool) {
+ return s.getDecorators(key{group: name, t: t})
+}
+
+func (s *Scope) getDecorators(k key) (decorator, bool) {
+ d, found := s.decorators[k]
+ return d, found
+}
+
+func (s *Scope) getProviders(k key) []provider {
+ nodes := s.providers[k]
+ providers := make([]provider, len(nodes))
+ for i, n := range nodes {
+ providers[i] = n
+ }
+ return providers
+}
+
+func (s *Scope) getAllGroupProviders(name string, t reflect.Type) []provider {
+ return s.getAllProviders(key{group: name, t: t})
+}
+
+func (s *Scope) getAllValueProviders(name string, t reflect.Type) []provider {
+ return s.getAllProviders(key{name: name, t: t})
+}
+
+func (s *Scope) getAllProviders(k key) []provider {
+ allScopes := s.ancestors()
+ var providers []provider
+ for _, scope := range allScopes {
+ providers = append(providers, scope.getProviders(k)...)
+ }
+ return providers
+}
+
+func (s *Scope) invoker() invokerFn {
+ return s.invokerFn
+}
+
+// adds a new graphNode to this Scope and all of its descendant
+// scopes.
+func (s *Scope) newGraphNode(wrapped interface{}, orders map[*Scope]int) {
+ orders[s] = s.gh.NewNode(wrapped)
+ for _, cs := range s.childScopes {
+ cs.newGraphNode(wrapped, orders)
+ }
+}
+
+func (s *Scope) cycleDetectedError(cycle []int) error {
+ var path []cycleErrPathEntry
+ for _, n := range cycle {
+ if n, ok := s.gh.Lookup(n).(*constructorNode); ok {
+ path = append(path, cycleErrPathEntry{
+ Key: key{
+ t: n.CType(),
+ },
+ Func: n.Location(),
+ })
+ }
+ }
+ return errCycleDetected{Path: path, scope: s}
+}
+
+// Returns the root Scope that can be reached from this Scope.
+func (s *Scope) rootScope() *Scope {
+ curr := s
+ for curr.parentScope != nil {
+ curr = curr.parentScope
+ }
+ return curr
+}
+
+// String representation of the entire Scope
+func (s *Scope) String() string {
+ b := &bytes.Buffer{}
+ fmt.Fprintln(b, "nodes: {")
+ for k, vs := range s.providers {
+ for _, v := range vs {
+ fmt.Fprintln(b, "\t", k, "->", v)
+ }
+ }
+ fmt.Fprintln(b, "}")
+
+ fmt.Fprintln(b, "values: {")
+ for k, v := range s.values {
+ fmt.Fprintln(b, "\t", k, "=>", v)
+ }
+ for k, vs := range s.groups {
+ for _, v := range vs {
+ fmt.Fprintln(b, "\t", k, "=>", v)
+ }
+ }
+ fmt.Fprintln(b, "}")
+
+ return b.String()
+}
diff --git a/vendor/go.uber.org/dig/version.go b/vendor/go.uber.org/dig/version.go
new file mode 100644
index 000000000..0b55dc929
--- /dev/null
+++ b/vendor/go.uber.org/dig/version.go
@@ -0,0 +1,24 @@
+// Copyright (c) 2019 Uber Technologies, Inc.
+//
+// Permission is hereby granted, free of charge, to any person obtaining a copy
+// of this software and associated documentation files (the "Software"), to deal
+// in the Software without restriction, including without limitation the rights
+// to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
+// copies of the Software, and to permit persons to whom the Software is
+// furnished to do so, subject to the following conditions:
+//
+// The above copyright notice and this permission notice shall be included in
+// all copies or substantial portions of the Software.
+//
+// THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+// IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+// FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
+// AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
+// LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
+// OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
+// THE SOFTWARE.
+
+package dig
+
+// Version of the library.
+const Version = "1.17.0"
diff --git a/vendor/go.uber.org/dig/visualize.go b/vendor/go.uber.org/dig/visualize.go
new file mode 100644
index 000000000..d7ff32482
--- /dev/null
+++ b/vendor/go.uber.org/dig/visualize.go
@@ -0,0 +1,192 @@
+// Copyright (c) 2021 Uber Technologies, Inc.
+//
+// Permission is hereby granted, free of charge, to any person obtaining a copy
+// of this software and associated documentation files (the "Software"), to deal
+// in the Software without restriction, including without limitation the rights
+// to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
+// copies of the Software, and to permit persons to whom the Software is
+// furnished to do so, subject to the following conditions:
+//
+// The above copyright notice and this permission notice shall be included in
+// all copies or substantial portions of the Software.
+//
+// THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+// IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+// FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
+// AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
+// LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
+// OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
+// THE SOFTWARE.
+
+package dig
+
+import (
+ "errors"
+ "fmt"
+ "io"
+ "strconv"
+ "text/template"
+
+ "go.uber.org/dig/internal/dot"
+)
+
+// A VisualizeOption modifies the default behavior of Visualize.
+type VisualizeOption interface {
+ applyVisualizeOption(*visualizeOptions)
+}
+
+type visualizeOptions struct {
+ VisualizeError error
+}
+
+// VisualizeError includes a visualization of the given error in the output of
+// Visualize if an error was returned by Invoke or Provide.
+//
+// if err := c.Provide(...); err != nil {
+// dig.Visualize(c, w, dig.VisualizeError(err))
+// }
+//
+// This option has no effect if the error was nil or if it didn't contain any
+// information to visualize.
+func VisualizeError(err error) VisualizeOption {
+ return visualizeErrorOption{err}
+}
+
+type visualizeErrorOption struct{ err error }
+
+func (o visualizeErrorOption) String() string {
+ return fmt.Sprintf("VisualizeError(%v)", o.err)
+}
+
+func (o visualizeErrorOption) applyVisualizeOption(opt *visualizeOptions) {
+ opt.VisualizeError = o.err
+}
+
+func updateGraph(dg *dot.Graph, err error) error {
+ var errs []errVisualizer
+ // Unwrap error to find the root cause.
+ for {
+ if ev, ok := err.(errVisualizer); ok {
+ errs = append(errs, ev)
+ }
+ e := errors.Unwrap(err)
+ if e == nil {
+ break
+ }
+ err = e
+ }
+
+ // If there are no errVisualizers included, we do not modify the graph.
+ if len(errs) == 0 {
+ return nil
+ }
+
+ // We iterate in reverse because the last element is the root cause.
+ for i := len(errs) - 1; i >= 0; i-- {
+ errs[i].updateGraph(dg)
+ }
+
+ // Remove non-error entries from the graph for readability.
+ dg.PruneSuccess()
+
+ return nil
+}
+
+var _graphTmpl = template.Must(
+ template.New("DotGraph").
+ Funcs(template.FuncMap{
+ "quote": strconv.Quote,
+ }).
+ Parse(`digraph {
+ rankdir=RL;
+ graph [compound=true];
+ {{range $g := .Groups}}
+ {{- quote .String}} [{{.Attributes}}];
+ {{range .Results}}
+ {{- quote $g.String}} -> {{quote .String}};
+ {{end}}
+ {{end -}}
+ {{range $index, $ctor := .Ctors}}
+ subgraph cluster_{{$index}} {
+ {{ with .Package }}label = {{ quote .}};
+ {{ end -}}
+
+ constructor_{{$index}} [shape=plaintext label={{quote .Name}}];
+ {{with .ErrorType}}color={{.Color}};{{end}}
+ {{range .Results}}
+ {{- quote .String}} [{{.Attributes}}];
+ {{end}}
+ }
+ {{range .Params}}
+ constructor_{{$index}} -> {{quote .String}} [ltail=cluster_{{$index}}{{if .Optional}} style=dashed{{end}}];
+ {{end}}
+ {{range .GroupParams}}
+ constructor_{{$index}} -> {{quote .String}} [ltail=cluster_{{$index}}];
+ {{end -}}
+ {{end}}
+ {{range .Failed.TransitiveFailures}}
+ {{- quote .String}} [color=orange];
+ {{end -}}
+ {{range .Failed.RootCauses}}
+ {{- quote .String}} [color=red];
+ {{end}}
+}`))
+
+// Visualize parses the graph in Container c into DOT format and writes it to
+// io.Writer w.
+func Visualize(c *Container, w io.Writer, opts ...VisualizeOption) error {
+ dg := c.createGraph()
+
+ var options visualizeOptions
+ for _, o := range opts {
+ o.applyVisualizeOption(&options)
+ }
+
+ if options.VisualizeError != nil {
+ if err := updateGraph(dg, options.VisualizeError); err != nil {
+ return err
+ }
+ }
+
+ return _graphTmpl.Execute(w, dg)
+}
+
+// CanVisualizeError returns true if the error is an errVisualizer.
+func CanVisualizeError(err error) bool {
+ for {
+ if _, ok := err.(errVisualizer); ok {
+ return true
+ }
+ e := errors.Unwrap(err)
+ if e == nil {
+ break
+ }
+ err = e
+ }
+
+ return false
+}
+
+func (c *Container) createGraph() *dot.Graph {
+ return c.scope.createGraph()
+}
+
+func (s *Scope) createGraph() *dot.Graph {
+ dg := dot.NewGraph()
+
+ for _, n := range s.nodes {
+ dg.AddCtor(newDotCtor(n), n.paramList.DotParam(), n.resultList.DotResult())
+ }
+
+ return dg
+}
+
+func newDotCtor(n *constructorNode) *dot.Ctor {
+ return &dot.Ctor{
+ ID: n.id,
+ Name: n.location.Name,
+ Package: n.location.Package,
+ File: n.location.File,
+ Line: n.location.Line,
+ }
+}
diff --git a/vendor/go.uber.org/fx/.codecov.yml b/vendor/go.uber.org/fx/.codecov.yml
new file mode 100644
index 000000000..a64b47401
--- /dev/null
+++ b/vendor/go.uber.org/fx/.codecov.yml
@@ -0,0 +1,17 @@
+ignore:
+ - "docs/ex/**/*.go"
+
+coverage:
+ range: 80..100
+ round: down
+ precision: 2
+
+ status:
+ project: # measuring the overall project coverage
+ default: # context, you can create multiple ones with custom titles
+ enabled: yes # must be yes|true to enable this status
+ target: 90% # specify the target coverage for each commit status
+ # option: "auto" (must increase from parent commit or pull request base)
+ # option: "X%" a static target percentage to hit
+ if_not_found: success # if parent is not found report status as success, error, or failure
+ if_ci_failed: error # if ci fails report status as success, error, or failure
diff --git a/vendor/go.uber.org/fx/.gitignore b/vendor/go.uber.org/fx/.gitignore
new file mode 100644
index 000000000..70a7b4ccc
--- /dev/null
+++ b/vendor/go.uber.org/fx/.gitignore
@@ -0,0 +1,16 @@
+/vendor
+/.bench
+*.mem
+*.cpu
+*.test
+*.log
+*.out
+*.html
+*.coverprofile
+coverage.txt
+*.pprof
+/.bin
+/.cache
+/bin
+.vscode
+.mdoxcache
diff --git a/vendor/go.uber.org/fx/CHANGELOG.md b/vendor/go.uber.org/fx/CHANGELOG.md
new file mode 100644
index 000000000..9f58ac176
--- /dev/null
+++ b/vendor/go.uber.org/fx/CHANGELOG.md
@@ -0,0 +1,375 @@
+---
+sidebarDepth: 0
+search: false
+---
+
+# Changelog
+
+All notable changes to this project will be documented in this file.
+
+The format is based on [Keep a Changelog](http://keepachangelog.com/en/1.0.0/)
+and this project adheres to [Semantic Versioning](http://semver.org/spec/v2.0.0.html).
+
+## [1.19.2](https://github.com/uber-go/fx/compare/v1.19.1...v1.19.2) - 2023-02-21
+### Changed
+- Upgrade Dig dependency to v1.16.1.
+
+
+## [1.19.1](https://github.com/uber-go/fx/compare/v1.18.0...v1.19.1) - 2023-01-10
+### Changed
+- Calling `fx.Stop()` after the `App` has already stopped no longer errors out.
+
+### Fixed
+- Addressed a regression in 1.19.0 release which caused apps to ignore OS signals
+ after running for startTimeout duration.
+
+## [1.19.0](https://github.com/uber-go/fx/compare/v1.18.2...v1.19.0) - 2023-01-03
+### Added
+- `fx.RecoverFromPanics` Option which allows Fx to recover from user-provided constructors
+ and invoked functions.
+- `fx.Private` that allows the constructor to limit the scope of its outputs to the wrapping
+ `fx.Module`.
+- `ExitCode` ShutdownOption which allows setting custom exit code at the end of app
+ lifecycle.
+- `Wait` which returns a channel that can be used for waiting on application shutdown.
+- fxevent/ZapLogger now exposes `UseLogLevel` and `UseErrorLevel` methods to set
+ the level of the Zap logs produced by it.
+- Add lifecycle hook-convertible methods: `StartHook`, `StopHook`, `StartStopHook`
+ that can be used with more function signatures.
+
+### Changed
+- `fx.WithLogger` can now be passed at `fx.Module` level, setting custom logger at
+ `Module` scope instead of the whole `App`.
+
+### Fixed
+- `fx.OnStart` and `fx.OnStop` Annotations now work with annotated types that was
+ provided by the annotated constructor.
+- fxevent/ZapLogger: Errors from `fx.Supply` are now logged at `Error` level, not
+ `Info`.
+- A race condition in lifecycle Start/Stop methods.
+- Typos in docs.
+
+## [1.18.2](https://github.com/uber-go/fx/compare/v1.18.1...v1.18.2) - 2022-09-28
+
+### Added
+- Clarify ordering of `Invoke`s in `Module`s.
+
+### Fixed
+- Fix `Decorate` not being applied to transitive dependencies at root `App` level.
+
+## [1.18.1](https://github.com/uber-go/fx/compare/v1.18.0...v1.18.1) - 2022-08-08
+
+### Fixed
+- Fix a nil panic when `nil` is passed to `OnStart` and `OnStop` lifecycle methods.
+
+## [1.18.0](https://github.com/uber-go/fx/compare/v1.17.1...v1.18.0) - 2022-08-05
+
+### Added
+- Soft value groups that lets you specify value groups as best-effort dependencies.
+- `fx.OnStart` and `fx.OnStop` annotations which lets you annotate dependencies to provide
+ OnStart and OnStop lifecycle hooks.
+- A new `fxevent.Replaced` event written to `fxevent.Logger` following an `fx.Replace`.
+
+### Fixed
+- Upgrade Dig dependency to v1.14.1 to address a couple of issues with decorations. Refer to
+ Dig v1.14.1 release notes for more details.
+- `fx.WithLogger` no longer ignores decorations and replacements of types that
+ it depends on.
+- Don't run lifecycle hooks if the context for them has already expired.
+- `App.Start` and `App.Stop` no longer deadlock if the OnStart/OnStop hook
+ exits the current goroutine.
+- `fxevent.ConsoleLogger` no longer emits an extraneous argument for the
+ Supplied event.
+
+### Deprecated
+- `fx.Extract` in favor of `fx.Populate`.
+
+## [1.17.1](https://github.com/uber-go/fx/compare/v1.17.0...v1.17.1) - 2022-03-23
+
+### Added
+- Logging for provide/invoke/decorate now includes the associated `fx.Module` name.
+
+## [1.17.0](https://github.com/uber-go/fx/compare/v1.16.0...v1.17.0) - 2022-02-28
+
+### Added
+- Add `fx.Module` which scopes any modifications made to the dependency graph.
+- Add `fx.Decorate` and `fx.Replace` that lets you modify a dependency graph with decorators.
+- Add `fxevent.Decorated` event which gets emitted upon a dependency getting decorated.
+
+### Changed
+- `fx.Annotate`: Validate that `fx.In` or `fx.Out` structs are not passed to it.
+- `fx.Annotate`: Upon failure to Provide, the error contains the actual location
+ of the provided constructor.
+
+## [1.16.0](https://github.com/uber-go/fx/compare/v1.15.0...v1.16.0) - 2021-12-02
+
+### Added
+- Add the ability to provide a function as multiple interfaces at once using `fx.As`.
+
+### Changed
+- `fx.Annotate`: support variadic functions, and feeding value groups into them.
+
+### Fixed
+- Fix an issue where OnStop hooks weren't getting called on SIGINT on Windows.
+- Fix a data race between app.Done() and shutdown.
+
+## [1.15.0](https://github.com/uber-go/fx/compare/v1.14.2...v1.15.0) - 2021-11-08
+
+### Added
+- Add `fx.Annotate` to allow users to provide parameter and result tags easily without
+ having to create `fx.In` or `fx.Out` structs.
+- Add `fx.As` that allows users to annotate a constructor to provide its result type(s) as
+ interface(s) that they implement instead of the types themselves.
+
+### Fixed
+- Fix `fxevent.Stopped` not being logged when `App.Stop` is called.
+- Fix `fxevent.Started` or `fxevent.Stopped` not being logged when start or
+ stop times out.
+
+## [1.14.2](https://github.com/uber-go/fx/compare/v1.14.1...v1.14.2) - 2021-08-16
+
+### Changed
+- For `fxevent` console implementation: no longer log non-error case for `fxevent.Invoke`
+ event, while for zap implementation, start logging `fx.Invoking` case without stack.
+
+## [1.14.1](https://github.com/uber-go/fx/compare/v1.14.0...v1.14.1) - 2021-08-16
+
+### Changed
+- `fxevent.Invoked` was being logged at `Error` level even upon successful `Invoke`.
+ This was changed to log at `Info` level when `Invoke` succeeded.
+
+## [1.14.0](https://github.com/uber-go/fx/compare/v1.13.1...v1.14.0) - 2021-08-12
+
+### Added
+- Introduce the new `fx.WithLogger` option. Provide a constructor for
+ `fxevent.Logger` objects with it to customize how Fx logs events.
+- Add new `fxevent` package that exposes events from Fx in a structured way.
+ Use this to write custom logger implementations for use with the
+ `fx.WithLogger` option.
+- Expose and log additional information when lifecycle hooks time out.
+
+### Changed
+- Fx now emits structured JSON logs by default. These may be parsed and
+ processed by log ingestion systems.
+- `fxtest.Lifecycle` now logs to the provided `testing.TB` instead of stderr.
+- `fx.In` and `fx.Out` are now type aliases instead of structs.
+
+## [1.13.1](https://github.com/uber-go/fx/compare/v1.13.0...v1.13.1) - 2020-08-19
+
+### Fixed
+- Fix minimum version constraint for dig. `fx.ValidateGraph` requires at least
+ dig 1.10.
+
+## [1.13.0](https://github.com/uber-go/fx/compare/v1.12.0...v1.13.0) - 2020-06-16
+
+### Added
+- Added `fx.ValidateGraph` which allows graph cycle validation and dependency correctness
+ without running anything. This is useful if `fx.Invoke` has side effects, does I/O, etc.
+
+## [1.12.0](https://github.com/uber-go/fx/compare/v1.11.0...v1.12.0) - 2020-04-09
+
+### Added
+- Added `fx.Supply` to provide externally created values to Fx containers
+ without building anonymous constructors.
+
+### Changed
+- Drop library dependency on development tools.
+
+## [1.11.0](https://github.com/uber-go/fx/compare/v1.10.0...v1.11.0) - 2020-04-01
+
+### Added
+- Value groups can use the `flatten` option to indicate values in a slice should
+ be provided individually rather than providing the slice itself. See package
+ documentation for details.
+
+## [1.10.0](https://github.com/uber-go/fx/compare/v1.9.0...v1.10.0) - 2019-11-20
+
+### Added
+- All `fx.Option`s now include readable string representations.
+- Report stack traces when `fx.Provide` and `fx.Invoke` calls fail. This
+ should make these errors more debuggable.
+
+### Changed
+- Migrated to Go modules.
+
+## [1.9.0](https://github.com/uber-go/fx/compare/v1.8.0...v1.9.0) - 2019-01-22
+
+### Added
+- Add the ability to shutdown Fx applications from inside the container. See
+ the Shutdowner documentation for details.
+- Add `fx.Annotated` to allow users to provide named values without creating a
+ new constructor.
+
+## [1.8.0](https://github.com/uber-go/fx/compare/v1.7.1...v1.8.0) - 2018-11-06
+
+### Added
+- Provide DOT graph of dependencies in the container.
+
+## [1.7.1](https://github.com/uber-go/fx/compare/v1.7.0...v1.7.1) - 2018-09-26
+
+### Fixed
+- Make `fxtest.New` ensure that the app was created successfully. Previously,
+ it would return the app (similar to `fx.New`, which expects the user to verify
+ the error).
+- Update dig container to defer acyclic validation until after Invoke. Application
+ startup time should improve proportional to the size of the dependency graph.
+- Fix a goroutine leak in `fxtest.Lifecycle`.
+
+## [1.7.0](https://github.com/uber-go/fx/compare/v1.6.0...v1.7.0) - 2018-08-16
+
+### Added
+- Add `fx.ErrorHook` option to allow users to provide `ErrorHandler`s on invoke
+ failures.
+- `VisualizeError` returns the visualization wrapped in the error if available.
+
+## [1.6.0](https://github.com/uber-go/fx/compare/v1.5.0...v1.6.0) - 2018-06-12
+
+### Added
+- Add `fx.Error` option to short-circuit application startup.
+
+## [1.5.0](https://github.com/uber-go/fx/compare/v1.4.0...v1.5.0) - 2018-04-11
+
+### Added
+- Add `fx.StartTimeout` and `fx.StopTimeout` to make configuring application
+ start and stop timeouts easier.
+- Export the default start and stop timeout as `fx.DefaultTimeout`.
+
+### Fixed
+- Make `fxtest` respect the application's start and stop timeouts.
+
+## [1.4.0](https://github.com/uber-go/fx/compare/v1.3.0...v1.4.0) - 2017-12-07
+
+### Added
+- Add `fx.Populate` to populate variables with values from the dependency
+ injection container without requiring intermediate structs.
+
+## [1.3.0](https://github.com/uber-go/fx/compare/v1.2.0...v1.3.0) - 2017-11-28
+
+### Changed
+- Improve readability of hook logging in addition to provide and invoke.
+
+### Fixed
+- Fix bug which caused the OnStop for a lifecycle hook to be called even if it
+ failed to start.
+
+## [1.2.0](https://github.com/uber-go/fx/compare/v1.1.0...v1.2.0) - 2017-09-06
+
+### Added
+- Add `fx.NopLogger` which disables the Fx application's log output.
+
+## [1.1.0](https://github.com/uber-go/fx/compare/v1.0.0...v1.1.0) - 2017-08-22
+
+### Changed
+- Improve readability of start up logging.
+
+## [1.0.0](https://github.com/uber-go/fx/compare/v1.0.0-rc2...v1.0.0) - 2017-07-31
+
+First stable release: no breaking changes will be made in the 1.x series.
+
+### Added
+- `fx.Extract` now supports `fx.In` tags on target structs.
+
+### Changed
+- **[Breaking]** Rename `fx.Inject` to `fx.Extract`.
+- **[Breaking]** Rename `fxtest.Must*` to `fxtest.Require*`.
+
+### Removed
+- **[Breaking]** Remove `fx.Timeout` and `fx.DefaultTimeout`.
+
+## [1.0.0-rc2](https://github.com/uber-go/fx/compare/v1.0.0-rc1...v1.0.0-rc2) - 2017-07-21
+
+- **[Breaking]** Lifecycle hooks now take a context.
+- Add `fx.In` and `fx.Out` which exposes optional and named types.
+ Modules should embed these types instead of relying on `dig.In` and `dig.Out`.
+- Add an `Err` method to retrieve the underlying errors during the dependency
+ graph construction. The same error is also returned from `Start`.
+- Graph resolution now happens as part of `fx.New`, rather than at the beginning
+ of `app.Start`. This allows inspection of the graph errors through `app.Err()`
+ before the decision to start the app.
+- Add a `Logger` option, which allows users to send Fx's logs to different
+ sink.
+- Add `fxtest.App`, which redirects log output to the user's `testing.TB` and
+ provides some lifecycle helpers.
+
+## [1.0.0-rc1](https://github.com/uber-go/fx/compare/v1.0.0-beta4...v1.0.0-rc1) - 2017-06-20
+
+- **[Breaking]** Providing types into `fx.App` and invoking functions are now
+ options passed during application construction. This makes users'
+ interactions with modules and collections of modules identical.
+- **[Breaking]** `TestLifecycle` is now in a separate `fxtest` subpackage.
+- Add `fx.Inject()` to pull values from the container into a struct.
+
+## [1.0.0-beta4](https://github.com/uber-go/fx/compare/v1.0.0-beta3...v1.0.0-beta4) - 2017-06-12
+
+- **[Breaking]** Monolithic framework, as released in initial betas, has been
+ broken into smaller pieces as a result of recent advances in `dig` library.
+ This is a radical departure from the previous direction, but it needed to
+ be done for the long-term good of the project.
+- **[Breaking]** `Module interface` has been scoped all the way down to being
+ *a single dig constructor*. This allows for very sophisticated module
+ compositions. See `go.uber.org/dig` for more information on the constructors.
+- **[Breaking]** `package config` has been moved to its own repository.
+ see `go.uber.org/config` for more information.
+- `fx.Lifecycle` has been added for modules to hook into the framework
+ lifecycle events.
+- `service.Host` interface which composed a number of primitives together
+ (configuration, metrics, tracing) has been deprecated in favor of
+ `fx.App`.
+
+## [1.0.0-beta3](https://github.com/uber-go/fx/compare/v1.0.0-beta2...v1.0.0-beta3) - 2017-03-28
+
+- **[Breaking]** Environment config provider was removed. If you were using
+ environment variables to override YAML values, see config documentation for
+ more information.
+- **[Breaking]** Simplify Provider interface: remove `Scope` method from the
+ `config.Provider` interface, one can use either ScopedProvider and Value.Get()
+ to access sub fields.
+- Add `task.MustRegister` convenience function which fails fast by panicking
+ Note that this should only be used during app initialization, and is provided
+  to avoid repetitive error checking for services which register many tasks.
+- Expose options on task module to disable execution. This will allow users to
+ enqueue and consume tasks on different clusters.
+- **[Breaking]** Rename Backend interface `Publish` to `Enqueue`. Created a new
+ `ExecuteAsync` method that will kick off workers to consume tasks and this is
+ subsumed by module Start.
+- **[Breaking]** Rename package `uhttp/client` to `uhttp/uhttpclient` for clarity.
+- **[Breaking]** Rename `PopulateStruct` method in value to `Populate`.
+ The method can now populate not only structs, but anything: slices,
+ maps, builtin types and maps.
+- **[Breaking]** `package dig` has moved from `go.uber.org/fx/dig` to a new home
+ at `go.uber.org/dig`.
+- **[Breaking]** Pass a tracer the `uhttp/uhttpclient` constructor explicitly, instead
+ of using a global tracer. This will allow to use http client in parallel tests.
+
+## [1.0.0-beta2](https://github.com/uber-go/fx/compare/v1.0.0-beta1...v1.0.0-beta2) - 2017-03-09
+
+- **[Breaking]** Remove `ulog.Logger` interface and expose `*zap.Logger` directly.
+- **[Breaking]** Rename config and module from `modules.rpc` to `modules.yarpc`
+- **[Breaking]** Rename config key from `modules.http` to `modules.uhttp` to match
+ the module name
+- **[Breaking]** Upgrade `zap` to `v1.0.0-rc.3` (now go.uber.org/zap, was
+ github.com/uber-go/zap)
+- Remove now-unused `config.IsDevelopmentEnv()` helper to encourage better
+ testing practices. Not a breaking change as nobody is using this func
+ themselves according to our code search tool.
+- Log `traceID` and `spanID` in hex format to match Jaeger UI. Upgrade Jaeger to
+ min version 2.1.0
+ and use jaeger's adapters for jaeger and tally initialization.
+- Tally now supports reporting histogram samples for a bucket. Upgrade Tally to 2.1.0
+- **[Breaking]** Make new module naming consistent `yarpc.ThriftModule` to
+ `yarpc.New`, `task.NewModule`
+ to `task.New`
+- **[Breaking]** Rename `yarpc.CreateThriftServiceFunc` to `yarpc.ServiceCreateFunc`
+ as it is not thrift-specific.
+- Report version metrics for company-wide version usage information.
+- Allow configurable service name and module name via service options.
+- DIG constructors now support returning a tuple with the second argument being
+ an error.
+
+## 1.0.0-beta1 - 2017-02-20
+
+This is the first beta release of the framework, where we invite users to start
+building services on it and provide us feedback. **Warning** we are not
+promising API compatibility between beta releases and the final 1.0.0 release.
+In fact, we expect our beta user feedback to require some changes to the way
+things work. Once we reach 1.0, we will provide proper version compatibility.
diff --git a/vendor/go.uber.org/fx/CONTRIBUTING.md b/vendor/go.uber.org/fx/CONTRIBUTING.md
new file mode 100644
index 000000000..810fe62e1
--- /dev/null
+++ b/vendor/go.uber.org/fx/CONTRIBUTING.md
@@ -0,0 +1,291 @@
+---
+sidebarDepth: 2
+search: false
+---
+
+# Contributing
+
+Thanks for helping to make Fx better for everyone!
+
+If you'd like to add new exported APIs,
+please [open an issue](https://github.com/uber-go/fx/issues/new)
+describing your proposal.
+Discussing API changes ahead of time makes pull request review much smoother.
+
+::: tip
+You'll need to sign [Uber's CLA](https://cla-assistant.io/uber-go/fx)
+before we can accept any of your contributions.
+If necessary, a bot will remind
+you to accept the CLA when you open your pull request.
+:::
+
+## Contribute code
+
+Set up your local development environment to contribute to Fx.
+
+1. [Fork](https://github.com/uber-go/fx/fork), then clone the repository.
+
+
+
+ ```bash
+ git clone https://github.com/your_github_username/fx.git
+ cd fx
+ git remote add upstream https://github.com/uber-go/fx.git
+ git fetch upstream
+ ```
+
+
+
+ ```bash
+ gh repo fork --clone uber-go/fx
+ ```
+
+
+
+2. Install Fx's dependencies:
+
+ ```bash
+ go mod download
+ ```
+
+3. Verify that tests and other checks pass locally.
+
+ ```bash
+ make lint
+ make test
+ ```
+
+ Note that for `make lint` to work,
+ you must be using the latest stable version of Go.
+ If you're on an older version, you can still contribute your change,
+ but we may discover style violations when you open the pull request.
+
+Next, make your changes.
+
+1. Create a new feature branch.
+
+ ```bash
+ git checkout master
+ git pull
+ git checkout -b cool_new_feature
+ ```
+
+2. Make your changes, and verify that all tests and lints still pass.
+
+ ```bash
+ $EDITOR app.go
+ make lint
+ make test
+ ```
+
+3. When you're satisfied with the change,
+ push it to your fork and make a pull request.
+
+
+
+ ```bash
+ git push origin cool_new_feature
+ # Open a PR at https://github.com/uber-go/fx/compare
+ ```
+
+
+
+ ```bash
+ gh pr create
+ ```
+
+
+
+At this point, you're waiting on us to review your changes.
+We *try* to respond to issues and pull requests within a few business days,
+and we may suggest some improvements or alternatives.
+Once your changes are approved, one of the project maintainers will merge them.
+
+The review process will go more smoothly if you:
+
+- add tests for new functionality
+- write a [good commit message](http://tbaggery.com/2008/04/19/a-note-about-git-commit-messages.html)
+- maintain backward compatibility
+- follow our [style guide](https://github.com/uber-go/guide/blob/master/style.md)
+
+## Contribute documentation
+
+To contribute documentation to Fx,
+
+1. Set up your local development environment
+ as you would to [contribute code](#contribute-code).
+
+2. Install the documentation website dependencies.
+
+ ```bash
+ cd docs
+ yarn install
+ ```
+
+3. Run the development server.
+
+ ```bash
+ yarn dev
+ ```
+
+4. Make your changes.
+
+Documentation changes should adhere to the guidance laid out below.
+
+### Document by purpose
+
+Documentation is organized in one of the following categories.
+
+- **Tutorials**: These hold step-by-step instructions for an end-to-end project
+ that a beginner could follow along to.
+ Don't spend time explaining things.
+ If explanations are available elsewhere, link to them.
+ These are entry points to answer the prompt,
+ "I don't know what Fx is, show me what it can do,"
+ so there won't be too many of these.
+- **Explanations**: These hold long-form explanations of concepts and ideas.
+ These are intended to build an understanding of Fx.
+ Feel free to go wild here--use learning aids like diagrams, tables, etc.
+- **How-tos**: These are step-by-step instructions for a *specific problem*.
+ Unlike tutorials, these are not meant to be end-to-end.
+ Feel free to leave things out, make assumptions,
+ or provide options ("if you're doing this, do this").
+ As with tutorials, don't spend time explaining;
+ link to explanations elsewhere.
+
+As an example,
+
+- A tutorial will use lifecycle hooks as part of
+ a larger set of instructions for a full end-to-end application.
+- An explanation will explain what lifecycle hooks are, how they work,
+ when and how you should use them, and link to relevant APIs and guides.
+- A how-to guide will demonstrate how to use lifecycle hooks
+ with an HTTP server, a gRPC server, etc.
+
+Explanations and how-to guides are often on the same page,
+but they should be in distinct sections.
+
+This separation is inspired by the
+[Divio documentation system](https://documentation.divio.com/).
+
+### Formatting
+
+#### ATX-style headers
+
+Use ATX-style headers (`#`-prefixed),
+not Setext-style (underlined with `===` or `---`).
+
+```markdown
+Bad header
+==========
+
+## Good header
+```
+
+#### Semantic Line Breaks
+
+- **Do not** write overly long lines of text
+- **Do not** "reflow" Markdown paragraphs
+- **Do** use [Semantic Line Breaks](https://sembr.org/) to break these lines down
+
+```markdown
+This is a bad paragraph because it's really long, all on one line. When I open this in a text editor, I'll have to scroll right.
+
+This is a bad paragraph because even though it's not all on one line, it adds
+line breaks when it reaches the line length limit. This means that anytime I
+change anything in this paragraph, I have to "reflow" it, which will change
+other lines and make the change I'm making more difficult to review.
+
+This is a good paragraph. It uses semantic line breaks.
+I can add words or modify an existing sentence,
+or even parts of a sentence,
+easily and without affecting other lines.
+When I change something, the actual change I made is easy to review.
+Markdown will reflow this into a "normal" paragraph when rendering.
+```
+
+### Test everything
+
+All code samples in documentation must be buildable and testable.
+
+To aid in this, we have two tools:
+
+- [mdox](https://github.com/bwplotka/mdox/)
+- the `region` shell script
+
+#### mdox
+
+mdox is a Markdown file formatter that includes support for
+running a command and using its output as part of a code block.
+To use this, declare a regular code block and tag it with `mdox-exec`.
+
+```markdown
+```go mdox-exec='cat foo.go'
+// ...
+```
+
+The contents of the code block will be replaced
+with the output of the command when you run `make fmt`
+in the docs directory.
+`make check` will ensure that the contents are up-to-date.
+
+The command runs with the working directory set to docs/.
+Store code samples in ex/ and reference them directly.
+
+#### region
+
+The `region` shell script is a command intended to be used with `mdox-exec`.
+
+```plain mdox-exec='region' mdox-expect-exit-code='1'
+USAGE: region FILE REGION1 REGION2 ...
+
+Extracts text from FILE marked by "// region" blocks.
+```
+
+For example, given the file:
+
+```
+foo
+// region myregion
+bar
+// endregion myregion
+baz
+```
+
+Running `region $FILE myregion` will print:
+
+```
+bar
+```
+
+The same region name may be used multiple times
+to pull different snippets from the same file.
+For example, given the file:
+
+```go
+// region provide-foo
+func main() {
+ fx.New(
+ fx.Provide(
+ NewFoo,
+ // endregion provide-foo
+ NewBar,
+ // region provide-foo
+ ),
+ ).Run()
+}
+
+// endregion provide-foo
+```
+
+`region $FILE provide-foo` will print,
+
+```go
+func main() {
+ fx.New(
+ fx.Provide(
+ NewFoo,
+ ),
+ ).Run()
+}
+```
diff --git a/vendor/github.com/libp2p/go-libp2p-core/LICENSE-MIT b/vendor/go.uber.org/fx/LICENSE
similarity index 87%
rename from vendor/github.com/libp2p/go-libp2p-core/LICENSE-MIT
rename to vendor/go.uber.org/fx/LICENSE
index ea532a830..20e81eaf1 100644
--- a/vendor/github.com/libp2p/go-libp2p-core/LICENSE-MIT
+++ b/vendor/go.uber.org/fx/LICENSE
@@ -1,4 +1,4 @@
-Copyright 2019. Protocol Labs, Inc.
+Copyright (c) 2016-2018 Uber Technologies, Inc.
Permission is hereby granted, free of charge, to any person obtaining a copy
of this software and associated documentation files (the "Software"), to deal
@@ -7,13 +7,13 @@ to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
copies of the Software, and to permit persons to whom the Software is
furnished to do so, subject to the following conditions:
-The above copyright notice and this permission notice shall be included in all
-copies or substantial portions of the Software.
+The above copyright notice and this permission notice shall be included in
+all copies or substantial portions of the Software.
THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
-OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
-SOFTWARE.
+OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
+THE SOFTWARE.
diff --git a/vendor/go.uber.org/fx/Makefile b/vendor/go.uber.org/fx/Makefile
new file mode 100644
index 000000000..54953fe94
--- /dev/null
+++ b/vendor/go.uber.org/fx/Makefile
@@ -0,0 +1,86 @@
+export GOBIN ?= $(shell pwd)/bin
+
+GOLINT = $(GOBIN)/golint
+STATICCHECK = $(GOBIN)/staticcheck
+FXLINT = $(GOBIN)/fxlint
+MDOX = $(GOBIN)/mdox
+
+GO_FILES = $(shell \
+ find . '(' -path '*/.*' -o -path './vendor' -o -path '*/testdata/*' ')' -prune \
+ -o -name '*.go' -print | cut -b3-)
+
+MODULES = . ./tools ./docs
+
+# 'make cover' should not run on docs by default.
+# We run that separately explicitly on a specific platform.
+COVER_MODULES ?= $(filter-out ./docs,$(MODULES))
+
+.PHONY: build
+build:
+ go build ./...
+
+.PHONY: install
+install:
+ go mod download
+
+.PHONY: test
+test:
+ @$(foreach dir,$(MODULES),(cd $(dir) && go test -race ./...) &&) true
+
+.PHONY: cover
+cover:
+ @$(foreach dir,$(COVER_MODULES), \
+ (cd $(dir) && \
+ echo "[cover] $(dir)" && \
+ go test -race -coverprofile=cover.out -coverpkg=./... ./... && \
+ go tool cover -html=cover.out -o cover.html) &&) true
+
+$(GOLINT): tools/go.mod
+ cd tools && go install golang.org/x/lint/golint
+
+$(STATICCHECK): tools/go.mod
+ cd tools && go install honnef.co/go/tools/cmd/staticcheck
+
+$(MDOX): tools/go.mod
+ cd tools && go install github.com/bwplotka/mdox
+
+$(FXLINT): tools/cmd/fxlint/main.go
+ cd tools && go install go.uber.org/fx/tools/cmd/fxlint
+
+.PHONY: lint
+lint: $(GOLINT) $(STATICCHECK) $(FXLINT) docs-check
+ @rm -rf lint.log
+ @echo "Checking formatting..."
+ @gofmt -d -s $(GO_FILES) 2>&1 | tee lint.log
+ @echo "Checking vet..."
+ @$(foreach dir,$(MODULES),(cd $(dir) && go vet ./... 2>&1) &&) true | tee -a lint.log
+ @echo "Checking lint..."
+ @$(foreach dir,$(MODULES),(cd $(dir) && $(GOLINT) ./... 2>&1) &&) true | tee -a lint.log
+ @echo "Checking staticcheck..."
+ @$(foreach dir,$(MODULES),(cd $(dir) && $(STATICCHECK) ./... 2>&1) &&) true | tee -a lint.log
+ @echo "Checking fxlint..."
+ @$(FXLINT) ./... | tee -a lint.log
+ @echo "Checking for unresolved FIXMEs..."
+ @git grep -i fixme | grep -v -e vendor -e Makefile -e .md | tee -a lint.log
+ @echo "Checking for license headers..."
+ @./checklicense.sh | tee -a lint.log
+ @[ ! -s lint.log ]
+ @echo "Checking 'go mod tidy'..."
+ @make tidy
+ @if ! git diff --quiet; then \
+ echo "'go mod tidy' resulted in changes or working tree is dirty:"; \
+ git --no-pager diff; \
+ fi
+
+.PHONY: docs
+docs:
+ cd docs && yarn build
+
+.PHONY: docs-check
+docs-check: $(MDOX)
+ @echo "Checking documentation"
+ @make -C docs check | tee -a lint.log
+
+.PHONY: tidy
+tidy:
+ @$(foreach dir,$(MODULES),(cd $(dir) && go mod tidy) &&) true
diff --git a/vendor/go.uber.org/fx/README.md b/vendor/go.uber.org/fx/README.md
new file mode 100644
index 000000000..985090a31
--- /dev/null
+++ b/vendor/go.uber.org/fx/README.md
@@ -0,0 +1,40 @@
+# :unicorn: Fx [![GoDoc](https://pkg.go.dev/badge/go.uber.org/fx)](https://pkg.go.dev/go.uber.org/fx) [![Github release](https://img.shields.io/github/release/uber-go/fx.svg)](https://github.com/uber-go/fx/releases) [![Build Status](https://github.com/uber-go/fx/actions/workflows/go.yml/badge.svg)](https://github.com/uber-go/fx/actions/workflows/go.yml) [![Coverage Status](https://codecov.io/gh/uber-go/fx/branch/master/graph/badge.svg)](https://codecov.io/gh/uber-go/fx/branch/master) [![Go Report Card](https://goreportcard.com/badge/go.uber.org/fx)](https://goreportcard.com/report/go.uber.org/fx)
+
+Fx is a dependency injection system for Go.
+
+**Benefits**
+
+- Eliminate globals: Fx helps you remove global state from your application.
+ No more `init()` or global variables. Use Fx-managed singletons.
+- Code reuse: Fx lets teams within your organization build loosely-coupled
+ and well-integrated shareable components.
+- Battle tested: Fx is the backbone of nearly all Go services at Uber.
+
+See our [docs](https://uber-go.github.io/fx/) to get started and/or
+learn more about Fx.
+
+## Installation
+
+Use Go modules to install Fx in your application.
+
+```shell
+go get go.uber.org/fx@v1
+```
+
+## Getting started
+
+To get started with Fx, [start here](https://uber-go.github.io/fx/get-started/).
+
+## Stability
+
+This library is `v1` and follows [SemVer](http://semver.org/) strictly.
+
+No breaking changes will be made to exported APIs before `v2.0.0`.
+
+This project follows the [Go Release Policy](https://golang.org/doc/devel/release.html#policy). Each major
+version of Go is supported until there are two newer major releases.
+
+## Stargazers over time
+
+[![Stargazers over time](https://starchart.cc/uber-go/fx.svg)](https://starchart.cc/uber-go/fx)
+
diff --git a/vendor/go.uber.org/fx/annotated.go b/vendor/go.uber.org/fx/annotated.go
new file mode 100644
index 000000000..d8a17e4dd
--- /dev/null
+++ b/vendor/go.uber.org/fx/annotated.go
@@ -0,0 +1,1658 @@
+// Copyright (c) 2020-2021 Uber Technologies, Inc.
+//
+// Permission is hereby granted, free of charge, to any person obtaining a copy
+// of this software and associated documentation files (the "Software"), to deal
+// in the Software without restriction, including without limitation the rights
+// to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
+// copies of the Software, and to permit persons to whom the Software is
+// furnished to do so, subject to the following conditions:
+//
+// The above copyright notice and this permission notice shall be included in
+// all copies or substantial portions of the Software.
+//
+// THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+// IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+// FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
+// AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
+// LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
+// OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
+// THE SOFTWARE.
+
+package fx
+
+import (
+ "context"
+ "errors"
+ "fmt"
+ "reflect"
+ "strings"
+
+ "go.uber.org/dig"
+ "go.uber.org/fx/internal/fxreflect"
+)
+
+// Annotated annotates a constructor provided to Fx with additional options.
+//
+// For example,
+//
+// func NewReadOnlyConnection(...) (*Connection, error)
+//
+// fx.Provide(fx.Annotated{
+// Name: "ro",
+// Target: NewReadOnlyConnection,
+// })
+//
+// Is equivalent to,
+//
+// type result struct {
+// fx.Out
+//
+// Connection *Connection `name:"ro"`
+// }
+//
+// fx.Provide(func(...) (result, error) {
+// conn, err := NewReadOnlyConnection(...)
+// return result{Connection: conn}, err
+// })
+//
+// Annotated cannot be used with constructors which produce fx.Out objects.
+//
+// When used with fx.Supply, the target is a value rather than a constructor function.
+type Annotated struct {
+ // If specified, this will be used as the name for all non-error values returned
+ // by the constructor. For more information on named values, see the documentation
+ // for the fx.Out type.
+ //
+ // A name option may not be provided if a group option is provided.
+ Name string
+
+ // If specified, this will be used as the group name for all non-error values returned
+ // by the constructor. For more information on value groups, see the package documentation.
+ //
+ // A group option may not be provided if a name option is provided.
+ //
+ // Similar to group tags, the group name may be followed by a `,flatten`
+ // option to indicate that each element in the slice returned by the
+ // constructor should be injected into the value group individually.
+ Group string
+
+ // Target is the constructor or value being annotated with fx.Annotated.
+ Target interface{}
+}
+
+func (a Annotated) String() string {
+ var fields []string
+ if len(a.Name) > 0 {
+ fields = append(fields, fmt.Sprintf("Name: %q", a.Name))
+ }
+ if len(a.Group) > 0 {
+ fields = append(fields, fmt.Sprintf("Group: %q", a.Group))
+ }
+ if a.Target != nil {
+ fields = append(fields, fmt.Sprintf("Target: %v", fxreflect.FuncName(a.Target)))
+ }
+ return fmt.Sprintf("fx.Annotated{%v}", strings.Join(fields, ", "))
+}
+
+var (
+ // field used for embedding fx.In type in generated struct.
+ _inAnnotationField = reflect.StructField{
+ Name: "In",
+ Type: reflect.TypeOf(In{}),
+ Anonymous: true,
+ }
+ // field used for embedding fx.Out type in generated struct.
+ _outAnnotationField = reflect.StructField{
+ Name: "Out",
+ Type: reflect.TypeOf(Out{}),
+ Anonymous: true,
+ }
+)
+
+// Annotation can be passed to Annotate(f interface{}, anns ...Annotation)
+// for annotating the parameter and result types of a function.
+type Annotation interface {
+ apply(*annotated) error
+ build(*annotated) (interface{}, error)
+}
+
+var (
+ _typeOfError reflect.Type = reflect.TypeOf((*error)(nil)).Elem()
+ _nilError = reflect.Zero(_typeOfError)
+)
+
+// annotationError is a wrapper for an error that was encountered while
+// applying annotation to a function. It contains the specific error
+// that it encountered as well as the target interface that was attempted
+// to be annotated.
+type annotationError struct {
+ target interface{}
+ err error
+}
+
+func (e *annotationError) Error() string {
+ return e.err.Error()
+}
+
+type paramTagsAnnotation struct {
+ tags []string
+}
+
+var _ Annotation = paramTagsAnnotation{}
+
+// Given func(T1, T2, T3, ..., TN), this generates a type roughly
+// equivalent to,
+//
+// struct {
+// fx.In
+//
+// Field1 T1 `$tags[0]`
+// Field2 T2 `$tags[1]`
+// ...
+// FieldN TN `$tags[N-1]`
+// }
+//
+// If there has already been a ParamTag that was applied, this
+// will return an error.
+
+func (pt paramTagsAnnotation) apply(ann *annotated) error {
+ if len(ann.ParamTags) > 0 {
+ return errors.New("cannot apply more than one line of ParamTags")
+ }
+ ann.ParamTags = pt.tags
+ return nil
+}
+
+// build builds and returns a constructor after applying a ParamTags annotation
+func (pt paramTagsAnnotation) build(ann *annotated) (interface{}, error) {
+ paramTypes, remap := pt.parameters(ann)
+ resultTypes, _ := ann.currentResultTypes()
+
+ origFn := reflect.ValueOf(ann.Target)
+ newFnType := reflect.FuncOf(paramTypes, resultTypes, false)
+ newFn := reflect.MakeFunc(newFnType, func(args []reflect.Value) []reflect.Value {
+ args = remap(args)
+ return origFn.Call(args)
+ })
+ return newFn.Interface(), nil
+}
+
+// parameters returns the type for the parameters of the annotated function,
+// and a function that maps the arguments of the annotated function
+// back to the arguments of the target function.
+func (pt paramTagsAnnotation) parameters(ann *annotated) (
+ types []reflect.Type,
+ remap func([]reflect.Value) []reflect.Value,
+) {
+ ft := reflect.TypeOf(ann.Target)
+ types = make([]reflect.Type, ft.NumIn())
+ for i := 0; i < ft.NumIn(); i++ {
+ types[i] = ft.In(i)
+ }
+
+ // No parameter annotations. Return the original types
+ // and an identity function.
+ if len(pt.tags) == 0 {
+ return types, func(args []reflect.Value) []reflect.Value {
+ return args
+ }
+ }
+
+ // Turn parameters into an fx.In struct.
+ inFields := []reflect.StructField{_inAnnotationField}
+
+ // there was a variadic argument, so it was pre-transformed
+ if len(types) > 0 && isIn(types[0]) {
+ paramType := types[0]
+
+ for i := 1; i < paramType.NumField(); i++ {
+ origField := paramType.Field(i)
+ field := reflect.StructField{
+ Name: origField.Name,
+ Type: origField.Type,
+ Tag: origField.Tag,
+ }
+ if i-1 < len(pt.tags) {
+ field.Tag = reflect.StructTag(pt.tags[i-1])
+ }
+
+ inFields = append(inFields, field)
+ }
+
+ types = []reflect.Type{reflect.StructOf(inFields)}
+ return types, func(args []reflect.Value) []reflect.Value {
+ param := args[0]
+ args[0] = reflect.New(paramType).Elem()
+ for i := 1; i < paramType.NumField(); i++ {
+ args[0].Field(i).Set(param.Field(i))
+ }
+ return args
+ }
+ }
+
+ for i, t := range types {
+ field := reflect.StructField{
+ Name: fmt.Sprintf("Field%d", i),
+ Type: t,
+ }
+ if i < len(pt.tags) {
+ field.Tag = reflect.StructTag(pt.tags[i])
+ }
+
+ inFields = append(inFields, field)
+ }
+
+ types = []reflect.Type{reflect.StructOf(inFields)}
+ return types, func(args []reflect.Value) []reflect.Value {
+ params := args[0]
+ args = args[:0]
+ for i := 0; i < ft.NumIn(); i++ {
+ args = append(args, params.Field(i+1))
+ }
+ return args
+ }
+}
+
+// ParamTags is an Annotation that annotates the parameter(s) of a function.
+// When multiple tags are specified, each tag is mapped to the corresponding
+// positional parameter.
+func ParamTags(tags ...string) Annotation {
+ return paramTagsAnnotation{tags}
+}
+
+type resultTagsAnnotation struct {
+ tags []string
+}
+
+var _ Annotation = resultTagsAnnotation{}
+
+// Given func(T1, T2, T3, ..., TN), this generates a type roughly
+// equivalent to,
+//
+// struct {
+// fx.Out
+//
+// Field1 T1 `$tags[0]`
+// Field2 T2 `$tags[1]`
+// ...
+// FieldN TN `$tags[N-1]`
+// }
+//
+// If there has already been a ResultTag that was applied, this
+// will return an error.
+func (rt resultTagsAnnotation) apply(ann *annotated) error {
+ if len(ann.ResultTags) > 0 {
+ return errors.New("cannot apply more than one line of ResultTags")
+ }
+ ann.ResultTags = rt.tags
+ return nil
+}
+
+// build builds and returns a constructor after applying a ResultTags annotation
+func (rt resultTagsAnnotation) build(ann *annotated) (interface{}, error) {
+ paramTypes := ann.currentParamTypes()
+ resultTypes, remapResults := rt.results(ann)
+ origFn := reflect.ValueOf(ann.Target)
+ newFnType := reflect.FuncOf(paramTypes, resultTypes, false)
+ newFn := reflect.MakeFunc(newFnType, func(args []reflect.Value) []reflect.Value {
+ results := origFn.Call(args)
+ return remapResults(results)
+ })
+ return newFn.Interface(), nil
+}
+
+// results returns the types of the results of the annotated function,
+// and a function that maps the results of the target function,
+// into a result compatible with the annotated function.
+func (rt resultTagsAnnotation) results(ann *annotated) (
+ types []reflect.Type,
+ remap func([]reflect.Value) []reflect.Value,
+) {
+ types, hasError := ann.currentResultTypes()
+
+ if hasError {
+ types = types[:len(types)-1]
+ }
+
+ // No result annotations. Return the original types
+ // and an identity function.
+ if len(rt.tags) == 0 {
+ return types, func(results []reflect.Value) []reflect.Value {
+ return results
+ }
+ }
+
+ // if there's no Out struct among the return types, there was no As annotation applied
+ // just replace original result types with an Out struct and apply tags
+ var (
+ newOut outStructInfo
+ existingOuts []reflect.Type
+ )
+
+ newOut.Fields = []reflect.StructField{_outAnnotationField}
+ newOut.Offsets = []int{}
+
+ for i, t := range types {
+ if !isOut(t) {
+ // this must be from the original function.
+ // apply the tags
+ field := reflect.StructField{
+ Name: fmt.Sprintf("Field%d", i),
+ Type: t,
+ }
+ if i < len(rt.tags) {
+ field.Tag = reflect.StructTag(rt.tags[i])
+ }
+ newOut.Offsets = append(newOut.Offsets, len(newOut.Fields))
+ newOut.Fields = append(newOut.Fields, field)
+ continue
+ }
+ // this must be from an As annotation
+ // apply the tags to the existing type
+ taggedFields := make([]reflect.StructField, t.NumField())
+ taggedFields[0] = _outAnnotationField
+ for j, tag := range rt.tags {
+ if j+1 < t.NumField() {
+ field := t.Field(j + 1)
+ taggedFields[j+1] = reflect.StructField{
+ Name: field.Name,
+ Type: field.Type,
+ Tag: reflect.StructTag(tag),
+ }
+ }
+ }
+ existingOuts = append(existingOuts, reflect.StructOf(taggedFields))
+ }
+
+ resType := reflect.StructOf(newOut.Fields)
+
+ outTypes := []reflect.Type{resType}
+ // append existing outs back to outTypes
+ outTypes = append(outTypes, existingOuts...)
+ if hasError {
+ outTypes = append(outTypes, _typeOfError)
+ }
+
+ return outTypes, func(results []reflect.Value) []reflect.Value {
+ var (
+ outErr error
+ outResults []reflect.Value
+ )
+ outResults = append(outResults, reflect.New(resType).Elem())
+
+ tIdx := 0
+ for i, r := range results {
+ if i == len(results)-1 && hasError {
+ // If hasError and this is the last item,
+ // we are guaranteed that this is an error
+ // object.
+ if err, _ := r.Interface().(error); err != nil {
+ outErr = err
+ }
+ continue
+ }
+ if i < len(newOut.Offsets) {
+ if fieldIdx := newOut.Offsets[i]; fieldIdx > 0 {
+ // fieldIdx 0 is an invalid index
+ // because it refers to uninitialized
+ // outs and would point to fx.Out in the
+ // struct definition. We need to check this
+ // to prevent panic from setting fx.Out to
+ // a value.
+ outResults[0].Field(fieldIdx).Set(r)
+ }
+ continue
+ }
+ if isOut(r.Type()) {
+ tIdx++
+ if tIdx < len(outTypes) {
+ newResult := reflect.New(outTypes[tIdx]).Elem()
+ for j := 1; j < outTypes[tIdx].NumField(); j++ {
+ newResult.Field(j).Set(r.Field(j))
+ }
+ outResults = append(outResults, newResult)
+ }
+ }
+ }
+
+ if hasError {
+ if outErr != nil {
+ outResults = append(outResults, reflect.ValueOf(outErr))
+ } else {
+ outResults = append(outResults, _nilError)
+ }
+ }
+
+ return outResults
+ }
+}
+
+// ResultTags is an Annotation that annotates the result(s) of a function.
+// When multiple tags are specified, each tag is mapped to the corresponding
+// positional result.
+func ResultTags(tags ...string) Annotation {
+ return resultTagsAnnotation{tags}
+}
+
+type outStructInfo struct {
+ Fields []reflect.StructField // fields of the struct
+ Offsets []int // Offsets[i] is the index of result i in Fields
+}
+
+type _lifecycleHookAnnotationType int
+
+const (
+ _unknownHookType _lifecycleHookAnnotationType = iota
+ _onStartHookType
+ _onStopHookType
+)
+
+type lifecycleHookAnnotation struct {
+ Type _lifecycleHookAnnotationType
+ Target interface{}
+}
+
+var _ Annotation = (*lifecycleHookAnnotation)(nil)
+
+func (la *lifecycleHookAnnotation) String() string {
+ name := "UnknownHookAnnotation"
+ switch la.Type {
+ case _onStartHookType:
+ name = _onStartHook
+ case _onStopHookType:
+ name = _onStopHook
+ }
+ return name
+}
+
+func (la *lifecycleHookAnnotation) apply(ann *annotated) error {
+ if la.Target == nil {
+ return fmt.Errorf(
+ "cannot use nil function for %q hook annotation",
+ la,
+ )
+ }
+
+ for _, h := range ann.Hooks {
+ if la.Type == h.Type {
+ return fmt.Errorf(
+ "cannot apply more than one %q hook annotation",
+ la,
+ )
+ }
+ }
+
+ ft := reflect.TypeOf(la.Target)
+
+ if ft.Kind() != reflect.Func {
+ return fmt.Errorf(
+ "must provide function for %q hook, got %v (%T)",
+ la,
+ la.Target,
+ la.Target,
+ )
+ }
+
+ if n := ft.NumOut(); n > 0 {
+ if n > 1 || ft.Out(0) != _typeOfError {
+ return fmt.Errorf(
+ "optional hook return may only be an error, got %v (%T)",
+ la.Target,
+ la.Target,
+ )
+ }
+ }
+
+ if ft.IsVariadic() {
+ return fmt.Errorf(
+ "hooks must not accept variadic parameters, got %v (%T)",
+ la.Target,
+ la.Target,
+ )
+ }
+
+ ann.Hooks = append(ann.Hooks, la)
+ return nil
+}
+
+// build builds and returns a constructor after applying a lifecycle hook annotation.
+func (la *lifecycleHookAnnotation) build(ann *annotated) (interface{}, error) {
+ resultTypes, hasError := ann.currentResultTypes()
+ if !hasError {
+ resultTypes = append(resultTypes, _typeOfError)
+ }
+
+ hookInstaller, paramTypes, remapParams := la.buildHookInstaller(ann)
+
+ origFn := reflect.ValueOf(ann.Target)
+ newFnType := reflect.FuncOf(paramTypes, resultTypes, false)
+ newFn := reflect.MakeFunc(newFnType, func(args []reflect.Value) []reflect.Value {
+ // copy the original arguments before remapping the parameters
+ // so that we can apply them to the hookInstaller.
+ origArgs := make([]reflect.Value, len(args))
+ copy(origArgs, args)
+ args = remapParams(args)
+ results := origFn.Call(args)
+ if hasError {
+ errVal := results[len(results)-1]
+ results = results[:len(results)-1]
+ if err, _ := errVal.Interface().(error); err != nil {
+ // if constructor returned error, do not call hook installer
+ return append(results, errVal)
+ }
+ }
+ hookInstallerResults := hookInstaller.Call(append(results, origArgs...))
+ results = append(results, hookInstallerResults[0])
+ return results
+ })
+ return newFn.Interface(), nil
+}
+
+var (
+ _typeOfLifecycle reflect.Type = reflect.TypeOf((*Lifecycle)(nil)).Elem()
+ _typeOfContext reflect.Type = reflect.TypeOf((*context.Context)(nil)).Elem()
+)
+
+// buildHookInstaller returns a function that appends a hook to Lifecycle when called,
+// along with the new parameter types and a function that maps arguments to the annotated constructor
+func (la *lifecycleHookAnnotation) buildHookInstaller(ann *annotated) (
+ hookInstaller reflect.Value,
+ paramTypes []reflect.Type,
+ remapParams func([]reflect.Value) []reflect.Value, // function to remap parameters to function being annotated
+) {
+ paramTypes = ann.currentParamTypes()
+ paramTypes, remapParams = injectLifecycle(paramTypes)
+
+ resultTypes, hasError := ann.currentResultTypes()
+ if hasError {
+ resultTypes = resultTypes[:len(resultTypes)-1]
+ }
+
+ // look for the context.Context type from the original hook function
+ // and then exclude it from the paramTypes of invokeFn because context.Context
+ // will be injected by the lifecycle
+ ctxPos := -1
+ ctxStructPos := -1
+ origHookFn := reflect.ValueOf(la.Target)
+ origHookFnT := reflect.TypeOf(la.Target)
+ invokeParamTypes := []reflect.Type{
+ _typeOfLifecycle,
+ }
+ for i := 0; i < origHookFnT.NumIn(); i++ {
+ t := origHookFnT.In(i)
+ if t == _typeOfContext && ctxPos < 0 {
+ ctxPos = i
+ continue
+ }
+ if !isIn(t) {
+ invokeParamTypes = append(invokeParamTypes, origHookFnT.In(i))
+ continue
+ }
+ fields := []reflect.StructField{_inAnnotationField}
+ for j := 1; j < t.NumField(); j++ {
+ field := t.Field(j)
+ if field.Type == _typeOfContext && ctxPos < 0 {
+ ctxStructPos = i
+ ctxPos = j
+ continue
+ }
+ fields = append(fields, field)
+ }
+ invokeParamTypes = append(invokeParamTypes, reflect.StructOf(fields))
+
+ }
+ invokeFnT := reflect.FuncOf(invokeParamTypes, []reflect.Type{}, false)
+ invokeFn := reflect.MakeFunc(invokeFnT, func(args []reflect.Value) (results []reflect.Value) {
+ lc := args[0].Interface().(Lifecycle)
+ args = args[1:]
+ hookArgs := make([]reflect.Value, origHookFnT.NumIn())
+
+ hookFn := func(ctx context.Context) (err error) {
+ // If the hook function has multiple parameters, and the first
+ // parameter is a context, inject the provided context.
+ if ctxStructPos < 0 {
+ offset := 0
+ for i := 0; i < len(hookArgs); i++ {
+ if i == ctxPos {
+ hookArgs[i] = reflect.ValueOf(ctx)
+ offset = 1
+ continue
+ }
+ if i-offset >= 0 && i-offset < len(args) {
+ hookArgs[i] = args[i-offset]
+ }
+ }
+ } else {
+ for i := 0; i < origHookFnT.NumIn(); i++ {
+ if i != ctxStructPos {
+ hookArgs[i] = args[i]
+ continue
+ }
+ t := origHookFnT.In(i)
+ v := reflect.New(t).Elem()
+ for j := 1; j < t.NumField(); j++ {
+ if j < ctxPos {
+ v.Field(j).Set(args[i].Field(j))
+ } else if j == ctxPos {
+ v.Field(j).Set(reflect.ValueOf(ctx))
+ } else {
+ v.Field(j).Set(args[i].Field(j - 1))
+ }
+ }
+ hookArgs[i] = v
+ }
+ }
+ hookResults := origHookFn.Call(hookArgs)
+ if len(hookResults) > 0 && hookResults[0].Type() == _typeOfError {
+ err, _ = hookResults[0].Interface().(error)
+ }
+ return err
+ }
+ lc.Append(la.buildHook(hookFn))
+ return results
+ })
+
+ installerType := reflect.FuncOf(append(resultTypes, paramTypes...), []reflect.Type{_typeOfError}, false)
+ hookInstaller = reflect.MakeFunc(installerType, func(args []reflect.Value) (results []reflect.Value) {
+ // build a private scope for hook function
+ var scope *dig.Scope
+ switch la.Type {
+ case _onStartHookType:
+ scope = ann.container.Scope("onStartHookScope")
+ case _onStopHookType:
+ scope = ann.container.Scope("onStopHookScope")
+ }
+
+ // provide the private scope with the current dependencies and results of the annotated function
+ results = []reflect.Value{_nilError}
+ ctor := makeHookScopeCtor(paramTypes, resultTypes, args)
+ if err := scope.Provide(ctor); err != nil {
+ results[0] = reflect.ValueOf(fmt.Errorf("error providing possible parameters for hook installer: %w", err))
+ return results
+ }
+
+ // invoking invokeFn appends the hook function to lifecycle
+ if err := scope.Invoke(invokeFn.Interface()); err != nil {
+ results[0] = reflect.ValueOf(fmt.Errorf("error invoking hook installer: %w", err))
+ return results
+ }
+ return results
+ })
+ return hookInstaller, paramTypes, remapParams
+}
+
+// Struct tag keys read from fx.In/fx.Out fields when deduplicating the
+// dependencies available to a lifecycle hook's private scope.
+var (
+	_nameTag  = "name"
+	_groupTag = "group"
+)
+
+// makeHookScopeCtor makes a constructor that provides all possible parameters
+// that the lifecycle hook being appended can depend on. It also deduplicates
+// duplicate param and result types, which is possible when using fx.Decorate,
+// and uses values from results for providing the deduplicated types.
+//
+// paramTypes and resultTypes describe the annotated function; args holds the
+// concrete values observed at call time (results first, then params).
+func makeHookScopeCtor(paramTypes []reflect.Type, resultTypes []reflect.Type, args []reflect.Value) interface{} {
+	// A value in the dependency graph is identified by its type together
+	// with its name/group tags.
+	type key struct {
+		t     reflect.Type
+		name  string
+		group string
+	}
+	seen := map[key]struct{}{}
+	outTypes := make([]reflect.Type, len(resultTypes))
+	// Everything the annotated function produced is provided as-is; remember
+	// each produced key so identically-keyed params are not provided twice.
+	for i, t := range resultTypes {
+		outTypes[i] = t
+		if isOut(t) {
+			// Field 0 is the embedded fx.Out marker; record the rest.
+			for j := 1; j < t.NumField(); j++ {
+				field := t.Field(j)
+				seen[key{
+					t:     field.Type,
+					name:  field.Tag.Get(_nameTag),
+					group: field.Tag.Get(_groupTag),
+				}] = struct{}{}
+			}
+			continue
+		}
+		seen[key{t: t}] = struct{}{}
+	}
+
+	fields := []reflect.StructField{_outAnnotationField}
+
+	// skippedParams[i] lists the field/param indices within param i that a
+	// result already provides and that are therefore omitted below.
+	skippedParams := make([][]int, len(paramTypes))
+
+	for i, t := range paramTypes {
+		skippedParams[i] = []int{}
+		if isIn(t) {
+			for j := 1; j < t.NumField(); j++ {
+				origField := t.Field(j)
+				k := key{
+					t:     origField.Type,
+					name:  origField.Tag.Get(_nameTag),
+					group: origField.Tag.Get(_groupTag),
+				}
+
+				if _, ok := seen[k]; ok {
+					skippedParams[i] = append(skippedParams[i], j)
+					continue
+				}
+
+				field := reflect.StructField{
+					Name: fmt.Sprintf("Field%d", j-1),
+					Type: origField.Type,
+					Tag:  origField.Tag,
+				}
+				fields = append(fields, field)
+			}
+			continue
+		}
+		k := key{t: t}
+
+		if _, ok := seen[k]; ok {
+			skippedParams[i] = append(skippedParams[i], i)
+			continue
+		}
+		field := reflect.StructField{
+			Name: fmt.Sprintf("Field%d", i),
+			Type: t,
+		}
+		fields = append(fields, field)
+	}
+
+	// The constructor returns all original results plus one fx.Out struct
+	// carrying the non-duplicate parameter values.
+	outTypes = append(outTypes, reflect.StructOf(fields))
+	ctorType := reflect.FuncOf([]reflect.Type{}, outTypes, false)
+	ctor := reflect.MakeFunc(ctorType, func(_ []reflect.Value) []reflect.Value {
+		nOut := len(outTypes)
+		results := make([]reflect.Value, nOut)
+		for i := 0; i < nOut-1; i++ {
+			results[i] = args[i]
+		}
+
+		v := reflect.New(outTypes[nOut-1]).Elem()
+		fieldIdx := 1 // field 0 is the embedded fx.Out marker
+		for i := nOut - 1; i < len(args); i++ {
+			paramIdx := i - (nOut - 1)
+			if isIn(paramTypes[paramIdx]) {
+				skippedIdx := 0
+				for j := 1; j < paramTypes[paramIdx].NumField(); j++ {
+					// BUGFIX: bound the lookup by skippedIdx rather than
+					// only checking len > 0. Once every skipped index has
+					// been consumed (skippedIdx == len), indexing
+					// skippedParams[paramIdx][skippedIdx] would panic with
+					// an index-out-of-range for any later field j.
+					if skippedIdx < len(skippedParams[paramIdx]) && skippedParams[paramIdx][skippedIdx] == j {
+						// skip
+						skippedIdx++
+						continue
+					}
+					v.Field(fieldIdx).Set(args[i].Field(j))
+					fieldIdx++
+				}
+			} else {
+				if len(skippedParams[paramIdx]) > 0 && skippedParams[paramIdx][0] == paramIdx {
+					continue
+				}
+				v.Field(fieldIdx).Set(args[i])
+				fieldIdx++
+			}
+		}
+		results[nOut-1] = v
+
+		return results
+	})
+	return ctor.Interface()
+}
+
+// injectLifecycle prepends a Lifecycle dependency to paramTypes (unless one
+// is already present) and returns a remap function that converts the new
+// argument list back into the original function's argument list.
+func injectLifecycle(paramTypes []reflect.Type) ([]reflect.Type, func([]reflect.Value) []reflect.Value) {
+	// since lifecycle already exists in param types, no need to inject again
+	if lifecycleExists(paramTypes) {
+		return paramTypes, func(args []reflect.Value) []reflect.Value {
+			return args
+		}
+	}
+	// If params are tagged or there's an untagged variadic argument,
+	// add a Lifecycle field to the param struct
+	if len(paramTypes) > 0 && isIn(paramTypes[0]) {
+		taggedParam := paramTypes[0]
+		fields := []reflect.StructField{
+			taggedParam.Field(0), // embedded fx.In marker
+			{
+				Name: "Lifecycle",
+				Type: _typeOfLifecycle,
+			},
+		}
+		for i := 1; i < taggedParam.NumField(); i++ {
+			fields = append(fields, taggedParam.Field(i))
+		}
+		newParamType := reflect.StructOf(fields)
+		return []reflect.Type{newParamType}, func(args []reflect.Value) []reflect.Value {
+			param := args[0]
+			args[0] = reflect.New(taggedParam).Elem()
+			// The injected Lifecycle occupies field 1, so the original
+			// struct's field i comes from the new struct's field i+1.
+			for i := 1; i < taggedParam.NumField(); i++ {
+				args[0].Field(i).Set(param.Field(i + 1))
+			}
+			return args
+		}
+	}
+
+	// Untagged plain parameters: prepend Lifecycle as the first argument and
+	// drop it again before calling the original function.
+	return append([]reflect.Type{_typeOfLifecycle}, paramTypes...), func(args []reflect.Value) []reflect.Value {
+		return args[1:]
+	}
+}
+
+// lifecycleExists reports whether any of the given parameter types already
+// carry a Lifecycle, either directly or as a field of an fx.In struct.
+func lifecycleExists(paramTypes []reflect.Type) bool {
+	for _, pt := range paramTypes {
+		if pt == _typeOfLifecycle {
+			return true
+		}
+		if !isIn(pt) {
+			continue
+		}
+		// Field 0 is the embedded fx.In marker; check the rest.
+		for i := 1; i < pt.NumField(); i++ {
+			if pt.Field(i).Type == _typeOfLifecycle {
+				return true
+			}
+		}
+	}
+	return false
+}
+
+func (la *lifecycleHookAnnotation) buildHook(fn func(context.Context) error) (hook Hook) {
+ switch la.Type {
+ case _onStartHookType:
+ hook.OnStart = fn
+ case _onStopHookType:
+ hook.OnStop = fn
+ }
+ return hook
+}
+
+// OnStart is an Annotation that appends an OnStart Hook to the application
+// Lifecycle when that function is called. This provides a way to create
+// Lifecycle OnStart (see Lifecycle type documentation) hooks without building a
+// function that takes a dependency on the Lifecycle type.
+//
+//	fx.Provide(
+//		fx.Annotate(
+//			NewServer,
+//			fx.OnStart(func(ctx context.Context, server Server) error {
+//				return server.Listen(ctx)
+//			}),
+//		)
+//	)
+//
+// Which is functionally the same as:
+//
+//	fx.Provide(
+//		func(lifecycle fx.Lifecycle, p Params) Server {
+//			server := NewServer(p)
+//			lifecycle.Append(fx.Hook{
+//				OnStart: func(ctx context.Context) error {
+//					return server.Listen(ctx)
+//				},
+//			})
+//			return server
+//		}
+//	)
+//
+// It is also possible to use OnStart annotation with other parameter and result
+// annotations, provided that the parameter of the function passed to OnStart
+// matches annotated parameters and results.
+//
+// For example, the following is possible:
+//
+//	fx.Provide(
+//		fx.Annotate(
+//			func (a A) B {...},
+//			fx.ParamTags(`name:"A"`),
+//			fx.ResultTags(`name:"B"`),
+//			fx.OnStart(func (p OnStartParams) {...}),
+//		),
+//	)
+//
+// As long as OnStartParams looks like the following and has no other dependencies
+// besides Context or Lifecycle:
+//
+//	type OnStartParams struct {
+//		fx.In
+//		FieldA A `name:"A"`
+//		FieldB B `name:"B"`
+//	}
+//
+// Only one OnStart annotation may be applied to a given function at a time,
+// however functions may be annotated with other types of lifecycle Hooks, such
+// as OnStop. The hook function passed into OnStart cannot take any arguments
+// outside of the annotated constructor's existing dependencies or results, except
+// a context.Context.
+func OnStart(onStart interface{}) Annotation {
+	return &lifecycleHookAnnotation{
+		Type:   _onStartHookType,
+		Target: onStart,
+	}
+}
+
+// OnStop is an Annotation that appends an OnStop Hook to the application
+// Lifecycle when that function is called. This provides a way to create
+// Lifecycle OnStop (see Lifecycle type documentation) hooks without building a
+// function that takes a dependency on the Lifecycle type.
+//
+//	fx.Provide(
+//		fx.Annotate(
+//			NewServer,
+//			fx.OnStop(func(ctx context.Context, server Server) error {
+//				return server.Shutdown(ctx)
+//			}),
+//		)
+//	)
+//
+// Which is functionally the same as:
+//
+//	fx.Provide(
+//		func(lifecycle fx.Lifecycle, p Params) Server {
+//			server := NewServer(p)
+//			lifecycle.Append(fx.Hook{
+//				OnStop: func(ctx context.Context) error {
+//					return server.Shutdown(ctx)
+//				},
+//			})
+//			return server
+//		}
+//	)
+//
+// It is also possible to use OnStop annotation with other parameter and result
+// annotations, provided that the parameter of the function passed to OnStop
+// matches annotated parameters and results.
+//
+// For example, the following is possible:
+//
+//	fx.Provide(
+//		fx.Annotate(
+//			func (a A) B {...},
+//			fx.ParamTags(`name:"A"`),
+//			fx.ResultTags(`name:"B"`),
+//			fx.OnStop(func (p OnStopParams) {...}),
+//		),
+//	)
+//
+// As long as OnStopParams looks like the following and has no other dependencies
+// besides Context or Lifecycle:
+//
+//	type OnStopParams struct {
+//		fx.In
+//		FieldA A `name:"A"`
+//		FieldB B `name:"B"`
+//	}
+//
+// Only one OnStop annotation may be applied to a given function at a time,
+// however functions may be annotated with other types of lifecycle Hooks, such
+// as OnStart. The hook function passed into OnStop cannot take any arguments
+// outside of the annotated constructor's existing dependencies or results, except
+// a context.Context.
+func OnStop(onStop interface{}) Annotation {
+	return &lifecycleHookAnnotation{
+		Type:   _onStopHookType,
+		Target: onStop,
+	}
+}
+
+// asAnnotation implements the fx.As annotation: it records the interface
+// types that the annotated function's results should be provided as.
+type asAnnotation struct {
+	targets []interface{}  // the *SomeInterface pointers passed to fx.As
+	types   []reflect.Type // interface types derived from targets by apply
+}
+
+// isOut reports whether t is a struct type that embeds fx.Out
+// (i.e. a dig result object).
+func isOut(t reflect.Type) bool {
+	if t.Kind() != reflect.Struct {
+		return false
+	}
+	return dig.IsOut(reflect.New(t).Elem().Interface())
+}
+
+// isIn reports whether t is a struct type that embeds fx.In
+// (i.e. a dig parameter object).
+func isIn(t reflect.Type) bool {
+	if t.Kind() != reflect.Struct {
+		return false
+	}
+	return dig.IsIn(reflect.New(t).Elem().Interface())
+}
+
+// Compile-time check that asAnnotation implements the Annotation interface.
+var _ Annotation = (*asAnnotation)(nil)
+
+// As is an Annotation that annotates the result of a function (i.e. a
+// constructor) to be provided as another interface.
+//
+// For example, the following code specifies that the return type of
+// bytes.NewBuffer (bytes.Buffer) should be provided as io.Writer type:
+//
+//	fx.Provide(
+//		fx.Annotate(bytes.NewBuffer(...), fx.As(new(io.Writer)))
+//	)
+//
+// In other words, the code above is equivalent to:
+//
+//	fx.Provide(func() io.Writer {
+//		return bytes.NewBuffer()
+//		// provides io.Writer instead of *bytes.Buffer
+//	})
+//
+// Note that the bytes.Buffer type is provided as an io.Writer type, so this
+// constructor does NOT provide both bytes.Buffer and io.Writer type; it just
+// provides io.Writer type.
+//
+// When multiple values are returned by the annotated function, each type
+// gets mapped to corresponding positional result of the annotated function.
+//
+// For example,
+//
+//	func a() (bytes.Buffer, bytes.Buffer) {
+//		...
+//	}
+//	fx.Provide(
+//		fx.Annotate(a, fx.As(new(io.Writer), new(io.Reader)))
+//	)
+//
+// Is equivalent to,
+//
+//	fx.Provide(func() (io.Writer, io.Reader) {
+//		w, r := a()
+//		return w, r
+//	})
+func As(interfaces ...interface{}) Annotation {
+	return &asAnnotation{targets: interfaces}
+}
+
+// apply records the fx.As target types on the annotated function after
+// validating that every target is a pointer to an interface.
+func (at *asAnnotation) apply(ann *annotated) error {
+	at.types = make([]reflect.Type, len(at.targets))
+	for i, target := range at.targets {
+		ptr := reflect.TypeOf(target)
+		if ptr.Kind() != reflect.Ptr || ptr.Elem().Kind() != reflect.Interface {
+			return fmt.Errorf("fx.As: argument must be a pointer to an interface: got %v", ptr)
+		}
+		at.types[i] = ptr.Elem()
+	}
+
+	ann.As = append(ann.As, at.types)
+	return nil
+}
+
+// build implements Annotation. It wraps the target function so that its
+// results are remapped into the interface types requested via fx.As.
+func (at *asAnnotation) build(ann *annotated) (interface{}, error) {
+	params := ann.currentParamTypes()
+
+	results, remapResults, err := at.results(ann)
+	if err != nil {
+		return nil, err
+	}
+
+	target := reflect.ValueOf(ann.Target)
+	wrapped := reflect.MakeFunc(
+		reflect.FuncOf(params, results, false),
+		func(args []reflect.Value) []reflect.Value {
+			return remapResults(target.Call(args))
+		},
+	)
+	return wrapped.Interface(), nil
+}
+
+// results computes the result types after applying the fx.As annotation and
+// returns a remap function that converts the original function's results into
+// the new shape: the original results, followed by an fx.Out struct whose
+// fields use the requested interface types, plus the trailing error (if any).
+func (at *asAnnotation) results(ann *annotated) (
+	types []reflect.Type,
+	remap func([]reflect.Value) []reflect.Value,
+	err error,
+) {
+	types, hasError := ann.currentResultTypes()
+	fields := []reflect.StructField{_outAnnotationField}
+	if hasError {
+		// Drop the trailing error; it is re-appended after the fx.Out struct.
+		types = types[:len(types)-1]
+	}
+	resultFields, getResult := extractResultFields(types)
+
+	for i, f := range resultFields {
+		t := f.Type
+		field := reflect.StructField{
+			Name: fmt.Sprintf("Field%d", i),
+			Type: t,
+			Tag:  f.Tag,
+		}
+		// Positionally replace the concrete type with the fx.As target type,
+		// verifying that the concrete type actually implements it.
+		if i < len(at.types) {
+			if !t.Implements(at.types[i]) {
+				return nil, nil, fmt.Errorf("invalid fx.As: %v does not implement %v", t, at.types[i])
+			}
+			field.Type = at.types[i]
+		}
+		fields = append(fields, field)
+	}
+	resType := reflect.StructOf(fields)
+
+	var outTypes []reflect.Type
+	outTypes = append(types, resType)
+	if hasError {
+		outTypes = append(outTypes, _typeOfError)
+	}
+
+	return outTypes, func(results []reflect.Value) []reflect.Value {
+		var (
+			outErr     error
+			outResults []reflect.Value
+		)
+
+		for i, r := range results {
+			if i == len(results)-1 && hasError {
+				// If hasError and this is the last item,
+				// we are guaranteed that this is an error
+				// object.
+				if err, _ := r.Interface().(error); err != nil {
+					outErr = err
+				}
+				continue
+			}
+			outResults = append(outResults, r)
+		}
+
+		newOutResult := reflect.New(resType).Elem()
+		// Field 0 is the embedded fx.Out marker; fill the remaining fields
+		// from the original results.
+		for i := 1; i < resType.NumField(); i++ {
+			newOutResult.Field(i).Set(getResult(i, results))
+		}
+		outResults = append(outResults, newOutResult)
+
+		if hasError {
+			if outErr != nil {
+				outResults = append(outResults, reflect.ValueOf(outErr))
+			} else {
+				outResults = append(outResults, _nilError)
+			}
+		}
+
+		return outResults
+	}, nil
+}
+
+// extractResultFields flattens the result types of the annotated function
+// into struct fields and returns a getter that, given a field index in the
+// synthesized fx.Out struct (starting at 1; field 0 is the fx.Out marker),
+// extracts the corresponding value from the raw results of a call.
+func extractResultFields(types []reflect.Type) ([]reflect.StructField, func(int, []reflect.Value) reflect.Value) {
+	var resultFields []reflect.StructField
+	// Case 1: the results were already wrapped into a single fx.Out struct
+	// (e.g. by a previous annotation); reuse its fields directly.
+	if len(types) > 0 && isOut(types[0]) {
+		for i := 1; i < types[0].NumField(); i++ {
+			resultFields = append(resultFields, types[0].Field(i))
+		}
+		return resultFields, func(idx int, results []reflect.Value) reflect.Value {
+			// The struct field index maps one-to-one onto the fields of the
+			// single fx.Out result.
+			return results[0].Field(idx)
+		}
+	}
+	// Case 2: plain results; synthesize one field per (non-fx.Out) result.
+	for i, t := range types {
+		if isOut(t) {
+			continue
+		}
+		field := reflect.StructField{
+			Name: fmt.Sprintf("Field%d", i),
+			Type: t,
+		}
+		resultFields = append(resultFields, field)
+	}
+	return resultFields, func(idx int, results []reflect.Value) reflect.Value {
+		// Struct field indices start at 1 (0 is the fx.Out marker), so the
+		// idx-th field comes from the (idx-1)-th raw result.
+		return results[idx-1]
+	}
+}
+
+// fromAnnotation implements the fx.From annotation: it records the concrete
+// types that the annotated function's parameters should be accepted from.
+type fromAnnotation struct {
+	targets []interface{}  // the *ConcreteType pointers passed to fx.From
+	types   []reflect.Type // element types derived from targets by apply
+}
+
+// Compile-time check that fromAnnotation implements the Annotation interface.
+var _ Annotation = (*fromAnnotation)(nil)
+
+// From is an [Annotation] that annotates the parameter(s) for a function (i.e. a
+// constructor) to be accepted from other provided types. It is analogous to
+// [As], but for the parameter types of the constructor.
+//
+// For example,
+//
+//	type Runner interface { Run() }
+//	func NewFooRunner() *FooRunner // implements Runner
+//	func NewRunnerWrap(r Runner) *RunnerWrap
+//
+//	fx.Provide(
+//		fx.Annotate(
+//			NewRunnerWrap,
+//			fx.From(new(*FooRunner)),
+//		),
+//	)
+//
+// Is equivalent to,
+//
+//	fx.Provide(func(r *FooRunner) *RunnerWrap {
+//		// need *FooRunner instead of Runner
+//		return NewRunnerWrap(r)
+//	})
+//
+// When the annotated function takes in multiple parameters, each type gets
+// mapped to corresponding positional parameter of the annotated function
+//
+// For example,
+//
+//	func NewBarRunner() *BarRunner // implements Runner
+//	func NewRunnerWraps(r1 Runner, r2 Runner) *RunnerWraps
+//
+//	fx.Provide(
+//		fx.Annotate(
+//			NewRunnerWraps,
+//			fx.From(new(*FooRunner), new(*BarRunner)),
+//		),
+//	)
+//
+// Is equivalent to,
+//
+//	fx.Provide(func(r1 *FooRunner, r2 *BarRunner) *RunnerWraps {
+//		return NewRunnerWraps(r1, r2)
+//	})
+func From(interfaces ...interface{}) Annotation {
+	return &fromAnnotation{targets: interfaces}
+}
+
+// apply validates and records the fx.From source types on the annotated
+// function. Only one From annotation may be applied, and a variadic argument
+// cannot be annotated.
+func (fr *fromAnnotation) apply(ann *annotated) error {
+	if len(ann.From) > 0 {
+		return errors.New("cannot apply more than one line of From")
+	}
+	ft := reflect.TypeOf(ann.Target)
+	fr.types = make([]reflect.Type, len(fr.targets))
+	for i, target := range fr.targets {
+		if ft.IsVariadic() && i == ft.NumIn()-1 {
+			return errors.New("fx.From: cannot annotate a variadic argument")
+		}
+		ptr := reflect.TypeOf(target)
+		if ptr == nil || ptr.Kind() != reflect.Ptr {
+			return fmt.Errorf("fx.From: argument must be a pointer to a type that implements some interface: got %v", ptr)
+		}
+		fr.types[i] = ptr.Elem()
+	}
+	ann.From = fr.types
+	return nil
+}
+
+// build builds and returns a constructor after applying a From annotation:
+// the wrapped function accepts the fx.From source types and forwards the
+// remapped arguments to the original target.
+func (fr *fromAnnotation) build(ann *annotated) (interface{}, error) {
+	params, remap, err := fr.parameters(ann)
+	if err != nil {
+		return nil, err
+	}
+	results, _ := ann.currentResultTypes()
+
+	target := reflect.ValueOf(ann.Target)
+	wrapped := reflect.MakeFunc(
+		reflect.FuncOf(params, results, false),
+		func(args []reflect.Value) []reflect.Value {
+			return target.Call(remap(args))
+		},
+	)
+	return wrapped.Interface(), nil
+}
+
+// parameters returns the type for the parameters of the annotated function,
+// and a function that maps the arguments of the annotated function
+// back to the arguments of the target function.
+func (fr *fromAnnotation) parameters(ann *annotated) (
+	types []reflect.Type,
+	remap func([]reflect.Value) []reflect.Value,
+	err error,
+) {
+	ft := reflect.TypeOf(ann.Target)
+	types = make([]reflect.Type, ft.NumIn())
+	for i := 0; i < ft.NumIn(); i++ {
+		types[i] = ft.In(i)
+	}
+
+	// No parameter annotations. Return the original types
+	// and an identity function.
+	if len(fr.targets) == 0 {
+		return types, func(args []reflect.Value) []reflect.Value {
+			return args
+		}, nil
+	}
+
+	// Turn parameters into an fx.In struct.
+	inFields := []reflect.StructField{_inAnnotationField}
+
+	// The following situations may occur:
+	// 1. there was a variadic argument, so it was pre-transformed.
+	// 2. another parameter annotation was transformed (ex: ParamTags).
+	// so need to visit fields of the fx.In struct.
+	if len(types) > 0 && isIn(types[0]) {
+		paramType := types[0]
+
+		// Field 0 is the embedded fx.In marker; rebuild the remaining
+		// fields, positionally substituting the fx.From source types.
+		for i := 1; i < paramType.NumField(); i++ {
+			origField := paramType.Field(i)
+			field := reflect.StructField{
+				Name: origField.Name,
+				Type: origField.Type,
+				Tag:  origField.Tag,
+			}
+			if i-1 < len(fr.types) {
+				t := fr.types[i-1]
+				if !t.Implements(field.Type) {
+					return nil, nil, fmt.Errorf("invalid fx.From: %v does not implement %v", t, field.Type)
+				}
+				field.Type = t
+			}
+
+			inFields = append(inFields, field)
+		}
+
+		types = []reflect.Type{reflect.StructOf(inFields)}
+		return types, func(args []reflect.Value) []reflect.Value {
+			param := args[0]
+			args[0] = reflect.New(paramType).Elem()
+			// Copy field-by-field: the new struct's layout matches the old
+			// one; only the (interface-compatible) field types differ.
+			for i := 1; i < paramType.NumField(); i++ {
+				args[0].Field(i).Set(param.Field(i))
+			}
+			return args
+		}, nil
+	}
+
+	// Plain (untransformed) parameters: wrap them all into a new fx.In
+	// struct, positionally substituting the fx.From source types.
+	for i, t := range types {
+		field := reflect.StructField{
+			Name: fmt.Sprintf("Field%d", i),
+			Type: t,
+		}
+		if i < len(fr.types) {
+			t := fr.types[i]
+			if !t.Implements(field.Type) {
+				return nil, nil, fmt.Errorf("invalid fx.From: %v does not implement %v", t, field.Type)
+			}
+			field.Type = t
+		}
+
+		inFields = append(inFields, field)
+	}
+
+	types = []reflect.Type{reflect.StructOf(inFields)}
+	return types, func(args []reflect.Value) []reflect.Value {
+		params := args[0]
+		// Reuse args' backing array; struct field 0 is the fx.In marker,
+		// so the i-th argument is field i+1.
+		args = args[:0]
+		for i := 0; i < ft.NumIn(); i++ {
+			args = append(args, params.Field(i+1))
+		}
+		return args
+	}, nil
+}
+
+// annotated is the accumulated state of an fx.Annotate call: the target
+// function plus every annotation applied to it.
+type annotated struct {
+	Target      interface{} // the function being annotated (re-wrapped by each build)
+	Annotations []Annotation
+	ParamTags   []string
+	ResultTags  []string
+	As          [][]reflect.Type // interface types recorded by fx.As, one slice per annotation
+	From        []reflect.Type   // source types recorded by fx.From
+	FuncPtr     uintptr
+	Hooks       []*lifecycleHookAnnotation
+	// container is used to build private scopes for lifecycle hook functions
+	// added via fx.OnStart and fx.OnStop annotations.
+	container *dig.Container
+}
+
+// String returns an fx.Annotate-style representation of the annotated
+// function and its annotations, for use in error messages and logs.
+func (ann annotated) String() string {
+	var sb strings.Builder
+	sb.WriteString("fx.Annotate(")
+	sb.WriteString(fxreflect.FuncName(ann.Target))
+	if tags := ann.ParamTags; len(tags) > 0 {
+		fmt.Fprintf(&sb, ", fx.ParamTags(%q)", tags)
+	}
+	if tags := ann.ResultTags; len(tags) > 0 {
+		fmt.Fprintf(&sb, ", fx.ResultTags(%q)", tags)
+	}
+	if as := ann.As; len(as) > 0 {
+		fmt.Fprintf(&sb, ", fx.As(%v)", as)
+	}
+	if from := ann.From; len(from) > 0 {
+		fmt.Fprintf(&sb, ", fx.From(%v)", from)
+	}
+	// BUGFIX: close the "fx.Annotate(" opened above; previously the
+	// representation was left unbalanced.
+	sb.WriteString(")")
+	return sb.String()
+}
+
+// Build builds and returns a constructor based on fx.In/fx.Out params and
+// results wrapping the original constructor passed to fx.Annotate.
+func (ann *annotated) Build() (interface{}, error) {
+	// Private container used to create scopes for OnStart/OnStop hooks.
+	ann.container = dig.New()
+	ft := reflect.TypeOf(ann.Target)
+	if ft.Kind() != reflect.Func {
+		return nil, fmt.Errorf("must provide constructor function, got %v (%T)", ann.Target, ann.Target)
+	}
+
+	if err := ann.typeCheckOrigFn(); err != nil {
+		return nil, fmt.Errorf("invalid annotation function %T: %w", ann.Target, err)
+	}
+
+	ann.applyOptionalTag()
+
+	var (
+		err        error
+		lcHookAnns []*lifecycleHookAnnotation
+	)
+	// Apply all non-lifecycle annotations first; each build wraps ann.Target.
+	// Lifecycle hook annotations are set aside and applied last.
+	for _, annotation := range ann.Annotations {
+		if lcHookAnn, ok := annotation.(*lifecycleHookAnnotation); ok {
+			lcHookAnns = append(lcHookAnns, lcHookAnn)
+			continue
+		}
+		if ann.Target, err = annotation.build(ann); err != nil {
+			return nil, err
+		}
+	}
+
+	// need to call cleanUpAsResults before applying lifecycle annotations
+	// to exclude the original results from the hook's scope if any
+	// fx.As annotations were applied
+	ann.cleanUpAsResults()
+
+	for _, la := range lcHookAnns {
+		if ann.Target, err = la.build(ann); err != nil {
+			return nil, err
+		}
+	}
+	return ann.Target, nil
+}
+
+// applyOptionalTag checks if function being annotated is variadic
+// and applies optional tag to the variadic argument before
+// applying any other annotations
+func (ann *annotated) applyOptionalTag() {
+	ft := reflect.TypeOf(ann.Target)
+	if !ft.IsVariadic() {
+		return
+	}
+
+	resultTypes, _ := ann.currentResultTypes()
+
+	// Wrap all parameters (the variadic one becomes a plain slice) into a
+	// single fx.In struct; field 0 is the embedded fx.In marker.
+	fields := []reflect.StructField{_inAnnotationField}
+	for i := 0; i < ft.NumIn(); i++ {
+		field := reflect.StructField{
+			Name: fmt.Sprintf("Field%d", i),
+			Type: ft.In(i),
+		}
+		if i == ft.NumIn()-1 {
+			// Mark a variadic argument optional by default
+			// so that just wrapping a function in fx.Annotate does not
+			// suddenly introduce a required []arg dependency.
+			field.Tag = reflect.StructTag(`optional:"true"`)
+		}
+		fields = append(fields, field)
+	}
+	paramType := reflect.StructOf(fields)
+	origFn := reflect.ValueOf(ann.Target)
+	newFnType := reflect.FuncOf([]reflect.Type{paramType}, resultTypes, false)
+	newFn := reflect.MakeFunc(newFnType, func(args []reflect.Value) []reflect.Value {
+		params := args[0]
+		// Reuse args' backing array; struct field i+1 holds argument i.
+		args = args[:0]
+		for i := 0; i < ft.NumIn(); i++ {
+			args = append(args, params.Field(i+1))
+		}
+		// CallSlice expands the final slice value as the variadic arguments.
+		return origFn.CallSlice(args)
+	})
+	ann.Target = newFn.Interface()
+}
+
+// cleanUpAsResults does a check to see if an As annotation was applied.
+// If there was any fx.As annotation applied, cleanUpAsResults wraps the
+// function one more time to remove the results from the original function.
+func (ann *annotated) cleanUpAsResults() {
+	// clean up orig function results if there were any As annotations
+	if len(ann.As) < 1 {
+		return
+	}
+	paramTypes := ann.currentParamTypes()
+	resultTypes, hasError := ann.currentResultTypes()
+	// Each applied fx.As contributed one fx.Out struct result; keep only
+	// those (plus the trailing error, if present) and drop the original
+	// concrete results that precede them.
+	numRes := len(ann.As)
+	if hasError {
+		numRes++
+	}
+	newResultTypes := resultTypes[len(resultTypes)-numRes:]
+	origFn := reflect.ValueOf(ann.Target)
+	newFnType := reflect.FuncOf(paramTypes, newResultTypes, false)
+	newFn := reflect.MakeFunc(newFnType, func(args []reflect.Value) (results []reflect.Value) {
+		results = origFn.Call(args)
+		results = results[len(results)-numRes:]
+		return
+	})
+	ann.Target = newFn.Interface()
+}
+
+// typeCheckOrigFn checks and returns a non-nil error if the target function:
+//   - returns an fx.Out struct as a result.
+//   - takes in an fx.In struct as a parameter.
+//   - has an error result not as the last result.
+func (ann *annotated) typeCheckOrigFn() error {
+	ft := reflect.TypeOf(ann.Target)
+	numOut := ft.NumOut()
+	for i := 0; i < numOut; i++ {
+		ot := ft.Out(i)
+		if ot == _typeOfError && i != numOut-1 {
+			return fmt.Errorf(
+				"only the last result can be an error: "+
+					"%v (%v) returns error as result %d",
+				fxreflect.FuncName(ann.Target), ft, i)
+		}
+		if ot.Kind() != reflect.Struct {
+			continue
+		}
+		if dig.IsOut(reflect.New(ot).Elem().Interface()) {
+			return errors.New("fx.Out structs cannot be annotated")
+		}
+	}
+
+	numIn := ft.NumIn()
+	for i := 0; i < numIn; i++ {
+		it := ft.In(i)
+		if it.Kind() != reflect.Struct {
+			continue
+		}
+		if dig.IsIn(reflect.New(it).Elem().Interface()) {
+			return errors.New("fx.In structs cannot be annotated")
+		}
+	}
+	return nil
+}
+
+// currentResultTypes returns the result types of the current target function
+// and whether its final result is an error.
+func (ann *annotated) currentResultTypes() ([]reflect.Type, bool) {
+	ft := reflect.TypeOf(ann.Target)
+	n := ft.NumOut()
+	results := make([]reflect.Type, n)
+	for i := range results {
+		results[i] = ft.Out(i)
+	}
+	hasError := n > 0 && results[n-1] == _typeOfError
+	return results, hasError
+}
+
+// currentParamTypes returns the parameter types of the current target function.
+func (ann *annotated) currentParamTypes() []reflect.Type {
+	ft := reflect.TypeOf(ann.Target)
+	params := make([]reflect.Type, ft.NumIn())
+	for i := range params {
+		params[i] = ft.In(i)
+	}
+	return params
+}
+
+// Annotate lets you annotate a function's parameters and returns
+// without you having to declare separate struct definitions for them.
+//
+// For example,
+//
+//	func NewGateway(ro, rw *db.Conn) *Gateway { ... }
+//	fx.Provide(
+//		fx.Annotate(
+//			NewGateway,
+//			fx.ParamTags(`name:"ro" optional:"true"`, `name:"rw"`),
+//			fx.ResultTags(`name:"foo"`),
+//		),
+//	)
+//
+// Is equivalent to,
+//
+//	type params struct {
+//		fx.In
+//
+//		RO *db.Conn `name:"ro" optional:"true"`
+//		RW *db.Conn `name:"rw"`
+//	}
+//
+//	type result struct {
+//		fx.Out
+//
+//		GW *Gateway `name:"foo"`
+//	}
+//
+//	fx.Provide(func(p params) result {
+//		return result{GW: NewGateway(p.RO, p.RW)}
+//	})
+//
+// Annotate cannot be used on functions that takes in or returns
+// [In] or [Out] structs.
+//
+// Using the same annotation multiple times is invalid.
+// For example, the following will fail with an error:
+//
+//	fx.Provide(
+//		fx.Annotate(
+//			NewGateway,
+//			fx.ParamTags(`name:"ro" optional:"true"`),
+//			fx.ParamTags(`name:"rw"`), // ERROR: ParamTags was already used above
+//			fx.ResultTags(`name:"foo"`)
+//		)
+//	)
+//
+// is considered an invalid usage and will not apply any of the
+// Annotations to NewGateway.
+//
+// If more tags are given than the number of parameters/results, only
+// the ones up to the number of parameters/results will be applied.
+//
+// # Variadic functions
+//
+// If the provided function is variadic, Annotate treats its parameter as a
+// slice. For example,
+//
+//	fx.Annotate(func(w io.Writer, rs ...io.Reader) {
+//		// ...
+//	}, ...)
+//
+// Is equivalent to,
+//
+//	fx.Annotate(func(w io.Writer, rs []io.Reader) {
+//		// ...
+//	}, ...)
+//
+// You can use variadic parameters with Fx's value groups.
+// For example,
+//
+//	fx.Annotate(func(mux *http.ServeMux, handlers ...http.Handler) {
+//		// ...
+//	}, fx.ParamTags(``, `group:"server"`))
+//
+// If we provide the above to the application,
+// any constructor in the Fx application can inject its HTTP handlers
+// by using fx.Annotate, fx.Annotated, or fx.Out.
+//
+//	fx.Annotate(
+//		func(..) http.Handler { ... },
+//		fx.ResultTags(`group:"server"`),
+//	)
+//
+//	fx.Annotated{
+//		Target: func(..) http.Handler { ... },
+//		Group:  "server",
+//	}
+func Annotate(t interface{}, anns ...Annotation) interface{} {
+	result := annotated{Target: t}
+	// Apply annotations in order; the first failure aborts and is surfaced
+	// as an annotationError value instead of a usable constructor.
+	for _, ann := range anns {
+		if err := ann.apply(&result); err != nil {
+			return annotationError{
+				target: t,
+				err:    err,
+			}
+		}
+	}
+	result.Annotations = anns
+	return result
+}
diff --git a/vendor/go.uber.org/fx/app.go b/vendor/go.uber.org/fx/app.go
new file mode 100644
index 000000000..a5e269908
--- /dev/null
+++ b/vendor/go.uber.org/fx/app.go
@@ -0,0 +1,810 @@
+// Copyright (c) 2020-2021 Uber Technologies, Inc.
+//
+// Permission is hereby granted, free of charge, to any person obtaining a copy
+// of this software and associated documentation files (the "Software"), to deal
+// in the Software without restriction, including without limitation the rights
+// to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
+// copies of the Software, and to permit persons to whom the Software is
+// furnished to do so, subject to the following conditions:
+//
+// The above copyright notice and this permission notice shall be included in
+// all copies or substantial portions of the Software.
+//
+// THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+// IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+// FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
+// AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
+// LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
+// OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
+// THE SOFTWARE.
+
+package fx
+
+import (
+ "bytes"
+ "context"
+ "errors"
+ "fmt"
+ "os"
+ "reflect"
+ "strings"
+ "time"
+
+ "go.uber.org/dig"
+ "go.uber.org/fx/fxevent"
+ "go.uber.org/fx/internal/fxclock"
+ "go.uber.org/fx/internal/fxlog"
+ "go.uber.org/fx/internal/fxreflect"
+ "go.uber.org/fx/internal/lifecycle"
+ "go.uber.org/multierr"
+)
+
+// DefaultTimeout is the default timeout for starting or stopping an
+// application. It can be configured with the StartTimeout and StopTimeout
+// options.
+const DefaultTimeout = 15 * time.Second
+
+// An Option configures an App using the functional options paradigm
+// popularized by Rob Pike. If you're unfamiliar with this style, see
+// https://commandcenter.blogspot.com/2014/01/self-referential-functions-and-design.html.
+type Option interface {
+ fmt.Stringer
+
+ apply(*module)
+}
+
+// Error registers any number of errors with the application to short-circuit
+// startup. If more than one error is given, the errors are combined into a
+// single error.
+//
+// Similar to invocations, errors are applied in order. All Provide and Invoke
+// options registered before or after an Error option will not be applied.
+func Error(errs ...error) Option {
+ return errorOption(errs)
+}
+
+type errorOption []error
+
+func (errs errorOption) apply(mod *module) {
+ mod.app.err = multierr.Append(mod.app.err, multierr.Combine(errs...))
+}
+
+func (errs errorOption) String() string {
+ return fmt.Sprintf("fx.Error(%v)", multierr.Combine(errs...))
+}
+
+// Options converts a collection of Options into a single Option. This allows
+// packages to bundle sophisticated functionality into easy-to-use Fx modules.
+// For example, a logging package might export a simple option like this:
+//
+// package logging
+//
+// var Module = fx.Provide(func() *log.Logger {
+// return log.New(os.Stdout, "", 0)
+// })
+//
+// A shared all-in-one microservice package could then use Options to bundle
+// logging with similar metrics, tracing, and gRPC modules:
+//
+// package server
+//
+// var Module = fx.Options(
+// logging.Module,
+// metrics.Module,
+// tracing.Module,
+// grpc.Module,
+// )
+//
+// Since this all-in-one module has a minimal API surface, it's easy to add
+// new functionality to it without breaking existing users. Individual
+// applications can take advantage of all this functionality with only one
+// line of code:
+//
+// app := fx.New(server.Module)
+//
+// Use this pattern sparingly, since it limits the user's ability to customize
+// their application.
+func Options(opts ...Option) Option {
+ return optionGroup(opts)
+}
+
+type optionGroup []Option
+
+func (og optionGroup) apply(mod *module) {
+ for _, opt := range og {
+ opt.apply(mod)
+ }
+}
+
+func (og optionGroup) String() string {
+ items := make([]string, len(og))
+ for i, opt := range og {
+ items[i] = fmt.Sprint(opt)
+ }
+ return fmt.Sprintf("fx.Options(%s)", strings.Join(items, ", "))
+}
+
+// StartTimeout changes the application's start timeout.
+func StartTimeout(v time.Duration) Option {
+ return startTimeoutOption(v)
+}
+
+type startTimeoutOption time.Duration
+
+func (t startTimeoutOption) apply(m *module) {
+ if m.parent != nil {
+ m.app.err = fmt.Errorf("fx.StartTimeout Option should be passed to top-level App, " +
+ "not to fx.Module")
+ } else {
+ m.app.startTimeout = time.Duration(t)
+ }
+}
+
+func (t startTimeoutOption) String() string {
+ return fmt.Sprintf("fx.StartTimeout(%v)", time.Duration(t))
+}
+
+// StopTimeout changes the application's stop timeout.
+func StopTimeout(v time.Duration) Option {
+ return stopTimeoutOption(v)
+}
+
+type stopTimeoutOption time.Duration
+
+func (t stopTimeoutOption) apply(m *module) {
+ if m.parent != nil {
+ m.app.err = fmt.Errorf("fx.StopTimeout Option should be passed to top-level App, " +
+ "not to fx.Module")
+ } else {
+ m.app.stopTimeout = time.Duration(t)
+ }
+}
+
+func (t stopTimeoutOption) String() string {
+ return fmt.Sprintf("fx.StopTimeout(%v)", time.Duration(t))
+}
+
+// RecoverFromPanics causes panics that occur in functions given to [Provide],
+// [Decorate], and [Invoke] to be recovered from.
+// This error can be retrieved as any other error, by using (*App).Err().
+func RecoverFromPanics() Option {
+ return recoverFromPanicsOption{}
+}
+
+type recoverFromPanicsOption struct{}
+
+func (o recoverFromPanicsOption) apply(m *module) {
+ if m.parent != nil {
+ m.app.err = fmt.Errorf("fx.RecoverFromPanics Option should be passed to top-level " +
+ "App, not to fx.Module")
+ } else {
+ m.app.recoverFromPanics = true
+ }
+}
+
+func (o recoverFromPanicsOption) String() string {
+ return "fx.RecoverFromPanics()"
+}
+
+// WithLogger specifies how Fx should build an fxevent.Logger to log its events
+// to. The argument must be a constructor with one of the following return
+// types.
+//
+// fxevent.Logger
+// (fxevent.Logger, error)
+//
+// For example,
+//
+// WithLogger(func(logger *zap.Logger) fxevent.Logger {
+// return &fxevent.ZapLogger{Logger: logger}
+// })
+func WithLogger(constructor interface{}) Option {
+ return withLoggerOption{
+ constructor: constructor,
+ Stack: fxreflect.CallerStack(1, 0),
+ }
+}
+
+type withLoggerOption struct {
+ constructor interface{}
+ Stack fxreflect.Stack
+}
+
+func (l withLoggerOption) apply(m *module) {
+ m.logConstructor = &provide{
+ Target: l.constructor,
+ Stack: l.Stack,
+ }
+}
+
+func (l withLoggerOption) String() string {
+ return fmt.Sprintf("fx.WithLogger(%s)", fxreflect.FuncName(l.constructor))
+}
+
+// Printer is the interface required by Fx's logging backend. It's implemented
+// by most loggers, including the one bundled with the standard library.
+//
+// Note, this will be deprecate with next release and you will need to implement
+// fxevent.Logger interface instead.
+type Printer interface {
+ Printf(string, ...interface{})
+}
+
+// Logger redirects the application's log output to the provided printer.
+// Deprecated: use WithLogger instead.
+func Logger(p Printer) Option {
+ return loggerOption{p}
+}
+
+type loggerOption struct{ p Printer }
+
+func (l loggerOption) apply(m *module) {
+ if m.parent != nil {
+ m.app.err = fmt.Errorf("fx.Logger Option should be passed to top-level App, " +
+ "not to fx.Module")
+ } else {
+ np := writerFromPrinter(l.p)
+ m.log = fxlog.DefaultLogger(np) // assuming np is thread-safe.
+ }
+}
+
+func (l loggerOption) String() string {
+ return fmt.Sprintf("fx.Logger(%v)", l.p)
+}
+
+// NopLogger disables the application's log output. Note that this makes some
+// failures difficult to debug, since no errors are printed to console.
+var NopLogger = WithLogger(func() fxevent.Logger { return fxevent.NopLogger })
+
+// An App is a modular application built around dependency injection. Most
+// users will only need to use the New constructor and the all-in-one Run
+// convenience method. In more unusual cases, users may need to use the Err,
+// Start, Done, and Stop methods by hand instead of relying on Run.
+//
+// New creates and initializes an App. All applications begin with a
+// constructor for the Lifecycle type already registered.
+//
+// In addition to that built-in functionality, users typically pass a handful
+// of Provide options and one or more Invoke options. The Provide options
+// teach the application how to instantiate a variety of types, and the Invoke
+// options describe how to initialize the application.
+//
+// When created, the application immediately executes all the functions passed
+// via Invoke options. To supply these functions with the parameters they
+// need, the application looks for constructors that return the appropriate
+// types; if constructors for any required types are missing or any
+// invocations return an error, the application will fail to start (and Err
+// will return a descriptive error message).
+//
+// Once all the invocations (and any required constructors) have been called,
+// New returns and the application is ready to be started using Run or Start.
+// On startup, it executes any OnStart hooks registered with its Lifecycle.
+// OnStart hooks are executed one at a time, in order, and must all complete
+// within a configurable deadline (by default, 15 seconds). For details on the
+// order in which OnStart hooks are executed, see the documentation for the
+// Start method.
+//
+// At this point, the application has successfully started up. If started via
+// Run, it will continue operating until it receives a shutdown signal from
+// Done (see the Done documentation for details); if started explicitly via
+// Start, it will operate until the user calls Stop. On shutdown, OnStop hooks
+// execute one at a time, in reverse order, and must all complete within a
+// configurable deadline (again, 15 seconds by default).
+type App struct {
+ err error
+ clock fxclock.Clock
+ lifecycle *lifecycleWrapper
+
+ container *dig.Container
+ root *module
+ modules []*module
+
+ // Timeouts used
+ startTimeout time.Duration
+ stopTimeout time.Duration
+ // Decides how we react to errors when building the graph.
+ errorHooks []ErrorHandler
+ validate bool
+ // Whether to recover from panics in Dig container
+ recoverFromPanics bool
+
+ // Used to signal shutdowns.
+ receivers signalReceivers
+
+ osExit func(code int) // os.Exit override; used for testing only
+}
+
+// provide is a single constructor provided to Fx.
+type provide struct {
+ // Constructor provided to Fx. This may be an fx.Annotated.
+ Target interface{}
+
+ // Stack trace of where this provide was made.
+ Stack fxreflect.Stack
+
+ // IsSupply is true when the Target constructor was emitted by fx.Supply.
+ IsSupply bool
+ SupplyType reflect.Type // set only if IsSupply
+
+ // Set if the type should be provided at private scope.
+ Private bool
+}
+
+// invoke is a single invocation request to Fx.
+type invoke struct {
+ // Function to invoke.
+ Target interface{}
+
+ // Stack trace of where this invoke was made.
+ Stack fxreflect.Stack
+}
+
+// ErrorHandler handles Fx application startup errors.
+type ErrorHandler interface {
+ HandleError(error)
+}
+
+// ErrorHook registers error handlers that implement error handling functions.
+// They are executed on invoke failures. Passing multiple ErrorHandlers appends
+// the new handlers to the application's existing list.
+func ErrorHook(funcs ...ErrorHandler) Option {
+ return errorHookOption(funcs)
+}
+
+type errorHookOption []ErrorHandler
+
+func (eho errorHookOption) apply(m *module) {
+ m.app.errorHooks = append(m.app.errorHooks, eho...)
+}
+
+func (eho errorHookOption) String() string {
+ items := make([]string, len(eho))
+ for i, eh := range eho {
+ items[i] = fmt.Sprint(eh)
+ }
+ return fmt.Sprintf("fx.ErrorHook(%v)", strings.Join(items, ", "))
+}
+
+type errorHandlerList []ErrorHandler
+
+func (ehl errorHandlerList) HandleError(err error) {
+ for _, eh := range ehl {
+ eh.HandleError(err)
+ }
+}
+
+// validate sets *App into validation mode without running invoked functions.
+func validate(validate bool) Option {
+ return &validateOption{
+ validate: validate,
+ }
+}
+
+type validateOption struct {
+ validate bool
+}
+
+func (o validateOption) apply(m *module) {
+ if m.parent != nil {
+ m.app.err = fmt.Errorf("fx.validate Option should be passed to top-level App, " +
+ "not to fx.Module")
+ } else {
+ m.app.validate = o.validate
+ }
+}
+
+func (o validateOption) String() string {
+ return fmt.Sprintf("fx.validate(%v)", o.validate)
+}
+
+// ValidateApp validates that supplied graph would run and is not missing any dependencies. This
+// method does not invoke actual input functions.
+func ValidateApp(opts ...Option) error {
+ opts = append(opts, validate(true))
+ app := New(opts...)
+
+ return app.Err()
+}
+
+// New creates and initializes an App, immediately executing any functions
+// registered via Invoke options. See the documentation of the App struct for
+// details on the application's initialization, startup, and shutdown logic.
+func New(opts ...Option) *App {
+ logger := fxlog.DefaultLogger(os.Stderr)
+
+ app := &App{
+ clock: fxclock.System,
+ startTimeout: DefaultTimeout,
+ stopTimeout: DefaultTimeout,
+ receivers: newSignalReceivers(),
+ }
+ app.root = &module{
+ app: app,
+ // We start with a logger that writes to stderr. One of the
+ // following three things can change this:
+ //
+ // - fx.Logger was provided to change the output stream
+ // - fx.WithLogger was provided to change the logger
+ // implementation
+ // - Both, fx.Logger and fx.WithLogger were provided
+ //
+ // The first two cases are straightforward: we use what the
+ // user gave us. For the last case, however, we need to fall
+ // back to what was provided to fx.Logger if fx.WithLogger
+ // fails.
+ log: logger,
+ }
+ app.modules = append(app.modules, app.root)
+
+ for _, opt := range opts {
+ opt.apply(app.root)
+ }
+
+ // There are a few levels of wrapping on the lifecycle here. To quickly
+ // cover them:
+ //
+ // - lifecycleWrapper ensures that we don't unintentionally expose the
+ // Start and Stop methods of the internal lifecycle.Lifecycle type
+ // - lifecycleWrapper also adapts the internal lifecycle.Hook type into
+ // the public fx.Hook type.
+ // - appLogger ensures that the lifecycle always logs events to the
+ // "current" logger associated with the fx.App.
+ app.lifecycle = &lifecycleWrapper{
+ lifecycle.New(appLogger{app}, app.clock),
+ }
+
+ containerOptions := []dig.Option{
+ dig.DeferAcyclicVerification(),
+ dig.DryRun(app.validate),
+ }
+
+ if app.recoverFromPanics {
+ containerOptions = append(containerOptions, dig.RecoverFromPanics())
+ }
+
+ app.container = dig.New(containerOptions...)
+
+ for _, m := range app.modules {
+ m.build(app, app.container)
+ }
+
+ for _, m := range app.modules {
+ m.provideAll()
+ }
+
+ frames := fxreflect.CallerStack(0, 0) // include New in the stack for default Provides
+ app.root.provide(provide{
+ Target: func() Lifecycle { return app.lifecycle },
+ Stack: frames,
+ })
+ app.root.provide(provide{Target: app.shutdowner, Stack: frames})
+ app.root.provide(provide{Target: app.dotGraph, Stack: frames})
+
+ // Run decorators before executing any Invokes -- including the one
+ // inside constructCustomLogger.
+ app.err = multierr.Append(app.err, app.root.decorate())
+
+ // If you are thinking about returning here after provides: do not (just yet)!
+ // If a custom logger was being used, we're still buffering messages.
+ // We'll want to flush them to the logger.
+
+ // custom app logger will be initialized by the root module.
+ for _, m := range app.modules {
+ m.constructAllCustomLoggers()
+ }
+
+ // This error might have come from the provide loop above. We've
+ // already flushed to the custom logger, so we can return.
+ if app.err != nil {
+ return app
+ }
+
+ if err := app.root.executeInvokes(); err != nil {
+ app.err = err
+
+ if dig.CanVisualizeError(err) {
+ var b bytes.Buffer
+ dig.Visualize(app.container, &b, dig.VisualizeError(err))
+ err = errorWithGraph{
+ graph: b.String(),
+ err: err,
+ }
+ }
+ errorHandlerList(app.errorHooks).HandleError(err)
+ }
+
+ return app
+}
+
+func (app *App) log() fxevent.Logger {
+ return app.root.log
+}
+
+// DotGraph contains a DOT language visualization of the dependency graph in
+// an Fx application. It is provided in the container by default at
+// initialization. On failure to build the dependency graph, it is attached
+// to the error and if possible, colorized to highlight the root cause of the
+// failure.
+type DotGraph string
+
+type errWithGraph interface {
+ Graph() DotGraph
+}
+
+type errorWithGraph struct {
+ graph string
+ err error
+}
+
+func (err errorWithGraph) Graph() DotGraph {
+ return DotGraph(err.graph)
+}
+
+func (err errorWithGraph) Error() string {
+ return err.err.Error()
+}
+
+// VisualizeError returns the visualization of the error if available.
+func VisualizeError(err error) (string, error) {
+ if e, ok := err.(errWithGraph); ok && e.Graph() != "" {
+ return string(e.Graph()), nil
+ }
+ return "", errors.New("unable to visualize error")
+}
+
+// Exits the application with the given exit code.
+func (app *App) exit(code int) {
+ osExit := os.Exit
+ if app.osExit != nil {
+ osExit = app.osExit
+ }
+ osExit(code)
+}
+
+// Run starts the application, blocks on the signals channel, and then
+// gracefully shuts the application down. It uses DefaultTimeout to set a
+// deadline for application startup and shutdown, unless the user has
+// configured different timeouts with the StartTimeout or StopTimeout options.
+// It's designed to make typical applications simple to run.
+//
+// However, all of Run's functionality is implemented in terms of the exported
+// Start, Done, and Stop methods. Applications with more specialized needs
+// can use those methods directly instead of relying on Run.
+func (app *App) Run() {
+ // Historically, we do not os.Exit(0) even though most applications
+ // cede control to Fx with they call app.Run. To avoid a breaking
+ // change, never os.Exit for success.
+ if code := app.run(app.Done()); code != 0 {
+ app.exit(code)
+ }
+}
+
+func (app *App) run(done <-chan os.Signal) (exitCode int) {
+ startCtx, cancel := app.clock.WithTimeout(context.Background(), app.StartTimeout())
+ defer cancel()
+
+ if err := app.Start(startCtx); err != nil {
+ return 1
+ }
+
+ sig := <-done
+ app.log().LogEvent(&fxevent.Stopping{Signal: sig})
+
+ stopCtx, cancel := app.clock.WithTimeout(context.Background(), app.StopTimeout())
+ defer cancel()
+
+ if err := app.Stop(stopCtx); err != nil {
+ return 1
+ }
+
+ return 0
+}
+
+// Err returns any error encountered during New's initialization. See the
+// documentation of the New method for details, but typical errors include
+// missing constructors, circular dependencies, constructor errors, and
+// invocation errors.
+//
+// Most users won't need to use this method, since both Run and Start
+// short-circuit if initialization failed.
+func (app *App) Err() error {
+ return app.err
+}
+
+var (
+ _onStartHook = "OnStart"
+ _onStopHook = "OnStop"
+)
+
+// Start kicks off all long-running goroutines, like network servers or
+// message queue consumers. It does this by interacting with the application's
+// Lifecycle.
+//
+// By taking a dependency on the Lifecycle type, some of the user-supplied
+// functions called during initialization may have registered start and stop
+// hooks. Because initialization calls constructors serially and in dependency
+// order, hooks are naturally registered in serial and dependency order too.
+//
+// Start executes all OnStart hooks registered with the application's
+// Lifecycle, one at a time and in order. This ensures that each constructor's
+// start hooks aren't executed until all its dependencies' start hooks
+// complete. If any of the start hooks return an error, Start short-circuits,
+// calls Stop, and returns the inciting error.
+//
+// Note that Start short-circuits immediately if the New constructor
+// encountered any errors in application initialization.
+func (app *App) Start(ctx context.Context) (err error) {
+ defer func() {
+ app.log().LogEvent(&fxevent.Started{Err: err})
+ }()
+
+ if app.err != nil {
+ // Some provides failed, short-circuit immediately.
+ return app.err
+ }
+
+ return withTimeout(ctx, &withTimeoutParams{
+ hook: _onStartHook,
+ callback: app.start,
+ lifecycle: app.lifecycle,
+ log: app.log(),
+ })
+}
+
+// withRollback will execute an anonymous function with a given context.
+// if the anon func returns an error, rollback methods will be called and related events emitted
+func (app *App) withRollback(
+ ctx context.Context,
+ f func(context.Context) error,
+) error {
+ if err := f(ctx); err != nil {
+ app.log().LogEvent(&fxevent.RollingBack{StartErr: err})
+
+ stopErr := app.lifecycle.Stop(ctx)
+ app.log().LogEvent(&fxevent.RolledBack{Err: stopErr})
+
+ if stopErr != nil {
+ return multierr.Append(err, stopErr)
+ }
+
+ return err
+ }
+
+ return nil
+}
+
+func (app *App) start(ctx context.Context) error {
+ return app.withRollback(ctx, func(ctx context.Context) error {
+ if err := app.lifecycle.Start(ctx); err != nil {
+ return err
+ }
+ app.receivers.Start(ctx)
+ return nil
+ })
+}
+
+// Stop gracefully stops the application. It executes any registered OnStop
+// hooks in reverse order, so that each constructor's stop hooks are called
+// before its dependencies' stop hooks.
+//
+// If the application didn't start cleanly, only hooks whose OnStart phase was
+// called are executed. However, all those hooks are executed, even if some
+// fail.
+func (app *App) Stop(ctx context.Context) (err error) {
+ defer func() {
+ app.log().LogEvent(&fxevent.Stopped{Err: err})
+ }()
+
+ cb := func(ctx context.Context) error {
+ defer app.receivers.Stop(ctx)
+ return app.lifecycle.Stop(ctx)
+ }
+
+ return withTimeout(ctx, &withTimeoutParams{
+ hook: _onStopHook,
+ callback: cb,
+ lifecycle: app.lifecycle,
+ log: app.log(),
+ })
+}
+
+// Done returns a channel of signals to block on after starting the
+// application. Applications listen for the SIGINT and SIGTERM signals; during
+// development, users can send the application SIGTERM by pressing Ctrl-C in
+// the same terminal as the running process.
+//
+// Alternatively, a signal can be broadcast to all done channels manually by
+// using the Shutdown functionality (see the Shutdowner documentation for details).
+//
+// Note: The channel Done returns will not receive a signal unless the application
+// as been started via Start or Run.
+func (app *App) Done() <-chan os.Signal {
+ return app.receivers.Done()
+}
+
+// Wait returns a channel of [ShutdownSignal] to block on after starting the
+// application and function, similar to [App.Done], but with a minor difference.
+// Should an ExitCode be provided as a [ShutdownOption] to
+// the Shutdowner Shutdown method, the exit code will be available as part
+// of the ShutdownSignal struct.
+//
+// Should the app receive a SIGTERM or SIGINT, the given
+// signal will be populated in the ShutdownSignal struct.
+func (app *App) Wait() <-chan ShutdownSignal {
+ return app.receivers.Wait()
+}
+
+// StartTimeout returns the configured startup timeout. Apps default to using
+// DefaultTimeout, but users can configure this behavior using the
+// StartTimeout option.
+func (app *App) StartTimeout() time.Duration {
+ return app.startTimeout
+}
+
+// StopTimeout returns the configured shutdown timeout. Apps default to using
+// DefaultTimeout, but users can configure this behavior using the StopTimeout
+// option.
+func (app *App) StopTimeout() time.Duration {
+ return app.stopTimeout
+}
+
+func (app *App) dotGraph() (DotGraph, error) {
+ var b bytes.Buffer
+ err := dig.Visualize(app.container, &b)
+ return DotGraph(b.String()), err
+}
+
+type withTimeoutParams struct {
+ log fxevent.Logger
+ hook string
+ callback func(context.Context) error
+ lifecycle *lifecycleWrapper
+}
+
+// errHookCallbackExited is returned when a hook callback does not finish executing
+var errHookCallbackExited = fmt.Errorf("goroutine exited without returning")
+
+func withTimeout(ctx context.Context, param *withTimeoutParams) error {
+ c := make(chan error, 1)
+ go func() {
+ // If runtime.Goexit() is called from within the callback
+ // then nothing is written to the chan.
+ // However the defer will still be called, so we can write to the chan,
+ // to avoid hanging until the timeout is reached.
+ callbackExited := false
+ defer func() {
+ if !callbackExited {
+ c <- errHookCallbackExited
+ }
+ }()
+
+ c <- param.callback(ctx)
+ callbackExited = true
+ }()
+
+ var err error
+
+ select {
+ case <-ctx.Done():
+ err = ctx.Err()
+ case err = <-c:
+ // If the context finished at the same time as the callback
+ // prefer the context error.
+ // This eliminates non-determinism in select-case selection.
+ if ctx.Err() != nil {
+ err = ctx.Err()
+ }
+ }
+
+ return err
+}
+
+// appLogger logs events to the given Fx app's "current" logger.
+//
+// Use this with lifecycle, for example, to ensure that events always go to the
+// correct logger.
+type appLogger struct{ app *App }
+
+func (l appLogger) LogEvent(ev fxevent.Event) {
+ l.app.log().LogEvent(ev)
+}
diff --git a/vendor/go.uber.org/fx/app_unixes.go b/vendor/go.uber.org/fx/app_unixes.go
new file mode 100644
index 000000000..5126e1618
--- /dev/null
+++ b/vendor/go.uber.org/fx/app_unixes.go
@@ -0,0 +1,29 @@
+// Copyright (c) 2021 Uber Technologies, Inc.
+//
+// Permission is hereby granted, free of charge, to any person obtaining a copy
+// of this software and associated documentation files (the "Software"), to deal
+// in the Software without restriction, including without limitation the rights
+// to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
+// copies of the Software, and to permit persons to whom the Software is
+// furnished to do so, subject to the following conditions:
+//
+// The above copyright notice and this permission notice shall be included in
+// all copies or substantial portions of the Software.
+//
+// THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+// IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+// FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
+// AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
+// LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
+// OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
+// THE SOFTWARE.
+
+//go:build aix || darwin || dragonfly || freebsd || linux || netbsd || openbsd || solaris
+// +build aix darwin dragonfly freebsd linux netbsd openbsd solaris
+
+package fx
+
+import "golang.org/x/sys/unix"
+
+const _sigINT = unix.SIGINT
+const _sigTERM = unix.SIGTERM
diff --git a/vendor/go.uber.org/fx/app_windows.go b/vendor/go.uber.org/fx/app_windows.go
new file mode 100644
index 000000000..b19a8d829
--- /dev/null
+++ b/vendor/go.uber.org/fx/app_windows.go
@@ -0,0 +1,29 @@
+// Copyright (c) 2021 Uber Technologies, Inc.
+//
+// Permission is hereby granted, free of charge, to any person obtaining a copy
+// of this software and associated documentation files (the "Software"), to deal
+// in the Software without restriction, including without limitation the rights
+// to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
+// copies of the Software, and to permit persons to whom the Software is
+// furnished to do so, subject to the following conditions:
+//
+// The above copyright notice and this permission notice shall be included in
+// all copies or substantial portions of the Software.
+//
+// THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+// IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+// FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
+// AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
+// LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
+// OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
+// THE SOFTWARE.
+
+//go:build windows
+// +build windows
+
+package fx
+
+import "golang.org/x/sys/windows"
+
+const _sigINT = windows.SIGINT
+const _sigTERM = windows.SIGTERM
diff --git a/vendor/go.uber.org/fx/checklicense.sh b/vendor/go.uber.org/fx/checklicense.sh
new file mode 100644
index 000000000..28057d2fb
--- /dev/null
+++ b/vendor/go.uber.org/fx/checklicense.sh
@@ -0,0 +1,17 @@
+#!/bin/bash -e
+
+ERROR_COUNT=0
+while read -r file
+do
+ case "$(head -1 "${file}")" in
+ *"Copyright (c) "*" Uber Technologies, Inc.")
+ # everything's cool
+ ;;
+ *)
+ echo "$file is missing license header."
+ (( ERROR_COUNT++ ))
+ ;;
+ esac
+done < <(git ls-files "*\.go" | grep -v /testdata/)
+
+exit $ERROR_COUNT
diff --git a/vendor/go.uber.org/fx/decorate.go b/vendor/go.uber.org/fx/decorate.go
new file mode 100644
index 000000000..6294a4efd
--- /dev/null
+++ b/vendor/go.uber.org/fx/decorate.go
@@ -0,0 +1,230 @@
+// Copyright (c) 2022 Uber Technologies, Inc.
+//
+// Permission is hereby granted, free of charge, to any person obtaining a copy
+// of this software and associated documentation files (the "Software"), to deal
+// in the Software without restriction, including without limitation the rights
+// to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
+// copies of the Software, and to permit persons to whom the Software is
+// furnished to do so, subject to the following conditions:
+//
+// The above copyright notice and this permission notice shall be included in
+// all copies or substantial portions of the Software.
+//
+// THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+// IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+// FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
+// AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
+// LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
+// OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
+// THE SOFTWARE.
+
+package fx
+
+import (
+ "fmt"
+ "strings"
+
+ "go.uber.org/dig"
+ "go.uber.org/fx/internal/fxreflect"
+)
+
+// Decorate specifies one or more decorator functions to an Fx application.
+//
+// # Decorator functions
+//
+// Decorator functions let users augment objects in the graph.
+// They can take in zero or more dependencies that must be provided to the
+// application with fx.Provide, and produce one or more values that can be used
+// by other fx.Provide and fx.Invoke calls.
+//
+// fx.Decorate(func(log *zap.Logger) *zap.Logger {
+// return log.Named("myapp")
+// })
+// fx.Invoke(func(log *zap.Logger) {
+// log.Info("hello")
+// // Output:
+// // {"level": "info","logger":"myapp","msg":"hello"}
+// })
+//
+// The following decorator accepts multiple dependencies from the graph,
+// augments and returns one of them.
+//
+// fx.Decorate(func(log *zap.Logger, cfg *Config) *zap.Logger {
+// return log.Named(cfg.Name)
+// })
+//
+// Similar to fx.Provide, functions passed to fx.Decorate may optionally return
+// an error as their last result.
+// If a decorator returns a non-nil error, it will halt application startup.
+//
+// fx.Decorate(func(conn *sql.DB, cfg *Config) (*sql.DB, error) {
+// if err := conn.Ping(); err != nil {
+// return sql.Open("driver-name", cfg.FallbackDB)
+// }
+// return conn, nil
+// })
+//
+// Decorators support both, fx.In and fx.Out structs, similar to fx.Provide and
+// fx.Invoke.
+//
+// type Params struct {
+// fx.In
+//
+// Client usersvc.Client `name:"readOnly"`
+// }
+//
+// type Result struct {
+// fx.Out
+//
+// Client usersvc.Client `name:"readOnly"`
+// }
+//
+// fx.Decorate(func(p Params) Result {
+// ...
+// })
+//
+// Decorators can be annotated with the fx.Annotate function, but not with the
+// fx.Annotated type. Refer to documentation on fx.Annotate() to learn how to
+// use it for annotating functions.
+//
+// fx.Decorate(
+// fx.Annotate(
+// func(client usersvc.Client) usersvc.Client {
+// // ...
+// },
+// fx.ParamTags(`name:"readOnly"`),
+// fx.ResultTags(`name:"readOnly"`),
+// ),
+// )
+//
+// Decorators support augmenting, filtering, or replacing value groups.
+// To decorate a value group, expect the entire value group slice and produce
+// the new slice.
+//
+// type HandlerParam struct {
+// fx.In
+//
+// Log *zap.Logger
+// Handlers []Handler `group:"server"
+// }
+//
+// type HandlerResult struct {
+// fx.Out
+//
+// Handlers []Handler `group:"server"
+// }
+//
+// fx.Decorate(func(p HandlerParam) HandlerResult {
+// var r HandlerResult
+// for _, handler := range p.Handlers {
+// r.Handlers = append(r.Handlers, wrapWithLogger(p.Log, handler))
+// }
+// return r
+// }),
+//
+// # Decorator scope
+//
+// Modifications made to the Fx graph with fx.Decorate are scoped to the
+// deepest fx.Module inside which the decorator was specified.
+//
+// fx.Module("mymodule",
+// fx.Decorate(func(log *zap.Logger) *zap.Logger {
+// return log.Named("myapp")
+// }),
+// fx.Invoke(func(log *zap.Logger) {
+// log.Info("decorated logger")
+// // Output:
+// // {"level": "info","logger":"myapp","msg":"decorated logger"}
+// }),
+// ),
+// fx.Invoke(func(log *zap.Logger) {
+// log.Info("plain logger")
+// // Output:
+// // {"level": "info","msg":"plain logger"}
+// }),
+//
+// Decorations specified in the top-level fx.New call apply across the
+// application and chain with module-specific decorators.
+//
+// fx.New(
+// // ...
+// fx.Decorate(func(log *zap.Logger) *zap.Logger {
+// return log.With(zap.Field("service", "myservice"))
+// }),
+// // ...
+// fx.Invoke(func(log *zap.Logger) {
+// log.Info("outer decorator")
+// // Output:
+// // {"level": "info","service":"myservice","msg":"outer decorator"}
+// }),
+// // ...
+// fx.Module("mymodule",
+// fx.Decorate(func(log *zap.Logger) *zap.Logger {
+// return log.Named("myapp")
+// }),
+// fx.Invoke(func(log *zap.Logger) {
+// log.Info("inner decorator")
+// // Output:
+// // {"level": "info","logger":"myapp","service":"myservice","msg":"inner decorator"}
+// }),
+// ),
+// )
+func Decorate(decorators ...interface{}) Option {
+ return decorateOption{
+ Targets: decorators,
+ Stack: fxreflect.CallerStack(1, 0),
+ }
+}
+
+type decorateOption struct {
+ Targets []interface{}
+ Stack fxreflect.Stack
+}
+
+func (o decorateOption) apply(mod *module) {
+ for _, target := range o.Targets {
+ mod.decorators = append(mod.decorators, decorator{
+ Target: target,
+ Stack: o.Stack,
+ })
+ }
+}
+
+func (o decorateOption) String() string {
+ items := make([]string, len(o.Targets))
+ for i, f := range o.Targets {
+ items[i] = fxreflect.FuncName(f)
+ }
+ return fmt.Sprintf("fx.Decorate(%s)", strings.Join(items, ", "))
+}
+
+// decorator is a single decorator used in Fx.
+type decorator struct {
+ // Decorator provided to Fx.
+ Target interface{}
+
+ // Stack trace of where this provide was made.
+ Stack fxreflect.Stack
+
+ // Whether this decorator was specified via fx.Replace
+ IsReplace bool
+}
+
+func runDecorator(c container, d decorator, opts ...dig.DecorateOption) (err error) {
+ decorator := d.Target
+ defer func() {
+ if err != nil {
+ err = fmt.Errorf("fx.Decorate(%v) from:\n%+vFailed: %v", decorator, d.Stack, err)
+ }
+ }()
+
+ switch decorator := decorator.(type) {
+ case annotated:
+ if dcor, derr := decorator.Build(); derr == nil {
+ err = c.Decorate(dcor, opts...)
+ }
+ default:
+ err = c.Decorate(decorator, opts...)
+ }
+ return
+}
diff --git a/vendor/go.uber.org/fx/doc.go b/vendor/go.uber.org/fx/doc.go
new file mode 100644
index 000000000..590fe8d39
--- /dev/null
+++ b/vendor/go.uber.org/fx/doc.go
@@ -0,0 +1,39 @@
+// Copyright (c) 2019 Uber Technologies, Inc.
+//
+// Permission is hereby granted, free of charge, to any person obtaining a copy
+// of this software and associated documentation files (the "Software"), to deal
+// in the Software without restriction, including without limitation the rights
+// to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
+// copies of the Software, and to permit persons to whom the Software is
+// furnished to do so, subject to the following conditions:
+//
+// The above copyright notice and this permission notice shall be included in
+// all copies or substantial portions of the Software.
+//
+// THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+// IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+// FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
+// AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
+// LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
+// OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
+// THE SOFTWARE.
+
+// Package fx is a framework that makes it easy to build applications out of
+// reusable, composable modules.
+//
+// Fx applications use dependency injection to eliminate globals without the
+// tedium of manually wiring together function calls. Unlike other approaches
+// to dependency injection, Fx works with plain Go functions: you don't need
+// to use struct tags or embed special types, so Fx automatically works well
+// with most Go packages.
+//
+// Basic usage is explained in the package-level example below. If you're new
+// to Fx, start there! Advanced features, including named instances, optional
+// parameters, and value groups, are explained under the In and Out types.
+//
+// # Testing Fx Applications
+//
+// To test functions that use the Lifecycle type or to write end-to-end tests
+// of your Fx application, use the helper functions and types provided by the
+// go.uber.org/fx/fxtest package.
+package fx // import "go.uber.org/fx"
diff --git a/vendor/go.uber.org/fx/extract.go b/vendor/go.uber.org/fx/extract.go
new file mode 100644
index 000000000..c09e6d3ce
--- /dev/null
+++ b/vendor/go.uber.org/fx/extract.go
@@ -0,0 +1,156 @@
+// Copyright (c) 2019 Uber Technologies, Inc.
+//
+// Permission is hereby granted, free of charge, to any person obtaining a copy
+// of this software and associated documentation files (the "Software"), to deal
+// in the Software without restriction, including without limitation the rights
+// to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
+// copies of the Software, and to permit persons to whom the Software is
+// furnished to do so, subject to the following conditions:
+//
+// The above copyright notice and this permission notice shall be included in
+// all copies or substantial portions of the Software.
+//
+// THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+// IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+// FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
+// AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
+// LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
+// OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
+// THE SOFTWARE.
+
+package fx
+
+import (
+ "fmt"
+ "reflect"
+ "unicode"
+ "unicode/utf8"
+)
+
+var _typeOfIn = reflect.TypeOf(In{})
+
+// Extract fills the given struct with values from the dependency injection
+// container on application initialization. The target MUST be a pointer to a
+// struct. Only exported fields will be filled.
+//
+// Deprecated: Use Populate instead.
+func Extract(target interface{}) Option {
+ v := reflect.ValueOf(target)
+
+ if t := v.Type(); t.Kind() != reflect.Ptr || t.Elem().Kind() != reflect.Struct {
+ return Error(fmt.Errorf("Extract expected a pointer to a struct, got a %v", t))
+ }
+
+ v = v.Elem()
+ t := v.Type()
+
+ // We generate a function which accepts a single fx.In struct as an
+ // argument. This struct contains all exported fields of the target
+ // struct.
+
+ // Fields of the generated fx.In struct.
+ fields := make([]reflect.StructField, 0, t.NumField()+1)
+
+ // Anonymous dig.In field.
+ fields = append(fields, reflect.StructField{
+ Name: _typeOfIn.Name(),
+ Anonymous: true,
+ Type: _typeOfIn,
+ })
+
+ // List of values in the target struct aligned with the fields of the
+ // generated struct.
+ //
+ // So for example, if the target is,
+ //
+ // var target struct {
+ // Foo io.Reader
+ // bar []byte
+ // Baz io.Writer
+ // }
+ //
+ // The generated struct has the shape,
+ //
+ // struct {
+ // fx.In
+ //
+ // F0 io.Reader
+ // F2 io.Writer
+ // }
+ //
+ // And `targets` is,
+ //
+ // [
+ // target.Field(0), // Foo io.Reader
+ // target.Field(2), // Baz io.Writer
+ // ]
+ //
+ // As we iterate through the fields of the generated struct, we can copy
+ // the value into the corresponding value in the targets list.
+ targets := make([]reflect.Value, 0, t.NumField())
+
+ for i := 0; i < t.NumField(); i++ {
+ f := t.Field(i)
+
+ // Skip unexported fields.
+ if f.Anonymous {
+ // If embedded, StructField.PkgPath is not a reliable indicator of
+ // whether the field is exported. See
+ // https://github.com/golang/go/issues/21122
+
+ t := f.Type
+ if t.Kind() == reflect.Ptr {
+ t = t.Elem()
+ }
+
+ if !isExported(t.Name()) {
+ continue
+ }
+ } else if f.PkgPath != "" {
+ continue
+ }
+
+ // We don't copy over names or embedded semantics.
+ fields = append(fields, reflect.StructField{
+ Name: fmt.Sprintf("F%d", i),
+ Type: f.Type,
+ Tag: f.Tag,
+ })
+ targets = append(targets, v.Field(i))
+ }
+
+ // Equivalent to,
+ //
+ // func(r struct {
+ // fx.In
+ //
+ // F1 Foo
+ // F2 Bar
+ // }) {
+ // target.Foo = r.F1
+ // target.Bar = r.F2
+ // }
+
+ fn := reflect.MakeFunc(
+ reflect.FuncOf(
+ []reflect.Type{reflect.StructOf(fields)},
+ nil, /* results */
+ false, /* variadic */
+ ),
+ func(args []reflect.Value) []reflect.Value {
+ result := args[0]
+ for i := 1; i < result.NumField(); i++ {
+ targets[i-1].Set(result.Field(i))
+ }
+ return nil
+ },
+ )
+
+ return Invoke(fn.Interface())
+}
+
+// isExported reports whether the identifier is exported.
+func isExported(id string) bool {
+ r, _ := utf8.DecodeRuneInString(id)
+ return unicode.IsUpper(r)
+}
diff --git a/vendor/go.uber.org/fx/fxevent/console.go b/vendor/go.uber.org/fx/fxevent/console.go
new file mode 100644
index 000000000..320fdfd4d
--- /dev/null
+++ b/vendor/go.uber.org/fx/fxevent/console.go
@@ -0,0 +1,143 @@
+// Copyright (c) 2021 Uber Technologies, Inc.
+//
+// Permission is hereby granted, free of charge, to any person obtaining a copy
+// of this software and associated documentation files (the "Software"), to deal
+// in the Software without restriction, including without limitation the rights
+// to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
+// copies of the Software, and to permit persons to whom the Software is
+// furnished to do so, subject to the following conditions:
+//
+// The above copyright notice and this permission notice shall be included in
+// all copies or substantial portions of the Software.
+//
+// THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+// IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+// FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
+// AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
+// LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
+// OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
+// THE SOFTWARE.
+
+package fxevent
+
+import (
+ "fmt"
+ "io"
+ "strings"
+)
+
+// ConsoleLogger is an Fx event logger that attempts to write human-readable
+// messages to the console.
+//
+// Use this during development.
+type ConsoleLogger struct {
+ W io.Writer
+}
+
+var _ Logger = (*ConsoleLogger)(nil)
+
+func (l *ConsoleLogger) logf(msg string, args ...interface{}) {
+ fmt.Fprintf(l.W, "[Fx] "+msg+"\n", args...)
+}
+
+// LogEvent logs the given event to the provided Zap logger.
+func (l *ConsoleLogger) LogEvent(event Event) {
+ switch e := event.(type) {
+ case *OnStartExecuting:
+ l.logf("HOOK OnStart\t\t%s executing (caller: %s)", e.FunctionName, e.CallerName)
+ case *OnStartExecuted:
+ if e.Err != nil {
+ l.logf("HOOK OnStart\t\t%s called by %s failed in %s: %+v", e.FunctionName, e.CallerName, e.Runtime, e.Err)
+ } else {
+ l.logf("HOOK OnStart\t\t%s called by %s ran successfully in %s", e.FunctionName, e.CallerName, e.Runtime)
+ }
+ case *OnStopExecuting:
+ l.logf("HOOK OnStop\t\t%s executing (caller: %s)", e.FunctionName, e.CallerName)
+ case *OnStopExecuted:
+ if e.Err != nil {
+ l.logf("HOOK OnStop\t\t%s called by %s failed in %s: %+v", e.FunctionName, e.CallerName, e.Runtime, e.Err)
+ } else {
+ l.logf("HOOK OnStop\t\t%s called by %s ran successfully in %s", e.FunctionName, e.CallerName, e.Runtime)
+ }
+ case *Supplied:
+ if e.Err != nil {
+ l.logf("ERROR\tFailed to supply %v: %+v", e.TypeName, e.Err)
+ } else if e.ModuleName != "" {
+ l.logf("SUPPLY\t%v from module %q", e.TypeName, e.ModuleName)
+ } else {
+ l.logf("SUPPLY\t%v", e.TypeName)
+ }
+ case *Provided:
+ var privateStr string
+ if e.Private {
+ privateStr = " (PRIVATE)"
+ }
+ for _, rtype := range e.OutputTypeNames {
+ if e.ModuleName != "" {
+ l.logf("PROVIDE%v\t%v <= %v from module %q", privateStr, rtype, e.ConstructorName, e.ModuleName)
+ } else {
+ l.logf("PROVIDE%v\t%v <= %v", privateStr, rtype, e.ConstructorName)
+ }
+ }
+ if e.Err != nil {
+ l.logf("Error after options were applied: %+v", e.Err)
+ }
+
+ case *Replaced:
+ for _, rtype := range e.OutputTypeNames {
+ if e.ModuleName != "" {
+ l.logf("REPLACE\t%v from module %q", rtype, e.ModuleName)
+ } else {
+ l.logf("REPLACE\t%v", rtype)
+ }
+ }
+ if e.Err != nil {
+ l.logf("ERROR\tFailed to replace: %+v", e.Err)
+ }
+ case *Decorated:
+ for _, rtype := range e.OutputTypeNames {
+ if e.ModuleName != "" {
+ l.logf("DECORATE\t%v <= %v from module %q", rtype, e.DecoratorName, e.ModuleName)
+ } else {
+ l.logf("DECORATE\t%v <= %v", rtype, e.DecoratorName)
+ }
+ }
+ if e.Err != nil {
+ l.logf("Error after options were applied: %+v", e.Err)
+ }
+ case *Invoking:
+ if e.ModuleName != "" {
+ l.logf("INVOKE\t\t%s from module %q", e.FunctionName, e.ModuleName)
+ } else {
+ l.logf("INVOKE\t\t%s", e.FunctionName)
+ }
+ case *Invoked:
+ if e.Err != nil {
+ l.logf("ERROR\t\tfx.Invoke(%v) called from:\n%+vFailed: %+v", e.FunctionName, e.Trace, e.Err)
+ }
+ case *Stopping:
+ l.logf("%v", strings.ToUpper(e.Signal.String()))
+ case *Stopped:
+ if e.Err != nil {
+ l.logf("ERROR\t\tFailed to stop cleanly: %+v", e.Err)
+ }
+ case *RollingBack:
+ l.logf("ERROR\t\tStart failed, rolling back: %+v", e.StartErr)
+ case *RolledBack:
+ if e.Err != nil {
+ l.logf("ERROR\t\tCouldn't roll back cleanly: %+v", e.Err)
+ }
+ case *Started:
+ if e.Err != nil {
+ l.logf("ERROR\t\tFailed to start: %+v", e.Err)
+ } else {
+ l.logf("RUNNING")
+ }
+ case *LoggerInitialized:
+ if e.Err != nil {
+ l.logf("ERROR\t\tFailed to initialize custom logger: %+v", e.Err)
+ } else {
+ l.logf("LOGGER\tInitialized custom logger from %v", e.ConstructorName)
+ }
+ }
+}
diff --git a/vendor/go.uber.org/fx/fxevent/event.go b/vendor/go.uber.org/fx/fxevent/event.go
new file mode 100644
index 000000000..c367f730c
--- /dev/null
+++ b/vendor/go.uber.org/fx/fxevent/event.go
@@ -0,0 +1,240 @@
+// Copyright (c) 2021 Uber Technologies, Inc.
+//
+// Permission is hereby granted, free of charge, to any person obtaining a copy
+// of this software and associated documentation files (the "Software"), to deal
+// in the Software without restriction, including without limitation the rights
+// to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
+// copies of the Software, and to permit persons to whom the Software is
+// furnished to do so, subject to the following conditions:
+//
+// The above copyright notice and this permission notice shall be included in
+// all copies or substantial portions of the Software.
+//
+// THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+// IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+// FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
+// AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
+// LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
+// OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
+// THE SOFTWARE.
+
+package fxevent
+
+import (
+ "os"
+ "time"
+)
+
+// Event defines an event emitted by fx.
+type Event interface {
+ event() // Only fxlog can implement this interface.
+}
+
+// Passing events by type to make Event hashable in the future.
+func (*OnStartExecuting) event() {}
+func (*OnStartExecuted) event() {}
+func (*OnStopExecuting) event() {}
+func (*OnStopExecuted) event() {}
+func (*Supplied) event() {}
+func (*Provided) event() {}
+func (*Replaced) event() {}
+func (*Decorated) event() {}
+func (*Invoking) event() {}
+func (*Invoked) event() {}
+func (*Stopping) event() {}
+func (*Stopped) event() {}
+func (*RollingBack) event() {}
+func (*RolledBack) event() {}
+func (*Started) event() {}
+func (*LoggerInitialized) event() {}
+
+// OnStartExecuting is emitted before an OnStart hook is exeucted.
+type OnStartExecuting struct {
+ // FunctionName is the name of the function that will be executed.
+ FunctionName string
+
+ // CallerName is the name of the function that scheduled the hook for
+ // execution.
+ CallerName string
+}
+
+// OnStartExecuted is emitted after an OnStart hook has been executed.
+type OnStartExecuted struct {
+ // FunctionName is the name of the function that was executed.
+ FunctionName string
+
+ // CallerName is the name of the function that scheduled the hook for
+ // execution.
+ CallerName string
+
+ // Method specifies the kind of the hook. This is one of "OnStart" and
+ // "OnStop".
+ Method string
+
+ // Runtime specifies how long it took to run this hook.
+ Runtime time.Duration
+
+ // Err is non-nil if the hook failed to execute.
+ Err error
+}
+
+// OnStopExecuting is emitted before an OnStop hook is exeucted.
+type OnStopExecuting struct {
+ // FunctionName is the name of the function that will be executed.
+ FunctionName string
+
+ // CallerName is the name of the function that scheduled the hook for
+ // execution.
+ CallerName string
+}
+
+// OnStopExecuted is emitted after an OnStop hook has been executed.
+type OnStopExecuted struct {
+ // FunctionName is the name of the function that was executed.
+ FunctionName string
+
+ // CallerName is the name of the function that scheduled the hook for
+ // execution.
+ CallerName string
+
+ // Runtime specifies how long it took to run this hook.
+ Runtime time.Duration
+
+ // Err is non-nil if the hook failed to execute.
+ Err error
+}
+
+// Supplied is emitted after a value is added with fx.Supply.
+type Supplied struct {
+ // TypeName is the name of the type of value that was added.
+ TypeName string
+
+ // ModuleName is the name of the module in which the value was added to.
+ ModuleName string
+
+ // Err is non-nil if we failed to supply the value.
+ Err error
+}
+
+// Provided is emitted when a constructor is provided to Fx.
+type Provided struct {
+ // ConstructorName is the name of the constructor that was provided to
+ // Fx.
+ ConstructorName string
+
+ // OutputTypeNames is a list of names of types that are produced by
+ // this constructor.
+ OutputTypeNames []string
+
+ // ModuleName is the name of the module in which the constructor was
+ // provided to.
+ ModuleName string
+
+ // Err is non-nil if we failed to provide this constructor.
+ Err error
+
+ // Private denotes whether the provided constructor is a [Private] constructor.
+ Private bool
+}
+
+// Replaced is emitted when a value replaces a type in Fx.
+type Replaced struct {
+ // OutputTypeNames is a list of names of types that were replaced.
+ OutputTypeNames []string
+
+ // ModuleName is the name of the module in which the value was added to.
+ ModuleName string
+
+ // Err is non-nil if we failed to supply the value.
+ Err error
+}
+
+// Decorated is emitted when a decorator is executed in Fx.
+type Decorated struct {
+ // DecoratorName is the name of the decorator function that was
+ // provided to Fx.
+ DecoratorName string
+
+ // ModuleName is the name of the module in which the value was added to.
+ ModuleName string
+
+ // OutputTypeNames is a list of names of types that are decorated by
+ // this decorator.
+ OutputTypeNames []string
+
+ // Err is non-nil if we failed to run this decorator.
+ Err error
+}
+
+// Invoking is emitted before we invoke a function specified with fx.Invoke.
+type Invoking struct {
+ // FunctionName is the name of the function that will be invoked.
+ FunctionName string
+
+ // ModuleName is the name of the module in which the value was added to.
+ ModuleName string
+}
+
+// Invoked is emitted after we invoke a function specified with fx.Invoke,
+// whether it succeeded or failed.
+type Invoked struct {
+ // Functionname is the name of the function that was invoked.
+ FunctionName string
+
+ // ModuleName is the name of the module in which the value was added to.
+ ModuleName string
+
+ // Err is non-nil if the function failed to execute.
+ Err error
+
+ // Trace records information about where the fx.Invoke call was made.
+ // Note that this is NOT a stack trace of the error itself.
+ Trace string
+}
+
+// Started is emitted when an application is started successfully and/or it
+// errored.
+type Started struct {
+ // Err is non-nil if the application failed to start successfully.
+ Err error
+}
+
+// Stopping is emitted when the application receives a signal to shut down
+// after starting. This may happen with fx.Shutdowner or by sending a signal to
+// the application on the command line.
+type Stopping struct {
+ // Signal is the signal that caused this shutdown.
+ Signal os.Signal
+}
+
+// Stopped is emitted when the application has finished shutting down, whether
+// successfully or not.
+type Stopped struct {
+ // Err is non-nil if errors were encountered during shutdown.
+ Err error
+}
+
+// RollingBack is emitted when the application failed to start up due to an
+// error, and is being rolled back.
+type RollingBack struct {
+ // StartErr is the error that caused this rollback.
+ StartErr error
+}
+
+// RolledBack is emitted after a service has been rolled back, whether it
+// succeeded or not.
+type RolledBack struct {
+ // Err is non-nil if the rollback failed.
+ Err error
+}
+
+// LoggerInitialized is emitted when a logger supplied with fx.WithLogger is
+// instantiated, or if it fails to instantiate.
+type LoggerInitialized struct {
+ // ConstructorName is the name of the constructor that builds this
+ // logger.
+ ConstructorName string
+
+ // Err is non-nil if the logger failed to build.
+ Err error
+}
diff --git a/vendor/go.uber.org/fx/fxevent/logger.go b/vendor/go.uber.org/fx/fxevent/logger.go
new file mode 100644
index 000000000..0822d0dca
--- /dev/null
+++ b/vendor/go.uber.org/fx/fxevent/logger.go
@@ -0,0 +1,38 @@
+// Copyright (c) 2021 Uber Technologies, Inc.
+//
+// Permission is hereby granted, free of charge, to any person obtaining a copy
+// of this software and associated documentation files (the "Software"), to deal
+// in the Software without restriction, including without limitation the rights
+// to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
+// copies of the Software, and to permit persons to whom the Software is
+// furnished to do so, subject to the following conditions:
+//
+// The above copyright notice and this permission notice shall be included in
+// all copies or substantial portions of the Software.
+//
+// THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+// IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+// FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
+// AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
+// LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
+// OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
+// THE SOFTWARE.
+
+package fxevent
+
+// Logger defines interface used for logging.
+type Logger interface {
+ // LogEvent is called when a logging event is emitted.
+ LogEvent(Event)
+}
+
+// NopLogger is an Fx event logger that ignores all messages.
+var NopLogger = nopLogger{}
+
+type nopLogger struct{}
+
+var _ Logger = nopLogger{}
+
+func (nopLogger) LogEvent(Event) {}
+
+func (nopLogger) String() string { return "NopLogger" }
diff --git a/vendor/go.uber.org/fx/fxevent/zap.go b/vendor/go.uber.org/fx/fxevent/zap.go
new file mode 100644
index 000000000..b558b2ac8
--- /dev/null
+++ b/vendor/go.uber.org/fx/fxevent/zap.go
@@ -0,0 +1,209 @@
+// Copyright (c) 2021 Uber Technologies, Inc.
+//
+// Permission is hereby granted, free of charge, to any person obtaining a copy
+// of this software and associated documentation files (the "Software"), to deal
+// in the Software without restriction, including without limitation the rights
+// to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
+// copies of the Software, and to permit persons to whom the Software is
+// furnished to do so, subject to the following conditions:
+//
+// The above copyright notice and this permission notice shall be included in
+// all copies or substantial portions of the Software.
+//
+// THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+// IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+// FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
+// AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
+// LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
+// OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
+// THE SOFTWARE.
+
+package fxevent
+
+import (
+ "strings"
+
+ "go.uber.org/zap"
+ "go.uber.org/zap/zapcore"
+)
+
+// ZapLogger is an Fx event logger that logs events to Zap.
+type ZapLogger struct {
+ Logger *zap.Logger
+
+ logLevel zapcore.Level // default: zapcore.InfoLevel
+ errorLevel *zapcore.Level
+}
+
+var _ Logger = (*ZapLogger)(nil)
+
+// UseErrorLevel sets the level of error logs emitted by Fx to level.
+func (l *ZapLogger) UseErrorLevel(level zapcore.Level) {
+ l.errorLevel = &level
+}
+
+// UseLogLevel sets the level of non-error logs emitted by Fx to level.
+func (l *ZapLogger) UseLogLevel(level zapcore.Level) {
+ l.logLevel = level
+}
+
+func (l *ZapLogger) logEvent(msg string, fields ...zap.Field) {
+ l.Logger.Log(l.logLevel, msg, fields...)
+}
+
+func (l *ZapLogger) logError(msg string, fields ...zap.Field) {
+ lvl := zapcore.ErrorLevel
+ if l.errorLevel != nil {
+ lvl = *l.errorLevel
+ }
+ l.Logger.Log(lvl, msg, fields...)
+}
+
+// LogEvent logs the given event to the provided Zap logger.
+func (l *ZapLogger) LogEvent(event Event) {
+ switch e := event.(type) {
+ case *OnStartExecuting:
+ l.logEvent("OnStart hook executing",
+ zap.String("callee", e.FunctionName),
+ zap.String("caller", e.CallerName),
+ )
+ case *OnStartExecuted:
+ if e.Err != nil {
+ l.logError("OnStart hook failed",
+ zap.String("callee", e.FunctionName),
+ zap.String("caller", e.CallerName),
+ zap.Error(e.Err),
+ )
+ } else {
+ l.logEvent("OnStart hook executed",
+ zap.String("callee", e.FunctionName),
+ zap.String("caller", e.CallerName),
+ zap.String("runtime", e.Runtime.String()),
+ )
+ }
+ case *OnStopExecuting:
+ l.logEvent("OnStop hook executing",
+ zap.String("callee", e.FunctionName),
+ zap.String("caller", e.CallerName),
+ )
+ case *OnStopExecuted:
+ if e.Err != nil {
+ l.logError("OnStop hook failed",
+ zap.String("callee", e.FunctionName),
+ zap.String("caller", e.CallerName),
+ zap.Error(e.Err),
+ )
+ } else {
+ l.logEvent("OnStop hook executed",
+ zap.String("callee", e.FunctionName),
+ zap.String("caller", e.CallerName),
+ zap.String("runtime", e.Runtime.String()),
+ )
+ }
+ case *Supplied:
+ if e.Err != nil {
+ l.logError("error encountered while applying options",
+ zap.String("type", e.TypeName),
+ moduleField(e.ModuleName),
+ zap.Error(e.Err))
+ } else {
+ l.logEvent("supplied",
+ zap.String("type", e.TypeName),
+ moduleField(e.ModuleName),
+ )
+ }
+ case *Provided:
+ for _, rtype := range e.OutputTypeNames {
+ l.logEvent("provided",
+ zap.String("constructor", e.ConstructorName),
+ moduleField(e.ModuleName),
+ zap.String("type", rtype),
+ maybeBool("private", e.Private),
+ )
+ }
+ if e.Err != nil {
+ l.logError("error encountered while applying options",
+ moduleField(e.ModuleName),
+ zap.Error(e.Err))
+ }
+ case *Replaced:
+ for _, rtype := range e.OutputTypeNames {
+ l.logEvent("replaced",
+ moduleField(e.ModuleName),
+ zap.String("type", rtype),
+ )
+ }
+ if e.Err != nil {
+ l.logError("error encountered while replacing",
+ moduleField(e.ModuleName),
+ zap.Error(e.Err))
+ }
+ case *Decorated:
+ for _, rtype := range e.OutputTypeNames {
+ l.logEvent("decorated",
+ zap.String("decorator", e.DecoratorName),
+ moduleField(e.ModuleName),
+ zap.String("type", rtype),
+ )
+ }
+ if e.Err != nil {
+ l.logError("error encountered while applying options",
+ moduleField(e.ModuleName),
+ zap.Error(e.Err))
+ }
+ case *Invoking:
+ // Do not log stack as it will make logs hard to read.
+ l.logEvent("invoking",
+ zap.String("function", e.FunctionName),
+ moduleField(e.ModuleName),
+ )
+ case *Invoked:
+ if e.Err != nil {
+ l.logError("invoke failed",
+ zap.Error(e.Err),
+ zap.String("stack", e.Trace),
+ zap.String("function", e.FunctionName),
+ moduleField(e.ModuleName),
+ )
+ }
+ case *Stopping:
+ l.logEvent("received signal",
+ zap.String("signal", strings.ToUpper(e.Signal.String())))
+ case *Stopped:
+ if e.Err != nil {
+ l.logError("stop failed", zap.Error(e.Err))
+ }
+ case *RollingBack:
+ l.logError("start failed, rolling back", zap.Error(e.StartErr))
+ case *RolledBack:
+ if e.Err != nil {
+ l.logError("rollback failed", zap.Error(e.Err))
+ }
+ case *Started:
+ if e.Err != nil {
+ l.logError("start failed", zap.Error(e.Err))
+ } else {
+ l.logEvent("started")
+ }
+ case *LoggerInitialized:
+ if e.Err != nil {
+ l.logError("custom logger initialization failed", zap.Error(e.Err))
+ } else {
+ l.logEvent("initialized custom fxevent.Logger", zap.String("function", e.ConstructorName))
+ }
+ }
+}
+
+func moduleField(name string) zap.Field {
+ if len(name) == 0 {
+ return zap.Skip()
+ }
+ return zap.String("module", name)
+}
+
+func maybeBool(name string, b bool) zap.Field {
+ if b {
+ return zap.Bool(name, true)
+ }
+ return zap.Skip()
+}
diff --git a/vendor/go.uber.org/fx/inout.go b/vendor/go.uber.org/fx/inout.go
new file mode 100644
index 000000000..05303944c
--- /dev/null
+++ b/vendor/go.uber.org/fx/inout.go
@@ -0,0 +1,364 @@
+// Copyright (c) 2019 Uber Technologies, Inc.
+//
+// Permission is hereby granted, free of charge, to any person obtaining a copy
+// of this software and associated documentation files (the "Software"), to deal
+// in the Software without restriction, including without limitation the rights
+// to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
+// copies of the Software, and to permit persons to whom the Software is
+// furnished to do so, subject to the following conditions:
+//
+// The above copyright notice and this permission notice shall be included in
+// all copies or substantial portions of the Software.
+//
+// THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+// IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+// FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
+// AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
+// LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
+// OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
+// THE SOFTWARE.
+
+package fx
+
+import "go.uber.org/dig"
+
+// In can be embedded in a constructor's parameter struct to take advantage of
+// advanced dependency injection features.
+//
+// Modules should take a single parameter struct that embeds an In in order to
+// provide a forward-compatible API: since adding fields to a struct is
+// backward-compatible, modules can then add optional dependencies in minor
+// releases.
+//
+// # Parameter Structs
+//
+// Fx constructors declare their dependencies as function parameters. This can
+// quickly become unreadable if the constructor has a lot of dependencies.
+//
+// func NewHandler(users *UserGateway, comments *CommentGateway, posts *PostGateway, votes *VoteGateway, authz *AuthZGateway) *Handler {
+// // ...
+// }
+//
+// To improve the readability of constructors like this, create a struct that
+// lists all the dependencies as fields and change the function to accept that
+// struct instead. The new struct is called a parameter struct.
+//
+// Fx has first class support for parameter structs: any struct embedding
+// fx.In gets treated as a parameter struct, so the individual fields in the
+// struct are supplied via dependency injection. Using a parameter struct, we
+// can make the constructor above much more readable:
+//
+// type HandlerParams struct {
+// fx.In
+//
+// Users *UserGateway
+// Comments *CommentGateway
+// Posts *PostGateway
+// Votes *VoteGateway
+// AuthZ *AuthZGateway
+// }
+//
+// func NewHandler(p HandlerParams) *Handler {
+// // ...
+// }
+//
+// Though it's rarely a good idea, constructors can receive any combination of
+// parameter structs and parameters.
+//
+// func NewHandler(p HandlerParams, l *log.Logger) *Handler {
+// // ...
+// }
+//
+// # Optional Dependencies
+//
+// Constructors often have optional dependencies on some types: if those types are
+// missing, they can operate in a degraded state. Fx supports optional
+// dependencies via the `optional:"true"` tag to fields on parameter structs.
+//
+// type UserGatewayParams struct {
+// fx.In
+//
+// Conn *sql.DB
+// Cache *redis.Client `optional:"true"`
+// }
+//
+// If an optional field isn't available in the container, the constructor
+// receives the field's zero value.
+//
+// func NewUserGateway(p UserGatewayParams, log *log.Logger) (*UserGateway, error) {
+// if p.Cache == nil {
+// log.Print("Caching disabled")
+// }
+// // ...
+// }
+//
+// Constructors that declare optional dependencies MUST gracefully handle
+// situations in which those dependencies are absent.
+//
+// The optional tag also allows adding new dependencies without breaking
+// existing consumers of the constructor.
+//
+// # Named Values
+//
+// Some use cases require the application container to hold multiple values of
+// the same type. For details on producing named values, see the documentation
+// for the Out type.
+//
+// Fx allows functions to consume named values via the `name:".."` tag on
+// parameter structs. Note that both the name AND type of the fields on the
+// parameter struct must match the corresponding result struct.
+//
+// type GatewayParams struct {
+// fx.In
+//
+// WriteToConn *sql.DB `name:"rw"`
+// ReadFromConn *sql.DB `name:"ro"`
+// }
+//
+// The name tag may be combined with the optional tag to declare the
+// dependency optional.
+//
+// type GatewayParams struct {
+// fx.In
+//
+// WriteToConn *sql.DB `name:"rw"`
+// ReadFromConn *sql.DB `name:"ro" optional:"true"`
+// }
+//
+// func NewCommentGateway(p GatewayParams, log *log.Logger) (*CommentGateway, error) {
+// if p.ReadFromConn == nil {
+// log.Print("Warning: Using RW connection for reads")
+// p.ReadFromConn = p.WriteToConn
+// }
+// // ...
+// }
+//
+// # Value Groups
+//
+// To make it easier to produce and consume many values of the same type, Fx
+// supports named, unordered collections called value groups. For details on
+// producing value groups, see the documentation for the Out type.
+//
+// Functions can depend on a value group by requesting a slice tagged with
+// `group:".."`. This will execute all constructors that provide a value to
+// that group in an unspecified order, then collect all the results into a
+// single slice. Keep in mind that this makes the types of the parameter and
+// result struct fields different: if a group of constructors each returns
+// type T, parameter structs consuming the group must use a field of type []T.
+//
+// type ServerParams struct {
+// fx.In
+//
+// Handlers []Handler `group:"server"`
+// }
+//
+// func NewServer(p ServerParams) *Server {
+// server := newServer()
+// for _, h := range p.Handlers {
+// server.Register(h)
+// }
+// return server
+// }
+//
+// Note that values in a value group are unordered. Fx makes no guarantees
+// about the order in which these values will be produced.
+//
+// # Soft Value Groups
+//
+// A soft value group can be thought of as a best-attempt at populating the
+// group with values from constructors that have already run. In other words,
+// if a constructor's output type is only consumed by a soft value group,
+// it will not be run.
+//
+// Note that Fx does not guarantee precise execution order of constructors
+// or invokers, which means that the change in code that affects execution
+// ordering of other constructors or functions will affect the values
+// populated in this group.
+//
+// To declare a soft relationship between a group and its constructors, use
+// the `soft` option on the group tag (`group:"[groupname],soft"`).
+// This option is only valid for input parameters.
+//
+// type Params struct {
+// fx.In
+//
+// Handlers []Handler `group:"server,soft"`
+// Logger *zap.Logger
+// }
+//
+// NewHandlerAndLogger := func() (Handler, *zap.Logger) { ... }
+// NewHandler := func() Handler { ... }
+// Foo := func(Params) { ... }
+//
+// app := fx.New(
+// fx.Provide(fx.Annotate(NewHandlerAndLogger, fx.ResultTags(`group:"server"`))),
+//	    fx.Provide(fx.Annotate(NewHandler, fx.ResultTags(`group:"server"`))),
+// fx.Invoke(Foo),
+// )
+//
+// The only constructor called is `NewHandlerAndLogger`, because this also provides
+// `*zap.Logger` needed in the `Params` struct received by `Foo`. The Handlers
+// group will be populated with a single Handler returned by `NewHandlerAndLogger`.
+//
+// In the next example, the slice `s` isn't populated as the provider would be
+// called only because `strings` soft group value is its only consumer.
+//
+// app := fx.New(
+// fx.Provide(
+// fx.Annotate(
+// func() (string, int) { return "hello", 42 },
+// fx.ResultTags(`group:"strings"`),
+// ),
+// ),
+// fx.Invoke(
+// fx.Annotate(func(s []string) {
+// // s will be an empty slice
+// }, fx.ParamTags(`group:"strings,soft"`)),
+// ),
+// )
+//
+// In the next example, the slice `s` will be populated because there is a
+// consumer for the same type which is not a `soft` dependency.
+//
+// app := fx.New(
+// fx.Provide(
+// fx.Annotate(
+//	            func() string { return "hello" },
+// fx.ResultTags(`group:"strings"`),
+// ),
+// ),
+// fx.Invoke(
+// fx.Annotate(func(b []string) {
+// // b is []string{"hello"}
+// }, fx.ParamTags(`group:"strings"`)),
+// ),
+// fx.Invoke(
+// fx.Annotate(func(s []string) {
+// // s is []string{"hello"}
+// }, fx.ParamTags(`group:"strings,soft"`)),
+// ),
+// )
+//
+// # Unexported fields
+//
+// By default, a type that embeds fx.In may not have any unexported fields. The
+// following will return an error if used with Fx.
+//
+// type Params struct {
+// fx.In
+//
+// Logger *zap.Logger
+// mu sync.Mutex
+// }
+//
+// If you have need of unexported fields on such a type, you may opt-into
+// ignoring unexported fields by adding the ignore-unexported struct tag to the
+// fx.In. For example,
+//
+// type Params struct {
+// fx.In `ignore-unexported:"true"`
+//
+// Logger *zap.Logger
+// mu sync.Mutex
+// }
+type In = dig.In
+
+// Out is the inverse of In: it can be embedded in result structs to take
+// advantage of advanced features.
+//
+// Modules should return a single result struct that embeds an Out in order to
+// provide a forward-compatible API: since adding fields to a struct is
+// backward-compatible, minor releases can provide additional types.
+//
+// # Result Structs
+//
+// Result structs are the inverse of parameter structs (discussed in the In
+// documentation). These structs represent multiple outputs from a
+// single function as fields. Fx treats all structs embedding fx.Out as result
+// structs, so other constructors can rely on the result struct's fields
+// directly.
+//
+// Without result structs, we sometimes have function definitions like this:
+//
+// func SetupGateways(conn *sql.DB) (*UserGateway, *CommentGateway, *PostGateway, error) {
+// // ...
+// }
+//
+// With result structs, we can make this both more readable and easier to
+// modify in the future:
+//
+// type Gateways struct {
+// fx.Out
+//
+// Users *UserGateway
+// Comments *CommentGateway
+// Posts *PostGateway
+// }
+//
+// func SetupGateways(conn *sql.DB) (Gateways, error) {
+// // ...
+// }
+//
+// # Named Values
+//
+// Some use cases require the application container to hold multiple values of
+// the same type. For details on consuming named values, see the documentation
+// for the In type.
+//
+// A constructor that produces a result struct can tag any field with
+// `name:".."` to have the corresponding value added to the graph under the
+// specified name. An application may contain at most one unnamed value of a
+// given type, but may contain any number of named values of the same type.
+//
+// type ConnectionResult struct {
+// fx.Out
+//
+// ReadWrite *sql.DB `name:"rw"`
+// ReadOnly *sql.DB `name:"ro"`
+// }
+//
+// func ConnectToDatabase(...) (ConnectionResult, error) {
+// // ...
+// return ConnectionResult{ReadWrite: rw, ReadOnly: ro}, nil
+// }
+//
+// # Value Groups
+//
+// To make it easier to produce and consume many values of the same type, Fx
+// supports named, unordered collections called value groups. For details on
+// consuming value groups, see the documentation for the In type.
+//
+// Constructors can send values into value groups by returning a result struct
+// tagged with `group:".."`.
+//
+// type HandlerResult struct {
+// fx.Out
+//
+// Handler Handler `group:"server"`
+// }
+//
+// func NewHelloHandler() HandlerResult {
+// // ...
+// }
+//
+// func NewEchoHandler() HandlerResult {
+// // ...
+// }
+//
+// Any number of constructors may provide values to this named collection, but
+// the ordering of the final collection is unspecified. Keep in mind that
+// value groups require parameter and result structs to use fields with
+// different types: if a group of constructors each returns type T, parameter
+// structs consuming the group must use a field of type []T.
+//
+// To provide multiple values for a group from a result struct, produce a
+// slice and use the `,flatten` option on the group tag. This indicates that
+// each element in the slice should be injected into the group individually.
+//
+// type IntResult struct {
+// fx.Out
+//
+// Handler []int `group:"server"` // Consume as [][]int
+// Handler []int `group:"server,flatten"` // Consume as []int
+// }
+type Out = dig.Out
diff --git a/vendor/go.uber.org/fx/internal/fxclock/clock.go b/vendor/go.uber.org/fx/internal/fxclock/clock.go
new file mode 100644
index 000000000..bf1c7058f
--- /dev/null
+++ b/vendor/go.uber.org/fx/internal/fxclock/clock.go
@@ -0,0 +1,58 @@
+// Copyright (c) 2021 Uber Technologies, Inc.
+//
+// Permission is hereby granted, free of charge, to any person obtaining a copy
+// of this software and associated documentation files (the "Software"), to deal
+// in the Software without restriction, including without limitation the rights
+// to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
+// copies of the Software, and to permit persons to whom the Software is
+// furnished to do so, subject to the following conditions:
+//
+// The above copyright notice and this permission notice shall be included in
+// all copies or substantial portions of the Software.
+//
+// THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+// IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+// FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
+// AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
+// LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
+// OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
+// THE SOFTWARE.
+
+package fxclock
+
+import (
+ "context"
+ "time"
+)
+
+// Clock defines how Fx accesses time.
+// The interface is pretty minimal but it matches github.com/benbjohnson/clock.
+// We intentionally don't use that interface directly;
+// this keeps it a test dependency for us.
+type Clock interface {
+ Now() time.Time
+ Since(time.Time) time.Duration
+ Sleep(time.Duration)
+ WithTimeout(context.Context, time.Duration) (context.Context, context.CancelFunc)
+}
+
+// System is the default implementation of Clock based on real time.
+var System Clock = systemClock{}
+
+type systemClock struct{}
+
+func (systemClock) Now() time.Time {
+ return time.Now()
+}
+
+func (systemClock) Since(t time.Time) time.Duration {
+ return time.Since(t)
+}
+
+func (systemClock) Sleep(d time.Duration) {
+ time.Sleep(d)
+}
+
+func (systemClock) WithTimeout(ctx context.Context, d time.Duration) (context.Context, context.CancelFunc) {
+ return context.WithTimeout(ctx, d)
+}
diff --git a/vendor/go.uber.org/fx/internal/fxlog/default.go b/vendor/go.uber.org/fx/internal/fxlog/default.go
new file mode 100644
index 000000000..a6424a640
--- /dev/null
+++ b/vendor/go.uber.org/fx/internal/fxlog/default.go
@@ -0,0 +1,32 @@
+// Copyright (c) 2021 Uber Technologies, Inc.
+//
+// Permission is hereby granted, free of charge, to any person obtaining a copy
+// of this software and associated documentation files (the "Software"), to deal
+// in the Software without restriction, including without limitation the rights
+// to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
+// copies of the Software, and to permit persons to whom the Software is
+// furnished to do so, subject to the following conditions:
+//
+// The above copyright notice and this permission notice shall be included in
+// all copies or substantial portions of the Software.
+//
+// THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+// IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+// FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
+// AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
+// LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
+// OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
+// THE SOFTWARE.
+
+package fxlog
+
+import (
+ "io"
+
+ "go.uber.org/fx/fxevent"
+)
+
+// DefaultLogger constructs a Logger out of io.Writer.
+func DefaultLogger(w io.Writer) fxevent.Logger {
+ return &fxevent.ConsoleLogger{W: w}
+}
diff --git a/vendor/go.uber.org/fx/internal/fxlog/spy.go b/vendor/go.uber.org/fx/internal/fxlog/spy.go
new file mode 100644
index 000000000..6be50a996
--- /dev/null
+++ b/vendor/go.uber.org/fx/internal/fxlog/spy.go
@@ -0,0 +1,91 @@
+// Copyright (c) 2020-2021 Uber Technologies, Inc.
+//
+// Permission is hereby granted, free of charge, to any person obtaining a copy
+// of this software and associated documentation files (the "Software"), to deal
+// in the Software without restriction, including without limitation the rights
+// to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
+// copies of the Software, and to permit persons to whom the Software is
+// furnished to do so, subject to the following conditions:
+//
+// The above copyright notice and this permission notice shall be included in
+// all copies or substantial portions of the Software.
+//
+// THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+// IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+// FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
+// AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
+// LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
+// OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
+// THE SOFTWARE.
+
+package fxlog
+
+import (
+ "reflect"
+ "sync"
+
+ "go.uber.org/fx/fxevent"
+)
+
+// Events is a list of events captured by fxlog.Spy.
+type Events []fxevent.Event
+
+// Len returns the number of events in this list.
+func (es Events) Len() int { return len(es) }
+
+// SelectByTypeName returns a new list with only events matching the specified
+// type.
+func (es Events) SelectByTypeName(name string) Events {
+ var out Events
+ for _, e := range es {
+ if reflect.TypeOf(e).Elem().Name() == name {
+ out = append(out, e)
+ }
+ }
+ return out
+}
+
+// Spy is an Fx event logger that captures emitted events and/or logged
+// statements. It may be used in tests of Fx logs.
+type Spy struct {
+ mu sync.RWMutex
+ events Events
+}
+
+var _ fxevent.Logger = &Spy{}
+
+// LogEvent appends an Event.
+func (s *Spy) LogEvent(event fxevent.Event) {
+ s.mu.Lock()
+ s.events = append(s.events, event)
+ s.mu.Unlock()
+}
+
+// Events returns all captured events.
+func (s *Spy) Events() Events {
+ s.mu.RLock()
+ defer s.mu.RUnlock()
+
+ events := make(Events, len(s.events))
+ copy(events, s.events)
+ return events
+}
+
+// EventTypes returns all captured event types.
+func (s *Spy) EventTypes() []string {
+ s.mu.RLock()
+ defer s.mu.RUnlock()
+
+ types := make([]string, len(s.events))
+ for i, e := range s.events {
+ types[i] = reflect.TypeOf(e).Elem().Name()
+ }
+ return types
+}
+
+// Reset clears all messages and events from the Spy.
+func (s *Spy) Reset() {
+ s.mu.Lock()
+ s.events = s.events[:0]
+ s.mu.Unlock()
+}
diff --git a/vendor/go.uber.org/fx/internal/fxreflect/fxreflect.go b/vendor/go.uber.org/fx/internal/fxreflect/fxreflect.go
new file mode 100644
index 000000000..c4ab1c675
--- /dev/null
+++ b/vendor/go.uber.org/fx/internal/fxreflect/fxreflect.go
@@ -0,0 +1,86 @@
+// Copyright (c) 2019-2021 Uber Technologies, Inc.
+//
+// Permission is hereby granted, free of charge, to any person obtaining a copy
+// of this software and associated documentation files (the "Software"), to deal
+// in the Software without restriction, including without limitation the rights
+// to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
+// copies of the Software, and to permit persons to whom the Software is
+// furnished to do so, subject to the following conditions:
+//
+// The above copyright notice and this permission notice shall be included in
+// all copies or substantial portions of the Software.
+//
+// THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+// IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+// FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
+// AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
+// LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
+// OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
+// THE SOFTWARE.
+
+package fxreflect
+
+import (
+ "fmt"
+ "net/url"
+ "reflect"
+ "regexp"
+ "runtime"
+ "strings"
+)
+
+// Match from beginning of the line until the first `vendor/` (non-greedy)
+var vendorRe = regexp.MustCompile("^.*?/vendor/")
+
+// sanitize makes the function name suitable for logging display. It removes
+// url-encoded elements from the `dot.git` package names and shortens the
+// vendored paths.
+func sanitize(function string) string {
+ // Use the stdlib to un-escape any package import paths which can happen
+ // in the case of the "dot-git" postfix. Seems like a bug in stdlib =/
+ if unescaped, err := url.QueryUnescape(function); err == nil {
+ function = unescaped
+ }
+
+ // strip everything prior to the vendor
+ return vendorRe.ReplaceAllString(function, "vendor/")
+}
+
+// Caller returns the formatted calling func name
+func Caller() string {
+ return CallerStack(1, 0).CallerName()
+}
+
+// FuncName returns a funcs formatted name
+func FuncName(fn interface{}) string {
+ fnV := reflect.ValueOf(fn)
+ if fnV.Kind() != reflect.Func {
+ return fmt.Sprint(fn)
+ }
+
+ function := runtime.FuncForPC(fnV.Pointer()).Name()
+ return fmt.Sprintf("%s()", sanitize(function))
+}
+
+// Ascend the call stack until we leave the Fx production code. This allows us
+// to avoid hard-coding a frame skip, which makes this code work well even
+// when it's wrapped.
+func shouldIgnoreFrame(f Frame) bool {
+ // Treat test files as leafs.
+ if strings.Contains(f.File, "_test.go") {
+ return false
+ }
+
+ // The unique, fully-qualified name for all functions begins with
+ // "{{importPath}}.". We'll ignore Fx and its subpackages.
+ s := strings.TrimPrefix(f.Function, "go.uber.org/fx")
+	if len(s) > 0 && (s[0] == '.' || s[0] == '/') {
+ // We want to match,
+ // go.uber.org/fx.Foo
+ // go.uber.org/fx/something.Foo
+ // But not, go.uber.org/fxfoo
+ return true
+ }
+
+ return false
+}
diff --git a/vendor/go.uber.org/fx/internal/fxreflect/stack.go b/vendor/go.uber.org/fx/internal/fxreflect/stack.go
new file mode 100644
index 000000000..eb64618e3
--- /dev/null
+++ b/vendor/go.uber.org/fx/internal/fxreflect/stack.go
@@ -0,0 +1,149 @@
+// Copyright (c) 2019 Uber Technologies, Inc.
+//
+// Permission is hereby granted, free of charge, to any person obtaining a copy
+// of this software and associated documentation files (the "Software"), to deal
+// in the Software without restriction, including without limitation the rights
+// to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
+// copies of the Software, and to permit persons to whom the Software is
+// furnished to do so, subject to the following conditions:
+//
+// The above copyright notice and this permission notice shall be included in
+// all copies or substantial portions of the Software.
+//
+// THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+// IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+// FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
+// AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
+// LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
+// OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
+// THE SOFTWARE.
+
+package fxreflect
+
+import (
+ "fmt"
+ "io"
+ "runtime"
+ "strings"
+)
+
+// Frame holds information about a single frame in the call stack.
+type Frame struct {
+ // Unique, package path-qualified name for the function of this call
+ // frame.
+ Function string
+
+ // File and line number of our location in the frame.
+ //
+ // Note that the line number does not refer to where the function was
+ // defined but where in the function the next call was made.
+ File string
+ Line int
+}
+
+func (f Frame) String() string {
+ // This takes the following forms.
+ // (path/to/file.go)
+ // (path/to/file.go:42)
+ // path/to/package.MyFunction
+ // path/to/package.MyFunction (path/to/file.go)
+ // path/to/package.MyFunction (path/to/file.go:42)
+
+ var sb strings.Builder
+ sb.WriteString(f.Function)
+ if len(f.File) > 0 {
+ if sb.Len() > 0 {
+ sb.WriteRune(' ')
+ }
+ fmt.Fprintf(&sb, "(%v", f.File)
+ if f.Line > 0 {
+ fmt.Fprintf(&sb, ":%d", f.Line)
+ }
+ sb.WriteRune(')')
+ }
+
+ if sb.Len() == 0 {
+ return "unknown"
+ }
+
+ return sb.String()
+}
+
+const _defaultCallersDepth = 8
+
+// Stack is a stack of call frames.
+//
+// Formatted with %v, the output is in a single-line, in the form,
+//
+// foo/bar.Baz() (path/to/foo.go:42); bar/baz.Qux() (bar/baz/qux.go:12); ...
+//
+// Formatted with %+v, the output is in the form,
+//
+// foo/bar.Baz()
+// path/to/foo.go:42
+// bar/baz.Qux()
+// bar/baz/qux.go:12
+type Stack []Frame
+
+// Returns a single-line, semi-colon representation of a Stack. For a
+// multi-line representation, use %+v.
+func (fs Stack) String() string {
+ items := make([]string, len(fs))
+ for i, f := range fs {
+ items[i] = f.String()
+ }
+ return strings.Join(items, "; ")
+}
+
+// Format implements fmt.Formatter to handle "%+v".
+func (fs Stack) Format(w fmt.State, c rune) {
+ if !w.Flag('+') {
+ // Without %+v, fall back to String().
+ io.WriteString(w, fs.String())
+ return
+ }
+
+ for _, f := range fs {
+ fmt.Fprintln(w, f.Function)
+ fmt.Fprintf(w, "\t%v:%v\n", f.File, f.Line)
+ }
+}
+
+// CallerName returns the name of the first caller in this stack that isn't
+// owned by the Fx library.
+func (fs Stack) CallerName() string {
+ for _, f := range fs {
+ if shouldIgnoreFrame(f) {
+ continue
+ }
+ return f.Function
+ }
+ return "n/a"
+}
+
+// CallerStack returns the call stack for the calling function, up to depth frames
+// deep, skipping the provided number of frames, not including Callers itself.
+//
+// If zero, depth defaults to 8.
+func CallerStack(skip, depth int) Stack {
+ if depth <= 0 {
+ depth = _defaultCallersDepth
+ }
+
+ pcs := make([]uintptr, depth)
+
+ // +2 to skip this frame and runtime.Callers.
+ n := runtime.Callers(skip+2, pcs)
+ pcs = pcs[:n] // truncate to number of frames actually read
+
+ result := make([]Frame, 0, n)
+ frames := runtime.CallersFrames(pcs)
+ for f, more := frames.Next(); more; f, more = frames.Next() {
+ result = append(result, Frame{
+ Function: sanitize(f.Function),
+ File: f.File,
+ Line: f.Line,
+ })
+ }
+ return result
+}
diff --git a/vendor/go.uber.org/fx/internal/lifecycle/lifecycle.go b/vendor/go.uber.org/fx/internal/lifecycle/lifecycle.go
new file mode 100644
index 000000000..dfac47a8c
--- /dev/null
+++ b/vendor/go.uber.org/fx/internal/lifecycle/lifecycle.go
@@ -0,0 +1,398 @@
+// Copyright (c) 2022 Uber Technologies, Inc.
+//
+// Permission is hereby granted, free of charge, to any person obtaining a copy
+// of this software and associated documentation files (the "Software"), to deal
+// in the Software without restriction, including without limitation the rights
+// to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
+// copies of the Software, and to permit persons to whom the Software is
+// furnished to do so, subject to the following conditions:
+//
+// The above copyright notice and this permission notice shall be included in
+// all copies or substantial portions of the Software.
+//
+// THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+// IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+// FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
+// AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
+// LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
+// OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
+// THE SOFTWARE.
+
+package lifecycle
+
+import (
+ "context"
+ "errors"
+ "fmt"
+ "io"
+ "reflect"
+ "strings"
+ "sync"
+ "time"
+
+ "go.uber.org/fx/fxevent"
+ "go.uber.org/fx/internal/fxclock"
+ "go.uber.org/fx/internal/fxreflect"
+ "go.uber.org/multierr"
+)
+
+// Reflection types for each of the supported hook function signatures. These
+// are used in cases in which the Callable constraint matches a user-defined
+// function type that cannot be converted to an underlying function type with
+// a conventional conversion or type switch.
+var (
+ _reflFunc = reflect.TypeOf(Func(nil))
+ _reflErrorFunc = reflect.TypeOf(ErrorFunc(nil))
+ _reflContextFunc = reflect.TypeOf(ContextFunc(nil))
+ _reflContextErrorFunc = reflect.TypeOf(ContextErrorFunc(nil))
+)
+
+// Discrete function signatures that are allowed as part of a [Callable].
+type (
+ // A Func can be converted to a ContextErrorFunc.
+ Func = func()
+ // An ErrorFunc can be converted to a ContextErrorFunc.
+ ErrorFunc = func() error
+ // A ContextFunc can be converted to a ContextErrorFunc.
+ ContextFunc = func(context.Context)
+ // A ContextErrorFunc is used as a [Hook.OnStart] or [Hook.OnStop]
+ // function.
+ ContextErrorFunc = func(context.Context) error
+)
+
+// A Callable is a constraint that matches functions that are, or can be
+// converted to, functions suitable for a Hook.
+//
+// Callable must be identical to [fx.HookFunc].
+type Callable interface {
+ ~Func | ~ErrorFunc | ~ContextFunc | ~ContextErrorFunc
+}
+
+// Wrap wraps x into a ContextErrorFunc suitable for a Hook.
+func Wrap[T Callable](x T) (ContextErrorFunc, string) {
+ if x == nil {
+ return nil, ""
+ }
+
+ switch fn := any(x).(type) {
+ case Func:
+ return func(context.Context) error {
+ fn()
+ return nil
+ }, fxreflect.FuncName(x)
+ case ErrorFunc:
+ return func(context.Context) error {
+ return fn()
+ }, fxreflect.FuncName(x)
+ case ContextFunc:
+ return func(ctx context.Context) error {
+ fn(ctx)
+ return nil
+ }, fxreflect.FuncName(x)
+ case ContextErrorFunc:
+ return fn, fxreflect.FuncName(x)
+ }
+
+ // Since (1) we're already using reflect in Fx, (2) we're not particularly
+ // concerned with performance, and (3) unsafe would require discrete build
+ // targets for appengine (etc), just use reflect to convert user-defined
+ // function types to their underlying function types and then call Wrap
+ // again with the converted value.
+ reflVal := reflect.ValueOf(x)
+ switch {
+ case reflVal.CanConvert(_reflFunc):
+ return Wrap(reflVal.Convert(_reflFunc).Interface().(Func))
+ case reflVal.CanConvert(_reflErrorFunc):
+ return Wrap(reflVal.Convert(_reflErrorFunc).Interface().(ErrorFunc))
+ case reflVal.CanConvert(_reflContextFunc):
+ return Wrap(reflVal.Convert(_reflContextFunc).Interface().(ContextFunc))
+ default:
+ // Is already convertible to ContextErrorFunc.
+ return Wrap(reflVal.Convert(_reflContextErrorFunc).Interface().(ContextErrorFunc))
+ }
+}
+
+// A Hook is a pair of start and stop callbacks, either of which can be nil,
+// plus a string identifying the supplier of the hook.
+type Hook struct {
+ OnStart func(context.Context) error
+ OnStop func(context.Context) error
+ OnStartName string
+ OnStopName string
+
+ callerFrame fxreflect.Frame
+}
+
+type appState int
+
+const (
+ stopped appState = iota
+ starting
+ incompleteStart
+ started
+ stopping
+)
+
+func (as appState) String() string {
+ switch as {
+ case stopped:
+ return "stopped"
+ case starting:
+ return "starting"
+ case incompleteStart:
+ return "incompleteStart"
+ case started:
+ return "started"
+ case stopping:
+ return "stopping"
+ default:
+ return "invalidState"
+ }
+}
+
+// Lifecycle coordinates application lifecycle hooks.
+type Lifecycle struct {
+ clock fxclock.Clock
+ logger fxevent.Logger
+ state appState
+ hooks []Hook
+ numStarted int
+ startRecords HookRecords
+ stopRecords HookRecords
+ runningHook Hook
+ mu sync.Mutex
+}
+
+// New constructs a new Lifecycle.
+func New(logger fxevent.Logger, clock fxclock.Clock) *Lifecycle {
+ return &Lifecycle{logger: logger, clock: clock}
+}
+
+// Append adds a Hook to the lifecycle.
+func (l *Lifecycle) Append(hook Hook) {
+ // Save the caller's stack frame to report file/line number.
+ if f := fxreflect.CallerStack(2, 0); len(f) > 0 {
+ hook.callerFrame = f[0]
+ }
+ l.hooks = append(l.hooks, hook)
+}
+
+// Start runs all OnStart hooks, returning immediately if it encounters an
+// error.
+func (l *Lifecycle) Start(ctx context.Context) error {
+ if ctx == nil {
+ return errors.New("called OnStart with nil context")
+ }
+
+ l.mu.Lock()
+ if l.state != stopped {
+ defer l.mu.Unlock()
+ return fmt.Errorf("attempted to start lifecycle when in state: %v", l.state)
+ }
+ l.numStarted = 0
+ l.state = starting
+
+ l.startRecords = make(HookRecords, 0, len(l.hooks))
+ l.mu.Unlock()
+
+ var returnState appState = incompleteStart
+ defer func() {
+ l.mu.Lock()
+ l.state = returnState
+ l.mu.Unlock()
+ }()
+
+ for _, hook := range l.hooks {
+ // if ctx has cancelled, bail out of the loop.
+ if err := ctx.Err(); err != nil {
+ return err
+ }
+
+ if hook.OnStart != nil {
+ l.mu.Lock()
+ l.runningHook = hook
+ l.mu.Unlock()
+
+ runtime, err := l.runStartHook(ctx, hook)
+ if err != nil {
+ return err
+ }
+
+ l.mu.Lock()
+ l.startRecords = append(l.startRecords, HookRecord{
+ CallerFrame: hook.callerFrame,
+ Func: hook.OnStart,
+ Runtime: runtime,
+ })
+ l.mu.Unlock()
+ }
+ l.numStarted++
+ }
+
+ returnState = started
+ return nil
+}
+
+func (l *Lifecycle) runStartHook(ctx context.Context, hook Hook) (runtime time.Duration, err error) {
+ funcName := hook.OnStartName
+ if len(funcName) == 0 {
+ funcName = fxreflect.FuncName(hook.OnStart)
+ }
+
+ l.logger.LogEvent(&fxevent.OnStartExecuting{
+ CallerName: hook.callerFrame.Function,
+ FunctionName: funcName,
+ })
+ defer func() {
+ l.logger.LogEvent(&fxevent.OnStartExecuted{
+ CallerName: hook.callerFrame.Function,
+ FunctionName: funcName,
+ Runtime: runtime,
+ Err: err,
+ })
+ }()
+
+ begin := l.clock.Now()
+ err = hook.OnStart(ctx)
+ return l.clock.Since(begin), err
+}
+
+// Stop runs any OnStop hooks whose OnStart counterpart succeeded. OnStop
+// hooks run in reverse order.
+func (l *Lifecycle) Stop(ctx context.Context) error {
+ if ctx == nil {
+ return errors.New("called OnStop with nil context")
+ }
+
+ l.mu.Lock()
+ if l.state != started && l.state != incompleteStart {
+ defer l.mu.Unlock()
+ return nil
+ }
+ l.state = stopping
+ l.mu.Unlock()
+
+ defer func() {
+ l.mu.Lock()
+ l.state = stopped
+ l.mu.Unlock()
+ }()
+
+ l.mu.Lock()
+ l.stopRecords = make(HookRecords, 0, l.numStarted)
+ l.mu.Unlock()
+
+ // Run backward from last successful OnStart.
+ var errs []error
+ for ; l.numStarted > 0; l.numStarted-- {
+ if err := ctx.Err(); err != nil {
+ return err
+ }
+ hook := l.hooks[l.numStarted-1]
+ if hook.OnStop == nil {
+ continue
+ }
+
+ l.mu.Lock()
+ l.runningHook = hook
+ l.mu.Unlock()
+
+ runtime, err := l.runStopHook(ctx, hook)
+ if err != nil {
+ // For best-effort cleanup, keep going after errors.
+ errs = append(errs, err)
+ }
+
+ l.mu.Lock()
+ l.stopRecords = append(l.stopRecords, HookRecord{
+ CallerFrame: hook.callerFrame,
+ Func: hook.OnStop,
+ Runtime: runtime,
+ })
+ l.mu.Unlock()
+ }
+
+ return multierr.Combine(errs...)
+}
+
+func (l *Lifecycle) runStopHook(ctx context.Context, hook Hook) (runtime time.Duration, err error) {
+ funcName := hook.OnStopName
+ if len(funcName) == 0 {
+ funcName = fxreflect.FuncName(hook.OnStop)
+ }
+
+ l.logger.LogEvent(&fxevent.OnStopExecuting{
+ CallerName: hook.callerFrame.Function,
+ FunctionName: funcName,
+ })
+ defer func() {
+ l.logger.LogEvent(&fxevent.OnStopExecuted{
+ CallerName: hook.callerFrame.Function,
+ FunctionName: funcName,
+ Runtime: runtime,
+ Err: err,
+ })
+ }()
+
+ begin := l.clock.Now()
+ err = hook.OnStop(ctx)
+ return l.clock.Since(begin), err
+}
+
+// RunningHookCaller returns the name of the hook that was running when a Start/Stop
+// hook timed out.
+func (l *Lifecycle) RunningHookCaller() string {
+ l.mu.Lock()
+ defer l.mu.Unlock()
+ return l.runningHook.callerFrame.Function
+}
+
+// HookRecord keeps track of each Hook's execution time, the caller that appended the Hook, and function that ran as the Hook.
+type HookRecord struct {
+ CallerFrame fxreflect.Frame // stack frame of the caller
+ Func func(context.Context) error // function that ran as sanitized name
+ Runtime time.Duration // how long the hook ran
+}
+
+// HookRecords is a Stringer wrapper of HookRecord slice.
+type HookRecords []HookRecord
+
+func (rs HookRecords) Len() int {
+ return len(rs)
+}
+
+func (rs HookRecords) Less(i, j int) bool {
+ // Sort by runtime, greater ones at top.
+ return rs[i].Runtime > rs[j].Runtime
+}
+
+func (rs HookRecords) Swap(i, j int) {
+ rs[i], rs[j] = rs[j], rs[i]
+}
+
+// Used for logging startup errors.
+func (rs HookRecords) String() string {
+ var b strings.Builder
+ for _, r := range rs {
+ fmt.Fprintf(&b, "%s took %v from %s",
+ fxreflect.FuncName(r.Func), r.Runtime, r.CallerFrame)
+ }
+ return b.String()
+}
+
+// Format implements fmt.Formatter to handle "%+v".
+func (rs HookRecords) Format(w fmt.State, c rune) {
+ if !w.Flag('+') {
+ // Without %+v, fall back to String().
+ io.WriteString(w, rs.String())
+ return
+ }
+
+ for _, r := range rs {
+ fmt.Fprintf(w, "\n%s took %v from:\n\t%+v",
+ fxreflect.FuncName(r.Func),
+ r.Runtime,
+ r.CallerFrame)
+ }
+ fmt.Fprintf(w, "\n")
+}
diff --git a/vendor/go.uber.org/fx/invoke.go b/vendor/go.uber.org/fx/invoke.go
new file mode 100644
index 000000000..523aef83b
--- /dev/null
+++ b/vendor/go.uber.org/fx/invoke.go
@@ -0,0 +1,110 @@
+// Copyright (c) 2019-2021 Uber Technologies, Inc.
+//
+// Permission is hereby granted, free of charge, to any person obtaining a copy
+// of this software and associated documentation files (the "Software"), to deal
+// in the Software without restriction, including without limitation the rights
+// to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
+// copies of the Software, and to permit persons to whom the Software is
+// furnished to do so, subject to the following conditions:
+//
+// The above copyright notice and this permission notice shall be included in
+// all copies or substantial portions of the Software.
+//
+// THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+// IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+// FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
+// AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
+// LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
+// OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
+// THE SOFTWARE.
+
+package fx
+
+import (
+ "fmt"
+ "strings"
+
+ "go.uber.org/fx/internal/fxreflect"
+)
+
+// Invoke registers functions that are executed eagerly on application start.
+// Arguments for these invocations are built using the constructors registered
+// by Provide. Passing multiple Invoke options appends the new invocations to
+// the application's existing list.
+//
+// Unlike constructors, invocations are always executed, and they're always
+// run in order. Invocations may have any number of returned values.
+// If the final returned object is an error, it indicates whether the operation
+// was successful.
+// All other returned values are discarded.
+//
+// Invokes registered in [Module]s are run before the ones registered at the
+// scope of the parent. Invokes within the same Module is run in the order
+// they were provided. For example,
+//
+// fx.New(
+// fx.Invoke(func3),
+// fx.Module("someModule",
+// fx.Invoke(func1),
+// fx.Invoke(func2),
+// ),
+// fx.Invoke(func4),
+// )
+//
+// invokes func1, func2, func3, func4 in that order.
+//
+// Typically, invoked functions take a handful of high-level objects (whose
+// constructors depend on lower-level objects) and introduce them to each
+// other. This kick-starts the application by forcing it to instantiate a
+// variety of types.
+//
+// To see an invocation in use, read through the package-level example. For
+// advanced features, including optional parameters and named instances, see
+// the documentation of the In and Out types.
+func Invoke(funcs ...interface{}) Option {
+ return invokeOption{
+ Targets: funcs,
+ Stack: fxreflect.CallerStack(1, 0),
+ }
+}
+
+type invokeOption struct {
+ Targets []interface{}
+ Stack fxreflect.Stack
+}
+
+func (o invokeOption) apply(mod *module) {
+ for _, target := range o.Targets {
+ mod.invokes = append(mod.invokes, invoke{
+ Target: target,
+ Stack: o.Stack,
+ })
+ }
+}
+
+func (o invokeOption) String() string {
+ items := make([]string, len(o.Targets))
+ for i, f := range o.Targets {
+ items[i] = fxreflect.FuncName(f)
+ }
+ return fmt.Sprintf("fx.Invoke(%s)", strings.Join(items, ", "))
+}
+func runInvoke(c container, i invoke) error {
+ fn := i.Target
+ switch fn := fn.(type) {
+ case Option:
+ return fmt.Errorf("fx.Option should be passed to fx.New directly, "+
+ "not to fx.Invoke: fx.Invoke received %v from:\n%+v",
+ fn, i.Stack)
+
+ case annotated:
+ af, err := fn.Build()
+ if err != nil {
+ return err
+ }
+
+ return c.Invoke(af)
+ default:
+ return c.Invoke(fn)
+ }
+}
diff --git a/vendor/go.uber.org/fx/lifecycle.go b/vendor/go.uber.org/fx/lifecycle.go
new file mode 100644
index 000000000..50198488e
--- /dev/null
+++ b/vendor/go.uber.org/fx/lifecycle.go
@@ -0,0 +1,147 @@
+// Copyright (c) 2022 Uber Technologies, Inc.
+//
+// Permission is hereby granted, free of charge, to any person obtaining a copy
+// of this software and associated documentation files (the "Software"), to deal
+// in the Software without restriction, including without limitation the rights
+// to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
+// copies of the Software, and to permit persons to whom the Software is
+// furnished to do so, subject to the following conditions:
+//
+// The above copyright notice and this permission notice shall be included in
+// all copies or substantial portions of the Software.
+//
+// THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+// IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+// FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
+// AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
+// LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
+// OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
+// THE SOFTWARE.
+
+package fx
+
+import (
+ "context"
+
+ "go.uber.org/fx/internal/lifecycle"
+)
+
+// A HookFunc is a function that can be used as a [Hook].
+type HookFunc interface {
+ ~func() | ~func() error | ~func(context.Context) | ~func(context.Context) error
+}
+
+// Lifecycle allows constructors to register callbacks that are executed on
+// application start and stop. See the documentation for App for details on Fx
+// applications' initialization, startup, and shutdown logic.
+type Lifecycle interface {
+ Append(Hook)
+}
+
+// A Hook is a pair of start and stop callbacks, either of which can be nil.
+// If a Hook's OnStart callback isn't executed (because a previous OnStart
+// failure short-circuited application startup), its OnStop callback won't be
+// executed.
+type Hook struct {
+ OnStart func(context.Context) error
+ OnStop func(context.Context) error
+
+ onStartName string
+ onStopName string
+}
+
+// StartHook returns a new Hook with start as its [Hook.OnStart] function,
+// wrapping its signature as needed. For example, given the following function:
+//
+// func myhook() {
+// fmt.Println("hook called")
+// }
+//
+// then calling:
+//
+// lifecycle.Append(StartHook(myfunc))
+//
+// is functionally equivalent to calling:
+//
+// lifecycle.Append(fx.Hook{
+// OnStart: func(context.Context) error {
+// myfunc()
+// return nil
+// },
+// })
+//
+// The same is true for all functions that satisfy the HookFunc constraint.
+// Note that any context.Context parameter or error return will be propagated
+// as expected. If propagation is not intended, users should instead provide a
+// closure that discards the undesired value(s), or construct a Hook directly.
+func StartHook[T HookFunc](start T) Hook {
+ onstart, startname := lifecycle.Wrap(start)
+
+ return Hook{
+ OnStart: onstart,
+ onStartName: startname,
+ }
+}
+
+// StopHook returns a new Hook with stop as its [Hook.OnStop] function,
+// wrapping its signature as needed. For example, given the following function:
+//
+// func myhook() {
+// fmt.Println("hook called")
+// }
+//
+// then calling:
+//
+// lifecycle.Append(StopHook(myfunc))
+//
+// is functionally equivalent to calling:
+//
+// lifecycle.Append(fx.Hook{
+// OnStop: func(context.Context) error {
+// myfunc()
+// return nil
+// },
+// })
+//
+// The same is true for all functions that satisfy the HookFunc constraint.
+// Note that any context.Context parameter or error return will be propagated
+// as expected. If propagation is not intended, users should instead provide a
+// closure that discards the undesired value(s), or construct a Hook directly.
+func StopHook[T HookFunc](stop T) Hook {
+ onstop, stopname := lifecycle.Wrap(stop)
+
+ return Hook{
+ OnStop: onstop,
+ onStopName: stopname,
+ }
+}
+
+// StartStopHook returns a new Hook with start as its [Hook.OnStart] function
+// and stop as its [Hook.OnStop] function, independently wrapping the signature
+// of each as needed.
+func StartStopHook[T1 HookFunc, T2 HookFunc](start T1, stop T2) Hook {
+ var (
+ onstart, startname = lifecycle.Wrap(start)
+ onstop, stopname = lifecycle.Wrap(stop)
+ )
+
+ return Hook{
+ OnStart: onstart,
+ OnStop: onstop,
+ onStartName: startname,
+ onStopName: stopname,
+ }
+}
+
+type lifecycleWrapper struct {
+ *lifecycle.Lifecycle
+}
+
+func (l *lifecycleWrapper) Append(h Hook) {
+ l.Lifecycle.Append(lifecycle.Hook{
+ OnStart: h.OnStart,
+ OnStop: h.OnStop,
+ OnStartName: h.onStartName,
+ OnStopName: h.onStopName,
+ })
+}
diff --git a/vendor/go.uber.org/fx/log.go b/vendor/go.uber.org/fx/log.go
new file mode 100644
index 000000000..e3b960c7a
--- /dev/null
+++ b/vendor/go.uber.org/fx/log.go
@@ -0,0 +1,50 @@
+// Copyright (c) 2021 Uber Technologies, Inc.
+//
+// Permission is hereby granted, free of charge, to any person obtaining a copy
+// of this software and associated documentation files (the "Software"), to deal
+// in the Software without restriction, including without limitation the rights
+// to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
+// copies of the Software, and to permit persons to whom the Software is
+// furnished to do so, subject to the following conditions:
+//
+// The above copyright notice and this permission notice shall be included in
+// all copies or substantial portions of the Software.
+//
+// THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+// IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+// FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
+// AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
+// LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
+// OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
+// THE SOFTWARE.
+
+package fx
+
+import (
+ "go.uber.org/fx/fxevent"
+)
+
+// logBuffer will buffer all messages until a logger has been
+// initialized.
+type logBuffer struct {
+ events []fxevent.Event
+ logger fxevent.Logger
+}
+
+// LogEvent buffers or logs an event.
+func (l *logBuffer) LogEvent(event fxevent.Event) {
+ if l.logger == nil {
+ l.events = append(l.events, event)
+ } else {
+ l.logger.LogEvent(event)
+ }
+}
+
+// Connect flushes out all buffered events to a logger and resets them.
+func (l *logBuffer) Connect(logger fxevent.Logger) {
+ l.logger = logger
+ for _, e := range l.events {
+ logger.LogEvent(e)
+ }
+ l.events = nil
+}
diff --git a/vendor/go.uber.org/fx/module.go b/vendor/go.uber.org/fx/module.go
new file mode 100644
index 000000000..7a28ca8d3
--- /dev/null
+++ b/vendor/go.uber.org/fx/module.go
@@ -0,0 +1,290 @@
+// Copyright (c) 2022 Uber Technologies, Inc.
+//
+// Permission is hereby granted, free of charge, to any person obtaining a copy
+// of this software and associated documentation files (the "Software"), to deal
+// in the Software without restriction, including without limitation the rights
+// to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
+// copies of the Software, and to permit persons to whom the Software is
+// furnished to do so, subject to the following conditions:
+//
+// The above copyright notice and this permission notice shall be included in
+// all copies or substantial portions of the Software.
+//
+// THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+// IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+// FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
+// AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
+// LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
+// OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
+// THE SOFTWARE.
+
+package fx
+
+import (
+ "fmt"
+
+ "go.uber.org/dig"
+ "go.uber.org/fx/fxevent"
+ "go.uber.org/fx/internal/fxreflect"
+ "go.uber.org/multierr"
+)
+
+// A container represents a set of constructors to provide
+// dependencies, and a set of functions to invoke once all the
+// dependencies have been initialized.
+//
+// This definition corresponds to the dig.Container and dig.Scope.
+type container interface {
+ Invoke(interface{}, ...dig.InvokeOption) error
+ Provide(interface{}, ...dig.ProvideOption) error
+ Decorate(interface{}, ...dig.DecorateOption) error
+}
+
+// Module is a named group of zero or more fx.Options.
+// A Module creates a scope in which certain operations are taken
+// place. For more information, see [Decorate], [Replace], or [Invoke].
+func Module(name string, opts ...Option) Option {
+ mo := moduleOption{
+ name: name,
+ options: opts,
+ }
+ return mo
+}
+
+type moduleOption struct {
+ name string
+ options []Option
+}
+
+func (o moduleOption) String() string {
+ return fmt.Sprintf("fx.Module(%q, %v)", o.name, o.options)
+}
+
+func (o moduleOption) apply(mod *module) {
+ // This gets called on any submodules that are declared
+ // as part of another module.
+
+ // 1. Create a new module with the parent being the specified
+ // module.
+ // 2. Apply child Options on the new module.
+ // 3. Append it to the parent module.
+ newModule := &module{
+ name: o.name,
+ parent: mod,
+ app: mod.app,
+ }
+ for _, opt := range o.options {
+ opt.apply(newModule)
+ }
+ mod.modules = append(mod.modules, newModule)
+}
+
+type module struct {
+ parent *module
+ name string
+ scope scope
+ provides []provide
+ invokes []invoke
+ decorators []decorator
+ modules []*module
+ app *App
+ log fxevent.Logger
+ fallbackLogger fxevent.Logger
+ logConstructor *provide
+}
+
+// scope is a private wrapper interface for dig.Container and dig.Scope.
+// We can consider moving this into Fx using type constraints after Go 1.20
+// is released and 1.17 is deprecated.
+type scope interface {
+ Decorate(f interface{}, opts ...dig.DecorateOption) error
+ Invoke(f interface{}, opts ...dig.InvokeOption) error
+ Provide(f interface{}, opts ...dig.ProvideOption) error
+ Scope(name string, opts ...dig.ScopeOption) *dig.Scope
+ String() string
+}
+
+// build constructs the Scopes using the App's Container. Note that this
+// happens after applyModules are called, because the App's Container needs
+// to be built for any Scopes to be initialized, and the applies should be
+// called before the Container can be initialized.
+func (m *module) build(app *App, root *dig.Container) {
+ if m.parent == nil {
+ m.scope = root
+ } else {
+ parentScope := m.parent.scope
+ m.scope = parentScope.Scope(m.name)
+ // use parent module's logger by default
+ m.log = m.parent.log
+ }
+
+ if m.logConstructor != nil {
+ // Since user supplied a custom logger, use a buffered logger
+ // to hold all messages until user supplied logger is
+ // instantiated. Then we flush those messages after fully
+ // constructing the custom logger.
+ m.fallbackLogger, m.log = m.log, new(logBuffer)
+ }
+
+ for _, mod := range m.modules {
+ mod.build(app, root)
+ }
+}
+
+func (m *module) provideAll() {
+ for _, p := range m.provides {
+ m.provide(p)
+ }
+
+ for _, m := range m.modules {
+ m.provideAll()
+ }
+}
+
+func (m *module) provide(p provide) {
+ if m.app.err != nil {
+ return
+ }
+
+ var info dig.ProvideInfo
+ if err := runProvide(m.scope, p, dig.FillProvideInfo(&info), dig.Export(!p.Private)); err != nil {
+ m.app.err = err
+ }
+ var ev fxevent.Event
+ switch {
+ case p.IsSupply:
+ ev = &fxevent.Supplied{
+ TypeName: p.SupplyType.String(),
+ ModuleName: m.name,
+ Err: m.app.err,
+ }
+
+ default:
+ outputNames := make([]string, len(info.Outputs))
+ for i, o := range info.Outputs {
+ outputNames[i] = o.String()
+ }
+
+ ev = &fxevent.Provided{
+ ConstructorName: fxreflect.FuncName(p.Target),
+ ModuleName: m.name,
+ OutputTypeNames: outputNames,
+ Err: m.app.err,
+ Private: p.Private,
+ }
+ }
+ m.log.LogEvent(ev)
+}
+
+// Constructs custom loggers for all modules in the tree
+func (m *module) constructAllCustomLoggers() {
+ if m.logConstructor != nil {
+ if buffer, ok := m.log.(*logBuffer); ok {
+ // default to parent's logger if custom logger constructor fails
+ if err := m.constructCustomLogger(buffer); err != nil {
+ m.app.err = multierr.Append(m.app.err, err)
+ m.log = m.fallbackLogger
+ buffer.Connect(m.log)
+ }
+ }
+ m.fallbackLogger = nil
+ } else if m.parent != nil {
+ m.log = m.parent.log
+ }
+
+ for _, mod := range m.modules {
+ mod.constructAllCustomLoggers()
+ }
+}
+
+// Mirroring the behavior of app.constructCustomLogger
+func (m *module) constructCustomLogger(buffer *logBuffer) (err error) {
+ p := m.logConstructor
+ fname := fxreflect.FuncName(p.Target)
+ defer func() {
+ m.log.LogEvent(&fxevent.LoggerInitialized{
+ Err: err,
+ ConstructorName: fname,
+ })
+ }()
+
+ // TODO: Use dig.FillProvideInfo to inspect the provided constructor
+ // and fail the application if its signature didn't match.
+ if err := m.scope.Provide(p.Target); err != nil {
+ return fmt.Errorf("fx.WithLogger(%v) from:\n%+v\nin Module: %q\nFailed: %w",
+ fname, p.Stack, m.name, err)
+ }
+
+ return m.scope.Invoke(func(log fxevent.Logger) {
+ m.log = log
+ buffer.Connect(log)
+ })
+}
+
+func (m *module) executeInvokes() error {
+ for _, m := range m.modules {
+ if err := m.executeInvokes(); err != nil {
+ return err
+ }
+ }
+
+ for _, invoke := range m.invokes {
+ if err := m.executeInvoke(invoke); err != nil {
+ return err
+ }
+ }
+
+ return nil
+}
+
+func (m *module) executeInvoke(i invoke) (err error) {
+ fnName := fxreflect.FuncName(i.Target)
+ m.log.LogEvent(&fxevent.Invoking{
+ FunctionName: fnName,
+ ModuleName: m.name,
+ })
+ err = runInvoke(m.scope, i)
+ m.log.LogEvent(&fxevent.Invoked{
+ FunctionName: fnName,
+ ModuleName: m.name,
+ Err: err,
+ Trace: fmt.Sprintf("%+v", i.Stack), // format stack trace as multi-line
+ })
+ return err
+}
+
+func (m *module) decorate() (err error) {
+ for _, decorator := range m.decorators {
+ var info dig.DecorateInfo
+ err := runDecorator(m.scope, decorator, dig.FillDecorateInfo(&info))
+ outputNames := make([]string, len(info.Outputs))
+ for i, o := range info.Outputs {
+ outputNames[i] = o.String()
+ }
+
+ if decorator.IsReplace {
+ m.log.LogEvent(&fxevent.Replaced{
+ ModuleName: m.name,
+ OutputTypeNames: outputNames,
+ Err: err,
+ })
+ } else {
+
+ m.log.LogEvent(&fxevent.Decorated{
+ DecoratorName: fxreflect.FuncName(decorator.Target),
+ ModuleName: m.name,
+ OutputTypeNames: outputNames,
+ Err: err,
+ })
+ }
+ if err != nil {
+ return err
+ }
+ }
+ for _, m := range m.modules {
+ if err := m.decorate(); err != nil {
+ return err
+ }
+ }
+ return nil
+}
diff --git a/vendor/go.uber.org/fx/populate.go b/vendor/go.uber.org/fx/populate.go
new file mode 100644
index 000000000..4195e8080
--- /dev/null
+++ b/vendor/go.uber.org/fx/populate.go
@@ -0,0 +1,67 @@
+// Copyright (c) 2019 Uber Technologies, Inc.
+//
+// Permission is hereby granted, free of charge, to any person obtaining a copy
+// of this software and associated documentation files (the "Software"), to deal
+// in the Software without restriction, including without limitation the rights
+// to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
+// copies of the Software, and to permit persons to whom the Software is
+// furnished to do so, subject to the following conditions:
+//
+// The above copyright notice and this permission notice shall be included in
+// all copies or substantial portions of the Software.
+//
+// THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+// IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+// FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
+// AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
+// LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
+// OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
+// THE SOFTWARE.
+
+package fx
+
+import (
+ "fmt"
+ "reflect"
+)
+
+// Populate sets targets with values from the dependency injection container
+// during application initialization. All targets must be pointers to the
+// values that must be populated. Pointers to structs that embed In are
+// supported, which can be used to populate multiple values in a struct.
+//
+// This is most helpful in unit tests: it lets tests leverage Fx's automatic
+// constructor wiring to build a few structs, but then extract those structs
+// for further testing.
+func Populate(targets ...interface{}) Option {
+ // Validate all targets are non-nil pointers.
+ targetTypes := make([]reflect.Type, len(targets))
+ for i, t := range targets {
+ if t == nil {
+ return Error(fmt.Errorf("failed to Populate: target %v is nil", i+1))
+ }
+ rt := reflect.TypeOf(t)
+ if rt.Kind() != reflect.Ptr {
+ return Error(fmt.Errorf("failed to Populate: target %v is not a pointer type, got %T", i+1, t))
+ }
+
+ targetTypes[i] = reflect.TypeOf(t).Elem()
+ }
+
+ // Build a function that looks like:
+ //
+ // func(t1 T1, t2 T2, ...) {
+ // *targets[0] = t1
+ // *targets[1] = t2
+ // [...]
+ // }
+ //
+ fnType := reflect.FuncOf(targetTypes, nil, false /* variadic */)
+ fn := reflect.MakeFunc(fnType, func(args []reflect.Value) []reflect.Value {
+ for i, arg := range args {
+ reflect.ValueOf(targets[i]).Elem().Set(arg)
+ }
+ return nil
+ })
+ return Invoke(fn.Interface())
+}
diff --git a/vendor/go.uber.org/fx/printer_writer.go b/vendor/go.uber.org/fx/printer_writer.go
new file mode 100644
index 000000000..31a5ab4b6
--- /dev/null
+++ b/vendor/go.uber.org/fx/printer_writer.go
@@ -0,0 +1,36 @@
+// Copyright (c) 2020-2021 Uber Technologies, Inc.
+//
+// Permission is hereby granted, free of charge, to any person obtaining a copy
+// of this software and associated documentation files (the "Software"), to deal
+// in the Software without restriction, including without limitation the rights
+// to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
+// copies of the Software, and to permit persons to whom the Software is
+// furnished to do so, subject to the following conditions:
+//
+// The above copyright notice and this permission notice shall be included in
+// all copies or substantial portions of the Software.
+//
+// THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+// IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+// FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
+// AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
+// LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
+// OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
+// THE SOFTWARE.
+
+package fx
+
+import "io"
+
+type printerWriter struct{ p Printer }
+
+// writerFromPrinter returns an implementation of io.Writer used to support
+// Logger option which implements Printer interface.
+func writerFromPrinter(p Printer) io.Writer {
+ return &printerWriter{p: p}
+}
+
+func (w *printerWriter) Write(b []byte) (n int, err error) {
+ w.p.Printf(string(b))
+ return len(b), nil
+}
diff --git a/vendor/go.uber.org/fx/provide.go b/vendor/go.uber.org/fx/provide.go
new file mode 100644
index 000000000..eb61ce999
--- /dev/null
+++ b/vendor/go.uber.org/fx/provide.go
@@ -0,0 +1,187 @@
+// Copyright (c) 2022 Uber Technologies, Inc.
+//
+// Permission is hereby granted, free of charge, to any person obtaining a copy
+// of this software and associated documentation files (the "Software"), to deal
+// in the Software without restriction, including without limitation the rights
+// to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
+// copies of the Software, and to permit persons to whom the Software is
+// furnished to do so, subject to the following conditions:
+//
+// The above copyright notice and this permission notice shall be included in
+// all copies or substantial portions of the Software.
+//
+// THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+// IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+// FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
+// AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
+// LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
+// OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
+// THE SOFTWARE.
+
+package fx
+
+import (
+ "fmt"
+ "reflect"
+ "strings"
+
+ "go.uber.org/dig"
+ "go.uber.org/fx/internal/fxreflect"
+)
+
+// Provide registers any number of constructor functions, teaching the
+// application how to instantiate various types. The supplied constructor
+// function(s) may depend on other types available in the application, must
+// return one or more objects, and may return an error. For example:
+//
+// // Constructs type *C, depends on *A and *B.
+// func(*A, *B) *C
+//
+// // Constructs type *C, depends on *A and *B, and indicates failure by
+// // returning an error.
+// func(*A, *B) (*C, error)
+//
+// // Constructs types *B and *C, depends on *A, and can fail.
+// func(*A) (*B, *C, error)
+//
+// The order in which constructors are provided doesn't matter, and passing
+// multiple Provide options appends to the application's collection of
+// constructors. Constructors are called only if one or more of their returned
+// types are needed, and their results are cached for reuse (so instances of a
+// type are effectively singletons within an application). Taken together,
+// these properties make it perfectly reasonable to Provide a large number of
+// constructors even if only a fraction of them are used.
+//
+// See the documentation of the In and Out types for advanced features,
+// including optional parameters and named instances.
+//
+// See the documentation for [Private] for restricting access to constructors.
+//
+// Constructor functions should perform as little external interaction as
+// possible, and should avoid spawning goroutines. Things like server listen
+// loops, background timer loops, and background processing goroutines should
+// instead be managed using Lifecycle callbacks.
+func Provide(constructors ...interface{}) Option {
+ return provideOption{
+ Targets: constructors,
+ Stack: fxreflect.CallerStack(1, 0),
+ }
+}
+
+type provideOption struct {
+ Targets []interface{}
+ Stack fxreflect.Stack
+}
+
+func (o provideOption) apply(mod *module) {
+ var private bool
+
+ targets := make([]interface{}, 0, len(o.Targets))
+ for _, target := range o.Targets {
+ if _, ok := target.(privateOption); ok {
+ private = true
+ continue
+ }
+ targets = append(targets, target)
+ }
+
+ for _, target := range targets {
+ mod.provides = append(mod.provides, provide{
+ Target: target,
+ Stack: o.Stack,
+ Private: private,
+ })
+ }
+}
+
+type privateOption struct{}
+
+// Private is an option that can be passed as an argument to [Provide] to
+// restrict access to the constructors being provided. Specifically,
+// corresponding constructors can only be used within the current module
+// or modules the current module contains. Other modules that contain this
+// module won't be able to use the constructor.
+//
+// For example, the following would fail because the app doesn't have access
+// to the inner module's constructor.
+//
+// fx.New(
+// fx.Module("SubModule", fx.Provide(func() int { return 0 }, fx.Private)),
+// fx.Invoke(func(a int) {}),
+// )
+var Private = privateOption{}
+
+func (o provideOption) String() string {
+ items := make([]string, len(o.Targets))
+ for i, c := range o.Targets {
+ items[i] = fxreflect.FuncName(c)
+ }
+ return fmt.Sprintf("fx.Provide(%s)", strings.Join(items, ", "))
+}
+
+func runProvide(c container, p provide, opts ...dig.ProvideOption) error {
+ constructor := p.Target
+ if _, ok := constructor.(Option); ok {
+ return fmt.Errorf("fx.Option should be passed to fx.New directly, "+
+ "not to fx.Provide: fx.Provide received %v from:\n%+v",
+ constructor, p.Stack)
+ }
+
+ switch constructor := constructor.(type) {
+ case annotationError:
+ // fx.Annotate failed. Turn it into an Fx error.
+ return fmt.Errorf(
+ "encountered error while applying annotation using fx.Annotate to %s: %+v",
+ fxreflect.FuncName(constructor.target), constructor.err)
+
+ case annotated:
+ ctor, err := constructor.Build()
+ if err != nil {
+ return fmt.Errorf("fx.Provide(%v) from:\n%+vFailed: %v", constructor, p.Stack, err)
+ }
+
+ opts = append(opts, dig.LocationForPC(constructor.FuncPtr))
+ if err := c.Provide(ctor, opts...); err != nil {
+ return fmt.Errorf("fx.Provide(%v) from:\n%+vFailed: %v", constructor, p.Stack, err)
+ }
+
+ case Annotated:
+ ann := constructor
+ switch {
+ case len(ann.Group) > 0 && len(ann.Name) > 0:
+ return fmt.Errorf(
+ "fx.Annotated may specify only one of Name or Group: received %v from:\n%+v",
+ ann, p.Stack)
+ case len(ann.Name) > 0:
+ opts = append(opts, dig.Name(ann.Name))
+ case len(ann.Group) > 0:
+ opts = append(opts, dig.Group(ann.Group))
+ }
+
+ if err := c.Provide(ann.Target, opts...); err != nil {
+ return fmt.Errorf("fx.Provide(%v) from:\n%+vFailed: %v", ann, p.Stack, err)
+ }
+
+ default:
+ if reflect.TypeOf(constructor).Kind() == reflect.Func {
+ ft := reflect.ValueOf(constructor).Type()
+
+ for i := 0; i < ft.NumOut(); i++ {
+ t := ft.Out(i)
+
+ if t == reflect.TypeOf(Annotated{}) {
+ return fmt.Errorf(
+ "fx.Annotated should be passed to fx.Provide directly, "+
+ "it should not be returned by the constructor: "+
+ "fx.Provide received %v from:\n%+v",
+ fxreflect.FuncName(constructor), p.Stack)
+ }
+ }
+ }
+
+ if err := c.Provide(constructor, opts...); err != nil {
+ return fmt.Errorf("fx.Provide(%v) from:\n%+vFailed: %v", fxreflect.FuncName(constructor), p.Stack, err)
+ }
+ }
+ return nil
+}
diff --git a/vendor/go.uber.org/fx/replace.go b/vendor/go.uber.org/fx/replace.go
new file mode 100644
index 000000000..3ced9a915
--- /dev/null
+++ b/vendor/go.uber.org/fx/replace.go
@@ -0,0 +1,142 @@
+// Copyright (c) 2022 Uber Technologies, Inc.
+//
+// Permission is hereby granted, free of charge, to any person obtaining a copy
+// of this software and associated documentation files (the "Software"), to deal
+// in the Software without restriction, including without limitation the rights
+// to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
+// copies of the Software, and to permit persons to whom the Software is
+// furnished to do so, subject to the following conditions:
+//
+// The above copyright notice and this permission notice shall be included in
+// all copies or substantial portions of the Software.
+//
+// THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+// IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+// FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
+// AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
+// LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
+// OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
+// THE SOFTWARE.
+
+package fx
+
+import (
+ "fmt"
+ "reflect"
+ "strings"
+
+ "go.uber.org/fx/internal/fxreflect"
+)
+
+// Replace provides instantiated values for graph modification as if
+// they had been provided using a decorator with fx.Decorate.
+// The most specific type of each value (as determined by reflection) is used.
+//
+// Refer to the documentation on fx.Decorate to see how graph modifications
+// work with fx.Module.
+//
+// This serves a purpose similar to what fx.Supply does for fx.Provide.
+//
+// For example, given,
+//
+// var log *zap.Logger = ...
+//
+// The following two forms are equivalent.
+//
+// fx.Replace(log)
+//
+// fx.Decorate(
+// func() *zap.Logger {
+// return log
+// },
+// )
+//
+// Replace panics if a value (or annotation target) is an untyped nil or an error.
+//
+// # Replace Caveats
+//
+// As mentioned above, Replace uses the most specific type of the provided
+// value. For interface values, this refers to the type of the implementation,
+// not the interface. So if you try to replace an io.Writer, fx.Replace will
+// use the type of the implementation.
+//
+// var stderr io.Writer = os.Stderr
+// fx.Replace(stderr)
+//
+// Is equivalent to,
+//
+// fx.Decorate(func() *os.File { return os.Stderr })
+//
+// This is typically NOT what you intended. To replace the io.Writer in the
+// container with the value above, we need to use the fx.Annotate function with
+// the fx.As annotation.
+//
+// fx.Replace(
+// fx.Annotate(os.Stderr, fx.As(new(io.Writer)))
+// )
+func Replace(values ...interface{}) Option {
+ decorators := make([]interface{}, len(values)) // one function per value
+ types := make([]reflect.Type, len(values))
+ for i, value := range values {
+ switch value := value.(type) {
+ case annotated:
+ var typ reflect.Type
+ value.Target, typ = newReplaceDecorator(value.Target)
+ decorators[i] = value
+ types[i] = typ
+ default:
+ decorators[i], types[i] = newReplaceDecorator(value)
+ }
+ }
+
+ return replaceOption{
+ Targets: decorators,
+ Types: types,
+ Stack: fxreflect.CallerStack(1, 0),
+ }
+}
+
+type replaceOption struct {
+ Targets []interface{}
+ Types []reflect.Type // type of value produced by constructor[i]
+ Stack fxreflect.Stack
+}
+
+func (o replaceOption) apply(m *module) {
+ for _, target := range o.Targets {
+ m.decorators = append(m.decorators, decorator{
+ Target: target,
+ Stack: o.Stack,
+ IsReplace: true,
+ })
+ }
+}
+
+func (o replaceOption) String() string {
+ items := make([]string, 0, len(o.Targets))
+ for _, typ := range o.Types {
+ items = append(items, typ.String())
+ }
+ return fmt.Sprintf("fx.Replace(%s)", strings.Join(items, ", "))
+}
+
+// Returns a function that takes no parameters, and returns the given value.
+func newReplaceDecorator(value interface{}) (interface{}, reflect.Type) {
+ switch value.(type) {
+ case nil:
+ panic("untyped nil passed to fx.Replace")
+ case error:
+ panic("error value passed to fx.Replace")
+ }
+
+ typ := reflect.TypeOf(value)
+ returnTypes := []reflect.Type{typ}
+ returnValues := []reflect.Value{reflect.ValueOf(value)}
+
+ ft := reflect.FuncOf([]reflect.Type{}, returnTypes, false)
+ fv := reflect.MakeFunc(ft, func([]reflect.Value) []reflect.Value {
+ return returnValues
+ })
+
+ return fv.Interface(), typ
+}
diff --git a/vendor/go.uber.org/fx/shutdown.go b/vendor/go.uber.org/fx/shutdown.go
new file mode 100644
index 000000000..aa81e68d3
--- /dev/null
+++ b/vendor/go.uber.org/fx/shutdown.go
@@ -0,0 +1,111 @@
+// Copyright (c) 2019 Uber Technologies, Inc.
+//
+// Permission is hereby granted, free of charge, to any person obtaining a copy
+// of this software and associated documentation files (the "Software"), to deal
+// in the Software without restriction, including without limitation the rights
+// to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
+// copies of the Software, and to permit persons to whom the Software is
+// furnished to do so, subject to the following conditions:
+//
+// The above copyright notice and this permission notice shall be included in
+// all copies or substantial portions of the Software.
+//
+// THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+// IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+// FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
+// AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
+// LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
+// OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
+// THE SOFTWARE.
+
+package fx
+
+import (
+ "context"
+ "time"
+)
+
+// Shutdowner provides a method that can manually trigger the shutdown of the
+// application by sending a signal to all open Done channels. Shutdowner works
+// on applications using Run as well as Start, Done, and Stop. The Shutdowner is
+// provided to all Fx applications.
+type Shutdowner interface {
+ Shutdown(...ShutdownOption) error
+}
+
+// ShutdownOption provides a way to configure properties of the shutdown
+// process. Currently, no options have been implemented.
+type ShutdownOption interface {
+ apply(*shutdowner)
+}
+
+type exitCodeOption int
+
+func (code exitCodeOption) apply(s *shutdowner) {
+ s.exitCode = int(code)
+}
+
+var _ ShutdownOption = exitCodeOption(0)
+
+// ExitCode is a [ShutdownOption] that may be passed to the Shutdown method of the
+// [Shutdowner] interface.
+// The given integer exit code will be broadcasted to any receiver waiting
+// on a [ShutdownSignal] from the [Wait] method.
+func ExitCode(code int) ShutdownOption {
+ return exitCodeOption(code)
+}
+
+type shutdownTimeoutOption time.Duration
+
+func (to shutdownTimeoutOption) apply(s *shutdowner) {
+ s.shutdownTimeout = time.Duration(to)
+}
+
+var _ ShutdownOption = shutdownTimeoutOption(0)
+
+// ShutdownTimeout is a [ShutdownOption] that allows users to specify a timeout
+// for a given call to Shutdown method of the [Shutdowner] interface, as the
+// Shutdown method will block while waiting for a signal receiver relay
+// goroutine to stop.
+func ShutdownTimeout(timeout time.Duration) ShutdownOption {
+ return shutdownTimeoutOption(timeout)
+}
+
+type shutdowner struct {
+ app *App
+ exitCode int
+ shutdownTimeout time.Duration
+}
+
+// Shutdown broadcasts a signal to all of the application's Done channels
+// and begins the Stop process. Applications can be shut down only after they
+// have finished starting up.
+// In practice this means Shutdowner.Shutdown should not be called from an
+// fx.Invoke, but from a fx.Lifecycle.OnStart hook.
+func (s *shutdowner) Shutdown(opts ...ShutdownOption) error {
+ for _, opt := range opts {
+ opt.apply(s)
+ }
+
+ ctx := context.Background()
+
+ if s.shutdownTimeout != time.Duration(0) {
+ c, cancel := context.WithTimeout(
+ context.Background(),
+ s.shutdownTimeout,
+ )
+ defer cancel()
+ ctx = c
+ }
+
+ defer s.app.receivers.Stop(ctx)
+
+ return s.app.receivers.Broadcast(ShutdownSignal{
+ Signal: _sigTERM,
+ ExitCode: s.exitCode,
+ })
+}
+
+func (app *App) shutdowner() Shutdowner {
+ return &shutdowner{app: app}
+}
diff --git a/vendor/go.uber.org/fx/signal.go b/vendor/go.uber.org/fx/signal.go
new file mode 100644
index 000000000..1593c5de2
--- /dev/null
+++ b/vendor/go.uber.org/fx/signal.go
@@ -0,0 +1,253 @@
+// Copyright (c) 2022 Uber Technologies, Inc.
+//
+// Permission is hereby granted, free of charge, to any person obtaining a copy
+// of this software and associated documentation files (the "Software"), to deal
+// in the Software without restriction, including without limitation the rights
+// to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
+// copies of the Software, and to permit persons to whom the Software is
+// furnished to do so, subject to the following conditions:
+//
+// The above copyright notice and this permission notice shall be included in
+// all copies or substantial portions of the Software.
+//
+// THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+// IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+// FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
+// AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
+// LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
+// OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
+// THE SOFTWARE.
+
+package fx
+
+import (
+ "context"
+ "fmt"
+ "os"
+ "os/signal"
+ "sync"
+)
+
+// ShutdownSignal represents a signal to be written to Wait or Done.
+// Should a user call the Shutdown method via the Shutdowner interface with
+// a provided ExitCode, that exit code will be populated in the ExitCode field.
+//
+// Should the application receive an operating system signal,
+// the Signal field will be populated with the received os.Signal.
+type ShutdownSignal struct {
+ Signal os.Signal
+ ExitCode int
+}
+
+// String will render a ShutdownSignal type as a string suitable for printing.
+func (sig ShutdownSignal) String() string {
+ return fmt.Sprintf("%v", sig.Signal)
+}
+
+func newSignalReceivers() signalReceivers {
+ return signalReceivers{
+ notify: signal.Notify,
+ signals: make(chan os.Signal, 1),
+ }
+}
+
+type signalReceivers struct {
+ // this mutex protects writes and reads of this struct to prevent
+ // race conditions in a parallel execution pattern
+ m sync.Mutex
+
+ // our os.Signal channel we relay from
+ signals chan os.Signal
+ // when written to, will instruct the signal relayer to shutdown
+ shutdown chan struct{}
+ // is written to when signal relay has finished shutting down
+ finished chan struct{}
+
+ // this stub allows us to unit test signal relay functionality
+ notify func(c chan<- os.Signal, sig ...os.Signal)
+
+ // last will contain a pointer to the last ShutdownSignal received, or
+ // nil if none, if a new channel is created by Wait or Done, this last
+ // signal will be immediately written to, this allows Wait or Done state
+ // to be read after application stop
+ last *ShutdownSignal
+
+ // contains channels created by Done
+ done []chan os.Signal
+
+ // contains channels created by Wait
+ wait []chan ShutdownSignal
+}
+
+func (recv *signalReceivers) relayer(ctx context.Context) {
+ defer func() {
+ recv.finished <- struct{}{}
+ }()
+
+ select {
+ case <-recv.shutdown:
+ return
+ case signal := <-recv.signals:
+ recv.Broadcast(ShutdownSignal{
+ Signal: signal,
+ })
+ }
+}
+
+// running returns true if the signal relay goroutine is running.
+// this method must be invoked under locked mutex to avoid race condition.
+func (recv *signalReceivers) running() bool {
+ return recv.shutdown != nil && recv.finished != nil
+}
+
+func (recv *signalReceivers) Start(ctx context.Context) {
+ recv.m.Lock()
+ defer recv.m.Unlock()
+
+ // if the receiver has already been started; don't start it again
+ if recv.running() {
+ return
+ }
+
+ recv.last = nil
+ recv.finished = make(chan struct{}, 1)
+ recv.shutdown = make(chan struct{}, 1)
+ recv.notify(recv.signals, os.Interrupt, _sigINT, _sigTERM)
+ go recv.relayer(ctx)
+}
+
+func (recv *signalReceivers) Stop(ctx context.Context) error {
+ recv.m.Lock()
+ defer recv.m.Unlock()
+
+ // if the relayer is not running; return nil error
+ if !recv.running() {
+ return nil
+ }
+
+ recv.shutdown <- struct{}{}
+
+ select {
+ case <-ctx.Done():
+ return ctx.Err()
+ case <-recv.finished:
+ close(recv.shutdown)
+ close(recv.finished)
+ recv.shutdown = nil
+ recv.finished = nil
+ return nil
+ }
+}
+
+func (recv *signalReceivers) Done() chan os.Signal {
+ recv.m.Lock()
+ defer recv.m.Unlock()
+
+ ch := make(chan os.Signal, 1)
+
+	// If we had received a signal prior to the call of Done, send its
+	// os.Signal to the new channel.
+ // However we still want to have the operating system notify signals to this
+ // channel should the application receive another.
+ if recv.last != nil {
+ ch <- recv.last.Signal
+ }
+
+ recv.done = append(recv.done, ch)
+ return ch
+}
+
+func (recv *signalReceivers) Wait() chan ShutdownSignal {
+ recv.m.Lock()
+ defer recv.m.Unlock()
+
+ ch := make(chan ShutdownSignal, 1)
+
+ if recv.last != nil {
+ ch <- *recv.last
+ }
+
+ recv.wait = append(recv.wait, ch)
+ return ch
+}
+
+func (recv *signalReceivers) Broadcast(signal ShutdownSignal) error {
+ recv.m.Lock()
+ defer recv.m.Unlock()
+
+ recv.last = &signal
+
+ channels, unsent := recv.broadcast(
+ signal,
+ recv.broadcastDone,
+ recv.broadcastWait,
+ )
+
+ if unsent != 0 {
+ return &unsentSignalError{
+ Signal: signal,
+ Total: channels,
+ Unsent: unsent,
+ }
+ }
+
+ return nil
+}
+
+func (recv *signalReceivers) broadcast(
+ signal ShutdownSignal,
+ anchors ...func(ShutdownSignal) (int, int),
+) (int, int) {
+ var channels, unsent int
+
+ for _, anchor := range anchors {
+ c, u := anchor(signal)
+ channels += c
+ unsent += u
+ }
+
+ return channels, unsent
+}
+
+func (recv *signalReceivers) broadcastDone(signal ShutdownSignal) (int, int) {
+ var unsent int
+
+ for _, reader := range recv.done {
+ select {
+ case reader <- signal.Signal:
+ default:
+ unsent++
+ }
+ }
+
+ return len(recv.done), unsent
+}
+
+func (recv *signalReceivers) broadcastWait(signal ShutdownSignal) (int, int) {
+ var unsent int
+
+ for _, reader := range recv.wait {
+ select {
+ case reader <- signal:
+ default:
+ unsent++
+ }
+ }
+
+ return len(recv.wait), unsent
+}
+
+type unsentSignalError struct {
+ Signal ShutdownSignal
+ Unsent int
+ Total int
+}
+
+func (err *unsentSignalError) Error() string {
+ return fmt.Sprintf(
+ "send %v signal: %v/%v channels are blocked",
+ err.Signal,
+ err.Unsent,
+ err.Total,
+ )
+}
diff --git a/vendor/go.uber.org/fx/supply.go b/vendor/go.uber.org/fx/supply.go
new file mode 100644
index 000000000..f3997289e
--- /dev/null
+++ b/vendor/go.uber.org/fx/supply.go
@@ -0,0 +1,151 @@
+// Copyright (c) 2020 Uber Technologies, Inc.
+//
+// Permission is hereby granted, free of charge, to any person obtaining a copy
+// of this software and associated documentation files (the "Software"), to deal
+// in the Software without restriction, including without limitation the rights
+// to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
+// copies of the Software, and to permit persons to whom the Software is
+// furnished to do so, subject to the following conditions:
+//
+// The above copyright notice and this permission notice shall be included in
+// all copies or substantial portions of the Software.
+//
+// THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+// IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+// FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
+// AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
+// LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
+// OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
+// THE SOFTWARE.
+
+package fx
+
+import (
+ "fmt"
+ "reflect"
+ "strings"
+
+ "go.uber.org/fx/internal/fxreflect"
+)
+
+// Supply provides instantiated values for dependency injection as if
+// they had been provided using a constructor that simply returns them.
+// The most specific type of each value (as determined by reflection) is used.
+//
+// This serves a purpose similar to what fx.Replace does for fx.Decorate.
+//
+// For example, given:
+//
+// type (
+// TypeA struct{}
+// TypeB struct{}
+// TypeC struct{}
+// )
+//
+// var a, b, c = &TypeA{}, TypeB{}, &TypeC{}
+//
+// The following two forms are equivalent:
+//
+// fx.Supply(a, b, fx.Annotated{Target: c})
+//
+// fx.Provide(
+// func() *TypeA { return a },
+// func() TypeB { return b },
+// fx.Annotated{Target: func() *TypeC { return c }},
+// )
+//
+// Supply panics if a value (or annotation target) is an untyped nil or an error.
+//
+// # Supply Caveats
+//
+// As mentioned above, Supply uses the most specific type of the provided
+// value. For interface values, this refers to the type of the implementation,
+// not the interface. So if you supply an http.Handler, fx.Supply will use the
+// type of the implementation.
+//
+// var handler http.Handler = http.HandlerFunc(f)
+// fx.Supply(handler)
+//
+// Is equivalent to,
+//
+// fx.Provide(func() http.HandlerFunc { return f })
+//
+// This is typically NOT what you intended. To supply the handler above as an
+// http.Handler, we need to use the fx.Annotate function with the fx.As
+// annotation.
+//
+// fx.Supply(
+// fx.Annotate(handler, fx.As(new(http.Handler))),
+// )
+func Supply(values ...interface{}) Option {
+ constructors := make([]interface{}, len(values)) // one function per value
+ types := make([]reflect.Type, len(values))
+ for i, value := range values {
+ switch value := value.(type) {
+ case annotated:
+ var typ reflect.Type
+ value.Target, typ = newSupplyConstructor(value.Target)
+ constructors[i] = value
+ types[i] = typ
+ case Annotated:
+ var typ reflect.Type
+ value.Target, typ = newSupplyConstructor(value.Target)
+ constructors[i] = value
+ types[i] = typ
+ default:
+ constructors[i], types[i] = newSupplyConstructor(value)
+ }
+ }
+
+ return supplyOption{
+ Targets: constructors,
+ Types: types,
+ Stack: fxreflect.CallerStack(1, 0),
+ }
+}
+
+type supplyOption struct {
+ Targets []interface{}
+ Types []reflect.Type // type of value produced by constructor[i]
+ Stack fxreflect.Stack
+}
+
+func (o supplyOption) apply(m *module) {
+ for i, target := range o.Targets {
+ m.provides = append(m.provides, provide{
+ Target: target,
+ Stack: o.Stack,
+ IsSupply: true,
+ SupplyType: o.Types[i],
+ })
+ }
+}
+
+func (o supplyOption) String() string {
+ items := make([]string, 0, len(o.Targets))
+ for _, typ := range o.Types {
+ items = append(items, typ.String())
+ }
+ return fmt.Sprintf("fx.Supply(%s)", strings.Join(items, ", "))
+}
+
+// Returns a function that takes no parameters, and returns the given value.
+func newSupplyConstructor(value interface{}) (interface{}, reflect.Type) {
+ switch value.(type) {
+ case nil:
+ panic("untyped nil passed to fx.Supply")
+ case error:
+ panic("error value passed to fx.Supply")
+ }
+
+ typ := reflect.TypeOf(value)
+ returnTypes := []reflect.Type{typ}
+ returnValues := []reflect.Value{reflect.ValueOf(value)}
+
+ ft := reflect.FuncOf([]reflect.Type{}, returnTypes, false)
+ fv := reflect.MakeFunc(ft, func([]reflect.Value) []reflect.Value {
+ return returnValues
+ })
+
+ return fv.Interface(), typ
+}
diff --git a/vendor/go.uber.org/fx/version.go b/vendor/go.uber.org/fx/version.go
new file mode 100644
index 000000000..971d8b955
--- /dev/null
+++ b/vendor/go.uber.org/fx/version.go
@@ -0,0 +1,24 @@
+// Copyright (c) 2019 Uber Technologies, Inc.
+//
+// Permission is hereby granted, free of charge, to any person obtaining a copy
+// of this software and associated documentation files (the "Software"), to deal
+// in the Software without restriction, including without limitation the rights
+// to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
+// copies of the Software, and to permit persons to whom the Software is
+// furnished to do so, subject to the following conditions:
+//
+// The above copyright notice and this permission notice shall be included in
+// all copies or substantial portions of the Software.
+//
+// THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+// IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+// FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
+// AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
+// LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
+// OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
+// THE SOFTWARE.
+
+package fx
+
+// Version is exported for runtime compatibility checks.
+const Version = "1.19.2"
diff --git a/vendor/go.uber.org/multierr/CHANGELOG.md b/vendor/go.uber.org/multierr/CHANGELOG.md
index 3ba05276f..f8177b978 100644
--- a/vendor/go.uber.org/multierr/CHANGELOG.md
+++ b/vendor/go.uber.org/multierr/CHANGELOG.md
@@ -1,6 +1,29 @@
Releases
========
+v1.11.0 (2023-03-28)
+====================
+- `Errors` now supports any error that implements multiple-error
+ interface.
+- Add `Every` function to allow checking if all errors in the chain
+  satisfy `errors.Is` against the target error.
+
+v1.10.0 (2023-03-08)
+====================
+
+- Comply with Go 1.20's multiple-error interface.
+- Drop Go 1.18 support.
+ Per the support policy, only Go 1.19 and 1.20 are supported now.
+- Drop all non-test external dependencies.
+
+v1.9.0 (2022-12-12)
+===================
+
+- Add `AppendFunc` that allows passing functions, similar to
+ `AppendInvoke`.
+
+- Bump up yaml.v3 dependency to 3.0.1.
+
v1.8.0 (2022-02-28)
===================
diff --git a/vendor/go.uber.org/multierr/README.md b/vendor/go.uber.org/multierr/README.md
index 70aacecd7..5ab6ac40f 100644
--- a/vendor/go.uber.org/multierr/README.md
+++ b/vendor/go.uber.org/multierr/README.md
@@ -2,9 +2,29 @@
`multierr` allows combining one or more Go `error`s together.
+## Features
+
+- **Idiomatic**:
+ multierr follows best practices in Go, and keeps your code idiomatic.
+ - It keeps the underlying error type hidden,
+ allowing you to deal in `error` values exclusively.
+ - It provides APIs to safely append into an error from a `defer` statement.
+- **Performant**:
+ multierr is optimized for performance:
+ - It avoids allocations where possible.
+ - It utilizes slice resizing semantics to optimize common cases
+ like appending into the same error object from a loop.
+- **Interoperable**:
+ multierr interoperates with the Go standard library's error APIs seamlessly:
+ - The `errors.Is` and `errors.As` functions *just work*.
+- **Lightweight**:
+ multierr comes with virtually no dependencies.
+
## Installation
- go get -u go.uber.org/multierr
+```bash
+go get -u go.uber.org/multierr@latest
+```
## Status
diff --git a/vendor/go.uber.org/multierr/error.go b/vendor/go.uber.org/multierr/error.go
index f45af149c..3a828b2df 100644
--- a/vendor/go.uber.org/multierr/error.go
+++ b/vendor/go.uber.org/multierr/error.go
@@ -1,4 +1,4 @@
-// Copyright (c) 2017-2021 Uber Technologies, Inc.
+// Copyright (c) 2017-2023 Uber Technologies, Inc.
//
// Permission is hereby granted, free of charge, to any person obtaining a copy
// of this software and associated documentation files (the "Software"), to deal
@@ -20,106 +20,109 @@
// Package multierr allows combining one or more errors together.
//
-// Overview
+// # Overview
//
// Errors can be combined with the use of the Combine function.
//
-// multierr.Combine(
-// reader.Close(),
-// writer.Close(),
-// conn.Close(),
-// )
+// multierr.Combine(
+// reader.Close(),
+// writer.Close(),
+// conn.Close(),
+// )
//
// If only two errors are being combined, the Append function may be used
// instead.
//
-// err = multierr.Append(reader.Close(), writer.Close())
+// err = multierr.Append(reader.Close(), writer.Close())
//
// The underlying list of errors for a returned error object may be retrieved
// with the Errors function.
//
-// errors := multierr.Errors(err)
-// if len(errors) > 0 {
-// fmt.Println("The following errors occurred:", errors)
-// }
+// errors := multierr.Errors(err)
+// if len(errors) > 0 {
+// fmt.Println("The following errors occurred:", errors)
+// }
//
-// Appending from a loop
+// # Appending from a loop
//
// You sometimes need to append into an error from a loop.
//
-// var err error
-// for _, item := range items {
-// err = multierr.Append(err, process(item))
-// }
+// var err error
+// for _, item := range items {
+// err = multierr.Append(err, process(item))
+// }
//
// Cases like this may require knowledge of whether an individual instance
// failed. This usually requires introduction of a new variable.
//
-// var err error
-// for _, item := range items {
-// if perr := process(item); perr != nil {
-// log.Warn("skipping item", item)
-// err = multierr.Append(err, perr)
-// }
-// }
+// var err error
+// for _, item := range items {
+// if perr := process(item); perr != nil {
+// log.Warn("skipping item", item)
+// err = multierr.Append(err, perr)
+// }
+// }
//
// multierr includes AppendInto to simplify cases like this.
//
-// var err error
-// for _, item := range items {
-// if multierr.AppendInto(&err, process(item)) {
-// log.Warn("skipping item", item)
-// }
-// }
+// var err error
+// for _, item := range items {
+// if multierr.AppendInto(&err, process(item)) {
+// log.Warn("skipping item", item)
+// }
+// }
//
// This will append the error into the err variable, and return true if that
// individual error was non-nil.
//
-// See AppendInto for more information.
+// See [AppendInto] for more information.
//
-// Deferred Functions
+// # Deferred Functions
//
// Go makes it possible to modify the return value of a function in a defer
// block if the function was using named returns. This makes it possible to
// record resource cleanup failures from deferred blocks.
//
-// func sendRequest(req Request) (err error) {
-// conn, err := openConnection()
-// if err != nil {
-// return err
-// }
-// defer func() {
-// err = multierr.Append(err, conn.Close())
-// }()
-// // ...
-// }
+// func sendRequest(req Request) (err error) {
+// conn, err := openConnection()
+// if err != nil {
+// return err
+// }
+// defer func() {
+// err = multierr.Append(err, conn.Close())
+// }()
+// // ...
+// }
//
// multierr provides the Invoker type and AppendInvoke function to make cases
// like the above simpler and obviate the need for a closure. The following is
// roughly equivalent to the example above.
//
-// func sendRequest(req Request) (err error) {
-// conn, err := openConnection()
-// if err != nil {
-// return err
-// }
-// defer multierr.AppendInvoke(&err, multierr.Close(conn))
-// // ...
-// }
+// func sendRequest(req Request) (err error) {
+// conn, err := openConnection()
+// if err != nil {
+// return err
+// }
+// defer multierr.AppendInvoke(&err, multierr.Close(conn))
+// // ...
+// }
//
-// See AppendInvoke and Invoker for more information.
+// See [AppendInvoke] and [Invoker] for more information.
//
-// Advanced Usage
+// NOTE: If you're modifying an error from inside a defer, you MUST use a named
+// return value for that function.
+//
+// # Advanced Usage
//
// Errors returned by Combine and Append MAY implement the following
// interface.
//
-// type errorGroup interface {
-// // Returns a slice containing the underlying list of errors.
-// //
-// // This slice MUST NOT be modified by the caller.
-// Errors() []error
-// }
+// type errorGroup interface {
+// // Returns a slice containing the underlying list of errors.
+// //
+// // This slice MUST NOT be modified by the caller.
+// Errors() []error
+// }
//
// Note that if you need access to list of errors behind a multierr error, you
// should prefer using the Errors function. That said, if you need cheap
@@ -128,13 +131,13 @@
// because errors returned by Combine and Append are not guaranteed to
// implement this interface.
//
-// var errors []error
-// group, ok := err.(errorGroup)
-// if ok {
-// errors = group.Errors()
-// } else {
-// errors = []error{err}
-// }
+// var errors []error
+// group, ok := err.(errorGroup)
+// if ok {
+// errors = group.Errors()
+// } else {
+// errors = []error{err}
+// }
package multierr // import "go.uber.org/multierr"
import (
@@ -144,8 +147,7 @@ import (
"io"
"strings"
"sync"
-
- "go.uber.org/atomic"
+ "sync/atomic"
)
var (
@@ -185,34 +187,15 @@ type errorGroup interface {
// Errors returns a slice containing zero or more errors that the supplied
// error is composed of. If the error is nil, a nil slice is returned.
//
-// err := multierr.Append(r.Close(), w.Close())
-// errors := multierr.Errors(err)
+// err := multierr.Append(r.Close(), w.Close())
+// errors := multierr.Errors(err)
//
// If the error is not composed of other errors, the returned slice contains
// just the error that was passed in.
//
// Callers of this function are free to modify the returned slice.
func Errors(err error) []error {
- if err == nil {
- return nil
- }
-
- // Note that we're casting to multiError, not errorGroup. Our contract is
- // that returned errors MAY implement errorGroup. Errors, however, only
- // has special behavior for multierr-specific error objects.
- //
- // This behavior can be expanded in the future but I think it's prudent to
- // start with as little as possible in terms of contract and possibility
- // of misuse.
- eg, ok := err.(*multiError)
- if !ok {
- return []error{err}
- }
-
- errors := eg.Errors()
- result := make([]error, len(errors))
- copy(result, errors)
- return result
+ return extractErrors(err)
}
// multiError is an error that holds one or more errors.
@@ -227,8 +210,6 @@ type multiError struct {
errors []error
}
-var _ errorGroup = (*multiError)(nil)
-
// Errors returns the list of underlying errors.
//
// This slice MUST NOT be modified.
@@ -239,33 +220,6 @@ func (merr *multiError) Errors() []error {
return merr.errors
}
-// As attempts to find the first error in the error list that matches the type
-// of the value that target points to.
-//
-// This function allows errors.As to traverse the values stored on the
-// multierr error.
-func (merr *multiError) As(target interface{}) bool {
- for _, err := range merr.Errors() {
- if errors.As(err, target) {
- return true
- }
- }
- return false
-}
-
-// Is attempts to match the provided error against errors in the error list.
-//
-// This function allows errors.Is to traverse the values stored on the
-// multierr error.
-func (merr *multiError) Is(target error) bool {
- for _, err := range merr.Errors() {
- if errors.Is(err, target) {
- return true
- }
- }
- return false
-}
-
func (merr *multiError) Error() string {
if merr == nil {
return ""
@@ -281,6 +235,17 @@ func (merr *multiError) Error() string {
return result
}
+// Every compares every error in the given err against the given target error
+// using [errors.Is], and returns true only if every comparison returned true.
+func Every(err error, target error) bool {
+ for _, e := range extractErrors(err) {
+ if !errors.Is(e, target) {
+ return false
+ }
+ }
+ return true
+}
+
func (merr *multiError) Format(f fmt.State, c rune) {
if c == 'v' && f.Flag('+') {
merr.writeMultiline(f)
@@ -393,8 +358,7 @@ func fromSlice(errors []error) error {
// Otherwise "errors" escapes to the heap
// unconditionally for all other cases.
// This lets us optimize for the "no errors" case.
- out := make([]error, len(errors))
- copy(out, errors)
+ out := append(([]error)(nil), errors...)
return &multiError{errors: out}
}
}
@@ -420,32 +384,32 @@ func fromSlice(errors []error) error {
// If zero arguments were passed or if all items are nil, a nil error is
// returned.
//
-// Combine(nil, nil) // == nil
+// Combine(nil, nil) // == nil
//
// If only a single error was passed, it is returned as-is.
//
-// Combine(err) // == err
+// Combine(err) // == err
//
// Combine skips over nil arguments so this function may be used to combine
// together errors from operations that fail independently of each other.
//
-// multierr.Combine(
-// reader.Close(),
-// writer.Close(),
-// pipe.Close(),
-// )
+// multierr.Combine(
+// reader.Close(),
+// writer.Close(),
+// pipe.Close(),
+// )
//
// If any of the passed errors is a multierr error, it will be flattened along
// with the other errors.
//
-// multierr.Combine(multierr.Combine(err1, err2), err3)
-// // is the same as
-// multierr.Combine(err1, err2, err3)
+// multierr.Combine(multierr.Combine(err1, err2), err3)
+// // is the same as
+// multierr.Combine(err1, err2, err3)
//
// The returned error formats into a readable multi-line error message if
// formatted with %+v.
//
-// fmt.Sprintf("%+v", multierr.Combine(err1, err2))
+// fmt.Sprintf("%+v", multierr.Combine(err1, err2))
func Combine(errors ...error) error {
return fromSlice(errors)
}
@@ -455,16 +419,19 @@ func Combine(errors ...error) error {
// This function is a specialization of Combine for the common case where
// there are only two errors.
//
-// err = multierr.Append(reader.Close(), writer.Close())
+// err = multierr.Append(reader.Close(), writer.Close())
//
// The following pattern may also be used to record failure of deferred
// operations without losing information about the original error.
//
-// func doSomething(..) (err error) {
-// f := acquireResource()
-// defer func() {
-// err = multierr.Append(err, f.Close())
-// }()
+// func doSomething(..) (err error) {
+// f := acquireResource()
+// defer func() {
+// err = multierr.Append(err, f.Close())
+// }()
+//
+// Note that the variable MUST be a named return to append an error to it from
+// the defer statement. See also [AppendInvoke].
func Append(left error, right error) error {
switch {
case left == nil:
@@ -494,37 +461,37 @@ func Append(left error, right error) error {
// AppendInto appends an error into the destination of an error pointer and
// returns whether the error being appended was non-nil.
//
-// var err error
-// multierr.AppendInto(&err, r.Close())
-// multierr.AppendInto(&err, w.Close())
+// var err error
+// multierr.AppendInto(&err, r.Close())
+// multierr.AppendInto(&err, w.Close())
//
// The above is equivalent to,
//
-// err := multierr.Append(r.Close(), w.Close())
+// err := multierr.Append(r.Close(), w.Close())
//
// As AppendInto reports whether the provided error was non-nil, it may be
// used to build a multierr error in a loop more ergonomically. For example:
//
-// var err error
-// for line := range lines {
-// var item Item
-// if multierr.AppendInto(&err, parse(line, &item)) {
-// continue
-// }
-// items = append(items, item)
-// }
+// var err error
+// for line := range lines {
+// var item Item
+// if multierr.AppendInto(&err, parse(line, &item)) {
+// continue
+// }
+// items = append(items, item)
+// }
//
// Compare this with a version that relies solely on Append:
//
-// var err error
-// for line := range lines {
-// var item Item
-// if parseErr := parse(line, &item); parseErr != nil {
-// err = multierr.Append(err, parseErr)
-// continue
-// }
-// items = append(items, item)
-// }
+// var err error
+// for line := range lines {
+// var item Item
+// if parseErr := parse(line, &item); parseErr != nil {
+// err = multierr.Append(err, parseErr)
+// continue
+// }
+// items = append(items, item)
+// }
func AppendInto(into *error, err error) (errored bool) {
if into == nil {
// We panic if 'into' is nil. This is not documented above
@@ -545,7 +512,7 @@ func AppendInto(into *error, err error) (errored bool) {
// AppendInvoke to append the result of calling the function into an error.
// This allows you to conveniently defer capture of failing operations.
//
-// See also, Close and Invoke.
+// See also, [Close] and [Invoke].
type Invoker interface {
Invoke() error
}
@@ -556,19 +523,22 @@ type Invoker interface {
//
// For example,
//
-// func processReader(r io.Reader) (err error) {
-// scanner := bufio.NewScanner(r)
-// defer multierr.AppendInvoke(&err, multierr.Invoke(scanner.Err))
-// for scanner.Scan() {
-// // ...
-// }
-// // ...
-// }
+// func processReader(r io.Reader) (err error) {
+// scanner := bufio.NewScanner(r)
+// defer multierr.AppendInvoke(&err, multierr.Invoke(scanner.Err))
+// for scanner.Scan() {
+// // ...
+// }
+// // ...
+// }
//
// In this example, the following line will construct the Invoker right away,
// but defer the invocation of scanner.Err() until the function returns.
//
-// defer multierr.AppendInvoke(&err, multierr.Invoke(scanner.Err))
+// defer multierr.AppendInvoke(&err, multierr.Invoke(scanner.Err))
+//
+// Note that the error you're appending to from the defer statement MUST be a
+// named return.
type Invoke func() error
// Invoke calls the supplied function and returns its result.
@@ -579,19 +549,22 @@ func (i Invoke) Invoke() error { return i() }
//
// For example,
//
-// func processFile(path string) (err error) {
-// f, err := os.Open(path)
-// if err != nil {
-// return err
-// }
-// defer multierr.AppendInvoke(&err, multierr.Close(f))
-// return processReader(f)
-// }
+// func processFile(path string) (err error) {
+// f, err := os.Open(path)
+// if err != nil {
+// return err
+// }
+// defer multierr.AppendInvoke(&err, multierr.Close(f))
+// return processReader(f)
+// }
//
// In this example, multierr.Close will construct the Invoker right away, but
// defer the invocation of f.Close until the function returns.
//
-// defer multierr.AppendInvoke(&err, multierr.Close(f))
+// defer multierr.AppendInvoke(&err, multierr.Close(f))
+//
+// Note that the error you're appending to from the defer statement MUST be a
+// named return.
func Close(closer io.Closer) Invoker {
return Invoke(closer.Close)
}
@@ -601,52 +574,73 @@ func Close(closer io.Closer) Invoker {
// invocation of fallible operations until a function returns, and capture the
// resulting errors.
//
-// func doSomething(...) (err error) {
-// // ...
-// f, err := openFile(..)
-// if err != nil {
-// return err
-// }
+// func doSomething(...) (err error) {
+// // ...
+// f, err := openFile(..)
+// if err != nil {
+// return err
+// }
+//
+// // multierr will call f.Close() when this function returns and
+// // if the operation fails, it appends its error into the
+// // returned error.
+// defer multierr.AppendInvoke(&err, multierr.Close(f))
//
-// // multierr will call f.Close() when this function returns and
-// // if the operation fails, its append its error into the
-// // returned error.
-// defer multierr.AppendInvoke(&err, multierr.Close(f))
+// scanner := bufio.NewScanner(f)
+// // Similarly, this schedules scanner.Err to be called and
+// // inspected when the function returns and append its error
+// // into the returned error.
+// defer multierr.AppendInvoke(&err, multierr.Invoke(scanner.Err))
//
-// scanner := bufio.NewScanner(f)
-// // Similarly, this scheduled scanner.Err to be called and
-// // inspected when the function returns and append its error
-// // into the returned error.
-// defer multierr.AppendInvoke(&err, multierr.Invoke(scanner.Err))
+// // ...
+// }
//
-// // ...
-// }
+// NOTE: If used with a defer, the error variable MUST be a named return.
//
// Without defer, AppendInvoke behaves exactly like AppendInto.
//
-// err := // ...
-// multierr.AppendInvoke(&err, mutltierr.Invoke(foo))
+// err := // ...
+// multierr.AppendInvoke(&err, multierr.Invoke(foo))
//
-// // ...is roughly equivalent to...
+// // ...is roughly equivalent to...
//
-// err := // ...
-// multierr.AppendInto(&err, foo())
+// err := // ...
+// multierr.AppendInto(&err, foo())
//
// The advantage of the indirection introduced by Invoker is to make it easy
// to defer the invocation of a function. Without this indirection, the
// invoked function will be evaluated at the time of the defer block rather
// than when the function returns.
//
-// // BAD: This is likely not what the caller intended. This will evaluate
-// // foo() right away and append its result into the error when the
-// // function returns.
-// defer multierr.AppendInto(&err, foo())
+// // BAD: This is likely not what the caller intended. This will evaluate
+// // foo() right away and append its result into the error when the
+// // function returns.
+// defer multierr.AppendInto(&err, foo())
//
-// // GOOD: This will defer invocation of foo unutil the function returns.
-// defer multierr.AppendInvoke(&err, multierr.Invoke(foo))
+// // GOOD: This will defer invocation of foo until the function returns.
+// defer multierr.AppendInvoke(&err, multierr.Invoke(foo))
//
// multierr provides a few Invoker implementations out of the box for
-// convenience. See Invoker for more information.
+// convenience. See [Invoker] for more information.
func AppendInvoke(into *error, invoker Invoker) {
AppendInto(into, invoker.Invoke())
}
+
+// AppendFunc is a shorthand for [AppendInvoke].
+// It allows using function or method value directly
+// without having to wrap it into an [Invoker] interface.
+//
+// func doSomething(...) (err error) {
+// w, err := startWorker(...)
+// if err != nil {
+// return err
+// }
+//
+// // multierr will call w.Stop() when this function returns and
+// // if the operation fails, it appends its error into the
+// // returned error.
+// defer multierr.AppendFunc(&err, w.Stop)
+// }
+func AppendFunc(into *error, fn func() error) {
+ AppendInvoke(into, Invoke(fn))
+}
diff --git a/vendor/go.uber.org/multierr/error_post_go120.go b/vendor/go.uber.org/multierr/error_post_go120.go
new file mode 100644
index 000000000..a173f9c25
--- /dev/null
+++ b/vendor/go.uber.org/multierr/error_post_go120.go
@@ -0,0 +1,48 @@
+// Copyright (c) 2017-2023 Uber Technologies, Inc.
+//
+// Permission is hereby granted, free of charge, to any person obtaining a copy
+// of this software and associated documentation files (the "Software"), to deal
+// in the Software without restriction, including without limitation the rights
+// to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
+// copies of the Software, and to permit persons to whom the Software is
+// furnished to do so, subject to the following conditions:
+//
+// The above copyright notice and this permission notice shall be included in
+// all copies or substantial portions of the Software.
+//
+// THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+// IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+// FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
+// AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
+// LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
+// OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
+// THE SOFTWARE.
+
+//go:build go1.20
+// +build go1.20
+
+package multierr
+
+// Unwrap returns a list of errors wrapped by this multierr.
+func (merr *multiError) Unwrap() []error {
+ return merr.Errors()
+}
+
+type multipleErrors interface {
+ Unwrap() []error
+}
+
+func extractErrors(err error) []error {
+ if err == nil {
+ return nil
+ }
+
+ // check if the given err is an Unwrapable error that
+ // implements multipleErrors interface.
+ eg, ok := err.(multipleErrors)
+ if !ok {
+ return []error{err}
+ }
+
+ return append(([]error)(nil), eg.Unwrap()...)
+}
diff --git a/vendor/go.uber.org/multierr/error_pre_go120.go b/vendor/go.uber.org/multierr/error_pre_go120.go
new file mode 100644
index 000000000..93872a3fc
--- /dev/null
+++ b/vendor/go.uber.org/multierr/error_pre_go120.go
@@ -0,0 +1,79 @@
+// Copyright (c) 2017-2023 Uber Technologies, Inc.
+//
+// Permission is hereby granted, free of charge, to any person obtaining a copy
+// of this software and associated documentation files (the "Software"), to deal
+// in the Software without restriction, including without limitation the rights
+// to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
+// copies of the Software, and to permit persons to whom the Software is
+// furnished to do so, subject to the following conditions:
+//
+// The above copyright notice and this permission notice shall be included in
+// all copies or substantial portions of the Software.
+//
+// THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+// IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+// FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
+// AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
+// LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
+// OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
+// THE SOFTWARE.
+
+//go:build !go1.20
+// +build !go1.20
+
+package multierr
+
+import "errors"
+
+// Versions of Go before 1.20 did not support the Unwrap() []error method.
+// This provides a similar behavior by implementing the Is(..) and As(..)
+// methods.
+// See the errors.Join proposal for details:
+// https://github.com/golang/go/issues/53435
+
+// As attempts to find the first error in the error list that matches the type
+// of the value that target points to.
+//
+// This function allows errors.As to traverse the values stored on the
+// multierr error.
+func (merr *multiError) As(target interface{}) bool {
+ for _, err := range merr.Errors() {
+ if errors.As(err, target) {
+ return true
+ }
+ }
+ return false
+}
+
+// Is attempts to match the provided error against errors in the error list.
+//
+// This function allows errors.Is to traverse the values stored on the
+// multierr error.
+func (merr *multiError) Is(target error) bool {
+ for _, err := range merr.Errors() {
+ if errors.Is(err, target) {
+ return true
+ }
+ }
+ return false
+}
+
+func extractErrors(err error) []error {
+ if err == nil {
+ return nil
+ }
+
+ // Note that we're casting to multiError, not errorGroup. Our contract is
+ // that returned errors MAY implement errorGroup. Errors, however, only
+ // has special behavior for multierr-specific error objects.
+ //
+ // This behavior can be expanded in the future but I think it's prudent to
+ // start with as little as possible in terms of contract and possibility
+ // of misuse.
+ eg, ok := err.(*multiError)
+ if !ok {
+ return []error{err}
+ }
+
+ return append(([]error)(nil), eg.Errors()...)
+}
diff --git a/vendor/go.uber.org/multierr/glide.yaml b/vendor/go.uber.org/multierr/glide.yaml
deleted file mode 100644
index 6ef084ec2..000000000
--- a/vendor/go.uber.org/multierr/glide.yaml
+++ /dev/null
@@ -1,8 +0,0 @@
-package: go.uber.org/multierr
-import:
-- package: go.uber.org/atomic
- version: ^1
-testImport:
-- package: github.com/stretchr/testify
- subpackages:
- - assert
diff --git a/vendor/go.uber.org/zap/CHANGELOG.md b/vendor/go.uber.org/zap/CHANGELOG.md
index a2c38b72c..0db1f9f15 100644
--- a/vendor/go.uber.org/zap/CHANGELOG.md
+++ b/vendor/go.uber.org/zap/CHANGELOG.md
@@ -3,6 +3,31 @@ All notable changes to this project will be documented in this file.
This project adheres to [Semantic Versioning](http://semver.org/spec/v2.0.0.html).
+## 1.24.0 (30 Nov 2022)
+
+Enhancements:
+* [#1148][]: Add `Level` to both `Logger` and `SugaredLogger` that reports the
+ current minimum enabled log level.
+* [#1185][]: `SugaredLogger` turns errors to zap.Error automatically.
+
+Thanks to @Abirdcfly, @craigpastro, @nnnkkk7, and @sashamelentyev for their
+contributions to this release.
+
+[#1148]: https://github.com/uber-go/zap/pull/1148
+[#1185]: https://github.com/uber-go/zap/pull/1185
+
+## 1.23.0 (24 Aug 2022)
+
+Enhancements:
+* [#1147][]: Add a `zapcore.LevelOf` function to determine the level of a
+ `LevelEnabler` or `Core`.
+* [#1155][]: Add `zap.Stringers` field constructor to log arrays of objects
+ that implement `String() string`.
+
+[#1147]: https://github.com/uber-go/zap/pull/1147
+[#1155]: https://github.com/uber-go/zap/pull/1155
+
+
## 1.22.0 (8 Aug 2022)
Enhancements:
diff --git a/vendor/go.uber.org/zap/array_go118.go b/vendor/go.uber.org/zap/array_go118.go
index db36ec37f..d0d2c49d6 100644
--- a/vendor/go.uber.org/zap/array_go118.go
+++ b/vendor/go.uber.org/zap/array_go118.go
@@ -23,7 +23,11 @@
package zap
-import "go.uber.org/zap/zapcore"
+import (
+ "fmt"
+
+ "go.uber.org/zap/zapcore"
+)
// Objects constructs a field with the given key, holding a list of the
// provided objects that can be marshaled by Zap.
@@ -72,9 +76,9 @@ func (os objects[T]) MarshalLogArray(arr zapcore.ArrayEncoder) error {
return nil
}
-// objectMarshalerPtr is a constraint that specifies that the given type
+// ObjectMarshalerPtr is a constraint that specifies that the given type
// implements zapcore.ObjectMarshaler on a pointer receiver.
-type objectMarshalerPtr[T any] interface {
+type ObjectMarshalerPtr[T any] interface {
*T
zapcore.ObjectMarshaler
}
@@ -101,11 +105,11 @@ type objectMarshalerPtr[T any] interface {
//
// var requests []*Request = ...
// logger.Info("sending requests", zap.Objects("requests", requests))
-func ObjectValues[T any, P objectMarshalerPtr[T]](key string, values []T) Field {
+func ObjectValues[T any, P ObjectMarshalerPtr[T]](key string, values []T) Field {
return Array(key, objectValues[T, P](values))
}
-type objectValues[T any, P objectMarshalerPtr[T]] []T
+type objectValues[T any, P ObjectMarshalerPtr[T]] []T
func (os objectValues[T, P]) MarshalLogArray(arr zapcore.ArrayEncoder) error {
for i := range os {
@@ -122,3 +126,31 @@ func (os objectValues[T, P]) MarshalLogArray(arr zapcore.ArrayEncoder) error {
}
return nil
}
+
+// Stringers constructs a field with the given key, holding a list of the
+// output provided by the value's String method
+//
+// Given an object that implements String on the value receiver, you
+// can log a slice of those objects with Stringers like so:
+//
+// type Request struct{ ... }
+// func (a Request) String() string
+//
+// var requests []Request = ...
+// logger.Info("sending requests", zap.Stringers("requests", requests))
+//
+// Note that these objects must implement fmt.Stringer directly.
+// That is, if you're trying to marshal a []Request, the String method
+// must be declared on the Request type, not its pointer (*Request).
+func Stringers[T fmt.Stringer](key string, values []T) Field {
+ return Array(key, stringers[T](values))
+}
+
+type stringers[T fmt.Stringer] []T
+
+func (os stringers[T]) MarshalLogArray(arr zapcore.ArrayEncoder) error {
+ for _, o := range os {
+ arr.AppendString(o.String())
+ }
+ return nil
+}
diff --git a/vendor/go.uber.org/zap/internal/level_enabler.go b/vendor/go.uber.org/zap/internal/level_enabler.go
new file mode 100644
index 000000000..5f3e3f1b9
--- /dev/null
+++ b/vendor/go.uber.org/zap/internal/level_enabler.go
@@ -0,0 +1,35 @@
+// Copyright (c) 2022 Uber Technologies, Inc.
+//
+// Permission is hereby granted, free of charge, to any person obtaining a copy
+// of this software and associated documentation files (the "Software"), to deal
+// in the Software without restriction, including without limitation the rights
+// to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
+// copies of the Software, and to permit persons to whom the Software is
+// furnished to do so, subject to the following conditions:
+//
+// The above copyright notice and this permission notice shall be included in
+// all copies or substantial portions of the Software.
+//
+// THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+// IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+// FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
+// AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
+// LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
+// OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
+// THE SOFTWARE.
+
+package internal
+
+import "go.uber.org/zap/zapcore"
+
+// LeveledEnabler is an interface satisfied by LevelEnablers that are able to
+// report their own level.
+//
+// This interface is defined to use more conveniently in tests and non-zapcore
+// packages.
+// This cannot be imported from zapcore because of the cyclic dependency.
+type LeveledEnabler interface {
+ zapcore.LevelEnabler
+
+ Level() zapcore.Level
+}
diff --git a/vendor/go.uber.org/zap/level.go b/vendor/go.uber.org/zap/level.go
index 8f86c430f..db951e19a 100644
--- a/vendor/go.uber.org/zap/level.go
+++ b/vendor/go.uber.org/zap/level.go
@@ -22,6 +22,7 @@ package zap
import (
"go.uber.org/atomic"
+ "go.uber.org/zap/internal"
"go.uber.org/zap/zapcore"
)
@@ -70,6 +71,8 @@ type AtomicLevel struct {
l *atomic.Int32
}
+var _ internal.LeveledEnabler = AtomicLevel{}
+
// NewAtomicLevel creates an AtomicLevel with InfoLevel and above logging
// enabled.
func NewAtomicLevel() AtomicLevel {
diff --git a/vendor/go.uber.org/zap/logger.go b/vendor/go.uber.org/zap/logger.go
index b5f9a99fd..cd44030d1 100644
--- a/vendor/go.uber.org/zap/logger.go
+++ b/vendor/go.uber.org/zap/logger.go
@@ -183,6 +183,13 @@ func (log *Logger) With(fields ...Field) *Logger {
return l
}
+// Level reports the minimum enabled level for this logger.
+//
+// For NopLoggers, this is [zapcore.InvalidLevel].
+func (log *Logger) Level() zapcore.Level {
+ return zapcore.LevelOf(log.core)
+}
+
// Check returns a CheckedEntry if logging a message at the specified level
// is enabled. It's a completely optional optimization; in high-performance
// applications, Check can help avoid allocating a slice to hold fields.
diff --git a/vendor/go.uber.org/zap/options.go b/vendor/go.uber.org/zap/options.go
index 1511166c0..c4f3bca3d 100644
--- a/vendor/go.uber.org/zap/options.go
+++ b/vendor/go.uber.org/zap/options.go
@@ -133,7 +133,8 @@ func IncreaseLevel(lvl zapcore.LevelEnabler) Option {
}
// OnFatal sets the action to take on fatal logs.
-// Deprecated: Use WithFatalHook instead.
+//
+// Deprecated: Use [WithFatalHook] instead.
func OnFatal(action zapcore.CheckWriteAction) Option {
return WithFatalHook(action)
}
diff --git a/vendor/go.uber.org/zap/sink.go b/vendor/go.uber.org/zap/sink.go
index df46fa87a..478c9a10f 100644
--- a/vendor/go.uber.org/zap/sink.go
+++ b/vendor/go.uber.org/zap/sink.go
@@ -1,4 +1,4 @@
-// Copyright (c) 2016 Uber Technologies, Inc.
+// Copyright (c) 2016-2022 Uber Technologies, Inc.
//
// Permission is hereby granted, free of charge, to any person obtaining a copy
// of this software and associated documentation files (the "Software"), to deal
@@ -26,6 +26,7 @@ import (
"io"
"net/url"
"os"
+ "path/filepath"
"strings"
"sync"
@@ -34,23 +35,7 @@ import (
const schemeFile = "file"
-var (
- _sinkMutex sync.RWMutex
- _sinkFactories map[string]func(*url.URL) (Sink, error) // keyed by scheme
-)
-
-func init() {
- resetSinkRegistry()
-}
-
-func resetSinkRegistry() {
- _sinkMutex.Lock()
- defer _sinkMutex.Unlock()
-
- _sinkFactories = map[string]func(*url.URL) (Sink, error){
- schemeFile: newFileSink,
- }
-}
+var _sinkRegistry = newSinkRegistry()
// Sink defines the interface to write to and close logger destinations.
type Sink interface {
@@ -58,10 +43,6 @@ type Sink interface {
io.Closer
}
-type nopCloserSink struct{ zapcore.WriteSyncer }
-
-func (nopCloserSink) Close() error { return nil }
-
type errSinkNotFound struct {
scheme string
}
@@ -70,16 +51,29 @@ func (e *errSinkNotFound) Error() string {
return fmt.Sprintf("no sink found for scheme %q", e.scheme)
}
-// RegisterSink registers a user-supplied factory for all sinks with a
-// particular scheme.
-//
-// All schemes must be ASCII, valid under section 3.1 of RFC 3986
-// (https://tools.ietf.org/html/rfc3986#section-3.1), and must not already
-// have a factory registered. Zap automatically registers a factory for the
-// "file" scheme.
-func RegisterSink(scheme string, factory func(*url.URL) (Sink, error)) error {
- _sinkMutex.Lock()
- defer _sinkMutex.Unlock()
+type nopCloserSink struct{ zapcore.WriteSyncer }
+
+func (nopCloserSink) Close() error { return nil }
+
+type sinkRegistry struct {
+ mu sync.Mutex
+ factories map[string]func(*url.URL) (Sink, error) // keyed by scheme
+ openFile func(string, int, os.FileMode) (*os.File, error) // type matches os.OpenFile
+}
+
+func newSinkRegistry() *sinkRegistry {
+ sr := &sinkRegistry{
+ factories: make(map[string]func(*url.URL) (Sink, error)),
+ openFile: os.OpenFile,
+ }
+ sr.RegisterSink(schemeFile, sr.newFileSinkFromURL)
+ return sr
+}
+
+// RegisterSink registers the given factory for the specific scheme.
+func (sr *sinkRegistry) RegisterSink(scheme string, factory func(*url.URL) (Sink, error)) error {
+ sr.mu.Lock()
+ defer sr.mu.Unlock()
if scheme == "" {
return errors.New("can't register a sink factory for empty string")
@@ -88,14 +82,22 @@ func RegisterSink(scheme string, factory func(*url.URL) (Sink, error)) error {
if err != nil {
return fmt.Errorf("%q is not a valid scheme: %v", scheme, err)
}
- if _, ok := _sinkFactories[normalized]; ok {
+ if _, ok := sr.factories[normalized]; ok {
return fmt.Errorf("sink factory already registered for scheme %q", normalized)
}
- _sinkFactories[normalized] = factory
+ sr.factories[normalized] = factory
return nil
}
-func newSink(rawURL string) (Sink, error) {
+func (sr *sinkRegistry) newSink(rawURL string) (Sink, error) {
+ // URL parsing doesn't work well for Windows paths such as `c:\log.txt`, as scheme is set to
+ // the drive, and path is unset unless `c:/log.txt` is used.
+ // To avoid Windows-specific URL handling, we instead check IsAbs to open as a file.
+ // filepath.IsAbs is OS-specific, so IsAbs('c:/log.txt') is false outside of Windows.
+ if filepath.IsAbs(rawURL) {
+ return sr.newFileSinkFromPath(rawURL)
+ }
+
u, err := url.Parse(rawURL)
if err != nil {
return nil, fmt.Errorf("can't parse %q as a URL: %v", rawURL, err)
@@ -104,16 +106,27 @@ func newSink(rawURL string) (Sink, error) {
u.Scheme = schemeFile
}
- _sinkMutex.RLock()
- factory, ok := _sinkFactories[u.Scheme]
- _sinkMutex.RUnlock()
+ sr.mu.Lock()
+ factory, ok := sr.factories[u.Scheme]
+ sr.mu.Unlock()
if !ok {
return nil, &errSinkNotFound{u.Scheme}
}
return factory(u)
}
-func newFileSink(u *url.URL) (Sink, error) {
+// RegisterSink registers a user-supplied factory for all sinks with a
+// particular scheme.
+//
+// All schemes must be ASCII, valid under section 3.1 of RFC 3986
+// (https://tools.ietf.org/html/rfc3986#section-3.1), and must not already
+// have a factory registered. Zap automatically registers a factory for the
+// "file" scheme.
+func RegisterSink(scheme string, factory func(*url.URL) (Sink, error)) error {
+ return _sinkRegistry.RegisterSink(scheme, factory)
+}
+
+func (sr *sinkRegistry) newFileSinkFromURL(u *url.URL) (Sink, error) {
if u.User != nil {
return nil, fmt.Errorf("user and password not allowed with file URLs: got %v", u)
}
@@ -130,13 +143,18 @@ func newFileSink(u *url.URL) (Sink, error) {
if hn := u.Hostname(); hn != "" && hn != "localhost" {
return nil, fmt.Errorf("file URLs must leave host empty or use localhost: got %v", u)
}
- switch u.Path {
+
+ return sr.newFileSinkFromPath(u.Path)
+}
+
+func (sr *sinkRegistry) newFileSinkFromPath(path string) (Sink, error) {
+ switch path {
case "stdout":
return nopCloserSink{os.Stdout}, nil
case "stderr":
return nopCloserSink{os.Stderr}, nil
}
- return os.OpenFile(u.Path, os.O_WRONLY|os.O_APPEND|os.O_CREATE, 0666)
+ return sr.openFile(path, os.O_WRONLY|os.O_APPEND|os.O_CREATE, 0666)
}
func normalizeScheme(s string) (string, error) {
diff --git a/vendor/go.uber.org/zap/stacktrace.go b/vendor/go.uber.org/zap/stacktrace.go
index 3d187fa56..817a3bde8 100644
--- a/vendor/go.uber.org/zap/stacktrace.go
+++ b/vendor/go.uber.org/zap/stacktrace.go
@@ -154,7 +154,7 @@ func newStackFormatter(b *buffer.Buffer) stackFormatter {
// the final runtime.main/runtime.goexit frame.
func (sf *stackFormatter) FormatStack(stack *stacktrace) {
// Note: On the last iteration, frames.Next() returns false, with a valid
- // frame, but we ignore this frame. The last frame is a a runtime frame which
+ // frame, but we ignore this frame. The last frame is a runtime frame which
// adds noise, since it's only either runtime.main or runtime.goexit.
for frame, more := stack.Next(); more; frame, more = stack.Next() {
sf.FormatFrame(frame)
diff --git a/vendor/go.uber.org/zap/sugar.go b/vendor/go.uber.org/zap/sugar.go
index c450b2dda..ac387b3e4 100644
--- a/vendor/go.uber.org/zap/sugar.go
+++ b/vendor/go.uber.org/zap/sugar.go
@@ -31,6 +31,7 @@ import (
const (
_oddNumberErrMsg = "Ignored key without a value."
_nonStringKeyErrMsg = "Ignored key-value pairs with non-string keys."
+ _multipleErrMsg = "Multiple errors without a key."
)
// A SugaredLogger wraps the base Logger functionality in a slower, but less
@@ -114,6 +115,13 @@ func (s *SugaredLogger) With(args ...interface{}) *SugaredLogger {
return &SugaredLogger{base: s.base.With(s.sweetenFields(args)...)}
}
+// Level reports the minimum enabled level for this logger.
+//
+// For NopLoggers, this is [zapcore.InvalidLevel].
+func (s *SugaredLogger) Level() zapcore.Level {
+ return zapcore.LevelOf(s.base.core)
+}
+
// Debug uses fmt.Sprint to construct and log a message.
func (s *SugaredLogger) Debug(args ...interface{}) {
s.log(DebugLevel, "", args, nil)
@@ -329,10 +337,13 @@ func (s *SugaredLogger) sweetenFields(args []interface{}) []Field {
return nil
}
- // Allocate enough space for the worst case; if users pass only structured
- // fields, we shouldn't penalize them with extra allocations.
- fields := make([]Field, 0, len(args))
- var invalid invalidPairs
+ var (
+ // Allocate enough space for the worst case; if users pass only structured
+ // fields, we shouldn't penalize them with extra allocations.
+ fields = make([]Field, 0, len(args))
+ invalid invalidPairs
+ seenError bool
+ )
for i := 0; i < len(args); {
// This is a strongly-typed field. Consume it and move on.
@@ -342,6 +353,18 @@ func (s *SugaredLogger) sweetenFields(args []interface{}) []Field {
continue
}
+ // If it is an error, consume it and move on.
+ if err, ok := args[i].(error); ok {
+ if !seenError {
+ seenError = true
+ fields = append(fields, Error(err))
+ } else {
+ s.base.Error(_multipleErrMsg, Error(err))
+ }
+ i++
+ continue
+ }
+
// Make sure this element isn't a dangling key.
if i == len(args)-1 {
s.base.Error(_oddNumberErrMsg, Any("ignored", args[i]))
diff --git a/vendor/go.uber.org/zap/writer.go b/vendor/go.uber.org/zap/writer.go
index 00eba4ed7..f08728e1e 100644
--- a/vendor/go.uber.org/zap/writer.go
+++ b/vendor/go.uber.org/zap/writer.go
@@ -1,4 +1,4 @@
-// Copyright (c) 2016 Uber Technologies, Inc.
+// Copyright (c) 2016-2022 Uber Technologies, Inc.
//
// Permission is hereby granted, free of charge, to any person obtaining a copy
// of this software and associated documentation files (the "Software"), to deal
@@ -68,9 +68,9 @@ func open(paths []string) ([]zapcore.WriteSyncer, func(), error) {
var openErr error
for _, path := range paths {
- sink, err := newSink(path)
+ sink, err := _sinkRegistry.newSink(path)
if err != nil {
- openErr = multierr.Append(openErr, fmt.Errorf("couldn't open sink %q: %v", path, err))
+ openErr = multierr.Append(openErr, fmt.Errorf("open sink %q: %w", path, err))
continue
}
writers = append(writers, sink)
diff --git a/vendor/go.uber.org/zap/zapcore/core.go b/vendor/go.uber.org/zap/zapcore/core.go
index a1ef8b034..9dfd64051 100644
--- a/vendor/go.uber.org/zap/zapcore/core.go
+++ b/vendor/go.uber.org/zap/zapcore/core.go
@@ -69,6 +69,15 @@ type ioCore struct {
out WriteSyncer
}
+var (
+ _ Core = (*ioCore)(nil)
+ _ leveledEnabler = (*ioCore)(nil)
+)
+
+func (c *ioCore) Level() Level {
+ return LevelOf(c.LevelEnabler)
+}
+
func (c *ioCore) With(fields []Field) Core {
clone := c.clone()
addFields(clone.enc, fields)
diff --git a/vendor/go.uber.org/zap/zapcore/entry.go b/vendor/go.uber.org/zap/zapcore/entry.go
index ea0431eb3..9d326e95e 100644
--- a/vendor/go.uber.org/zap/zapcore/entry.go
+++ b/vendor/go.uber.org/zap/zapcore/entry.go
@@ -281,7 +281,8 @@ func (ce *CheckedEntry) AddCore(ent Entry, core Core) *CheckedEntry {
// Should sets this CheckedEntry's CheckWriteAction, which controls whether a
// Core will panic or fatal after writing this log entry. Like AddCore, it's
// safe to call on nil CheckedEntry references.
-// Deprecated: Use After(ent Entry, after CheckWriteHook) instead.
+//
+// Deprecated: Use [CheckedEntry.After] instead.
func (ce *CheckedEntry) Should(ent Entry, should CheckWriteAction) *CheckedEntry {
return ce.After(ent, should)
}
diff --git a/vendor/go.uber.org/zap/zapcore/hook.go b/vendor/go.uber.org/zap/zapcore/hook.go
index 5db4afb30..198def991 100644
--- a/vendor/go.uber.org/zap/zapcore/hook.go
+++ b/vendor/go.uber.org/zap/zapcore/hook.go
@@ -27,6 +27,11 @@ type hooked struct {
funcs []func(Entry) error
}
+var (
+ _ Core = (*hooked)(nil)
+ _ leveledEnabler = (*hooked)(nil)
+)
+
// RegisterHooks wraps a Core and runs a collection of user-defined callback
// hooks each time a message is logged. Execution of the callbacks is blocking.
//
@@ -40,6 +45,10 @@ func RegisterHooks(core Core, hooks ...func(Entry) error) Core {
}
}
+func (h *hooked) Level() Level {
+ return LevelOf(h.Core)
+}
+
func (h *hooked) Check(ent Entry, ce *CheckedEntry) *CheckedEntry {
// Let the wrapped Core decide whether to log this message or not. This
// also gives the downstream a chance to register itself directly with the
diff --git a/vendor/go.uber.org/zap/zapcore/increase_level.go b/vendor/go.uber.org/zap/zapcore/increase_level.go
index 5a1749261..7a11237ae 100644
--- a/vendor/go.uber.org/zap/zapcore/increase_level.go
+++ b/vendor/go.uber.org/zap/zapcore/increase_level.go
@@ -27,6 +27,11 @@ type levelFilterCore struct {
level LevelEnabler
}
+var (
+ _ Core = (*levelFilterCore)(nil)
+ _ leveledEnabler = (*levelFilterCore)(nil)
+)
+
// NewIncreaseLevelCore creates a core that can be used to increase the level of
// an existing Core. It cannot be used to decrease the logging level, as it acts
// as a filter before calling the underlying core. If level decreases the log level,
@@ -45,6 +50,10 @@ func (c *levelFilterCore) Enabled(lvl Level) bool {
return c.level.Enabled(lvl)
}
+func (c *levelFilterCore) Level() Level {
+ return LevelOf(c.level)
+}
+
func (c *levelFilterCore) With(fields []Field) Core {
return &levelFilterCore{c.core.With(fields), c.level}
}
diff --git a/vendor/go.uber.org/zap/zapcore/level.go b/vendor/go.uber.org/zap/zapcore/level.go
index 56e88dc0c..e01a24131 100644
--- a/vendor/go.uber.org/zap/zapcore/level.go
+++ b/vendor/go.uber.org/zap/zapcore/level.go
@@ -53,6 +53,11 @@ const (
_minLevel = DebugLevel
_maxLevel = FatalLevel
+
+ // InvalidLevel is an invalid value for Level.
+ //
+ // Core implementations may panic if they see messages of this level.
+ InvalidLevel = _maxLevel + 1
)
// ParseLevel parses a level based on the lower-case or all-caps ASCII
@@ -67,6 +72,43 @@ func ParseLevel(text string) (Level, error) {
return level, err
}
+type leveledEnabler interface {
+ LevelEnabler
+
+ Level() Level
+}
+
+// LevelOf reports the minimum enabled log level for the given LevelEnabler
+// from Zap's supported log levels, or [InvalidLevel] if none of them are
+// enabled.
+//
+// A LevelEnabler may implement a 'Level() Level' method to override the
+// behavior of this function.
+//
+// func (c *core) Level() Level {
+// return c.currentLevel
+// }
+//
+// It is recommended that [Core] implementations that wrap other cores use
+// LevelOf to retrieve the level of the wrapped core. For example,
+//
+// func (c *coreWrapper) Level() Level {
+// return zapcore.LevelOf(c.wrappedCore)
+// }
+func LevelOf(enab LevelEnabler) Level {
+ if lvler, ok := enab.(leveledEnabler); ok {
+ return lvler.Level()
+ }
+
+ for lvl := _minLevel; lvl <= _maxLevel; lvl++ {
+ if enab.Enabled(lvl) {
+ return lvl
+ }
+ }
+
+ return InvalidLevel
+}
+
// String returns a lower-case ASCII representation of the log level.
func (l Level) String() string {
switch l {
diff --git a/vendor/go.uber.org/zap/zapcore/sampler.go b/vendor/go.uber.org/zap/zapcore/sampler.go
index a15b7c910..dc518055a 100644
--- a/vendor/go.uber.org/zap/zapcore/sampler.go
+++ b/vendor/go.uber.org/zap/zapcore/sampler.go
@@ -1,4 +1,4 @@
-// Copyright (c) 2016 Uber Technologies, Inc.
+// Copyright (c) 2016-2022 Uber Technologies, Inc.
//
// Permission is hereby granted, free of charge, to any person obtaining a copy
// of this software and associated documentation files (the "Software"), to deal
@@ -175,6 +175,11 @@ type sampler struct {
hook func(Entry, SamplingDecision)
}
+var (
+ _ Core = (*sampler)(nil)
+ _ leveledEnabler = (*sampler)(nil)
+)
+
// NewSampler creates a Core that samples incoming entries, which
// caps the CPU and I/O load of logging while attempting to preserve a
// representative subset of your logs.
@@ -192,6 +197,10 @@ func NewSampler(core Core, tick time.Duration, first, thereafter int) Core {
return NewSamplerWithOptions(core, tick, first, thereafter)
}
+func (s *sampler) Level() Level {
+ return LevelOf(s.Core)
+}
+
func (s *sampler) With(fields []Field) Core {
return &sampler{
Core: s.Core.With(fields),
diff --git a/vendor/go.uber.org/zap/zapcore/tee.go b/vendor/go.uber.org/zap/zapcore/tee.go
index 07a32eef9..9bb32f055 100644
--- a/vendor/go.uber.org/zap/zapcore/tee.go
+++ b/vendor/go.uber.org/zap/zapcore/tee.go
@@ -1,4 +1,4 @@
-// Copyright (c) 2016 Uber Technologies, Inc.
+// Copyright (c) 2016-2022 Uber Technologies, Inc.
//
// Permission is hereby granted, free of charge, to any person obtaining a copy
// of this software and associated documentation files (the "Software"), to deal
@@ -24,6 +24,11 @@ import "go.uber.org/multierr"
type multiCore []Core
+var (
+ _ leveledEnabler = multiCore(nil)
+ _ Core = multiCore(nil)
+)
+
// NewTee creates a Core that duplicates log entries into two or more
// underlying Cores.
//
@@ -48,6 +53,16 @@ func (mc multiCore) With(fields []Field) Core {
return clone
}
+func (mc multiCore) Level() Level {
+ minLvl := _maxLevel // mc is never empty
+ for i := range mc {
+ if lvl := LevelOf(mc[i]); lvl < minLvl {
+ minLvl = lvl
+ }
+ }
+ return minLvl
+}
+
func (mc multiCore) Enabled(lvl Level) bool {
for i := range mc {
if mc[i].Enabled(lvl) {
diff --git a/vendor/golang.org/x/crypto/AUTHORS b/vendor/golang.org/x/crypto/AUTHORS
deleted file mode 100644
index 2b00ddba0..000000000
--- a/vendor/golang.org/x/crypto/AUTHORS
+++ /dev/null
@@ -1,3 +0,0 @@
-# This source code refers to The Go Authors for copyright purposes.
-# The master list of authors is in the main Go distribution,
-# visible at https://tip.golang.org/AUTHORS.
diff --git a/vendor/golang.org/x/crypto/CONTRIBUTORS b/vendor/golang.org/x/crypto/CONTRIBUTORS
deleted file mode 100644
index 1fbd3e976..000000000
--- a/vendor/golang.org/x/crypto/CONTRIBUTORS
+++ /dev/null
@@ -1,3 +0,0 @@
-# This source code was written by the Go contributors.
-# The master list of contributors is in the main Go distribution,
-# visible at https://tip.golang.org/CONTRIBUTORS.
diff --git a/vendor/golang.org/x/crypto/chacha20/chacha_generic.go b/vendor/golang.org/x/crypto/chacha20/chacha_generic.go
index a2ecf5c32..93eb5ae6d 100644
--- a/vendor/golang.org/x/crypto/chacha20/chacha_generic.go
+++ b/vendor/golang.org/x/crypto/chacha20/chacha_generic.go
@@ -12,7 +12,7 @@ import (
"errors"
"math/bits"
- "golang.org/x/crypto/internal/subtle"
+ "golang.org/x/crypto/internal/alias"
)
const (
@@ -189,7 +189,7 @@ func (s *Cipher) XORKeyStream(dst, src []byte) {
panic("chacha20: output smaller than input")
}
dst = dst[:len(src)]
- if subtle.InexactOverlap(dst, src) {
+ if alias.InexactOverlap(dst, src) {
panic("chacha20: invalid buffer overlap")
}
diff --git a/vendor/golang.org/x/crypto/chacha20poly1305/chacha20poly1305_amd64.go b/vendor/golang.org/x/crypto/chacha20poly1305/chacha20poly1305_amd64.go
index 25959b9a6..0c408c570 100644
--- a/vendor/golang.org/x/crypto/chacha20poly1305/chacha20poly1305_amd64.go
+++ b/vendor/golang.org/x/crypto/chacha20poly1305/chacha20poly1305_amd64.go
@@ -10,7 +10,7 @@ package chacha20poly1305
import (
"encoding/binary"
- "golang.org/x/crypto/internal/subtle"
+ "golang.org/x/crypto/internal/alias"
"golang.org/x/sys/cpu"
)
@@ -56,7 +56,7 @@ func (c *chacha20poly1305) seal(dst, nonce, plaintext, additionalData []byte) []
setupState(&state, &c.key, nonce)
ret, out := sliceForAppend(dst, len(plaintext)+16)
- if subtle.InexactOverlap(out, plaintext) {
+ if alias.InexactOverlap(out, plaintext) {
panic("chacha20poly1305: invalid buffer overlap")
}
chacha20Poly1305Seal(out[:], state[:], plaintext, additionalData)
@@ -73,7 +73,7 @@ func (c *chacha20poly1305) open(dst, nonce, ciphertext, additionalData []byte) (
ciphertext = ciphertext[:len(ciphertext)-16]
ret, out := sliceForAppend(dst, len(ciphertext))
- if subtle.InexactOverlap(out, ciphertext) {
+ if alias.InexactOverlap(out, ciphertext) {
panic("chacha20poly1305: invalid buffer overlap")
}
if !chacha20Poly1305Open(out, state[:], ciphertext, additionalData) {
diff --git a/vendor/golang.org/x/crypto/chacha20poly1305/chacha20poly1305_generic.go b/vendor/golang.org/x/crypto/chacha20poly1305/chacha20poly1305_generic.go
index 96b2fd898..6313898f0 100644
--- a/vendor/golang.org/x/crypto/chacha20poly1305/chacha20poly1305_generic.go
+++ b/vendor/golang.org/x/crypto/chacha20poly1305/chacha20poly1305_generic.go
@@ -8,8 +8,8 @@ import (
"encoding/binary"
"golang.org/x/crypto/chacha20"
+ "golang.org/x/crypto/internal/alias"
"golang.org/x/crypto/internal/poly1305"
- "golang.org/x/crypto/internal/subtle"
)
func writeWithPadding(p *poly1305.MAC, b []byte) {
@@ -30,7 +30,7 @@ func writeUint64(p *poly1305.MAC, n int) {
func (c *chacha20poly1305) sealGeneric(dst, nonce, plaintext, additionalData []byte) []byte {
ret, out := sliceForAppend(dst, len(plaintext)+poly1305.TagSize)
ciphertext, tag := out[:len(plaintext)], out[len(plaintext):]
- if subtle.InexactOverlap(out, plaintext) {
+ if alias.InexactOverlap(out, plaintext) {
panic("chacha20poly1305: invalid buffer overlap")
}
@@ -66,7 +66,7 @@ func (c *chacha20poly1305) openGeneric(dst, nonce, ciphertext, additionalData []
writeUint64(p, len(ciphertext))
ret, out := sliceForAppend(dst, len(ciphertext))
- if subtle.InexactOverlap(out, ciphertext) {
+ if alias.InexactOverlap(out, ciphertext) {
panic("chacha20poly1305: invalid buffer overlap")
}
if !p.Verify(tag) {
diff --git a/vendor/golang.org/x/crypto/cryptobyte/asn1.go b/vendor/golang.org/x/crypto/cryptobyte/asn1.go
index 3a1674a1e..3141a7f1b 100644
--- a/vendor/golang.org/x/crypto/cryptobyte/asn1.go
+++ b/vendor/golang.org/x/crypto/cryptobyte/asn1.go
@@ -264,36 +264,35 @@ func (s *String) ReadASN1Boolean(out *bool) bool {
return true
}
-var bigIntType = reflect.TypeOf((*big.Int)(nil)).Elem()
-
// ReadASN1Integer decodes an ASN.1 INTEGER into out and advances. If out does
-// not point to an integer or to a big.Int, it panics. It reports whether the
-// read was successful.
+// not point to an integer, to a big.Int, or to a []byte it panics. Only
+// positive and zero values can be decoded into []byte, and they are returned as
+// big-endian binary values that share memory with s. Positive values will have
+// no leading zeroes, and zero will be returned as a single zero byte.
+// ReadASN1Integer reports whether the read was successful.
func (s *String) ReadASN1Integer(out interface{}) bool {
- if reflect.TypeOf(out).Kind() != reflect.Ptr {
- panic("out is not a pointer")
- }
- switch reflect.ValueOf(out).Elem().Kind() {
- case reflect.Int, reflect.Int8, reflect.Int16, reflect.Int32, reflect.Int64:
+ switch out := out.(type) {
+ case *int, *int8, *int16, *int32, *int64:
var i int64
if !s.readASN1Int64(&i) || reflect.ValueOf(out).Elem().OverflowInt(i) {
return false
}
reflect.ValueOf(out).Elem().SetInt(i)
return true
- case reflect.Uint, reflect.Uint8, reflect.Uint16, reflect.Uint32, reflect.Uint64:
+ case *uint, *uint8, *uint16, *uint32, *uint64:
var u uint64
if !s.readASN1Uint64(&u) || reflect.ValueOf(out).Elem().OverflowUint(u) {
return false
}
reflect.ValueOf(out).Elem().SetUint(u)
return true
- case reflect.Struct:
- if reflect.TypeOf(out).Elem() == bigIntType {
- return s.readASN1BigInt(out.(*big.Int))
- }
+ case *big.Int:
+ return s.readASN1BigInt(out)
+ case *[]byte:
+ return s.readASN1Bytes(out)
+ default:
+ panic("out does not point to an integer type")
}
- panic("out does not point to an integer type")
}
func checkASN1Integer(bytes []byte) bool {
@@ -333,6 +332,21 @@ func (s *String) readASN1BigInt(out *big.Int) bool {
return true
}
+func (s *String) readASN1Bytes(out *[]byte) bool {
+ var bytes String
+ if !s.ReadASN1(&bytes, asn1.INTEGER) || !checkASN1Integer(bytes) {
+ return false
+ }
+ if bytes[0]&0x80 == 0x80 {
+ return false
+ }
+ for len(bytes) > 1 && bytes[0] == 0 {
+ bytes = bytes[1:]
+ }
+ *out = bytes
+ return true
+}
+
func (s *String) readASN1Int64(out *int64) bool {
var bytes String
if !s.ReadASN1(&bytes, asn1.INTEGER) || !checkASN1Integer(bytes) || !asn1Signed(out, bytes) {
@@ -532,7 +546,7 @@ func (s *String) ReadASN1BitString(out *encoding_asn1.BitString) bool {
return false
}
- paddingBits := uint8(bytes[0])
+ paddingBits := bytes[0]
bytes = bytes[1:]
if paddingBits > 7 ||
len(bytes) == 0 && paddingBits != 0 ||
@@ -545,7 +559,7 @@ func (s *String) ReadASN1BitString(out *encoding_asn1.BitString) bool {
return true
}
-// ReadASN1BitString decodes an ASN.1 BIT STRING into out and advances. It is
+// ReadASN1BitStringAsBytes decodes an ASN.1 BIT STRING into out and advances. It is
// an error if the BIT STRING is not a whole number of bytes. It reports
// whether the read was successful.
func (s *String) ReadASN1BitStringAsBytes(out *[]byte) bool {
@@ -554,7 +568,7 @@ func (s *String) ReadASN1BitStringAsBytes(out *[]byte) bool {
return false
}
- paddingBits := uint8(bytes[0])
+ paddingBits := bytes[0]
if paddingBits != 0 {
return false
}
@@ -654,34 +668,27 @@ func (s *String) SkipOptionalASN1(tag asn1.Tag) bool {
return s.ReadASN1(&unused, tag)
}
-// ReadOptionalASN1Integer attempts to read an optional ASN.1 INTEGER
-// explicitly tagged with tag into out and advances. If no element with a
-// matching tag is present, it writes defaultValue into out instead. If out
-// does not point to an integer or to a big.Int, it panics. It reports
-// whether the read was successful.
+// ReadOptionalASN1Integer attempts to read an optional ASN.1 INTEGER explicitly
+// tagged with tag into out and advances. If no element with a matching tag is
+// present, it writes defaultValue into out instead. Otherwise, it behaves like
+// ReadASN1Integer.
func (s *String) ReadOptionalASN1Integer(out interface{}, tag asn1.Tag, defaultValue interface{}) bool {
- if reflect.TypeOf(out).Kind() != reflect.Ptr {
- panic("out is not a pointer")
- }
var present bool
var i String
if !s.ReadOptionalASN1(&i, &present, tag) {
return false
}
if !present {
- switch reflect.ValueOf(out).Elem().Kind() {
- case reflect.Int, reflect.Int8, reflect.Int16, reflect.Int32, reflect.Int64,
- reflect.Uint, reflect.Uint8, reflect.Uint16, reflect.Uint32, reflect.Uint64:
+ switch out.(type) {
+ case *int, *int8, *int16, *int32, *int64,
+ *uint, *uint8, *uint16, *uint32, *uint64, *[]byte:
reflect.ValueOf(out).Elem().Set(reflect.ValueOf(defaultValue))
- case reflect.Struct:
- if reflect.TypeOf(out).Elem() != bigIntType {
- panic("invalid integer type")
- }
- if reflect.TypeOf(defaultValue).Kind() != reflect.Ptr ||
- reflect.TypeOf(defaultValue).Elem() != bigIntType {
+ case *big.Int:
+ if defaultValue, ok := defaultValue.(*big.Int); ok {
+ out.(*big.Int).Set(defaultValue)
+ } else {
panic("out points to big.Int, but defaultValue does not")
}
- out.(*big.Int).Set(defaultValue.(*big.Int))
default:
panic("invalid integer type")
}
diff --git a/vendor/golang.org/x/crypto/cryptobyte/builder.go b/vendor/golang.org/x/crypto/cryptobyte/builder.go
index c7ded7577..c05ac7d16 100644
--- a/vendor/golang.org/x/crypto/cryptobyte/builder.go
+++ b/vendor/golang.org/x/crypto/cryptobyte/builder.go
@@ -95,6 +95,11 @@ func (b *Builder) AddUint32(v uint32) {
b.add(byte(v>>24), byte(v>>16), byte(v>>8), byte(v))
}
+// AddUint64 appends a big-endian, 64-bit value to the byte string.
+func (b *Builder) AddUint64(v uint64) {
+ b.add(byte(v>>56), byte(v>>48), byte(v>>40), byte(v>>32), byte(v>>24), byte(v>>16), byte(v>>8), byte(v))
+}
+
// AddBytes appends a sequence of bytes to the byte string.
func (b *Builder) AddBytes(v []byte) {
b.add(v...)
@@ -298,9 +303,9 @@ func (b *Builder) add(bytes ...byte) {
b.result = append(b.result, bytes...)
}
-// Unwrite rolls back n bytes written directly to the Builder. An attempt by a
-// child builder passed to a continuation to unwrite bytes from its parent will
-// panic.
+// Unwrite rolls back non-negative n bytes written directly to the Builder.
+// An attempt by a child builder passed to a continuation to unwrite bytes
+// from its parent will panic.
func (b *Builder) Unwrite(n int) {
if b.err != nil {
return
@@ -312,6 +317,9 @@ func (b *Builder) Unwrite(n int) {
if length < 0 {
panic("cryptobyte: internal error")
}
+ if n < 0 {
+ panic("cryptobyte: attempted to unwrite negative number of bytes")
+ }
if n > length {
panic("cryptobyte: attempted to unwrite more than was written")
}
diff --git a/vendor/golang.org/x/crypto/cryptobyte/string.go b/vendor/golang.org/x/crypto/cryptobyte/string.go
index 589d297e6..0531a3d6f 100644
--- a/vendor/golang.org/x/crypto/cryptobyte/string.go
+++ b/vendor/golang.org/x/crypto/cryptobyte/string.go
@@ -81,6 +81,17 @@ func (s *String) ReadUint32(out *uint32) bool {
return true
}
+// ReadUint64 decodes a big-endian, 64-bit value into out and advances over it.
+// It reports whether the read was successful.
+func (s *String) ReadUint64(out *uint64) bool {
+ v := s.read(8)
+ if v == nil {
+ return false
+ }
+ *out = uint64(v[0])<<56 | uint64(v[1])<<48 | uint64(v[2])<<40 | uint64(v[3])<<32 | uint64(v[4])<<24 | uint64(v[5])<<16 | uint64(v[6])<<8 | uint64(v[7])
+ return true
+}
+
func (s *String) readUnsigned(out *uint32, length int) bool {
v := s.read(length)
if v == nil {
diff --git a/vendor/golang.org/x/crypto/curve25519/curve25519.go b/vendor/golang.org/x/crypto/curve25519/curve25519.go
index cda3fdd35..bc62161d6 100644
--- a/vendor/golang.org/x/crypto/curve25519/curve25519.go
+++ b/vendor/golang.org/x/crypto/curve25519/curve25519.go
@@ -9,7 +9,8 @@ package curve25519 // import "golang.org/x/crypto/curve25519"
import (
"crypto/subtle"
- "fmt"
+ "errors"
+ "strconv"
"golang.org/x/crypto/curve25519/internal/field"
)
@@ -124,10 +125,10 @@ func X25519(scalar, point []byte) ([]byte, error) {
func x25519(dst *[32]byte, scalar, point []byte) ([]byte, error) {
var in [32]byte
if l := len(scalar); l != 32 {
- return nil, fmt.Errorf("bad scalar length: %d, expected %d", l, 32)
+ return nil, errors.New("bad scalar length: " + strconv.Itoa(l) + ", expected 32")
}
if l := len(point); l != 32 {
- return nil, fmt.Errorf("bad point length: %d, expected %d", l, 32)
+ return nil, errors.New("bad point length: " + strconv.Itoa(l) + ", expected 32")
}
copy(in[:], scalar)
if &point[0] == &Basepoint[0] {
@@ -138,7 +139,7 @@ func x25519(dst *[32]byte, scalar, point []byte) ([]byte, error) {
copy(base[:], point)
ScalarMult(dst, &in, &base)
if subtle.ConstantTimeCompare(dst[:], zero[:]) == 1 {
- return nil, fmt.Errorf("bad input point: low order point")
+ return nil, errors.New("bad input point: low order point")
}
}
return dst[:], nil
diff --git a/vendor/golang.org/x/crypto/curve25519/internal/field/fe_generic.go b/vendor/golang.org/x/crypto/curve25519/internal/field/fe_generic.go
index 7b5b78cbd..2671217da 100644
--- a/vendor/golang.org/x/crypto/curve25519/internal/field/fe_generic.go
+++ b/vendor/golang.org/x/crypto/curve25519/internal/field/fe_generic.go
@@ -245,7 +245,7 @@ func feSquareGeneric(v, a *Element) {
v.carryPropagate()
}
-// carryPropagate brings the limbs below 52 bits by applying the reduction
+// carryPropagateGeneric brings the limbs below 52 bits by applying the reduction
// identity (a * 2²⁵⁵ + b = a * 19 + b) to the l4 carry. TODO inline
func (v *Element) carryPropagateGeneric() *Element {
c0 := v.l0 >> 51
diff --git a/vendor/golang.org/x/crypto/internal/subtle/aliasing.go b/vendor/golang.org/x/crypto/internal/alias/alias.go
similarity index 84%
rename from vendor/golang.org/x/crypto/internal/subtle/aliasing.go
rename to vendor/golang.org/x/crypto/internal/alias/alias.go
index 4fad24f8d..69c17f822 100644
--- a/vendor/golang.org/x/crypto/internal/subtle/aliasing.go
+++ b/vendor/golang.org/x/crypto/internal/alias/alias.go
@@ -5,9 +5,8 @@
//go:build !purego
// +build !purego
-// Package subtle implements functions that are often useful in cryptographic
-// code but require careful thought to use correctly.
-package subtle // import "golang.org/x/crypto/internal/subtle"
+// Package alias implements memory aliasing tests.
+package alias
import "unsafe"
diff --git a/vendor/golang.org/x/crypto/internal/subtle/aliasing_purego.go b/vendor/golang.org/x/crypto/internal/alias/alias_purego.go
similarity index 86%
rename from vendor/golang.org/x/crypto/internal/subtle/aliasing_purego.go
rename to vendor/golang.org/x/crypto/internal/alias/alias_purego.go
index 80ccbed2c..4775b0a43 100644
--- a/vendor/golang.org/x/crypto/internal/subtle/aliasing_purego.go
+++ b/vendor/golang.org/x/crypto/internal/alias/alias_purego.go
@@ -5,9 +5,8 @@
//go:build purego
// +build purego
-// Package subtle implements functions that are often useful in cryptographic
-// code but require careful thought to use correctly.
-package subtle // import "golang.org/x/crypto/internal/subtle"
+// Package alias implements memory aliasing tests.
+package alias
// This is the Google App Engine standard variant based on reflect
// because the unsafe package and cgo are disallowed.
diff --git a/vendor/golang.org/x/crypto/salsa20/salsa/hsalsa20.go b/vendor/golang.org/x/crypto/salsa20/salsa/hsalsa20.go
index 4c96147c8..3fd05b275 100644
--- a/vendor/golang.org/x/crypto/salsa20/salsa/hsalsa20.go
+++ b/vendor/golang.org/x/crypto/salsa20/salsa/hsalsa20.go
@@ -5,6 +5,8 @@
// Package salsa provides low-level access to functions in the Salsa family.
package salsa // import "golang.org/x/crypto/salsa20/salsa"
+import "math/bits"
+
// Sigma is the Salsa20 constant for 256-bit keys.
var Sigma = [16]byte{'e', 'x', 'p', 'a', 'n', 'd', ' ', '3', '2', '-', 'b', 'y', 't', 'e', ' ', 'k'}
@@ -31,76 +33,76 @@ func HSalsa20(out *[32]byte, in *[16]byte, k *[32]byte, c *[16]byte) {
for i := 0; i < 20; i += 2 {
u := x0 + x12
- x4 ^= u<<7 | u>>(32-7)
+ x4 ^= bits.RotateLeft32(u, 7)
u = x4 + x0
- x8 ^= u<<9 | u>>(32-9)
+ x8 ^= bits.RotateLeft32(u, 9)
u = x8 + x4
- x12 ^= u<<13 | u>>(32-13)
+ x12 ^= bits.RotateLeft32(u, 13)
u = x12 + x8
- x0 ^= u<<18 | u>>(32-18)
+ x0 ^= bits.RotateLeft32(u, 18)
u = x5 + x1
- x9 ^= u<<7 | u>>(32-7)
+ x9 ^= bits.RotateLeft32(u, 7)
u = x9 + x5
- x13 ^= u<<9 | u>>(32-9)
+ x13 ^= bits.RotateLeft32(u, 9)
u = x13 + x9
- x1 ^= u<<13 | u>>(32-13)
+ x1 ^= bits.RotateLeft32(u, 13)
u = x1 + x13
- x5 ^= u<<18 | u>>(32-18)
+ x5 ^= bits.RotateLeft32(u, 18)
u = x10 + x6
- x14 ^= u<<7 | u>>(32-7)
+ x14 ^= bits.RotateLeft32(u, 7)
u = x14 + x10
- x2 ^= u<<9 | u>>(32-9)
+ x2 ^= bits.RotateLeft32(u, 9)
u = x2 + x14
- x6 ^= u<<13 | u>>(32-13)
+ x6 ^= bits.RotateLeft32(u, 13)
u = x6 + x2
- x10 ^= u<<18 | u>>(32-18)
+ x10 ^= bits.RotateLeft32(u, 18)
u = x15 + x11
- x3 ^= u<<7 | u>>(32-7)
+ x3 ^= bits.RotateLeft32(u, 7)
u = x3 + x15
- x7 ^= u<<9 | u>>(32-9)
+ x7 ^= bits.RotateLeft32(u, 9)
u = x7 + x3
- x11 ^= u<<13 | u>>(32-13)
+ x11 ^= bits.RotateLeft32(u, 13)
u = x11 + x7
- x15 ^= u<<18 | u>>(32-18)
+ x15 ^= bits.RotateLeft32(u, 18)
u = x0 + x3
- x1 ^= u<<7 | u>>(32-7)
+ x1 ^= bits.RotateLeft32(u, 7)
u = x1 + x0
- x2 ^= u<<9 | u>>(32-9)
+ x2 ^= bits.RotateLeft32(u, 9)
u = x2 + x1
- x3 ^= u<<13 | u>>(32-13)
+ x3 ^= bits.RotateLeft32(u, 13)
u = x3 + x2
- x0 ^= u<<18 | u>>(32-18)
+ x0 ^= bits.RotateLeft32(u, 18)
u = x5 + x4
- x6 ^= u<<7 | u>>(32-7)
+ x6 ^= bits.RotateLeft32(u, 7)
u = x6 + x5
- x7 ^= u<<9 | u>>(32-9)
+ x7 ^= bits.RotateLeft32(u, 9)
u = x7 + x6
- x4 ^= u<<13 | u>>(32-13)
+ x4 ^= bits.RotateLeft32(u, 13)
u = x4 + x7
- x5 ^= u<<18 | u>>(32-18)
+ x5 ^= bits.RotateLeft32(u, 18)
u = x10 + x9
- x11 ^= u<<7 | u>>(32-7)
+ x11 ^= bits.RotateLeft32(u, 7)
u = x11 + x10
- x8 ^= u<<9 | u>>(32-9)
+ x8 ^= bits.RotateLeft32(u, 9)
u = x8 + x11
- x9 ^= u<<13 | u>>(32-13)
+ x9 ^= bits.RotateLeft32(u, 13)
u = x9 + x8
- x10 ^= u<<18 | u>>(32-18)
+ x10 ^= bits.RotateLeft32(u, 18)
u = x15 + x14
- x12 ^= u<<7 | u>>(32-7)
+ x12 ^= bits.RotateLeft32(u, 7)
u = x12 + x15
- x13 ^= u<<9 | u>>(32-9)
+ x13 ^= bits.RotateLeft32(u, 9)
u = x13 + x12
- x14 ^= u<<13 | u>>(32-13)
+ x14 ^= bits.RotateLeft32(u, 13)
u = x14 + x13
- x15 ^= u<<18 | u>>(32-18)
+ x15 ^= bits.RotateLeft32(u, 18)
}
out[0] = byte(x0)
out[1] = byte(x0 >> 8)
diff --git a/vendor/golang.org/x/crypto/salsa20/salsa/salsa208.go b/vendor/golang.org/x/crypto/salsa20/salsa/salsa208.go
index 9bfc0927c..7ec7bb39b 100644
--- a/vendor/golang.org/x/crypto/salsa20/salsa/salsa208.go
+++ b/vendor/golang.org/x/crypto/salsa20/salsa/salsa208.go
@@ -4,6 +4,8 @@
package salsa
+import "math/bits"
+
// Core208 applies the Salsa20/8 core function to the 64-byte array in and puts
// the result into the 64-byte array out. The input and output may be the same array.
func Core208(out *[64]byte, in *[64]byte) {
@@ -29,76 +31,76 @@ func Core208(out *[64]byte, in *[64]byte) {
for i := 0; i < 8; i += 2 {
u := x0 + x12
- x4 ^= u<<7 | u>>(32-7)
+ x4 ^= bits.RotateLeft32(u, 7)
u = x4 + x0
- x8 ^= u<<9 | u>>(32-9)
+ x8 ^= bits.RotateLeft32(u, 9)
u = x8 + x4
- x12 ^= u<<13 | u>>(32-13)
+ x12 ^= bits.RotateLeft32(u, 13)
u = x12 + x8
- x0 ^= u<<18 | u>>(32-18)
+ x0 ^= bits.RotateLeft32(u, 18)
u = x5 + x1
- x9 ^= u<<7 | u>>(32-7)
+ x9 ^= bits.RotateLeft32(u, 7)
u = x9 + x5
- x13 ^= u<<9 | u>>(32-9)
+ x13 ^= bits.RotateLeft32(u, 9)
u = x13 + x9
- x1 ^= u<<13 | u>>(32-13)
+ x1 ^= bits.RotateLeft32(u, 13)
u = x1 + x13
- x5 ^= u<<18 | u>>(32-18)
+ x5 ^= bits.RotateLeft32(u, 18)
u = x10 + x6
- x14 ^= u<<7 | u>>(32-7)
+ x14 ^= bits.RotateLeft32(u, 7)
u = x14 + x10
- x2 ^= u<<9 | u>>(32-9)
+ x2 ^= bits.RotateLeft32(u, 9)
u = x2 + x14
- x6 ^= u<<13 | u>>(32-13)
+ x6 ^= bits.RotateLeft32(u, 13)
u = x6 + x2
- x10 ^= u<<18 | u>>(32-18)
+ x10 ^= bits.RotateLeft32(u, 18)
u = x15 + x11
- x3 ^= u<<7 | u>>(32-7)
+ x3 ^= bits.RotateLeft32(u, 7)
u = x3 + x15
- x7 ^= u<<9 | u>>(32-9)
+ x7 ^= bits.RotateLeft32(u, 9)
u = x7 + x3
- x11 ^= u<<13 | u>>(32-13)
+ x11 ^= bits.RotateLeft32(u, 13)
u = x11 + x7
- x15 ^= u<<18 | u>>(32-18)
+ x15 ^= bits.RotateLeft32(u, 18)
u = x0 + x3
- x1 ^= u<<7 | u>>(32-7)
+ x1 ^= bits.RotateLeft32(u, 7)
u = x1 + x0
- x2 ^= u<<9 | u>>(32-9)
+ x2 ^= bits.RotateLeft32(u, 9)
u = x2 + x1
- x3 ^= u<<13 | u>>(32-13)
+ x3 ^= bits.RotateLeft32(u, 13)
u = x3 + x2
- x0 ^= u<<18 | u>>(32-18)
+ x0 ^= bits.RotateLeft32(u, 18)
u = x5 + x4
- x6 ^= u<<7 | u>>(32-7)
+ x6 ^= bits.RotateLeft32(u, 7)
u = x6 + x5
- x7 ^= u<<9 | u>>(32-9)
+ x7 ^= bits.RotateLeft32(u, 9)
u = x7 + x6
- x4 ^= u<<13 | u>>(32-13)
+ x4 ^= bits.RotateLeft32(u, 13)
u = x4 + x7
- x5 ^= u<<18 | u>>(32-18)
+ x5 ^= bits.RotateLeft32(u, 18)
u = x10 + x9
- x11 ^= u<<7 | u>>(32-7)
+ x11 ^= bits.RotateLeft32(u, 7)
u = x11 + x10
- x8 ^= u<<9 | u>>(32-9)
+ x8 ^= bits.RotateLeft32(u, 9)
u = x8 + x11
- x9 ^= u<<13 | u>>(32-13)
+ x9 ^= bits.RotateLeft32(u, 13)
u = x9 + x8
- x10 ^= u<<18 | u>>(32-18)
+ x10 ^= bits.RotateLeft32(u, 18)
u = x15 + x14
- x12 ^= u<<7 | u>>(32-7)
+ x12 ^= bits.RotateLeft32(u, 7)
u = x12 + x15
- x13 ^= u<<9 | u>>(32-9)
+ x13 ^= bits.RotateLeft32(u, 9)
u = x13 + x12
- x14 ^= u<<13 | u>>(32-13)
+ x14 ^= bits.RotateLeft32(u, 13)
u = x14 + x13
- x15 ^= u<<18 | u>>(32-18)
+ x15 ^= bits.RotateLeft32(u, 18)
}
x0 += j0
x1 += j1
diff --git a/vendor/golang.org/x/crypto/salsa20/salsa/salsa20_ref.go b/vendor/golang.org/x/crypto/salsa20/salsa/salsa20_ref.go
index 68169c6d6..e5cdb9a25 100644
--- a/vendor/golang.org/x/crypto/salsa20/salsa/salsa20_ref.go
+++ b/vendor/golang.org/x/crypto/salsa20/salsa/salsa20_ref.go
@@ -4,6 +4,8 @@
package salsa
+import "math/bits"
+
const rounds = 20
// core applies the Salsa20 core function to 16-byte input in, 32-byte key k,
@@ -31,76 +33,76 @@ func core(out *[64]byte, in *[16]byte, k *[32]byte, c *[16]byte) {
for i := 0; i < rounds; i += 2 {
u := x0 + x12
- x4 ^= u<<7 | u>>(32-7)
+ x4 ^= bits.RotateLeft32(u, 7)
u = x4 + x0
- x8 ^= u<<9 | u>>(32-9)
+ x8 ^= bits.RotateLeft32(u, 9)
u = x8 + x4
- x12 ^= u<<13 | u>>(32-13)
+ x12 ^= bits.RotateLeft32(u, 13)
u = x12 + x8
- x0 ^= u<<18 | u>>(32-18)
+ x0 ^= bits.RotateLeft32(u, 18)
u = x5 + x1
- x9 ^= u<<7 | u>>(32-7)
+ x9 ^= bits.RotateLeft32(u, 7)
u = x9 + x5
- x13 ^= u<<9 | u>>(32-9)
+ x13 ^= bits.RotateLeft32(u, 9)
u = x13 + x9
- x1 ^= u<<13 | u>>(32-13)
+ x1 ^= bits.RotateLeft32(u, 13)
u = x1 + x13
- x5 ^= u<<18 | u>>(32-18)
+ x5 ^= bits.RotateLeft32(u, 18)
u = x10 + x6
- x14 ^= u<<7 | u>>(32-7)
+ x14 ^= bits.RotateLeft32(u, 7)
u = x14 + x10
- x2 ^= u<<9 | u>>(32-9)
+ x2 ^= bits.RotateLeft32(u, 9)
u = x2 + x14
- x6 ^= u<<13 | u>>(32-13)
+ x6 ^= bits.RotateLeft32(u, 13)
u = x6 + x2
- x10 ^= u<<18 | u>>(32-18)
+ x10 ^= bits.RotateLeft32(u, 18)
u = x15 + x11
- x3 ^= u<<7 | u>>(32-7)
+ x3 ^= bits.RotateLeft32(u, 7)
u = x3 + x15
- x7 ^= u<<9 | u>>(32-9)
+ x7 ^= bits.RotateLeft32(u, 9)
u = x7 + x3
- x11 ^= u<<13 | u>>(32-13)
+ x11 ^= bits.RotateLeft32(u, 13)
u = x11 + x7
- x15 ^= u<<18 | u>>(32-18)
+ x15 ^= bits.RotateLeft32(u, 18)
u = x0 + x3
- x1 ^= u<<7 | u>>(32-7)
+ x1 ^= bits.RotateLeft32(u, 7)
u = x1 + x0
- x2 ^= u<<9 | u>>(32-9)
+ x2 ^= bits.RotateLeft32(u, 9)
u = x2 + x1
- x3 ^= u<<13 | u>>(32-13)
+ x3 ^= bits.RotateLeft32(u, 13)
u = x3 + x2
- x0 ^= u<<18 | u>>(32-18)
+ x0 ^= bits.RotateLeft32(u, 18)
u = x5 + x4
- x6 ^= u<<7 | u>>(32-7)
+ x6 ^= bits.RotateLeft32(u, 7)
u = x6 + x5
- x7 ^= u<<9 | u>>(32-9)
+ x7 ^= bits.RotateLeft32(u, 9)
u = x7 + x6
- x4 ^= u<<13 | u>>(32-13)
+ x4 ^= bits.RotateLeft32(u, 13)
u = x4 + x7
- x5 ^= u<<18 | u>>(32-18)
+ x5 ^= bits.RotateLeft32(u, 18)
u = x10 + x9
- x11 ^= u<<7 | u>>(32-7)
+ x11 ^= bits.RotateLeft32(u, 7)
u = x11 + x10
- x8 ^= u<<9 | u>>(32-9)
+ x8 ^= bits.RotateLeft32(u, 9)
u = x8 + x11
- x9 ^= u<<13 | u>>(32-13)
+ x9 ^= bits.RotateLeft32(u, 13)
u = x9 + x8
- x10 ^= u<<18 | u>>(32-18)
+ x10 ^= bits.RotateLeft32(u, 18)
u = x15 + x14
- x12 ^= u<<7 | u>>(32-7)
+ x12 ^= bits.RotateLeft32(u, 7)
u = x12 + x15
- x13 ^= u<<9 | u>>(32-9)
+ x13 ^= bits.RotateLeft32(u, 9)
u = x13 + x12
- x14 ^= u<<13 | u>>(32-13)
+ x14 ^= bits.RotateLeft32(u, 13)
u = x14 + x13
- x15 ^= u<<18 | u>>(32-18)
+ x15 ^= bits.RotateLeft32(u, 18)
}
x0 += j0
x1 += j1
diff --git a/vendor/golang.org/x/crypto/sha3/keccakf.go b/vendor/golang.org/x/crypto/sha3/keccakf.go
index 0f4ae8bac..e5faa375c 100644
--- a/vendor/golang.org/x/crypto/sha3/keccakf.go
+++ b/vendor/golang.org/x/crypto/sha3/keccakf.go
@@ -7,6 +7,8 @@
package sha3
+import "math/bits"
+
// rc stores the round constants for use in the ι step.
var rc = [24]uint64{
0x0000000000000001,
@@ -60,13 +62,13 @@ func keccakF1600(a *[25]uint64) {
bc0 = a[0] ^ d0
t = a[6] ^ d1
- bc1 = t<<44 | t>>(64-44)
+ bc1 = bits.RotateLeft64(t, 44)
t = a[12] ^ d2
- bc2 = t<<43 | t>>(64-43)
+ bc2 = bits.RotateLeft64(t, 43)
t = a[18] ^ d3
- bc3 = t<<21 | t>>(64-21)
+ bc3 = bits.RotateLeft64(t, 21)
t = a[24] ^ d4
- bc4 = t<<14 | t>>(64-14)
+ bc4 = bits.RotateLeft64(t, 14)
a[0] = bc0 ^ (bc2 &^ bc1) ^ rc[i]
a[6] = bc1 ^ (bc3 &^ bc2)
a[12] = bc2 ^ (bc4 &^ bc3)
@@ -74,15 +76,15 @@ func keccakF1600(a *[25]uint64) {
a[24] = bc4 ^ (bc1 &^ bc0)
t = a[10] ^ d0
- bc2 = t<<3 | t>>(64-3)
+ bc2 = bits.RotateLeft64(t, 3)
t = a[16] ^ d1
- bc3 = t<<45 | t>>(64-45)
+ bc3 = bits.RotateLeft64(t, 45)
t = a[22] ^ d2
- bc4 = t<<61 | t>>(64-61)
+ bc4 = bits.RotateLeft64(t, 61)
t = a[3] ^ d3
- bc0 = t<<28 | t>>(64-28)
+ bc0 = bits.RotateLeft64(t, 28)
t = a[9] ^ d4
- bc1 = t<<20 | t>>(64-20)
+ bc1 = bits.RotateLeft64(t, 20)
a[10] = bc0 ^ (bc2 &^ bc1)
a[16] = bc1 ^ (bc3 &^ bc2)
a[22] = bc2 ^ (bc4 &^ bc3)
@@ -90,15 +92,15 @@ func keccakF1600(a *[25]uint64) {
a[9] = bc4 ^ (bc1 &^ bc0)
t = a[20] ^ d0
- bc4 = t<<18 | t>>(64-18)
+ bc4 = bits.RotateLeft64(t, 18)
t = a[1] ^ d1
- bc0 = t<<1 | t>>(64-1)
+ bc0 = bits.RotateLeft64(t, 1)
t = a[7] ^ d2
- bc1 = t<<6 | t>>(64-6)
+ bc1 = bits.RotateLeft64(t, 6)
t = a[13] ^ d3
- bc2 = t<<25 | t>>(64-25)
+ bc2 = bits.RotateLeft64(t, 25)
t = a[19] ^ d4
- bc3 = t<<8 | t>>(64-8)
+ bc3 = bits.RotateLeft64(t, 8)
a[20] = bc0 ^ (bc2 &^ bc1)
a[1] = bc1 ^ (bc3 &^ bc2)
a[7] = bc2 ^ (bc4 &^ bc3)
@@ -106,15 +108,15 @@ func keccakF1600(a *[25]uint64) {
a[19] = bc4 ^ (bc1 &^ bc0)
t = a[5] ^ d0
- bc1 = t<<36 | t>>(64-36)
+ bc1 = bits.RotateLeft64(t, 36)
t = a[11] ^ d1
- bc2 = t<<10 | t>>(64-10)
+ bc2 = bits.RotateLeft64(t, 10)
t = a[17] ^ d2
- bc3 = t<<15 | t>>(64-15)
+ bc3 = bits.RotateLeft64(t, 15)
t = a[23] ^ d3
- bc4 = t<<56 | t>>(64-56)
+ bc4 = bits.RotateLeft64(t, 56)
t = a[4] ^ d4
- bc0 = t<<27 | t>>(64-27)
+ bc0 = bits.RotateLeft64(t, 27)
a[5] = bc0 ^ (bc2 &^ bc1)
a[11] = bc1 ^ (bc3 &^ bc2)
a[17] = bc2 ^ (bc4 &^ bc3)
@@ -122,15 +124,15 @@ func keccakF1600(a *[25]uint64) {
a[4] = bc4 ^ (bc1 &^ bc0)
t = a[15] ^ d0
- bc3 = t<<41 | t>>(64-41)
+ bc3 = bits.RotateLeft64(t, 41)
t = a[21] ^ d1
- bc4 = t<<2 | t>>(64-2)
+ bc4 = bits.RotateLeft64(t, 2)
t = a[2] ^ d2
- bc0 = t<<62 | t>>(64-62)
+ bc0 = bits.RotateLeft64(t, 62)
t = a[8] ^ d3
- bc1 = t<<55 | t>>(64-55)
+ bc1 = bits.RotateLeft64(t, 55)
t = a[14] ^ d4
- bc2 = t<<39 | t>>(64-39)
+ bc2 = bits.RotateLeft64(t, 39)
a[15] = bc0 ^ (bc2 &^ bc1)
a[21] = bc1 ^ (bc3 &^ bc2)
a[2] = bc2 ^ (bc4 &^ bc3)
@@ -151,13 +153,13 @@ func keccakF1600(a *[25]uint64) {
bc0 = a[0] ^ d0
t = a[16] ^ d1
- bc1 = t<<44 | t>>(64-44)
+ bc1 = bits.RotateLeft64(t, 44)
t = a[7] ^ d2
- bc2 = t<<43 | t>>(64-43)
+ bc2 = bits.RotateLeft64(t, 43)
t = a[23] ^ d3
- bc3 = t<<21 | t>>(64-21)
+ bc3 = bits.RotateLeft64(t, 21)
t = a[14] ^ d4
- bc4 = t<<14 | t>>(64-14)
+ bc4 = bits.RotateLeft64(t, 14)
a[0] = bc0 ^ (bc2 &^ bc1) ^ rc[i+1]
a[16] = bc1 ^ (bc3 &^ bc2)
a[7] = bc2 ^ (bc4 &^ bc3)
@@ -165,15 +167,15 @@ func keccakF1600(a *[25]uint64) {
a[14] = bc4 ^ (bc1 &^ bc0)
t = a[20] ^ d0
- bc2 = t<<3 | t>>(64-3)
+ bc2 = bits.RotateLeft64(t, 3)
t = a[11] ^ d1
- bc3 = t<<45 | t>>(64-45)
+ bc3 = bits.RotateLeft64(t, 45)
t = a[2] ^ d2
- bc4 = t<<61 | t>>(64-61)
+ bc4 = bits.RotateLeft64(t, 61)
t = a[18] ^ d3
- bc0 = t<<28 | t>>(64-28)
+ bc0 = bits.RotateLeft64(t, 28)
t = a[9] ^ d4
- bc1 = t<<20 | t>>(64-20)
+ bc1 = bits.RotateLeft64(t, 20)
a[20] = bc0 ^ (bc2 &^ bc1)
a[11] = bc1 ^ (bc3 &^ bc2)
a[2] = bc2 ^ (bc4 &^ bc3)
@@ -181,15 +183,15 @@ func keccakF1600(a *[25]uint64) {
a[9] = bc4 ^ (bc1 &^ bc0)
t = a[15] ^ d0
- bc4 = t<<18 | t>>(64-18)
+ bc4 = bits.RotateLeft64(t, 18)
t = a[6] ^ d1
- bc0 = t<<1 | t>>(64-1)
+ bc0 = bits.RotateLeft64(t, 1)
t = a[22] ^ d2
- bc1 = t<<6 | t>>(64-6)
+ bc1 = bits.RotateLeft64(t, 6)
t = a[13] ^ d3
- bc2 = t<<25 | t>>(64-25)
+ bc2 = bits.RotateLeft64(t, 25)
t = a[4] ^ d4
- bc3 = t<<8 | t>>(64-8)
+ bc3 = bits.RotateLeft64(t, 8)
a[15] = bc0 ^ (bc2 &^ bc1)
a[6] = bc1 ^ (bc3 &^ bc2)
a[22] = bc2 ^ (bc4 &^ bc3)
@@ -197,15 +199,15 @@ func keccakF1600(a *[25]uint64) {
a[4] = bc4 ^ (bc1 &^ bc0)
t = a[10] ^ d0
- bc1 = t<<36 | t>>(64-36)
+ bc1 = bits.RotateLeft64(t, 36)
t = a[1] ^ d1
- bc2 = t<<10 | t>>(64-10)
+ bc2 = bits.RotateLeft64(t, 10)
t = a[17] ^ d2
- bc3 = t<<15 | t>>(64-15)
+ bc3 = bits.RotateLeft64(t, 15)
t = a[8] ^ d3
- bc4 = t<<56 | t>>(64-56)
+ bc4 = bits.RotateLeft64(t, 56)
t = a[24] ^ d4
- bc0 = t<<27 | t>>(64-27)
+ bc0 = bits.RotateLeft64(t, 27)
a[10] = bc0 ^ (bc2 &^ bc1)
a[1] = bc1 ^ (bc3 &^ bc2)
a[17] = bc2 ^ (bc4 &^ bc3)
@@ -213,15 +215,15 @@ func keccakF1600(a *[25]uint64) {
a[24] = bc4 ^ (bc1 &^ bc0)
t = a[5] ^ d0
- bc3 = t<<41 | t>>(64-41)
+ bc3 = bits.RotateLeft64(t, 41)
t = a[21] ^ d1
- bc4 = t<<2 | t>>(64-2)
+ bc4 = bits.RotateLeft64(t, 2)
t = a[12] ^ d2
- bc0 = t<<62 | t>>(64-62)
+ bc0 = bits.RotateLeft64(t, 62)
t = a[3] ^ d3
- bc1 = t<<55 | t>>(64-55)
+ bc1 = bits.RotateLeft64(t, 55)
t = a[19] ^ d4
- bc2 = t<<39 | t>>(64-39)
+ bc2 = bits.RotateLeft64(t, 39)
a[5] = bc0 ^ (bc2 &^ bc1)
a[21] = bc1 ^ (bc3 &^ bc2)
a[12] = bc2 ^ (bc4 &^ bc3)
@@ -242,13 +244,13 @@ func keccakF1600(a *[25]uint64) {
bc0 = a[0] ^ d0
t = a[11] ^ d1
- bc1 = t<<44 | t>>(64-44)
+ bc1 = bits.RotateLeft64(t, 44)
t = a[22] ^ d2
- bc2 = t<<43 | t>>(64-43)
+ bc2 = bits.RotateLeft64(t, 43)
t = a[8] ^ d3
- bc3 = t<<21 | t>>(64-21)
+ bc3 = bits.RotateLeft64(t, 21)
t = a[19] ^ d4
- bc4 = t<<14 | t>>(64-14)
+ bc4 = bits.RotateLeft64(t, 14)
a[0] = bc0 ^ (bc2 &^ bc1) ^ rc[i+2]
a[11] = bc1 ^ (bc3 &^ bc2)
a[22] = bc2 ^ (bc4 &^ bc3)
@@ -256,15 +258,15 @@ func keccakF1600(a *[25]uint64) {
a[19] = bc4 ^ (bc1 &^ bc0)
t = a[15] ^ d0
- bc2 = t<<3 | t>>(64-3)
+ bc2 = bits.RotateLeft64(t, 3)
t = a[1] ^ d1
- bc3 = t<<45 | t>>(64-45)
+ bc3 = bits.RotateLeft64(t, 45)
t = a[12] ^ d2
- bc4 = t<<61 | t>>(64-61)
+ bc4 = bits.RotateLeft64(t, 61)
t = a[23] ^ d3
- bc0 = t<<28 | t>>(64-28)
+ bc0 = bits.RotateLeft64(t, 28)
t = a[9] ^ d4
- bc1 = t<<20 | t>>(64-20)
+ bc1 = bits.RotateLeft64(t, 20)
a[15] = bc0 ^ (bc2 &^ bc1)
a[1] = bc1 ^ (bc3 &^ bc2)
a[12] = bc2 ^ (bc4 &^ bc3)
@@ -272,15 +274,15 @@ func keccakF1600(a *[25]uint64) {
a[9] = bc4 ^ (bc1 &^ bc0)
t = a[5] ^ d0
- bc4 = t<<18 | t>>(64-18)
+ bc4 = bits.RotateLeft64(t, 18)
t = a[16] ^ d1
- bc0 = t<<1 | t>>(64-1)
+ bc0 = bits.RotateLeft64(t, 1)
t = a[2] ^ d2
- bc1 = t<<6 | t>>(64-6)
+ bc1 = bits.RotateLeft64(t, 6)
t = a[13] ^ d3
- bc2 = t<<25 | t>>(64-25)
+ bc2 = bits.RotateLeft64(t, 25)
t = a[24] ^ d4
- bc3 = t<<8 | t>>(64-8)
+ bc3 = bits.RotateLeft64(t, 8)
a[5] = bc0 ^ (bc2 &^ bc1)
a[16] = bc1 ^ (bc3 &^ bc2)
a[2] = bc2 ^ (bc4 &^ bc3)
@@ -288,15 +290,15 @@ func keccakF1600(a *[25]uint64) {
a[24] = bc4 ^ (bc1 &^ bc0)
t = a[20] ^ d0
- bc1 = t<<36 | t>>(64-36)
+ bc1 = bits.RotateLeft64(t, 36)
t = a[6] ^ d1
- bc2 = t<<10 | t>>(64-10)
+ bc2 = bits.RotateLeft64(t, 10)
t = a[17] ^ d2
- bc3 = t<<15 | t>>(64-15)
+ bc3 = bits.RotateLeft64(t, 15)
t = a[3] ^ d3
- bc4 = t<<56 | t>>(64-56)
+ bc4 = bits.RotateLeft64(t, 56)
t = a[14] ^ d4
- bc0 = t<<27 | t>>(64-27)
+ bc0 = bits.RotateLeft64(t, 27)
a[20] = bc0 ^ (bc2 &^ bc1)
a[6] = bc1 ^ (bc3 &^ bc2)
a[17] = bc2 ^ (bc4 &^ bc3)
@@ -304,15 +306,15 @@ func keccakF1600(a *[25]uint64) {
a[14] = bc4 ^ (bc1 &^ bc0)
t = a[10] ^ d0
- bc3 = t<<41 | t>>(64-41)
+ bc3 = bits.RotateLeft64(t, 41)
t = a[21] ^ d1
- bc4 = t<<2 | t>>(64-2)
+ bc4 = bits.RotateLeft64(t, 2)
t = a[7] ^ d2
- bc0 = t<<62 | t>>(64-62)
+ bc0 = bits.RotateLeft64(t, 62)
t = a[18] ^ d3
- bc1 = t<<55 | t>>(64-55)
+ bc1 = bits.RotateLeft64(t, 55)
t = a[4] ^ d4
- bc2 = t<<39 | t>>(64-39)
+ bc2 = bits.RotateLeft64(t, 39)
a[10] = bc0 ^ (bc2 &^ bc1)
a[21] = bc1 ^ (bc3 &^ bc2)
a[7] = bc2 ^ (bc4 &^ bc3)
@@ -333,13 +335,13 @@ func keccakF1600(a *[25]uint64) {
bc0 = a[0] ^ d0
t = a[1] ^ d1
- bc1 = t<<44 | t>>(64-44)
+ bc1 = bits.RotateLeft64(t, 44)
t = a[2] ^ d2
- bc2 = t<<43 | t>>(64-43)
+ bc2 = bits.RotateLeft64(t, 43)
t = a[3] ^ d3
- bc3 = t<<21 | t>>(64-21)
+ bc3 = bits.RotateLeft64(t, 21)
t = a[4] ^ d4
- bc4 = t<<14 | t>>(64-14)
+ bc4 = bits.RotateLeft64(t, 14)
a[0] = bc0 ^ (bc2 &^ bc1) ^ rc[i+3]
a[1] = bc1 ^ (bc3 &^ bc2)
a[2] = bc2 ^ (bc4 &^ bc3)
@@ -347,15 +349,15 @@ func keccakF1600(a *[25]uint64) {
a[4] = bc4 ^ (bc1 &^ bc0)
t = a[5] ^ d0
- bc2 = t<<3 | t>>(64-3)
+ bc2 = bits.RotateLeft64(t, 3)
t = a[6] ^ d1
- bc3 = t<<45 | t>>(64-45)
+ bc3 = bits.RotateLeft64(t, 45)
t = a[7] ^ d2
- bc4 = t<<61 | t>>(64-61)
+ bc4 = bits.RotateLeft64(t, 61)
t = a[8] ^ d3
- bc0 = t<<28 | t>>(64-28)
+ bc0 = bits.RotateLeft64(t, 28)
t = a[9] ^ d4
- bc1 = t<<20 | t>>(64-20)
+ bc1 = bits.RotateLeft64(t, 20)
a[5] = bc0 ^ (bc2 &^ bc1)
a[6] = bc1 ^ (bc3 &^ bc2)
a[7] = bc2 ^ (bc4 &^ bc3)
@@ -363,15 +365,15 @@ func keccakF1600(a *[25]uint64) {
a[9] = bc4 ^ (bc1 &^ bc0)
t = a[10] ^ d0
- bc4 = t<<18 | t>>(64-18)
+ bc4 = bits.RotateLeft64(t, 18)
t = a[11] ^ d1
- bc0 = t<<1 | t>>(64-1)
+ bc0 = bits.RotateLeft64(t, 1)
t = a[12] ^ d2
- bc1 = t<<6 | t>>(64-6)
+ bc1 = bits.RotateLeft64(t, 6)
t = a[13] ^ d3
- bc2 = t<<25 | t>>(64-25)
+ bc2 = bits.RotateLeft64(t, 25)
t = a[14] ^ d4
- bc3 = t<<8 | t>>(64-8)
+ bc3 = bits.RotateLeft64(t, 8)
a[10] = bc0 ^ (bc2 &^ bc1)
a[11] = bc1 ^ (bc3 &^ bc2)
a[12] = bc2 ^ (bc4 &^ bc3)
@@ -379,15 +381,15 @@ func keccakF1600(a *[25]uint64) {
a[14] = bc4 ^ (bc1 &^ bc0)
t = a[15] ^ d0
- bc1 = t<<36 | t>>(64-36)
+ bc1 = bits.RotateLeft64(t, 36)
t = a[16] ^ d1
- bc2 = t<<10 | t>>(64-10)
+ bc2 = bits.RotateLeft64(t, 10)
t = a[17] ^ d2
- bc3 = t<<15 | t>>(64-15)
+ bc3 = bits.RotateLeft64(t, 15)
t = a[18] ^ d3
- bc4 = t<<56 | t>>(64-56)
+ bc4 = bits.RotateLeft64(t, 56)
t = a[19] ^ d4
- bc0 = t<<27 | t>>(64-27)
+ bc0 = bits.RotateLeft64(t, 27)
a[15] = bc0 ^ (bc2 &^ bc1)
a[16] = bc1 ^ (bc3 &^ bc2)
a[17] = bc2 ^ (bc4 &^ bc3)
@@ -395,15 +397,15 @@ func keccakF1600(a *[25]uint64) {
a[19] = bc4 ^ (bc1 &^ bc0)
t = a[20] ^ d0
- bc3 = t<<41 | t>>(64-41)
+ bc3 = bits.RotateLeft64(t, 41)
t = a[21] ^ d1
- bc4 = t<<2 | t>>(64-2)
+ bc4 = bits.RotateLeft64(t, 2)
t = a[22] ^ d2
- bc0 = t<<62 | t>>(64-62)
+ bc0 = bits.RotateLeft64(t, 62)
t = a[23] ^ d3
- bc1 = t<<55 | t>>(64-55)
+ bc1 = bits.RotateLeft64(t, 55)
t = a[24] ^ d4
- bc2 = t<<39 | t>>(64-39)
+ bc2 = bits.RotateLeft64(t, 39)
a[20] = bc0 ^ (bc2 &^ bc1)
a[21] = bc1 ^ (bc3 &^ bc2)
a[22] = bc2 ^ (bc4 &^ bc3)
diff --git a/vendor/github.com/marten-seemann/qtls-go1-18/LICENSE b/vendor/golang.org/x/exp/LICENSE
similarity index 100%
rename from vendor/github.com/marten-seemann/qtls-go1-18/LICENSE
rename to vendor/golang.org/x/exp/LICENSE
diff --git a/vendor/golang.org/x/exp/PATENTS b/vendor/golang.org/x/exp/PATENTS
new file mode 100644
index 000000000..733099041
--- /dev/null
+++ b/vendor/golang.org/x/exp/PATENTS
@@ -0,0 +1,22 @@
+Additional IP Rights Grant (Patents)
+
+"This implementation" means the copyrightable works distributed by
+Google as part of the Go project.
+
+Google hereby grants to You a perpetual, worldwide, non-exclusive,
+no-charge, royalty-free, irrevocable (except as stated in this section)
+patent license to make, have made, use, offer to sell, sell, import,
+transfer and otherwise run, modify and propagate the contents of this
+implementation of Go, where such license applies only to those patent
+claims, both currently owned or controlled by Google and acquired in
+the future, licensable by Google that are necessarily infringed by this
+implementation of Go. This grant does not include claims that would be
+infringed only as a consequence of further modification of this
+implementation. If you or your agent or exclusive licensee institute or
+order or agree to the institution of patent litigation against any
+entity (including a cross-claim or counterclaim in a lawsuit) alleging
+that this implementation of Go or any code incorporated within this
+implementation of Go constitutes direct or contributory patent
+infringement, or inducement of patent infringement, then any patent
+rights granted to you under this License for this implementation of Go
+shall terminate as of the date such litigation is filed.
diff --git a/vendor/golang.org/x/exp/constraints/constraints.go b/vendor/golang.org/x/exp/constraints/constraints.go
new file mode 100644
index 000000000..2c033dff4
--- /dev/null
+++ b/vendor/golang.org/x/exp/constraints/constraints.go
@@ -0,0 +1,50 @@
+// Copyright 2021 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+// Package constraints defines a set of useful constraints to be used
+// with type parameters.
+package constraints
+
+// Signed is a constraint that permits any signed integer type.
+// If future releases of Go add new predeclared signed integer types,
+// this constraint will be modified to include them.
+type Signed interface {
+ ~int | ~int8 | ~int16 | ~int32 | ~int64
+}
+
+// Unsigned is a constraint that permits any unsigned integer type.
+// If future releases of Go add new predeclared unsigned integer types,
+// this constraint will be modified to include them.
+type Unsigned interface {
+ ~uint | ~uint8 | ~uint16 | ~uint32 | ~uint64 | ~uintptr
+}
+
+// Integer is a constraint that permits any integer type.
+// If future releases of Go add new predeclared integer types,
+// this constraint will be modified to include them.
+type Integer interface {
+ Signed | Unsigned
+}
+
+// Float is a constraint that permits any floating-point type.
+// If future releases of Go add new predeclared floating-point types,
+// this constraint will be modified to include them.
+type Float interface {
+ ~float32 | ~float64
+}
+
+// Complex is a constraint that permits any complex numeric type.
+// If future releases of Go add new predeclared complex numeric types,
+// this constraint will be modified to include them.
+type Complex interface {
+ ~complex64 | ~complex128
+}
+
+// Ordered is a constraint that permits any ordered type: any type
+// that supports the operators < <= >= >.
+// If future releases of Go add new ordered types,
+// this constraint will be modified to include them.
+type Ordered interface {
+ Integer | Float | ~string
+}
diff --git a/vendor/golang.org/x/exp/slices/slices.go b/vendor/golang.org/x/exp/slices/slices.go
new file mode 100644
index 000000000..cff0cd49e
--- /dev/null
+++ b/vendor/golang.org/x/exp/slices/slices.go
@@ -0,0 +1,258 @@
+// Copyright 2021 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+// Package slices defines various functions useful with slices of any type.
+// Unless otherwise specified, these functions all apply to the elements
+// of a slice at index 0 <= i < len(s).
+//
+// Note that the less function in IsSortedFunc, SortFunc, SortStableFunc requires a
+// strict weak ordering (https://en.wikipedia.org/wiki/Weak_ordering#Strict_weak_orderings),
+// or the sorting may fail to sort correctly. A common case is when sorting slices of
+// floating-point numbers containing NaN values.
+package slices
+
+import "golang.org/x/exp/constraints"
+
+// Equal reports whether two slices are equal: the same length and all
+// elements equal. If the lengths are different, Equal returns false.
+// Otherwise, the elements are compared in increasing index order, and the
+// comparison stops at the first unequal pair.
+// Floating point NaNs are not considered equal.
+func Equal[E comparable](s1, s2 []E) bool {
+ if len(s1) != len(s2) {
+ return false
+ }
+ for i := range s1 {
+ if s1[i] != s2[i] {
+ return false
+ }
+ }
+ return true
+}
+
+// EqualFunc reports whether two slices are equal using a comparison
+// function on each pair of elements. If the lengths are different,
+// EqualFunc returns false. Otherwise, the elements are compared in
+// increasing index order, and the comparison stops at the first index
+// for which eq returns false.
+func EqualFunc[E1, E2 any](s1 []E1, s2 []E2, eq func(E1, E2) bool) bool {
+ if len(s1) != len(s2) {
+ return false
+ }
+ for i, v1 := range s1 {
+ v2 := s2[i]
+ if !eq(v1, v2) {
+ return false
+ }
+ }
+ return true
+}
+
+// Compare compares the elements of s1 and s2.
+// The elements are compared sequentially, starting at index 0,
+// until one element is not equal to the other.
+// The result of comparing the first non-matching elements is returned.
+// If both slices are equal until one of them ends, the shorter slice is
+// considered less than the longer one.
+// The result is 0 if s1 == s2, -1 if s1 < s2, and +1 if s1 > s2.
+// Comparisons involving floating point NaNs are ignored.
+func Compare[E constraints.Ordered](s1, s2 []E) int {
+ s2len := len(s2)
+ for i, v1 := range s1 {
+ if i >= s2len {
+ return +1
+ }
+ v2 := s2[i]
+ switch {
+ case v1 < v2:
+ return -1
+ case v1 > v2:
+ return +1
+ }
+ }
+ if len(s1) < s2len {
+ return -1
+ }
+ return 0
+}
+
+// CompareFunc is like Compare but uses a comparison function
+// on each pair of elements. The elements are compared in increasing
+// index order, and the comparisons stop after the first time cmp
+// returns non-zero.
+// The result is the first non-zero result of cmp; if cmp always
+// returns 0 the result is 0 if len(s1) == len(s2), -1 if len(s1) < len(s2),
+// and +1 if len(s1) > len(s2).
+func CompareFunc[E1, E2 any](s1 []E1, s2 []E2, cmp func(E1, E2) int) int {
+ s2len := len(s2)
+ for i, v1 := range s1 {
+ if i >= s2len {
+ return +1
+ }
+ v2 := s2[i]
+ if c := cmp(v1, v2); c != 0 {
+ return c
+ }
+ }
+ if len(s1) < s2len {
+ return -1
+ }
+ return 0
+}
+
+// Index returns the index of the first occurrence of v in s,
+// or -1 if not present.
+func Index[E comparable](s []E, v E) int {
+ for i, vs := range s {
+ if v == vs {
+ return i
+ }
+ }
+ return -1
+}
+
+// IndexFunc returns the first index i satisfying f(s[i]),
+// or -1 if none do.
+func IndexFunc[E any](s []E, f func(E) bool) int {
+ for i, v := range s {
+ if f(v) {
+ return i
+ }
+ }
+ return -1
+}
+
+// Contains reports whether v is present in s.
+func Contains[E comparable](s []E, v E) bool {
+ return Index(s, v) >= 0
+}
+
+// ContainsFunc reports whether at least one
+// element e of s satisfies f(e).
+func ContainsFunc[E any](s []E, f func(E) bool) bool {
+ return IndexFunc(s, f) >= 0
+}
+
+// Insert inserts the values v... into s at index i,
+// returning the modified slice.
+// In the returned slice r, r[i] == v[0].
+// Insert panics if i is out of range.
+// This function is O(len(s) + len(v)).
+func Insert[S ~[]E, E any](s S, i int, v ...E) S {
+ tot := len(s) + len(v)
+ if tot <= cap(s) {
+ s2 := s[:tot]
+ copy(s2[i+len(v):], s[i:])
+ copy(s2[i:], v)
+ return s2
+ }
+ s2 := make(S, tot)
+ copy(s2, s[:i])
+ copy(s2[i:], v)
+ copy(s2[i+len(v):], s[i:])
+ return s2
+}
+
+// Delete removes the elements s[i:j] from s, returning the modified slice.
+// Delete panics if s[i:j] is not a valid slice of s.
+// Delete modifies the contents of the slice s; it does not create a new slice.
+// Delete is O(len(s)-j), so if many items must be deleted, it is better to
+// make a single call deleting them all together than to delete one at a time.
+// Delete might not modify the elements s[len(s)-(j-i):len(s)]. If those
+// elements contain pointers you might consider zeroing those elements so that
+// objects they reference can be garbage collected.
+func Delete[S ~[]E, E any](s S, i, j int) S {
+ _ = s[i:j] // bounds check
+
+ return append(s[:i], s[j:]...)
+}
+
+// Replace replaces the elements s[i:j] by the given v, and returns the
+// modified slice. Replace panics if s[i:j] is not a valid slice of s.
+func Replace[S ~[]E, E any](s S, i, j int, v ...E) S {
+ _ = s[i:j] // verify that i:j is a valid subslice
+ tot := len(s[:i]) + len(v) + len(s[j:])
+ if tot <= cap(s) {
+ s2 := s[:tot]
+ copy(s2[i+len(v):], s[j:])
+ copy(s2[i:], v)
+ return s2
+ }
+ s2 := make(S, tot)
+ copy(s2, s[:i])
+ copy(s2[i:], v)
+ copy(s2[i+len(v):], s[j:])
+ return s2
+}
+
+// Clone returns a copy of the slice.
+// The elements are copied using assignment, so this is a shallow clone.
+func Clone[S ~[]E, E any](s S) S {
+ // Preserve nil in case it matters.
+ if s == nil {
+ return nil
+ }
+ return append(S([]E{}), s...)
+}
+
+// Compact replaces consecutive runs of equal elements with a single copy.
+// This is like the uniq command found on Unix.
+// Compact modifies the contents of the slice s; it does not create a new slice.
+// When Compact discards m elements in total, it might not modify the elements
+// s[len(s)-m:len(s)]. If those elements contain pointers you might consider
+// zeroing those elements so that objects they reference can be garbage collected.
+func Compact[S ~[]E, E comparable](s S) S {
+ if len(s) < 2 {
+ return s
+ }
+ i := 1
+ last := s[0]
+ for _, v := range s[1:] {
+ if v != last {
+ s[i] = v
+ i++
+ last = v
+ }
+ }
+ return s[:i]
+}
+
+// CompactFunc is like Compact but uses a comparison function.
+func CompactFunc[S ~[]E, E any](s S, eq func(E, E) bool) S {
+ if len(s) < 2 {
+ return s
+ }
+ i := 1
+ last := s[0]
+ for _, v := range s[1:] {
+ if !eq(v, last) {
+ s[i] = v
+ i++
+ last = v
+ }
+ }
+ return s[:i]
+}
+
+// Grow increases the slice's capacity, if necessary, to guarantee space for
+// another n elements. After Grow(n), at least n elements can be appended
+// to the slice without another allocation. If n is negative or too large to
+// allocate the memory, Grow panics.
+func Grow[S ~[]E, E any](s S, n int) S {
+ if n < 0 {
+ panic("cannot be negative")
+ }
+ if n -= cap(s) - len(s); n > 0 {
+ // TODO(https://go.dev/issue/53888): Make using []E instead of S
+ // to workaround a compiler bug where the runtime.growslice optimization
+ // does not take effect. Revert when the compiler is fixed.
+ s = append([]E(s)[:cap(s)], make([]E, n)...)[:len(s)]
+ }
+ return s
+}
+
+// Clip removes unused capacity from the slice, returning s[:len(s):len(s)].
+func Clip[S ~[]E, E any](s S) S {
+ return s[:len(s):len(s)]
+}
diff --git a/vendor/golang.org/x/exp/slices/sort.go b/vendor/golang.org/x/exp/slices/sort.go
new file mode 100644
index 000000000..f14f40da7
--- /dev/null
+++ b/vendor/golang.org/x/exp/slices/sort.go
@@ -0,0 +1,126 @@
+// Copyright 2022 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+package slices
+
+import (
+ "math/bits"
+
+ "golang.org/x/exp/constraints"
+)
+
+// Sort sorts a slice of any ordered type in ascending order.
+// Sort may fail to sort correctly when sorting slices of floating-point
+// numbers containing Not-a-number (NaN) values.
+// Use slices.SortFunc(x, func(a, b float64) bool {return a < b || (math.IsNaN(a) && !math.IsNaN(b))})
+// instead if the input may contain NaNs.
+func Sort[E constraints.Ordered](x []E) {
+ n := len(x)
+ pdqsortOrdered(x, 0, n, bits.Len(uint(n)))
+}
+
+// SortFunc sorts the slice x in ascending order as determined by the less function.
+// This sort is not guaranteed to be stable.
+//
+// SortFunc requires that less is a strict weak ordering.
+// See https://en.wikipedia.org/wiki/Weak_ordering#Strict_weak_orderings.
+func SortFunc[E any](x []E, less func(a, b E) bool) {
+ n := len(x)
+ pdqsortLessFunc(x, 0, n, bits.Len(uint(n)), less)
+}
+
+// SortStableFunc sorts the slice x while keeping the original order of equal
+// elements, using less to compare elements.
+func SortStableFunc[E any](x []E, less func(a, b E) bool) {
+ stableLessFunc(x, len(x), less)
+}
+
+// IsSorted reports whether x is sorted in ascending order.
+func IsSorted[E constraints.Ordered](x []E) bool {
+ for i := len(x) - 1; i > 0; i-- {
+ if x[i] < x[i-1] {
+ return false
+ }
+ }
+ return true
+}
+
+// IsSortedFunc reports whether x is sorted in ascending order, with less as the
+// comparison function.
+func IsSortedFunc[E any](x []E, less func(a, b E) bool) bool {
+ for i := len(x) - 1; i > 0; i-- {
+ if less(x[i], x[i-1]) {
+ return false
+ }
+ }
+ return true
+}
+
+// BinarySearch searches for target in a sorted slice and returns the position
+// where target is found, or the position where target would appear in the
+// sort order; it also returns a bool saying whether the target is really found
+// in the slice. The slice must be sorted in increasing order.
+func BinarySearch[E constraints.Ordered](x []E, target E) (int, bool) {
+ // Inlining is faster than calling BinarySearchFunc with a lambda.
+ n := len(x)
+ // Define x[-1] < target and x[n] >= target.
+ // Invariant: x[i-1] < target, x[j] >= target.
+ i, j := 0, n
+ for i < j {
+ h := int(uint(i+j) >> 1) // avoid overflow when computing h
+ // i ≤ h < j
+ if x[h] < target {
+ i = h + 1 // preserves x[i-1] < target
+ } else {
+ j = h // preserves x[j] >= target
+ }
+ }
+ // i == j, x[i-1] < target, and x[j] (= x[i]) >= target => answer is i.
+ return i, i < n && x[i] == target
+}
+
+// BinarySearchFunc works like BinarySearch, but uses a custom comparison
+// function. The slice must be sorted in increasing order, where "increasing" is
+// defined by cmp. cmp(a, b) is expected to return an integer comparing the two
+// parameters: 0 if a == b, a negative number if a < b and a positive number if
+// a > b.
+func BinarySearchFunc[E, T any](x []E, target T, cmp func(E, T) int) (int, bool) {
+ n := len(x)
+ // Define cmp(x[-1], target) < 0 and cmp(x[n], target) >= 0 .
+ // Invariant: cmp(x[i - 1], target) < 0, cmp(x[j], target) >= 0.
+ i, j := 0, n
+ for i < j {
+ h := int(uint(i+j) >> 1) // avoid overflow when computing h
+ // i ≤ h < j
+ if cmp(x[h], target) < 0 {
+ i = h + 1 // preserves cmp(x[i - 1], target) < 0
+ } else {
+ j = h // preserves cmp(x[j], target) >= 0
+ }
+ }
+ // i == j, cmp(x[i-1], target) < 0, and cmp(x[j], target) (= cmp(x[i], target)) >= 0 => answer is i.
+ return i, i < n && cmp(x[i], target) == 0
+}
+
+type sortedHint int // hint for pdqsort when choosing the pivot
+
+const (
+ unknownHint sortedHint = iota
+ increasingHint
+ decreasingHint
+)
+
+// xorshift paper: https://www.jstatsoft.org/article/view/v008i14/xorshift.pdf
+type xorshift uint64
+
+func (r *xorshift) Next() uint64 {
+ *r ^= *r << 13
+ *r ^= *r >> 17
+ *r ^= *r << 5
+ return uint64(*r)
+}
+
+func nextPowerOfTwo(length int) uint {
+ return 1 << bits.Len(uint(length))
+}
diff --git a/vendor/golang.org/x/exp/slices/zsortfunc.go b/vendor/golang.org/x/exp/slices/zsortfunc.go
new file mode 100644
index 000000000..2a632476c
--- /dev/null
+++ b/vendor/golang.org/x/exp/slices/zsortfunc.go
@@ -0,0 +1,479 @@
+// Code generated by gen_sort_variants.go; DO NOT EDIT.
+
+// Copyright 2022 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+package slices
+
+// insertionSortLessFunc sorts data[a:b] using insertion sort.
+func insertionSortLessFunc[E any](data []E, a, b int, less func(a, b E) bool) {
+ for i := a + 1; i < b; i++ {
+ for j := i; j > a && less(data[j], data[j-1]); j-- {
+ data[j], data[j-1] = data[j-1], data[j]
+ }
+ }
+}
+
+// siftDownLessFunc implements the heap property on data[lo:hi].
+// first is an offset into the array where the root of the heap lies.
+func siftDownLessFunc[E any](data []E, lo, hi, first int, less func(a, b E) bool) {
+ root := lo
+ for {
+ child := 2*root + 1
+ if child >= hi {
+ break
+ }
+ if child+1 < hi && less(data[first+child], data[first+child+1]) {
+ child++
+ }
+ if !less(data[first+root], data[first+child]) {
+ return
+ }
+ data[first+root], data[first+child] = data[first+child], data[first+root]
+ root = child
+ }
+}
+
+func heapSortLessFunc[E any](data []E, a, b int, less func(a, b E) bool) {
+ first := a
+ lo := 0
+ hi := b - a
+
+ // Build heap with greatest element at top.
+ for i := (hi - 1) / 2; i >= 0; i-- {
+ siftDownLessFunc(data, i, hi, first, less)
+ }
+
+ // Pop elements, largest first, into end of data.
+ for i := hi - 1; i >= 0; i-- {
+ data[first], data[first+i] = data[first+i], data[first]
+ siftDownLessFunc(data, lo, i, first, less)
+ }
+}
+
+// pdqsortLessFunc sorts data[a:b].
+// The algorithm based on pattern-defeating quicksort(pdqsort), but without the optimizations from BlockQuicksort.
+// pdqsort paper: https://arxiv.org/pdf/2106.05123.pdf
+// C++ implementation: https://github.com/orlp/pdqsort
+// Rust implementation: https://docs.rs/pdqsort/latest/pdqsort/
+// limit is the number of allowed bad (very unbalanced) pivots before falling back to heapsort.
+func pdqsortLessFunc[E any](data []E, a, b, limit int, less func(a, b E) bool) {
+ const maxInsertion = 12
+
+ var (
+ wasBalanced = true // whether the last partitioning was reasonably balanced
+ wasPartitioned = true // whether the slice was already partitioned
+ )
+
+ for {
+ length := b - a
+
+ if length <= maxInsertion {
+ insertionSortLessFunc(data, a, b, less)
+ return
+ }
+
+ // Fall back to heapsort if too many bad choices were made.
+ if limit == 0 {
+ heapSortLessFunc(data, a, b, less)
+ return
+ }
+
+ // If the last partitioning was imbalanced, we need to breaking patterns.
+ if !wasBalanced {
+ breakPatternsLessFunc(data, a, b, less)
+ limit--
+ }
+
+ pivot, hint := choosePivotLessFunc(data, a, b, less)
+ if hint == decreasingHint {
+ reverseRangeLessFunc(data, a, b, less)
+ // The chosen pivot was pivot-a elements after the start of the array.
+ // After reversing it is pivot-a elements before the end of the array.
+ // The idea came from Rust's implementation.
+ pivot = (b - 1) - (pivot - a)
+ hint = increasingHint
+ }
+
+ // The slice is likely already sorted.
+ if wasBalanced && wasPartitioned && hint == increasingHint {
+ if partialInsertionSortLessFunc(data, a, b, less) {
+ return
+ }
+ }
+
+ // Probably the slice contains many duplicate elements, partition the slice into
+ // elements equal to and elements greater than the pivot.
+ if a > 0 && !less(data[a-1], data[pivot]) {
+ mid := partitionEqualLessFunc(data, a, b, pivot, less)
+ a = mid
+ continue
+ }
+
+ mid, alreadyPartitioned := partitionLessFunc(data, a, b, pivot, less)
+ wasPartitioned = alreadyPartitioned
+
+ leftLen, rightLen := mid-a, b-mid
+ balanceThreshold := length / 8
+ if leftLen < rightLen {
+ wasBalanced = leftLen >= balanceThreshold
+ pdqsortLessFunc(data, a, mid, limit, less)
+ a = mid + 1
+ } else {
+ wasBalanced = rightLen >= balanceThreshold
+ pdqsortLessFunc(data, mid+1, b, limit, less)
+ b = mid
+ }
+ }
+}
+
+// partitionLessFunc does one quicksort partition.
+// Let p = data[pivot]
+// Moves elements in data[a:b] around, so that data[i]=p for inewpivot.
+// On return, data[newpivot] = p
+func partitionLessFunc[E any](data []E, a, b, pivot int, less func(a, b E) bool) (newpivot int, alreadyPartitioned bool) {
+ data[a], data[pivot] = data[pivot], data[a]
+ i, j := a+1, b-1 // i and j are inclusive of the elements remaining to be partitioned
+
+ for i <= j && less(data[i], data[a]) {
+ i++
+ }
+ for i <= j && !less(data[j], data[a]) {
+ j--
+ }
+ if i > j {
+ data[j], data[a] = data[a], data[j]
+ return j, true
+ }
+ data[i], data[j] = data[j], data[i]
+ i++
+ j--
+
+ for {
+ for i <= j && less(data[i], data[a]) {
+ i++
+ }
+ for i <= j && !less(data[j], data[a]) {
+ j--
+ }
+ if i > j {
+ break
+ }
+ data[i], data[j] = data[j], data[i]
+ i++
+ j--
+ }
+ data[j], data[a] = data[a], data[j]
+ return j, false
+}
+
+// partitionEqualLessFunc partitions data[a:b] into elements equal to data[pivot] followed by elements greater than data[pivot].
+// It assumed that data[a:b] does not contain elements smaller than the data[pivot].
+func partitionEqualLessFunc[E any](data []E, a, b, pivot int, less func(a, b E) bool) (newpivot int) {
+ data[a], data[pivot] = data[pivot], data[a]
+ i, j := a+1, b-1 // i and j are inclusive of the elements remaining to be partitioned
+
+ for {
+ for i <= j && !less(data[a], data[i]) {
+ i++
+ }
+ for i <= j && less(data[a], data[j]) {
+ j--
+ }
+ if i > j {
+ break
+ }
+ data[i], data[j] = data[j], data[i]
+ i++
+ j--
+ }
+ return i
+}
+
+// partialInsertionSortLessFunc partially sorts a slice, returns true if the slice is sorted at the end.
+func partialInsertionSortLessFunc[E any](data []E, a, b int, less func(a, b E) bool) bool {
+ const (
+ maxSteps = 5 // maximum number of adjacent out-of-order pairs that will get shifted
+ shortestShifting = 50 // don't shift any elements on short arrays
+ )
+ i := a + 1
+ for j := 0; j < maxSteps; j++ {
+ for i < b && !less(data[i], data[i-1]) {
+ i++
+ }
+
+ if i == b {
+ return true
+ }
+
+ if b-a < shortestShifting {
+ return false
+ }
+
+ data[i], data[i-1] = data[i-1], data[i]
+
+ // Shift the smaller one to the left.
+ if i-a >= 2 {
+ for j := i - 1; j >= 1; j-- {
+ if !less(data[j], data[j-1]) {
+ break
+ }
+ data[j], data[j-1] = data[j-1], data[j]
+ }
+ }
+ // Shift the greater one to the right.
+ if b-i >= 2 {
+ for j := i + 1; j < b; j++ {
+ if !less(data[j], data[j-1]) {
+ break
+ }
+ data[j], data[j-1] = data[j-1], data[j]
+ }
+ }
+ }
+ return false
+}
+
+// breakPatternsLessFunc scatters some elements around in an attempt to break some patterns
+// that might cause imbalanced partitions in quicksort.
+func breakPatternsLessFunc[E any](data []E, a, b int, less func(a, b E) bool) {
+ length := b - a
+ if length >= 8 {
+ random := xorshift(length)
+ modulus := nextPowerOfTwo(length)
+
+ for idx := a + (length/4)*2 - 1; idx <= a+(length/4)*2+1; idx++ {
+ other := int(uint(random.Next()) & (modulus - 1))
+ if other >= length {
+ other -= length
+ }
+ data[idx], data[a+other] = data[a+other], data[idx]
+ }
+ }
+}
+
+// choosePivotLessFunc chooses a pivot in data[a:b].
+//
+// [0,8): chooses a static pivot.
+// [8,shortestNinther): uses the simple median-of-three method.
+// [shortestNinther,∞): uses the Tukey ninther method.
+func choosePivotLessFunc[E any](data []E, a, b int, less func(a, b E) bool) (pivot int, hint sortedHint) {
+ const (
+ shortestNinther = 50
+ maxSwaps = 4 * 3
+ )
+
+ l := b - a
+
+ var (
+ swaps int
+ i = a + l/4*1
+ j = a + l/4*2
+ k = a + l/4*3
+ )
+
+ if l >= 8 {
+ if l >= shortestNinther {
+ // Tukey ninther method, the idea came from Rust's implementation.
+ i = medianAdjacentLessFunc(data, i, &swaps, less)
+ j = medianAdjacentLessFunc(data, j, &swaps, less)
+ k = medianAdjacentLessFunc(data, k, &swaps, less)
+ }
+ // Find the median among i, j, k and stores it into j.
+ j = medianLessFunc(data, i, j, k, &swaps, less)
+ }
+
+ switch swaps {
+ case 0:
+ return j, increasingHint
+ case maxSwaps:
+ return j, decreasingHint
+ default:
+ return j, unknownHint
+ }
+}
+
+// order2LessFunc returns x,y where data[x] <= data[y], where x,y=a,b or x,y=b,a.
+func order2LessFunc[E any](data []E, a, b int, swaps *int, less func(a, b E) bool) (int, int) {
+ if less(data[b], data[a]) {
+ *swaps++
+ return b, a
+ }
+ return a, b
+}
+
+// medianLessFunc returns x where data[x] is the median of data[a],data[b],data[c], where x is a, b, or c.
+func medianLessFunc[E any](data []E, a, b, c int, swaps *int, less func(a, b E) bool) int {
+ a, b = order2LessFunc(data, a, b, swaps, less)
+ b, c = order2LessFunc(data, b, c, swaps, less)
+ a, b = order2LessFunc(data, a, b, swaps, less)
+ return b
+}
+
+// medianAdjacentLessFunc finds the median of data[a - 1], data[a], data[a + 1] and stores the index into a.
+func medianAdjacentLessFunc[E any](data []E, a int, swaps *int, less func(a, b E) bool) int {
+ return medianLessFunc(data, a-1, a, a+1, swaps, less)
+}
+
+func reverseRangeLessFunc[E any](data []E, a, b int, less func(a, b E) bool) {
+ i := a
+ j := b - 1
+ for i < j {
+ data[i], data[j] = data[j], data[i]
+ i++
+ j--
+ }
+}
+
+func swapRangeLessFunc[E any](data []E, a, b, n int, less func(a, b E) bool) {
+ for i := 0; i < n; i++ {
+ data[a+i], data[b+i] = data[b+i], data[a+i]
+ }
+}
+
+func stableLessFunc[E any](data []E, n int, less func(a, b E) bool) {
+ blockSize := 20 // must be > 0
+ a, b := 0, blockSize
+ for b <= n {
+ insertionSortLessFunc(data, a, b, less)
+ a = b
+ b += blockSize
+ }
+ insertionSortLessFunc(data, a, n, less)
+
+ for blockSize < n {
+ a, b = 0, 2*blockSize
+ for b <= n {
+ symMergeLessFunc(data, a, a+blockSize, b, less)
+ a = b
+ b += 2 * blockSize
+ }
+ if m := a + blockSize; m < n {
+ symMergeLessFunc(data, a, m, n, less)
+ }
+ blockSize *= 2
+ }
+}
+
+// symMergeLessFunc merges the two sorted subsequences data[a:m] and data[m:b] using
+// the SymMerge algorithm from Pok-Son Kim and Arne Kutzner, "Stable Minimum
+// Storage Merging by Symmetric Comparisons", in Susanne Albers and Tomasz
+// Radzik, editors, Algorithms - ESA 2004, volume 3221 of Lecture Notes in
+// Computer Science, pages 714-723. Springer, 2004.
+//
+// Let M = m-a and N = b-n. Wolog M < N.
+// The recursion depth is bound by ceil(log(N+M)).
+// The algorithm needs O(M*log(N/M + 1)) calls to data.Less.
+// The algorithm needs O((M+N)*log(M)) calls to data.Swap.
+//
+// The paper gives O((M+N)*log(M)) as the number of assignments assuming a
+// rotation algorithm which uses O(M+N+gcd(M+N)) assignments. The argumentation
+// in the paper carries through for Swap operations, especially as the block
+// swapping rotate uses only O(M+N) Swaps.
+//
+// symMerge assumes non-degenerate arguments: a < m && m < b.
+// Having the caller check this condition eliminates many leaf recursion calls,
+// which improves performance.
+func symMergeLessFunc[E any](data []E, a, m, b int, less func(a, b E) bool) {
+ // Avoid unnecessary recursions of symMerge
+ // by direct insertion of data[a] into data[m:b]
+ // if data[a:m] only contains one element.
+ if m-a == 1 {
+ // Use binary search to find the lowest index i
+ // such that data[i] >= data[a] for m <= i < b.
+ // Exit the search loop with i == b in case no such index exists.
+ i := m
+ j := b
+ for i < j {
+ h := int(uint(i+j) >> 1)
+ if less(data[h], data[a]) {
+ i = h + 1
+ } else {
+ j = h
+ }
+ }
+ // Swap values until data[a] reaches the position before i.
+ for k := a; k < i-1; k++ {
+ data[k], data[k+1] = data[k+1], data[k]
+ }
+ return
+ }
+
+ // Avoid unnecessary recursions of symMerge
+ // by direct insertion of data[m] into data[a:m]
+ // if data[m:b] only contains one element.
+ if b-m == 1 {
+ // Use binary search to find the lowest index i
+ // such that data[i] > data[m] for a <= i < m.
+ // Exit the search loop with i == m in case no such index exists.
+ i := a
+ j := m
+ for i < j {
+ h := int(uint(i+j) >> 1)
+ if !less(data[m], data[h]) {
+ i = h + 1
+ } else {
+ j = h
+ }
+ }
+ // Swap values until data[m] reaches the position i.
+ for k := m; k > i; k-- {
+ data[k], data[k-1] = data[k-1], data[k]
+ }
+ return
+ }
+
+ mid := int(uint(a+b) >> 1)
+ n := mid + m
+ var start, r int
+ if m > mid {
+ start = n - b
+ r = mid
+ } else {
+ start = a
+ r = m
+ }
+ p := n - 1
+
+ for start < r {
+ c := int(uint(start+r) >> 1)
+ if !less(data[p-c], data[c]) {
+ start = c + 1
+ } else {
+ r = c
+ }
+ }
+
+ end := n - start
+ if start < m && m < end {
+ rotateLessFunc(data, start, m, end, less)
+ }
+ if a < start && start < mid {
+ symMergeLessFunc(data, a, start, mid, less)
+ }
+ if mid < end && end < b {
+ symMergeLessFunc(data, mid, end, b, less)
+ }
+}
+
+// rotateLessFunc rotates two consecutive blocks u = data[a:m] and v = data[m:b] in data:
+// Data of the form 'x u v y' is changed to 'x v u y'.
+// rotate performs at most b-a many calls to data.Swap,
+// and it assumes non-degenerate arguments: a < m && m < b.
+func rotateLessFunc[E any](data []E, a, m, b int, less func(a, b E) bool) {
+ i := m - a
+ j := b - m
+
+ for i != j {
+ if i > j {
+ swapRangeLessFunc(data, m-i, m, j, less)
+ i -= j
+ } else {
+ swapRangeLessFunc(data, m-i, m+j-i, i, less)
+ j -= i
+ }
+ }
+ // i == j
+ swapRangeLessFunc(data, m-i, m, i, less)
+}
diff --git a/vendor/golang.org/x/exp/slices/zsortordered.go b/vendor/golang.org/x/exp/slices/zsortordered.go
new file mode 100644
index 000000000..efaa1c8b7
--- /dev/null
+++ b/vendor/golang.org/x/exp/slices/zsortordered.go
@@ -0,0 +1,481 @@
+// Code generated by gen_sort_variants.go; DO NOT EDIT.
+
+// Copyright 2022 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+package slices
+
+import "golang.org/x/exp/constraints"
+
+// insertionSortOrdered sorts data[a:b] using insertion sort.
+func insertionSortOrdered[E constraints.Ordered](data []E, a, b int) {
+ for i := a + 1; i < b; i++ {
+ for j := i; j > a && (data[j] < data[j-1]); j-- {
+ data[j], data[j-1] = data[j-1], data[j]
+ }
+ }
+}
+
+// siftDownOrdered implements the heap property on data[lo:hi].
+// first is an offset into the array where the root of the heap lies.
+func siftDownOrdered[E constraints.Ordered](data []E, lo, hi, first int) {
+ root := lo
+ for {
+ child := 2*root + 1
+ if child >= hi {
+ break
+ }
+ if child+1 < hi && (data[first+child] < data[first+child+1]) {
+ child++
+ }
+ if !(data[first+root] < data[first+child]) {
+ return
+ }
+ data[first+root], data[first+child] = data[first+child], data[first+root]
+ root = child
+ }
+}
+
+func heapSortOrdered[E constraints.Ordered](data []E, a, b int) {
+ first := a
+ lo := 0
+ hi := b - a
+
+ // Build heap with greatest element at top.
+ for i := (hi - 1) / 2; i >= 0; i-- {
+ siftDownOrdered(data, i, hi, first)
+ }
+
+ // Pop elements, largest first, into end of data.
+ for i := hi - 1; i >= 0; i-- {
+ data[first], data[first+i] = data[first+i], data[first]
+ siftDownOrdered(data, lo, i, first)
+ }
+}
+
+// pdqsortOrdered sorts data[a:b].
+// The algorithm based on pattern-defeating quicksort(pdqsort), but without the optimizations from BlockQuicksort.
+// pdqsort paper: https://arxiv.org/pdf/2106.05123.pdf
+// C++ implementation: https://github.com/orlp/pdqsort
+// Rust implementation: https://docs.rs/pdqsort/latest/pdqsort/
+// limit is the number of allowed bad (very unbalanced) pivots before falling back to heapsort.
+func pdqsortOrdered[E constraints.Ordered](data []E, a, b, limit int) {
+ const maxInsertion = 12
+
+ var (
+ wasBalanced = true // whether the last partitioning was reasonably balanced
+ wasPartitioned = true // whether the slice was already partitioned
+ )
+
+ for {
+ length := b - a
+
+ if length <= maxInsertion {
+ insertionSortOrdered(data, a, b)
+ return
+ }
+
+ // Fall back to heapsort if too many bad choices were made.
+ if limit == 0 {
+ heapSortOrdered(data, a, b)
+ return
+ }
+
+ // If the last partitioning was imbalanced, we need to breaking patterns.
+ if !wasBalanced {
+ breakPatternsOrdered(data, a, b)
+ limit--
+ }
+
+ pivot, hint := choosePivotOrdered(data, a, b)
+ if hint == decreasingHint {
+ reverseRangeOrdered(data, a, b)
+ // The chosen pivot was pivot-a elements after the start of the array.
+ // After reversing it is pivot-a elements before the end of the array.
+ // The idea came from Rust's implementation.
+ pivot = (b - 1) - (pivot - a)
+ hint = increasingHint
+ }
+
+ // The slice is likely already sorted.
+ if wasBalanced && wasPartitioned && hint == increasingHint {
+ if partialInsertionSortOrdered(data, a, b) {
+ return
+ }
+ }
+
+ // Probably the slice contains many duplicate elements, partition the slice into
+ // elements equal to and elements greater than the pivot.
+ if a > 0 && !(data[a-1] < data[pivot]) {
+ mid := partitionEqualOrdered(data, a, b, pivot)
+ a = mid
+ continue
+ }
+
+ mid, alreadyPartitioned := partitionOrdered(data, a, b, pivot)
+ wasPartitioned = alreadyPartitioned
+
+ leftLen, rightLen := mid-a, b-mid
+ balanceThreshold := length / 8
+ if leftLen < rightLen {
+ wasBalanced = leftLen >= balanceThreshold
+ pdqsortOrdered(data, a, mid, limit)
+ a = mid + 1
+ } else {
+ wasBalanced = rightLen >= balanceThreshold
+ pdqsortOrdered(data, mid+1, b, limit)
+ b = mid
+ }
+ }
+}
+
+// partitionOrdered does one quicksort partition.
+// Let p = data[pivot]
+// Moves elements in data[a:b] around, so that data[i]=p for inewpivot.
+// On return, data[newpivot] = p
+func partitionOrdered[E constraints.Ordered](data []E, a, b, pivot int) (newpivot int, alreadyPartitioned bool) {
+ data[a], data[pivot] = data[pivot], data[a]
+ i, j := a+1, b-1 // i and j are inclusive of the elements remaining to be partitioned
+
+ for i <= j && (data[i] < data[a]) {
+ i++
+ }
+ for i <= j && !(data[j] < data[a]) {
+ j--
+ }
+ if i > j {
+ data[j], data[a] = data[a], data[j]
+ return j, true
+ }
+ data[i], data[j] = data[j], data[i]
+ i++
+ j--
+
+ for {
+ for i <= j && (data[i] < data[a]) {
+ i++
+ }
+ for i <= j && !(data[j] < data[a]) {
+ j--
+ }
+ if i > j {
+ break
+ }
+ data[i], data[j] = data[j], data[i]
+ i++
+ j--
+ }
+ data[j], data[a] = data[a], data[j]
+ return j, false
+}
+
+// partitionEqualOrdered partitions data[a:b] into elements equal to data[pivot] followed by elements greater than data[pivot].
+// It assumed that data[a:b] does not contain elements smaller than the data[pivot].
+func partitionEqualOrdered[E constraints.Ordered](data []E, a, b, pivot int) (newpivot int) {
+ data[a], data[pivot] = data[pivot], data[a]
+ i, j := a+1, b-1 // i and j are inclusive of the elements remaining to be partitioned
+
+ for {
+ for i <= j && !(data[a] < data[i]) {
+ i++
+ }
+ for i <= j && (data[a] < data[j]) {
+ j--
+ }
+ if i > j {
+ break
+ }
+ data[i], data[j] = data[j], data[i]
+ i++
+ j--
+ }
+ return i
+}
+
+// partialInsertionSortOrdered partially sorts a slice, returns true if the slice is sorted at the end.
+func partialInsertionSortOrdered[E constraints.Ordered](data []E, a, b int) bool {
+ const (
+ maxSteps = 5 // maximum number of adjacent out-of-order pairs that will get shifted
+ shortestShifting = 50 // don't shift any elements on short arrays
+ )
+ i := a + 1
+ for j := 0; j < maxSteps; j++ {
+ for i < b && !(data[i] < data[i-1]) {
+ i++
+ }
+
+ if i == b {
+ return true
+ }
+
+ if b-a < shortestShifting {
+ return false
+ }
+
+ data[i], data[i-1] = data[i-1], data[i]
+
+ // Shift the smaller one to the left.
+ if i-a >= 2 {
+ for j := i - 1; j >= 1; j-- {
+ if !(data[j] < data[j-1]) {
+ break
+ }
+ data[j], data[j-1] = data[j-1], data[j]
+ }
+ }
+ // Shift the greater one to the right.
+ if b-i >= 2 {
+ for j := i + 1; j < b; j++ {
+ if !(data[j] < data[j-1]) {
+ break
+ }
+ data[j], data[j-1] = data[j-1], data[j]
+ }
+ }
+ }
+ return false
+}
+
+// breakPatternsOrdered scatters some elements around in an attempt to break some patterns
+// that might cause imbalanced partitions in quicksort.
+func breakPatternsOrdered[E constraints.Ordered](data []E, a, b int) {
+ length := b - a
+ if length >= 8 {
+ random := xorshift(length)
+ modulus := nextPowerOfTwo(length)
+
+ for idx := a + (length/4)*2 - 1; idx <= a+(length/4)*2+1; idx++ {
+ other := int(uint(random.Next()) & (modulus - 1))
+ if other >= length {
+ other -= length
+ }
+ data[idx], data[a+other] = data[a+other], data[idx]
+ }
+ }
+}
+
+// choosePivotOrdered chooses a pivot in data[a:b].
+//
+// [0,8): chooses a static pivot.
+// [8,shortestNinther): uses the simple median-of-three method.
+// [shortestNinther,∞): uses the Tukey ninther method.
+func choosePivotOrdered[E constraints.Ordered](data []E, a, b int) (pivot int, hint sortedHint) {
+ const (
+ shortestNinther = 50
+ maxSwaps = 4 * 3
+ )
+
+ l := b - a
+
+ var (
+ swaps int
+ i = a + l/4*1
+ j = a + l/4*2
+ k = a + l/4*3
+ )
+
+ if l >= 8 {
+ if l >= shortestNinther {
+ // Tukey ninther method, the idea came from Rust's implementation.
+ i = medianAdjacentOrdered(data, i, &swaps)
+ j = medianAdjacentOrdered(data, j, &swaps)
+ k = medianAdjacentOrdered(data, k, &swaps)
+ }
+ // Find the median among i, j, k and stores it into j.
+ j = medianOrdered(data, i, j, k, &swaps)
+ }
+
+ switch swaps {
+ case 0:
+ return j, increasingHint
+ case maxSwaps:
+ return j, decreasingHint
+ default:
+ return j, unknownHint
+ }
+}
+
+// order2Ordered returns x,y where data[x] <= data[y], where x,y=a,b or x,y=b,a.
+func order2Ordered[E constraints.Ordered](data []E, a, b int, swaps *int) (int, int) {
+ if data[b] < data[a] {
+ *swaps++
+ return b, a
+ }
+ return a, b
+}
+
+// medianOrdered returns x where data[x] is the median of data[a],data[b],data[c], where x is a, b, or c.
+func medianOrdered[E constraints.Ordered](data []E, a, b, c int, swaps *int) int {
+ a, b = order2Ordered(data, a, b, swaps)
+ b, c = order2Ordered(data, b, c, swaps)
+ a, b = order2Ordered(data, a, b, swaps)
+ return b
+}
+
+// medianAdjacentOrdered finds the median of data[a - 1], data[a], data[a + 1] and stores the index into a.
+func medianAdjacentOrdered[E constraints.Ordered](data []E, a int, swaps *int) int {
+ return medianOrdered(data, a-1, a, a+1, swaps)
+}
+
+func reverseRangeOrdered[E constraints.Ordered](data []E, a, b int) {
+ i := a
+ j := b - 1
+ for i < j {
+ data[i], data[j] = data[j], data[i]
+ i++
+ j--
+ }
+}
+
+func swapRangeOrdered[E constraints.Ordered](data []E, a, b, n int) {
+ for i := 0; i < n; i++ {
+ data[a+i], data[b+i] = data[b+i], data[a+i]
+ }
+}
+
+func stableOrdered[E constraints.Ordered](data []E, n int) {
+ blockSize := 20 // must be > 0
+ a, b := 0, blockSize
+ for b <= n {
+ insertionSortOrdered(data, a, b)
+ a = b
+ b += blockSize
+ }
+ insertionSortOrdered(data, a, n)
+
+ for blockSize < n {
+ a, b = 0, 2*blockSize
+ for b <= n {
+ symMergeOrdered(data, a, a+blockSize, b)
+ a = b
+ b += 2 * blockSize
+ }
+ if m := a + blockSize; m < n {
+ symMergeOrdered(data, a, m, n)
+ }
+ blockSize *= 2
+ }
+}
+
+// symMergeOrdered merges the two sorted subsequences data[a:m] and data[m:b] using
+// the SymMerge algorithm from Pok-Son Kim and Arne Kutzner, "Stable Minimum
+// Storage Merging by Symmetric Comparisons", in Susanne Albers and Tomasz
+// Radzik, editors, Algorithms - ESA 2004, volume 3221 of Lecture Notes in
+// Computer Science, pages 714-723. Springer, 2004.
+//
+// Let M = m-a and N = b-n. Wolog M < N.
+// The recursion depth is bound by ceil(log(N+M)).
+// The algorithm needs O(M*log(N/M + 1)) calls to data.Less.
+// The algorithm needs O((M+N)*log(M)) calls to data.Swap.
+//
+// The paper gives O((M+N)*log(M)) as the number of assignments assuming a
+// rotation algorithm which uses O(M+N+gcd(M+N)) assignments. The argumentation
+// in the paper carries through for Swap operations, especially as the block
+// swapping rotate uses only O(M+N) Swaps.
+//
+// symMerge assumes non-degenerate arguments: a < m && m < b.
+// Having the caller check this condition eliminates many leaf recursion calls,
+// which improves performance.
+func symMergeOrdered[E constraints.Ordered](data []E, a, m, b int) {
+ // Avoid unnecessary recursions of symMerge
+ // by direct insertion of data[a] into data[m:b]
+ // if data[a:m] only contains one element.
+ if m-a == 1 {
+ // Use binary search to find the lowest index i
+ // such that data[i] >= data[a] for m <= i < b.
+ // Exit the search loop with i == b in case no such index exists.
+ i := m
+ j := b
+ for i < j {
+ h := int(uint(i+j) >> 1)
+ if data[h] < data[a] {
+ i = h + 1
+ } else {
+ j = h
+ }
+ }
+ // Swap values until data[a] reaches the position before i.
+ for k := a; k < i-1; k++ {
+ data[k], data[k+1] = data[k+1], data[k]
+ }
+ return
+ }
+
+ // Avoid unnecessary recursions of symMerge
+ // by direct insertion of data[m] into data[a:m]
+ // if data[m:b] only contains one element.
+ if b-m == 1 {
+ // Use binary search to find the lowest index i
+ // such that data[i] > data[m] for a <= i < m.
+ // Exit the search loop with i == m in case no such index exists.
+ i := a
+ j := m
+ for i < j {
+ h := int(uint(i+j) >> 1)
+ if !(data[m] < data[h]) {
+ i = h + 1
+ } else {
+ j = h
+ }
+ }
+ // Swap values until data[m] reaches the position i.
+ for k := m; k > i; k-- {
+ data[k], data[k-1] = data[k-1], data[k]
+ }
+ return
+ }
+
+ mid := int(uint(a+b) >> 1)
+ n := mid + m
+ var start, r int
+ if m > mid {
+ start = n - b
+ r = mid
+ } else {
+ start = a
+ r = m
+ }
+ p := n - 1
+
+ for start < r {
+ c := int(uint(start+r) >> 1)
+ if !(data[p-c] < data[c]) {
+ start = c + 1
+ } else {
+ r = c
+ }
+ }
+
+ end := n - start
+ if start < m && m < end {
+ rotateOrdered(data, start, m, end)
+ }
+ if a < start && start < mid {
+ symMergeOrdered(data, a, start, mid)
+ }
+ if mid < end && end < b {
+ symMergeOrdered(data, mid, end, b)
+ }
+}
+
+// rotateOrdered rotates two consecutive blocks u = data[a:m] and v = data[m:b] in data:
+// Data of the form 'x u v y' is changed to 'x v u y'.
+// rotate performs at most b-a many calls to data.Swap,
+// and it assumes non-degenerate arguments: a < m && m < b.
+func rotateOrdered[E constraints.Ordered](data []E, a, m, b int) {
+ i := m - a
+ j := b - m
+
+ for i != j {
+ if i > j {
+ swapRangeOrdered(data, m-i, m, j)
+ i -= j
+ } else {
+ swapRangeOrdered(data, m-i, m+j-i, i)
+ j -= i
+ }
+ }
+ // i == j
+ swapRangeOrdered(data, m-i, m, i)
+}
diff --git a/vendor/golang.org/x/mod/modfile/print.go b/vendor/golang.org/x/mod/modfile/print.go
new file mode 100644
index 000000000..524f93022
--- /dev/null
+++ b/vendor/golang.org/x/mod/modfile/print.go
@@ -0,0 +1,174 @@
+// Copyright 2018 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+// Module file printer.
+
+package modfile
+
+import (
+ "bytes"
+ "fmt"
+ "strings"
+)
+
+// Format returns a go.mod file as a byte slice, formatted in standard style.
+func Format(f *FileSyntax) []byte {
+ pr := &printer{}
+ pr.file(f)
+ return pr.Bytes()
+}
+
+// A printer collects the state during printing of a file or expression.
+type printer struct {
+ bytes.Buffer // output buffer
+ comment []Comment // pending end-of-line comments
+ margin int // left margin (indent), a number of tabs
+}
+
+// printf prints to the buffer.
+func (p *printer) printf(format string, args ...interface{}) {
+ fmt.Fprintf(p, format, args...)
+}
+
+// indent returns the position on the current line, in bytes, 0-indexed.
+func (p *printer) indent() int {
+ b := p.Bytes()
+ n := 0
+ for n < len(b) && b[len(b)-1-n] != '\n' {
+ n++
+ }
+ return n
+}
+
+// newline ends the current line, flushing end-of-line comments.
+func (p *printer) newline() {
+ if len(p.comment) > 0 {
+ p.printf(" ")
+ for i, com := range p.comment {
+ if i > 0 {
+ p.trim()
+ p.printf("\n")
+ for i := 0; i < p.margin; i++ {
+ p.printf("\t")
+ }
+ }
+ p.printf("%s", strings.TrimSpace(com.Token))
+ }
+ p.comment = p.comment[:0]
+ }
+
+ p.trim()
+ p.printf("\n")
+ for i := 0; i < p.margin; i++ {
+ p.printf("\t")
+ }
+}
+
+// trim removes trailing spaces and tabs from the current line.
+func (p *printer) trim() {
+ // Remove trailing spaces and tabs from line we're about to end.
+ b := p.Bytes()
+ n := len(b)
+ for n > 0 && (b[n-1] == '\t' || b[n-1] == ' ') {
+ n--
+ }
+ p.Truncate(n)
+}
+
+// file formats the given file into the print buffer.
+func (p *printer) file(f *FileSyntax) {
+ for _, com := range f.Before {
+ p.printf("%s", strings.TrimSpace(com.Token))
+ p.newline()
+ }
+
+ for i, stmt := range f.Stmt {
+ switch x := stmt.(type) {
+ case *CommentBlock:
+ // comments already handled
+ p.expr(x)
+
+ default:
+ p.expr(x)
+ p.newline()
+ }
+
+ for _, com := range stmt.Comment().After {
+ p.printf("%s", strings.TrimSpace(com.Token))
+ p.newline()
+ }
+
+ if i+1 < len(f.Stmt) {
+ p.newline()
+ }
+ }
+}
+
+func (p *printer) expr(x Expr) {
+ // Emit line-comments preceding this expression.
+ if before := x.Comment().Before; len(before) > 0 {
+ // Want to print a line comment.
+ // Line comments must be at the current margin.
+ p.trim()
+ if p.indent() > 0 {
+ // There's other text on the line. Start a new line.
+ p.printf("\n")
+ }
+ // Re-indent to margin.
+ for i := 0; i < p.margin; i++ {
+ p.printf("\t")
+ }
+ for _, com := range before {
+ p.printf("%s", strings.TrimSpace(com.Token))
+ p.newline()
+ }
+ }
+
+ switch x := x.(type) {
+ default:
+ panic(fmt.Errorf("printer: unexpected type %T", x))
+
+ case *CommentBlock:
+ // done
+
+ case *LParen:
+ p.printf("(")
+ case *RParen:
+ p.printf(")")
+
+ case *Line:
+ p.tokens(x.Token)
+
+ case *LineBlock:
+ p.tokens(x.Token)
+ p.printf(" ")
+ p.expr(&x.LParen)
+ p.margin++
+ for _, l := range x.Line {
+ p.newline()
+ p.expr(l)
+ }
+ p.margin--
+ p.newline()
+ p.expr(&x.RParen)
+ }
+
+ // Queue end-of-line comments for printing when we
+ // reach the end of the line.
+ p.comment = append(p.comment, x.Comment().Suffix...)
+}
+
+func (p *printer) tokens(tokens []string) {
+ sep := ""
+ for _, t := range tokens {
+ if t == "," || t == ")" || t == "]" || t == "}" {
+ sep = ""
+ }
+ p.printf("%s%s", sep, t)
+ sep = " "
+ if t == "(" || t == "[" || t == "{" {
+ sep = ""
+ }
+ }
+}
diff --git a/vendor/golang.org/x/mod/modfile/read.go b/vendor/golang.org/x/mod/modfile/read.go
new file mode 100644
index 000000000..a503bc210
--- /dev/null
+++ b/vendor/golang.org/x/mod/modfile/read.go
@@ -0,0 +1,958 @@
+// Copyright 2018 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+package modfile
+
+import (
+ "bytes"
+ "errors"
+ "fmt"
+ "os"
+ "strconv"
+ "strings"
+ "unicode"
+ "unicode/utf8"
+)
+
+// A Position describes an arbitrary source position in a file, including the
+// file, line, column, and byte offset.
+type Position struct {
+ Line int // line in input (starting at 1)
+ LineRune int // rune in line (starting at 1)
+ Byte int // byte in input (starting at 0)
+}
+
+// add returns the position at the end of s, assuming it starts at p.
+func (p Position) add(s string) Position {
+ p.Byte += len(s)
+ if n := strings.Count(s, "\n"); n > 0 {
+ p.Line += n
+ s = s[strings.LastIndex(s, "\n")+1:]
+ p.LineRune = 1
+ }
+ p.LineRune += utf8.RuneCountInString(s)
+ return p
+}
+
+// An Expr represents an input element.
+type Expr interface {
+ // Span returns the start and end position of the expression,
+ // excluding leading or trailing comments.
+ Span() (start, end Position)
+
+ // Comment returns the comments attached to the expression.
+ // This method would normally be named 'Comments' but that
+ // would interfere with embedding a type of the same name.
+ Comment() *Comments
+}
+
+// A Comment represents a single // comment.
+type Comment struct {
+ Start Position
+ Token string // without trailing newline
+ Suffix bool // an end of line (not whole line) comment
+}
+
+// Comments collects the comments associated with an expression.
+type Comments struct {
+ Before []Comment // whole-line comments before this expression
+ Suffix []Comment // end-of-line comments after this expression
+
+ // For top-level expressions only, After lists whole-line
+ // comments following the expression.
+ After []Comment
+}
+
+// Comment returns the receiver. This isn't useful by itself, but
+// a Comments struct is embedded into all the expression
+// implementation types, and this gives each of those a Comment
+// method to satisfy the Expr interface.
+func (c *Comments) Comment() *Comments {
+ return c
+}
+
+// A FileSyntax represents an entire go.mod file.
+type FileSyntax struct {
+ Name string // file path
+ Comments
+ Stmt []Expr
+}
+
+func (x *FileSyntax) Span() (start, end Position) {
+ if len(x.Stmt) == 0 {
+ return
+ }
+ start, _ = x.Stmt[0].Span()
+ _, end = x.Stmt[len(x.Stmt)-1].Span()
+ return start, end
+}
+
+// addLine adds a line containing the given tokens to the file.
+//
+// If the first token of the hint matches the first token of the
+// line, the new line is added at the end of the block containing hint,
+// extracting hint into a new block if it is not yet in one.
+//
+// If the hint is non-nil buts its first token does not match,
+// the new line is added after the block containing hint
+// (or hint itself, if not in a block).
+//
+// If no hint is provided, addLine appends the line to the end of
+// the last block with a matching first token,
+// or to the end of the file if no such block exists.
+func (x *FileSyntax) addLine(hint Expr, tokens ...string) *Line {
+ if hint == nil {
+ // If no hint given, add to the last statement of the given type.
+ Loop:
+ for i := len(x.Stmt) - 1; i >= 0; i-- {
+ stmt := x.Stmt[i]
+ switch stmt := stmt.(type) {
+ case *Line:
+ if stmt.Token != nil && stmt.Token[0] == tokens[0] {
+ hint = stmt
+ break Loop
+ }
+ case *LineBlock:
+ if stmt.Token[0] == tokens[0] {
+ hint = stmt
+ break Loop
+ }
+ }
+ }
+ }
+
+ newLineAfter := func(i int) *Line {
+ new := &Line{Token: tokens}
+ if i == len(x.Stmt) {
+ x.Stmt = append(x.Stmt, new)
+ } else {
+ x.Stmt = append(x.Stmt, nil)
+ copy(x.Stmt[i+2:], x.Stmt[i+1:])
+ x.Stmt[i+1] = new
+ }
+ return new
+ }
+
+ if hint != nil {
+ for i, stmt := range x.Stmt {
+ switch stmt := stmt.(type) {
+ case *Line:
+ if stmt == hint {
+ if stmt.Token == nil || stmt.Token[0] != tokens[0] {
+ return newLineAfter(i)
+ }
+
+ // Convert line to line block.
+ stmt.InBlock = true
+ block := &LineBlock{Token: stmt.Token[:1], Line: []*Line{stmt}}
+ stmt.Token = stmt.Token[1:]
+ x.Stmt[i] = block
+ new := &Line{Token: tokens[1:], InBlock: true}
+ block.Line = append(block.Line, new)
+ return new
+ }
+
+ case *LineBlock:
+ if stmt == hint {
+ if stmt.Token[0] != tokens[0] {
+ return newLineAfter(i)
+ }
+
+ new := &Line{Token: tokens[1:], InBlock: true}
+ stmt.Line = append(stmt.Line, new)
+ return new
+ }
+
+ for j, line := range stmt.Line {
+ if line == hint {
+ if stmt.Token[0] != tokens[0] {
+ return newLineAfter(i)
+ }
+
+ // Add new line after hint within the block.
+ stmt.Line = append(stmt.Line, nil)
+ copy(stmt.Line[j+2:], stmt.Line[j+1:])
+ new := &Line{Token: tokens[1:], InBlock: true}
+ stmt.Line[j+1] = new
+ return new
+ }
+ }
+ }
+ }
+ }
+
+ new := &Line{Token: tokens}
+ x.Stmt = append(x.Stmt, new)
+ return new
+}
+
+func (x *FileSyntax) updateLine(line *Line, tokens ...string) {
+ if line.InBlock {
+ tokens = tokens[1:]
+ }
+ line.Token = tokens
+}
+
+// markRemoved modifies line so that it (and its end-of-line comment, if any)
+// will be dropped by (*FileSyntax).Cleanup.
+func (line *Line) markRemoved() {
+ line.Token = nil
+ line.Comments.Suffix = nil
+}
+
+// Cleanup cleans up the file syntax x after any edit operations.
+// To avoid quadratic behavior, (*Line).markRemoved marks the line as dead
+// by setting line.Token = nil but does not remove it from the slice
+// in which it appears. After edits have all been indicated,
+// calling Cleanup cleans out the dead lines.
+func (x *FileSyntax) Cleanup() {
+ w := 0
+ for _, stmt := range x.Stmt {
+ switch stmt := stmt.(type) {
+ case *Line:
+ if stmt.Token == nil {
+ continue
+ }
+ case *LineBlock:
+ ww := 0
+ for _, line := range stmt.Line {
+ if line.Token != nil {
+ stmt.Line[ww] = line
+ ww++
+ }
+ }
+ if ww == 0 {
+ continue
+ }
+ if ww == 1 {
+ // Collapse block into single line.
+ line := &Line{
+ Comments: Comments{
+ Before: commentsAdd(stmt.Before, stmt.Line[0].Before),
+ Suffix: commentsAdd(stmt.Line[0].Suffix, stmt.Suffix),
+ After: commentsAdd(stmt.Line[0].After, stmt.After),
+ },
+ Token: stringsAdd(stmt.Token, stmt.Line[0].Token),
+ }
+ x.Stmt[w] = line
+ w++
+ continue
+ }
+ stmt.Line = stmt.Line[:ww]
+ }
+ x.Stmt[w] = stmt
+ w++
+ }
+ x.Stmt = x.Stmt[:w]
+}
+
+func commentsAdd(x, y []Comment) []Comment {
+ return append(x[:len(x):len(x)], y...)
+}
+
+func stringsAdd(x, y []string) []string {
+ return append(x[:len(x):len(x)], y...)
+}
+
+// A CommentBlock represents a top-level block of comments separate
+// from any rule.
+type CommentBlock struct {
+ Comments
+ Start Position
+}
+
+func (x *CommentBlock) Span() (start, end Position) {
+ return x.Start, x.Start
+}
+
+// A Line is a single line of tokens.
+type Line struct {
+ Comments
+ Start Position
+ Token []string
+ InBlock bool
+ End Position
+}
+
+func (x *Line) Span() (start, end Position) {
+ return x.Start, x.End
+}
+
+// A LineBlock is a factored block of lines, like
+//
+// require (
+// "x"
+// "y"
+// )
+type LineBlock struct {
+ Comments
+ Start Position
+ LParen LParen
+ Token []string
+ Line []*Line
+ RParen RParen
+}
+
+func (x *LineBlock) Span() (start, end Position) {
+ return x.Start, x.RParen.Pos.add(")")
+}
+
+// An LParen represents the beginning of a parenthesized line block.
+// It is a place to store suffix comments.
+type LParen struct {
+ Comments
+ Pos Position
+}
+
+func (x *LParen) Span() (start, end Position) {
+ return x.Pos, x.Pos.add(")")
+}
+
+// An RParen represents the end of a parenthesized line block.
+// It is a place to store whole-line (before) comments.
+type RParen struct {
+ Comments
+ Pos Position
+}
+
+func (x *RParen) Span() (start, end Position) {
+ return x.Pos, x.Pos.add(")")
+}
+
+// An input represents a single input file being parsed.
+type input struct {
+ // Lexing state.
+ filename string // name of input file, for errors
+ complete []byte // entire input
+ remaining []byte // remaining input
+ tokenStart []byte // token being scanned to end of input
+ token token // next token to be returned by lex, peek
+ pos Position // current input position
+ comments []Comment // accumulated comments
+
+ // Parser state.
+ file *FileSyntax // returned top-level syntax tree
+ parseErrors ErrorList // errors encountered during parsing
+
+ // Comment assignment state.
+ pre []Expr // all expressions, in preorder traversal
+ post []Expr // all expressions, in postorder traversal
+}
+
+func newInput(filename string, data []byte) *input {
+ return &input{
+ filename: filename,
+ complete: data,
+ remaining: data,
+ pos: Position{Line: 1, LineRune: 1, Byte: 0},
+ }
+}
+
+// parse parses the input file.
+func parse(file string, data []byte) (f *FileSyntax, err error) {
+ // The parser panics for both routine errors like syntax errors
+ // and for programmer bugs like array index errors.
+ // Turn both into error returns. Catching bug panics is
+ // especially important when processing many files.
+ in := newInput(file, data)
+ defer func() {
+ if e := recover(); e != nil && e != &in.parseErrors {
+ in.parseErrors = append(in.parseErrors, Error{
+ Filename: in.filename,
+ Pos: in.pos,
+ Err: fmt.Errorf("internal error: %v", e),
+ })
+ }
+ if err == nil && len(in.parseErrors) > 0 {
+ err = in.parseErrors
+ }
+ }()
+
+ // Prime the lexer by reading in the first token. It will be available
+ // in the next peek() or lex() call.
+ in.readToken()
+
+ // Invoke the parser.
+ in.parseFile()
+ if len(in.parseErrors) > 0 {
+ return nil, in.parseErrors
+ }
+ in.file.Name = in.filename
+
+ // Assign comments to nearby syntax.
+ in.assignComments()
+
+ return in.file, nil
+}
+
+// Error is called to report an error.
+// Error does not return: it panics.
+func (in *input) Error(s string) {
+ in.parseErrors = append(in.parseErrors, Error{
+ Filename: in.filename,
+ Pos: in.pos,
+ Err: errors.New(s),
+ })
+ panic(&in.parseErrors)
+}
+
+// eof reports whether the input has reached end of file.
+func (in *input) eof() bool {
+ return len(in.remaining) == 0
+}
+
+// peekRune returns the next rune in the input without consuming it.
+func (in *input) peekRune() int {
+ if len(in.remaining) == 0 {
+ return 0
+ }
+ r, _ := utf8.DecodeRune(in.remaining)
+ return int(r)
+}
+
+// peekPrefix reports whether the remaining input begins with the given prefix.
+func (in *input) peekPrefix(prefix string) bool {
+ // This is like bytes.HasPrefix(in.remaining, []byte(prefix))
+ // but without the allocation of the []byte copy of prefix.
+ for i := 0; i < len(prefix); i++ {
+ if i >= len(in.remaining) || in.remaining[i] != prefix[i] {
+ return false
+ }
+ }
+ return true
+}
+
+// readRune consumes and returns the next rune in the input.
+func (in *input) readRune() int {
+ if len(in.remaining) == 0 {
+ in.Error("internal lexer error: readRune at EOF")
+ }
+ r, size := utf8.DecodeRune(in.remaining)
+ in.remaining = in.remaining[size:]
+ if r == '\n' {
+ in.pos.Line++
+ in.pos.LineRune = 1
+ } else {
+ in.pos.LineRune++
+ }
+ in.pos.Byte += size
+ return int(r)
+}
+
+type token struct {
+ kind tokenKind
+ pos Position
+ endPos Position
+ text string
+}
+
+type tokenKind int
+
+const (
+ _EOF tokenKind = -(iota + 1)
+ _EOLCOMMENT
+ _IDENT
+ _STRING
+ _COMMENT
+
+ // newlines and punctuation tokens are allowed as ASCII codes.
+)
+
+func (k tokenKind) isComment() bool {
+ return k == _COMMENT || k == _EOLCOMMENT
+}
+
+// isEOL returns whether a token terminates a line.
+func (k tokenKind) isEOL() bool {
+ return k == _EOF || k == _EOLCOMMENT || k == '\n'
+}
+
+// startToken marks the beginning of the next input token.
+// It must be followed by a call to endToken, once the token's text has
+// been consumed using readRune.
+func (in *input) startToken() {
+ in.tokenStart = in.remaining
+ in.token.text = ""
+ in.token.pos = in.pos
+}
+
+// endToken marks the end of an input token.
+// It records the actual token string in tok.text.
+// A single trailing newline (LF or CRLF) will be removed from comment tokens.
+func (in *input) endToken(kind tokenKind) {
+ in.token.kind = kind
+ text := string(in.tokenStart[:len(in.tokenStart)-len(in.remaining)])
+ if kind.isComment() {
+ if strings.HasSuffix(text, "\r\n") {
+ text = text[:len(text)-2]
+ } else {
+ text = strings.TrimSuffix(text, "\n")
+ }
+ }
+ in.token.text = text
+ in.token.endPos = in.pos
+}
+
+// peek returns the kind of the next token returned by lex.
+func (in *input) peek() tokenKind {
+ return in.token.kind
+}
+
+// lex is called from the parser to obtain the next input token.
+func (in *input) lex() token {
+ tok := in.token
+ in.readToken()
+ return tok
+}
+
+// readToken lexes the next token from the text and stores it in in.token.
+func (in *input) readToken() {
+ // Skip past spaces, stopping at non-space or EOF.
+ for !in.eof() {
+ c := in.peekRune()
+ if c == ' ' || c == '\t' || c == '\r' {
+ in.readRune()
+ continue
+ }
+
+ // Comment runs to end of line.
+ if in.peekPrefix("//") {
+ in.startToken()
+
+ // Is this comment the only thing on its line?
+ // Find the last \n before this // and see if it's all
+ // spaces from there to here.
+ i := bytes.LastIndex(in.complete[:in.pos.Byte], []byte("\n"))
+ suffix := len(bytes.TrimSpace(in.complete[i+1:in.pos.Byte])) > 0
+ in.readRune()
+ in.readRune()
+
+ // Consume comment.
+ for len(in.remaining) > 0 && in.readRune() != '\n' {
+ }
+
+ // If we are at top level (not in a statement), hand the comment to
+ // the parser as a _COMMENT token. The grammar is written
+ // to handle top-level comments itself.
+ if !suffix {
+ in.endToken(_COMMENT)
+ return
+ }
+
+ // Otherwise, save comment for later attachment to syntax tree.
+ in.endToken(_EOLCOMMENT)
+ in.comments = append(in.comments, Comment{in.token.pos, in.token.text, suffix})
+ return
+ }
+
+ if in.peekPrefix("/*") {
+ in.Error("mod files must use // comments (not /* */ comments)")
+ }
+
+ // Found non-space non-comment.
+ break
+ }
+
+ // Found the beginning of the next token.
+ in.startToken()
+
+ // End of file.
+ if in.eof() {
+ in.endToken(_EOF)
+ return
+ }
+
+ // Punctuation tokens.
+ switch c := in.peekRune(); c {
+ case '\n', '(', ')', '[', ']', '{', '}', ',':
+ in.readRune()
+ in.endToken(tokenKind(c))
+ return
+
+ case '"', '`': // quoted string
+ quote := c
+ in.readRune()
+ for {
+ if in.eof() {
+ in.pos = in.token.pos
+ in.Error("unexpected EOF in string")
+ }
+ if in.peekRune() == '\n' {
+ in.Error("unexpected newline in string")
+ }
+ c := in.readRune()
+ if c == quote {
+ break
+ }
+ if c == '\\' && quote != '`' {
+ if in.eof() {
+ in.pos = in.token.pos
+ in.Error("unexpected EOF in string")
+ }
+ in.readRune()
+ }
+ }
+ in.endToken(_STRING)
+ return
+ }
+
+ // Checked all punctuation. Must be identifier token.
+ if c := in.peekRune(); !isIdent(c) {
+ in.Error(fmt.Sprintf("unexpected input character %#q", c))
+ }
+
+ // Scan over identifier.
+ for isIdent(in.peekRune()) {
+ if in.peekPrefix("//") {
+ break
+ }
+ if in.peekPrefix("/*") {
+ in.Error("mod files must use // comments (not /* */ comments)")
+ }
+ in.readRune()
+ }
+ in.endToken(_IDENT)
+}
+
+// isIdent reports whether c is an identifier rune.
+// We treat most printable runes as identifier runes, except for a handful of
+// ASCII punctuation characters.
+func isIdent(c int) bool {
+ switch r := rune(c); r {
+ case ' ', '(', ')', '[', ']', '{', '}', ',':
+ return false
+ default:
+ return !unicode.IsSpace(r) && unicode.IsPrint(r)
+ }
+}
+
+// Comment assignment.
+// We build two lists of all subexpressions, preorder and postorder.
+// The preorder list is ordered by start location, with outer expressions first.
+// The postorder list is ordered by end location, with outer expressions last.
+// We use the preorder list to assign each whole-line comment to the syntax
+// immediately following it, and we use the postorder list to assign each
+// end-of-line comment to the syntax immediately preceding it.
+
+// order walks the expression adding it and its subexpressions to the
+// preorder and postorder lists.
+func (in *input) order(x Expr) {
+ if x != nil {
+ in.pre = append(in.pre, x)
+ }
+ switch x := x.(type) {
+ default:
+ panic(fmt.Errorf("order: unexpected type %T", x))
+ case nil:
+ // nothing
+ case *LParen, *RParen:
+ // nothing
+ case *CommentBlock:
+ // nothing
+ case *Line:
+ // nothing
+ case *FileSyntax:
+ for _, stmt := range x.Stmt {
+ in.order(stmt)
+ }
+ case *LineBlock:
+ in.order(&x.LParen)
+ for _, l := range x.Line {
+ in.order(l)
+ }
+ in.order(&x.RParen)
+ }
+ if x != nil {
+ in.post = append(in.post, x)
+ }
+}
+
+// assignComments attaches comments to nearby syntax.
+func (in *input) assignComments() {
+ const debug = false
+
+ // Generate preorder and postorder lists.
+ in.order(in.file)
+
+ // Split into whole-line comments and suffix comments.
+ var line, suffix []Comment
+ for _, com := range in.comments {
+ if com.Suffix {
+ suffix = append(suffix, com)
+ } else {
+ line = append(line, com)
+ }
+ }
+
+ if debug {
+ for _, c := range line {
+ fmt.Fprintf(os.Stderr, "LINE %q :%d:%d #%d\n", c.Token, c.Start.Line, c.Start.LineRune, c.Start.Byte)
+ }
+ }
+
+ // Assign line comments to syntax immediately following.
+ for _, x := range in.pre {
+ start, _ := x.Span()
+ if debug {
+ fmt.Fprintf(os.Stderr, "pre %T :%d:%d #%d\n", x, start.Line, start.LineRune, start.Byte)
+ }
+ xcom := x.Comment()
+ for len(line) > 0 && start.Byte >= line[0].Start.Byte {
+ if debug {
+ fmt.Fprintf(os.Stderr, "ASSIGN LINE %q #%d\n", line[0].Token, line[0].Start.Byte)
+ }
+ xcom.Before = append(xcom.Before, line[0])
+ line = line[1:]
+ }
+ }
+
+ // Remaining line comments go at end of file.
+ in.file.After = append(in.file.After, line...)
+
+ if debug {
+ for _, c := range suffix {
+ fmt.Fprintf(os.Stderr, "SUFFIX %q :%d:%d #%d\n", c.Token, c.Start.Line, c.Start.LineRune, c.Start.Byte)
+ }
+ }
+
+ // Assign suffix comments to syntax immediately before.
+ for i := len(in.post) - 1; i >= 0; i-- {
+ x := in.post[i]
+
+ start, end := x.Span()
+ if debug {
+ fmt.Fprintf(os.Stderr, "post %T :%d:%d #%d :%d:%d #%d\n", x, start.Line, start.LineRune, start.Byte, end.Line, end.LineRune, end.Byte)
+ }
+
+ // Do not assign suffix comments to end of line block or whole file.
+ // Instead assign them to the last element inside.
+ switch x.(type) {
+ case *FileSyntax:
+ continue
+ }
+
+ // Do not assign suffix comments to something that starts
+ // on an earlier line, so that in
+ //
+ // x ( y
+ // z ) // comment
+ //
+ // we assign the comment to z and not to x ( ... ).
+ if start.Line != end.Line {
+ continue
+ }
+ xcom := x.Comment()
+ for len(suffix) > 0 && end.Byte <= suffix[len(suffix)-1].Start.Byte {
+ if debug {
+ fmt.Fprintf(os.Stderr, "ASSIGN SUFFIX %q #%d\n", suffix[len(suffix)-1].Token, suffix[len(suffix)-1].Start.Byte)
+ }
+ xcom.Suffix = append(xcom.Suffix, suffix[len(suffix)-1])
+ suffix = suffix[:len(suffix)-1]
+ }
+ }
+
+ // We assigned suffix comments in reverse.
+ // If multiple suffix comments were appended to the same
+ // expression node, they are now in reverse. Fix that.
+ for _, x := range in.post {
+ reverseComments(x.Comment().Suffix)
+ }
+
+ // Remaining suffix comments go at beginning of file.
+ in.file.Before = append(in.file.Before, suffix...)
+}
+
+// reverseComments reverses the []Comment list.
+func reverseComments(list []Comment) {
+ for i, j := 0, len(list)-1; i < j; i, j = i+1, j-1 {
+ list[i], list[j] = list[j], list[i]
+ }
+}
+
+func (in *input) parseFile() {
+ in.file = new(FileSyntax)
+ var cb *CommentBlock
+ for {
+ switch in.peek() {
+ case '\n':
+ in.lex()
+ if cb != nil {
+ in.file.Stmt = append(in.file.Stmt, cb)
+ cb = nil
+ }
+ case _COMMENT:
+ tok := in.lex()
+ if cb == nil {
+ cb = &CommentBlock{Start: tok.pos}
+ }
+ com := cb.Comment()
+ com.Before = append(com.Before, Comment{Start: tok.pos, Token: tok.text})
+ case _EOF:
+ if cb != nil {
+ in.file.Stmt = append(in.file.Stmt, cb)
+ }
+ return
+ default:
+ in.parseStmt()
+ if cb != nil {
+ in.file.Stmt[len(in.file.Stmt)-1].Comment().Before = cb.Before
+ cb = nil
+ }
+ }
+ }
+}
+
+func (in *input) parseStmt() {
+ tok := in.lex()
+ start := tok.pos
+ end := tok.endPos
+ tokens := []string{tok.text}
+ for {
+ tok := in.lex()
+ switch {
+ case tok.kind.isEOL():
+ in.file.Stmt = append(in.file.Stmt, &Line{
+ Start: start,
+ Token: tokens,
+ End: end,
+ })
+ return
+
+ case tok.kind == '(':
+ if next := in.peek(); next.isEOL() {
+ // Start of block: no more tokens on this line.
+ in.file.Stmt = append(in.file.Stmt, in.parseLineBlock(start, tokens, tok))
+ return
+ } else if next == ')' {
+ rparen := in.lex()
+ if in.peek().isEOL() {
+ // Empty block.
+ in.lex()
+ in.file.Stmt = append(in.file.Stmt, &LineBlock{
+ Start: start,
+ Token: tokens,
+ LParen: LParen{Pos: tok.pos},
+ RParen: RParen{Pos: rparen.pos},
+ })
+ return
+ }
+ // '( )' in the middle of the line, not a block.
+ tokens = append(tokens, tok.text, rparen.text)
+ } else {
+ // '(' in the middle of the line, not a block.
+ tokens = append(tokens, tok.text)
+ }
+
+ default:
+ tokens = append(tokens, tok.text)
+ end = tok.endPos
+ }
+ }
+}
+
+func (in *input) parseLineBlock(start Position, token []string, lparen token) *LineBlock {
+ x := &LineBlock{
+ Start: start,
+ Token: token,
+ LParen: LParen{Pos: lparen.pos},
+ }
+ var comments []Comment
+ for {
+ switch in.peek() {
+ case _EOLCOMMENT:
+ // Suffix comment, will be attached later by assignComments.
+ in.lex()
+ case '\n':
+ // Blank line. Add an empty comment to preserve it.
+ in.lex()
+ if len(comments) == 0 && len(x.Line) > 0 || len(comments) > 0 && comments[len(comments)-1].Token != "" {
+ comments = append(comments, Comment{})
+ }
+ case _COMMENT:
+ tok := in.lex()
+ comments = append(comments, Comment{Start: tok.pos, Token: tok.text})
+ case _EOF:
+ in.Error(fmt.Sprintf("syntax error (unterminated block started at %s:%d:%d)", in.filename, x.Start.Line, x.Start.LineRune))
+ case ')':
+ rparen := in.lex()
+ x.RParen.Before = comments
+ x.RParen.Pos = rparen.pos
+ if !in.peek().isEOL() {
+ in.Error("syntax error (expected newline after closing paren)")
+ }
+ in.lex()
+ return x
+ default:
+ l := in.parseLine()
+ x.Line = append(x.Line, l)
+ l.Comment().Before = comments
+ comments = nil
+ }
+ }
+}
+
+func (in *input) parseLine() *Line {
+ tok := in.lex()
+ if tok.kind.isEOL() {
+ in.Error("internal parse error: parseLine at end of line")
+ }
+ start := tok.pos
+ end := tok.endPos
+ tokens := []string{tok.text}
+ for {
+ tok := in.lex()
+ if tok.kind.isEOL() {
+ return &Line{
+ Start: start,
+ Token: tokens,
+ End: end,
+ InBlock: true,
+ }
+ }
+ tokens = append(tokens, tok.text)
+ end = tok.endPos
+ }
+}
+
+var (
+ slashSlash = []byte("//")
+ moduleStr = []byte("module")
+)
+
+// ModulePath returns the module path from the gomod file text.
+// If it cannot find a module path, it returns an empty string.
+// It is tolerant of unrelated problems in the go.mod file.
+func ModulePath(mod []byte) string {
+ for len(mod) > 0 {
+ line := mod
+ mod = nil
+ if i := bytes.IndexByte(line, '\n'); i >= 0 {
+ line, mod = line[:i], line[i+1:]
+ }
+ if i := bytes.Index(line, slashSlash); i >= 0 {
+ line = line[:i]
+ }
+ line = bytes.TrimSpace(line)
+ if !bytes.HasPrefix(line, moduleStr) {
+ continue
+ }
+ line = line[len(moduleStr):]
+ n := len(line)
+ line = bytes.TrimSpace(line)
+ if len(line) == n || len(line) == 0 {
+ continue
+ }
+
+ if line[0] == '"' || line[0] == '`' {
+ p, err := strconv.Unquote(string(line))
+ if err != nil {
+ return "" // malformed quoted string or multiline module path
+ }
+ return p
+ }
+
+ return string(line)
+ }
+ return "" // missing module path
+}
diff --git a/vendor/golang.org/x/mod/modfile/rule.go b/vendor/golang.org/x/mod/modfile/rule.go
new file mode 100644
index 000000000..6bcde8fab
--- /dev/null
+++ b/vendor/golang.org/x/mod/modfile/rule.go
@@ -0,0 +1,1559 @@
+// Copyright 2018 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+// Package modfile implements a parser and formatter for go.mod files.
+//
+// The go.mod syntax is described in
+// https://golang.org/cmd/go/#hdr-The_go_mod_file.
+//
+// The Parse and ParseLax functions both parse a go.mod file and return an
+// abstract syntax tree. ParseLax ignores unknown statements and may be used to
+// parse go.mod files that may have been developed with newer versions of Go.
+//
+// The File struct returned by Parse and ParseLax represent an abstract
+// go.mod file. File has several methods like AddNewRequire and DropReplace
+// that can be used to programmatically edit a file.
+//
+// The Format function formats a File back to a byte slice which can be
+// written to a file.
+package modfile
+
+import (
+ "errors"
+ "fmt"
+ "path/filepath"
+ "sort"
+ "strconv"
+ "strings"
+ "unicode"
+
+ "golang.org/x/mod/internal/lazyregexp"
+ "golang.org/x/mod/module"
+ "golang.org/x/mod/semver"
+)
+
+// A File is the parsed, interpreted form of a go.mod file.
+type File struct {
+ Module *Module
+ Go *Go
+ Require []*Require
+ Exclude []*Exclude
+ Replace []*Replace
+ Retract []*Retract
+
+ Syntax *FileSyntax
+}
+
+// A Module is the module statement.
+type Module struct {
+ Mod module.Version
+ Deprecated string
+ Syntax *Line
+}
+
+// A Go is the go statement.
+type Go struct {
+ Version string // "1.23"
+ Syntax *Line
+}
+
+// An Exclude is a single exclude statement.
+type Exclude struct {
+ Mod module.Version
+ Syntax *Line
+}
+
+// A Replace is a single replace statement.
+type Replace struct {
+ Old module.Version
+ New module.Version
+ Syntax *Line
+}
+
+// A Retract is a single retract statement.
+type Retract struct {
+ VersionInterval
+ Rationale string
+ Syntax *Line
+}
+
+// A VersionInterval represents a range of versions with upper and lower bounds.
+// Intervals are closed: both bounds are included. When Low is equal to High,
+// the interval may refer to a single version ('v1.2.3') or an interval
+// ('[v1.2.3, v1.2.3]'); both have the same representation.
+type VersionInterval struct {
+ Low, High string
+}
+
+// A Require is a single require statement.
+type Require struct {
+ Mod module.Version
+ Indirect bool // has "// indirect" comment
+ Syntax *Line
+}
+
+func (r *Require) markRemoved() {
+ r.Syntax.markRemoved()
+ *r = Require{}
+}
+
+func (r *Require) setVersion(v string) {
+ r.Mod.Version = v
+
+ if line := r.Syntax; len(line.Token) > 0 {
+ if line.InBlock {
+ // If the line is preceded by an empty line, remove it; see
+ // https://golang.org/issue/33779.
+ if len(line.Comments.Before) == 1 && len(line.Comments.Before[0].Token) == 0 {
+ line.Comments.Before = line.Comments.Before[:0]
+ }
+ if len(line.Token) >= 2 { // example.com v1.2.3
+ line.Token[1] = v
+ }
+ } else {
+ if len(line.Token) >= 3 { // require example.com v1.2.3
+ line.Token[2] = v
+ }
+ }
+ }
+}
+
+// setIndirect sets line to have (or not have) a "// indirect" comment.
+func (r *Require) setIndirect(indirect bool) {
+ r.Indirect = indirect
+ line := r.Syntax
+ if isIndirect(line) == indirect {
+ return
+ }
+ if indirect {
+ // Adding comment.
+ if len(line.Suffix) == 0 {
+ // New comment.
+ line.Suffix = []Comment{{Token: "// indirect", Suffix: true}}
+ return
+ }
+
+ com := &line.Suffix[0]
+ text := strings.TrimSpace(strings.TrimPrefix(com.Token, string(slashSlash)))
+ if text == "" {
+ // Empty comment.
+ com.Token = "// indirect"
+ return
+ }
+
+ // Insert at beginning of existing comment.
+ com.Token = "// indirect; " + text
+ return
+ }
+
+ // Removing comment.
+ f := strings.TrimSpace(strings.TrimPrefix(line.Suffix[0].Token, string(slashSlash)))
+ if f == "indirect" {
+ // Remove whole comment.
+ line.Suffix = nil
+ return
+ }
+
+ // Remove comment prefix.
+ com := &line.Suffix[0]
+ i := strings.Index(com.Token, "indirect;")
+ com.Token = "//" + com.Token[i+len("indirect;"):]
+}
+
+// isIndirect reports whether line has a "// indirect" comment,
+// meaning it is in go.mod only for its effect on indirect dependencies,
+// so that it can be dropped entirely once the effective version of the
+// indirect dependency reaches the given minimum version.
+func isIndirect(line *Line) bool {
+ if len(line.Suffix) == 0 {
+ return false
+ }
+ f := strings.Fields(strings.TrimPrefix(line.Suffix[0].Token, string(slashSlash)))
+ return (len(f) == 1 && f[0] == "indirect" || len(f) > 1 && f[0] == "indirect;")
+}
+
+func (f *File) AddModuleStmt(path string) error {
+ if f.Syntax == nil {
+ f.Syntax = new(FileSyntax)
+ }
+ if f.Module == nil {
+ f.Module = &Module{
+ Mod: module.Version{Path: path},
+ Syntax: f.Syntax.addLine(nil, "module", AutoQuote(path)),
+ }
+ } else {
+ f.Module.Mod.Path = path
+ f.Syntax.updateLine(f.Module.Syntax, "module", AutoQuote(path))
+ }
+ return nil
+}
+
+func (f *File) AddComment(text string) {
+ if f.Syntax == nil {
+ f.Syntax = new(FileSyntax)
+ }
+ f.Syntax.Stmt = append(f.Syntax.Stmt, &CommentBlock{
+ Comments: Comments{
+ Before: []Comment{
+ {
+ Token: text,
+ },
+ },
+ },
+ })
+}
+
+type VersionFixer func(path, version string) (string, error)
+
+// errDontFix is returned by a VersionFixer to indicate the version should be
+// left alone, even if it's not canonical.
+var dontFixRetract VersionFixer = func(_, vers string) (string, error) {
+ return vers, nil
+}
+
+// Parse parses and returns a go.mod file.
+//
+// file is the name of the file, used in positions and errors.
+//
+// data is the content of the file.
+//
+// fix is an optional function that canonicalizes module versions.
+// If fix is nil, all module versions must be canonical (module.CanonicalVersion
+// must return the same string).
+func Parse(file string, data []byte, fix VersionFixer) (*File, error) {
+ return parseToFile(file, data, fix, true)
+}
+
+// ParseLax is like Parse but ignores unknown statements.
+// It is used when parsing go.mod files other than the main module,
+// under the theory that most statement types we add in the future will
+// only apply in the main module, like exclude and replace,
+// and so we get better gradual deployments if old go commands
+// simply ignore those statements when found in go.mod files
+// in dependencies.
+func ParseLax(file string, data []byte, fix VersionFixer) (*File, error) {
+ return parseToFile(file, data, fix, false)
+}
+
+func parseToFile(file string, data []byte, fix VersionFixer, strict bool) (parsed *File, err error) {
+ fs, err := parse(file, data)
+ if err != nil {
+ return nil, err
+ }
+ f := &File{
+ Syntax: fs,
+ }
+ var errs ErrorList
+
+ // fix versions in retract directives after the file is parsed.
+ // We need the module path to fix versions, and it might be at the end.
+ defer func() {
+ oldLen := len(errs)
+ f.fixRetract(fix, &errs)
+ if len(errs) > oldLen {
+ parsed, err = nil, errs
+ }
+ }()
+
+ for _, x := range fs.Stmt {
+ switch x := x.(type) {
+ case *Line:
+ f.add(&errs, nil, x, x.Token[0], x.Token[1:], fix, strict)
+
+ case *LineBlock:
+ if len(x.Token) > 1 {
+ if strict {
+ errs = append(errs, Error{
+ Filename: file,
+ Pos: x.Start,
+ Err: fmt.Errorf("unknown block type: %s", strings.Join(x.Token, " ")),
+ })
+ }
+ continue
+ }
+ switch x.Token[0] {
+ default:
+ if strict {
+ errs = append(errs, Error{
+ Filename: file,
+ Pos: x.Start,
+ Err: fmt.Errorf("unknown block type: %s", strings.Join(x.Token, " ")),
+ })
+ }
+ continue
+ case "module", "require", "exclude", "replace", "retract":
+ for _, l := range x.Line {
+ f.add(&errs, x, l, x.Token[0], l.Token, fix, strict)
+ }
+ }
+ }
+ }
+
+ if len(errs) > 0 {
+ return nil, errs
+ }
+ return f, nil
+}
+
+var GoVersionRE = lazyregexp.New(`^([1-9][0-9]*)\.(0|[1-9][0-9]*)$`)
+var laxGoVersionRE = lazyregexp.New(`^v?(([1-9][0-9]*)\.(0|[1-9][0-9]*))([^0-9].*)$`)
+
+func (f *File) add(errs *ErrorList, block *LineBlock, line *Line, verb string, args []string, fix VersionFixer, strict bool) {
+ // If strict is false, this module is a dependency.
+ // We ignore all unknown directives as well as main-module-only
+ // directives like replace and exclude. It will work better for
+ // forward compatibility if we can depend on modules that have unknown
+ // statements (presumed relevant only when acting as the main module)
+ // and simply ignore those statements.
+ if !strict {
+ switch verb {
+ case "go", "module", "retract", "require":
+ // want these even for dependency go.mods
+ default:
+ return
+ }
+ }
+
+ wrapModPathError := func(modPath string, err error) {
+ *errs = append(*errs, Error{
+ Filename: f.Syntax.Name,
+ Pos: line.Start,
+ ModPath: modPath,
+ Verb: verb,
+ Err: err,
+ })
+ }
+ wrapError := func(err error) {
+ *errs = append(*errs, Error{
+ Filename: f.Syntax.Name,
+ Pos: line.Start,
+ Err: err,
+ })
+ }
+ errorf := func(format string, args ...interface{}) {
+ wrapError(fmt.Errorf(format, args...))
+ }
+
+ switch verb {
+ default:
+ errorf("unknown directive: %s", verb)
+
+ case "go":
+ if f.Go != nil {
+ errorf("repeated go statement")
+ return
+ }
+ if len(args) != 1 {
+ errorf("go directive expects exactly one argument")
+ return
+ } else if !GoVersionRE.MatchString(args[0]) {
+ fixed := false
+ if !strict {
+ if m := laxGoVersionRE.FindStringSubmatch(args[0]); m != nil {
+ args[0] = m[1]
+ fixed = true
+ }
+ }
+ if !fixed {
+ errorf("invalid go version '%s': must match format 1.23", args[0])
+ return
+ }
+ }
+
+ f.Go = &Go{Syntax: line}
+ f.Go.Version = args[0]
+
+ case "module":
+ if f.Module != nil {
+ errorf("repeated module statement")
+ return
+ }
+ deprecated := parseDeprecation(block, line)
+ f.Module = &Module{
+ Syntax: line,
+ Deprecated: deprecated,
+ }
+ if len(args) != 1 {
+ errorf("usage: module module/path")
+ return
+ }
+ s, err := parseString(&args[0])
+ if err != nil {
+ errorf("invalid quoted string: %v", err)
+ return
+ }
+ f.Module.Mod = module.Version{Path: s}
+
+ case "require", "exclude":
+ if len(args) != 2 {
+ errorf("usage: %s module/path v1.2.3", verb)
+ return
+ }
+ s, err := parseString(&args[0])
+ if err != nil {
+ errorf("invalid quoted string: %v", err)
+ return
+ }
+ v, err := parseVersion(verb, s, &args[1], fix)
+ if err != nil {
+ wrapError(err)
+ return
+ }
+ pathMajor, err := modulePathMajor(s)
+ if err != nil {
+ wrapError(err)
+ return
+ }
+ if err := module.CheckPathMajor(v, pathMajor); err != nil {
+ wrapModPathError(s, err)
+ return
+ }
+ if verb == "require" {
+ f.Require = append(f.Require, &Require{
+ Mod: module.Version{Path: s, Version: v},
+ Syntax: line,
+ Indirect: isIndirect(line),
+ })
+ } else {
+ f.Exclude = append(f.Exclude, &Exclude{
+ Mod: module.Version{Path: s, Version: v},
+ Syntax: line,
+ })
+ }
+
+ case "replace":
+ replace, wrappederr := parseReplace(f.Syntax.Name, line, verb, args, fix)
+ if wrappederr != nil {
+ *errs = append(*errs, *wrappederr)
+ return
+ }
+ f.Replace = append(f.Replace, replace)
+
+ case "retract":
+ rationale := parseDirectiveComment(block, line)
+ vi, err := parseVersionInterval(verb, "", &args, dontFixRetract)
+ if err != nil {
+ if strict {
+ wrapError(err)
+ return
+ } else {
+ // Only report errors parsing intervals in the main module. We may
+ // support additional syntax in the future, such as open and half-open
+ // intervals. Those can't be supported now, because they break the
+ // go.mod parser, even in lax mode.
+ return
+ }
+ }
+ if len(args) > 0 && strict {
+ // In the future, there may be additional information after the version.
+ errorf("unexpected token after version: %q", args[0])
+ return
+ }
+ retract := &Retract{
+ VersionInterval: vi,
+ Rationale: rationale,
+ Syntax: line,
+ }
+ f.Retract = append(f.Retract, retract)
+ }
+}
+
+func parseReplace(filename string, line *Line, verb string, args []string, fix VersionFixer) (*Replace, *Error) {
+ wrapModPathError := func(modPath string, err error) *Error {
+ return &Error{
+ Filename: filename,
+ Pos: line.Start,
+ ModPath: modPath,
+ Verb: verb,
+ Err: err,
+ }
+ }
+ wrapError := func(err error) *Error {
+ return &Error{
+ Filename: filename,
+ Pos: line.Start,
+ Err: err,
+ }
+ }
+ errorf := func(format string, args ...interface{}) *Error {
+ return wrapError(fmt.Errorf(format, args...))
+ }
+
+ arrow := 2
+ if len(args) >= 2 && args[1] == "=>" {
+ arrow = 1
+ }
+ if len(args) < arrow+2 || len(args) > arrow+3 || args[arrow] != "=>" {
+ return nil, errorf("usage: %s module/path [v1.2.3] => other/module v1.4\n\t or %s module/path [v1.2.3] => ../local/directory", verb, verb)
+ }
+ s, err := parseString(&args[0])
+ if err != nil {
+ return nil, errorf("invalid quoted string: %v", err)
+ }
+ pathMajor, err := modulePathMajor(s)
+ if err != nil {
+ return nil, wrapModPathError(s, err)
+
+ }
+ var v string
+ if arrow == 2 {
+ v, err = parseVersion(verb, s, &args[1], fix)
+ if err != nil {
+ return nil, wrapError(err)
+ }
+ if err := module.CheckPathMajor(v, pathMajor); err != nil {
+ return nil, wrapModPathError(s, err)
+ }
+ }
+ ns, err := parseString(&args[arrow+1])
+ if err != nil {
+ return nil, errorf("invalid quoted string: %v", err)
+ }
+ nv := ""
+ if len(args) == arrow+2 {
+ if !IsDirectoryPath(ns) {
+ if strings.Contains(ns, "@") {
+ return nil, errorf("replacement module must match format 'path version', not 'path@version'")
+ }
+ return nil, errorf("replacement module without version must be directory path (rooted or starting with ./ or ../)")
+ }
+ if filepath.Separator == '/' && strings.Contains(ns, `\`) {
+ return nil, errorf("replacement directory appears to be Windows path (on a non-windows system)")
+ }
+ }
+ if len(args) == arrow+3 {
+ nv, err = parseVersion(verb, ns, &args[arrow+2], fix)
+ if err != nil {
+ return nil, wrapError(err)
+ }
+ if IsDirectoryPath(ns) {
+ return nil, errorf("replacement module directory path %q cannot have version", ns)
+
+ }
+ }
+ return &Replace{
+ Old: module.Version{Path: s, Version: v},
+ New: module.Version{Path: ns, Version: nv},
+ Syntax: line,
+ }, nil
+}
+
+// fixRetract applies fix to each retract directive in f, appending any errors
+// to errs.
+//
+// Most versions are fixed as we parse the file, but for retract directives,
+// the relevant module path is the one specified with the module directive,
+// and that might appear at the end of the file (or not at all).
+func (f *File) fixRetract(fix VersionFixer, errs *ErrorList) {
+ if fix == nil {
+ return
+ }
+ path := ""
+ if f.Module != nil {
+ path = f.Module.Mod.Path
+ }
+ var r *Retract
+ wrapError := func(err error) {
+ *errs = append(*errs, Error{
+ Filename: f.Syntax.Name,
+ Pos: r.Syntax.Start,
+ Err: err,
+ })
+ }
+
+ for _, r = range f.Retract {
+ if path == "" {
+ wrapError(errors.New("no module directive found, so retract cannot be used"))
+ return // only print the first one of these
+ }
+
+ args := r.Syntax.Token
+ if args[0] == "retract" {
+ args = args[1:]
+ }
+ vi, err := parseVersionInterval("retract", path, &args, fix)
+ if err != nil {
+ wrapError(err)
+ }
+ r.VersionInterval = vi
+ }
+}
+
+func (f *WorkFile) add(errs *ErrorList, line *Line, verb string, args []string, fix VersionFixer) {
+ wrapError := func(err error) {
+ *errs = append(*errs, Error{
+ Filename: f.Syntax.Name,
+ Pos: line.Start,
+ Err: err,
+ })
+ }
+ errorf := func(format string, args ...interface{}) {
+ wrapError(fmt.Errorf(format, args...))
+ }
+
+ switch verb {
+ default:
+ errorf("unknown directive: %s", verb)
+
+ case "go":
+ if f.Go != nil {
+ errorf("repeated go statement")
+ return
+ }
+ if len(args) != 1 {
+ errorf("go directive expects exactly one argument")
+ return
+ } else if !GoVersionRE.MatchString(args[0]) {
+ errorf("invalid go version '%s': must match format 1.23", args[0])
+ return
+ }
+
+ f.Go = &Go{Syntax: line}
+ f.Go.Version = args[0]
+
+ case "use":
+ if len(args) != 1 {
+ errorf("usage: %s local/dir", verb)
+ return
+ }
+ s, err := parseString(&args[0])
+ if err != nil {
+ errorf("invalid quoted string: %v", err)
+ return
+ }
+ f.Use = append(f.Use, &Use{
+ Path: s,
+ Syntax: line,
+ })
+
+ case "replace":
+ replace, wrappederr := parseReplace(f.Syntax.Name, line, verb, args, fix)
+ if wrappederr != nil {
+ *errs = append(*errs, *wrappederr)
+ return
+ }
+ f.Replace = append(f.Replace, replace)
+ }
+}
+
+// IsDirectoryPath reports whether the given path should be interpreted
+// as a directory path. Just like on the go command line, relative paths
+// and rooted paths are directory paths; the rest are module paths.
+func IsDirectoryPath(ns string) bool {
+ // Because go.mod files can move from one system to another,
+ // we check all known path syntaxes, both Unix and Windows.
+ return strings.HasPrefix(ns, "./") || strings.HasPrefix(ns, "../") || strings.HasPrefix(ns, "/") ||
+ strings.HasPrefix(ns, `.\`) || strings.HasPrefix(ns, `..\`) || strings.HasPrefix(ns, `\`) ||
+ len(ns) >= 2 && ('A' <= ns[0] && ns[0] <= 'Z' || 'a' <= ns[0] && ns[0] <= 'z') && ns[1] == ':'
+}
+
+// MustQuote reports whether s must be quoted in order to appear as
+// a single token in a go.mod line.
+func MustQuote(s string) bool {
+ for _, r := range s {
+ switch r {
+ case ' ', '"', '\'', '`':
+ return true
+
+ case '(', ')', '[', ']', '{', '}', ',':
+ if len(s) > 1 {
+ return true
+ }
+
+ default:
+ if !unicode.IsPrint(r) {
+ return true
+ }
+ }
+ }
+ return s == "" || strings.Contains(s, "//") || strings.Contains(s, "/*")
+}
+
+// AutoQuote returns s or, if quoting is required for s to appear in a go.mod,
+// the quotation of s.
+func AutoQuote(s string) string {
+ if MustQuote(s) {
+ return strconv.Quote(s)
+ }
+ return s
+}
+
+func parseVersionInterval(verb string, path string, args *[]string, fix VersionFixer) (VersionInterval, error) {
+ toks := *args
+ if len(toks) == 0 || toks[0] == "(" {
+ return VersionInterval{}, fmt.Errorf("expected '[' or version")
+ }
+ if toks[0] != "[" {
+ v, err := parseVersion(verb, path, &toks[0], fix)
+ if err != nil {
+ return VersionInterval{}, err
+ }
+ *args = toks[1:]
+ return VersionInterval{Low: v, High: v}, nil
+ }
+ toks = toks[1:]
+
+ if len(toks) == 0 {
+ return VersionInterval{}, fmt.Errorf("expected version after '['")
+ }
+ low, err := parseVersion(verb, path, &toks[0], fix)
+ if err != nil {
+ return VersionInterval{}, err
+ }
+ toks = toks[1:]
+
+ if len(toks) == 0 || toks[0] != "," {
+ return VersionInterval{}, fmt.Errorf("expected ',' after version")
+ }
+ toks = toks[1:]
+
+ if len(toks) == 0 {
+ return VersionInterval{}, fmt.Errorf("expected version after ','")
+ }
+ high, err := parseVersion(verb, path, &toks[0], fix)
+ if err != nil {
+ return VersionInterval{}, err
+ }
+ toks = toks[1:]
+
+ if len(toks) == 0 || toks[0] != "]" {
+ return VersionInterval{}, fmt.Errorf("expected ']' after version")
+ }
+ toks = toks[1:]
+
+ *args = toks
+ return VersionInterval{Low: low, High: high}, nil
+}
+
+func parseString(s *string) (string, error) {
+ t := *s
+ if strings.HasPrefix(t, `"`) {
+ var err error
+ if t, err = strconv.Unquote(t); err != nil {
+ return "", err
+ }
+ } else if strings.ContainsAny(t, "\"'`") {
+ // Other quotes are reserved both for possible future expansion
+ // and to avoid confusion. For example if someone types 'x'
+ // we want that to be a syntax error and not a literal x in literal quotation marks.
+ return "", fmt.Errorf("unquoted string cannot contain quote")
+ }
+ *s = AutoQuote(t)
+ return t, nil
+}
+
+var deprecatedRE = lazyregexp.New(`(?s)(?:^|\n\n)Deprecated: *(.*?)(?:$|\n\n)`)
+
+// parseDeprecation extracts the text of comments on a "module" directive and
+// extracts a deprecation message from that.
+//
+// A deprecation message is contained in a paragraph within a block of comments
+// that starts with "Deprecated:" (case sensitive). The message runs until the
+// end of the paragraph and does not include the "Deprecated:" prefix. If the
+// comment block has multiple paragraphs that start with "Deprecated:",
+// parseDeprecation returns the message from the first.
+func parseDeprecation(block *LineBlock, line *Line) string {
+ text := parseDirectiveComment(block, line)
+ m := deprecatedRE.FindStringSubmatch(text)
+ if m == nil {
+ return ""
+ }
+ return m[1]
+}
+
+// parseDirectiveComment extracts the text of comments on a directive.
+// If the directive's line does not have comments and is part of a block that
+// does have comments, the block's comments are used.
+func parseDirectiveComment(block *LineBlock, line *Line) string {
+ comments := line.Comment()
+ if block != nil && len(comments.Before) == 0 && len(comments.Suffix) == 0 {
+ comments = block.Comment()
+ }
+ groups := [][]Comment{comments.Before, comments.Suffix}
+ var lines []string
+ for _, g := range groups {
+ for _, c := range g {
+ if !strings.HasPrefix(c.Token, "//") {
+ continue // blank line
+ }
+ lines = append(lines, strings.TrimSpace(strings.TrimPrefix(c.Token, "//")))
+ }
+ }
+ return strings.Join(lines, "\n")
+}
+
+type ErrorList []Error
+
+func (e ErrorList) Error() string {
+ errStrs := make([]string, len(e))
+ for i, err := range e {
+ errStrs[i] = err.Error()
+ }
+ return strings.Join(errStrs, "\n")
+}
+
+type Error struct {
+ Filename string
+ Pos Position
+ Verb string
+ ModPath string
+ Err error
+}
+
+func (e *Error) Error() string {
+ var pos string
+ if e.Pos.LineRune > 1 {
+ // Don't print LineRune if it's 1 (beginning of line).
+ // It's always 1 except in scanner errors, which are rare.
+ pos = fmt.Sprintf("%s:%d:%d: ", e.Filename, e.Pos.Line, e.Pos.LineRune)
+ } else if e.Pos.Line > 0 {
+ pos = fmt.Sprintf("%s:%d: ", e.Filename, e.Pos.Line)
+ } else if e.Filename != "" {
+ pos = fmt.Sprintf("%s: ", e.Filename)
+ }
+
+ var directive string
+ if e.ModPath != "" {
+ directive = fmt.Sprintf("%s %s: ", e.Verb, e.ModPath)
+ } else if e.Verb != "" {
+ directive = fmt.Sprintf("%s: ", e.Verb)
+ }
+
+ return pos + directive + e.Err.Error()
+}
+
+func (e *Error) Unwrap() error { return e.Err }
+
+func parseVersion(verb string, path string, s *string, fix VersionFixer) (string, error) {
+ t, err := parseString(s)
+ if err != nil {
+ return "", &Error{
+ Verb: verb,
+ ModPath: path,
+ Err: &module.InvalidVersionError{
+ Version: *s,
+ Err: err,
+ },
+ }
+ }
+ if fix != nil {
+ fixed, err := fix(path, t)
+ if err != nil {
+ if err, ok := err.(*module.ModuleError); ok {
+ return "", &Error{
+ Verb: verb,
+ ModPath: path,
+ Err: err.Err,
+ }
+ }
+ return "", err
+ }
+ t = fixed
+ } else {
+ cv := module.CanonicalVersion(t)
+ if cv == "" {
+ return "", &Error{
+ Verb: verb,
+ ModPath: path,
+ Err: &module.InvalidVersionError{
+ Version: t,
+ Err: errors.New("must be of the form v1.2.3"),
+ },
+ }
+ }
+ t = cv
+ }
+ *s = t
+ return *s, nil
+}
+
+func modulePathMajor(path string) (string, error) {
+ _, major, ok := module.SplitPathVersion(path)
+ if !ok {
+ return "", fmt.Errorf("invalid module path")
+ }
+ return major, nil
+}
+
+func (f *File) Format() ([]byte, error) {
+ return Format(f.Syntax), nil
+}
+
+// Cleanup cleans up the file f after any edit operations.
+// To avoid quadratic behavior, modifications like DropRequire
+// clear the entry but do not remove it from the slice.
+// Cleanup cleans out all the cleared entries.
+func (f *File) Cleanup() {
+ w := 0
+ for _, r := range f.Require {
+ if r.Mod.Path != "" {
+ f.Require[w] = r
+ w++
+ }
+ }
+ f.Require = f.Require[:w]
+
+ w = 0
+ for _, x := range f.Exclude {
+ if x.Mod.Path != "" {
+ f.Exclude[w] = x
+ w++
+ }
+ }
+ f.Exclude = f.Exclude[:w]
+
+ w = 0
+ for _, r := range f.Replace {
+ if r.Old.Path != "" {
+ f.Replace[w] = r
+ w++
+ }
+ }
+ f.Replace = f.Replace[:w]
+
+ w = 0
+ for _, r := range f.Retract {
+ if r.Low != "" || r.High != "" {
+ f.Retract[w] = r
+ w++
+ }
+ }
+ f.Retract = f.Retract[:w]
+
+ f.Syntax.Cleanup()
+}
+
+func (f *File) AddGoStmt(version string) error {
+ if !GoVersionRE.MatchString(version) {
+ return fmt.Errorf("invalid language version string %q", version)
+ }
+ if f.Go == nil {
+ var hint Expr
+ if f.Module != nil && f.Module.Syntax != nil {
+ hint = f.Module.Syntax
+ }
+ f.Go = &Go{
+ Version: version,
+ Syntax: f.Syntax.addLine(hint, "go", version),
+ }
+ } else {
+ f.Go.Version = version
+ f.Syntax.updateLine(f.Go.Syntax, "go", version)
+ }
+ return nil
+}
+
+// AddRequire sets the first require line for path to version vers,
+// preserving any existing comments for that line and removing all
+// other lines for path.
+//
+// If no line currently exists for path, AddRequire adds a new line
+// at the end of the last require block.
+func (f *File) AddRequire(path, vers string) error {
+ need := true
+ for _, r := range f.Require {
+ if r.Mod.Path == path {
+ if need {
+ r.Mod.Version = vers
+ f.Syntax.updateLine(r.Syntax, "require", AutoQuote(path), vers)
+ need = false
+ } else {
+ r.Syntax.markRemoved()
+ *r = Require{}
+ }
+ }
+ }
+
+ if need {
+ f.AddNewRequire(path, vers, false)
+ }
+ return nil
+}
+
+// AddNewRequire adds a new require line for path at version vers at the end of
+// the last require block, regardless of any existing require lines for path.
+func (f *File) AddNewRequire(path, vers string, indirect bool) {
+ line := f.Syntax.addLine(nil, "require", AutoQuote(path), vers)
+ r := &Require{
+ Mod: module.Version{Path: path, Version: vers},
+ Syntax: line,
+ }
+ r.setIndirect(indirect)
+ f.Require = append(f.Require, r)
+}
+
+// SetRequire updates the requirements of f to contain exactly req, preserving
+// the existing block structure and line comment contents (except for 'indirect'
+// markings) for the first requirement on each named module path.
+//
+// The Syntax field is ignored for the requirements in req.
+//
+// Any requirements not already present in the file are added to the block
+// containing the last require line.
+//
+// The requirements in req must specify at most one distinct version for each
+// module path.
+//
+// If any existing requirements may be removed, the caller should call Cleanup
+// after all edits are complete.
+func (f *File) SetRequire(req []*Require) {
+ type elem struct {
+ version string
+ indirect bool
+ }
+ need := make(map[string]elem)
+ for _, r := range req {
+ if prev, dup := need[r.Mod.Path]; dup && prev.version != r.Mod.Version {
+ panic(fmt.Errorf("SetRequire called with conflicting versions for path %s (%s and %s)", r.Mod.Path, prev.version, r.Mod.Version))
+ }
+ need[r.Mod.Path] = elem{r.Mod.Version, r.Indirect}
+ }
+
+ // Update or delete the existing Require entries to preserve
+ // only the first for each module path in req.
+ for _, r := range f.Require {
+ e, ok := need[r.Mod.Path]
+ if ok {
+ r.setVersion(e.version)
+ r.setIndirect(e.indirect)
+ } else {
+ r.markRemoved()
+ }
+ delete(need, r.Mod.Path)
+ }
+
+ // Add new entries in the last block of the file for any paths that weren't
+ // already present.
+ //
+ // This step is nondeterministic, but the final result will be deterministic
+ // because we will sort the block.
+ for path, e := range need {
+ f.AddNewRequire(path, e.version, e.indirect)
+ }
+
+ f.SortBlocks()
+}
+
+// SetRequireSeparateIndirect updates the requirements of f to contain the given
+// requirements. Comment contents (except for 'indirect' markings) are retained
+// from the first existing requirement for each module path. Like SetRequire,
+// SetRequireSeparateIndirect adds requirements for new paths in req,
+// updates the version and "// indirect" comment on existing requirements,
+// and deletes requirements on paths not in req. Existing duplicate requirements
+// are deleted.
+//
+// As its name suggests, SetRequireSeparateIndirect puts direct and indirect
+// requirements into two separate blocks, one containing only direct
+// requirements, and the other containing only indirect requirements.
+// SetRequireSeparateIndirect may move requirements between these two blocks
+// when their indirect markings change. However, SetRequireSeparateIndirect
+// won't move requirements from other blocks, especially blocks with comments.
+//
+// If the file initially has one uncommented block of requirements,
+// SetRequireSeparateIndirect will split it into a direct-only and indirect-only
+// block. This aids in the transition to separate blocks.
+func (f *File) SetRequireSeparateIndirect(req []*Require) {
+ // hasComments returns whether a line or block has comments
+ // other than "indirect".
+ hasComments := func(c Comments) bool {
+ return len(c.Before) > 0 || len(c.After) > 0 || len(c.Suffix) > 1 ||
+ (len(c.Suffix) == 1 &&
+ strings.TrimSpace(strings.TrimPrefix(c.Suffix[0].Token, string(slashSlash))) != "indirect")
+ }
+
+ // moveReq adds r to block. If r was in another block, moveReq deletes
+ // it from that block and transfers its comments.
+ moveReq := func(r *Require, block *LineBlock) {
+ var line *Line
+ if r.Syntax == nil {
+ line = &Line{Token: []string{AutoQuote(r.Mod.Path), r.Mod.Version}}
+ r.Syntax = line
+ if r.Indirect {
+ r.setIndirect(true)
+ }
+ } else {
+ line = new(Line)
+ *line = *r.Syntax
+ if !line.InBlock && len(line.Token) > 0 && line.Token[0] == "require" {
+ line.Token = line.Token[1:]
+ }
+ r.Syntax.Token = nil // Cleanup will delete the old line.
+ r.Syntax = line
+ }
+ line.InBlock = true
+ block.Line = append(block.Line, line)
+ }
+
+ // Examine existing require lines and blocks.
+ var (
+ // We may insert new requirements into the last uncommented
+ // direct-only and indirect-only blocks. We may also move requirements
+ // to the opposite block if their indirect markings change.
+ lastDirectIndex = -1
+ lastIndirectIndex = -1
+
+ // If there are no direct-only or indirect-only blocks, a new block may
+ // be inserted after the last require line or block.
+ lastRequireIndex = -1
+
+ // If there's only one require line or block, and it's uncommented,
+ // we'll move its requirements to the direct-only or indirect-only blocks.
+ requireLineOrBlockCount = 0
+
+ // Track the block each requirement belongs to (if any) so we can
+ // move them later.
+ lineToBlock = make(map[*Line]*LineBlock)
+ )
+ for i, stmt := range f.Syntax.Stmt {
+ switch stmt := stmt.(type) {
+ case *Line:
+ if len(stmt.Token) == 0 || stmt.Token[0] != "require" {
+ continue
+ }
+ lastRequireIndex = i
+ requireLineOrBlockCount++
+ if !hasComments(stmt.Comments) {
+ if isIndirect(stmt) {
+ lastIndirectIndex = i
+ } else {
+ lastDirectIndex = i
+ }
+ }
+
+ case *LineBlock:
+ if len(stmt.Token) == 0 || stmt.Token[0] != "require" {
+ continue
+ }
+ lastRequireIndex = i
+ requireLineOrBlockCount++
+ allDirect := len(stmt.Line) > 0 && !hasComments(stmt.Comments)
+ allIndirect := len(stmt.Line) > 0 && !hasComments(stmt.Comments)
+ for _, line := range stmt.Line {
+ lineToBlock[line] = stmt
+ if hasComments(line.Comments) {
+ allDirect = false
+ allIndirect = false
+ } else if isIndirect(line) {
+ allDirect = false
+ } else {
+ allIndirect = false
+ }
+ }
+ if allDirect {
+ lastDirectIndex = i
+ }
+ if allIndirect {
+ lastIndirectIndex = i
+ }
+ }
+ }
+
+ oneFlatUncommentedBlock := requireLineOrBlockCount == 1 &&
+ !hasComments(*f.Syntax.Stmt[lastRequireIndex].Comment())
+
+ // Create direct and indirect blocks if needed. Convert lines into blocks
+ // if needed. If we end up with an empty block or a one-line block,
+ // Cleanup will delete it or convert it to a line later.
+ insertBlock := func(i int) *LineBlock {
+ block := &LineBlock{Token: []string{"require"}}
+ f.Syntax.Stmt = append(f.Syntax.Stmt, nil)
+ copy(f.Syntax.Stmt[i+1:], f.Syntax.Stmt[i:])
+ f.Syntax.Stmt[i] = block
+ return block
+ }
+
+ ensureBlock := func(i int) *LineBlock {
+ switch stmt := f.Syntax.Stmt[i].(type) {
+ case *LineBlock:
+ return stmt
+ case *Line:
+ block := &LineBlock{
+ Token: []string{"require"},
+ Line: []*Line{stmt},
+ }
+ stmt.Token = stmt.Token[1:] // remove "require"
+ stmt.InBlock = true
+ f.Syntax.Stmt[i] = block
+ return block
+ default:
+ panic(fmt.Sprintf("unexpected statement: %v", stmt))
+ }
+ }
+
+ var lastDirectBlock *LineBlock
+ if lastDirectIndex < 0 {
+ if lastIndirectIndex >= 0 {
+ lastDirectIndex = lastIndirectIndex
+ lastIndirectIndex++
+ } else if lastRequireIndex >= 0 {
+ lastDirectIndex = lastRequireIndex + 1
+ } else {
+ lastDirectIndex = len(f.Syntax.Stmt)
+ }
+ lastDirectBlock = insertBlock(lastDirectIndex)
+ } else {
+ lastDirectBlock = ensureBlock(lastDirectIndex)
+ }
+
+ var lastIndirectBlock *LineBlock
+ if lastIndirectIndex < 0 {
+ lastIndirectIndex = lastDirectIndex + 1
+ lastIndirectBlock = insertBlock(lastIndirectIndex)
+ } else {
+ lastIndirectBlock = ensureBlock(lastIndirectIndex)
+ }
+
+ // Delete requirements we don't want anymore.
+ // Update versions and indirect comments on requirements we want to keep.
+ // If a requirement is in last{Direct,Indirect}Block with the wrong
+ // indirect marking after this, or if the requirement is in an single
+ // uncommented mixed block (oneFlatUncommentedBlock), move it to the
+ // correct block.
+ //
+ // Some blocks may be empty after this. Cleanup will remove them.
+ need := make(map[string]*Require)
+ for _, r := range req {
+ need[r.Mod.Path] = r
+ }
+ have := make(map[string]*Require)
+ for _, r := range f.Require {
+ path := r.Mod.Path
+ if need[path] == nil || have[path] != nil {
+ // Requirement not needed, or duplicate requirement. Delete.
+ r.markRemoved()
+ continue
+ }
+ have[r.Mod.Path] = r
+ r.setVersion(need[path].Mod.Version)
+ r.setIndirect(need[path].Indirect)
+ if need[path].Indirect &&
+ (oneFlatUncommentedBlock || lineToBlock[r.Syntax] == lastDirectBlock) {
+ moveReq(r, lastIndirectBlock)
+ } else if !need[path].Indirect &&
+ (oneFlatUncommentedBlock || lineToBlock[r.Syntax] == lastIndirectBlock) {
+ moveReq(r, lastDirectBlock)
+ }
+ }
+
+ // Add new requirements.
+ for path, r := range need {
+ if have[path] == nil {
+ if r.Indirect {
+ moveReq(r, lastIndirectBlock)
+ } else {
+ moveReq(r, lastDirectBlock)
+ }
+ f.Require = append(f.Require, r)
+ }
+ }
+
+ f.SortBlocks()
+}
+
+// DropRequire removes all require lines for the module with the given path.
+// Matching entries are cleared in place; call Cleanup afterwards to compact
+// f.Require and the syntax tree.
+func (f *File) DropRequire(path string) error {
+	for _, r := range f.Require {
+		if r.Mod.Path == path {
+			r.Syntax.markRemoved()
+			*r = Require{}
+		}
+	}
+	return nil
+}
+
+// AddExclude adds an exclude statement to the mod file. Errors if the provided
+// version is not a canonical version string.
+func (f *File) AddExclude(path, vers string) error {
+	if err := checkCanonicalVersion(path, vers); err != nil {
+		return err
+	}
+
+	var hint *Line
+	for _, x := range f.Exclude {
+		if x.Mod.Path == path && x.Mod.Version == vers {
+			// Already excluded; nothing to do.
+			return nil
+		}
+		if x.Mod.Path == path {
+			// Remember an existing exclude of the same module so the new
+			// line is placed next to it.
+			hint = x.Syntax
+		}
+	}
+
+	f.Exclude = append(f.Exclude, &Exclude{Mod: module.Version{Path: path, Version: vers}, Syntax: f.Syntax.addLine(hint, "exclude", AutoQuote(path), vers)})
+	return nil
+}
+
+// DropExclude removes the exclude statement matching path and vers, if any.
+// Cleared entries are compacted by a later call to Cleanup.
+func (f *File) DropExclude(path, vers string) error {
+	for _, x := range f.Exclude {
+		if x.Mod.Path == path && x.Mod.Version == vers {
+			x.Syntax.markRemoved()
+			*x = Exclude{}
+		}
+	}
+	return nil
+}
+
+// AddReplace adds or updates a replace directive mapping oldPath[@oldVers]
+// to newPath[@newVers], delegating to the implementation shared with go.work.
+func (f *File) AddReplace(oldPath, oldVers, newPath, newVers string) error {
+	return addReplace(f.Syntax, &f.Replace, oldPath, oldVers, newPath, newVers)
+}
+
+// addReplace implements AddReplace for both go.mod and go.work files: it
+// updates an existing replacement of oldPath (at oldVers, or any version if
+// oldVers is empty), clears duplicate replacements of the same module, or
+// appends a new replace line when none matched.
+func addReplace(syntax *FileSyntax, replace *[]*Replace, oldPath, oldVers, newPath, newVers string) error {
+	need := true
+	old := module.Version{Path: oldPath, Version: oldVers}
+	new := module.Version{Path: newPath, Version: newVers}
+	// Build the directive tokens; version tokens are omitted when empty.
+	tokens := []string{"replace", AutoQuote(oldPath)}
+	if oldVers != "" {
+		tokens = append(tokens, oldVers)
+	}
+	tokens = append(tokens, "=>", AutoQuote(newPath))
+	if newVers != "" {
+		tokens = append(tokens, newVers)
+	}
+
+	var hint *Line
+	for _, r := range *replace {
+		if r.Old.Path == oldPath && (oldVers == "" || r.Old.Version == oldVers) {
+			if need {
+				// Found replacement for old; update to use new.
+				r.New = new
+				syntax.updateLine(r.Syntax, tokens...)
+				need = false
+				continue
+			}
+			// Already added; delete other replacements for same.
+			r.Syntax.markRemoved()
+			*r = Replace{}
+		}
+		if r.Old.Path == oldPath {
+			// Keep any replace of the same module as a placement hint.
+			hint = r.Syntax
+		}
+	}
+	if need {
+		*replace = append(*replace, &Replace{Old: old, New: new, Syntax: syntax.addLine(hint, tokens...)})
+	}
+	return nil
+}
+
+// DropReplace removes the replace directive matching oldPath and oldVers,
+// if any. Cleared entries are compacted by a later call to Cleanup.
+func (f *File) DropReplace(oldPath, oldVers string) error {
+	for _, r := range f.Replace {
+		if r.Old.Path == oldPath && r.Old.Version == oldVers {
+			r.Syntax.markRemoved()
+			*r = Replace{}
+		}
+	}
+	return nil
+}
+
+// AddRetract adds a retract statement to the mod file. Errors if the provided
+// version interval does not consist of canonical version strings.
+func (f *File) AddRetract(vi VersionInterval, rationale string) error {
+	var path string
+	if f.Module != nil {
+		path = f.Module.Mod.Path
+	}
+	if err := checkCanonicalVersion(path, vi.High); err != nil {
+		return err
+	}
+	if err := checkCanonicalVersion(path, vi.Low); err != nil {
+		return err
+	}
+
+	r := &Retract{
+		VersionInterval: vi,
+	}
+	if vi.Low == vi.High {
+		// A single version is written as "retract v1.2.3", not an interval.
+		r.Syntax = f.Syntax.addLine(nil, "retract", AutoQuote(vi.Low))
+	} else {
+		r.Syntax = f.Syntax.addLine(nil, "retract", "[", AutoQuote(vi.Low), ",", AutoQuote(vi.High), "]")
+	}
+	if rationale != "" {
+		// Attach the rationale as comment lines immediately above the directive.
+		for _, line := range strings.Split(rationale, "\n") {
+			com := Comment{Token: "// " + line}
+			r.Syntax.Comment().Before = append(r.Syntax.Comment().Before, com)
+		}
+	}
+	// NOTE(review): r is not appended to f.Retract here — confirm whether
+	// callers are expected to re-parse, or whether the slice should be updated
+	// (compare AddExclude, which does append).
+	return nil
+}
+
+// DropRetract removes retract statements whose version interval equals vi
+// exactly. Cleared entries are compacted by a later call to Cleanup.
+func (f *File) DropRetract(vi VersionInterval) error {
+	for _, r := range f.Retract {
+		if r.VersionInterval == vi {
+			r.Syntax.markRemoved()
+			*r = Retract{}
+		}
+	}
+	return nil
+}
+
+// SortBlocks sorts the lines within each block statement of the file:
+// retract blocks by descending version interval, all other blocks
+// lexicographically by token.
+func (f *File) SortBlocks() {
+	f.removeDups() // otherwise sorting is unsafe
+
+	for _, stmt := range f.Syntax.Stmt {
+		block, ok := stmt.(*LineBlock)
+		if !ok {
+			// Single-line statements are left where they are.
+			continue
+		}
+		less := lineLess
+		if block.Token[0] == "retract" {
+			less = lineRetractLess
+		}
+		sort.SliceStable(block.Line, func(i, j int) bool {
+			return less(block.Line[i], block.Line[j])
+		})
+	}
+}
+
+// removeDups removes duplicate exclude and replace directives.
+//
+// Earlier exclude directives take priority.
+//
+// Later replace directives take priority.
+//
+// require directives are not de-duplicated. That's left up to higher-level
+// logic (MVS).
+//
+// retract directives are not de-duplicated since comments are
+// meaningful, and versions may be retracted multiple times.
+func (f *File) removeDups() {
+	// Shared implementation with WorkFile; go.mod passes both slices.
+	removeDups(f.Syntax, &f.Exclude, &f.Replace)
+}
+
+// removeDups removes duplicate exclude and replace directives from the given
+// syntax tree and slices. exclude may be nil for file types that have no
+// exclude directive (go.work). Killed lines are dropped from both the slices
+// and the syntax tree; blocks left empty are removed entirely.
+func removeDups(syntax *FileSyntax, exclude *[]*Exclude, replace *[]*Replace) {
+	kill := make(map[*Line]bool)
+
+	// Remove duplicate excludes.
+	if exclude != nil {
+		// First pass marks later duplicates; earlier excludes win.
+		haveExclude := make(map[module.Version]bool)
+		for _, x := range *exclude {
+			if haveExclude[x.Mod] {
+				kill[x.Syntax] = true
+				continue
+			}
+			haveExclude[x.Mod] = true
+		}
+		var excl []*Exclude
+		for _, x := range *exclude {
+			if !kill[x.Syntax] {
+				excl = append(excl, x)
+			}
+		}
+		*exclude = excl
+	}
+
+	// Remove duplicate replacements.
+	// Later replacements take priority over earlier ones.
+	haveReplace := make(map[module.Version]bool)
+	for i := len(*replace) - 1; i >= 0; i-- {
+		x := (*replace)[i]
+		if haveReplace[x.Old] {
+			kill[x.Syntax] = true
+			continue
+		}
+		haveReplace[x.Old] = true
+	}
+	var repl []*Replace
+	for _, x := range *replace {
+		if !kill[x.Syntax] {
+			repl = append(repl, x)
+		}
+	}
+	*replace = repl
+
+	// Duplicate require and retract directives are not removed.
+
+	// Drop killed statements from the syntax tree.
+	var stmts []Expr
+	for _, stmt := range syntax.Stmt {
+		switch stmt := stmt.(type) {
+		case *Line:
+			if kill[stmt] {
+				continue
+			}
+		case *LineBlock:
+			// Filter killed lines; drop the whole block if it becomes empty.
+			var lines []*Line
+			for _, line := range stmt.Line {
+				if !kill[line] {
+					lines = append(lines, line)
+				}
+			}
+			stmt.Line = lines
+			if len(lines) == 0 {
+				continue
+			}
+		}
+		stmts = append(stmts, stmt)
+	}
+	syntax.Stmt = stmts
+}
+
+// lineLess returns whether li should be sorted before lj. It sorts
+// lexicographically without assigning any special meaning to tokens.
+func lineLess(li, lj *Line) bool {
+	for k := 0; k < len(li.Token) && k < len(lj.Token); k++ {
+		if li.Token[k] != lj.Token[k] {
+			return li.Token[k] < lj.Token[k]
+		}
+	}
+	// All shared tokens are equal: the shorter line (a strict prefix) sorts first.
+	return len(li.Token) < len(lj.Token)
+}
+
+// lineRetractLess returns whether li should be sorted before lj for lines in
+// a "retract" block. It treats each line as a version interval. Single versions
+// are compared as if they were intervals with the same low and high version.
+// Intervals are sorted in descending order, first by low version, then by
+// high version, using semver.Compare.
+func lineRetractLess(li, lj *Line) bool {
+	// interval normalizes a retract line to a VersionInterval.
+	interval := func(l *Line) VersionInterval {
+		if len(l.Token) == 1 {
+			// Single version: treat as the degenerate interval [v, v].
+			return VersionInterval{Low: l.Token[0], High: l.Token[0]}
+		} else if len(l.Token) == 5 && l.Token[0] == "[" && l.Token[2] == "," && l.Token[4] == "]" {
+			return VersionInterval{Low: l.Token[1], High: l.Token[3]}
+		} else {
+			// Line in unknown format. Treat as an invalid version.
+			return VersionInterval{}
+		}
+	}
+	vii := interval(li)
+	vij := interval(lj)
+	// Descending order: larger low versions sort first; ties break on high.
+	if cmp := semver.Compare(vii.Low, vij.Low); cmp != 0 {
+		return cmp > 0
+	}
+	return semver.Compare(vii.High, vij.High) > 0
+}
+
+// checkCanonicalVersion returns a non-nil error if vers is not a canonical
+// version string or does not match the major version of path.
+//
+// If path is non-empty, the error text suggests a format with a major version
+// corresponding to the path.
+func checkCanonicalVersion(path, vers string) error {
+	_, pathMajor, pathMajorOk := module.SplitPathVersion(path)
+
+	if vers == "" || vers != module.CanonicalVersion(vers) {
+		if pathMajor == "" {
+			return &module.InvalidVersionError{
+				Version: vers,
+				Err: fmt.Errorf("must be of the form v1.2.3"),
+			}
+		}
+		// Suggest a version whose major matches the path's version suffix.
+		return &module.InvalidVersionError{
+			Version: vers,
+			Err: fmt.Errorf("must be of the form %s.2.3", module.PathMajorPrefix(pathMajor)),
+		}
+	}
+
+	if pathMajorOk {
+		if err := module.CheckPathMajor(vers, pathMajor); err != nil {
+			if pathMajor == "" {
+				// In this context, the user probably wrote "v2.3.4" when they meant
+				// "v2.3.4+incompatible". Suggest that instead of "v0 or v1".
+				return &module.InvalidVersionError{
+					Version: vers,
+					Err: fmt.Errorf("should be %s+incompatible (or module %s/%v)", vers, path, semver.Major(vers)),
+				}
+			}
+			return err
+		}
+	}
+
+	return nil
+}
diff --git a/vendor/golang.org/x/mod/modfile/work.go b/vendor/golang.org/x/mod/modfile/work.go
new file mode 100644
index 000000000..0c0e52152
--- /dev/null
+++ b/vendor/golang.org/x/mod/modfile/work.go
@@ -0,0 +1,234 @@
+// Copyright 2021 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+package modfile
+
+import (
+ "fmt"
+ "sort"
+ "strings"
+)
+
+// A WorkFile is the parsed, interpreted form of a go.work file.
+type WorkFile struct {
+	Go *Go // go directive, if present
+	Use []*Use // use directives
+	Replace []*Replace // replace directives
+
+	Syntax *FileSyntax // underlying syntax tree; edit methods keep it in sync
+}
+
+// A Use is a single directory statement.
+type Use struct {
+	Path string // Use path of module.
+	ModulePath string // Module path in the comment.
+	Syntax *Line // Line in the syntax tree backing this directive.
+}
+
+// ParseWork parses and returns a go.work file.
+//
+// file is the name of the file, used in positions and errors.
+//
+// data is the content of the file.
+//
+// fix is an optional function that canonicalizes module versions.
+// If fix is nil, all module versions must be canonical (module.CanonicalVersion
+// must return the same string).
+func ParseWork(file string, data []byte, fix VersionFixer) (*WorkFile, error) {
+	fs, err := parse(file, data)
+	if err != nil {
+		return nil, err
+	}
+	f := &WorkFile{
+		Syntax: fs,
+	}
+	var errs ErrorList
+
+	for _, x := range fs.Stmt {
+		switch x := x.(type) {
+		case *Line:
+			f.add(&errs, x, x.Token[0], x.Token[1:], fix)
+
+		case *LineBlock:
+			// A block statement has exactly one keyword token, e.g. "use (".
+			if len(x.Token) > 1 {
+				errs = append(errs, Error{
+					Filename: file,
+					Pos: x.Start,
+					Err: fmt.Errorf("unknown block type: %s", strings.Join(x.Token, " ")),
+				})
+				continue
+			}
+			switch x.Token[0] {
+			default:
+				errs = append(errs, Error{
+					Filename: file,
+					Pos: x.Start,
+					Err: fmt.Errorf("unknown block type: %s", strings.Join(x.Token, " ")),
+				})
+				continue
+			case "use", "replace":
+				// Each line inside the block is parsed as its own directive.
+				for _, l := range x.Line {
+					f.add(&errs, l, x.Token[0], l.Token, fix)
+				}
+			}
+		}
+	}
+
+	if len(errs) > 0 {
+		return nil, errs
+	}
+	return f, nil
+}
+
+// Cleanup cleans up the file f after any edit operations.
+// To avoid quadratic behavior, modifications like DropRequire
+// clear the entry but do not remove it from the slice.
+// Cleanup cleans out all the cleared entries.
+func (f *WorkFile) Cleanup() {
+	// Compact f.Use in place; a cleared entry has an empty Path.
+	w := 0
+	for _, r := range f.Use {
+		if r.Path != "" {
+			f.Use[w] = r
+			w++
+		}
+	}
+	f.Use = f.Use[:w]
+
+	// Same for f.Replace; a cleared entry has an empty Old.Path.
+	w = 0
+	for _, r := range f.Replace {
+		if r.Old.Path != "" {
+			f.Replace[w] = r
+			w++
+		}
+	}
+	f.Replace = f.Replace[:w]
+
+	f.Syntax.Cleanup()
+}
+
+// AddGoStmt sets the go directive to the given language version, adding a
+// new "go" line if the file does not already have one. Errors if version is
+// not a valid go version string.
+func (f *WorkFile) AddGoStmt(version string) error {
+	if !GoVersionRE.MatchString(version) {
+		return fmt.Errorf("invalid language version string %q", version)
+	}
+	if f.Go == nil {
+		stmt := &Line{Token: []string{"go", version}}
+		f.Go = &Go{
+			Version: version,
+			Syntax: stmt,
+		}
+		// Find the first non-comment-only block and add the go statement
+		// before it. That will keep file comments at the top.
+		i := 0
+		for i = 0; i < len(f.Syntax.Stmt); i++ {
+			if _, ok := f.Syntax.Stmt[i].(*CommentBlock); !ok {
+				break
+			}
+		}
+		// Full-slice expression forces a copy so the insert cannot clobber.
+		f.Syntax.Stmt = append(append(f.Syntax.Stmt[:i:i], stmt), f.Syntax.Stmt[i:]...)
+	} else {
+		f.Go.Version = version
+		f.Syntax.updateLine(f.Go.Syntax, "go", version)
+	}
+	return nil
+}
+
+// AddUse updates the use directive for diskPath to record modulePath,
+// clearing duplicate lines for the same path, or adds a new directive if
+// none exists.
+func (f *WorkFile) AddUse(diskPath, modulePath string) error {
+	need := true
+	for _, d := range f.Use {
+		if d.Path == diskPath {
+			if need {
+				d.ModulePath = modulePath
+				f.Syntax.updateLine(d.Syntax, "use", AutoQuote(diskPath))
+				need = false
+			} else {
+				// Duplicate use line; clear it (Cleanup compacts later).
+				d.Syntax.markRemoved()
+				*d = Use{}
+			}
+		}
+	}
+
+	if need {
+		f.AddNewUse(diskPath, modulePath)
+	}
+	return nil
+}
+
+// AddNewUse unconditionally appends a new use line for diskPath; it does not
+// check for an existing directive (see AddUse for the updating variant).
+func (f *WorkFile) AddNewUse(diskPath, modulePath string) {
+	line := f.Syntax.addLine(nil, "use", AutoQuote(diskPath))
+	f.Use = append(f.Use, &Use{Path: diskPath, ModulePath: modulePath, Syntax: line})
+}
+
+// SetUse updates the use directives so they match dirs: entries not in dirs
+// are cleared, and entries in dirs are added.
+func (f *WorkFile) SetUse(dirs []*Use) {
+	need := make(map[string]string)
+	for _, d := range dirs {
+		need[d.Path] = d.ModulePath
+	}
+
+	// Update entries that are still wanted; clear the rest.
+	for _, d := range f.Use {
+		if modulePath, ok := need[d.Path]; ok {
+			d.ModulePath = modulePath
+		} else {
+			d.Syntax.markRemoved()
+			*d = Use{}
+		}
+	}
+
+	// TODO(#45713): Add module path to comment.
+
+	// NOTE(review): this appends every wanted path via AddNewUse, including
+	// paths already updated above — confirm whether duplicate use lines are
+	// intended here (removeDups only collapses replace directives).
+	for diskPath, modulePath := range need {
+		f.AddNewUse(diskPath, modulePath)
+	}
+	f.SortBlocks()
+}
+
+// DropUse removes the use directive for the given disk path, if present.
+// Cleared entries are compacted by a later call to Cleanup.
+func (f *WorkFile) DropUse(path string) error {
+	for _, d := range f.Use {
+		if d.Path == path {
+			d.Syntax.markRemoved()
+			*d = Use{}
+		}
+	}
+	return nil
+}
+
+// AddReplace adds or updates a replace directive in the go.work file,
+// delegating to the implementation shared with go.mod files.
+func (f *WorkFile) AddReplace(oldPath, oldVers, newPath, newVers string) error {
+	return addReplace(f.Syntax, &f.Replace, oldPath, oldVers, newPath, newVers)
+}
+
+// DropReplace removes the replace directive matching oldPath and oldVers,
+// if any. Cleared entries are compacted by a later call to Cleanup.
+func (f *WorkFile) DropReplace(oldPath, oldVers string) error {
+	for _, r := range f.Replace {
+		if r.Old.Path == oldPath && r.Old.Version == oldVers {
+			r.Syntax.markRemoved()
+			*r = Replace{}
+		}
+	}
+	return nil
+}
+
+// SortBlocks sorts the lines within each block statement of the go.work file
+// lexicographically by token (go.work files have no retract blocks, so no
+// special ordering is needed).
+func (f *WorkFile) SortBlocks() {
+	f.removeDups() // otherwise sorting is unsafe
+
+	for _, stmt := range f.Syntax.Stmt {
+		block, ok := stmt.(*LineBlock)
+		if !ok {
+			continue
+		}
+		sort.SliceStable(block.Line, func(i, j int) bool {
+			return lineLess(block.Line[i], block.Line[j])
+		})
+	}
+}
+
+// removeDups removes duplicate replace directives.
+//
+// Later replace directives take priority.
+//
+// require directives are not de-duplicated. That's left up to higher-level
+// logic (MVS).
+//
+// retract directives are not de-duplicated since comments are
+// meaningful, and versions may be retracted multiple times.
+func (f *WorkFile) removeDups() {
+	// exclude is nil: go.work files have no exclude directives.
+	removeDups(f.Syntax, nil, &f.Replace)
+}
diff --git a/vendor/golang.org/x/mod/module/module.go b/vendor/golang.org/x/mod/module/module.go
index c26d1d29e..e9dec6e61 100644
--- a/vendor/golang.org/x/mod/module/module.go
+++ b/vendor/golang.org/x/mod/module/module.go
@@ -96,13 +96,13 @@ package module
// Changes to the semantics in this file require approval from rsc.
import (
+ "errors"
"fmt"
"path"
"sort"
"strings"
"unicode"
"unicode/utf8"
- "errors"
"golang.org/x/mod/semver"
)
@@ -258,7 +258,7 @@ func modPathOK(r rune) bool {
return false
}
-// modPathOK reports whether r can appear in a package import path element.
+// importPathOK reports whether r can appear in a package import path element.
//
// Import paths are intermediate between module paths and file paths: we allow
// disallow characters that would be confusing or ambiguous as arguments to
diff --git a/vendor/golang.org/x/net/bpf/vm_instructions.go b/vendor/golang.org/x/net/bpf/vm_instructions.go
index cf8947c33..0aa307c06 100644
--- a/vendor/golang.org/x/net/bpf/vm_instructions.go
+++ b/vendor/golang.org/x/net/bpf/vm_instructions.go
@@ -94,7 +94,7 @@ func jumpIfCommon(cond JumpTest, skipTrue, skipFalse uint8, regA uint32, value u
func loadAbsolute(ins LoadAbsolute, in []byte) (uint32, bool) {
offset := int(ins.Off)
- size := int(ins.Size)
+ size := ins.Size
return loadCommon(in, offset, size)
}
@@ -121,7 +121,7 @@ func loadExtension(ins LoadExtension, in []byte) uint32 {
func loadIndirect(ins LoadIndirect, in []byte, regX uint32) (uint32, bool) {
offset := int(ins.Off) + int(regX)
- size := int(ins.Size)
+ size := ins.Size
return loadCommon(in, offset, size)
}
diff --git a/vendor/golang.org/x/net/context/go17.go b/vendor/golang.org/x/net/context/go17.go
index 0a54bdbcc..2cb9c408f 100644
--- a/vendor/golang.org/x/net/context/go17.go
+++ b/vendor/golang.org/x/net/context/go17.go
@@ -32,7 +32,7 @@ var DeadlineExceeded = context.DeadlineExceeded
// call cancel as soon as the operations running in this Context complete.
func WithCancel(parent Context) (ctx Context, cancel CancelFunc) {
ctx, f := context.WithCancel(parent)
- return ctx, CancelFunc(f)
+ return ctx, f
}
// WithDeadline returns a copy of the parent context with the deadline adjusted
@@ -46,7 +46,7 @@ func WithCancel(parent Context) (ctx Context, cancel CancelFunc) {
// call cancel as soon as the operations running in this Context complete.
func WithDeadline(parent Context, deadline time.Time) (Context, CancelFunc) {
ctx, f := context.WithDeadline(parent, deadline)
- return ctx, CancelFunc(f)
+ return ctx, f
}
// WithTimeout returns WithDeadline(parent, time.Now().Add(timeout)).
diff --git a/vendor/golang.org/x/net/html/doc.go b/vendor/golang.org/x/net/html/doc.go
index 822ed42a0..2466ae3d9 100644
--- a/vendor/golang.org/x/net/html/doc.go
+++ b/vendor/golang.org/x/net/html/doc.go
@@ -92,6 +92,27 @@ example, to process each anchor node in depth-first order:
The relevant specifications include:
https://html.spec.whatwg.org/multipage/syntax.html and
https://html.spec.whatwg.org/multipage/syntax.html#tokenization
+
+# Security Considerations
+
+Care should be taken when parsing and interpreting HTML, whether full documents
+or fragments, within the framework of the HTML specification, especially with
+regard to untrusted inputs.
+
+This package provides both a tokenizer and a parser, which implement the
+tokenization, and tokenization and tree construction stages of the WHATWG HTML
+parsing specification respectively. While the tokenizer parses and normalizes
+individual HTML tokens, only the parser constructs the DOM tree from the
+tokenized HTML, as described in the tree construction stage of the
+specification, dynamically modifying or extending the document's DOM tree.
+
+If your use case requires semantically well-formed HTML documents, as defined by
+the WHATWG specification, the parser should be used rather than the tokenizer.
+
+In security contexts, if trust decisions are being made using the tokenized or
+parsed content, the input must be re-serialized (for instance by using Render or
+Token.String) in order for those trust decisions to hold, as the process of
+tokenization or parsing may alter the content.
*/
package html // import "golang.org/x/net/html"
diff --git a/vendor/golang.org/x/net/html/escape.go b/vendor/golang.org/x/net/html/escape.go
index d85613962..04c6bec21 100644
--- a/vendor/golang.org/x/net/html/escape.go
+++ b/vendor/golang.org/x/net/html/escape.go
@@ -193,6 +193,87 @@ func lower(b []byte) []byte {
return b
}
+// escapeComment is like func escape but escapes its input bytes less often.
+// Per https://github.com/golang/go/issues/58246 some HTML comments are (1)
+// meaningful and (2) contain angle brackets that we'd like to avoid escaping
+// unless we have to.
+//
+// "We have to" includes the '&' byte, since that introduces other escapes.
+//
+// It also includes those bytes (not including EOF) that would otherwise end
+// the comment. Per the summary table at the bottom of comment_test.go, this is
+// the '>' byte that, per above, we'd like to avoid escaping unless we have to.
+//
+// Studying the summary table (and T actions in its '>' column) closely, we
+// only need to escape in states 43, 44, 49, 51 and 52. State 43 is at the
+// start of the comment data. State 52 is after a '!'. The other three states
+// are after a '-'.
+//
+// Our algorithm is thus to escape every '&' and to escape '>' if and only if:
+// - The '>' is after a '!' or '-' (in the unescaped data) or
+//   - The '>' is at the start of the comment data (after the opening "<!--").
diff --git a/vendor/golang.org/x/net/html/token.go b/vendor/golang.org/x/net/html/token.go
index be3c75414..5c2a1f4ef 100644
--- a/vendor/golang.org/x/net/html/token.go
+++ b/vendor/golang.org/x/net/html/token.go
@@ -110,7 +110,7 @@ func (t Token) String() string {
case SelfClosingTagToken:
return "<" + t.tagString() + "/>"
case CommentToken:
- return ""
+ return ""
case DoctypeToken:
return ""
}
@@ -598,6 +598,11 @@ scriptDataDoubleEscapeEnd:
// readComment reads the next comment token starting with "")
return
}
@@ -628,17 +632,50 @@ func (z *Tokenizer) readComment() {
if dashCount >= 2 {
c = z.readByte()
if z.err != nil {
- z.data.end = z.raw.end
+ z.data.end = z.calculateAbruptCommentDataEnd()
return
- }
- if c == '>' {
+ } else if c == '>' {
z.data.end = z.raw.end - len("--!>")
return
+ } else if c == '-' {
+ dashCount = 1
+ beginning = false
+ continue
}
}
}
dashCount = 0
+ beginning = false
+ }
+}
+
+func (z *Tokenizer) calculateAbruptCommentDataEnd() int {
+ raw := z.Raw()
+ const prefixLen = len("